HHH-12177 - Drop hibernate-infinispan module
parent 8e25e7f350
commit 25854433ac
@@ -455,7 +455,7 @@ subprojects { subProject ->
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Publishing
if ( !subProject.name.equals( 'documentation' ) ) {
if ( !subProject.name.equals( 'documentation' ) || !subProject.name.equals( 'hibernate-infinispan' ) ) {
// do not configure publishing for the documentation module
publishing {
publications {
@@ -4,111 +4,45 @@
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
configurations {
|
||||
all*.exclude group: 'org.jboss.logging', module: 'jboss-logging-spi'
|
||||
}
|
||||
|
||||
if ( project.hasProperty( 'overrideInfinispanVersionForTesting' ) ) {
|
||||
println 'Overriding Infinispan version to test vs Infinispan version: ' + overrideInfinispanVersionForTesting
|
||||
configurations.testRuntime.resolutionStrategy.force 'org.infinispan:infinispan-core:' + overrideInfinispanVersionForTesting
|
||||
}
|
||||
|
||||
apply plugin: 'java'
|
||||
|
||||
dependencies {
|
||||
compile project( ':hibernate-core' )
|
||||
compile( libraries.infinispan )
|
||||
|
||||
testCompile project( ':hibernate-testing' )
|
||||
testCompile( libraries.infinispan_test )
|
||||
testCompile( libraries.jboss_common_core )
|
||||
testCompile( libraries.jnp_client )
|
||||
testCompile( libraries.jnp_server )
|
||||
testCompile( libraries.mockito )
|
||||
testCompile( libraries.mockito_inline )
|
||||
testCompile ('mysql:mysql-connector-java:5.1.17')
|
||||
|
||||
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
// Java 9 ftw!
|
||||
if ( JavaVersion.current().isJava9Compatible() ) {
|
||||
// The JDK used to run Gradle is Java 9+, and we assume that that is the same
|
||||
// JDK for executing tasks
|
||||
compile( 'com.sun.xml.bind:jaxb-impl:2.2.11' )
|
||||
compile( 'org.glassfish.jaxb:jaxb-xjc:2.2.11' )
|
||||
compile( 'org.jvnet.jaxb2_commons:jaxb2-basics:0.11.0' )
|
||||
compile( 'org.jvnet.jaxb2_commons:jaxb2-basics-ant:0.11.0' )
|
||||
compile( 'javax:javaee-api:7.0' )
|
||||
|
||||
testCompile( 'com.sun.xml.bind:jaxb-impl:2.2.11' )
|
||||
testCompile( 'org.glassfish.jaxb:jaxb-xjc:2.2.11' )
|
||||
testCompile( 'org.jvnet.jaxb2_commons:jaxb2-basics:0.11.0' )
|
||||
testCompile( 'org.jvnet.jaxb2_commons:jaxb2-basics-ant:0.11.0' )
|
||||
testCompile( 'javax:javaee-api:7.0' )
|
||||
|
||||
testRuntime( 'com.sun.xml.bind:jaxb-impl:2.2.11' )
|
||||
testRuntime( 'org.glassfish.jaxb:jaxb-xjc:2.2.11' )
|
||||
testRuntime( 'org.jvnet.jaxb2_commons:jaxb2-basics:0.11.0' )
|
||||
testRuntime( 'org.jvnet.jaxb2_commons:jaxb2-basics-ant:0.11.0' )
|
||||
testRuntime( 'javax:javaee-api:7.0' )
|
||||
}
|
||||
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
}
|
||||
|
||||
mavenPom {
|
||||
name = 'Hibernate/Infinispan Integration'
|
||||
description = 'Integration for Infinispan into Hibernate as a second-level caching service'
|
||||
name = '(deprecated - use org.infinispan:infinispan-hibernate-cache instead) Hibernate/Infinispan Integration'
|
||||
description = '(deprecated - use org.infinispan:infinispan-hibernate-cache instead) Integration for Infinispan into Hibernate as a second-level caching service'
|
||||
}
|
||||
|
||||
def osgiDescription() {
|
||||
return mavenPom.description
|
||||
}
|
||||
|
||||
classes.doLast {
|
||||
javaexec {
|
||||
classpath = project.sourceSets.main.runtimeClasspath
|
||||
main = "org.infinispan.factories.components.ComponentMetadataPersister"
|
||||
args = [
|
||||
project.sourceSets.main.output.classesDir,
|
||||
project.sourceSets.main.output.resourcesDir.toPath().resolve("hibernate-infinispan-component-metadata.dat").toString()
|
||||
].toList()
|
||||
standardOutput = { def f = File.createTempFile('metadata-log', null ); f.deleteOnExit(); f.newOutputStream() }()
|
||||
}
|
||||
jar {
|
||||
// The OSGi JAR manifest support does not like a non-existent classes dir,
|
||||
// so make sure we don't use the OSGi one :)
|
||||
manifest = null
|
||||
}
|
||||
|
||||
test {
|
||||
systemProperties['java.net.preferIPv4Stack'] = true
|
||||
systemProperties['jgroups.ping.timeout'] = 500
|
||||
systemProperties['jgroups.ping.num_initial_members'] = 1
|
||||
systemProperties['jgroups.udp.enable_bundling'] = false
|
||||
systemProperties['jgroups.bind_addr'] = 'localhost'
|
||||
// Use Infinispan's test JGroups stack that uses TEST_PING
|
||||
systemProperties['hibernate.cache.infinispan.jgroups_cfg'] = '2lc-test-tcp.xml'
|
||||
if (project.hasProperty('log4jconfig')) {
|
||||
systemProperties['log4j.configuration'] = log4jconfig
|
||||
}
|
||||
enabled = project.hasProperty('testInfinispan')
|
||||
if (project.hasProperty('debugInfinispan')) {
|
||||
jvmArgs += '-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005'
|
||||
enabled = true;
|
||||
}
|
||||
// Without this I have trouble running specific test using --tests switch
|
||||
doFirst {
|
||||
filter.includePatterns.each {
|
||||
include "${it.replaceAll('\\.', "\\${File.separator}")}.class"
|
||||
}
|
||||
filter.setIncludePatterns('*')
|
||||
}
|
||||
sourcesJar {
|
||||
// The OSGi JAR manifest support does not like a non-existent classes dir,
|
||||
// so make sure we don't use the OSGi one :)
|
||||
manifest = null
|
||||
}
|
||||
|
||||
task packageTests(type: Jar) {
|
||||
from sourceSets.test.output
|
||||
classifier = 'tests'
|
||||
def relocatedGroupId = 'org.infinispan'
|
||||
def relocatedArtifactId = 'infinispan-hibernate-cache'
|
||||
def relocatedVersion = '9.1.3.Final'
|
||||
|
||||
publishing {
|
||||
publications {
|
||||
relocationPom( MavenPublication ) {
|
||||
groupId 'org.hibernate'
|
||||
artifactId 'hibernate-infinispan'
|
||||
|
||||
pom.withXml {
|
||||
def relocation = asNode().appendNode( 'distributionManagement' ).appendNode( 'relocation' )
|
||||
relocation.appendNode( 'groupId', relocatedGroupId)
|
||||
relocation.appendNode( 'artifactId', relocatedArtifactId )
|
||||
relocation.appendNode( 'version', relocatedVersion )
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
task sourcesTestJar(type: Jar, dependsOn:classes) {
|
||||
from sourceSets.test.allSource
|
||||
classifier = 'test-sources'
|
||||
}
|
||||
|
||||
artifacts.archives packageTests
|
||||
artifacts.archives sourcesTestJar
|
||||
@@ -1,776 +0,0 @@
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Enumeration;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.function.Consumer;
|
||||
|
||||
import org.hibernate.boot.registry.classloading.spi.ClassLoaderService;
|
||||
import org.hibernate.MultiTenancyStrategy;
|
||||
import org.hibernate.boot.registry.selector.spi.StrategySelector;
|
||||
import org.hibernate.boot.spi.SessionFactoryOptions;
|
||||
import org.hibernate.cache.CacheException;
|
||||
import org.hibernate.cache.infinispan.collection.CollectionRegionImpl;
|
||||
import org.hibernate.cache.infinispan.entity.EntityRegionImpl;
|
||||
import org.hibernate.cache.infinispan.impl.BaseRegion;
|
||||
import org.hibernate.cache.infinispan.naturalid.NaturalIdRegionImpl;
|
||||
import org.hibernate.cache.infinispan.query.QueryResultsRegionImpl;
|
||||
import org.hibernate.cache.infinispan.timestamp.ClusteredTimestampsRegionImpl;
|
||||
import org.hibernate.cache.infinispan.timestamp.TimestampsRegionImpl;
|
||||
import org.hibernate.cache.infinispan.tm.HibernateTransactionManagerLookup;
|
||||
import org.hibernate.cache.infinispan.util.CacheCommandFactory;
|
||||
import org.hibernate.cache.infinispan.util.Caches;
|
||||
import org.hibernate.cache.infinispan.util.InfinispanMessageLogger;
|
||||
import org.hibernate.cache.internal.DefaultCacheKeysFactory;
|
||||
import org.hibernate.cache.internal.SimpleCacheKeysFactory;
|
||||
import org.hibernate.cache.spi.CacheDataDescription;
|
||||
import org.hibernate.cache.spi.CacheKeysFactory;
|
||||
import org.hibernate.cache.spi.CollectionRegion;
|
||||
import org.hibernate.cache.spi.EntityRegion;
|
||||
import org.hibernate.cache.spi.NaturalIdRegion;
|
||||
import org.hibernate.cache.spi.QueryResultsRegion;
|
||||
import org.hibernate.cache.spi.RegionFactory;
|
||||
import org.hibernate.cache.spi.TimestampsRegion;
|
||||
import org.hibernate.cache.spi.access.AccessType;
|
||||
import org.hibernate.cfg.AvailableSettings;
|
||||
import org.hibernate.internal.util.config.ConfigurationHelper;
|
||||
import org.hibernate.service.ServiceRegistry;
|
||||
|
||||
import org.infinispan.AdvancedCache;
|
||||
import org.infinispan.commands.module.ModuleCommandFactory;
|
||||
import org.infinispan.commons.util.FileLookup;
|
||||
import org.infinispan.commons.util.FileLookupFactory;
|
||||
import org.infinispan.commons.util.Util;
|
||||
import org.infinispan.configuration.cache.Configuration;
|
||||
import org.infinispan.configuration.cache.ConfigurationBuilder;
|
||||
import org.infinispan.configuration.cache.TransactionConfiguration;
|
||||
import org.infinispan.configuration.parsing.ConfigurationBuilderHolder;
|
||||
import org.infinispan.configuration.parsing.ParserRegistry;
|
||||
import org.infinispan.eviction.EvictionStrategy;
|
||||
import org.infinispan.factories.GlobalComponentRegistry;
|
||||
import org.infinispan.manager.DefaultCacheManager;
|
||||
import org.infinispan.manager.EmbeddedCacheManager;
|
||||
import org.infinispan.transaction.lookup.GenericTransactionManagerLookup;
|
||||
|
||||
import javax.transaction.TransactionManager;
|
||||
|
||||
/**
|
||||
* A {@link RegionFactory} for <a href="http://www.jboss.org/infinispan">Infinispan</a>-backed cache
|
||||
* regions.
|
||||
*
|
||||
* @author Chris Bredesen
|
||||
* @author Galder Zamarreño
|
||||
* @since 3.5
|
||||
*/
|
||||
public class InfinispanRegionFactory implements RegionFactory {
|
||||
private static final InfinispanMessageLogger log = InfinispanMessageLogger.Provider.getLog( InfinispanRegionFactory.class );
|
||||
|
||||
private static final String PREFIX = "hibernate.cache.infinispan.";
|
||||
|
||||
private static final String CONFIG_SUFFIX = ".cfg";
|
||||
|
||||
private static final String STRATEGY_SUFFIX = ".eviction.strategy";
|
||||
|
||||
// The attribute was incorrectly named; in fact this sets expiration check interval
|
||||
// (eviction is triggered by writes, expiration is time-based)
|
||||
private static final String DEPRECATED_WAKE_UP_INTERVAL_SUFFIX = ".eviction.wake_up_interval";
|
||||
|
||||
private static final String MAX_ENTRIES_SUFFIX = ".eviction.max_entries";
|
||||
|
||||
private static final String WAKE_UP_INTERVAL_SUFFIX = ".expiration.wake_up_interval";
|
||||
|
||||
private static final String LIFESPAN_SUFFIX = ".expiration.lifespan";
|
||||
|
||||
private static final String MAX_IDLE_SUFFIX = ".expiration.max_idle";
|
||||
|
||||
/**
|
||||
* Classpath or filesystem resource containing Infinispan configurations the factory should use.
|
||||
*
|
||||
* @see #DEF_INFINISPAN_CONFIG_RESOURCE
|
||||
*/
|
||||
public static final String INFINISPAN_CONFIG_RESOURCE_PROP = "hibernate.cache.infinispan.cfg";
|
||||
|
||||
/**
|
||||
* Property name that controls whether Infinispan statistics are enabled.
|
||||
* The property value is expected to be a boolean true or false, and it
|
||||
* overrides statistic configuration in base Infinispan configuration,
|
||||
* if provided.
|
||||
*/
|
||||
public static final String INFINISPAN_GLOBAL_STATISTICS_PROP = "hibernate.cache.infinispan.statistics";
|
||||
|
||||
/**
|
||||
* Property that controls whether Infinispan should interact with the
|
||||
* transaction manager as a {@link javax.transaction.Synchronization} or as
|
||||
* an XA resource.
|
||||
* @deprecated Infinispan Second Level Cache is designed to always register as synchronization
|
||||
* on transactional caches, or use non-transactional caches.
|
||||
*
|
||||
* @see #DEF_USE_SYNCHRONIZATION
|
||||
*/
|
||||
@Deprecated
|
||||
public static final String INFINISPAN_USE_SYNCHRONIZATION_PROP = "hibernate.cache.infinispan.use_synchronization";
|
||||
|
||||
private static final Consumer<Configuration> NO_VALIDATION = c -> {};
|
||||
|
||||
public enum DataType {
|
||||
ENTITY("entity", DEF_ENTITY_RESOURCE, NO_VALIDATION),
|
||||
NATURAL_ID("naturalid", DEF_ENTITY_RESOURCE, NO_VALIDATION),
|
||||
COLLECTION("collection", DEF_ENTITY_RESOURCE, NO_VALIDATION),
|
||||
IMMUTABLE_ENTITY("immutable-entity", DEF_ENTITY_RESOURCE, NO_VALIDATION),
|
||||
TIMESTAMPS("timestamps", DEF_TIMESTAMPS_RESOURCE, c -> {
|
||||
if ( c.clustering().cacheMode().isInvalidation() ) {
|
||||
throw log.timestampsMustNotUseInvalidation();
|
||||
}
|
||||
if (c.eviction().strategy().isEnabled()) {
|
||||
throw log.timestampsMustNotUseEviction();
|
||||
}
|
||||
}),
|
||||
QUERY("query", DEF_QUERY_RESOURCE, NO_VALIDATION),
|
||||
PENDING_PUTS("pending-puts", DEF_PENDING_PUTS_RESOURCE, c -> {
|
||||
if (!c.isTemplate()) {
|
||||
log.pendingPutsShouldBeTemplate();
|
||||
}
|
||||
if (c.clustering().cacheMode().isClustered()) {
|
||||
throw log.pendingPutsMustNotBeClustered();
|
||||
}
|
||||
if (c.transaction().transactionMode().isTransactional()) {
|
||||
throw log.pendingPutsMustNotBeTransactional();
|
||||
}
|
||||
if (c.expiration().maxIdle() <= 0) {
|
||||
throw log.pendingPutsMustHaveMaxIdle();
|
||||
}
|
||||
});
|
||||
|
||||
public final String key;
|
||||
private final String defaultCacheName;
|
||||
private final Consumer<Configuration> validation;
|
||||
|
||||
DataType(String key, String defaultCacheName, Consumer<Configuration> validation) {
|
||||
this.key = key;
|
||||
this.defaultCacheName = defaultCacheName;
|
||||
this.validation = validation;
|
||||
}
|
||||
|
||||
public void validate(Configuration configuration) {
|
||||
validation.accept(configuration);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Name of the configuration that should be used for natural id caches.
|
||||
*
|
||||
* @see #DEF_ENTITY_RESOURCE
|
||||
*/
|
||||
@SuppressWarnings("UnusedDeclaration")
|
||||
public static final String NATURAL_ID_CACHE_RESOURCE_PROP = PREFIX + DataType.NATURAL_ID.key + CONFIG_SUFFIX;
|
||||
|
||||
/**
|
||||
* Name of the configuration that should be used for entity caches.
|
||||
*
|
||||
* @see #DEF_ENTITY_RESOURCE
|
||||
*/
|
||||
@SuppressWarnings("UnusedDeclaration")
|
||||
public static final String ENTITY_CACHE_RESOURCE_PROP = PREFIX + DataType.ENTITY.key + CONFIG_SUFFIX;
|
||||
|
||||
/**
|
||||
* Name of the configuration that should be used for immutable entity caches.
|
||||
* Defaults to the same configuration as {@link #ENTITY_CACHE_RESOURCE_PROP} - {@link #DEF_ENTITY_RESOURCE}
|
||||
*/
|
||||
@SuppressWarnings("UnusedDeclaration")
|
||||
public static final String IMMUTABLE_ENTITY_CACHE_RESOURCE_PROP = PREFIX + DataType.IMMUTABLE_ENTITY.key + CONFIG_SUFFIX;
|
||||
|
||||
/**
|
||||
* Name of the configuration that should be used for collection caches.
|
||||
* No default value, as by default we try to use the same Infinispan cache
|
||||
* instance we use for entity caching.
|
||||
*
|
||||
* @see #ENTITY_CACHE_RESOURCE_PROP
|
||||
* @see #DEF_ENTITY_RESOURCE
|
||||
*/
|
||||
@SuppressWarnings("UnusedDeclaration")
|
||||
public static final String COLLECTION_CACHE_RESOURCE_PROP = PREFIX + DataType.COLLECTION.key + CONFIG_SUFFIX;
|
||||
|
||||
/**
|
||||
* Name of the configuration that should be used for timestamp caches.
|
||||
*
|
||||
* @see #DEF_TIMESTAMPS_RESOURCE
|
||||
*/
|
||||
@SuppressWarnings("UnusedDeclaration")
|
||||
public static final String TIMESTAMPS_CACHE_RESOURCE_PROP = PREFIX + DataType.TIMESTAMPS.key + CONFIG_SUFFIX;
|
||||
|
||||
/**
|
||||
* Name of the configuration that should be used for query caches.
|
||||
*
|
||||
* @see #DEF_QUERY_RESOURCE
|
||||
*/
|
||||
public static final String QUERY_CACHE_RESOURCE_PROP = PREFIX + DataType.QUERY.key + CONFIG_SUFFIX;
|
||||
|
||||
/**
|
||||
* Name of the configuration that should be used for pending-puts caches.
|
||||
*
|
||||
* @see #DEF_PENDING_PUTS_RESOURCE
|
||||
*/
|
||||
@SuppressWarnings("UnusedDeclaration")
|
||||
public static final String PENDING_PUTS_CACHE_RESOURCE_PROP = PREFIX + DataType.PENDING_PUTS.key + CONFIG_SUFFIX;
|
||||
|
||||
/**
|
||||
* Default value for {@link #INFINISPAN_CONFIG_RESOURCE_PROP}. Specifies the "infinispan-configs.xml" file in this package.
|
||||
*/
|
||||
public static final String DEF_INFINISPAN_CONFIG_RESOURCE = "org/hibernate/cache/infinispan/builder/infinispan-configs.xml";
|
||||
|
||||
/**
|
||||
* Default configuration for cases where a non-clustered cache manager is provided.
|
||||
*/
|
||||
public static final String INFINISPAN_CONFIG_LOCAL_RESOURCE = "org/hibernate/cache/infinispan/builder/infinispan-configs-local.xml";
|
||||
|
||||
/**
|
||||
* Default value for {@link #ENTITY_CACHE_RESOURCE_PROP}.
|
||||
*/
|
||||
public static final String DEF_ENTITY_RESOURCE = "entity";
|
||||
|
||||
/**
|
||||
* Default value for {@link #TIMESTAMPS_CACHE_RESOURCE_PROP}.
|
||||
*/
|
||||
public static final String DEF_TIMESTAMPS_RESOURCE = "timestamps";
|
||||
|
||||
/**
|
||||
* Default value for {@link #QUERY_CACHE_RESOURCE_PROP}.
|
||||
*/
|
||||
public static final String DEF_QUERY_RESOURCE = "local-query";
|
||||
|
||||
/**
|
||||
* Default value for {@link #PENDING_PUTS_CACHE_RESOURCE_PROP}
|
||||
*/
|
||||
public static final String DEF_PENDING_PUTS_RESOURCE = "pending-puts";
|
||||
|
||||
/**
|
||||
* @deprecated Use {@link #DEF_PENDING_PUTS_RESOURCE} instead.
|
||||
*/
|
||||
@Deprecated
|
||||
public static final String PENDING_PUTS_CACHE_NAME = DEF_PENDING_PUTS_RESOURCE;
|
||||
|
||||
/**
|
||||
* Default value for {@link #INFINISPAN_USE_SYNCHRONIZATION_PROP}.
|
||||
*/
|
||||
public static final boolean DEF_USE_SYNCHRONIZATION = true;
|
||||
|
||||
/**
|
||||
* Defines custom mapping for regionName -> cacheName and also DataType.key -> cacheName
|
||||
* (for the case where you want to change the cache configuration for a whole type)
|
||||
*/
|
||||
protected final Map<String, String> baseConfigurations = new HashMap<>();
|
||||
/**
|
||||
* Defines configuration properties applied on top of configuration set in any file, by regionName or DataType.key
|
||||
*/
|
||||
protected final Map<String, ConfigurationBuilder> configOverrides = new HashMap<>();
|
||||
|
||||
private CacheKeysFactory cacheKeysFactory;
|
||||
private ConfigurationBuilderHolder defaultConfiguration;
|
||||
private final Map<DataType, Configuration> dataTypeConfigurations = new HashMap<>();
|
||||
private EmbeddedCacheManager manager;
|
||||
|
||||
private org.infinispan.transaction.lookup.TransactionManagerLookup transactionManagerlookup;
|
||||
private TransactionManager transactionManager;
|
||||
|
||||
private List<BaseRegion> regions = new ArrayList<>();
|
||||
private SessionFactoryOptions settings;
|
||||
|
||||
private Boolean globalStats;
|
||||
|
||||
/**
|
||||
* Create a new instance using the default configuration.
|
||||
*/
|
||||
public InfinispanRegionFactory() {
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new instance using configuration properties in <code>props</code>.
|
||||
*
|
||||
* @param props Environmental properties; currently unused.
|
||||
*/
|
||||
@SuppressWarnings("UnusedParameters")
|
||||
public InfinispanRegionFactory(Properties props) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public CollectionRegion buildCollectionRegion(String regionName, Map<String, Object> configValues, CacheDataDescription metadata) {
|
||||
if ( log.isDebugEnabled() ) {
|
||||
log.debug( "Building collection cache region [" + regionName + "]" );
|
||||
}
|
||||
final AdvancedCache cache = getCache( regionName, DataType.COLLECTION, metadata);
|
||||
final CollectionRegionImpl region = new CollectionRegionImpl( cache, regionName, transactionManager, metadata, this, getCacheKeysFactory() );
|
||||
startRegion( region );
|
||||
return region;
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public CollectionRegion buildCollectionRegion(
|
||||
String regionName,
|
||||
Properties properties,
|
||||
CacheDataDescription metadata) throws CacheException {
|
||||
return buildCollectionRegion( regionName, (Map) properties, metadata );
|
||||
}
|
||||
|
||||
@Override
|
||||
public EntityRegion buildEntityRegion(String regionName, Map<String, Object> configValues, CacheDataDescription metadata) {
|
||||
if ( log.isDebugEnabled() ) {
|
||||
log.debugf(
|
||||
"Building entity cache region [%s] (mutable=%s, versioned=%s)",
|
||||
regionName,
|
||||
metadata.isMutable(),
|
||||
metadata.isVersioned()
|
||||
);
|
||||
}
|
||||
final AdvancedCache cache = getCache( regionName, metadata.isMutable() ? DataType.ENTITY : DataType.IMMUTABLE_ENTITY, metadata );
|
||||
final EntityRegionImpl region = new EntityRegionImpl( cache, regionName, transactionManager, metadata, this, getCacheKeysFactory() );
|
||||
startRegion( region );
|
||||
return region;
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public EntityRegion buildEntityRegion(String regionName, Properties properties, CacheDataDescription metadata) {
|
||||
return buildEntityRegion( regionName, (Map) properties, metadata );
|
||||
}
|
||||
|
||||
@Override
|
||||
public NaturalIdRegion buildNaturalIdRegion(String regionName, Map<String, Object> configValues, CacheDataDescription metadata) {
|
||||
if ( log.isDebugEnabled() ) {
|
||||
log.debug("Building natural id cache region [" + regionName + "]");
|
||||
}
|
||||
final AdvancedCache cache = getCache( regionName, DataType.NATURAL_ID, metadata);
|
||||
final NaturalIdRegionImpl region = new NaturalIdRegionImpl( cache, regionName, transactionManager, metadata, this, getCacheKeysFactory());
|
||||
startRegion( region );
|
||||
return region;
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public NaturalIdRegion buildNaturalIdRegion(String regionName, Properties properties, CacheDataDescription metadata) {
|
||||
return buildNaturalIdRegion( regionName, (Map) properties, metadata );
|
||||
}
|
||||
|
||||
@Override
|
||||
public QueryResultsRegion buildQueryResultsRegion(String regionName, Map<String, Object> configValues) {
|
||||
if ( log.isDebugEnabled() ) {
|
||||
log.debug( "Building query results cache region [" + regionName + "]" );
|
||||
}
|
||||
|
||||
final AdvancedCache cache = getCache( regionName, DataType.QUERY, null);
|
||||
final QueryResultsRegionImpl region = new QueryResultsRegionImpl( cache, regionName, transactionManager, this );
|
||||
startRegion( region );
|
||||
return region;
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public QueryResultsRegion buildQueryResultsRegion(String regionName, Properties properties) {
|
||||
return buildQueryResultsRegion( regionName, (Map) properties );
|
||||
}
|
||||
|
||||
@Override
|
||||
public TimestampsRegion buildTimestampsRegion(String regionName, Map<String, Object> configValues) {
|
||||
if ( log.isDebugEnabled() ) {
|
||||
log.debug( "Building timestamps cache region [" + regionName + "]" );
|
||||
}
|
||||
final AdvancedCache cache = getCache( regionName, DataType.TIMESTAMPS, null);
|
||||
final TimestampsRegionImpl region = createTimestampsRegion( cache, regionName );
|
||||
startRegion( region );
|
||||
return region;
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public TimestampsRegion buildTimestampsRegion(String regionName, Properties properties) {
|
||||
return buildTimestampsRegion( regionName, (Map) properties );
|
||||
}
|
||||
|
||||
protected TimestampsRegionImpl createTimestampsRegion(
|
||||
AdvancedCache cache, String regionName) {
|
||||
if ( Caches.isClustered(cache) ) {
|
||||
return new ClusteredTimestampsRegionImpl( cache, regionName, this );
|
||||
}
|
||||
else {
|
||||
return new TimestampsRegionImpl( cache, regionName, this );
|
||||
}
|
||||
}
|
||||
|
||||
public Configuration getPendingPutsCacheConfiguration() {
|
||||
return dataTypeConfigurations.get(DataType.PENDING_PUTS);
|
||||
}
|
||||
|
||||
private CacheKeysFactory getCacheKeysFactory() {
|
||||
return cacheKeysFactory;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isMinimalPutsEnabledByDefault() {
|
||||
// TODO: change to false
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public AccessType getDefaultAccessType() {
|
||||
return AccessType.TRANSACTIONAL;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long nextTimestamp() {
|
||||
return System.currentTimeMillis();
|
||||
}
|
||||
|
||||
public void setCacheManager(EmbeddedCacheManager manager) {
|
||||
this.manager = manager;
|
||||
}
|
||||
|
||||
public EmbeddedCacheManager getCacheManager() {
|
||||
return manager;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void start(SessionFactoryOptions settings, Properties properties) throws CacheException {
|
||||
log.debug( "Starting Infinispan region factory" );
|
||||
|
||||
// determine the CacheKeysFactory to use...
|
||||
this.cacheKeysFactory = determineCacheKeysFactory( settings, properties );
|
||||
|
||||
try {
|
||||
this.settings = settings;
|
||||
transactionManagerlookup = createTransactionManagerLookup( settings, properties );
|
||||
transactionManager = transactionManagerlookup.getTransactionManager();
|
||||
|
||||
final Enumeration keys = properties.propertyNames();
|
||||
while ( keys.hasMoreElements() ) {
|
||||
final String key = (String) keys.nextElement();
|
||||
int prefixLoc;
|
||||
if ( (prefixLoc = key.indexOf( PREFIX )) != -1 ) {
|
||||
parseProperty( prefixLoc, key, extractProperty(key, properties));
|
||||
}
|
||||
}
|
||||
|
||||
defaultConfiguration = loadConfiguration(settings.getServiceRegistry(), DEF_INFINISPAN_CONFIG_RESOURCE);
|
||||
manager = createCacheManager(properties, settings.getServiceRegistry());
|
||||
if (!manager.getCacheManagerConfiguration().isClustered()) {
|
||||
// If we got non-clustered cache manager, use non-clustered (local) configuration as defaults
|
||||
// for the data types
|
||||
defaultConfiguration = loadConfiguration(settings.getServiceRegistry(), INFINISPAN_CONFIG_LOCAL_RESOURCE);
|
||||
}
|
||||
defineDataTypeCacheConfigurations();
|
||||
}
|
||||
catch (CacheException ce) {
|
||||
throw ce;
|
||||
}
|
||||
catch (Throwable t) {
|
||||
throw log.unableToStart(t);
|
||||
}
|
||||
}
|
||||
|
||||
private CacheKeysFactory determineCacheKeysFactory(SessionFactoryOptions settings, Properties properties) {
|
||||
return settings.getServiceRegistry().getService( StrategySelector.class ).resolveDefaultableStrategy(
|
||||
CacheKeysFactory.class,
|
||||
properties.get( AvailableSettings.CACHE_KEYS_FACTORY ),
|
||||
DefaultCacheKeysFactory.INSTANCE
|
||||
);
|
||||
}
|
||||
|
||||
/* This method is overridden in WildFly, so the signature must not change. */
|
||||
/* In WF, the global configuration setting is ignored */
|
||||
protected EmbeddedCacheManager createCacheManager(Properties properties, ServiceRegistry serviceRegistry) {
|
||||
if (properties.containsKey(INFINISPAN_USE_SYNCHRONIZATION_PROP)) {
|
||||
log.propertyUseSynchronizationDeprecated();
|
||||
}
|
||||
ConfigurationBuilderHolder cfgHolder;
|
||||
String configFile = ConfigurationHelper.extractPropertyValue(INFINISPAN_CONFIG_RESOURCE_PROP, properties);
|
||||
if (configFile != null) {
|
||||
cfgHolder = loadConfiguration(serviceRegistry, configFile);
|
||||
}
|
||||
else {
|
||||
cfgHolder = defaultConfiguration;
|
||||
}
|
||||
|
||||
// We cannot just add the default configurations not defined in provided configuration
|
||||
// since WF overrides this method - we have to deal with missing configuration for each cache separately
|
||||
String globalStatsStr = extractProperty( INFINISPAN_GLOBAL_STATISTICS_PROP, properties );
|
||||
if ( globalStatsStr != null ) {
|
||||
globalStats = Boolean.parseBoolean(globalStatsStr);
|
||||
}
|
||||
if (globalStats != null) {
|
||||
cfgHolder.getGlobalConfigurationBuilder().globalJmxStatistics().enabled(globalStats);
|
||||
}
|
||||
|
||||
return createCacheManager(cfgHolder);
|
||||
}
|
||||
|
||||
protected EmbeddedCacheManager createCacheManager(ConfigurationBuilderHolder cfgHolder) {
|
||||
return new DefaultCacheManager( cfgHolder, true );
|
||||
}
|
||||
|
||||
protected org.infinispan.transaction.lookup.TransactionManagerLookup createTransactionManagerLookup(
|
||||
SessionFactoryOptions settings, Properties properties) {
|
||||
return new HibernateTransactionManagerLookup( settings, properties );
|
||||
}
|
||||
|
||||
@Override
|
||||
public void stop() {
|
||||
log.debug( "Stop region factory" );
|
||||
stopCacheRegions();
|
||||
stopCacheManager();
|
||||
}
|
||||
|
||||
protected void stopCacheRegions() {
|
||||
log.debug( "Clear region references" );
|
||||
getCacheCommandFactory().clearRegions( regions );
|
||||
// Ensure we cleanup any caches we created
|
||||
regions.forEach( region -> {
|
||||
region.getCache().stop();
|
||||
manager.undefineConfiguration( region.getCache().getName() );
|
||||
} );
|
||||
regions.clear();
|
||||
}
|
||||
|
||||
protected void stopCacheManager() {
|
||||
log.debug( "Stop cache manager" );
|
||||
manager.stop();
|
||||
}
|
||||
|
||||
private ConfigurationBuilderHolder loadConfiguration(ServiceRegistry serviceRegistry, String configFile) {
|
||||
final FileLookup fileLookup = FileLookupFactory.newInstance();
|
||||
final ClassLoader infinispanClassLoader = InfinispanRegionFactory.class.getClassLoader();
|
||||
return serviceRegistry.getService( ClassLoaderService.class ).workWithClassLoader(
|
||||
new ClassLoaderService.Work<ConfigurationBuilderHolder>() {
|
||||
@Override
|
||||
public ConfigurationBuilderHolder doWork(ClassLoader classLoader) {
|
||||
InputStream is = null;
|
||||
try {
|
||||
is = fileLookup.lookupFile(configFile, classLoader );
|
||||
if ( is == null ) {
|
||||
// when it's not a user-provided configuration file, it might be a default configuration file,
|
||||
// and if that's included in [this] module might not be visible to the ClassLoaderService:
|
||||
classLoader = infinispanClassLoader;
|
||||
// This time use lookupFile*Strict* so as to get an exception if we still can't find it:
|
||||
is = FileLookupFactory.newInstance().lookupFileStrict(configFile, classLoader );
|
||||
}
|
||||
final ParserRegistry parserRegistry = new ParserRegistry( infinispanClassLoader );
|
||||
final ConfigurationBuilderHolder holder = parseWithOverridenClassLoader( parserRegistry, is, infinispanClassLoader );
|
||||
|
||||
return holder;
|
||||
}
|
||||
catch (IOException e) {
|
||||
throw log.unableToCreateCacheManager(e);
|
||||
}
|
||||
finally {
|
||||
Util.close( is );
|
||||
}
|
||||
}
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
private static ConfigurationBuilderHolder parseWithOverridenClassLoader(ParserRegistry configurationParser, InputStream is, ClassLoader infinispanClassLoader) {
|
||||
// Infinispan requires the context ClassLoader to have full visibility on all
|
||||
// its components and eventual extension points even *during* configuration parsing.
|
||||
final Thread currentThread = Thread.currentThread();
|
||||
final ClassLoader originalContextClassLoader = currentThread.getContextClassLoader();
|
||||
try {
|
||||
currentThread.setContextClassLoader( infinispanClassLoader );
|
||||
ConfigurationBuilderHolder builderHolder = configurationParser.parse( is );
|
||||
// Workaround Infinispan's ClassLoader strategies to bend to our will:
|
||||
builderHolder.getGlobalConfigurationBuilder().classLoader( infinispanClassLoader );
|
||||
return builderHolder;
|
||||
}
|
||||
finally {
|
||||
currentThread.setContextClassLoader( originalContextClassLoader );
|
||||
}
|
||||
}
|
||||
|
||||
private void startRegion(BaseRegion region) {
|
||||
regions.add( region );
|
||||
getCacheCommandFactory().addRegion( region );
|
||||
}
|
||||
|
||||
private void parseProperty(int prefixLoc, String key, String value) {
|
||||
final ConfigurationBuilder builder;
|
||||
int suffixLoc;
|
||||
if ( (suffixLoc = key.indexOf( CONFIG_SUFFIX )) != -1 && !key.equals( INFINISPAN_CONFIG_RESOURCE_PROP )) {
|
||||
String regionName = key.substring( prefixLoc + PREFIX.length(), suffixLoc );
|
||||
baseConfigurations.put(regionName, value);
|
||||
}
|
||||
else if ( (suffixLoc = key.indexOf( STRATEGY_SUFFIX )) != -1 ) {
|
||||
builder = getOrCreateConfig( prefixLoc, key, suffixLoc );
|
||||
builder.eviction().strategy( EvictionStrategy.valueOf(value) );
|
||||
}
|
||||
else if ( (suffixLoc = key.indexOf( WAKE_UP_INTERVAL_SUFFIX )) != -1
|
||||
|| (suffixLoc = key.indexOf(DEPRECATED_WAKE_UP_INTERVAL_SUFFIX)) != -1 ) {
|
||||
builder = getOrCreateConfig( prefixLoc, key, suffixLoc );
|
||||
builder.expiration().wakeUpInterval( Long.parseLong(value) );
|
||||
}
|
||||
else if ( (suffixLoc = key.indexOf( MAX_ENTRIES_SUFFIX )) != -1 ) {
|
||||
builder = getOrCreateConfig( prefixLoc, key, suffixLoc );
|
||||
builder.eviction().size( Long.parseLong(value) );
|
||||
}
|
||||
else if ( (suffixLoc = key.indexOf( LIFESPAN_SUFFIX )) != -1 ) {
|
||||
builder = getOrCreateConfig( prefixLoc, key, suffixLoc );
|
||||
builder.expiration().lifespan( Long.parseLong(value) );
|
||||
}
|
||||
else if ( (suffixLoc = key.indexOf( MAX_IDLE_SUFFIX )) != -1 ) {
|
||||
builder = getOrCreateConfig( prefixLoc, key, suffixLoc );
|
||||
builder.expiration().maxIdle( Long.parseLong(value) );
|
||||
}
|
||||
}
|
||||
|
||||
private String extractProperty(String key, Properties properties) {
|
||||
final String value = ConfigurationHelper.extractPropertyValue( key, properties );
|
||||
log.debugf( "Configuration override via property %s: %s", key, value );
|
||||
return value;
|
||||
}
|
||||
|
||||
private ConfigurationBuilder getOrCreateConfig(int prefixLoc, String key, int suffixLoc) {
|
||||
final String name = key.substring( prefixLoc + PREFIX.length(), suffixLoc );
|
||||
ConfigurationBuilder builder = configOverrides.get( name );
|
||||
if ( builder == null ) {
|
||||
builder = new ConfigurationBuilder();
|
||||
configOverrides.put( name, builder );
|
||||
}
|
||||
return builder;
|
||||
}
|
||||
|
||||
private void defineDataTypeCacheConfigurations() {
|
||||
for ( DataType type : DataType.values() ) {
|
||||
String cacheName = baseConfigurations.get(type.key);
|
||||
if (cacheName == null) {
|
||||
cacheName = type.defaultCacheName;
|
||||
}
|
||||
Configuration configuration = manager.getCacheConfiguration(cacheName);
|
||||
ConfigurationBuilder builder;
|
||||
if (configuration == null) {
|
||||
log.debugf("Cache configuration not found for %s", type);
|
||||
if (!cacheName.equals(type.defaultCacheName)) {
|
||||
log.customConfigForTypeNotFound(cacheName, type.key);
|
||||
}
|
||||
builder = defaultConfiguration.getNamedConfigurationBuilders().get(type.defaultCacheName);
|
||||
if (builder == null) {
|
||||
throw new IllegalStateException("Generic data types must have default configuration, none found for " + type);
|
||||
}
|
||||
}
|
||||
else {
|
||||
builder = new ConfigurationBuilder().read(configuration);
|
||||
}
|
||||
ConfigurationBuilder override = configOverrides.get( type.key );
|
||||
if (override != null) {
|
||||
builder.read(override.build(false));
|
||||
}
|
||||
builder.template(true);
|
||||
configureTransactionManager( builder );
|
||||
dataTypeConfigurations.put(type, builder.build());
|
||||
}
|
||||
}
|
||||
|
||||
protected AdvancedCache getCache(String regionName, DataType type, CacheDataDescription metadata) {
|
||||
if (!manager.cacheExists(regionName)) {
|
||||
String templateCacheName = baseConfigurations.get(regionName);
|
||||
Configuration configuration = null;
|
||||
ConfigurationBuilder builder = new ConfigurationBuilder();
|
||||
if (templateCacheName != null) {
|
||||
configuration = manager.getCacheConfiguration(templateCacheName);
|
||||
if (configuration == null) {
|
||||
log.customConfigForRegionNotFound(templateCacheName, regionName, type.key);
|
||||
}
|
||||
else {
|
||||
log.debugf("Region '%s' will use cache template '%s'", regionName, templateCacheName);
|
||||
builder.read(configuration);
|
||||
configureTransactionManager(builder);
|
||||
// do not apply data type overrides to regions that set special cache configuration
|
||||
}
|
||||
}
|
||||
if (configuration == null) {
|
||||
configuration = dataTypeConfigurations.get(type);
|
||||
if (configuration == null) {
|
||||
throw new IllegalStateException("Configuration not defined for type " + type.key);
|
||||
}
|
||||
builder.read(configuration);
|
||||
// overrides for data types are already applied, but we should check custom ones
|
||||
}
|
||||
ConfigurationBuilder override = configOverrides.get(regionName);
|
||||
if (override != null) {
|
||||
log.debugf("Region '%s' has additional configuration set through properties.", regionName);
|
||||
builder.read(override.build(false));
|
||||
}
|
||||
if (getCacheKeysFactory() instanceof SimpleCacheKeysFactory) {
|
||||
// the keys may not define hashCode/equals correctly (e.g. arrays)
|
||||
if (metadata != null && metadata.getKeyType() != null) {
|
||||
builder.dataContainer().keyEquivalence(new TypeEquivalance(metadata.getKeyType()));
|
||||
}
|
||||
}
|
||||
if (globalStats != null) {
|
||||
builder.jmxStatistics().enabled(globalStats).available(globalStats);
|
||||
}
|
||||
configuration = builder.build();
|
||||
type.validate(configuration);
|
||||
manager.defineConfiguration(regionName, configuration);
|
||||
}
|
||||
final AdvancedCache cache = manager.getCache( regionName ).getAdvancedCache();
|
||||
// TODO: not sure if this is needed in recent Infinispan
|
||||
if ( !cache.getStatus().allowInvocations() ) {
|
||||
cache.start();
|
||||
}
|
||||
return createCacheWrapper( cache );
|
||||
}
|
||||
|
||||
private CacheCommandFactory getCacheCommandFactory() {
|
||||
final GlobalComponentRegistry globalCr = manager.getGlobalComponentRegistry();
|
||||
|
||||
final Map<Byte, ModuleCommandFactory> factories =
|
||||
(Map<Byte, ModuleCommandFactory>) globalCr.getComponent( "org.infinispan.modules.command.factories" );
|
||||
|
||||
for ( ModuleCommandFactory factory : factories.values() ) {
|
||||
if ( factory instanceof CacheCommandFactory ) {
|
||||
return (CacheCommandFactory) factory;
|
||||
}
|
||||
}
|
||||
|
||||
throw log.cannotInstallCommandFactory();
|
||||
}
|
||||
|
||||
protected AdvancedCache createCacheWrapper(AdvancedCache cache) {
|
||||
return cache;
|
||||
}
|
||||
|
||||
private void configureTransactionManager(ConfigurationBuilder builder) {
|
||||
TransactionConfiguration transaction = builder.transaction().create();
|
||||
if (transaction.transactionMode().isTransactional() ) {
|
||||
final String ispnTmLookupClassName = transaction.transactionManagerLookup().getClass().getName();
|
||||
final String hbTmLookupClassName = org.hibernate.cache.infinispan.tm.HibernateTransactionManagerLookup.class.getName();
|
||||
if ( GenericTransactionManagerLookup.class.getName().equals( ispnTmLookupClassName ) ) {
|
||||
log.debug(
|
||||
"Using default Infinispan transaction manager lookup " +
|
||||
"instance (GenericTransactionManagerLookup), overriding it " +
|
||||
"with Hibernate transaction manager lookup"
|
||||
);
|
||||
builder.transaction().transactionManagerLookup( transactionManagerlookup );
|
||||
}
|
||||
else if ( ispnTmLookupClassName != null && !ispnTmLookupClassName.equals( hbTmLookupClassName ) ) {
|
||||
log.debug(
|
||||
"Infinispan is configured [" + ispnTmLookupClassName + "] with a different transaction manager lookup " +
|
||||
"class than Hibernate [" + hbTmLookupClassName + "]"
|
||||
);
|
||||
}
|
||||
else {
|
||||
// Infinispan TM lookup class null, so apply Hibernate one directly
|
||||
builder.transaction().transactionManagerLookup( transactionManagerlookup );
|
||||
}
|
||||
builder.transaction().useSynchronization( DEF_USE_SYNCHRONIZATION );
|
||||
}
|
||||
}
|
||||
}
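For illustration only: the PREFIX and suffix constants above (CONFIG_SUFFIX, MAX_ENTRIES_SUFFIX and the expiration suffixes) are consumed from ordinary Hibernate properties in start()/parseProperty(). The sketch below assembles such properties; the resource name, region name and numeric values are invented examples, and "infinispan" is the short name registered by StrategyRegistrationProviderImpl.

import java.util.Properties;

// Sketch: properties that InfinispanRegionFactory.start()/parseProperty() would consume.
// Values are illustrative examples only.
public class InfinispanCachePropertiesSketch {
	public static void main(String[] args) {
		Properties props = new Properties();
		// select the region factory by its registered short name
		props.setProperty( "hibernate.cache.region.factory_class", "infinispan" );
		// custom Infinispan XML resource (INFINISPAN_CONFIG_RESOURCE_PROP); the file name is hypothetical
		props.setProperty( "hibernate.cache.infinispan.cfg", "my-infinispan.xml" );
		// type-level override picked up via LIFESPAN_SUFFIX: entity caches expire after 10 minutes
		props.setProperty( "hibernate.cache.infinispan.entity.expiration.lifespan", "600000" );
		// region-level override picked up via MAX_ENTRIES_SUFFIX for the default query cache
		props.setProperty( "hibernate.cache.infinispan.local-query.eviction.max_entries", "10000" );
		// INFINISPAN_GLOBAL_STATISTICS_PROP
		props.setProperty( "hibernate.cache.infinispan.statistics", "true" );
		props.forEach( (k, v) -> System.out.println( k + "=" + v ) );
	}
}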
@@ -1,93 +0,0 @@
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan;
|
||||
|
||||
import java.util.Properties;
|
||||
import javax.naming.Context;
|
||||
import javax.naming.InitialContext;
|
||||
import javax.naming.NamingException;
|
||||
|
||||
import org.hibernate.cache.CacheException;
|
||||
import org.hibernate.cache.infinispan.util.InfinispanMessageLogger;
|
||||
import org.hibernate.internal.util.config.ConfigurationHelper;
|
||||
import org.hibernate.internal.util.jndi.JndiHelper;
|
||||
import org.hibernate.service.ServiceRegistry;
|
||||
|
||||
import org.infinispan.manager.EmbeddedCacheManager;
|
||||
|
||||
/**
|
||||
* A {@link org.hibernate.cache.spi.RegionFactory} for <a href="http://www.jboss.org/infinispan">Infinispan</a>-backed cache
|
||||
* regions that finds its cache manager in JNDI rather than creating one itself.
|
||||
*
|
||||
* @author Galder Zamarreño
|
||||
* @since 3.5
|
||||
*/
|
||||
public class JndiInfinispanRegionFactory extends InfinispanRegionFactory {
|
||||
|
||||
private static final InfinispanMessageLogger log = InfinispanMessageLogger.Provider.getLog( JndiInfinispanRegionFactory.class );
|
||||
|
||||
/**
|
||||
* Specifies the JNDI name under which the {@link EmbeddedCacheManager} to use is bound.
|
||||
* There is no default value -- the user must specify the property.
|
||||
*/
|
||||
public static final String CACHE_MANAGER_RESOURCE_PROP = "hibernate.cache.infinispan.cachemanager";
|
||||
|
||||
/**
|
||||
* Constructs a JndiInfinispanRegionFactory
|
||||
*/
|
||||
@SuppressWarnings("UnusedDeclaration")
|
||||
public JndiInfinispanRegionFactory() {
|
||||
super();
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a JndiInfinispanRegionFactory
|
||||
*
|
||||
* @param props Any properties to apply (not used).
|
||||
*/
|
||||
@SuppressWarnings("UnusedDeclaration")
|
||||
public JndiInfinispanRegionFactory(Properties props) {
|
||||
super( props );
|
||||
}
|
||||
|
||||
@Override
|
||||
protected EmbeddedCacheManager createCacheManager(
|
||||
Properties properties,
|
||||
ServiceRegistry serviceRegistry) throws CacheException {
|
||||
final String name = ConfigurationHelper.getString( CACHE_MANAGER_RESOURCE_PROP, properties, null );
|
||||
if ( name == null ) {
|
||||
throw log.propertyCacheManagerResourceNotSet();
|
||||
}
|
||||
return locateCacheManager( name, JndiHelper.extractJndiProperties( properties ) );
|
||||
}
|
||||
|
||||
private EmbeddedCacheManager locateCacheManager(String jndiNamespace, Properties jndiProperties) {
|
||||
Context ctx = null;
|
||||
try {
|
||||
ctx = new InitialContext( jndiProperties );
|
||||
return (EmbeddedCacheManager) ctx.lookup( jndiNamespace );
|
||||
}
|
||||
catch (NamingException ne) {
|
||||
throw log.unableToRetrieveCmFromJndi(jndiNamespace);
|
||||
}
|
||||
finally {
|
||||
if ( ctx != null ) {
|
||||
try {
|
||||
ctx.close();
|
||||
}
|
||||
catch (NamingException ne) {
|
||||
log.unableToReleaseContext(ne);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void stop() {
|
||||
// Do not attempt to stop a cache manager because it wasn't created by this region factory.
|
||||
}
|
||||
}
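A minimal sketch of selecting this factory, assuming an EmbeddedCacheManager has already been bound in JNDI; the JNDI name below is a hypothetical example, and "infinispan-jndi" is the short name registered by StrategyRegistrationProviderImpl.

import java.util.Properties;

// Sketch: configuration for JndiInfinispanRegionFactory. The JNDI name is hypothetical.
public class JndiRegionFactorySketch {
	public static void main(String[] args) {
		Properties props = new Properties();
		props.setProperty( "hibernate.cache.region.factory_class", "infinispan-jndi" );
		// CACHE_MANAGER_RESOURCE_PROP: required, otherwise propertyCacheManagerResourceNotSet() is thrown
		props.setProperty( "hibernate.cache.infinispan.cachemanager", "java:jboss/infinispan/container/hibernate" );
		props.forEach( (k, v) -> System.out.println( k + "=" + v ) );
	}
}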
@@ -1,50 +0,0 @@
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import org.hibernate.boot.registry.selector.SimpleStrategyRegistrationImpl;
|
||||
import org.hibernate.boot.registry.selector.StrategyRegistration;
|
||||
import org.hibernate.boot.registry.selector.StrategyRegistrationProvider;
|
||||
import org.hibernate.cache.spi.RegionFactory;
|
||||
|
||||
/**
|
||||
* Makes the 2 contained region factory implementations available to the Hibernate
|
||||
* {@link org.hibernate.boot.registry.selector.spi.StrategySelector} service.
|
||||
*
|
||||
* @author Steve Ebersole
|
||||
*/
|
||||
public class StrategyRegistrationProviderImpl implements StrategyRegistrationProvider {
|
||||
@Override
|
||||
public Iterable<StrategyRegistration> getStrategyRegistrations() {
|
||||
final List<StrategyRegistration> strategyRegistrations = new ArrayList<StrategyRegistration>();
|
||||
|
||||
strategyRegistrations.add(
|
||||
new SimpleStrategyRegistrationImpl<RegionFactory>(
|
||||
RegionFactory.class,
|
||||
InfinispanRegionFactory.class,
|
||||
"infinispan",
|
||||
InfinispanRegionFactory.class.getName(),
|
||||
InfinispanRegionFactory.class.getSimpleName()
|
||||
)
|
||||
);
|
||||
|
||||
strategyRegistrations.add(
|
||||
new SimpleStrategyRegistrationImpl<RegionFactory>(
|
||||
RegionFactory.class,
|
||||
JndiInfinispanRegionFactory.class,
|
||||
"infinispan-jndi",
|
||||
JndiInfinispanRegionFactory.class.getName(),
|
||||
JndiInfinispanRegionFactory.class.getSimpleName()
|
||||
)
|
||||
);
|
||||
|
||||
return strategyRegistrations;
|
||||
}
|
||||
}
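A sketch of how these registrations surface at runtime, assuming this module's jar is on the classpath so the provider is discovered via ServiceLoader; the bootstrap-registry calls reflect the standard Hibernate 5 bootstrap API.

import org.hibernate.boot.registry.BootstrapServiceRegistry;
import org.hibernate.boot.registry.BootstrapServiceRegistryBuilder;
import org.hibernate.boot.registry.selector.spi.StrategySelector;
import org.hibernate.cache.spi.RegionFactory;

// Sketch: resolving the short names registered above through the StrategySelector service.
public class ShortNameResolutionSketch {
	public static void main(String[] args) {
		BootstrapServiceRegistry bootstrapRegistry = new BootstrapServiceRegistryBuilder().build();
		StrategySelector selector = bootstrapRegistry.getService( StrategySelector.class );
		Class<? extends RegionFactory> impl =
				selector.selectStrategyImplementor( RegionFactory.class, "infinispan" );
		// expected: org.hibernate.cache.infinispan.InfinispanRegionFactory
		System.out.println( impl.getName() );
	}
}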
@@ -1,46 +0,0 @@
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan;
|
||||
|
||||
import org.hibernate.type.Type;
|
||||
import org.infinispan.commons.equivalence.Equivalence;
|
||||
|
||||
/**
|
||||
* @author Radim Vansa <rvansa@redhat.com>
|
||||
*/
|
||||
public class TypeEquivalance implements Equivalence<Object> {
|
||||
private final Type type;
|
||||
|
||||
public TypeEquivalance(Type type) {
|
||||
this.type = type;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode(Object o) {
|
||||
return type.getHashCode(o);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object x, Object y) {
|
||||
return type.isEqual(x, y);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString(Object o) {
|
||||
return String.valueOf(o);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isComparable(Object o) {
|
||||
return true; // cannot guess from the type
|
||||
}
|
||||
|
||||
@Override
|
||||
public int compare(Object x, Object y) {
|
||||
return type.compare(x, y);
|
||||
}
|
||||
}
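A small usage sketch: the region factory wires this class into builder.dataContainer().keyEquivalence(...) when SimpleCacheKeysFactory is in use, so that key equality follows the Hibernate Type; StandardBasicTypes.STRING below is just a convenient example Type.

import org.hibernate.cache.infinispan.TypeEquivalance;
import org.hibernate.type.StandardBasicTypes;

// Sketch: TypeEquivalance delegates equality and hashing to a Hibernate Type, which matters
// for key types (e.g. arrays) whose own equals/hashCode are not suitable for cache lookups.
public class TypeEquivalanceSketch {
	public static void main(String[] args) {
		TypeEquivalance eq = new TypeEquivalance( StandardBasicTypes.STRING );
		System.out.println( eq.equals( "key-1", "key-1" ) ); // true
		System.out.println( eq.hashCode( "key-1" ) );
	}
}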
@@ -1,209 +0,0 @@
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.HashSet;
|
||||
import java.util.Locale;
|
||||
import java.util.Set;
|
||||
|
||||
import org.hibernate.cache.CacheException;
|
||||
|
||||
import org.infinispan.configuration.cache.Configuration;
|
||||
import org.infinispan.configuration.cache.ConfigurationBuilder;
|
||||
import org.infinispan.eviction.EvictionStrategy;
|
||||
|
||||
/**
|
||||
* This class represents Infinispan cache parameters that can be configured via hibernate configuration properties
|
||||
* for the general entity/collection/query/timestamp data type caches, as well as overrides for individual entity or
* collection caches. Configuring these properties overrides previously defined properties in the XML file.
|
||||
*
|
||||
* @author Galder Zamarreño
|
||||
* @since 3.5
|
||||
*/
|
||||
public class TypeOverrides implements Serializable {
|
||||
|
||||
private final Set<String> overridden = new HashSet<String>();
|
||||
|
||||
private String cacheName;
|
||||
|
||||
private EvictionStrategy evictionStrategy;
|
||||
|
||||
private long evictionWakeUpInterval;
|
||||
|
||||
private int evictionMaxEntries;
|
||||
|
||||
private long expirationLifespan;
|
||||
|
||||
private long expirationMaxIdle;
|
||||
|
||||
private boolean isExposeStatistics;
|
||||
|
||||
public String getCacheName() {
|
||||
return cacheName;
|
||||
}
|
||||
|
||||
public void setCacheName(String cacheName) {
|
||||
this.cacheName = cacheName;
|
||||
}
|
||||
|
||||
public EvictionStrategy getEvictionStrategy() {
|
||||
return evictionStrategy;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets eviction strategy for cached type.
|
||||
*
|
||||
* @param evictionStrategy String defining eviction strategy allowed.
|
||||
* Possible values are defined in {@link EvictionStrategy}
|
||||
*/
|
||||
public void setEvictionStrategy(String evictionStrategy) {
|
||||
markAsOverriden( "evictionStrategy" );
|
||||
this.evictionStrategy = EvictionStrategy.valueOf( uc( evictionStrategy ) );
|
||||
}
|
||||
|
||||
public long getEvictionWakeUpInterval() {
|
||||
return evictionWakeUpInterval;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets how often eviction process should be run for the cached type.
|
||||
*
|
||||
* @param evictionWakeUpInterval long representing the frequency for executing
|
||||
* the eviction process, in milliseconds
|
||||
*
|
||||
*/
|
||||
public void setEvictionWakeUpInterval(long evictionWakeUpInterval) {
|
||||
markAsOverriden( "evictionWakeUpInterval" );
|
||||
this.evictionWakeUpInterval = evictionWakeUpInterval;
|
||||
}
|
||||
|
||||
public int getEvictionMaxEntries() {
|
||||
return evictionMaxEntries;
|
||||
}
|
||||
|
||||
/**
|
||||
* Maximum number of entries in a cache for this cached type. Cache size
|
||||
* is guaranteed not to exceed the upper limit specified by max entries.
* However, due to the nature of eviction it is unlikely to ever be
* exactly the maximum number of entries specified here.
|
||||
*
|
||||
* @param evictionMaxEntries number of maximum cache entries
|
||||
*/
|
||||
public void setEvictionMaxEntries(int evictionMaxEntries) {
|
||||
markAsOverriden( "evictionMaxEntries" );
|
||||
this.evictionMaxEntries = evictionMaxEntries;
|
||||
}
|
||||
|
||||
public long getExpirationLifespan() {
|
||||
return expirationLifespan;
|
||||
}
|
||||
|
||||
/**
|
||||
* Maximum lifespan of a cache entry, after which the entry is expired
|
||||
* cluster-wide, in milliseconds. -1 means the entries never expire.
|
||||
*
|
||||
* @param expirationLifespan long representing the maximum lifespan,
|
||||
* in milliseconds, for a cached entry before
|
||||
* it's expired
|
||||
*/
|
||||
public void setExpirationLifespan(long expirationLifespan) {
|
||||
markAsOverriden( "expirationLifespan" );
|
||||
this.expirationLifespan = expirationLifespan;
|
||||
}
|
||||
|
||||
public long getExpirationMaxIdle() {
|
||||
return expirationMaxIdle;
|
||||
}
|
||||
|
||||
/**
|
||||
* Maximum idle time a cache entry will be maintained in the cache, in
|
||||
* milliseconds. If the idle time is exceeded, the entry will be expired
|
||||
* cluster-wide. -1 means the entries never expire.
|
||||
*
|
||||
* @param expirationMaxIdle long representing the maximum idle time, in
|
||||
* milliseconds, for a cached entry before it's
|
||||
* expired
|
||||
*/
|
||||
public void setExpirationMaxIdle(long expirationMaxIdle) {
|
||||
markAsOverriden( "expirationMaxIdle" );
|
||||
this.expirationMaxIdle = expirationMaxIdle;
|
||||
}
|
||||
|
||||
public boolean isExposeStatistics() {
|
||||
return isExposeStatistics;
|
||||
}
|
||||
|
||||
/**
|
||||
* Enable statistics gathering and reporting via JMX.
|
||||
*
|
||||
* @param isExposeStatistics boolean indicating whether statistics should
|
||||
* be enabled or disabled
|
||||
*/
|
||||
public void setExposeStatistics(boolean isExposeStatistics) {
|
||||
markAsOverriden( "isExposeStatistics" );
|
||||
this.isExposeStatistics = isExposeStatistics;
|
||||
}
|
||||
|
||||
/**
|
||||
* Apply the configuration overrides in this {@link TypeOverrides} instance
|
||||
* to the cache configuration builder passed as parameter.
|
||||
*
|
||||
* @param builder cache configuration builder on which to apply
|
||||
* configuration overrides
|
||||
*/
|
||||
public void applyTo(ConfigurationBuilder builder) {
|
||||
if ( overridden.contains( "evictionStrategy" ) ) {
|
||||
builder.eviction().strategy( evictionStrategy );
|
||||
}
|
||||
if ( overridden.contains( "evictionWakeUpInterval" ) ) {
|
||||
builder.expiration().wakeUpInterval( evictionWakeUpInterval );
|
||||
}
|
||||
if ( overridden.contains( "evictionMaxEntries" ) ) {
|
||||
builder.eviction().maxEntries( evictionMaxEntries );
|
||||
}
|
||||
if ( overridden.contains( "expirationLifespan" ) ) {
|
||||
builder.expiration().lifespan( expirationLifespan );
|
||||
}
|
||||
if ( overridden.contains( "expirationMaxIdle" ) ) {
|
||||
builder.expiration().maxIdle( expirationMaxIdle );
|
||||
}
|
||||
if ( overridden.contains( "isExposeStatistics" ) && isExposeStatistics ) {
|
||||
builder.jmxStatistics().enable();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate the configuration for this cached type.
|
||||
*
|
||||
* @param cfg configuration to validate
|
||||
* @throws CacheException if validation fails
|
||||
*/
|
||||
public void validateInfinispanConfiguration(Configuration cfg) throws CacheException {
|
||||
// no-op, method overridden
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return getClass().getSimpleName()
|
||||
+ '{' + "cache=" + cacheName
|
||||
+ ", strategy=" + evictionStrategy
|
||||
+ ", wakeUpInterval=" + evictionWakeUpInterval
|
||||
+ ", maxEntries=" + evictionMaxEntries
|
||||
+ ", lifespan=" + expirationLifespan
|
||||
+ ", maxIdle=" + expirationMaxIdle
|
||||
+ '}';
|
||||
}
|
||||
|
||||
private String uc(String s) {
|
||||
return s == null ? null : s.toUpperCase( Locale.ENGLISH );
|
||||
}
|
||||
|
||||
private void markAsOverriden(String fieldName) {
|
||||
overridden.add( fieldName );
|
||||
}
|
||||
}
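A usage sketch of the override tracking above: only the setters that were actually called are applied by applyTo(); the values are arbitrary, and the printed attributes assume the Infinispan configuration API used by this module.

import org.hibernate.cache.infinispan.TypeOverrides;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;

// Sketch: apply a couple of overrides to a fresh builder; untouched attributes keep their defaults.
public class TypeOverridesSketch {
	public static void main(String[] args) {
		TypeOverrides overrides = new TypeOverrides();
		overrides.setExpirationLifespan( 600000L );  // 10 minutes
		overrides.setEvictionMaxEntries( 10000 );

		ConfigurationBuilder builder = new ConfigurationBuilder();
		overrides.applyTo( builder );
		Configuration cfg = builder.build();
		System.out.println( cfg.expiration().lifespan() );  // 600000
		System.out.println( cfg.eviction().maxEntries() );  // 10000
	}
}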
@@ -1,157 +0,0 @@
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.access;
|
||||
|
||||
import org.hibernate.cache.CacheException;
|
||||
import org.hibernate.cache.spi.access.SoftLock;
|
||||
import org.hibernate.engine.spi.SharedSessionContractImplementor;
|
||||
|
||||
/**
|
||||
* Defines the strategy for access to entity or collection data in an Infinispan instance.
|
||||
* <p/>
|
||||
* The intent of this class is to encapsulate common code and serve as a delegate for
|
||||
* {@link org.hibernate.cache.spi.access.EntityRegionAccessStrategy}
|
||||
* and {@link org.hibernate.cache.spi.access.CollectionRegionAccessStrategy} implementations.
|
||||
*
|
||||
* @author Radim Vansa <rvansa@redhat.com>
|
||||
*/
|
||||
public interface AccessDelegate {
|
||||
Object get(SharedSessionContractImplementor session, Object key, long txTimestamp) throws CacheException;
|
||||
|
||||
/**
|
||||
* Attempt to cache an object, after loading from the database.
|
||||
*
|
||||
* @param session Current session
|
||||
* @param key The item key
|
||||
* @param value The item
|
||||
* @param txTimestamp a timestamp prior to the transaction start time
|
||||
* @param version the item version number
|
||||
* @return <tt>true</tt> if the object was successfully cached
|
||||
*/
|
||||
boolean putFromLoad(SharedSessionContractImplementor session, Object key, Object value, long txTimestamp, Object version);
|
||||
|
||||
/**
|
||||
* Attempt to cache an object, after loading from the database, explicitly
|
||||
* specifying the minimalPut behavior.
|
||||
*
|
||||
* @param session Current session.
|
||||
* @param key The item key
|
||||
* @param value The item
|
||||
* @param txTimestamp a timestamp prior to the transaction start time
|
||||
* @param version the item version number
|
||||
* @param minimalPutOverride Explicit minimalPut flag
|
||||
* @return <tt>true</tt> if the object was successfully cached
|
||||
* @throws org.hibernate.cache.CacheException Propogated from underlying {@link org.hibernate.cache.spi.Region}
|
||||
*/
|
||||
boolean putFromLoad(SharedSessionContractImplementor session, Object key, Object value, long txTimestamp, Object version, boolean minimalPutOverride)
|
||||
throws CacheException;
|
||||
|
||||
/**
|
||||
* Called after an item has been inserted (before the transaction completes),
|
||||
* instead of calling evict().
|
||||
*
|
||||
* @param session Current session
|
||||
* @param key The item key
|
||||
* @param value The item
|
||||
* @param version The item's version value
|
||||
* @return Were the contents of the cache actual changed by this operation?
|
||||
* @throws CacheException if the insert fails
|
||||
*/
|
||||
boolean insert(SharedSessionContractImplementor session, Object key, Object value, Object version) throws CacheException;
|
||||
|
||||
/**
|
||||
* Called after an item has been updated (before the transaction completes),
|
||||
* instead of calling evict().
|
||||
*
|
||||
* @param session Current session
|
||||
* @param key The item key
|
||||
* @param value The item
|
||||
* @param currentVersion The item's current version value
|
||||
* @param previousVersion The item's previous version value
|
||||
* @return Whether the contents of the cache actual changed by this operation
|
||||
* @throws CacheException if the update fails
|
||||
*/
|
||||
boolean update(SharedSessionContractImplementor session, Object key, Object value, Object currentVersion, Object previousVersion)
|
||||
throws CacheException;
|
||||
|
||||
/**
|
||||
* Called after an item has become stale (before the transaction completes).
|
||||
*
|
||||
* @param session Current session
|
||||
* @param key The key of the item to remove
|
||||
* @throws CacheException if removing the cached item fails
|
||||
*/
|
||||
void remove(SharedSessionContractImplementor session, Object key) throws CacheException;
|
||||
|
||||
/**
|
||||
* Called to evict data from the entire region
|
||||
*
|
||||
* @throws CacheException if eviction the region fails
|
||||
*/
|
||||
void removeAll() throws CacheException;
|
||||
|
||||
/**
|
||||
* Forcibly evict an item from the cache immediately without regard for transaction
|
||||
* isolation.
|
||||
*
|
||||
* @param key The key of the item to remove
|
||||
* @throws CacheException if evicting the item fails
|
||||
*/
|
||||
void evict(Object key) throws CacheException;
|
||||
|
||||
/**
|
||||
* Forcibly evict all items from the cache immediately without regard for transaction
|
||||
* isolation.
|
||||
*
|
||||
* @throws CacheException if evicting items fails
|
||||
*/
|
||||
void evictAll() throws CacheException;
|
||||
|
||||
/**
|
||||
* Called when we have finished the attempted update/delete (which may or
|
||||
* may not have been successful), after transaction completion. This method
|
||||
* is used by "asynchronous" concurrency strategies.
|
||||
*
|
||||
*
|
||||
* @param session
|
||||
* @param key The item key
|
||||
* @throws org.hibernate.cache.CacheException Propogated from underlying {@link org.hibernate.cache.spi.Region}
|
||||
*/
|
||||
void unlockItem(SharedSessionContractImplementor session, Object key) throws CacheException;
|
||||
|
||||
/**
|
||||
* Called after an item has been inserted (after the transaction completes),
|
||||
* instead of calling release().
|
||||
* This method is used by "asynchronous" concurrency strategies.
|
||||
*
|
||||
*
|
||||
* @param session
|
||||
* @param key The item key
|
||||
* @param value The item
|
||||
* @param version The item's version value
|
||||
* @return Were the contents of the cache actual changed by this operation?
|
||||
* @throws CacheException Propagated from underlying {@link org.hibernate.cache.spi.Region}
|
||||
*/
|
||||
boolean afterInsert(SharedSessionContractImplementor session, Object key, Object value, Object version);
|
||||
|
||||
/**
|
||||
* Called after an item has been updated (after the transaction completes),
|
||||
* instead of calling release(). This method is used by "asynchronous"
|
||||
* concurrency strategies.
|
||||
*
|
||||
*
|
||||
* @param session
|
||||
* @param key The item key
|
||||
* @param value The item
|
||||
* @param currentVersion The item's current version value
|
||||
* @param previousVersion The item's previous version value
|
||||
* @param lock The lock previously obtained from {@link #lockItem}
|
||||
* @return Were the contents of the cache actual changed by this operation?
|
||||
* @throws CacheException Propagated from underlying {@link org.hibernate.cache.spi.Region}
|
||||
*/
|
||||
boolean afterUpdate(SharedSessionContractImplementor session, Object key, Object value, Object currentVersion, Object previousVersion, SoftLock lock);
|
||||
}
|
|
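To make the contract above concrete, here is a hypothetical, deliberately simplistic map-backed implementation. It is not part of the module and ignores transactions, versions and locking; it only shows which calls mutate the cache and what the boolean return values are expected to mean:

package org.hibernate.cache.infinispan.access;

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

import org.hibernate.cache.CacheException;
import org.hibernate.cache.spi.access.SoftLock;
import org.hibernate.engine.spi.SharedSessionContractImplementor;

/**
 * Hypothetical map-backed delegate illustrating the AccessDelegate contract; not for real use.
 */
public class MapBackedAccessDelegate implements AccessDelegate {
	private final ConcurrentMap<Object, Object> store = new ConcurrentHashMap<>();

	@Override
	public Object get(SharedSessionContractImplementor session, Object key, long txTimestamp) throws CacheException {
		return store.get( key );
	}

	@Override
	public boolean putFromLoad(SharedSessionContractImplementor session, Object key, Object value, long txTimestamp, Object version) {
		return putFromLoad( session, key, value, txTimestamp, version, false );
	}

	@Override
	public boolean putFromLoad(SharedSessionContractImplementor session, Object key, Object value, long txTimestamp, Object version, boolean minimalPutOverride)
			throws CacheException {
		// minimalPut: skip the write when the entry is already cached
		if ( minimalPutOverride && store.containsKey( key ) ) {
			return false;
		}
		store.put( key, value );
		return true;
	}

	@Override
	public boolean insert(SharedSessionContractImplementor session, Object key, Object value, Object version) throws CacheException {
		store.put( key, value );
		return true;
	}

	@Override
	public boolean update(SharedSessionContractImplementor session, Object key, Object value, Object currentVersion, Object previousVersion)
			throws CacheException {
		store.put( key, value );
		return true;
	}

	@Override
	public void remove(SharedSessionContractImplementor session, Object key) throws CacheException {
		store.remove( key );
	}

	@Override
	public void removeAll() throws CacheException {
		store.clear();
	}

	@Override
	public void evict(Object key) throws CacheException {
		store.remove( key );
	}

	@Override
	public void evictAll() throws CacheException {
		store.clear();
	}

	@Override
	public void unlockItem(SharedSessionContractImplementor session, Object key) throws CacheException {
		// nothing to unlock in this sketch
	}

	@Override
	public boolean afterInsert(SharedSessionContractImplementor session, Object key, Object value, Object version) {
		// synchronous sketch: the work already happened in insert()
		return false;
	}

	@Override
	public boolean afterUpdate(SharedSessionContractImplementor session, Object key, Object value, Object currentVersion, Object previousVersion, SoftLock lock) {
		return false;
	}
}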
@ -1,101 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.access;
|
||||
|
||||
import org.infinispan.Cache;
|
||||
import org.infinispan.commands.CommandsFactory;
|
||||
import org.infinispan.commands.FlagAffectedCommand;
|
||||
import org.infinispan.context.Flag;
|
||||
import org.infinispan.factories.annotations.Inject;
|
||||
import org.infinispan.factories.annotations.Start;
|
||||
import org.infinispan.interceptors.base.BaseRpcInterceptor;
|
||||
import org.infinispan.jmx.JmxStatisticsExposer;
|
||||
import org.infinispan.jmx.annotations.DataType;
|
||||
import org.infinispan.jmx.annotations.ManagedAttribute;
|
||||
import org.infinispan.jmx.annotations.ManagedOperation;
|
||||
import org.infinispan.jmx.annotations.MeasurementType;
|
||||
import org.infinispan.jmx.annotations.Parameter;
|
||||
import org.infinispan.remoting.inboundhandler.DeliverOrder;
|
||||
import org.infinispan.remoting.rpc.ResponseMode;
|
||||
import org.infinispan.remoting.rpc.RpcOptions;
|
||||
import org.infinispan.remoting.transport.Address;
|
||||
import org.infinispan.statetransfer.StateTransferManager;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
|
||||
public abstract class BaseInvalidationInterceptor extends BaseRpcInterceptor implements JmxStatisticsExposer {
|
||||
private final AtomicLong invalidations = new AtomicLong(0);
|
||||
protected CommandsFactory commandsFactory;
|
||||
protected StateTransferManager stateTransferManager;
|
||||
protected String cacheName;
|
||||
protected boolean statisticsEnabled;
|
||||
protected RpcOptions syncRpcOptions;
|
||||
protected RpcOptions asyncRpcOptions;
|
||||
|
||||
@Inject
|
||||
public void injectDependencies(CommandsFactory commandsFactory, StateTransferManager stateTransferManager, Cache cache) {
|
||||
this.commandsFactory = commandsFactory;
|
||||
this.stateTransferManager = stateTransferManager;
|
||||
this.cacheName = cache.getName();
|
||||
}
|
||||
|
||||
@Start
|
||||
private void start() {
|
||||
this.setStatisticsEnabled(cacheConfiguration.jmxStatistics().enabled());
|
||||
syncRpcOptions = rpcManager.getRpcOptionsBuilder(ResponseMode.SYNCHRONOUS_IGNORE_LEAVERS, DeliverOrder.NONE).build();
|
||||
asyncRpcOptions = rpcManager.getDefaultRpcOptions(false);
|
||||
}
|
||||
|
||||
@ManagedOperation(
|
||||
description = "Resets statistics gathered by this component",
|
||||
displayName = "Reset statistics"
|
||||
)
|
||||
public void resetStatistics() {
|
||||
invalidations.set(0);
|
||||
}
|
||||
|
||||
@ManagedAttribute(
|
||||
displayName = "Statistics enabled",
|
||||
description = "Enables or disables the gathering of statistics by this component",
|
||||
dataType = DataType.TRAIT,
|
||||
writable = true
|
||||
)
|
||||
public boolean getStatisticsEnabled() {
|
||||
return this.statisticsEnabled;
|
||||
}
|
||||
|
||||
public void setStatisticsEnabled(@Parameter(name = "enabled", description = "Whether statistics should be enabled or disabled (true/false)") boolean enabled) {
|
||||
this.statisticsEnabled = enabled;
|
||||
}
|
||||
|
||||
@ManagedAttribute(
|
||||
description = "Number of invalidations",
|
||||
displayName = "Number of invalidations",
|
||||
measurementType = MeasurementType.TRENDSUP
|
||||
)
|
||||
public long getInvalidations() {
|
||||
return invalidations.get();
|
||||
}
|
||||
|
||||
protected void incrementInvalidations() {
|
||||
if (statisticsEnabled) {
|
||||
invalidations.incrementAndGet();
|
||||
}
|
||||
}
|
||||
|
||||
protected List<Address> getMembers() {
|
||||
return stateTransferManager.getCacheTopology().getMembers();
|
||||
}
|
||||
|
||||
protected boolean isPutForExternalRead(FlagAffectedCommand command) {
|
||||
if (command.hasFlag(Flag.PUT_FOR_EXTERNAL_READ)) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}
|
|
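The interceptor above gathers a single JMX-exposed counter guarded by a runtime flag. As a plain-JDK illustration of that pattern (a hypothetical class, not part of the module):

import java.util.concurrent.atomic.AtomicLong;

// The counter is only touched when statistics are enabled, and reset simply zeroes it,
// mirroring incrementInvalidations()/resetStatistics() above.
public class InvalidationStats {
	private final AtomicLong invalidations = new AtomicLong( 0 );
	private volatile boolean statisticsEnabled;

	public void setStatisticsEnabled(boolean enabled) {
		this.statisticsEnabled = enabled;
	}

	public void incrementInvalidations() {
		if ( statisticsEnabled ) {
			invalidations.incrementAndGet();
		}
	}

	public long getInvalidations() {
		return invalidations.get();
	}

	public void resetStatistics() {
		invalidations.set( 0 );
	}
}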
@ -1,67 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.cache.infinispan.access;

import java.util.UUID;

import org.hibernate.cache.infinispan.impl.BaseTransactionalDataRegion;
import org.hibernate.cache.infinispan.util.FutureUpdate;
import org.hibernate.cache.infinispan.util.InfinispanMessageLogger;
import org.hibernate.cache.infinispan.util.InvocationAfterCompletion;
import org.hibernate.resource.transaction.spi.TransactionCoordinator;

import org.infinispan.AdvancedCache;

/**
 * @author Radim Vansa <rvansa@redhat.com>
 */
public class FutureUpdateSynchronization extends InvocationAfterCompletion {
	private static final InfinispanMessageLogger log = InfinispanMessageLogger.Provider.getLog( FutureUpdateSynchronization.class );

	private final UUID uuid = UUID.randomUUID();
	private final Object key;
	private final Object value;
	private final BaseTransactionalDataRegion region;
	private final long sessionTimestamp;
	private final AdvancedCache cache;

	public FutureUpdateSynchronization(TransactionCoordinator tc, AdvancedCache cache, boolean requiresTransaction,
			Object key, Object value, BaseTransactionalDataRegion region, long sessionTimestamp) {
		super(tc, requiresTransaction);
		this.cache = cache;
		this.key = key;
		this.value = value;
		this.region = region;
		this.sessionTimestamp = sessionTimestamp;
	}

	public UUID getUuid() {
		return uuid;
	}

	@Override
	protected void invoke(boolean success) {
		// If the region was invalidated during this session, we can't know that the value we're inserting is valid
		// so we'll just null the tombstone
		if (sessionTimestamp < region.getLastRegionInvalidation()) {
			success = false;
		}
		// Exceptions in #afterCompletion() are silently ignored, since the transaction
		// is already committed in DB. However we must not return until we update the cache.
		FutureUpdate futureUpdate = new FutureUpdate(uuid, region.nextTimestamp(), success ? this.value : null);
		for (;;) {
			try {
				cache.put(key, futureUpdate);
				return;
			}
			catch (Exception e) {
				log.failureInAfterCompletion(e);
			}
		}
	}
}
@ -1,175 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.access;
|
||||
|
||||
import org.hibernate.cache.CacheException;
|
||||
import org.hibernate.cache.infinispan.impl.BaseRegion;
|
||||
import org.hibernate.cache.infinispan.util.Caches;
|
||||
import org.hibernate.cache.infinispan.util.InfinispanMessageLogger;
|
||||
import org.hibernate.engine.spi.SharedSessionContractImplementor;
|
||||
|
||||
import org.infinispan.AdvancedCache;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Brian Stansberry
|
||||
* @author Galder Zamarreño
|
||||
* @since 3.5
|
||||
*/
|
||||
public abstract class InvalidationCacheAccessDelegate implements AccessDelegate {
|
||||
protected static final InfinispanMessageLogger log = InfinispanMessageLogger.Provider.getLog( InvalidationCacheAccessDelegate.class );
|
||||
protected static final boolean TRACE_ENABLED = log.isTraceEnabled();
|
||||
protected final AdvancedCache cache;
|
||||
protected final BaseRegion region;
|
||||
protected final PutFromLoadValidator putValidator;
|
||||
protected final AdvancedCache<Object, Object> writeCache;
|
||||
|
||||
/**
|
||||
* Create a new transactional access delegate instance.
|
||||
*
|
||||
* @param region to control access to
|
||||
* @param validator put from load validator
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
protected InvalidationCacheAccessDelegate(BaseRegion region, PutFromLoadValidator validator) {
|
||||
this.region = region;
|
||||
this.cache = region.getCache();
|
||||
this.putValidator = validator;
|
||||
this.writeCache = Caches.ignoreReturnValuesCache( cache );
|
||||
}
|
||||
|
||||
/**
|
||||
* Attempt to retrieve an object from the cache.
|
||||
*
|
||||
*
|
||||
* @param session
|
||||
* @param key The key of the item to be retrieved
|
||||
* @param txTimestamp a timestamp prior to the transaction start time
|
||||
* @return the cached object or <tt>null</tt>
|
||||
* @throws CacheException if the cache retrieval failed
|
||||
*/
|
||||
@Override
|
||||
@SuppressWarnings("UnusedParameters")
|
||||
public Object get(SharedSessionContractImplementor session, Object key, long txTimestamp) throws CacheException {
|
||||
if ( !region.checkValid() ) {
|
||||
return null;
|
||||
}
|
||||
final Object val = cache.get( key );
|
||||
if ( val == null ) {
|
||||
putValidator.registerPendingPut(session, key, txTimestamp );
|
||||
}
|
||||
return val;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean putFromLoad(SharedSessionContractImplementor session, Object key, Object value, long txTimestamp, Object version) {
|
||||
return putFromLoad(session, key, value, txTimestamp, version, false );
|
||||
}
|
||||
|
||||
/**
|
||||
* Attempt to cache an object, after loading from the database, explicitly
|
||||
* specifying the minimalPut behavior.
|
||||
*
|
||||
* @param session Current session
|
||||
* @param key The item key
|
||||
* @param value The item
|
||||
* @param txTimestamp a timestamp prior to the transaction start time
|
||||
* @param version the item version number
|
||||
* @param minimalPutOverride Explicit minimalPut flag
|
||||
* @return <tt>true</tt> if the object was successfully cached
|
||||
* @throws CacheException if storing the object failed
|
||||
*/
|
||||
@Override
|
||||
@SuppressWarnings("UnusedParameters")
|
||||
public boolean putFromLoad(SharedSessionContractImplementor session, Object key, Object value, long txTimestamp, Object version, boolean minimalPutOverride)
|
||||
throws CacheException {
|
||||
if ( !region.checkValid() ) {
|
||||
if ( TRACE_ENABLED ) {
|
||||
log.tracef( "Region %s not valid", region.getName() );
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// In theory, since putForExternalRead is already as minimal as it can
|
||||
// get, we shouldn't need this check. However, without the check and
|
||||
// without https://issues.jboss.org/browse/ISPN-1986, it's impossible to
|
||||
// know whether the put actually occurred. Knowing this is crucial so
|
||||
// that Hibernate can expose accurate statistics.
|
||||
if ( minimalPutOverride && cache.containsKey( key ) ) {
|
||||
return false;
|
||||
}
|
||||
|
||||
PutFromLoadValidator.Lock lock = putValidator.acquirePutFromLoadLock(session, key, txTimestamp);
|
||||
if ( lock == null) {
|
||||
if ( TRACE_ENABLED ) {
|
||||
log.tracef( "Put from load lock not acquired for key %s", key );
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
try {
|
||||
writeCache.putForExternalRead( key, value );
|
||||
}
|
||||
finally {
|
||||
putValidator.releasePutFromLoadLock( key, lock);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void remove(SharedSessionContractImplementor session, Object key) throws CacheException {
|
||||
putValidator.setCurrentSession(session);
|
||||
try {
|
||||
// We update whether or not the region is valid. Other nodes
|
||||
// may have already restored the region so they need to
|
||||
// be informed of the change.
|
||||
writeCache.remove(key);
|
||||
}
|
||||
finally {
|
||||
putValidator.resetCurrentSession();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void removeAll() throws CacheException {
|
||||
try {
|
||||
if (!putValidator.beginInvalidatingRegion()) {
|
||||
log.failedInvalidateRegion(region.getName());
|
||||
}
|
||||
Caches.removeAll(cache);
|
||||
}
|
||||
finally {
|
||||
putValidator.endInvalidatingRegion();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void evict(Object key) throws CacheException {
|
||||
writeCache.remove( key );
|
||||
}
|
||||
|
||||
@Override
|
||||
public void evictAll() throws CacheException {
|
||||
try {
|
||||
if (!putValidator.beginInvalidatingRegion()) {
|
||||
log.failedInvalidateRegion(region.getName());
|
||||
}
|
||||
|
||||
// Invalidate the local region and then go remote
|
||||
region.invalidateRegion();
|
||||
Caches.broadcastEvictAll(cache);
|
||||
}
|
||||
finally {
|
||||
putValidator.endInvalidatingRegion();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void unlockItem(SharedSessionContractImplementor session, Object key) throws CacheException {
|
||||
}
|
||||
}
|
|
@ -1,34 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.cache.infinispan.access;

import javax.transaction.Status;

/**
 * Synchronization that should release the locks after invalidation is complete.
 *
 * @author Radim Vansa <rvansa@redhat.com>
 */
public class InvalidationSynchronization implements javax.transaction.Synchronization {
	public final Object lockOwner;
	private final NonTxPutFromLoadInterceptor nonTxPutFromLoadInterceptor;
	private final Object key;

	public InvalidationSynchronization(NonTxPutFromLoadInterceptor nonTxPutFromLoadInterceptor, Object key, Object lockOwner) {
		this.nonTxPutFromLoadInterceptor = nonTxPutFromLoadInterceptor;
		this.key = key;
		this.lockOwner = lockOwner;
	}

	@Override
	public void beforeCompletion() {}

	@Override
	public void afterCompletion(int status) {
		nonTxPutFromLoadInterceptor.endInvalidating(key, lockOwner, status == Status.STATUS_COMMITTED || status == Status.STATUS_COMMITTING);
	}
}
@ -1,74 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.cache.infinispan.access;

import org.infinispan.commands.write.DataWriteCommand;
import org.infinispan.context.Flag;
import org.infinispan.context.InvocationContext;
import org.infinispan.interceptors.locking.NonTransactionalLockingInterceptor;
import org.infinispan.util.concurrent.TimeoutException;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

/**
 * With regular {@link org.infinispan.interceptors.locking.NonTransactionalLockingInterceptor},
 * async replication does not work in combination with synchronous replication: sync replication
 * relies on locking to order writes on backup while async replication relies on FIFO-ordering
 * from primary to backup. If these two combine, there's a possibility that on backup two modifications
 * will proceed concurrently.
 * Similar issue threatens consistency when the command has {@link org.infinispan.context.Flag#CACHE_MODE_LOCAL}
 * - these commands don't acquire locks either.
 *
 * Therefore, this interceptor locks the entry all the time. {@link UnorderedDistributionInterceptor} does not forward
 * the message from non-origin to any other node, and the distribution interceptor won't block on RPC but will return
 * {@link CompletableFuture} and we'll wait for it here.
 */
public class LockingInterceptor extends NonTransactionalLockingInterceptor {
	private static final Log log = LogFactory.getLog(LockingInterceptor.class);
	private static final boolean trace = log.isTraceEnabled();

	@Override
	protected Object visitDataWriteCommand(InvocationContext ctx, DataWriteCommand command) throws Throwable {
		Object returnValue = null;
		try {
			// Clear any metadata; we'll set them as appropriate in TombstoneCallInterceptor
			command.setMetadata(null);

			lockAndRecord(ctx, command.getKey(), getLockTimeoutMillis(command));

			returnValue = invokeNextInterceptor(ctx, command);
			return returnValue;
		}
		catch (TimeoutException e) {
			if (!ctx.isOriginLocal() && command.hasFlag(Flag.ZERO_LOCK_ACQUISITION_TIMEOUT)) {
				// FAIL_SILENTLY flag is not replicated to remote nodes and zero acquisition timeouts cause
				// very noisy logs.
				if (trace) {
					log.trace("Silently ignoring exception", e);
				}
				return null;
			}
			else {
				throw e;
			}
		}
		finally {
			lockManager.unlockAll(ctx);
			if (returnValue instanceof CompletableFuture) {
				try {
					((CompletableFuture) returnValue).join();
				}
				catch (CompletionException e) {
					throw e.getCause();
				}
			}
		}
	}
}
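The essential trick in LockingInterceptor is that the lock only covers the local invocation, while any CompletableFuture returned by the asynchronous distribution step is joined after the lock has been released. A hypothetical plain-JDK sketch of that ordering, where ReentrantLock and Supplier stand in for Infinispan's lock manager and the rest of the interceptor chain:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Supplier;

public class LockThenJoin {
	private final ReentrantLock lock = new ReentrantLock();

	public Object write(Supplier<Object> invocation) throws Throwable {
		Object returnValue = null;
		lock.lock();
		try {
			// The local write (which may only enqueue replication and hand back a future)
			// happens under the lock, so local ordering is preserved.
			returnValue = invocation.get();
			return returnValue;
		}
		finally {
			lock.unlock();
			// Remote completion is awaited only after the lock is released, mirroring
			// the finally block of visitDataWriteCommand above.
			if ( returnValue instanceof CompletableFuture ) {
				try {
					( (CompletableFuture<?>) returnValue ).join();
				}
				catch (CompletionException e) {
					throw e.getCause();
				}
			}
		}
	}
}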
@ -1,198 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.access;
|
||||
|
||||
import java.util.Comparator;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.hibernate.cache.CacheException;
|
||||
import org.hibernate.cache.infinispan.impl.BaseTransactionalDataRegion;
|
||||
import org.hibernate.cache.infinispan.util.Caches;
|
||||
import org.hibernate.cache.infinispan.util.InfinispanMessageLogger;
|
||||
import org.hibernate.cache.infinispan.util.VersionedEntry;
|
||||
import org.hibernate.cache.spi.access.SoftLock;
|
||||
import org.hibernate.cache.spi.entry.CacheEntry;
|
||||
import org.hibernate.engine.spi.SharedSessionContractImplementor;
|
||||
import org.hibernate.resource.transaction.spi.TransactionCoordinator;
|
||||
|
||||
import org.infinispan.AdvancedCache;
|
||||
import org.infinispan.configuration.cache.Configuration;
|
||||
import org.infinispan.context.Flag;
|
||||
|
||||
/**
|
||||
* Access delegate that relaxes the consistency a bit: stale reads are prohibited only after the transaction
|
||||
* commits. This should also be able to work with async caches, and that would allow the replication delay
|
||||
* even after the commit.
|
||||
*
|
||||
* @author Radim Vansa <rvansa@redhat.com>
|
||||
*/
|
||||
public class NonStrictAccessDelegate implements AccessDelegate {
|
||||
private static final InfinispanMessageLogger log = InfinispanMessageLogger.Provider.getLog( NonStrictAccessDelegate.class );
|
||||
private static final boolean trace = log.isTraceEnabled();
|
||||
|
||||
private final BaseTransactionalDataRegion region;
|
||||
private final AdvancedCache cache;
|
||||
private final AdvancedCache writeCache;
|
||||
private final AdvancedCache putFromLoadCache;
|
||||
private final Comparator versionComparator;
|
||||
|
||||
|
||||
public NonStrictAccessDelegate(BaseTransactionalDataRegion region) {
|
||||
this.region = region;
|
||||
this.cache = region.getCache();
|
||||
this.writeCache = Caches.ignoreReturnValuesCache(cache);
|
||||
// Note that correct behaviour of local and async writes depends on LockingInterceptor (see there for details)
|
||||
this.putFromLoadCache = writeCache.withFlags( Flag.ZERO_LOCK_ACQUISITION_TIMEOUT, Flag.FAIL_SILENTLY, Flag.FORCE_ASYNCHRONOUS );
|
||||
Configuration configuration = cache.getCacheConfiguration();
|
||||
if (configuration.clustering().cacheMode().isInvalidation()) {
|
||||
throw new IllegalArgumentException("Nonstrict-read-write mode cannot use invalidation.");
|
||||
}
|
||||
if (configuration.transaction().transactionMode().isTransactional()) {
|
||||
throw new IllegalArgumentException("Currently transactional caches are not supported.");
|
||||
}
|
||||
this.versionComparator = region.getCacheDataDescription().getVersionComparator();
|
||||
if (versionComparator == null) {
|
||||
throw new IllegalArgumentException("This strategy requires versioned entities/collections but region " + region.getName() + " contains non-versioned data!");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object get(SharedSessionContractImplementor session, Object key, long txTimestamp) throws CacheException {
|
||||
if (txTimestamp < region.getLastRegionInvalidation() ) {
|
||||
return null;
|
||||
}
|
||||
Object value = cache.get(key);
|
||||
if (value instanceof VersionedEntry) {
|
||||
return ((VersionedEntry) value).getValue();
|
||||
}
|
||||
return value;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean putFromLoad(SharedSessionContractImplementor session, Object key, Object value, long txTimestamp, Object version) {
|
||||
return putFromLoad(session, key, value, txTimestamp, version, false);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean putFromLoad(SharedSessionContractImplementor session, Object key, Object value, long txTimestamp, Object version, boolean minimalPutOverride) throws CacheException {
|
||||
long lastRegionInvalidation = region.getLastRegionInvalidation();
|
||||
if (txTimestamp < lastRegionInvalidation) {
|
||||
log.tracef("putFromLoad not executed since tx started at %d, before last region invalidation finished = %d", txTimestamp, lastRegionInvalidation);
|
||||
return false;
|
||||
}
|
||||
assert version != null;
|
||||
|
||||
if (minimalPutOverride) {
|
||||
Object prev = cache.get(key);
|
||||
if (prev != null) {
|
||||
Object oldVersion = getVersion(prev);
|
||||
if (oldVersion != null) {
|
||||
if (versionComparator.compare(version, oldVersion) <= 0) {
|
||||
if (trace) {
|
||||
log.tracef("putFromLoad not executed since version(%s) <= oldVersion(%s)", version, oldVersion);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}
|
||||
else if (prev instanceof VersionedEntry && txTimestamp <= ((VersionedEntry) prev).getTimestamp()) {
|
||||
if (trace) {
|
||||
log.tracef("putFromLoad not executed since tx started at %d and entry was invalidated at %d",
|
||||
txTimestamp, ((VersionedEntry) prev).getTimestamp());
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
// We can't use putForExternalRead since the PFER flag means that the entry is not wrapped into the context
// when it is already present in the container. TombstoneCallInterceptor will deal with this.
// Even if the value is an instance of CacheEntry, we have to wrap it in a VersionedEntry and add the transaction timestamp.
// Otherwise, the old eviction record wouldn't be overwritten.
|
||||
putFromLoadCache.put(key, new VersionedEntry(value, version, txTimestamp));
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean insert(SharedSessionContractImplementor session, Object key, Object value, Object version) throws CacheException {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean update(SharedSessionContractImplementor session, Object key, Object value, Object currentVersion, Object previousVersion) throws CacheException {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void remove(SharedSessionContractImplementor session, Object key) throws CacheException {
|
||||
// there's no 'afterRemove', so we have to use our own synchronization
|
||||
// the API does not provide version of removed item but we can't load it from the cache
|
||||
// as that would be prone to race conditions - if the entry was updated in the meantime
|
||||
// the remove could be discarded and we would end up with stale record
|
||||
// See VersionedTest#testCollectionUpdate for such situation
|
||||
TransactionCoordinator transactionCoordinator = session.getTransactionCoordinator();
|
||||
RemovalSynchronization sync = new RemovalSynchronization(transactionCoordinator, writeCache, false, region, key);
|
||||
transactionCoordinator.getLocalSynchronizations().registerSynchronization(sync);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void removeAll() throws CacheException {
|
||||
region.beginInvalidation();
|
||||
try {
|
||||
Caches.broadcastEvictAll(cache);
|
||||
}
|
||||
finally {
|
||||
region.endInvalidation();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void evict(Object key) throws CacheException {
|
||||
writeCache.put(key, new VersionedEntry(null, null, region.nextTimestamp()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void evictAll() throws CacheException {
|
||||
region.beginInvalidation();
|
||||
try {
|
||||
Caches.broadcastEvictAll(cache);
|
||||
}
|
||||
finally {
|
||||
region.endInvalidation();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void unlockItem(SharedSessionContractImplementor session, Object key) throws CacheException {
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean afterInsert(SharedSessionContractImplementor session, Object key, Object value, Object version) {
|
||||
writeCache.put(key, getVersioned(value, version, session.getTimestamp()));
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean afterUpdate(SharedSessionContractImplementor session, Object key, Object value, Object currentVersion, Object previousVersion, SoftLock lock) {
|
||||
writeCache.put(key, getVersioned(value, currentVersion, session.getTimestamp()));
|
||||
return true;
|
||||
}
|
||||
|
||||
protected Object getVersion(Object value) {
|
||||
if (value instanceof CacheEntry) {
|
||||
return ((CacheEntry) value).getVersion();
|
||||
}
|
||||
else if (value instanceof VersionedEntry) {
|
||||
return ((VersionedEntry) value).getVersion();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
protected Object getVersioned(Object value, Object version, long timestamp) {
|
||||
assert value != null;
|
||||
assert version != null;
|
||||
return new VersionedEntry(value, version, timestamp);
|
||||
}
|
||||
}
|
|
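NonStrictAccessDelegate's minimalPut path only writes when the incoming version is newer than what is cached. A hypothetical, self-contained sketch of that version gate using only JDK types; the VersionGatedCache name and the merge-based conflict resolution are illustrative, not the module's actual mechanism, which also has to handle tombstones and region invalidation:

import java.util.Comparator;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class VersionGatedCache<K, V> {

	public static final class Versioned<V> {
		final V value;
		final Object version;

		Versioned(V value, Object version) {
			this.value = value;
			this.version = version;
		}
	}

	private final ConcurrentMap<K, Versioned<V>> store = new ConcurrentHashMap<>();
	private final Comparator<Object> versionComparator;

	public VersionGatedCache(Comparator<Object> versionComparator) {
		this.versionComparator = versionComparator;
	}

	public boolean putFromLoad(K key, V value, Object version) {
		Versioned<V> candidate = new Versioned<>( value, version );
		// merge() keeps whichever entry has the higher version, so stale loads never overwrite fresh data
		Versioned<V> winner = store.merge( key, candidate,
				(old, fresh) -> versionComparator.compare( fresh.version, old.version ) > 0 ? fresh : old );
		return winner == candidate;
	}

	public V get(K key) {
		Versioned<V> entry = store.get( key );
		return entry == null ? null : entry.value;
	}
}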
@ -1,93 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.access;
|
||||
|
||||
import org.hibernate.cache.CacheException;
|
||||
import org.hibernate.cache.infinispan.impl.BaseRegion;
|
||||
import org.hibernate.cache.spi.access.SoftLock;
|
||||
import org.hibernate.engine.spi.SharedSessionContractImplementor;
|
||||
|
||||
/**
|
||||
* Delegate for non-transactional caches
|
||||
*
|
||||
* @author Radim Vansa <rvansa@redhat.com>
|
||||
*/
|
||||
public class NonTxInvalidationCacheAccessDelegate extends InvalidationCacheAccessDelegate {
|
||||
public NonTxInvalidationCacheAccessDelegate(BaseRegion region, PutFromLoadValidator validator) {
|
||||
super(region, validator);
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("UnusedParameters")
|
||||
public boolean insert(SharedSessionContractImplementor session, Object key, Object value, Object version) throws CacheException {
|
||||
if ( !region.checkValid() ) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// We need to be invalidating even for regular writes; if we were not, and the write was followed by an eviction
// (or any other invalidation), a naked put that was started after the eviction ended but before this insert
// ended could insert the stale entry into the cache (since the entry was removed by the eviction).
|
||||
putValidator.setCurrentSession(session);
|
||||
try {
|
||||
// NonTxInvalidationInterceptor will call beginInvalidatingWithPFER and change this to a removal because
|
||||
// we must publish the new value only after invalidation ends.
|
||||
writeCache.put(key, value);
|
||||
}
|
||||
finally {
|
||||
putValidator.resetCurrentSession();
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("UnusedParameters")
|
||||
public boolean update(SharedSessionContractImplementor session, Object key, Object value, Object currentVersion, Object previousVersion)
|
||||
throws CacheException {
|
||||
// We update whether or not the region is valid. Other nodes
|
||||
// may have already restored the region so they need to
|
||||
// be informed of the change.
|
||||
|
||||
// We need to be invalidating even for regular writes; if we were not, and the write was followed by an eviction
// (or any other invalidation), a naked put that was started after the eviction ended but before this update
// ended could insert the stale entry into the cache (since the entry was removed by the eviction).
|
||||
putValidator.setCurrentSession(session);
|
||||
try {
|
||||
// NonTxInvalidationInterceptor will call beginInvalidatingWithPFER and change this to a removal because
|
||||
// we must publish the new value only after invalidation ends.
|
||||
writeCache.put(key, value);
|
||||
}
|
||||
finally {
|
||||
putValidator.resetCurrentSession();
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean afterInsert(SharedSessionContractImplementor session, Object key, Object value, Object version) {
|
||||
// endInvalidatingKeys is called from NonTxInvalidationInterceptor, from the synchronization callback
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean afterUpdate(SharedSessionContractImplementor session, Object key, Object value, Object currentVersion, Object previousVersion, SoftLock lock) {
|
||||
// endInvalidatingKeys is called from NonTxInvalidationInterceptor, from the synchronization callback
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void removeAll() throws CacheException {
|
||||
try {
|
||||
if (!putValidator.beginInvalidatingRegion()) {
|
||||
log.failedInvalidateRegion(region.getName());
|
||||
}
|
||||
cache.clear();
|
||||
}
|
||||
finally {
|
||||
putValidator.endInvalidatingRegion();
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,135 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.access;
|
||||
|
||||
import org.hibernate.cache.infinispan.util.CacheCommandInitializer;
|
||||
import org.hibernate.cache.infinispan.util.InfinispanMessageLogger;
|
||||
import org.infinispan.commands.write.ClearCommand;
|
||||
import org.infinispan.commands.write.InvalidateCommand;
|
||||
import org.infinispan.commands.write.PutKeyValueCommand;
|
||||
import org.infinispan.commands.write.PutMapCommand;
|
||||
import org.infinispan.commands.write.RemoveCommand;
|
||||
import org.infinispan.commands.write.ReplaceCommand;
|
||||
import org.infinispan.commands.write.WriteCommand;
|
||||
import org.infinispan.context.Flag;
|
||||
import org.infinispan.context.InvocationContext;
|
||||
import org.infinispan.factories.annotations.Inject;
|
||||
import org.infinispan.interceptors.InvalidationInterceptor;
|
||||
import org.infinispan.jmx.annotations.MBean;
|
||||
import org.infinispan.util.concurrent.locks.RemoteLockCommand;
|
||||
|
||||
import java.util.Collections;
|
||||
|
||||
/**
|
||||
* This interceptor should completely replace the default InvalidationInterceptor.
* We need to send custom invalidation commands with a transaction identifier (as the invalidation),
* since we have to do a two-phase invalidation (releasing the locks as a JTA synchronization),
* although the cache itself is non-transactional.
|
||||
*
|
||||
* @author Radim Vansa <rvansa@redhat.com>
|
||||
* @author Mircea.Markus@jboss.com
|
||||
* @author Galder Zamarreño
|
||||
*/
|
||||
@MBean(objectName = "Invalidation", description = "Component responsible for invalidating entries on remote caches when entries are written to locally.")
|
||||
public class NonTxInvalidationInterceptor extends BaseInvalidationInterceptor {
|
||||
private final PutFromLoadValidator putFromLoadValidator;
|
||||
private CacheCommandInitializer commandInitializer;
|
||||
|
||||
private static final InfinispanMessageLogger log = InfinispanMessageLogger.Provider.getLog(InvalidationInterceptor.class);
|
||||
|
||||
public NonTxInvalidationInterceptor(PutFromLoadValidator putFromLoadValidator) {
|
||||
this.putFromLoadValidator = putFromLoadValidator;
|
||||
}
|
||||
|
||||
@Inject
|
||||
public void injectDependencies(CacheCommandInitializer commandInitializer) {
|
||||
this.commandInitializer = commandInitializer;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) throws Throwable {
|
||||
if (command.hasFlag(Flag.PUT_FOR_EXTERNAL_READ)) {
|
||||
return invokeNextInterceptor(ctx, command);
|
||||
}
|
||||
else {
|
||||
boolean isTransactional = putFromLoadValidator.registerRemoteInvalidation(command.getKey(), command.getKeyLockOwner());
|
||||
if (!isTransactional) {
|
||||
throw new IllegalStateException("Put executed without transaction!");
|
||||
}
|
||||
if (!putFromLoadValidator.beginInvalidatingWithPFER(command.getKeyLockOwner(), command.getKey(), command.getValue())) {
|
||||
log.failedInvalidatePendingPut(command.getKey(), cacheName);
|
||||
}
|
||||
RemoveCommand removeCommand = commandsFactory.buildRemoveCommand(command.getKey(), null, command.getFlags());
|
||||
Object retval = invokeNextInterceptor(ctx, removeCommand);
|
||||
if (command.isSuccessful()) {
|
||||
invalidateAcrossCluster(command, isTransactional, command.getKey());
|
||||
}
|
||||
return retval;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitReplaceCommand(InvocationContext ctx, ReplaceCommand command) throws Throwable {
|
||||
throw new UnsupportedOperationException("Unexpected replace");
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitRemoveCommand(InvocationContext ctx, RemoveCommand command) throws Throwable {
|
||||
boolean isTransactional = putFromLoadValidator.registerRemoteInvalidation(command.getKey(), command.getKeyLockOwner());
|
||||
if (isTransactional) {
|
||||
if (!putFromLoadValidator.beginInvalidatingKey(command.getKeyLockOwner(), command.getKey())) {
|
||||
log.failedInvalidatePendingPut(command.getKey(), cacheName);
|
||||
}
|
||||
}
|
||||
else {
|
||||
log.trace("This is an eviction, not invalidating anything");
|
||||
}
|
||||
Object retval = invokeNextInterceptor(ctx, command);
|
||||
if (command.isSuccessful()) {
|
||||
invalidateAcrossCluster(command, isTransactional, command.getKey());
|
||||
}
|
||||
return retval;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitClearCommand(InvocationContext ctx, ClearCommand command) throws Throwable {
|
||||
Object retval = invokeNextInterceptor(ctx, command);
|
||||
if (!isLocalModeForced(command)) {
|
||||
// just broadcast the clear command - this is simplest!
|
||||
if (ctx.isOriginLocal()) {
|
||||
rpcManager.invokeRemotely(getMembers(), command, isSynchronous(command) ? syncRpcOptions : asyncRpcOptions);
|
||||
}
|
||||
}
|
||||
return retval;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitPutMapCommand(InvocationContext ctx, PutMapCommand command) throws Throwable {
|
||||
throw new UnsupportedOperationException("Unexpected putAll");
|
||||
}
|
||||
|
||||
private <T extends WriteCommand & RemoteLockCommand> void invalidateAcrossCluster(T command, boolean isTransactional, Object key) throws Throwable {
|
||||
// increment invalidations counter if statistics maintained
|
||||
incrementInvalidations();
|
||||
InvalidateCommand invalidateCommand;
|
||||
if (!isLocalModeForced(command)) {
|
||||
if (isTransactional) {
|
||||
invalidateCommand = commandInitializer.buildBeginInvalidationCommand(
|
||||
Collections.emptySet(), new Object[] { key }, command.getKeyLockOwner());
|
||||
}
|
||||
else {
|
||||
invalidateCommand = commandsFactory.buildInvalidateCommand(Collections.emptySet(), new Object[] { key });
|
||||
}
|
||||
if (log.isDebugEnabled()) {
|
||||
log.debug("Cache [" + rpcManager.getAddress() + "] replicating " + invalidateCommand);
|
||||
}
|
||||
|
||||
rpcManager.invokeRemotely(getMembers(), invalidateCommand, isSynchronous(command) ? syncRpcOptions : asyncRpcOptions);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -1,87 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.access;
|
||||
|
||||
import org.hibernate.cache.infinispan.util.BeginInvalidationCommand;
|
||||
import org.hibernate.cache.infinispan.util.CacheCommandInitializer;
|
||||
import org.hibernate.cache.infinispan.util.EndInvalidationCommand;
|
||||
|
||||
import org.hibernate.cache.infinispan.util.InfinispanMessageLogger;
|
||||
import org.infinispan.commands.CommandsFactory;
|
||||
import org.infinispan.commands.write.InvalidateCommand;
|
||||
import org.infinispan.commands.write.PutKeyValueCommand;
|
||||
import org.infinispan.commands.write.RemoveCommand;
|
||||
import org.infinispan.context.Flag;
|
||||
import org.infinispan.context.InvocationContext;
|
||||
import org.infinispan.factories.annotations.Inject;
|
||||
import org.infinispan.factories.annotations.Start;
|
||||
import org.infinispan.interceptors.base.BaseCustomInterceptor;
|
||||
import org.infinispan.remoting.inboundhandler.DeliverOrder;
|
||||
import org.infinispan.remoting.rpc.ResponseMode;
|
||||
import org.infinispan.remoting.rpc.RpcManager;
|
||||
import org.infinispan.remoting.rpc.RpcOptions;
|
||||
import org.infinispan.remoting.transport.Address;
|
||||
import org.infinispan.statetransfer.StateTransferManager;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
* Non-transactional counterpart of {@link TxPutFromLoadInterceptor}.
* Invokes {@link PutFromLoadValidator#beginInvalidatingKey(Object, Object)} for each invalidation from a
* remote node ({@link BeginInvalidationCommand}) and sends an {@link EndInvalidationCommand} after the transaction
* is complete, with the help of {@link InvalidationSynchronization}.
*
* @author Radim Vansa <rvansa@redhat.com>
*/
|
||||
public class NonTxPutFromLoadInterceptor extends BaseCustomInterceptor {
|
||||
private final static InfinispanMessageLogger log = InfinispanMessageLogger.Provider.getLog(NonTxPutFromLoadInterceptor.class);
|
||||
private final String cacheName;
|
||||
private final PutFromLoadValidator putFromLoadValidator;
|
||||
private CacheCommandInitializer commandInitializer;
|
||||
private RpcManager rpcManager;
|
||||
private StateTransferManager stateTransferManager;
|
||||
private RpcOptions asyncUnordered;
|
||||
|
||||
public NonTxPutFromLoadInterceptor(PutFromLoadValidator putFromLoadValidator, String cacheName) {
|
||||
this.putFromLoadValidator = putFromLoadValidator;
|
||||
this.cacheName = cacheName;
|
||||
}
|
||||
|
||||
@Inject
|
||||
public void injectDependencies(CacheCommandInitializer commandInitializer, RpcManager rpcManager, StateTransferManager stateTransferManager) {
|
||||
this.commandInitializer = commandInitializer;
|
||||
this.rpcManager = rpcManager;
|
||||
this.stateTransferManager = stateTransferManager;
|
||||
}
|
||||
|
||||
@Start
|
||||
public void start() {
|
||||
asyncUnordered = rpcManager.getRpcOptionsBuilder(ResponseMode.ASYNCHRONOUS, DeliverOrder.NONE).build();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitInvalidateCommand(InvocationContext ctx, InvalidateCommand command) throws Throwable {
|
||||
if (!ctx.isOriginLocal() && command instanceof BeginInvalidationCommand) {
|
||||
for (Object key : command.getKeys()) {
|
||||
putFromLoadValidator.beginInvalidatingKey(((BeginInvalidationCommand) command).getLockOwner(), key);
|
||||
}
|
||||
}
|
||||
return invokeNextInterceptor(ctx, command);
|
||||
}
|
||||
|
||||
public void endInvalidating(Object key, Object lockOwner, boolean successful) {
|
||||
assert lockOwner != null;
|
||||
if (!putFromLoadValidator.endInvalidatingKey(lockOwner, key, successful)) {
|
||||
log.failedEndInvalidating(key, cacheName);
|
||||
}
|
||||
|
||||
EndInvalidationCommand endInvalidationCommand = commandInitializer.buildEndInvalidationCommand(
|
||||
cacheName, new Object[] { key }, lockOwner);
|
||||
List<Address> members = stateTransferManager.getCacheTopology().getMembers();
|
||||
rpcManager.invokeRemotely(members, endInvalidationCommand, asyncUnordered);
|
||||
}
|
||||
}
|
|
@ -1,972 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.access;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.locks.ReentrantLock;
|
||||
|
||||
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
|
||||
import org.hibernate.cache.infinispan.util.CacheCommandInitializer;
|
||||
import org.hibernate.cache.infinispan.util.InfinispanMessageLogger;
|
||||
import org.hibernate.cache.spi.RegionFactory;
|
||||
import org.hibernate.engine.spi.SharedSessionContractImplementor;
|
||||
import org.hibernate.resource.transaction.spi.TransactionCoordinator;
|
||||
|
||||
import org.infinispan.AdvancedCache;
|
||||
import org.infinispan.configuration.cache.CacheMode;
|
||||
import org.infinispan.configuration.cache.Configuration;
|
||||
import org.infinispan.configuration.cache.ConfigurationBuilder;
|
||||
import org.infinispan.interceptors.EntryWrappingInterceptor;
|
||||
import org.infinispan.interceptors.InvalidationInterceptor;
|
||||
import org.infinispan.interceptors.base.CommandInterceptor;
|
||||
import org.infinispan.manager.EmbeddedCacheManager;
|
||||
|
||||
/**
|
||||
* Encapsulates logic to allow a {@link InvalidationCacheAccessDelegate} to determine
|
||||
* whether a {@link InvalidationCacheAccessDelegate#putFromLoad(org.hibernate.engine.spi.SharedSessionContractImplementor, Object, Object, long, Object, boolean)}
|
||||
* call should be allowed to update the cache. A <code>putFromLoad</code> has
|
||||
* the potential to store stale data, since the data may have been removed from the
|
||||
* database and the cache between the time when the data was read from the database
|
||||
* and the actual call to <code>putFromLoad</code>.
|
||||
* <p>
|
||||
* The expected usage of this class by a thread that read the cache and did
|
||||
* not find data is:
|
||||
* <p/>
|
||||
* <ol>
|
||||
* <li> Call {@link #registerPendingPut(SharedSessionContractImplementor, Object, long)}</li>
|
||||
* <li> Read the database</li>
|
||||
* <li> Call {@link #acquirePutFromLoadLock(SharedSessionContractImplementor, Object, long)}
|
||||
* <li> if above returns <code>null</code>, the thread should not cache the data;
|
||||
* only if above returns instance of <code>AcquiredLock</code>, put data in the cache and...</li>
|
||||
* <li> then call {@link #releasePutFromLoadLock(Object, Lock)}</li>
|
||||
* </ol>
|
||||
* </p>
|
||||
* <p/>
|
||||
* <p>
|
||||
* The expected usage by a thread that is taking an action such that any pending
|
||||
* <code>putFromLoad</code> may have stale data and should not cache it is to either
|
||||
* call
|
||||
* <p/>
|
||||
* <ul>
|
||||
* <li> {@link #beginInvalidatingKey(Object, Object)} (for a single key invalidation)</li>
|
||||
* <li>or {@link #beginInvalidatingRegion()} followed by {@link #endInvalidatingRegion()}
|
||||
* (for a general invalidation of all pending puts)</li>
|
||||
* </ul>
|
||||
* After transaction commit (when the DB is updated) {@link #endInvalidatingKey(Object, Object)} should
|
||||
* be called in order to allow further attempts to cache the entry.
|
||||
* </p>
|
||||
* <p/>
|
||||
* <p>
|
||||
* This class also supports the concept of "naked puts", which are calls to
|
||||
* {@link #acquirePutFromLoadLock(SharedSessionContractImplementor, Object, long)} without a preceding {@link #registerPendingPut(SharedSessionContractImplementor, Object, long)}.
|
||||
* Besides not acquiring lock in {@link #registerPendingPut(SharedSessionContractImplementor, Object, long)} this can happen when collection
|
||||
* elements are loaded after the collection has not been found in the cache, where the elements
|
||||
* don't have their own table but can be listed as 'select ... from Element where collection_id = ...'.
|
||||
* Naked puts are handled according to txTimestamp obtained by calling {@link RegionFactory#nextTimestamp()}
|
||||
* before the transaction is started. The timestamp is compared with timestamp of last invalidation end time
|
||||
* and the write to the cache is denied if it is lower or equal.
|
||||
* </p>
|
||||
*
|
||||
* @author Brian Stansberry
|
||||
* @version $Revision: $
|
||||
*/
|
||||
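To make the register/acquire/release protocol described above concrete, here is a hedged usage sketch. The helper class, its name, and the loadFromDatabase supplier are hypothetical; only the PutFromLoadValidator calls mirror the methods documented in this file:

import java.util.function.Supplier;

import org.hibernate.cache.infinispan.access.PutFromLoadValidator;
import org.hibernate.engine.spi.SharedSessionContractImplementor;
import org.infinispan.AdvancedCache;

// Hypothetical helper; not part of the module. Steps 1-5 correspond to the protocol
// in the class javadoc above.
public class PutFromLoadProtocolSketch {
	public static Object loadAndCache(PutFromLoadValidator validator, AdvancedCache<Object, Object> cache,
			SharedSessionContractImplementor session, Object key, long txTimestamp, Supplier<Object> loadFromDatabase) {
		// 1. announce the intended put so it is not treated as a "naked put"
		validator.registerPendingPut( session, key, txTimestamp );
		// 2. read the database
		Object value = loadFromDatabase.get();
		// 3. try to acquire the put-from-load lock
		PutFromLoadValidator.Lock lock = validator.acquirePutFromLoadLock( session, key, txTimestamp );
		if ( lock == null ) {
			// an invalidation is in progress: do not cache the possibly stale value
			return value;
		}
		try {
			// 4. only now is it safe to write to the cache
			cache.putForExternalRead( key, value );
		}
		finally {
			// 5. always release the lock acquired in step 3
			validator.releasePutFromLoadLock( key, lock );
		}
		return value;
	}
}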
public class PutFromLoadValidator {
|
||||
private static final InfinispanMessageLogger log = InfinispanMessageLogger.Provider.getLog(PutFromLoadValidator.class);
|
||||
private static final boolean trace = log.isTraceEnabled();
|
||||
|
||||
/**
|
||||
* Period after which ongoing invalidation is removed. Value is retrieved from cache configuration.
|
||||
*/
|
||||
private final long expirationPeriod;
|
||||
|
||||
/**
|
||||
* Registry of expected, future, isPutValid calls. If a key+owner is registered in this map, it
|
||||
* is not a "naked put" and is allowed to proceed.
|
||||
*/
|
||||
private final ConcurrentMap<Object, PendingPutMap> pendingPuts;
|
||||
|
||||
/**
|
||||
* Main cache where the entities/collections are stored. This is not modified from within this class.
|
||||
*/
|
||||
private final AdvancedCache cache;
|
||||
|
||||
private final InfinispanRegionFactory regionFactory;
|
||||
|
||||
/**
|
||||
* Injected interceptor
|
||||
*/
|
||||
private NonTxPutFromLoadInterceptor nonTxPutFromLoadInterceptor;
|
||||
|
||||
/**
|
||||
* The time of the last call to {@link #endInvalidatingRegion()}. Puts from transactions started after
|
||||
* this timestamp are denied.
|
||||
*/
|
||||
private volatile long regionInvalidationTimestamp = Long.MIN_VALUE;
|
||||
|
||||
/**
|
||||
* Number of ongoing concurrent invalidations.
|
||||
*/
|
||||
private int regionInvalidations = 0;
|
||||
|
||||
/**
|
||||
* Allows propagation of current Session to callbacks invoked from interceptors
|
||||
*/
|
||||
private final ThreadLocal<SharedSessionContractImplementor> currentSession = new ThreadLocal<SharedSessionContractImplementor>();
|
||||
|
||||
/**
|
||||
* Creates a new put from load validator instance.
|
||||
*
|
||||
* @param cache Cache instance on which to store pending put information.
|
||||
*/
|
||||
public PutFromLoadValidator(AdvancedCache cache, InfinispanRegionFactory regionFactory) {
|
||||
this( cache, regionFactory, cache.getCacheManager());
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new put from load validator instance.
|
||||
* @param cache Cache instance on which to store pending put information.
|
||||
* @param regionFactory
|
||||
* @param cacheManager where to find a cache to store pending put information
|
||||
*/
|
||||
public PutFromLoadValidator(AdvancedCache cache, InfinispanRegionFactory regionFactory, EmbeddedCacheManager cacheManager) {
|
||||
this.regionFactory = regionFactory;
|
||||
Configuration cacheConfiguration = cache.getCacheConfiguration();
|
||||
Configuration pendingPutsConfiguration = regionFactory.getPendingPutsCacheConfiguration();
|
||||
ConfigurationBuilder configurationBuilder = new ConfigurationBuilder();
|
||||
configurationBuilder.read(pendingPutsConfiguration);
|
||||
configurationBuilder.dataContainer().keyEquivalence(cacheConfiguration.dataContainer().keyEquivalence());
|
||||
String pendingPutsName = cache.getName() + "-" + InfinispanRegionFactory.DEF_PENDING_PUTS_RESOURCE;
|
||||
cacheManager.defineConfiguration(pendingPutsName, configurationBuilder.build());
|
||||
|
||||
if (pendingPutsConfiguration.expiration() != null && pendingPutsConfiguration.expiration().maxIdle() > 0) {
|
||||
this.expirationPeriod = pendingPutsConfiguration.expiration().maxIdle();
|
||||
}
|
||||
else {
|
||||
throw log.pendingPutsMustHaveMaxIdle();
|
||||
}
|
||||
CacheMode cacheMode = cache.getCacheConfiguration().clustering().cacheMode();
|
||||
// Since we need to intercept both invalidations of entries that are in the cache and those
|
||||
// that are not, we need to use custom interceptor, not listeners (which fire only for present entries).
|
||||
NonTxPutFromLoadInterceptor nonTxPutFromLoadInterceptor = null;
|
||||
if (cacheMode.isClustered()) {
|
||||
if (!cacheMode.isInvalidation()) {
|
||||
throw new IllegalArgumentException("PutFromLoadValidator in clustered caches requires invalidation mode.");
|
||||
}
|
||||
addToCache(cache, this);
|
||||
}
|
||||
|
||||
this.cache = cache;
|
||||
this.pendingPuts = cacheManager.getCache(pendingPutsName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Besides the call from the constructor, this should be called only from tests when mocking the validator.
|
||||
*/
|
||||
public static void addToCache(AdvancedCache cache, PutFromLoadValidator validator) {
|
||||
List<CommandInterceptor> interceptorChain = cache.getInterceptorChain();
|
||||
log.debug("Interceptor chain was: " + interceptorChain);
|
||||
int position = 0;
|
||||
// add interceptor before uses exact match, not instanceof match
|
||||
int invalidationPosition = 0;
|
||||
int entryWrappingPosition = 0;
|
||||
for (CommandInterceptor ci : interceptorChain) {
|
||||
if (ci instanceof InvalidationInterceptor) {
|
||||
invalidationPosition = position;
|
||||
}
|
||||
if (ci instanceof EntryWrappingInterceptor) {
|
||||
entryWrappingPosition = position;
|
||||
}
|
||||
position++;
|
||||
}
|
||||
boolean transactional = cache.getCacheConfiguration().transaction().transactionMode().isTransactional();
|
||||
if (transactional) {
|
||||
cache.removeInterceptor(invalidationPosition);
|
||||
TxInvalidationInterceptor txInvalidationInterceptor = new TxInvalidationInterceptor();
|
||||
cache.getComponentRegistry().registerComponent(txInvalidationInterceptor, TxInvalidationInterceptor.class);
|
||||
cache.addInterceptor(txInvalidationInterceptor, invalidationPosition);
|
||||
|
||||
// Note that invalidation does *NOT* acquire locks; therefore, we have to start invalidating before
|
||||
// wrapping the entry, since if putFromLoad was invoked between wrap and beginInvalidatingKey, the invalidation
|
||||
// would not commit the entry removal (as during wrap the entry was not in cache)
|
||||
TxPutFromLoadInterceptor txPutFromLoadInterceptor = new TxPutFromLoadInterceptor(validator, cache.getName());
|
||||
cache.getComponentRegistry().registerComponent(txPutFromLoadInterceptor, TxPutFromLoadInterceptor.class);
|
||||
cache.addInterceptor(txPutFromLoadInterceptor, entryWrappingPosition);
|
||||
}
|
||||
else {
|
||||
cache.removeInterceptor(invalidationPosition);
|
||||
NonTxInvalidationInterceptor nonTxInvalidationInterceptor = new NonTxInvalidationInterceptor(validator);
|
||||
cache.getComponentRegistry().registerComponent(nonTxInvalidationInterceptor, NonTxInvalidationInterceptor.class);
|
||||
cache.addInterceptor(nonTxInvalidationInterceptor, invalidationPosition);
|
||||
|
||||
NonTxPutFromLoadInterceptor nonTxPutFromLoadInterceptor = new NonTxPutFromLoadInterceptor(validator, cache.getName());
|
||||
cache.getComponentRegistry().registerComponent(nonTxPutFromLoadInterceptor, NonTxPutFromLoadInterceptor.class);
|
||||
cache.addInterceptor(nonTxPutFromLoadInterceptor, entryWrappingPosition);
|
||||
validator.nonTxPutFromLoadInterceptor = nonTxPutFromLoadInterceptor;
|
||||
}
|
||||
log.debug("New interceptor chain is: " + cache.getInterceptorChain());
|
||||
|
||||
CacheCommandInitializer cacheCommandInitializer = cache.getComponentRegistry().getComponent(CacheCommandInitializer.class);
|
||||
cacheCommandInitializer.addPutFromLoadValidator(cache.getName(), validator);
|
||||
}

    /**
     * This method should be called only from tests; it removes the existing validator from the cache structures
     * in order to replace it with a new one.
     *
     * @param cache the cache from which the validator should be removed
     */
    public static PutFromLoadValidator removeFromCache(AdvancedCache cache) {
        cache.removeInterceptor(TxPutFromLoadInterceptor.class);
        cache.removeInterceptor(NonTxPutFromLoadInterceptor.class);
        for (Object i : cache.getInterceptorChain()) {
            if (i instanceof NonTxInvalidationInterceptor) {
                InvalidationInterceptor invalidationInterceptor = new InvalidationInterceptor();
                cache.getComponentRegistry().registerComponent(invalidationInterceptor, InvalidationInterceptor.class);
                cache.addInterceptorBefore(invalidationInterceptor, NonTxInvalidationInterceptor.class);
                cache.removeInterceptor(NonTxInvalidationInterceptor.class);
                break;
            }
            else if (i instanceof TxInvalidationInterceptor) {
                InvalidationInterceptor invalidationInterceptor = new InvalidationInterceptor();
                cache.getComponentRegistry().registerComponent(invalidationInterceptor, InvalidationInterceptor.class);
                cache.addInterceptorBefore(invalidationInterceptor, TxInvalidationInterceptor.class);
                cache.removeInterceptor(TxInvalidationInterceptor.class);
                break;
            }
        }
        CacheCommandInitializer cci = cache.getComponentRegistry().getComponent(CacheCommandInitializer.class);
        return cci.removePutFromLoadValidator(cache.getName());
    }

    public void setCurrentSession(SharedSessionContractImplementor session) {
        currentSession.set(session);
    }

    public void resetCurrentSession() {
        currentSession.remove();
    }

    /**
     * Marker for a lock acquired in {@link #acquirePutFromLoadLock(SharedSessionContractImplementor, Object, long)}.
     */
    public static abstract class Lock {
        private Lock() {}
    }
|
||||
|
||||
/**
|
||||
* Acquire a lock giving the calling thread the right to put data in the
|
||||
* cache for the given key.
|
||||
* <p>
|
||||
* <strong>NOTE:</strong> A call to this method that returns <code>true</code>
|
||||
* should always be matched with a call to {@link #releasePutFromLoadLock(Object, Lock)}.
|
||||
* </p>
|
||||
*
|
||||
* @param session
|
||||
* @param key the key
|
||||
*
|
||||
* @param txTimestamp
|
||||
* @return <code>AcquiredLock</code> if the lock is acquired and the cache put
|
||||
* can proceed; <code>null</code> if the data should not be cached
|
||||
*/
|
||||
public Lock acquirePutFromLoadLock(SharedSessionContractImplementor session, Object key, long txTimestamp) {
|
||||
if (trace) {
|
||||
log.tracef("acquirePutFromLoadLock(%s#%s, %d)", cache.getName(), key, txTimestamp);
|
||||
}
|
||||
boolean locked = false;
|
||||
|
||||
PendingPutMap pending = pendingPuts.get( key );
|
||||
for (;;) {
|
||||
try {
|
||||
if (pending != null) {
|
||||
locked = pending.acquireLock(100, TimeUnit.MILLISECONDS);
|
||||
if (locked) {
|
||||
boolean valid = false;
|
||||
try {
|
||||
if (pending.isRemoved()) {
|
||||
// this deals with a race between retrieving the map from cache vs. removing that
|
||||
// and locking the map
|
||||
pending.releaseLock();
|
||||
locked = false;
|
||||
pending = null;
|
||||
if (trace) {
|
||||
log.tracef("Record removed when waiting for the lock.");
|
||||
}
|
||||
continue;
|
||||
}
|
||||
final PendingPut toCancel = pending.remove(session);
|
||||
if (toCancel != null) {
|
||||
valid = !toCancel.completed;
|
||||
toCancel.completed = true;
|
||||
}
|
||||
else {
|
||||
// this is a naked put
|
||||
if (pending.hasInvalidator()) {
|
||||
valid = false;
|
||||
}
|
||||
// we need this check since registerPendingPut (creating new pp) can get between invalidation
|
||||
// and naked put caused by the invalidation
|
||||
else if (pending.lastInvalidationEnd != Long.MIN_VALUE) {
|
||||
// if this transaction started after last invalidation we can continue
|
||||
valid = txTimestamp > pending.lastInvalidationEnd;
|
||||
}
|
||||
else {
|
||||
valid = txTimestamp > regionInvalidationTimestamp;
|
||||
}
|
||||
}
|
||||
return valid ? pending : null;
|
||||
}
|
||||
finally {
|
||||
if (!valid && pending != null) {
|
||||
pending.releaseLock();
|
||||
locked = false;
|
||||
}
|
||||
if (trace) {
|
||||
log.tracef("acquirePutFromLoadLock(%s#%s, %d) ended with %s, valid: %s", cache.getName(), key, txTimestamp, pending, valid);
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
if (trace) {
|
||||
log.tracef("acquirePutFromLoadLock(%s#%s, %d) failed to lock", cache.getName(), key, txTimestamp);
|
||||
}
|
||||
// oops, we have leaked record for this owner, but we don't want to wait here
|
||||
return null;
|
||||
}
|
||||
}
|
||||
else {
|
||||
long regionInvalidationTimestamp = this.regionInvalidationTimestamp;
|
||||
if (txTimestamp <= regionInvalidationTimestamp) {
|
||||
if (trace) {
|
||||
log.tracef("acquirePutFromLoadLock(%s#%s, %d) failed due to region invalidated at %d", cache.getName(), key, txTimestamp, regionInvalidationTimestamp);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
else {
|
||||
if (trace) {
|
||||
log.tracef("Region invalidated at %d, this transaction started at %d", regionInvalidationTimestamp, txTimestamp);
|
||||
}
|
||||
}
|
||||
|
||||
PendingPut pendingPut = new PendingPut(session);
|
||||
pending = new PendingPutMap(pendingPut);
|
||||
PendingPutMap existing = pendingPuts.putIfAbsent(key, pending);
|
||||
if (existing != null) {
|
||||
pending = existing;
|
||||
}
|
||||
// continue in next loop with lock acquisition
|
||||
}
|
||||
}
|
||||
catch (Throwable t) {
|
||||
if (locked) {
|
||||
pending.releaseLock();
|
||||
}
|
||||
|
||||
if (t instanceof RuntimeException) {
|
||||
throw (RuntimeException) t;
|
||||
}
|
||||
else if (t instanceof Error) {
|
||||
throw (Error) t;
|
||||
}
|
||||
else {
|
||||
throw new RuntimeException(t);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
    /**
     * Releases the lock previously obtained by a call to
     * {@link #acquirePutFromLoadLock(SharedSessionContractImplementor, Object, long)}.
     *
     * @param key the key
     * @param lock the lock returned by the matching <code>acquirePutFromLoadLock</code> call
     */
    public void releasePutFromLoadLock(Object key, Lock lock) {
        if (trace) {
            log.tracef("releasePutFromLoadLock(%s#%s, %s)", cache.getName(), key, lock);
        }
        final PendingPutMap pending = (PendingPutMap) lock;
        if ( pending != null ) {
            if ( pending.canRemove() ) {
                pending.setRemoved();
                pendingPuts.remove( key, pending );
            }
            pending.releaseLock();
        }
    }
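
    // Illustrative usage sketch, not part of the original class: the contract documented on
    // acquirePutFromLoadLock above - a non-null Lock must always be paired with a
    // releasePutFromLoadLock call, typically wrapped around the second-level cache put.
    // putForExternalRead is just one way a caller might publish the loaded value.
    private void putFromLoadUsageExample(SharedSessionContractImplementor session, Object key, Object value, long txTimestamp) {
        Lock lock = acquirePutFromLoadLock( session, key, txTimestamp );
        if ( lock == null ) {
            // A concurrent invalidation is in flight; caching the value now could store stale data.
            return;
        }
        try {
            cache.putForExternalRead( key, value );
        }
        finally {
            releasePutFromLoadLock( key, lock );
        }
    }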

    /**
     * Invalidates all {@link #registerPendingPut(SharedSessionContractImplementor, Object, long) previously registered pending puts} ensuring a subsequent call to
     * {@link #acquirePutFromLoadLock(SharedSessionContractImplementor, Object, long)} will return <code>null</code>. <p> This method will block until any
     * concurrent thread that has {@link #acquirePutFromLoadLock(SharedSessionContractImplementor, Object, long) acquired the putFromLoad lock} for any key has
     * released the lock. This allows the caller to be certain the putFromLoad will not execute after this method returns,
     * possibly caching stale data. </p>
     *
     * @return <code>true</code> if the invalidation was successful; <code>false</code> if a problem occurred (which the
     * caller should treat as an exception condition)
     */
    public boolean beginInvalidatingRegion() {
        if (trace) {
            log.trace("Started invalidating region " + cache.getName());
        }
        boolean ok = true;
        long now = regionFactory.nextTimestamp();
        // deny all puts until endInvalidatingRegion is called; at that time the region should be already
        // in INVALID state, therefore all new requests should be blocked and ongoing should fail by timestamp
        synchronized (this) {
            regionInvalidationTimestamp = Long.MAX_VALUE;
            regionInvalidations++;
        }

        try {
            // Acquire the lock for each entry to ensure any ongoing
            // work associated with it is completed before we return
            // We cannot erase the map: if there was ongoing invalidation and we removed it, registerPendingPut
            // started after that would have no way of finding out that the entity *is* invalidated (it was
            // removed from the cache and now the DB is about to be updated).
            for (Iterator<PendingPutMap> it = pendingPuts.values().iterator(); it.hasNext(); ) {
                PendingPutMap entry = it.next();
                if (entry.acquireLock(60, TimeUnit.SECONDS)) {
                    try {
                        entry.invalidate(now);
                    }
                    finally {
                        entry.releaseLock();
                    }
                }
                else {
                    ok = false;
                }
            }
        }
        catch (Exception e) {
            ok = false;
        }
        return ok;
    }

    /**
     * Called when the region invalidation is finished.
     */
    public void endInvalidatingRegion() {
        synchronized (this) {
            if (--regionInvalidations == 0) {
                regionInvalidationTimestamp = regionFactory.nextTimestamp();
                if (trace) {
                    log.tracef("Finished invalidating region %s at %d", cache.getName(), regionInvalidationTimestamp);
                }
            }
            else {
                if (trace) {
                    log.tracef("Finished invalidating region %s, but there are %d ongoing invalidations", cache.getName(), regionInvalidations);
                }
            }
        }
    }
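
    // Illustrative sketch, not part of the original class: beginInvalidatingRegion and
    // endInvalidatingRegion above are meant to bracket a region-wide removal, with the end call in a
    // finally block so that the invalidation counter is always balanced and putFromLoads are allowed
    // again. cache.clear() stands in for whatever region-wide eviction the caller actually performs.
    private void regionInvalidationUsageExample() {
        boolean ok = beginInvalidatingRegion();
        try {
            if ( ok ) {
                cache.clear();
            }
        }
        finally {
            endInvalidatingRegion();
        }
    }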
|
||||
|
||||
/**
|
||||
* Notifies this validator that it is expected that a database read followed by a subsequent {@link
|
||||
* #acquirePutFromLoadLock(SharedSessionContractImplementor, Object, long)} call will occur. The intent is this method would be called following a cache miss
|
||||
* wherein it is expected that a database read plus cache put will occur. Calling this method allows the validator to
|
||||
* treat the subsequent <code>acquirePutFromLoadLock</code> as if the database read occurred when this method was
|
||||
* invoked. This allows the validator to compare the timestamp of this call against the timestamp of subsequent removal
|
||||
* notifications.
|
||||
*
|
||||
* @param session
|
||||
* @param key key that will be used for subsequent cache put
|
||||
* @param txTimestamp
|
||||
*/
|
||||
public void registerPendingPut(SharedSessionContractImplementor session, Object key, long txTimestamp) {
|
||||
long invalidationTimestamp = this.regionInvalidationTimestamp;
|
||||
if (txTimestamp <= invalidationTimestamp) {
|
||||
if (trace) {
|
||||
log.tracef("registerPendingPut(%s#%s, %d) skipped due to region invalidation (%d)", cache.getName(), key, txTimestamp, invalidationTimestamp);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
final PendingPut pendingPut = new PendingPut( session );
|
||||
final PendingPutMap pendingForKey = new PendingPutMap( pendingPut );
|
||||
|
||||
for (;;) {
|
||||
final PendingPutMap existing = pendingPuts.putIfAbsent(key, pendingForKey);
|
||||
if (existing != null) {
|
||||
if (existing.acquireLock(10, TimeUnit.SECONDS)) {
|
||||
try {
|
||||
if (existing.isRemoved()) {
|
||||
if (trace) {
|
||||
log.tracef("Record removed when waiting for the lock.");
|
||||
}
|
||||
continue;
|
||||
}
|
||||
if (!existing.hasInvalidator()) {
|
||||
existing.put(pendingPut);
|
||||
}
|
||||
}
|
||||
finally {
|
||||
existing.releaseLock();
|
||||
}
|
||||
if (trace) {
|
||||
log.tracef("registerPendingPut(%s#%s, %d) ended with %s", cache.getName(), key, txTimestamp, existing);
|
||||
}
|
||||
}
|
||||
else {
|
||||
if (trace) {
|
||||
log.tracef("registerPendingPut(%s#%s, %d) failed to acquire lock", cache.getName(), key, txTimestamp);
|
||||
}
|
||||
// Can't get the lock; when we come back we'll be a "naked put"
|
||||
}
|
||||
}
|
||||
else {
|
||||
if (trace) {
|
||||
log.tracef("registerPendingPut(%s#%s, %d) registered using putIfAbsent: %s", cache.getName(), key, txTimestamp, pendingForKey);
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
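
    // Illustrative sketch, not part of the original class: the read-through flow described in the
    // javadoc above. registerPendingPut is called on a cache miss before the database read, so that
    // the later acquirePutFromLoadLock can compare this transaction's timestamp against any
    // invalidation that happened in between.
    private void cacheMissUsageExample(SharedSessionContractImplementor session, Object key, long txTimestamp) {
        registerPendingPut( session, key, txTimestamp );
        Object value = null; // placeholder for the actual database read
        Lock lock = acquirePutFromLoadLock( session, key, txTimestamp );
        if ( lock != null ) {
            try {
                cache.putForExternalRead( key, value );
            }
            finally {
                releasePutFromLoadLock( key, lock );
            }
        }
    }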
|
||||
|
||||
/**
|
||||
* Invalidates any {@link #registerPendingPut(SharedSessionContractImplementor, Object, long) previously registered pending puts}
|
||||
* and disables further registrations ensuring a subsequent call to {@link #acquirePutFromLoadLock(SharedSessionContractImplementor, Object, long)}
|
||||
* will return <code>false</code>. <p> This method will block until any concurrent thread that has
|
||||
* {@link #acquirePutFromLoadLock(SharedSessionContractImplementor, Object, long) acquired the putFromLoad lock} for the given key
|
||||
* has released the lock. This allows the caller to be certain the putFromLoad will not execute after this method
|
||||
* returns, possibly caching stale data. </p>
|
||||
* After the transaction completes, {@link #endInvalidatingKey(Object, Object)} needs to be called.
|
||||
*
|
||||
* @param key key identifying data whose pending puts should be invalidated
|
||||
*
|
||||
* @return <code>true</code> if the invalidation was successful; <code>false</code> if a problem occurred (which the
|
||||
* caller should treat as an exception condition)
|
||||
*/
|
||||
public boolean beginInvalidatingKey(Object lockOwner, Object key) {
|
||||
return beginInvalidatingWithPFER(lockOwner, key, null);
|
||||
}
|
||||
|
||||
public boolean beginInvalidatingWithPFER(Object lockOwner, Object key, Object valueForPFER) {
|
||||
for (;;) {
|
||||
PendingPutMap pending = new PendingPutMap(null);
|
||||
PendingPutMap prev = pendingPuts.putIfAbsent(key, pending);
|
||||
if (prev != null) {
|
||||
pending = prev;
|
||||
}
|
||||
if (pending.acquireLock(60, TimeUnit.SECONDS)) {
|
||||
try {
|
||||
if (pending.isRemoved()) {
|
||||
if (trace) {
|
||||
log.tracef("Record removed when waiting for the lock.");
|
||||
}
|
||||
continue;
|
||||
}
|
||||
long now = regionFactory.nextTimestamp();
|
||||
pending.invalidate(now);
|
||||
pending.addInvalidator(lockOwner, valueForPFER, now);
|
||||
}
|
||||
finally {
|
||||
pending.releaseLock();
|
||||
}
|
||||
if (trace) {
|
||||
log.tracef("beginInvalidatingKey(%s#%s, %s) ends with %s", cache.getName(), key, lockOwnerToString(lockOwner), pending);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
else {
|
||||
log.tracef("beginInvalidatingKey(%s#%s, %s) failed to acquire lock", cache.getName(), key, lockOwnerToString(lockOwner));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public boolean endInvalidatingKey(Object lockOwner, Object key) {
|
||||
return endInvalidatingKey(lockOwner, key, false);
|
||||
}
|
||||
|
||||
/**
|
||||
* Called after the transaction completes, allowing caching of entries. It is possible that this method
|
||||
* is called without previous invocation of {@link #beginInvalidatingKey(Object, Object)}, then it should be a no-op.
|
||||
*
|
||||
* @param lockOwner owner of the invalidation - transaction or thread
|
||||
* @param key
|
||||
* @return <code>true</code> if the invalidation has ended (or none was registered); <code>false</code> if the pending-puts lock could not be acquired
|
||||
*/
|
||||
public boolean endInvalidatingKey(Object lockOwner, Object key, boolean doPFER) {
|
||||
PendingPutMap pending = pendingPuts.get(key);
|
||||
if (pending == null) {
|
||||
if (trace) {
|
||||
log.tracef("endInvalidatingKey(%s#%s, %s) could not find pending puts", cache.getName(), key, lockOwnerToString(lockOwner));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
if (pending.acquireLock(60, TimeUnit.SECONDS)) {
|
||||
try {
|
||||
long now = regionFactory.nextTimestamp();
|
||||
pending.removeInvalidator(lockOwner, key, now, doPFER);
|
||||
// we can't remove the pending put yet because we wait for naked puts
|
||||
// pendingPuts should be configured with maxIdle time so won't have memory leak
|
||||
return true;
|
||||
}
|
||||
finally {
|
||||
pending.releaseLock();
|
||||
if (trace) {
|
||||
log.tracef("endInvalidatingKey(%s#%s, %s) ends with %s", cache.getName(), key, lockOwnerToString(lockOwner), pending);
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
if (trace) {
|
||||
log.tracef("endInvalidatingKey(%s#%s, %s) failed to acquire lock", cache.getName(), key, lockOwnerToString(lockOwner));
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}
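
    // Illustrative sketch, not part of the original class: beginInvalidatingKey and endInvalidatingKey
    // above bracket a single-key write; the lock owner here is the session performing it. In the module
    // itself the end call is driven by a transaction Synchronization (see registerRemoteInvalidation
    // below), since it must run only after the transaction completes.
    private void keyInvalidationUsageExample(SharedSessionContractImplementor session, Object key) {
        if ( !beginInvalidatingKey( session, key ) ) {
            throw new IllegalStateException( "Failed to invalidate pending putFromLoads for " + key );
        }
        try {
            // ... perform the database write and update/remove the cached entry here ...
        }
        finally {
            endInvalidatingKey( session, key );
        }
    }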
|
||||
|
||||
public boolean registerRemoteInvalidation(Object key, Object lockOwner) {
|
||||
SharedSessionContractImplementor session = currentSession.get();
|
||||
TransactionCoordinator transactionCoordinator = session == null ? null : session.getTransactionCoordinator();
|
||||
if (transactionCoordinator != null) {
|
||||
if (trace) {
|
||||
log.tracef("Registering synchronization on transaction in %s, cache %s: %s", lockOwnerToString(session), cache.getName(), key);
|
||||
}
|
||||
InvalidationSynchronization sync = new InvalidationSynchronization(nonTxPutFromLoadInterceptor, key, lockOwner);
|
||||
transactionCoordinator.getLocalSynchronizations().registerSynchronization(sync);
|
||||
return true;
|
||||
}
|
||||
// evict() command is not executed in session context
|
||||
return false;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------- Private
|
||||
|
||||
// we can't use SessionImpl.toString() concurrently
|
||||
private static String lockOwnerToString(Object lockOwner) {
|
||||
return lockOwner instanceof SharedSessionContractImplementor ? "Session#" + lockOwner.hashCode() : lockOwner.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* Lazy-initialization map for PendingPut. Optimized for the expected usual case where only a
|
||||
* single put is pending for a given key.
|
||||
* <p/>
|
||||
* This class is NOT THREAD SAFE. All operations on it must be performed with the lock held.
|
||||
*/
|
||||
private class PendingPutMap extends Lock {
|
||||
// Number of pending puts which trigger garbage collection
|
||||
private static final int GC_THRESHOLD = 10;
|
||||
private PendingPut singlePendingPut;
|
||||
private Map<Object, PendingPut> fullMap;
|
||||
private final java.util.concurrent.locks.Lock lock = new ReentrantLock();
|
||||
private Invalidator singleInvalidator;
|
||||
private Map<Object, Invalidator> invalidators;
|
||||
private long lastInvalidationEnd = Long.MIN_VALUE;
|
||||
private boolean removed = false;
|
||||
|
||||
PendingPutMap(PendingPut singleItem) {
|
||||
this.singlePendingPut = singleItem;
|
||||
}
|
||||
|
||||
// toString should be called only for debugging purposes
|
||||
public String toString() {
|
||||
if (lock.tryLock()) {
|
||||
try {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append("{ PendingPuts=");
|
||||
if (singlePendingPut == null) {
|
||||
if (fullMap == null) {
|
||||
sb.append("[]");
|
||||
}
|
||||
else {
|
||||
sb.append(fullMap.values());
|
||||
}
|
||||
}
|
||||
else {
|
||||
sb.append('[').append(singlePendingPut).append(']');
|
||||
}
|
||||
sb.append(", Invalidators=");
|
||||
if (singleInvalidator == null) {
|
||||
if (invalidators == null) {
|
||||
sb.append("[]");
|
||||
}
|
||||
else {
|
||||
sb.append(invalidators.values());
|
||||
}
|
||||
}
|
||||
else {
|
||||
sb.append('[').append(singleInvalidator).append(']');
|
||||
}
|
||||
sb.append(", LastInvalidationEnd=");
|
||||
if (lastInvalidationEnd == Long.MIN_VALUE) {
|
||||
sb.append("<none>");
|
||||
}
|
||||
else {
|
||||
sb.append(lastInvalidationEnd);
|
||||
}
|
||||
return sb.append(", Removed=").append(removed).append("}").toString();
|
||||
}
|
||||
finally {
|
||||
lock.unlock();
|
||||
}
|
||||
}
|
||||
else {
|
||||
return "PendingPutMap: <locked>";
|
||||
}
|
||||
}
|
||||
|
||||
public void put(PendingPut pendingPut) {
|
||||
if ( singlePendingPut == null ) {
|
||||
if ( fullMap == null ) {
|
||||
// initial put
|
||||
singlePendingPut = pendingPut;
|
||||
}
|
||||
else {
|
||||
fullMap.put( pendingPut.owner, pendingPut );
|
||||
if (fullMap.size() >= GC_THRESHOLD) {
|
||||
gc();
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
// 2nd put; need a map
|
||||
fullMap = new HashMap<Object, PendingPut>( 4 );
|
||||
fullMap.put( singlePendingPut.owner, singlePendingPut );
|
||||
singlePendingPut = null;
|
||||
fullMap.put( pendingPut.owner, pendingPut );
|
||||
}
|
||||
}
|
||||
|
||||
public PendingPut remove(Object ownerForPut) {
|
||||
PendingPut removed = null;
|
||||
if ( fullMap == null ) {
|
||||
if ( singlePendingPut != null
|
||||
&& singlePendingPut.owner.equals( ownerForPut ) ) {
|
||||
removed = singlePendingPut;
|
||||
singlePendingPut = null;
|
||||
}
|
||||
}
|
||||
else {
|
||||
removed = fullMap.remove( ownerForPut );
|
||||
}
|
||||
return removed;
|
||||
}
|
||||
|
||||
public int size() {
|
||||
return fullMap == null ? (singlePendingPut == null ? 0 : 1)
|
||||
: fullMap.size();
|
||||
}
|
||||
|
||||
public boolean acquireLock(long time, TimeUnit unit) {
|
||||
try {
|
||||
return lock.tryLock( time, unit );
|
||||
}
|
||||
catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
public void releaseLock() {
|
||||
lock.unlock();
|
||||
}
|
||||
|
||||
public void invalidate(long now) {
|
||||
if ( singlePendingPut != null ) {
|
||||
if (singlePendingPut.invalidate(now, expirationPeriod)) {
|
||||
singlePendingPut = null;
|
||||
}
|
||||
}
|
||||
else if ( fullMap != null ) {
|
||||
for ( Iterator<PendingPut> it = fullMap.values().iterator(); it.hasNext(); ) {
|
||||
PendingPut pp = it.next();
|
||||
if (pp.invalidate(now, expirationPeriod)) {
|
||||
it.remove();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Running {@link #gc()} is important when the key is regularly queried but it is not
|
||||
* present in DB. In such case, the putFromLoad would not be called at all and we would
|
||||
* leak pending puts. Cache expiration should handle the case when the pending puts
|
||||
* are not accessed frequently; when these are accessed, we have to do the housekeeping
|
||||
* internally to prevent unlimited growth of the map.
|
||||
* The pending puts will get their timestamps when the map reaches {@link #GC_THRESHOLD}
|
||||
* entries; after expiration period these will be removed completely either through
|
||||
* invalidation or when we try to register next pending put.
|
||||
*/
|
||||
private void gc() {
|
||||
assert fullMap != null;
|
||||
long now = regionFactory.nextTimestamp();
|
||||
log.tracef("Contains %d, doing GC at %d, expiration %d", size(), now, expirationPeriod);
|
||||
for ( Iterator<PendingPut> it = fullMap.values().iterator(); it.hasNext(); ) {
|
||||
PendingPut pp = it.next();
|
||||
if (pp.gc(now, expirationPeriod)) {
|
||||
it.remove();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void addInvalidator(Object owner, Object valueForPFER, long now) {
|
||||
assert owner != null;
|
||||
if (invalidators == null) {
|
||||
if (singleInvalidator == null) {
|
||||
singleInvalidator = new Invalidator(owner, now, valueForPFER);
|
||||
put(new PendingPut(owner));
|
||||
}
|
||||
else {
|
||||
if (singleInvalidator.registeredTimestamp + expirationPeriod < now) {
|
||||
// override leaked invalidator
|
||||
singleInvalidator = new Invalidator(owner, now, valueForPFER);
|
||||
put(new PendingPut(owner));
|
||||
}
|
||||
invalidators = new HashMap<Object, Invalidator>();
|
||||
invalidators.put(singleInvalidator.owner, singleInvalidator);
|
||||
// with multiple invalidations the PFER must not be executed
|
||||
invalidators.put(owner, new Invalidator(owner, now, null));
|
||||
singleInvalidator = null;
|
||||
}
|
||||
}
|
||||
else {
|
||||
long allowedRegistration = now - expirationPeriod;
|
||||
// remove leaked invalidators
|
||||
for (Iterator<Invalidator> it = invalidators.values().iterator(); it.hasNext(); ) {
|
||||
if (it.next().registeredTimestamp < allowedRegistration) {
|
||||
it.remove();
|
||||
}
|
||||
}
|
||||
// With multiple invalidations in parallel we don't know the order in which
|
||||
// the writes were applied into DB and therefore we can't update the cache
|
||||
// with the most recent value.
|
||||
if (invalidators.isEmpty()) {
|
||||
put(new PendingPut(owner));
|
||||
}
|
||||
else {
|
||||
valueForPFER = null;
|
||||
}
|
||||
invalidators.put(owner, new Invalidator(owner, now, valueForPFER));
|
||||
}
|
||||
}
|
||||
|
||||
public boolean hasInvalidator() {
|
||||
return singleInvalidator != null || (invalidators != null && !invalidators.isEmpty());
|
||||
}
|
||||
|
||||
// Debug introspection method, do not use in production code!
|
||||
public Collection<Invalidator> getInvalidators() {
|
||||
lock.lock();
|
||||
try {
|
||||
if (singleInvalidator != null) {
|
||||
return Collections.singleton(singleInvalidator);
|
||||
}
|
||||
else if (invalidators != null) {
|
||||
return new ArrayList<Invalidator>(invalidators.values());
|
||||
}
|
||||
else {
|
||||
return Collections.EMPTY_LIST;
|
||||
}
|
||||
}
|
||||
finally {
|
||||
lock.unlock();
|
||||
}
|
||||
}
|
||||
|
||||
public void removeInvalidator(Object owner, Object key, long now, boolean doPFER) {
|
||||
if (invalidators == null) {
|
||||
if (singleInvalidator != null && singleInvalidator.owner.equals(owner)) {
|
||||
pferValueIfNeeded(owner, key, singleInvalidator.valueForPFER, doPFER);
|
||||
singleInvalidator = null;
|
||||
}
|
||||
}
|
||||
else {
|
||||
Invalidator invalidator = invalidators.remove(owner);
|
||||
if (invalidator != null) {
|
||||
pferValueIfNeeded(owner, key, invalidator.valueForPFER, doPFER);
|
||||
}
|
||||
}
|
||||
lastInvalidationEnd = Math.max(lastInvalidationEnd, now);
|
||||
}
|
||||
|
||||
private void pferValueIfNeeded(Object owner, Object key, Object valueForPFER, boolean doPFER) {
|
||||
if (valueForPFER != null) {
|
||||
PendingPut pendingPut = remove(owner);
|
||||
if (doPFER && pendingPut != null && !pendingPut.completed) {
|
||||
cache.putForExternalRead(key, valueForPFER);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public boolean canRemove() {
|
||||
return size() == 0 && !hasInvalidator() && lastInvalidationEnd == Long.MIN_VALUE;
|
||||
}
|
||||
|
||||
public void setRemoved() {
|
||||
removed = true;
|
||||
}
|
||||
|
||||
public boolean isRemoved() {
|
||||
return removed;
|
||||
}
|
||||
}
|
||||
|
||||
private static class PendingPut {
|
||||
private final Object owner;
|
||||
private boolean completed;
|
||||
// the timestamp is not filled during registration in order to avoid expensive currentTimeMillis() calls
|
||||
private long registeredTimestamp = Long.MIN_VALUE;
|
||||
|
||||
private PendingPut(Object owner) {
|
||||
this.owner = owner;
|
||||
}
|
||||
|
||||
public String toString() {
|
||||
// we can't use SessionImpl.toString() concurrently
|
||||
return (completed ? "C@" : "R@") + lockOwnerToString(owner);
|
||||
}
|
||||
|
||||
public boolean invalidate(long now, long expirationPeriod) {
|
||||
completed = true;
|
||||
return gc(now, expirationPeriod);
|
||||
}
|
||||
|
||||
public boolean gc(long now, long expirationPeriod) {
|
||||
if (registeredTimestamp == Long.MIN_VALUE) {
|
||||
registeredTimestamp = now;
|
||||
}
|
||||
else if (registeredTimestamp + expirationPeriod < now){
|
||||
return true; // this is a leaked pending put
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
private static class Invalidator {
|
||||
private final Object owner;
|
||||
private final long registeredTimestamp;
|
||||
private final Object valueForPFER;
|
||||
|
||||
private Invalidator(Object owner, long registeredTimestamp, Object valueForPFER) {
|
||||
this.owner = owner;
|
||||
this.registeredTimestamp = registeredTimestamp;
|
||||
this.valueForPFER = valueForPFER;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
final StringBuilder sb = new StringBuilder("{");
|
||||
sb.append("Owner=").append(lockOwnerToString(owner));
|
||||
sb.append(", Timestamp=").append(registeredTimestamp);
|
||||
sb.append('}');
|
||||
return sb.toString();
|
||||
}
|
||||
}
|
||||
}

@@ -1,39 +0,0 @@

/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.cache.infinispan.access;

import java.util.concurrent.TimeUnit;

import org.hibernate.cache.infinispan.impl.BaseTransactionalDataRegion;
import org.hibernate.cache.infinispan.util.InvocationAfterCompletion;
import org.hibernate.cache.infinispan.util.VersionedEntry;
import org.hibernate.resource.transaction.spi.TransactionCoordinator;

import org.infinispan.AdvancedCache;

/**
 * @author Radim Vansa <rvansa@redhat.com>
 */
public class RemovalSynchronization extends InvocationAfterCompletion {
    private final BaseTransactionalDataRegion region;
    private final Object key;
    private final AdvancedCache cache;

    public RemovalSynchronization(TransactionCoordinator tc, AdvancedCache cache, boolean requiresTransaction, BaseTransactionalDataRegion region, Object key) {
        super(tc, requiresTransaction);
        this.cache = cache;
        this.region = region;
        this.key = key;
    }

    @Override
    protected void invoke(boolean success) {
        if (success) {
            cache.put(key, new VersionedEntry(null, null, region.nextTimestamp()), region.getTombstoneExpiration(), TimeUnit.MILLISECONDS);
        }
    }
}
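
// Illustrative note, not part of the original file: a RemovalSynchronization is expected to be
// registered with the session's transaction coordinator so that the VersionedEntry put above runs
// only after the transaction completes, along the lines of (requiresTransaction shown as false is
// just an example value):
//
//     RemovalSynchronization sync = new RemovalSynchronization( tc, cache, false, region, key );
//     tc.getLocalSynchronizations().registerSynchronization( sync );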

@@ -1,173 +0,0 @@
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.access;
|
||||
|
||||
import org.hibernate.cache.CacheException;
|
||||
import org.hibernate.cache.infinispan.impl.BaseTransactionalDataRegion;
|
||||
import org.hibernate.cache.infinispan.util.Caches;
|
||||
import org.hibernate.cache.infinispan.util.FutureUpdate;
|
||||
import org.hibernate.cache.infinispan.util.InfinispanMessageLogger;
|
||||
import org.hibernate.cache.infinispan.util.Tombstone;
|
||||
import org.hibernate.cache.infinispan.util.TombstoneUpdate;
|
||||
import org.hibernate.cache.spi.access.SoftLock;
|
||||
import org.hibernate.engine.spi.SharedSessionContractImplementor;
|
||||
import org.hibernate.resource.transaction.spi.TransactionCoordinator;
|
||||
|
||||
import org.infinispan.AdvancedCache;
|
||||
import org.infinispan.configuration.cache.Configuration;
|
||||
import org.infinispan.context.Flag;
|
||||
|
||||
/**
|
||||
* @author Radim Vansa <rvansa@redhat.com>
|
||||
*/
|
||||
public class TombstoneAccessDelegate implements AccessDelegate {
|
||||
private static final InfinispanMessageLogger log = InfinispanMessageLogger.Provider.getLog( TombstoneAccessDelegate.class );
|
||||
|
||||
protected final BaseTransactionalDataRegion region;
|
||||
protected final AdvancedCache cache;
|
||||
protected final AdvancedCache writeCache;
|
||||
protected final AdvancedCache asyncWriteCache;
|
||||
protected final AdvancedCache putFromLoadCache;
|
||||
protected final boolean requiresTransaction;
|
||||
|
||||
public TombstoneAccessDelegate(BaseTransactionalDataRegion region) {
|
||||
this.region = region;
|
||||
this.cache = region.getCache();
|
||||
this.writeCache = Caches.ignoreReturnValuesCache(cache);
|
||||
// Note that correct behaviour of local and async writes depends on LockingInterceptor (see there for details)
|
||||
this.asyncWriteCache = writeCache.withFlags(Flag.FORCE_ASYNCHRONOUS);
|
||||
this.putFromLoadCache = asyncWriteCache.withFlags(Flag.ZERO_LOCK_ACQUISITION_TIMEOUT, Flag.FAIL_SILENTLY);
|
||||
Configuration configuration = cache.getCacheConfiguration();
|
||||
if (configuration.clustering().cacheMode().isInvalidation()) {
|
||||
throw new IllegalArgumentException("For tombstone-based caching, invalidation cache is not allowed.");
|
||||
}
|
||||
if (configuration.transaction().transactionMode().isTransactional()) {
|
||||
throw new IllegalArgumentException("Currently transactional caches are not supported.");
|
||||
}
|
||||
requiresTransaction = configuration.transaction().transactionMode().isTransactional()
|
||||
&& !configuration.transaction().autoCommit();
|
||||
}
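
    // Illustrative sketch, not part of the original class: a cache configuration that passes the
    // checks above must use a non-invalidation clustering mode and must not be transactional.
    // Using Infinispan's ConfigurationBuilder (the concrete modes below are example choices, not
    // defaults of this module):
    //
    //     Configuration tombstoneCompatible = new ConfigurationBuilder()
    //             .clustering().cacheMode( CacheMode.DIST_SYNC )
    //             .transaction().transactionMode( TransactionMode.NON_TRANSACTIONAL )
    //             .build();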
|
||||
|
||||
@Override
|
||||
public Object get(SharedSessionContractImplementor session, Object key, long txTimestamp) throws CacheException {
|
||||
if (txTimestamp < region.getLastRegionInvalidation() ) {
|
||||
return null;
|
||||
}
|
||||
Object value = cache.get(key);
|
||||
if (value instanceof Tombstone) {
|
||||
return null;
|
||||
}
|
||||
else if (value instanceof FutureUpdate) {
|
||||
return ((FutureUpdate) value).getValue();
|
||||
}
|
||||
else {
|
||||
return value;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean putFromLoad(SharedSessionContractImplementor session, Object key, Object value, long txTimestamp, Object version) {
|
||||
return putFromLoad(session, key, value, txTimestamp, version, false);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean putFromLoad(SharedSessionContractImplementor session, Object key, Object value, long txTimestamp, Object version, boolean minimalPutOverride) throws CacheException {
|
||||
long lastRegionInvalidation = region.getLastRegionInvalidation();
|
||||
if (txTimestamp < lastRegionInvalidation) {
|
||||
log.tracef("putFromLoad not executed since tx started at %d, before last region invalidation finished = %d", txTimestamp, lastRegionInvalidation);
|
||||
return false;
|
||||
}
|
||||
if (minimalPutOverride) {
|
||||
Object prev = cache.get(key);
|
||||
if (prev instanceof Tombstone) {
|
||||
Tombstone tombstone = (Tombstone) prev;
|
||||
long lastTimestamp = tombstone.getLastTimestamp();
|
||||
if (txTimestamp <= lastTimestamp) {
|
||||
log.tracef("putFromLoad not executed since tx started at %d, before last invalidation finished = %d", txTimestamp, lastTimestamp);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
else if (prev != null) {
|
||||
log.tracef("putFromLoad not executed since cache contains %s", prev);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
// we can't use putForExternalRead since the PFER flag means that entry is not wrapped into context
|
||||
// when it is present in the container. TombstoneCallInterceptor will deal with this.
|
||||
putFromLoadCache.put(key, new TombstoneUpdate(session.getTimestamp(), value));
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean insert(SharedSessionContractImplementor session, Object key, Object value, Object version) throws CacheException {
|
||||
write(session, key, value);
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean update(SharedSessionContractImplementor session, Object key, Object value, Object currentVersion, Object previousVersion) throws CacheException {
|
||||
write(session, key, value);
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void remove(SharedSessionContractImplementor session, Object key) throws CacheException {
|
||||
write(session, key, null);
|
||||
}
|
||||
|
||||
protected void write(SharedSessionContractImplementor session, Object key, Object value) {
|
||||
TransactionCoordinator tc = session.getTransactionCoordinator();
|
||||
FutureUpdateSynchronization sync = new FutureUpdateSynchronization(tc, asyncWriteCache, requiresTransaction, key, value, region, session.getTimestamp());
|
||||
// The update will be invalidating all putFromLoads for the duration of expiration or until removed by the synchronization
|
||||
Tombstone tombstone = new Tombstone(sync.getUuid(), region.nextTimestamp() + region.getTombstoneExpiration());
|
||||
// The outcome of this operation is actually defined in TombstoneCallInterceptor
|
||||
// Metadata in PKVC are cleared and set in the interceptor, too
|
||||
writeCache.put(key, tombstone);
|
||||
tc.getLocalSynchronizations().registerSynchronization(sync);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void removeAll() throws CacheException {
|
||||
region.beginInvalidation();
|
||||
try {
|
||||
Caches.broadcastEvictAll(cache);
|
||||
}
|
||||
finally {
|
||||
region.endInvalidation();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void evict(Object key) throws CacheException {
|
||||
writeCache.put(key, new TombstoneUpdate<>(region.nextTimestamp(), null));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void evictAll() throws CacheException {
|
||||
region.beginInvalidation();
|
||||
try {
|
||||
Caches.broadcastEvictAll(cache);
|
||||
}
|
||||
finally {
|
||||
region.endInvalidation();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void unlockItem(SharedSessionContractImplementor session, Object key) throws CacheException {
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean afterInsert(SharedSessionContractImplementor session, Object key, Object value, Object version) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean afterUpdate(SharedSessionContractImplementor session, Object key, Object value, Object currentVersion, Object previousVersion, SoftLock lock) {
|
||||
return false;
|
||||
}
|
||||
}

@@ -1,213 +0,0 @@
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.access;
|
||||
|
||||
import org.hibernate.cache.infinispan.impl.BaseTransactionalDataRegion;
|
||||
import org.hibernate.cache.infinispan.util.FutureUpdate;
|
||||
import org.hibernate.cache.infinispan.util.TombstoneUpdate;
|
||||
import org.hibernate.cache.infinispan.util.Tombstone;
|
||||
import org.infinispan.AdvancedCache;
|
||||
import org.infinispan.commands.read.SizeCommand;
|
||||
import org.infinispan.commands.write.PutKeyValueCommand;
|
||||
import org.infinispan.commands.write.ValueMatcher;
|
||||
import org.infinispan.commons.logging.Log;
|
||||
import org.infinispan.commons.logging.LogFactory;
|
||||
import org.infinispan.commons.util.CloseableIterable;
|
||||
import org.infinispan.container.entries.CacheEntry;
|
||||
import org.infinispan.container.entries.MVCCEntry;
|
||||
import org.infinispan.context.Flag;
|
||||
import org.infinispan.context.InvocationContext;
|
||||
import org.infinispan.factories.annotations.Inject;
|
||||
import org.infinispan.factories.annotations.Start;
|
||||
import org.infinispan.filter.NullValueConverter;
|
||||
import org.infinispan.interceptors.CallInterceptor;
|
||||
import org.infinispan.metadata.EmbeddedMetadata;
|
||||
import org.infinispan.metadata.Metadata;
|
||||
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* Note that this does not implement all commands, only those appropriate for {@link TombstoneAccessDelegate}
|
||||
* and {@link org.hibernate.cache.infinispan.impl.BaseTransactionalDataRegion}
|
||||
*
|
||||
* The behaviour here also breaks notifications, which are not used for 2LC caches.
|
||||
*
|
||||
* @author Radim Vansa <rvansa@redhat.com>
|
||||
*/
|
||||
public class TombstoneCallInterceptor extends CallInterceptor {
|
||||
private static final Log log = LogFactory.getLog(TombstoneCallInterceptor.class);
|
||||
private static final UUID ZERO = new UUID(0, 0);
|
||||
|
||||
private final BaseTransactionalDataRegion region;
|
||||
private final Metadata expiringMetadata;
|
||||
private Metadata defaultMetadata;
|
||||
private AdvancedCache cache;
|
||||
|
||||
public TombstoneCallInterceptor(BaseTransactionalDataRegion region) {
|
||||
this.region = region;
|
||||
expiringMetadata = new EmbeddedMetadata.Builder().lifespan(region.getTombstoneExpiration(), TimeUnit.MILLISECONDS).build();
|
||||
}
|
||||
|
||||
@Inject
|
||||
public void injectDependencies(AdvancedCache cache) {
|
||||
this.cache = cache;
|
||||
}
|
||||
|
||||
@Start
|
||||
public void start() {
|
||||
defaultMetadata = new EmbeddedMetadata.Builder()
|
||||
.lifespan(cacheConfiguration.expiration().lifespan())
|
||||
.maxIdle(cacheConfiguration.expiration().maxIdle()).build();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) throws Throwable {
|
||||
MVCCEntry e = (MVCCEntry) ctx.lookupEntry(command.getKey());
|
||||
if (e == null) {
|
||||
return null;
|
||||
}
|
||||
log.tracef("In cache %s(%d) applying update %s to %s", cache.getName(), region.getLastRegionInvalidation(), command.getValue(), e.getValue());
|
||||
try {
|
||||
Object value = command.getValue();
|
||||
if (value instanceof TombstoneUpdate) {
|
||||
return handleTombstoneUpdate(e, (TombstoneUpdate) value, command);
|
||||
}
|
||||
else if (value instanceof Tombstone) {
|
||||
return handleTombstone(e, (Tombstone) value);
|
||||
}
|
||||
else if (value instanceof FutureUpdate) {
|
||||
return handleFutureUpdate(e, (FutureUpdate) value, command);
|
||||
}
|
||||
else {
|
||||
return super.visitPutKeyValueCommand(ctx, command);
|
||||
}
|
||||
}
|
||||
finally {
|
||||
log.tracef("Result is %s", e.getValue());
|
||||
}
|
||||
}
|
||||
|
||||
private Object handleFutureUpdate(MVCCEntry e, FutureUpdate futureUpdate, PutKeyValueCommand command) {
|
||||
Object storedValue = e.getValue();
|
||||
if (storedValue instanceof Tombstone) {
|
||||
// Note that the update has to keep tombstone even if the transaction was unsuccessful;
|
||||
// before write we have removed the value and we have to protect the entry against stale putFromLoads
|
||||
Tombstone tombstone = (Tombstone) storedValue;
|
||||
setValue(e, tombstone.applyUpdate(futureUpdate.getUuid(), futureUpdate.getTimestamp(), futureUpdate.getValue()));
|
||||
|
||||
}
|
||||
else {
|
||||
// This is an async future update, and its timestamp may be vastly outdated
|
||||
// We need to first execute the async update and then local one, because if we're on the primary
|
||||
// owner the local future update would fail the async one.
|
||||
// TODO: There is some discrepancy with TombstoneUpdate handling which does not fail the update
|
||||
setFailed(command);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private Object handleTombstone(MVCCEntry e, Tombstone tombstone) {
|
||||
// Tombstones always come with lifespan in metadata
|
||||
Object storedValue = e.getValue();
|
||||
if (storedValue instanceof Tombstone) {
|
||||
setValue(e, ((Tombstone) storedValue).merge(tombstone));
|
||||
}
|
||||
else {
|
||||
setValue(e, tombstone);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
protected Object handleTombstoneUpdate(MVCCEntry e, TombstoneUpdate tombstoneUpdate, PutKeyValueCommand command) {
|
||||
Object storedValue = e.getValue();
|
||||
Object value = tombstoneUpdate.getValue();
|
||||
|
||||
if (value == null) {
|
||||
// eviction
|
||||
if (storedValue == null || storedValue instanceof Tombstone) {
|
||||
setFailed(command);
|
||||
}
|
||||
else {
|
||||
// We have to keep Tombstone, because otherwise putFromLoad could insert a stale entry
|
||||
// (after it has been already updated and *then* evicted)
|
||||
setValue(e, new Tombstone(ZERO, tombstoneUpdate.getTimestamp()));
|
||||
}
|
||||
}
|
||||
else if (storedValue instanceof Tombstone) {
|
||||
Tombstone tombstone = (Tombstone) storedValue;
|
||||
if (tombstone.getLastTimestamp() < tombstoneUpdate.getTimestamp()) {
|
||||
setValue(e, value);
|
||||
}
|
||||
}
|
||||
else if (storedValue == null) {
|
||||
// async putFromLoads shouldn't cross the invalidation timestamp
|
||||
if (region.getLastRegionInvalidation() < tombstoneUpdate.getTimestamp()) {
|
||||
setValue(e, value);
|
||||
}
|
||||
}
|
||||
else {
|
||||
// Don't do anything locally. This could be the async remote write, though, when local
|
||||
// value has been already updated: let it propagate to remote nodes, too
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private Object setValue(MVCCEntry e, Object value) {
|
||||
if (e.isRemoved()) {
|
||||
e.setRemoved(false);
|
||||
e.setCreated(true);
|
||||
e.setValid(true);
|
||||
}
|
||||
else {
|
||||
e.setChanged(true);
|
||||
}
|
||||
if (value instanceof Tombstone) {
|
||||
e.setMetadata(expiringMetadata);
|
||||
}
|
||||
else {
|
||||
e.setMetadata(defaultMetadata);
|
||||
}
|
||||
return e.setValue(value);
|
||||
}
|
||||
|
||||
private void setFailed(PutKeyValueCommand command) {
|
||||
// This sets command to be unsuccessful, since we don't want to replicate it to backup owner
|
||||
command.setValueMatcher(ValueMatcher.MATCH_NEVER);
|
||||
try {
|
||||
command.perform(null);
|
||||
}
|
||||
catch (Throwable ignored) {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitSizeCommand(InvocationContext ctx, SizeCommand command) throws Throwable {
|
||||
Set<Flag> flags = command.getFlags();
|
||||
int size = 0;
|
||||
AdvancedCache decoratedCache = cache.getAdvancedCache();
|
||||
if (flags != null) {
|
||||
decoratedCache = decoratedCache.withFlags(flags.toArray(new Flag[flags.size()]));
|
||||
}
|
||||
// In non-transactional caches we don't care about context
|
||||
CloseableIterable<CacheEntry<Object, Object>> iterable = decoratedCache
|
||||
.filterEntries(Tombstone.EXCLUDE_TOMBSTONES).converter(NullValueConverter.getInstance());
|
||||
try {
|
||||
for (CacheEntry<Object, Object> entry : iterable) {
|
||||
if (size++ == Integer.MAX_VALUE) {
|
||||
return Integer.MAX_VALUE;
|
||||
}
|
||||
}
|
||||
}
|
||||
finally {
|
||||
iterable.close();
|
||||
}
|
||||
return size;
|
||||
}
|
||||
}
|

@@ -1,68 +0,0 @@

/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.cache.infinispan.access;

import org.hibernate.cache.CacheException;
import org.hibernate.cache.infinispan.impl.BaseRegion;
import org.hibernate.cache.spi.access.SoftLock;
import org.hibernate.engine.spi.SharedSessionContractImplementor;

/**
 * Delegate for transactional caches
 *
 * @author Radim Vansa <rvansa@redhat.com>
 */
public class TxInvalidationCacheAccessDelegate extends InvalidationCacheAccessDelegate {
    public TxInvalidationCacheAccessDelegate(BaseRegion region, PutFromLoadValidator validator) {
        super(region, validator);
    }

    @Override
    @SuppressWarnings("UnusedParameters")
    public boolean insert(SharedSessionContractImplementor session, Object key, Object value, Object version) throws CacheException {
        if ( !region.checkValid() ) {
            return false;
        }

        // We need to be invalidating even for regular writes; if we were not and the write was followed by eviction
        // (or any other invalidation), naked put that was started after the eviction ended but before this insert
        // ended could insert the stale entry into the cache (since the entry was removed by eviction).

        // The beginInvalidateKey(...) is called from TxPutFromLoadInterceptor because we need the global transaction id.
        writeCache.put(key, value);
        return true;
    }

    @Override
    @SuppressWarnings("UnusedParameters")
    public boolean update(SharedSessionContractImplementor session, Object key, Object value, Object currentVersion, Object previousVersion)
            throws CacheException {
        // We update whether or not the region is valid. Other nodes
        // may have already restored the region so they need to
        // be informed of the change.

        // We need to be invalidating even for regular writes; if we were not and the write was followed by eviction
        // (or any other invalidation), naked put that was started after the eviction ended but before this update
        // ended could insert the stale entry into the cache (since the entry was removed by eviction).

        // The beginInvalidateKey(...) is called from TxPutFromLoadInterceptor because we need the global transaction id.
        writeCache.put(key, value);
        return true;
    }

    @Override
    public boolean afterInsert(SharedSessionContractImplementor session, Object key, Object value, Object version) {
        // The endInvalidatingKey(...) is called from TxPutFromLoadInterceptor because we need the global transaction id.
        return false;
    }

    @Override
    public boolean afterUpdate(SharedSessionContractImplementor session, Object key, Object value, Object currentVersion, Object previousVersion, SoftLock lock) {
        // The endInvalidatingKey(...) is called from TxPutFromLoadInterceptor because we need the global transaction id.
        return false;
    }
}

@@ -1,224 +0,0 @@
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.access;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
import org.hibernate.cache.infinispan.util.InfinispanMessageLogger;
|
||||
|
||||
import org.infinispan.commands.AbstractVisitor;
|
||||
import org.infinispan.commands.FlagAffectedCommand;
|
||||
import org.infinispan.commands.ReplicableCommand;
|
||||
import org.infinispan.commands.control.LockControlCommand;
|
||||
import org.infinispan.commands.tx.PrepareCommand;
|
||||
import org.infinispan.commands.write.ClearCommand;
|
||||
import org.infinispan.commands.write.InvalidateCommand;
|
||||
import org.infinispan.commands.write.PutKeyValueCommand;
|
||||
import org.infinispan.commands.write.PutMapCommand;
|
||||
import org.infinispan.commands.write.RemoveCommand;
|
||||
import org.infinispan.commands.write.ReplaceCommand;
|
||||
import org.infinispan.commands.write.WriteCommand;
|
||||
import org.infinispan.commons.util.InfinispanCollections;
|
||||
import org.infinispan.context.Flag;
|
||||
import org.infinispan.context.InvocationContext;
|
||||
import org.infinispan.context.impl.LocalTxInvocationContext;
|
||||
import org.infinispan.context.impl.TxInvocationContext;
|
||||
import org.infinispan.jmx.annotations.MBean;
|
||||
import org.infinispan.remoting.transport.Address;
|
||||
|
||||
/**
|
||||
* This interceptor acts as a replacement to the replication interceptor when the CacheImpl is configured with
|
||||
* ClusteredSyncMode as INVALIDATE.
|
||||
* <p/>
|
||||
* The idea is that rather than replicating changes to all caches in a cluster when write methods are called, simply
|
||||
* broadcast an {@link InvalidateCommand} on the remote caches containing all keys modified. This allows the remote
|
||||
* cache to look up the value in a shared cache loader which would have been updated with the changes.
|
||||
*
|
||||
* @author Manik Surtani
|
||||
* @author Galder Zamarreño
|
||||
* @author Mircea.Markus@jboss.com
|
||||
* @since 4.0
|
||||
*/
|
||||
@MBean(objectName = "Invalidation", description = "Component responsible for invalidating entries on remote caches when entries are written to locally.")
|
||||
public class TxInvalidationInterceptor extends BaseInvalidationInterceptor {
|
||||
private static final InfinispanMessageLogger log = InfinispanMessageLogger.Provider.getLog( TxInvalidationInterceptor.class );
|
||||
|
||||
@Override
|
||||
public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) throws Throwable {
|
||||
if ( !isPutForExternalRead( command ) ) {
|
||||
return handleInvalidate( ctx, command, command.getKey() );
|
||||
}
|
||||
return invokeNextInterceptor( ctx, command );
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitReplaceCommand(InvocationContext ctx, ReplaceCommand command) throws Throwable {
|
||||
return handleInvalidate( ctx, command, command.getKey() );
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitRemoveCommand(InvocationContext ctx, RemoveCommand command) throws Throwable {
|
||||
return handleInvalidate( ctx, command, command.getKey() );
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitClearCommand(InvocationContext ctx, ClearCommand command) throws Throwable {
|
||||
Object retval = invokeNextInterceptor( ctx, command );
|
||||
if ( !isLocalModeForced( command ) ) {
|
||||
// just broadcast the clear command - this is simplest!
|
||||
if ( ctx.isOriginLocal() ) {
|
||||
rpcManager.invokeRemotely( getMembers(), command, isSynchronous(command) ? syncRpcOptions : asyncRpcOptions );
|
||||
}
|
||||
}
|
||||
return retval;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitPutMapCommand(InvocationContext ctx, PutMapCommand command) throws Throwable {
|
||||
return handleInvalidate( ctx, command, command.getMap().keySet().toArray() );
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) throws Throwable {
|
||||
Object retval = invokeNextInterceptor( ctx, command );
|
||||
log.tracef( "Entering InvalidationInterceptor's prepare phase. Ctx flags are empty" );
|
||||
// fetch the modifications before the transaction is committed (and thus removed from the txTable)
|
||||
if ( shouldInvokeRemoteTxCommand( ctx ) ) {
|
||||
if ( ctx.getTransaction() == null ) {
|
||||
throw new IllegalStateException( "We must have an associated transaction" );
|
||||
}
|
||||
|
||||
List<WriteCommand> mods = Arrays.asList( command.getModifications() );
|
||||
broadcastInvalidateForPrepare( mods, ctx );
|
||||
}
|
||||
else {
|
||||
log.tracef( "Nothing to invalidate - no modifications in the transaction." );
|
||||
}
|
||||
return retval;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitLockControlCommand(TxInvocationContext ctx, LockControlCommand command) throws Throwable {
|
||||
Object retVal = invokeNextInterceptor( ctx, command );
|
||||
if ( ctx.isOriginLocal() ) {
|
||||
//unlock will happen async as it is a best effort
|
||||
boolean sync = !command.isUnlock();
|
||||
List<Address> members = getMembers();
|
||||
( (LocalTxInvocationContext) ctx ).remoteLocksAcquired(members);
|
||||
rpcManager.invokeRemotely(members, command, sync ? syncRpcOptions : asyncRpcOptions );
|
||||
}
|
||||
return retVal;
|
||||
}
|
||||
|
||||
private Object handleInvalidate(InvocationContext ctx, WriteCommand command, Object... keys) throws Throwable {
|
||||
Object retval = invokeNextInterceptor( ctx, command );
|
||||
if ( command.isSuccessful() && !ctx.isInTxScope() ) {
|
||||
if ( keys != null && keys.length != 0 ) {
|
||||
if ( !isLocalModeForced( command ) ) {
|
||||
invalidateAcrossCluster( isSynchronous( command ), keys, ctx );
|
||||
}
|
||||
}
|
||||
}
|
||||
return retval;
|
||||
}
|
||||
|
||||
private void broadcastInvalidateForPrepare(List<WriteCommand> modifications, InvocationContext ctx) throws Throwable {
|
||||
// A prepare does not carry flags, so skip checking whether is local or not
|
||||
if ( ctx.isInTxScope() ) {
|
||||
if ( modifications.isEmpty() ) {
|
||||
return;
|
||||
}
|
||||
|
||||
InvalidationFilterVisitor filterVisitor = new InvalidationFilterVisitor( modifications.size() );
|
||||
filterVisitor.visitCollection( null, modifications );
|
||||
|
||||
if ( filterVisitor.containsPutForExternalRead ) {
|
||||
log.debug( "Modification list contains a putForExternalRead operation. Not invalidating." );
|
||||
}
|
||||
else if ( filterVisitor.containsLocalModeFlag ) {
|
||||
log.debug( "Modification list contains a local mode flagged operation. Not invalidating." );
|
||||
}
|
||||
else {
|
||||
try {
|
||||
invalidateAcrossCluster( defaultSynchronous, filterVisitor.result.toArray(), ctx );
|
||||
}
|
||||
catch (Throwable t) {
|
||||
log.unableToRollbackInvalidationsDuringPrepare( t );
|
||||
if ( t instanceof RuntimeException ) {
|
||||
throw t;
|
||||
}
|
||||
else {
|
||||
throw new RuntimeException( "Unable to broadcast invalidation messages", t );
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
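// Collects the keys affected by the transaction's modifications and records whether any command
// carried the PUT_FOR_EXTERNAL_READ or CACHE_MODE_LOCAL flag; broadcastInvalidateForPrepare skips
// the cluster-wide invalidation in either of those cases.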
public static class InvalidationFilterVisitor extends AbstractVisitor {
|
||||
|
||||
Set<Object> result;
|
||||
public boolean containsPutForExternalRead = false;
|
||||
public boolean containsLocalModeFlag = false;
|
||||
|
||||
public InvalidationFilterVisitor(int maxSetSize) {
|
||||
result = new HashSet<Object>( maxSetSize );
|
||||
}
|
||||
|
||||
private void processCommand(FlagAffectedCommand command) {
|
||||
containsLocalModeFlag = containsLocalModeFlag || ( command.getFlags() != null && command.getFlags().contains( Flag.CACHE_MODE_LOCAL ) );
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) throws Throwable {
|
||||
processCommand( command );
|
||||
containsPutForExternalRead =
|
||||
containsPutForExternalRead || ( command.getFlags() != null && command.getFlags().contains( Flag.PUT_FOR_EXTERNAL_READ ) );
|
||||
result.add( command.getKey() );
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitRemoveCommand(InvocationContext ctx, RemoveCommand command) throws Throwable {
|
||||
processCommand( command );
|
||||
result.add( command.getKey() );
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitPutMapCommand(InvocationContext ctx, PutMapCommand command) throws Throwable {
|
||||
processCommand( command );
|
||||
result.addAll( command.getAffectedKeys() );
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
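// Builds an InvalidateCommand for the given keys and sends it to the cluster members; within a
// transaction it is wrapped in a one-phase PrepareCommand so the remote invalidation executes
// under the same global transaction (see the comments below).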
private void invalidateAcrossCluster(boolean synchronous, Object[] keys, InvocationContext ctx) throws Throwable {
|
||||
// increment invalidations counter if statistics maintained
|
||||
incrementInvalidations();
|
||||
final InvalidateCommand invalidateCommand = commandsFactory.buildInvalidateCommand( InfinispanCollections.<Flag>emptySet(), keys );
|
||||
if ( log.isDebugEnabled() ) {
|
||||
log.debug( "Cache [" + rpcManager.getAddress() + "] replicating " + invalidateCommand );
|
||||
}
|
||||
|
||||
ReplicableCommand command = invalidateCommand;
|
||||
if ( ctx.isInTxScope() ) {
|
||||
TxInvocationContext txCtx = (TxInvocationContext) ctx;
|
||||
// A Prepare command containing the invalidation command in its 'modifications' list is sent to the remote nodes
|
||||
// so that the invalidation is executed in the same transaction and locks can be acquired and released properly.
|
||||
// This is 1PC on purpose, as an optimisation, even if the current TX is 2PC.
|
||||
// If the cache uses 2PC it's possible that the remotes will commit the invalidation and the originator rolls back,
|
||||
// but this does not impact consistency and the speed benefit is worth it.
|
||||
command = commandsFactory.buildPrepareCommand( txCtx.getGlobalTransaction(), Collections.<WriteCommand>singletonList( invalidateCommand ), true );
|
||||
}
|
||||
rpcManager.invokeRemotely( getMembers(), command, synchronous ? syncRpcOptions : asyncRpcOptions );
|
||||
}
|
||||
}
|
|
@@ -1,183 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.access;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
import org.hibernate.cache.infinispan.util.CacheCommandInitializer;
|
||||
import org.hibernate.cache.infinispan.util.EndInvalidationCommand;
|
||||
import org.hibernate.cache.infinispan.util.InfinispanMessageLogger;
|
||||
|
||||
import org.infinispan.commands.VisitableCommand;
|
||||
import org.infinispan.commands.tx.CommitCommand;
|
||||
import org.infinispan.commands.tx.PrepareCommand;
|
||||
import org.infinispan.commands.tx.RollbackCommand;
|
||||
import org.infinispan.commands.write.PutKeyValueCommand;
|
||||
import org.infinispan.commands.write.RemoveCommand;
|
||||
import org.infinispan.commands.write.WriteCommand;
|
||||
import org.infinispan.container.DataContainer;
|
||||
import org.infinispan.context.Flag;
|
||||
import org.infinispan.context.InvocationContext;
|
||||
import org.infinispan.context.impl.TxInvocationContext;
|
||||
import org.infinispan.factories.annotations.Inject;
|
||||
import org.infinispan.factories.annotations.Start;
|
||||
import org.infinispan.interceptors.base.BaseRpcInterceptor;
|
||||
import org.infinispan.remoting.inboundhandler.DeliverOrder;
|
||||
import org.infinispan.remoting.rpc.ResponseMode;
|
||||
import org.infinispan.remoting.rpc.RpcManager;
|
||||
import org.infinispan.remoting.rpc.RpcOptions;
|
||||
import org.infinispan.remoting.transport.Address;
|
||||
import org.infinispan.statetransfer.StateTransferManager;
|
||||
import org.infinispan.transaction.xa.GlobalTransaction;
|
||||
|
||||
/**
|
||||
* Intercepts transactions in Infinispan, calling {@link PutFromLoadValidator#beginInvalidatingKey(Object, Object)}
|
||||
* before locks are acquired (and the entry is invalidated) and sends {@link EndInvalidationCommand} to release
|
||||
 * invalidation through {@link PutFromLoadValidator#endInvalidatingKey(Object, Object)} after the transaction
|
||||
* is committed.
|
||||
*
|
||||
* @author Radim Vansa <rvansa@redhat.com>
|
||||
*/
|
||||
class TxPutFromLoadInterceptor extends BaseRpcInterceptor {
|
||||
private final static InfinispanMessageLogger log = InfinispanMessageLogger.Provider.getLog(TxPutFromLoadInterceptor.class);
|
||||
private PutFromLoadValidator putFromLoadValidator;
|
||||
private final String cacheName;
|
||||
private RpcManager rpcManager;
|
||||
private CacheCommandInitializer cacheCommandInitializer;
|
||||
private DataContainer dataContainer;
|
||||
private StateTransferManager stateTransferManager;
|
||||
private RpcOptions asyncUnordered;
|
||||
|
||||
public TxPutFromLoadInterceptor(PutFromLoadValidator putFromLoadValidator, String cacheName) {
|
||||
this.putFromLoadValidator = putFromLoadValidator;
|
||||
this.cacheName = cacheName;
|
||||
}
|
||||
|
||||
@Inject
|
||||
public void injectDependencies(RpcManager rpcManager, CacheCommandInitializer cacheCommandInitializer, DataContainer dataContainer, StateTransferManager stateTransferManager) {
|
||||
this.rpcManager = rpcManager;
|
||||
this.cacheCommandInitializer = cacheCommandInitializer;
|
||||
this.dataContainer = dataContainer;
|
||||
this.stateTransferManager = stateTransferManager;
|
||||
}
|
||||
|
||||
@Start
|
||||
public void start() {
|
||||
asyncUnordered = rpcManager.getRpcOptionsBuilder(ResponseMode.ASYNCHRONOUS, DeliverOrder.NONE).build();
|
||||
}
|
||||
|
||||
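// Registers the key with the current transaction and starts invalidation in the PutFromLoadValidator;
// the invalidation is released after commit/rollback (see endInvalidationAndInvokeNextInterceptor).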
private void beginInvalidating(InvocationContext ctx, Object key) {
|
||||
TxInvocationContext txCtx = (TxInvocationContext) ctx;
|
||||
// make sure that the command is registered in the transaction
|
||||
txCtx.addAffectedKey(key);
|
||||
|
||||
GlobalTransaction globalTransaction = txCtx.getGlobalTransaction();
|
||||
if (!putFromLoadValidator.beginInvalidatingKey(globalTransaction, key)) {
|
||||
log.failedInvalidatePendingPut(key, cacheName);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) throws Throwable {
|
||||
if (!command.hasFlag(Flag.PUT_FOR_EXTERNAL_READ)) {
|
||||
beginInvalidating(ctx, command.getKey());
|
||||
}
|
||||
return invokeNextInterceptor(ctx, command);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitRemoveCommand(InvocationContext ctx, RemoveCommand command) throws Throwable {
|
||||
beginInvalidating(ctx, command.getKey());
|
||||
return invokeNextInterceptor(ctx, command);
|
||||
}
|
||||
|
||||
// We need to intercept PrepareCommand, not InvalidateCommand since the interception takes
|
||||
// place before EntryWrappingInterceptor and the PrepareCommand is multiplexed into InvalidateCommands
|
||||
// as part of EntryWrappingInterceptor
|
||||
@Override
|
||||
public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) throws Throwable {
|
||||
if (ctx.isOriginLocal()) {
|
||||
// We can't wait to commit phase to remove the entry locally (invalidations are processed in 1pc
|
||||
// on remote nodes, so only the local case matters here). The problem is that while the entry is locked,
|
||||
// reads can still take place and we could read an outdated collection after reading the updated entity
|
||||
// owning this collection from the DB; when this happens, the version lock on the entity cannot protect
|
||||
// us against concurrent modification of the collection. Therefore, we need to remove the entry
|
||||
// here (even without lock!) and let possible update happen in commit phase.
|
||||
for (WriteCommand wc : command.getModifications()) {
|
||||
for (Object key : wc.getAffectedKeys()) {
|
||||
dataContainer.remove(key);
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
for (WriteCommand wc : command.getModifications()) {
|
||||
Set<Object> keys = wc.getAffectedKeys();
|
||||
if (log.isTraceEnabled()) {
|
||||
log.tracef("Invalidating keys %s with lock owner %s", keys, ctx.getLockOwner());
|
||||
}
|
||||
for (Object key : keys ) {
|
||||
putFromLoadValidator.beginInvalidatingKey(ctx.getLockOwner(), key);
|
||||
}
|
||||
}
|
||||
}
|
||||
return invokeNextInterceptor(ctx, command);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitCommitCommand(TxInvocationContext ctx, CommitCommand command) throws Throwable {
|
||||
if (log.isTraceEnabled()) {
|
||||
log.tracef( "Commit command received, end invalidation" );
|
||||
}
|
||||
|
||||
return endInvalidationAndInvokeNextInterceptor(ctx, command);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitRollbackCommand(TxInvocationContext ctx, RollbackCommand command) throws Throwable {
|
||||
if (log.isTraceEnabled()) {
|
||||
log.tracef( "Rollback command received, end invalidation" );
|
||||
}
|
||||
|
||||
return endInvalidationAndInvokeNextInterceptor(ctx, command);
|
||||
}
|
||||
|
||||
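// On the originating node, broadcasts an EndInvalidationCommand for all keys modified by the
// transaction and releases the local put-from-load invalidations, then always continues down
// the interceptor chain.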
protected Object endInvalidationAndInvokeNextInterceptor(TxInvocationContext<?> ctx, VisitableCommand command) throws Throwable {
|
||||
try {
|
||||
if (ctx.isOriginLocal()) {
|
||||
// We cannot use ctx.getAffectedKeys() directly, as that includes keys from local-only operations.
|
||||
// During evictAll inside a transaction this would cause an unnecessary invalidate command.
|
||||
if (!ctx.getModifications().isEmpty()) {
|
||||
Object[] keys = ctx.getModifications().stream()
|
||||
.flatMap(mod -> mod.getAffectedKeys().stream()).distinct().toArray();
|
||||
|
||||
if (log.isTraceEnabled()) {
|
||||
log.tracef( "Sending end invalidation for keys %s asynchronously, modifications are %s",
|
||||
Arrays.toString(keys), ctx.getCacheTransaction().getModifications());
|
||||
}
|
||||
|
||||
GlobalTransaction globalTransaction = ctx.getGlobalTransaction();
|
||||
EndInvalidationCommand commitCommand = cacheCommandInitializer.buildEndInvalidationCommand(
|
||||
cacheName, keys, globalTransaction);
|
||||
List<Address> members = stateTransferManager.getCacheTopology().getMembers();
|
||||
rpcManager.invokeRemotely(members, commitCommand, asyncUnordered);
|
||||
|
||||
// If the transaction is not successful, *RegionAccessStrategy would not be called, therefore
|
||||
// we have to end the invalidation from here manually (in the successful case as well).
|
||||
for (Object key : keys) {
|
||||
putFromLoadValidator.endInvalidatingKey(globalTransaction, key);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
finally {
|
||||
return invokeNextInterceptor(ctx, command);
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,90 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.access;
|
||||
|
||||
import org.infinispan.commands.write.PutKeyValueCommand;
|
||||
import org.infinispan.context.Flag;
|
||||
import org.infinispan.context.InvocationContext;
|
||||
import org.infinispan.distribution.DistributionManager;
|
||||
import org.infinispan.distribution.ch.ConsistentHash;
|
||||
import org.infinispan.factories.annotations.Inject;
|
||||
import org.infinispan.factories.annotations.Start;
|
||||
import org.infinispan.interceptors.distribution.NonTxDistributionInterceptor;
|
||||
import org.infinispan.remoting.inboundhandler.DeliverOrder;
|
||||
import org.infinispan.remoting.rpc.ResponseMode;
|
||||
import org.infinispan.remoting.rpc.RpcOptions;
|
||||
import org.infinispan.remoting.transport.Address;
|
||||
import org.infinispan.statetransfer.OutdatedTopologyException;
|
||||
import org.infinispan.util.logging.Log;
|
||||
import org.infinispan.util.logging.LogFactory;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Since the data handled in {@link TombstoneCallInterceptor} or {@link VersionedCallInterceptor}
|
||||
 * does not rely on the order in which these are applied (the updates are commutative), this interceptor
|
||||
 * simply sends any command to all other owners without ordering them through the primary owner.
|
||||
* Note that {@link LockingInterceptor} is required in the stack as locking on backup is not guaranteed
|
||||
 * by the primary owner.
|
||||
*/
|
||||
public class UnorderedDistributionInterceptor extends NonTxDistributionInterceptor {
|
||||
private static Log log = LogFactory.getLog(UnorderedDistributionInterceptor.class);
|
||||
private static final boolean trace = log.isTraceEnabled();
|
||||
|
||||
private DistributionManager distributionManager;
|
||||
private RpcOptions syncRpcOptions, asyncRpcOptions;
|
||||
|
||||
@Inject
|
||||
public void inject(DistributionManager distributionManager) {
|
||||
this.distributionManager = distributionManager;
|
||||
}
|
||||
|
||||
@Start
|
||||
public void start() {
|
||||
syncRpcOptions = rpcManager.getRpcOptionsBuilder(ResponseMode.SYNCHRONOUS_IGNORE_LEAVERS, DeliverOrder.NONE).build();
|
||||
// We don't have to guarantee ordering even for asynchronous messages
|
||||
asyncRpcOptions = rpcManager.getRpcOptionsBuilder(ResponseMode.ASYNCHRONOUS, DeliverOrder.NONE).build();
|
||||
}
|
||||
|
||||
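// Applies the write locally when this node is an owner (or the cache is replicated) and forwards
// it directly to the other owners; the RPC future is returned so that LockingInterceptor can wait
// for it after the entry lock has been released.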
@Override
|
||||
public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) throws Throwable {
|
||||
if (command.hasFlag(Flag.CACHE_MODE_LOCAL)) {
|
||||
// for state-transfer related writes
|
||||
return invokeNextInterceptor(ctx, command);
|
||||
}
|
||||
int commandTopologyId = command.getTopologyId();
|
||||
int currentTopologyId = stateTransferManager.getCacheTopology().getTopologyId();
|
||||
if (commandTopologyId != -1 && currentTopologyId != commandTopologyId) {
|
||||
throw new OutdatedTopologyException("Cache topology changed while the command was executing: expected " +
|
||||
commandTopologyId + ", got " + currentTopologyId);
|
||||
}
|
||||
|
||||
ConsistentHash writeCH = distributionManager.getWriteConsistentHash();
|
||||
List<Address> owners = null;
|
||||
if (writeCH.isReplicated()) {
|
||||
// local result is always ignored
|
||||
invokeNextInterceptor(ctx, command);
|
||||
}
|
||||
else {
|
||||
owners = writeCH.locateOwners(command.getKey());
|
||||
if (owners.contains(rpcManager.getAddress())) {
|
||||
invokeNextInterceptor(ctx, command);
|
||||
}
|
||||
else {
|
||||
log.tracef("Not invoking %s on %s since it is not an owner", command, rpcManager.getAddress());
|
||||
}
|
||||
}
|
||||
|
||||
if (ctx.isOriginLocal() && command.isSuccessful()) {
|
||||
// This is called with the entry locked. In order to avoid deadlocks we must not wait for RPC while
|
||||
// holding the lock, therefore we'll return a future and wait for it in LockingInterceptor after
|
||||
// unlocking (and committing) the entry.
|
||||
return rpcManager.invokeRemotelyAsync(owners, command, isSynchronous(command) ? syncRpcOptions : asyncRpcOptions);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
|
@@ -1,164 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.access;
|
||||
|
||||
import org.hibernate.cache.infinispan.impl.BaseTransactionalDataRegion;
|
||||
import org.hibernate.cache.infinispan.util.VersionedEntry;
|
||||
import org.infinispan.AdvancedCache;
|
||||
import org.infinispan.commands.read.SizeCommand;
|
||||
import org.infinispan.commands.write.PutKeyValueCommand;
|
||||
import org.infinispan.commons.util.CloseableIterable;
|
||||
import org.infinispan.container.entries.CacheEntry;
|
||||
import org.infinispan.container.entries.MVCCEntry;
|
||||
import org.infinispan.context.Flag;
|
||||
import org.infinispan.context.InvocationContext;
|
||||
import org.infinispan.factories.annotations.Inject;
|
||||
import org.infinispan.factories.annotations.Start;
|
||||
import org.infinispan.filter.NullValueConverter;
|
||||
import org.infinispan.interceptors.CallInterceptor;
|
||||
import org.infinispan.metadata.EmbeddedMetadata;
|
||||
import org.infinispan.metadata.Metadata;
|
||||
|
||||
import java.util.Comparator;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* Note that this does not implement all commands, only those appropriate for {@link TombstoneAccessDelegate}
|
||||
* and {@link org.hibernate.cache.infinispan.impl.BaseTransactionalDataRegion}
|
||||
*
|
||||
* The behaviour here also breaks notifications, which are not used for 2LC caches.
|
||||
*
|
||||
* @author Radim Vansa <rvansa@redhat.com>
|
||||
*/
|
||||
public class VersionedCallInterceptor extends CallInterceptor {
|
||||
private final Comparator<Object> versionComparator;
|
||||
private final Metadata expiringMetadata;
|
||||
private AdvancedCache cache;
|
||||
private Metadata defaultMetadata;
|
||||
|
||||
public VersionedCallInterceptor(BaseTransactionalDataRegion region, Comparator<Object> versionComparator) {
|
||||
this.versionComparator = versionComparator;
|
||||
expiringMetadata = new EmbeddedMetadata.Builder().lifespan(region.getTombstoneExpiration(), TimeUnit.MILLISECONDS).build();
|
||||
}
|
||||
|
||||
@Inject
|
||||
public void injectDependencies(AdvancedCache cache) {
|
||||
this.cache = cache;
|
||||
}
|
||||
|
||||
@Start
|
||||
public void start() {
|
||||
defaultMetadata = new EmbeddedMetadata.Builder()
|
||||
.lifespan(cacheConfiguration.expiration().lifespan())
|
||||
.maxIdle(cacheConfiguration.expiration().maxIdle()).build();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) throws Throwable {
|
||||
MVCCEntry e = (MVCCEntry) ctx.lookupEntry(command.getKey());
|
||||
if (e == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
Object oldValue = e.getValue();
|
||||
Object oldVersion = null;
|
||||
long oldTimestamp = Long.MIN_VALUE;
|
||||
if (oldValue instanceof VersionedEntry) {
|
||||
oldVersion = ((VersionedEntry) oldValue).getVersion();
|
||||
oldTimestamp = ((VersionedEntry) oldValue).getTimestamp();
|
||||
oldValue = ((VersionedEntry) oldValue).getValue();
|
||||
}
|
||||
else if (oldValue instanceof org.hibernate.cache.spi.entry.CacheEntry) {
|
||||
oldVersion = ((org.hibernate.cache.spi.entry.CacheEntry) oldValue).getVersion();
|
||||
}
|
||||
|
||||
Object newValue = command.getValue();
|
||||
Object newVersion;
|
||||
long newTimestamp;
|
||||
Object actualNewValue = newValue;
|
||||
boolean isRemoval = false;
|
||||
if (newValue instanceof VersionedEntry) {
|
||||
VersionedEntry ve = (VersionedEntry) newValue;
|
||||
newVersion = ve.getVersion();
|
||||
newTimestamp = ve.getTimestamp();
|
||||
if (ve.getValue() == null) {
|
||||
isRemoval = true;
|
||||
}
|
||||
else if (ve.getValue() instanceof org.hibernate.cache.spi.entry.CacheEntry) {
|
||||
actualNewValue = ve.getValue();
|
||||
}
|
||||
}
|
||||
else {
|
||||
throw new IllegalArgumentException(String.valueOf(newValue));
|
||||
}
|
||||
|
||||
if (newVersion == null) {
|
||||
// eviction or post-commit removal: we'll store it with the given timestamp
|
||||
setValue(e, newValue, expiringMetadata);
|
||||
return null;
|
||||
}
|
||||
if (oldVersion == null) {
|
||||
assert oldValue == null || oldTimestamp != Long.MIN_VALUE;
|
||||
if (newTimestamp <= oldTimestamp) {
|
||||
// either putFromLoad or regular update/insert - in either case this update might come
|
||||
// when it was evicted/region-invalidated. In both cases, with an old timestamp we'll leave
|
||||
// the invalid value
|
||||
assert oldValue == null;
|
||||
}
|
||||
else {
|
||||
setValue(e, actualNewValue, defaultMetadata);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
int compareResult = versionComparator.compare(newVersion, oldVersion);
|
||||
if (isRemoval && compareResult >= 0) {
|
||||
setValue(e, actualNewValue, expiringMetadata);
|
||||
}
|
||||
else if (compareResult > 0) {
|
||||
setValue(e, actualNewValue, defaultMetadata);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
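// Writes the value into the MVCC entry, reviving it if it was previously marked as removed,
// and applies the given metadata (default or expiring lifespan).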
private Object setValue(MVCCEntry e, Object value, Metadata metadata) {
|
||||
if (e.isRemoved()) {
|
||||
e.setRemoved(false);
|
||||
e.setCreated(true);
|
||||
e.setValid(true);
|
||||
}
|
||||
else {
|
||||
e.setChanged(true);
|
||||
}
|
||||
e.setMetadata(metadata);
|
||||
return e.setValue(value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitSizeCommand(InvocationContext ctx, SizeCommand command) throws Throwable {
|
||||
Set<Flag> flags = command.getFlags();
|
||||
int size = 0;
|
||||
AdvancedCache decoratedCache = cache.getAdvancedCache();
|
||||
if (flags != null) {
|
||||
decoratedCache = decoratedCache.withFlags(flags.toArray(new Flag[flags.size()]));
|
||||
}
|
||||
// In non-transactional caches we don't care about context
|
||||
CloseableIterable<CacheEntry<Object, Void>> iterable = decoratedCache
|
||||
.filterEntries(VersionedEntry.EXCLUDE_EMPTY_EXTRACT_VALUE).converter(NullValueConverter.getInstance());
|
||||
try {
|
||||
for (CacheEntry<Object, Void> entry : iterable) {
|
||||
if (size++ == Integer.MAX_VALUE) {
|
||||
return Integer.MAX_VALUE;
|
||||
}
|
||||
}
|
||||
}
|
||||
finally {
|
||||
iterable.close();
|
||||
}
|
||||
return size;
|
||||
}
|
||||
}
|
|
@@ -1,11 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Internal Infinispan-based implementation of the cache region access strategies
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.access;
|
|
@@ -1,92 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.collection;
|
||||
|
||||
import org.hibernate.cache.CacheException;
|
||||
import org.hibernate.cache.infinispan.access.AccessDelegate;
|
||||
import org.hibernate.cache.spi.CollectionRegion;
|
||||
import org.hibernate.cache.spi.access.CollectionRegionAccessStrategy;
|
||||
import org.hibernate.cache.spi.access.SoftLock;
|
||||
import org.hibernate.engine.spi.SessionFactoryImplementor;
|
||||
import org.hibernate.engine.spi.SharedSessionContractImplementor;
|
||||
import org.hibernate.persister.collection.CollectionPersister;
|
||||
|
||||
/**
|
||||
* Collection region access for Infinispan.
|
||||
*
|
||||
* @author Chris Bredesen
|
||||
* @author Galder Zamarreño
|
||||
* @since 3.5
|
||||
*/
|
||||
class CollectionAccess implements CollectionRegionAccessStrategy {
|
||||
private final CollectionRegionImpl region;
|
||||
private final AccessDelegate delegate;
|
||||
|
||||
CollectionAccess(CollectionRegionImpl region, AccessDelegate delegate) {
|
||||
this.region = region;
|
||||
this.delegate = delegate;
|
||||
}
|
||||
|
||||
public void evict(Object key) throws CacheException {
|
||||
delegate.evict( key );
|
||||
}
|
||||
|
||||
public void evictAll() throws CacheException {
|
||||
delegate.evictAll();
|
||||
}
|
||||
|
||||
public Object get(SharedSessionContractImplementor session, Object key, long txTimestamp) throws CacheException {
|
||||
return delegate.get( session, key, txTimestamp );
|
||||
}
|
||||
|
||||
public boolean putFromLoad(SharedSessionContractImplementor session, Object key, Object value, long txTimestamp, Object version) throws CacheException {
|
||||
return delegate.putFromLoad( session, key, value, txTimestamp, version );
|
||||
}
|
||||
|
||||
public boolean putFromLoad(SharedSessionContractImplementor session, Object key, Object value, long txTimestamp, Object version, boolean minimalPutOverride)
|
||||
throws CacheException {
|
||||
return delegate.putFromLoad( session, key, value, txTimestamp, version, minimalPutOverride );
|
||||
}
|
||||
|
||||
public void remove(SharedSessionContractImplementor session, Object key) throws CacheException {
|
||||
delegate.remove( session, key );
|
||||
}
|
||||
|
||||
public void removeAll() throws CacheException {
|
||||
delegate.removeAll();
|
||||
}
|
||||
|
||||
public CollectionRegion getRegion() {
|
||||
return region;
|
||||
}
|
||||
|
||||
public SoftLock lockItem(SharedSessionContractImplementor session, Object key, Object version) throws CacheException {
|
||||
return null;
|
||||
}
|
||||
|
||||
public SoftLock lockRegion() throws CacheException {
|
||||
return null;
|
||||
}
|
||||
|
||||
public void unlockItem(SharedSessionContractImplementor session, Object key, SoftLock lock) throws CacheException {
|
||||
delegate.unlockItem( session, key);
|
||||
}
|
||||
|
||||
public void unlockRegion(SoftLock lock) throws CacheException {
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object generateCacheKey(Object id, CollectionPersister persister, SessionFactoryImplementor factory, String tenantIdentifier) {
|
||||
return region.getCacheKeysFactory().createCollectionKey(id, persister, factory, tenantIdentifier);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object getCacheKeyId(Object cacheKey) {
|
||||
return region.getCacheKeysFactory().getCollectionId(cacheKey);
|
||||
}
|
||||
|
||||
}
|
|
@@ -1,51 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.collection;
|
||||
|
||||
import org.hibernate.cache.CacheException;
|
||||
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
|
||||
import org.hibernate.cache.infinispan.access.AccessDelegate;
|
||||
import org.hibernate.cache.infinispan.impl.BaseTransactionalDataRegion;
|
||||
import org.hibernate.cache.spi.CacheDataDescription;
|
||||
import org.hibernate.cache.spi.CacheKeysFactory;
|
||||
import org.hibernate.cache.spi.CollectionRegion;
|
||||
import org.hibernate.cache.spi.access.AccessType;
|
||||
import org.hibernate.cache.spi.access.CollectionRegionAccessStrategy;
|
||||
import org.infinispan.AdvancedCache;
|
||||
|
||||
import javax.transaction.TransactionManager;
|
||||
|
||||
/**
|
||||
* Collection region implementation
|
||||
*
|
||||
* @author Chris Bredesen
|
||||
* @author Galder Zamarreño
|
||||
* @since 3.5
|
||||
*/
|
||||
public class CollectionRegionImpl extends BaseTransactionalDataRegion implements CollectionRegion {
|
||||
/**
|
||||
* Construct a collection region
|
||||
* @param cache instance to store collection instances
|
||||
* @param name of collection type
|
||||
* @param transactionManager
|
||||
* @param metadata for the collection type
|
||||
* @param factory for the region
|
||||
* @param cacheKeysFactory factory for cache keys
|
||||
*/
|
||||
public CollectionRegionImpl(
|
||||
AdvancedCache cache, String name, TransactionManager transactionManager,
|
||||
CacheDataDescription metadata, InfinispanRegionFactory factory, CacheKeysFactory cacheKeysFactory) {
|
||||
super( cache, name, transactionManager, metadata, factory, cacheKeysFactory );
|
||||
}
|
||||
|
||||
@Override
|
||||
public CollectionRegionAccessStrategy buildAccessStrategy(AccessType accessType) throws CacheException {
|
||||
checkAccessType( accessType );
|
||||
AccessDelegate accessDelegate = createAccessDelegate(accessType);
|
||||
return new CollectionAccess( this, accessDelegate );
|
||||
}
|
||||
}
|
|
@@ -1,11 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Internal Infinispan-based implementation of the collection cache region
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.collection;
|
|
@@ -1,57 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.entity;
|
||||
|
||||
import org.hibernate.cache.CacheException;
|
||||
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
|
||||
import org.hibernate.cache.infinispan.access.AccessDelegate;
|
||||
import org.hibernate.cache.infinispan.impl.BaseTransactionalDataRegion;
|
||||
import org.hibernate.cache.spi.CacheDataDescription;
|
||||
import org.hibernate.cache.spi.CacheKeysFactory;
|
||||
import org.hibernate.cache.spi.EntityRegion;
|
||||
import org.hibernate.cache.spi.access.AccessType;
|
||||
import org.hibernate.cache.spi.access.EntityRegionAccessStrategy;
|
||||
|
||||
import org.infinispan.AdvancedCache;
|
||||
|
||||
import javax.transaction.TransactionManager;
|
||||
|
||||
/**
|
||||
* Entity region implementation
|
||||
*
|
||||
* @author Chris Bredesen
|
||||
* @author Galder Zamarreño
|
||||
* @since 3.5
|
||||
*/
|
||||
public class EntityRegionImpl extends BaseTransactionalDataRegion implements EntityRegion {
|
||||
/**
|
||||
 * Construct an entity region
|
||||
* @param cache instance to store entity instances
|
||||
* @param name of entity type
|
||||
* @param transactionManager
|
||||
* @param metadata for the entity type
|
||||
* @param factory for the region
|
||||
* @param cacheKeysFactory factory for cache keys
|
||||
*/
|
||||
public EntityRegionImpl(
|
||||
AdvancedCache cache, String name, TransactionManager transactionManager,
|
||||
CacheDataDescription metadata, InfinispanRegionFactory factory, CacheKeysFactory cacheKeysFactory) {
|
||||
super( cache, name, transactionManager, metadata, factory, cacheKeysFactory);
|
||||
}
|
||||
|
||||
@Override
|
||||
public EntityRegionAccessStrategy buildAccessStrategy(AccessType accessType) throws CacheException {
|
||||
checkAccessType(accessType);
|
||||
AccessDelegate accessDelegate = createAccessDelegate(accessType);
|
||||
if ( accessType == AccessType.READ_ONLY || !getCacheDataDescription().isMutable() ) {
|
||||
return new ReadOnlyAccess( this, accessDelegate );
|
||||
}
|
||||
else {
|
||||
return new ReadWriteAccess( this, accessDelegate );
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,114 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.entity;
|
||||
|
||||
import org.hibernate.cache.CacheException;
|
||||
import org.hibernate.cache.infinispan.access.AccessDelegate;
|
||||
import org.hibernate.cache.spi.EntityRegion;
|
||||
import org.hibernate.cache.spi.access.EntityRegionAccessStrategy;
|
||||
import org.hibernate.cache.spi.access.SoftLock;
|
||||
import org.hibernate.engine.spi.SessionFactoryImplementor;
|
||||
import org.hibernate.engine.spi.SharedSessionContractImplementor;
|
||||
import org.hibernate.persister.entity.EntityPersister;
|
||||
|
||||
/**
|
||||
* A specialization of {@link ReadWriteAccess} that ensures we never update data.
|
||||
*
|
||||
* @author Chris Bredesen
|
||||
* @author Galder Zamarreño
|
||||
* @since 3.5
|
||||
*/
|
||||
class ReadOnlyAccess implements EntityRegionAccessStrategy {
|
||||
|
||||
protected final EntityRegionImpl region;
|
||||
protected final AccessDelegate delegate;
|
||||
|
||||
ReadOnlyAccess(EntityRegionImpl region, AccessDelegate delegate) {
|
||||
this.region = region;
|
||||
this.delegate = delegate;
|
||||
}
|
||||
|
||||
public void evict(Object key) throws CacheException {
|
||||
delegate.evict( key );
|
||||
}
|
||||
|
||||
public void evictAll() throws CacheException {
|
||||
delegate.evictAll();
|
||||
}
|
||||
|
||||
public Object get(SharedSessionContractImplementor session, Object key, long txTimestamp) throws CacheException {
|
||||
return delegate.get( session, key, txTimestamp );
|
||||
}
|
||||
|
||||
public EntityRegion getRegion() {
|
||||
return this.region;
|
||||
}
|
||||
|
||||
public boolean putFromLoad(SharedSessionContractImplementor session, Object key, Object value, long txTimestamp, Object version) throws CacheException {
|
||||
return delegate.putFromLoad( session, key, value, txTimestamp, version );
|
||||
}
|
||||
|
||||
public boolean putFromLoad(SharedSessionContractImplementor session, Object key, Object value, long txTimestamp, Object version, boolean minimalPutOverride)
|
||||
throws CacheException {
|
||||
return delegate.putFromLoad( session, key, value, txTimestamp, version, minimalPutOverride );
|
||||
}
|
||||
|
||||
public void remove(SharedSessionContractImplementor session, Object key) throws CacheException {
|
||||
delegate.remove( session, key );
|
||||
}
|
||||
|
||||
public void removeAll() throws CacheException {
|
||||
delegate.removeAll();
|
||||
}
|
||||
|
||||
public boolean insert(SharedSessionContractImplementor session, Object key, Object value, Object version) throws CacheException {
|
||||
return delegate.insert( session, key, value, version );
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean update(
|
||||
SharedSessionContractImplementor session, Object key, Object value, Object currentVersion,
|
||||
Object previousVersion) throws CacheException {
|
||||
throw new UnsupportedOperationException( "Illegal attempt to edit read only item" );
|
||||
}
|
||||
|
||||
public SoftLock lockItem(SharedSessionContractImplementor session, Object key, Object version) throws CacheException {
|
||||
return null;
|
||||
}
|
||||
|
||||
public SoftLock lockRegion() throws CacheException {
|
||||
return null;
|
||||
}
|
||||
|
||||
public void unlockItem(SharedSessionContractImplementor session, Object key, SoftLock lock) throws CacheException {
|
||||
delegate.unlockItem( session, key );
|
||||
}
|
||||
|
||||
public void unlockRegion(SoftLock lock) throws CacheException {
|
||||
}
|
||||
|
||||
public boolean afterInsert(SharedSessionContractImplementor session, Object key, Object value, Object version) throws CacheException {
|
||||
return delegate.afterInsert( session, key, value, version );
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean afterUpdate(
|
||||
SharedSessionContractImplementor session, Object key, Object value, Object currentVersion,
|
||||
Object previousVersion, SoftLock lock) throws CacheException {
|
||||
throw new UnsupportedOperationException( "Illegal attempt to edit read only item" );
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object generateCacheKey(Object id, EntityPersister persister, SessionFactoryImplementor factory, String tenantIdentifier) {
|
||||
return region.getCacheKeysFactory().createEntityKey(id, persister, factory, tenantIdentifier);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object getCacheKeyId(Object cacheKey) {
|
||||
return region.getCacheKeysFactory().getEntityId(cacheKey);
|
||||
}
|
||||
}
|
|
@@ -1,36 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.entity;
|
||||
|
||||
import org.hibernate.cache.CacheException;
|
||||
import org.hibernate.cache.infinispan.access.AccessDelegate;
|
||||
import org.hibernate.cache.spi.access.SoftLock;
|
||||
import org.hibernate.engine.spi.SharedSessionContractImplementor;
|
||||
|
||||
/**
|
||||
* Read-write or transactional entity region access for Infinispan.
|
||||
*
|
||||
* @author Chris Bredesen
|
||||
* @author Galder Zamarreño
|
||||
* @since 3.5
|
||||
*/
|
||||
class ReadWriteAccess extends ReadOnlyAccess {
|
||||
|
||||
ReadWriteAccess(EntityRegionImpl region, AccessDelegate delegate) {
|
||||
super(region, delegate);
|
||||
}
|
||||
|
||||
public boolean update(SharedSessionContractImplementor session, Object key, Object value, Object currentVersion, Object previousVersion)
|
||||
throws CacheException {
|
||||
return delegate.update( session, key, value, currentVersion, previousVersion );
|
||||
}
|
||||
|
||||
public boolean afterUpdate(SharedSessionContractImplementor session, Object key, Object value, Object currentVersion, Object previousVersion, SoftLock lock)
|
||||
throws CacheException {
|
||||
return delegate.afterUpdate( session, key, value, currentVersion, previousVersion, lock );
|
||||
}
|
||||
}
|
|
@@ -1,11 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Internal Infinispan-based implementation of the entity cache region
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.entity;
|
|
@@ -1,63 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.impl;
|
||||
|
||||
import org.hibernate.cache.CacheException;
|
||||
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
|
||||
import org.hibernate.cache.infinispan.util.Caches;
|
||||
import org.hibernate.cache.spi.GeneralDataRegion;
|
||||
import org.hibernate.engine.spi.SharedSessionContractImplementor;
|
||||
|
||||
import org.infinispan.AdvancedCache;
|
||||
|
||||
/**
|
||||
* Support for Infinispan {@link GeneralDataRegion} implementors.
|
||||
*
|
||||
* @author Chris Bredesen
|
||||
* @author Galder Zamarreño
|
||||
* @since 3.5
|
||||
*/
|
||||
public abstract class BaseGeneralDataRegion extends BaseRegion implements GeneralDataRegion {
|
||||
private final AdvancedCache putCache;
|
||||
|
||||
/**
|
||||
* General data region constructor.
|
||||
*
|
||||
* @param cache instance for the region
|
||||
* @param name of the region
|
||||
* @param factory for this region
|
||||
*/
|
||||
public BaseGeneralDataRegion(
|
||||
AdvancedCache cache, String name,
|
||||
InfinispanRegionFactory factory) {
|
||||
super( cache, name, null, factory );
|
||||
this.putCache = Caches.ignoreReturnValuesCache( cache );
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public void evict(Object key) throws CacheException {
|
||||
cache.evict( key );
|
||||
}
|
||||
|
||||
@Override
|
||||
public void evictAll() throws CacheException {
|
||||
cache.clear();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object get(SharedSessionContractImplementor session, Object key) throws CacheException {
|
||||
return cache.get( key );
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public void put(SharedSessionContractImplementor session, Object key, Object value) throws CacheException {
|
||||
putCache.put( key, value );
|
||||
}
|
||||
|
||||
}
|
|
@@ -1,258 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.impl;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.Map;
|
||||
import javax.transaction.SystemException;
|
||||
import javax.transaction.Transaction;
|
||||
import javax.transaction.TransactionManager;
|
||||
|
||||
import org.hibernate.cache.CacheException;
|
||||
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
|
||||
import org.hibernate.cache.infinispan.util.Caches;
|
||||
import org.hibernate.cache.infinispan.util.InfinispanMessageLogger;
|
||||
import org.hibernate.cache.spi.Region;
|
||||
|
||||
import org.hibernate.cache.spi.access.AccessType;
|
||||
import org.infinispan.AdvancedCache;
|
||||
import org.infinispan.context.Flag;
|
||||
|
||||
/**
|
||||
* Support for Infinispan {@link Region}s. Handles common "utility" methods for an underlying named
|
||||
* Cache. In other words, this implementation doesn't actually read or write data. Subclasses are
|
||||
* expected to provide core cache interaction appropriate to the semantics needed.
|
||||
*
|
||||
* @author Chris Bredesen
|
||||
* @author Galder Zamarreño
|
||||
* @since 3.5
|
||||
*/
|
||||
public abstract class BaseRegion implements Region {
|
||||
|
||||
private static final InfinispanMessageLogger log = InfinispanMessageLogger.Provider.getLog( BaseRegion.class );
|
||||
|
||||
protected final String name;
|
||||
protected final AdvancedCache cache;
|
||||
protected final AdvancedCache localAndSkipLoadCache;
|
||||
protected final TransactionManager tm;
|
||||
protected final InfinispanRegionFactory factory;
|
||||
|
||||
protected volatile long lastRegionInvalidation = Long.MIN_VALUE;
|
||||
protected int invalidations = 0;
|
||||
|
||||
/**
|
||||
* Base region constructor.
|
||||
*
|
||||
* @param cache instance for the region
|
||||
* @param name of the region
|
||||
* @param transactionManager transaction manager may be needed even for non-transactional caches.
|
||||
* @param factory for this region
|
||||
*/
|
||||
public BaseRegion(AdvancedCache cache, String name, TransactionManager transactionManager, InfinispanRegionFactory factory) {
|
||||
this.cache = cache;
|
||||
this.name = name;
|
||||
this.tm = transactionManager;
|
||||
this.factory = factory;
|
||||
this.localAndSkipLoadCache = cache.withFlags(
|
||||
Flag.CACHE_MODE_LOCAL, Flag.ZERO_LOCK_ACQUISITION_TIMEOUT,
|
||||
Flag.SKIP_CACHE_LOAD
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getElementCountInMemory() {
|
||||
if ( checkValid() ) {
|
||||
return localAndSkipLoadCache.size();
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
* <p/>
|
||||
* Not supported; returns -1
|
||||
*/
|
||||
@Override
|
||||
public long getElementCountOnDisk() {
|
||||
return -1;
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
* <p/>
|
||||
* Not supported; returns -1
|
||||
*/
|
||||
@Override
|
||||
public long getSizeInMemory() {
|
||||
return -1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getTimeout() {
|
||||
// 60 seconds
|
||||
return 60000;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long nextTimestamp() {
|
||||
return factory.nextTimestamp();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map toMap() {
|
||||
if ( checkValid() ) {
|
||||
return cache;
|
||||
}
|
||||
|
||||
return Collections.EMPTY_MAP;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void destroy() throws CacheException {
|
||||
cache.stop();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean contains(Object key) {
|
||||
return checkValid() && cache.containsKey( key );
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if the region is valid for operations such as storing new data
|
||||
* in the region, or retrieving data from the region.
|
||||
*
|
||||
* @return true if the region is valid, false otherwise
|
||||
*/
|
||||
public boolean checkValid() {
|
||||
return lastRegionInvalidation != Long.MAX_VALUE;
|
||||
}
|
||||
|
||||
/**
|
||||
* Tell the TransactionManager to suspend any ongoing transaction.
|
||||
*
|
||||
* @return the transaction that was suspended, or <code>null</code> if
|
||||
* there wasn't one
|
||||
*/
|
||||
public Transaction suspend() {
|
||||
Transaction tx = null;
|
||||
try {
|
||||
if ( tm != null ) {
|
||||
tx = tm.suspend();
|
||||
}
|
||||
}
|
||||
catch (SystemException se) {
|
||||
throw log.cannotSuspendTx(se);
|
||||
}
|
||||
return tx;
|
||||
}
|
||||
|
||||
/**
|
||||
* Tell the TransactionManager to resume the given transaction
|
||||
*
|
||||
 * @param tx the transaction to resume. May be <code>null</code>.
|
||||
*/
|
||||
public void resume(Transaction tx) {
|
||||
try {
|
||||
if ( tx != null ) {
|
||||
tm.resume( tx );
|
||||
}
|
||||
}
|
||||
catch (Exception e) {
|
||||
throw log.cannotResumeTx( e );
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Invalidates the region.
|
||||
*/
|
||||
public void invalidateRegion() {
|
||||
// this is called only from EvictAllCommand, so we don't have any ongoing transaction
|
||||
beginInvalidation();
|
||||
endInvalidation();
|
||||
}
|
||||
|
||||
public void beginInvalidation() {
|
||||
if (log.isTraceEnabled()) {
|
||||
log.trace( "Begin invalidating region: " + name );
|
||||
}
|
||||
synchronized (this) {
|
||||
lastRegionInvalidation = Long.MAX_VALUE;
|
||||
++invalidations;
|
||||
}
|
||||
runInvalidation(getCurrentTransaction() != null);
|
||||
}
|
||||
|
||||
public void endInvalidation() {
|
||||
synchronized (this) {
|
||||
if (--invalidations == 0) {
|
||||
lastRegionInvalidation = nextTimestamp();
|
||||
}
|
||||
}
|
||||
if (log.isTraceEnabled()) {
|
||||
log.trace( "End invalidating region: " + name );
|
||||
}
|
||||
}
|
||||
|
||||
public TransactionManager getTransactionManager() {
|
||||
return tm;
|
||||
}
|
||||
|
||||
// Used to satisfy TransactionalDataRegion.isTransactionAware in subclasses
|
||||
@SuppressWarnings("unused")
|
||||
public boolean isTransactionAware() {
|
||||
return tm != null;
|
||||
}
|
||||
|
||||
public AdvancedCache getCache() {
|
||||
return cache;
|
||||
}
|
||||
|
||||
protected Transaction getCurrentTransaction() {
|
||||
try {
|
||||
// Transaction manager could be null
|
||||
return tm != null ? tm.getTransaction() : null;
|
||||
}
|
||||
catch (SystemException e) {
|
||||
throw log.cannotGetCurrentTx(e);
|
||||
}
|
||||
}
|
||||
|
||||
protected void checkAccessType(AccessType accessType) {
|
||||
if (accessType == AccessType.TRANSACTIONAL && !cache.getCacheConfiguration().transaction().transactionMode().isTransactional()) {
|
||||
log.transactionalStrategyNonTransactionalCache();
|
||||
}
|
||||
else if (accessType == AccessType.READ_WRITE && cache.getCacheConfiguration().transaction().transactionMode().isTransactional()) {
|
||||
log.readWriteStrategyTransactionalCache();
|
||||
}
|
||||
}
|
||||
|
||||
protected void runInvalidation(boolean inTransaction) {
|
||||
// If we're running inside a transaction, we need to remove elements one-by-one
|
||||
// to clean the context as well (cache.clear() does not do that).
|
||||
// When we don't have a transaction, we can do a clear operation (since we don't
|
||||
// care about the context) and can't do the one-by-one remove: remove() on a tx cache
|
||||
// requires transactional context.
|
||||
if ( inTransaction && cache.getCacheConfiguration().transaction().transactionMode().isTransactional() ) {
|
||||
log.tracef( "Transaction, clearing one element at the time" );
|
||||
Caches.removeAll( localAndSkipLoadCache );
|
||||
}
|
||||
else {
|
||||
log.tracef( "Non-transactional, clear in one go" );
|
||||
localAndSkipLoadCache.clear();
|
||||
}
|
||||
}
|
||||
|
||||
public InfinispanRegionFactory getRegionFactory() {
|
||||
return factory;
|
||||
}
|
||||
}
|
|
@@ -1,355 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.impl;
|
||||
|
||||
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
|
||||
import org.hibernate.cache.infinispan.access.AccessDelegate;
|
||||
import org.hibernate.cache.infinispan.access.LockingInterceptor;
|
||||
import org.hibernate.cache.infinispan.access.NonStrictAccessDelegate;
|
||||
import org.hibernate.cache.infinispan.access.NonTxInvalidationCacheAccessDelegate;
|
||||
import org.hibernate.cache.infinispan.access.PutFromLoadValidator;
|
||||
import org.hibernate.cache.infinispan.access.TombstoneAccessDelegate;
|
||||
import org.hibernate.cache.infinispan.access.TombstoneCallInterceptor;
|
||||
import org.hibernate.cache.infinispan.access.TxInvalidationCacheAccessDelegate;
|
||||
import org.hibernate.cache.infinispan.access.UnorderedDistributionInterceptor;
|
||||
import org.hibernate.cache.infinispan.access.VersionedCallInterceptor;
|
||||
import org.hibernate.cache.infinispan.util.Caches;
|
||||
import org.hibernate.cache.infinispan.util.FutureUpdate;
|
||||
import org.hibernate.cache.infinispan.util.InfinispanMessageLogger;
|
||||
import org.hibernate.cache.infinispan.util.Tombstone;
|
||||
import org.hibernate.cache.infinispan.util.VersionedEntry;
|
||||
import org.hibernate.cache.spi.CacheDataDescription;
|
||||
import org.hibernate.cache.spi.CacheKeysFactory;
|
||||
import org.hibernate.cache.spi.TransactionalDataRegion;
|
||||
|
||||
import org.hibernate.cache.spi.access.AccessType;
|
||||
import org.infinispan.AdvancedCache;
|
||||
import org.infinispan.commons.util.CloseableIterator;
|
||||
import org.infinispan.configuration.cache.CacheMode;
|
||||
import org.infinispan.configuration.cache.Configuration;
|
||||
import org.infinispan.container.entries.CacheEntry;
|
||||
import org.infinispan.expiration.ExpirationManager;
|
||||
import org.infinispan.expiration.impl.ClusterExpirationManager;
|
||||
import org.infinispan.expiration.impl.ExpirationManagerImpl;
|
||||
import org.infinispan.filter.KeyValueFilter;
|
||||
import org.infinispan.interceptors.CallInterceptor;
|
||||
import org.infinispan.interceptors.EntryWrappingInterceptor;
|
||||
import org.infinispan.interceptors.base.CommandInterceptor;
|
||||
import org.infinispan.interceptors.distribution.NonTxDistributionInterceptor;
|
||||
import org.infinispan.interceptors.locking.NonTransactionalLockingInterceptor;
|
||||
|
||||
import javax.transaction.TransactionManager;
|
||||
|
||||
import java.lang.reflect.Field;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
 * Support for Infinispan {@link org.hibernate.cache.spi.TransactionalDataRegion} implementors.
|
||||
*
|
||||
* @author Chris Bredesen
|
||||
* @author Galder Zamarreño
|
||||
* @since 3.5
|
||||
*/
|
||||
public abstract class BaseTransactionalDataRegion
|
||||
extends BaseRegion implements TransactionalDataRegion {
|
||||
private static final InfinispanMessageLogger log = InfinispanMessageLogger.Provider.getLog( BaseTransactionalDataRegion.class );
|
||||
private final CacheDataDescription metadata;
|
||||
private final CacheKeysFactory cacheKeysFactory;
|
||||
private final boolean requiresTransaction;
|
||||
|
||||
private long tombstoneExpiration;
|
||||
private PutFromLoadValidator validator;
|
||||
|
||||
private AccessType accessType;
|
||||
private Strategy strategy;
|
||||
|
||||
protected enum Strategy {
|
||||
NONE, VALIDATION, TOMBSTONES, VERSIONED_ENTRIES
|
||||
}
|
||||
|
||||
/**
|
||||
* Base transactional region constructor
|
||||
* @param cache instance to store transactional data
|
||||
* @param name of the transactional region
|
||||
* @param transactionManager
|
||||
* @param metadata for the transactional region
|
||||
* @param factory for the transactional region
|
||||
* @param cacheKeysFactory factory for cache keys
|
||||
*/
|
||||
public BaseTransactionalDataRegion(
|
||||
AdvancedCache cache, String name, TransactionManager transactionManager,
|
||||
CacheDataDescription metadata, InfinispanRegionFactory factory, CacheKeysFactory cacheKeysFactory) {
|
||||
super( cache, name, transactionManager, factory);
|
||||
this.metadata = metadata;
|
||||
this.cacheKeysFactory = cacheKeysFactory;
|
||||
|
||||
Configuration configuration = cache.getCacheConfiguration();
|
||||
requiresTransaction = configuration.transaction().transactionMode().isTransactional()
|
||||
&& !configuration.transaction().autoCommit();
|
||||
tombstoneExpiration = factory.getPendingPutsCacheConfiguration().expiration().maxIdle();
|
||||
if (!isRegionAccessStrategyEnabled()) {
|
||||
strategy = Strategy.NONE;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @return True if this region is accessed through RegionAccessStrategy, false if it is accessed directly.
|
||||
*/
|
||||
protected boolean isRegionAccessStrategyEnabled() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public CacheDataDescription getCacheDataDescription() {
|
||||
return metadata;
|
||||
}
|
||||
|
||||
public CacheKeysFactory getCacheKeysFactory() {
|
||||
return cacheKeysFactory;
|
||||
}
|
||||
|
||||
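// Chooses the access delegate and prepares the cache accordingly: versioned entries for
// NONSTRICT_READ_WRITE, tombstones for distributed/replicated caches, and put-from-load
// validation (tx or non-tx) for invalidation caches. A region supports only one access type.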
protected synchronized AccessDelegate createAccessDelegate(AccessType accessType) {
|
||||
if (accessType == null) {
|
||||
throw new IllegalArgumentException();
|
||||
}
|
||||
if (this.accessType != null && !this.accessType.equals(accessType)) {
|
||||
throw new IllegalStateException("This region was already set up for " + this.accessType + ", cannot use using " + accessType);
|
||||
}
|
||||
this.accessType = accessType;
|
||||
|
||||
CacheMode cacheMode = cache.getCacheConfiguration().clustering().cacheMode();
|
||||
if (accessType == AccessType.NONSTRICT_READ_WRITE) {
|
||||
prepareForVersionedEntries();
|
||||
return new NonStrictAccessDelegate(this);
|
||||
}
|
||||
if (cacheMode.isDistributed() || cacheMode.isReplicated()) {
|
||||
prepareForTombstones();
|
||||
return new TombstoneAccessDelegate(this);
|
||||
}
|
||||
else {
|
||||
prepareForValidation();
|
||||
if (cache.getCacheConfiguration().transaction().transactionMode().isTransactional()) {
|
||||
return new TxInvalidationCacheAccessDelegate(this, validator);
|
||||
}
|
||||
else {
|
||||
return new NonTxInvalidationCacheAccessDelegate(this, validator);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected void prepareForValidation() {
|
||||
if (strategy != null) {
|
||||
assert strategy == Strategy.VALIDATION;
|
||||
return;
|
||||
}
|
||||
validator = new PutFromLoadValidator(cache, factory);
|
||||
strategy = Strategy.VALIDATION;
|
||||
}
|
||||
|
||||
protected void prepareForVersionedEntries() {
|
||||
if (strategy != null) {
|
||||
assert strategy == Strategy.VERSIONED_ENTRIES;
|
||||
return;
|
||||
}
|
||||
|
||||
replaceCommonInterceptors();
|
||||
replaceExpirationManager();
|
||||
|
||||
cache.removeInterceptor(CallInterceptor.class);
|
||||
VersionedCallInterceptor tombstoneCallInterceptor = new VersionedCallInterceptor(this, metadata.getVersionComparator());
|
||||
cache.getComponentRegistry().registerComponent(tombstoneCallInterceptor, VersionedCallInterceptor.class);
|
||||
List<CommandInterceptor> interceptorChain = cache.getInterceptorChain();
|
||||
cache.addInterceptor(tombstoneCallInterceptor, interceptorChain.size());
|
||||
|
||||
strategy = Strategy.VERSIONED_ENTRIES;
|
||||
}
|
||||
|
||||
private void prepareForTombstones() {
|
||||
if (strategy != null) {
|
||||
assert strategy == Strategy.TOMBSTONES;
|
||||
return;
|
||||
}
|
||||
Configuration configuration = cache.getCacheConfiguration();
|
||||
if (configuration.eviction().maxEntries() >= 0) {
|
||||
log.evictionWithTombstones();
|
||||
}
|
||||
|
||||
replaceCommonInterceptors();
|
||||
replaceExpirationManager();
|
||||
|
||||
cache.removeInterceptor(CallInterceptor.class);
|
||||
TombstoneCallInterceptor tombstoneCallInterceptor = new TombstoneCallInterceptor(this);
|
||||
cache.getComponentRegistry().registerComponent(tombstoneCallInterceptor, TombstoneCallInterceptor.class);
|
||||
List<CommandInterceptor> interceptorChain = cache.getInterceptorChain();
|
||||
cache.addInterceptor(tombstoneCallInterceptor, interceptorChain.size());
|
||||
|
||||
strategy = Strategy.TOMBSTONES;
|
||||
}
|
||||
|
||||
private void replaceCommonInterceptors() {
|
||||
CacheMode cacheMode = cache.getCacheConfiguration().clustering().cacheMode();
|
||||
if (!cacheMode.isReplicated() && !cacheMode.isDistributed()) {
|
||||
return;
|
||||
}
|
||||
|
||||
LockingInterceptor lockingInterceptor = new LockingInterceptor();
|
||||
cache.getComponentRegistry().registerComponent(lockingInterceptor, LockingInterceptor.class);
|
||||
if (!cache.addInterceptorBefore(lockingInterceptor, NonTransactionalLockingInterceptor.class)) {
|
||||
throw new IllegalStateException("Misconfigured cache, interceptor chain is " + cache.getInterceptorChain());
|
||||
}
|
||||
cache.removeInterceptor(NonTransactionalLockingInterceptor.class);
|
||||
|
||||
UnorderedDistributionInterceptor distributionInterceptor = new UnorderedDistributionInterceptor();
|
||||
cache.getComponentRegistry().registerComponent(distributionInterceptor, UnorderedDistributionInterceptor.class);
|
||||
if (!cache.addInterceptorBefore(distributionInterceptor, NonTxDistributionInterceptor.class)) {
|
||||
throw new IllegalStateException("Misconfigured cache, interceptor chain is " + cache.getInterceptorChain());
|
||||
}
|
||||
cache.removeInterceptor(NonTxDistributionInterceptor.class);
|
||||
|
||||
EntryWrappingInterceptor ewi = cache.getComponentRegistry().getComponent(EntryWrappingInterceptor.class);
|
||||
try {
|
||||
Field isUsingLockDelegation = EntryWrappingInterceptor.class.getDeclaredField("isUsingLockDelegation");
|
||||
isUsingLockDelegation.setAccessible(true);
|
||||
isUsingLockDelegation.set(ewi, false);
|
||||
}
|
||||
catch (NoSuchFieldException | IllegalAccessException e) {
|
||||
throw new IllegalStateException(e);
|
||||
}
|
||||
}
|
||||
|
||||
private void replaceExpirationManager() {
|
||||
// ClusteredExpirationManager sends RemoteExpirationCommands to remote nodes which causes
|
||||
// undesired overhead. When get() triggers a RemoteExpirationCommand executed in async executor
|
||||
// this locks the entry for the duration of RPC, and putFromLoad with ZERO_LOCK_ACQUISITION_TIMEOUT
|
||||
// fails as it finds the entry being blocked.
|
||||
ExpirationManager expirationManager = cache.getComponentRegistry().getComponent(ExpirationManager.class);
|
||||
if ((expirationManager instanceof ClusterExpirationManager)) {
|
||||
// re-registering component does not stop the old one
|
||||
((ClusterExpirationManager) expirationManager).stop();
|
||||
cache.getComponentRegistry().registerComponent(new ExpirationManagerImpl<>(), ExpirationManager.class);
|
||||
cache.getComponentRegistry().rewire();
|
||||
}
|
||||
else if (expirationManager instanceof ExpirationManagerImpl) {
|
||||
// do nothing
|
||||
}
|
||||
else {
|
||||
throw new IllegalStateException("Expected clustered expiration manager, found " + expirationManager);
|
||||
}
|
||||
}
|
||||
|
||||
public long getTombstoneExpiration() {
|
||||
return tombstoneExpiration;
|
||||
}
|
||||
|
||||
public long getLastRegionInvalidation() {
|
||||
return lastRegionInvalidation;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void runInvalidation(boolean inTransaction) {
|
||||
if (strategy == null) {
|
||||
throw new IllegalStateException("Strategy was not set");
|
||||
}
|
||||
switch (strategy) {
|
||||
case NONE:
|
||||
case VALIDATION:
|
||||
super.runInvalidation(inTransaction);
|
||||
return;
|
||||
case TOMBSTONES:
|
||||
removeEntries(inTransaction, Tombstone.EXCLUDE_TOMBSTONES);
|
||||
return;
|
||||
case VERSIONED_ENTRIES:
|
||||
removeEntries(inTransaction, VersionedEntry.EXCLUDE_EMPTY_EXTRACT_VALUE);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
private void removeEntries(boolean inTransaction, KeyValueFilter filter) {
|
||||
// If the transaction is required, we simply need it -> will create our own
|
||||
boolean startedTx = false;
|
||||
if ( !inTransaction && requiresTransaction) {
|
||||
try {
|
||||
tm.begin();
|
||||
startedTx = true;
|
||||
}
|
||||
catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
// We can never use cache.clear() since tombstones must be kept.
|
||||
try {
|
||||
AdvancedCache localCache = Caches.localCache(cache);
|
||||
CloseableIterator<CacheEntry> it = Caches.entrySet(localCache, Tombstone.EXCLUDE_TOMBSTONES).iterator();
|
||||
long now = nextTimestamp();
|
||||
try {
|
||||
while (it.hasNext()) {
|
||||
// Cannot use it.next(); it.remove() due to ISPN-5653
|
||||
CacheEntry entry = it.next();
|
||||
switch (strategy) {
|
||||
case TOMBSTONES:
|
||||
localCache.remove(entry.getKey(), entry.getValue());
|
||||
break;
|
||||
case VERSIONED_ENTRIES:
|
||||
localCache.put(entry.getKey(), new VersionedEntry(null, null, now), tombstoneExpiration, TimeUnit.MILLISECONDS);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
finally {
|
||||
it.close();
|
||||
}
|
||||
}
|
||||
finally {
|
||||
if (startedTx) {
|
||||
try {
|
||||
tm.commit();
|
||||
}
|
||||
catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map toMap() {
|
||||
if (strategy == null) {
|
||||
throw new IllegalStateException("Strategy was not set");
|
||||
}
|
||||
switch (strategy) {
|
||||
case NONE:
|
||||
case VALIDATION:
|
||||
return super.toMap();
|
||||
case TOMBSTONES:
|
||||
return Caches.entrySet(Caches.localCache(cache), Tombstone.EXCLUDE_TOMBSTONES).toMap();
|
||||
case VERSIONED_ENTRIES:
|
||||
return Caches.entrySet(Caches.localCache(cache), VersionedEntry.EXCLUDE_EMPTY_EXTRACT_VALUE, VersionedEntry.EXCLUDE_EMPTY_EXTRACT_VALUE).toMap();
|
||||
default:
|
||||
throw new IllegalStateException(strategy.toString());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean contains(Object key) {
|
||||
if (!checkValid()) {
|
||||
return false;
|
||||
}
|
||||
Object value = cache.get(key);
|
||||
if (value instanceof Tombstone) {
|
||||
return false;
|
||||
}
|
||||
if (value instanceof FutureUpdate) {
|
||||
return ((FutureUpdate) value).getValue() != null;
|
||||
}
|
||||
if (value instanceof VersionedEntry) {
|
||||
return ((VersionedEntry) value).getValue() != null;
|
||||
}
|
||||
return value != null;
|
||||
}
|
||||
}
|
|
@ -1,11 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Internal Infinispan-specific base cache region implementations
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.impl;
|
|
@ -1,57 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.naturalid;
|
||||
|
||||
import org.hibernate.cache.CacheException;
|
||||
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
|
||||
import org.hibernate.cache.infinispan.access.AccessDelegate;
|
||||
import org.hibernate.cache.infinispan.impl.BaseTransactionalDataRegion;
|
||||
import org.hibernate.cache.spi.CacheDataDescription;
|
||||
import org.hibernate.cache.spi.CacheKeysFactory;
|
||||
import org.hibernate.cache.spi.NaturalIdRegion;
|
||||
import org.hibernate.cache.spi.access.AccessType;
|
||||
import org.hibernate.cache.spi.access.NaturalIdRegionAccessStrategy;
|
||||
import org.infinispan.AdvancedCache;
|
||||
|
||||
import javax.transaction.TransactionManager;
|
||||
|
||||
/**
|
||||
* Natural ID cache region
|
||||
*
|
||||
* @author Strong Liu <stliu@hibernate.org>
|
||||
* @author Galder Zamarreño
|
||||
*/
|
||||
public class NaturalIdRegionImpl extends BaseTransactionalDataRegion
|
||||
implements NaturalIdRegion {
|
||||
|
||||
/**
|
||||
* Constructor for the natural id region.
|
||||
* @param cache instance to store natural ids
|
||||
* @param name of natural id region
|
||||
* @param transactionManager
|
||||
* @param metadata for the natural id region
|
||||
* @param factory for the natural id region
|
||||
* @param cacheKeysFactory factory for cache keys
|
||||
*/
|
||||
public NaturalIdRegionImpl(
|
||||
AdvancedCache cache, String name, TransactionManager transactionManager,
|
||||
CacheDataDescription metadata, InfinispanRegionFactory factory, CacheKeysFactory cacheKeysFactory) {
|
||||
super( cache, name, transactionManager, metadata, factory, cacheKeysFactory );
|
||||
}
|
||||
|
||||
@Override
|
||||
public NaturalIdRegionAccessStrategy buildAccessStrategy(AccessType accessType) throws CacheException {
|
||||
checkAccessType( accessType );
|
||||
AccessDelegate accessDelegate = createAccessDelegate(accessType);
|
||||
if ( accessType == AccessType.READ_ONLY || !getCacheDataDescription().isMutable() ) {
|
||||
return new ReadOnlyAccess( this, accessDelegate );
|
||||
}
|
||||
else {
|
||||
return new ReadWriteAccess( this, accessDelegate );
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,119 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.naturalid;
|
||||
|
||||
import org.hibernate.cache.CacheException;
|
||||
import org.hibernate.cache.infinispan.access.AccessDelegate;
|
||||
import org.hibernate.cache.spi.NaturalIdRegion;
|
||||
import org.hibernate.cache.spi.access.NaturalIdRegionAccessStrategy;
|
||||
import org.hibernate.cache.spi.access.SoftLock;
|
||||
import org.hibernate.engine.spi.SharedSessionContractImplementor;
|
||||
import org.hibernate.persister.entity.EntityPersister;
|
||||
|
||||
/**
|
||||
* @author Strong Liu <stliu@hibernate.org>
|
||||
*/
|
||||
class ReadOnlyAccess implements NaturalIdRegionAccessStrategy {
|
||||
|
||||
protected final NaturalIdRegionImpl region;
|
||||
protected final AccessDelegate delegate;
|
||||
|
||||
ReadOnlyAccess(NaturalIdRegionImpl region, AccessDelegate delegate) {
|
||||
this.region = region;
|
||||
this.delegate = delegate;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean insert(SharedSessionContractImplementor session, Object key, Object value) throws CacheException {
|
||||
return delegate.insert( session, key, value, null );
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean update(SharedSessionContractImplementor session, Object key, Object value) throws CacheException {
|
||||
throw new UnsupportedOperationException( "Illegal attempt to edit read only item" );
|
||||
}
|
||||
|
||||
@Override
|
||||
public NaturalIdRegion getRegion() {
|
||||
return region;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void evict(Object key) throws CacheException {
|
||||
delegate.evict( key );
|
||||
}
|
||||
|
||||
@Override
|
||||
public void evictAll() throws CacheException {
|
||||
delegate.evictAll();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object get(SharedSessionContractImplementor session, Object key, long txTimestamp) throws CacheException {
|
||||
return delegate.get( session, key, txTimestamp );
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean putFromLoad(SharedSessionContractImplementor session, Object key, Object value, long txTimestamp, Object version) throws CacheException {
|
||||
return delegate.putFromLoad( session, key, value, txTimestamp, version );
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean putFromLoad(SharedSessionContractImplementor session, Object key, Object value, long txTimestamp, Object version, boolean minimalPutOverride)
|
||||
throws CacheException {
|
||||
return delegate.putFromLoad( session, key, value, txTimestamp, version, minimalPutOverride );
|
||||
}
|
||||
|
||||
@Override
|
||||
public void remove(SharedSessionContractImplementor session, Object key) throws CacheException {
|
||||
delegate.remove( session, key );
|
||||
}
|
||||
|
||||
@Override
|
||||
public void removeAll() throws CacheException {
|
||||
delegate.removeAll();
|
||||
}
|
||||
|
||||
@Override
|
||||
public SoftLock lockItem(SharedSessionContractImplementor session, Object key, Object version) throws CacheException {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public SoftLock lockRegion() throws CacheException {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void unlockItem(SharedSessionContractImplementor session, Object key, SoftLock lock) throws CacheException {
|
||||
delegate.unlockItem( session, key );
|
||||
}
|
||||
|
||||
@Override
|
||||
public void unlockRegion(SoftLock lock) throws CacheException {
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean afterInsert(SharedSessionContractImplementor session, Object key, Object value) throws CacheException {
|
||||
return delegate.afterInsert( session, key, value, null );
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean afterUpdate(SharedSessionContractImplementor session, Object key, Object value, SoftLock lock) throws CacheException {
|
||||
throw new UnsupportedOperationException( "Illegal attempt to edit read only item" );
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object generateCacheKey(Object[] naturalIdValues, EntityPersister persister, SharedSessionContractImplementor session) {
|
||||
return region.getCacheKeysFactory().createNaturalIdKey(naturalIdValues, persister, session);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object[] getNaturalIdValues(Object cacheKey) {
|
||||
return region.getCacheKeysFactory().getNaturalIdValues(cacheKey);
|
||||
}
|
||||
}
|
|
@ -1,33 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.naturalid;
|
||||
|
||||
import org.hibernate.cache.CacheException;
|
||||
import org.hibernate.cache.infinispan.access.AccessDelegate;
|
||||
import org.hibernate.cache.spi.access.SoftLock;
|
||||
import org.hibernate.engine.spi.SharedSessionContractImplementor;
|
||||
|
||||
/**
|
||||
* @author Strong Liu <stliu@hibernate.org>
|
||||
*/
|
||||
class ReadWriteAccess extends ReadOnlyAccess {
|
||||
|
||||
ReadWriteAccess(NaturalIdRegionImpl region, AccessDelegate delegate) {
|
||||
super(region, delegate);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean update(SharedSessionContractImplementor session, Object key, Object value) throws CacheException {
|
||||
return delegate.update( session, key, value, null, null );
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean afterUpdate(SharedSessionContractImplementor session, Object key, Object value, SoftLock lock) throws CacheException {
|
||||
return delegate.afterUpdate( session, key, value, null, null, lock );
|
||||
}
|
||||
|
||||
}
|
|
@ -1,11 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Internal Infinispan-based implementation of the natural-id cache region
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.naturalid;
|
|
@ -1,11 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Defines the integration with Infinispan as a second-level cache service.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan;
|
|
@ -1,191 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.query;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import javax.transaction.Transaction;
|
||||
import javax.transaction.TransactionManager;
|
||||
|
||||
import org.hibernate.cache.CacheException;
|
||||
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
|
||||
import org.hibernate.cache.infinispan.impl.BaseTransactionalDataRegion;
|
||||
import org.hibernate.cache.infinispan.util.Caches;
|
||||
import org.hibernate.cache.infinispan.util.InfinispanMessageLogger;
|
||||
import org.hibernate.cache.infinispan.util.InvocationAfterCompletion;
|
||||
import org.hibernate.cache.spi.QueryResultsRegion;
|
||||
import org.hibernate.engine.spi.SharedSessionContractImplementor;
|
||||
import org.hibernate.resource.transaction.spi.TransactionCoordinator;
|
||||
|
||||
import org.infinispan.AdvancedCache;
|
||||
import org.infinispan.configuration.cache.TransactionConfiguration;
|
||||
import org.infinispan.context.Flag;
|
||||
import org.infinispan.transaction.TransactionMode;
|
||||
|
||||
/**
|
||||
* Region for caching query results.
|
||||
*
|
||||
* @author Chris Bredesen
|
||||
* @author Galder Zamarreño
|
||||
* @since 3.5
|
||||
*/
|
||||
public class QueryResultsRegionImpl extends BaseTransactionalDataRegion implements QueryResultsRegion {
|
||||
private static final InfinispanMessageLogger log = InfinispanMessageLogger.Provider.getLog( QueryResultsRegionImpl.class );
|
||||
|
||||
private final AdvancedCache evictCache;
|
||||
private final AdvancedCache putCache;
|
||||
private final AdvancedCache getCache;
|
||||
private final ConcurrentMap<SharedSessionContractImplementor, Map> transactionContext = new ConcurrentHashMap<SharedSessionContractImplementor, Map>();
|
||||
private final boolean putCacheRequiresTransaction;
|
||||
|
||||
/**
|
||||
* Query region constructor
|
||||
* @param cache instance to store queries
|
||||
* @param name of the query region
|
||||
* @param factory for the query region
|
||||
*/
|
||||
public QueryResultsRegionImpl(AdvancedCache cache, String name, TransactionManager transactionManager, InfinispanRegionFactory factory) {
|
||||
super( cache, name, transactionManager, null, factory, null );
|
||||
// If Infinispan is using INVALIDATION for query cache, we don't want to propagate changes.
|
||||
// We use the Timestamps cache to manage invalidation
|
||||
final boolean localOnly = Caches.isInvalidationCache( cache );
|
||||
|
||||
this.evictCache = localOnly ? Caches.localCache( cache ) : cache;
|
||||
|
||||
this.putCache = localOnly ?
|
||||
Caches.failSilentWriteCache( cache, Flag.CACHE_MODE_LOCAL ) :
|
||||
Caches.failSilentWriteCache( cache );
|
||||
|
||||
this.getCache = Caches.failSilentReadCache( cache );
|
||||
|
||||
TransactionConfiguration transactionConfiguration = putCache.getCacheConfiguration().transaction();
|
||||
boolean transactional = transactionConfiguration.transactionMode() != TransactionMode.NON_TRANSACTIONAL;
|
||||
this.putCacheRequiresTransaction = transactional && !transactionConfiguration.autoCommit();
|
||||
// Since we execute the query update explicitly form transaction synchronization, the putCache does not need
|
||||
// to be transactional anymore (it had to be in the past to prevent revealing uncommitted changes).
|
||||
if (transactional) {
|
||||
log.useNonTransactionalQueryCache();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean isRegionAccessStrategyEnabled() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void evict(Object key) throws CacheException {
|
||||
for (Map map : transactionContext.values()) {
|
||||
map.remove(key);
|
||||
}
|
||||
evictCache.remove( key );
|
||||
}
|
||||
|
||||
@Override
|
||||
public void evictAll() throws CacheException {
|
||||
transactionContext.clear();
|
||||
final Transaction tx = suspend();
|
||||
try {
|
||||
// Invalidate the local region and then go remote
|
||||
invalidateRegion();
|
||||
Caches.broadcastEvictAll( cache );
|
||||
}
|
||||
finally {
|
||||
resume( tx );
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object get(SharedSessionContractImplementor session, Object key) throws CacheException {
|
||||
if ( !checkValid() ) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// In Infinispan get doesn't acquire any locks, so no need to suspend the tx.
|
||||
// In the past, when get operations acquired locks, suspending the tx was a way
|
||||
// to avoid holding locks that would prevent updates.
|
||||
// Add a zero (or low) timeout option so we don't block
|
||||
// waiting for tx's that did a put to commit
|
||||
Object result = null;
|
||||
Map map = transactionContext.get(session);
|
||||
if (map != null) {
|
||||
result = map.get(key);
|
||||
}
|
||||
if (result == null) {
|
||||
result = getCache.get( key );
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public void put(SharedSessionContractImplementor session, Object key, Object value) throws CacheException {
|
||||
if ( checkValid() ) {
|
||||
// See HHH-7898: Even with FAIL_SILENTLY flag, failure to write in transaction
|
||||
// fails the whole transaction. It is an Infinispan quirk that cannot be fixed
|
||||
// ISPN-5356 tracks that. This is because if the transaction continued the
|
||||
// value could be committed on backup owners, including the failed operation,
|
||||
// and the result would not be consistent.
|
||||
TransactionCoordinator tc = session.getTransactionCoordinator();
|
||||
if (tc != null && tc.isJoined()) {
|
||||
tc.getLocalSynchronizations().registerSynchronization(new PostTransactionQueryUpdate(tc, session, key, value));
|
||||
// no need to synchronize as the transaction will be accessed by only one thread
|
||||
Map map = transactionContext.get(session);
|
||||
if (map == null) {
|
||||
transactionContext.put(session, map = new HashMap());
|
||||
}
|
||||
map.put(key, value);
|
||||
return;
|
||||
}
|
||||
// Here we don't want to suspend the tx. If we do:
|
||||
// 1) We might be caching query results that reflect uncommitted
|
||||
// changes. No tx == no WL on cache node, so other threads
|
||||
// can prematurely see those query results
|
||||
// 2) No tx == immediate replication. More overhead, plus we
|
||||
// spread issue #1 above around the cluster
|
||||
|
||||
// Add a zero (or quite low) timeout option so we don't block.
|
||||
// Ignore any TimeoutException. Basically we forego caching the
|
||||
// query result in order to avoid blocking.
|
||||
// Reads are done with suspended tx, so they should not hold the
|
||||
// lock for long. Not caching the query result is OK, since
|
||||
// any subsequent read will just see the old result with its
|
||||
// out-of-date timestamp; that result will be discarded and the
|
||||
// db query performed again.
|
||||
putCache.put( key, value );
|
||||
}
|
||||
}
|
||||
|
||||
private class PostTransactionQueryUpdate extends InvocationAfterCompletion {
|
||||
private final SharedSessionContractImplementor session;
|
||||
private final Object key;
|
||||
private final Object value;
|
||||
|
||||
public PostTransactionQueryUpdate(TransactionCoordinator tc, SharedSessionContractImplementor session, Object key, Object value) {
|
||||
super(tc, putCacheRequiresTransaction);
|
||||
this.session = session;
|
||||
this.key = key;
|
||||
this.value = value;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void afterCompletion(int status) {
|
||||
transactionContext.remove(session);
|
||||
super.afterCompletion(status);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void invoke(boolean success) {
|
||||
if (success) {
|
||||
putCache.put(key, value);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,11 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Internal Infinispan-based implementation of the "query results" cache region
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.query;
|
|
@ -1,165 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.timestamp;
|
||||
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import javax.transaction.Transaction;
|
||||
|
||||
import org.hibernate.cache.CacheException;
|
||||
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
|
||||
import org.hibernate.cache.infinispan.util.Caches;
|
||||
import org.hibernate.engine.spi.SharedSessionContractImplementor;
|
||||
|
||||
import org.infinispan.AdvancedCache;
|
||||
import org.infinispan.commons.util.CloseableIterator;
|
||||
import org.infinispan.context.Flag;
|
||||
import org.infinispan.notifications.Listener;
|
||||
import org.infinispan.notifications.cachelistener.annotation.CacheEntryModified;
|
||||
import org.infinispan.notifications.cachelistener.annotation.CacheEntryRemoved;
|
||||
import org.infinispan.notifications.cachelistener.event.CacheEntryModifiedEvent;
|
||||
import org.infinispan.notifications.cachelistener.event.CacheEntryRemovedEvent;
|
||||
|
||||
/**
|
||||
* Timestamp cache region for clustered environments.
|
||||
*
|
||||
* @author Galder Zamarreño
|
||||
* @since 4.1
|
||||
*/
|
||||
@Listener
|
||||
public class ClusteredTimestampsRegionImpl extends TimestampsRegionImpl {
|
||||
|
||||
/**
|
||||
* Maintains a local (authoritative) cache of timestamps along with the
|
||||
* replicated cache held in Infinispan. It listens for changes in the
|
||||
* cache and updates the local cache accordingly. This approach allows
|
||||
* timestamp changes to be replicated asynchronously.
|
||||
*/
|
||||
private final Map localCache = new ConcurrentHashMap();
|
||||
|
||||
/**
|
||||
* Clustered timestamps region constructor.
|
||||
*
|
||||
* @param cache instance to store update timestamps
|
||||
* @param name of the update timestamps region
|
||||
* @param factory for the update timestamps region
|
||||
*/
|
||||
public ClusteredTimestampsRegionImpl(
|
||||
AdvancedCache cache,
|
||||
String name, InfinispanRegionFactory factory) {
|
||||
super( cache, name, factory );
|
||||
cache.addListener( this );
|
||||
populateLocalCache();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected AdvancedCache getTimestampsPutCache(AdvancedCache cache) {
|
||||
return Caches.asyncWriteCache( cache, Flag.SKIP_LOCKING );
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public Object get(SharedSessionContractImplementor session, Object key) throws CacheException {
|
||||
Object value = localCache.get( key );
|
||||
|
||||
if ( value == null && checkValid() ) {
|
||||
value = cache.get( key );
|
||||
|
||||
if ( value != null ) {
|
||||
localCache.put( key, value );
|
||||
}
|
||||
}
|
||||
return value;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void put(SharedSessionContractImplementor session, Object key, Object value) throws CacheException {
|
||||
updateLocalCache(key, value);
|
||||
super.put(session, key, value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void evictAll() throws CacheException {
|
||||
// TODO Is this a valid operation on a timestamps cache?
|
||||
final Transaction tx = suspend();
|
||||
try {
|
||||
// Invalidate the local region and then go remote
|
||||
invalidateRegion();
|
||||
Caches.broadcastEvictAll( cache );
|
||||
}
|
||||
finally {
|
||||
resume( tx );
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void invalidateRegion() {
|
||||
// Invalidate first
|
||||
super.invalidateRegion();
|
||||
localCache.clear();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void destroy() throws CacheException {
|
||||
localCache.clear();
|
||||
cache.removeListener( this );
|
||||
super.destroy();
|
||||
}
|
||||
|
||||
/**
|
||||
* Brings all data from the distributed cache into our local cache.
|
||||
*/
|
||||
private void populateLocalCache() {
|
||||
CloseableIterator iterator = cache.keySet().iterator();
|
||||
try {
|
||||
while (iterator.hasNext()) {
|
||||
get(null, iterator.next());
|
||||
}
|
||||
}
|
||||
finally {
|
||||
iterator.close();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Monitors cache events and updates the local cache
|
||||
*
|
||||
* @param event The event
|
||||
*/
|
||||
@CacheEntryModified
|
||||
@SuppressWarnings({"unused", "unchecked"})
|
||||
public void nodeModified(CacheEntryModifiedEvent event) {
|
||||
if ( !event.isPre() ) {
|
||||
updateLocalCache( event.getKey(), event.getValue() );
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Monitors cache events and updates the local cache
|
||||
*
|
||||
* @param event The event
|
||||
*/
|
||||
@CacheEntryRemoved
|
||||
@SuppressWarnings("unused")
|
||||
public void nodeRemoved(CacheEntryRemovedEvent event) {
|
||||
if ( event.isPre() ) {
|
||||
return;
|
||||
}
|
||||
localCache.remove( event.getKey() );
|
||||
}
|
||||
|
||||
private void updateLocalCache(Object key, Object value) {
|
||||
localCache.compute(key, (k, v) -> {
|
||||
if (v instanceof Number && value instanceof Number) {
|
||||
return Math.max(((Number) v).longValue(), ((Number) value).longValue());
|
||||
}
|
||||
else {
|
||||
return value;
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
|
@ -1,105 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.timestamp;
|
||||
|
||||
import javax.transaction.Transaction;
|
||||
|
||||
import org.hibernate.cache.CacheException;
|
||||
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
|
||||
import org.hibernate.cache.infinispan.impl.BaseGeneralDataRegion;
|
||||
import org.hibernate.cache.infinispan.util.Caches;
|
||||
import org.hibernate.cache.spi.TimestampsRegion;
|
||||
import org.hibernate.engine.spi.SharedSessionContractImplementor;
|
||||
|
||||
import org.infinispan.AdvancedCache;
|
||||
import org.infinispan.context.Flag;
|
||||
|
||||
/**
|
||||
* Defines the behavior of the timestamps cache region for Infinispan.
|
||||
*
|
||||
* @author Chris Bredesen
|
||||
* @author Galder Zamarreño
|
||||
* @since 3.5
|
||||
*/
|
||||
public class TimestampsRegionImpl extends BaseGeneralDataRegion implements TimestampsRegion {
|
||||
|
||||
private final AdvancedCache removeCache;
|
||||
private final AdvancedCache timestampsPutCache;
|
||||
|
||||
/**
|
||||
* Local timestamps region constructor.
|
||||
*
|
||||
* @param cache instance to store update timestamps
|
||||
* @param name of the update timestamps region
|
||||
* @param factory for the update timestamps region
|
||||
*/
|
||||
public TimestampsRegionImpl(
|
||||
AdvancedCache cache, String name,
|
||||
InfinispanRegionFactory factory) {
|
||||
super( cache, name, factory );
|
||||
this.removeCache = Caches.ignoreReturnValuesCache( cache );
|
||||
|
||||
// Skip locking when updating timestamps to provide better performance
|
||||
// under highly concurrent insert scenarios, where update timestamps
|
||||
// for an entity/collection type are constantly updated, creating
|
||||
// contention.
|
||||
//
|
||||
// The worst it can happen is that an earlier an earlier timestamp
|
||||
// (i.e. ts=1) will override a later on (i.e. ts=2), so it means that
|
||||
// in highly concurrent environments, queries might be considered stale
|
||||
// earlier in time. The upside is that inserts/updates are way faster
|
||||
// in local set ups.
|
||||
this.timestampsPutCache = getTimestampsPutCache( cache );
|
||||
}
|
||||
|
||||
protected AdvancedCache getTimestampsPutCache(AdvancedCache cache) {
|
||||
return Caches.ignoreReturnValuesCache( cache, Flag.SKIP_LOCKING );
|
||||
}
|
||||
|
||||
@Override
|
||||
public void evict(Object key) throws CacheException {
|
||||
// TODO Is this a valid operation on a timestamps cache?
|
||||
removeCache.remove( key );
|
||||
}
|
||||
|
||||
@Override
|
||||
public void evictAll() throws CacheException {
|
||||
// TODO Is this a valid operation on a timestamps cache?
|
||||
final Transaction tx = suspend();
|
||||
try {
|
||||
// Invalidate the local region
|
||||
invalidateRegion();
|
||||
}
|
||||
finally {
|
||||
resume( tx );
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public Object get(SharedSessionContractImplementor session, Object key) throws CacheException {
|
||||
if ( checkValid() ) {
|
||||
return cache.get( key );
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public void put(SharedSessionContractImplementor session, final Object key, final Object value) throws CacheException {
|
||||
try {
|
||||
// We ensure ASYNC semantics (JBCACHE-1175) and make sure previous
|
||||
// value is not loaded from cache store cos it's not needed.
|
||||
timestampsPutCache.put( key, value );
|
||||
}
|
||||
catch (Exception e) {
|
||||
throw new CacheException( e );
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -1,11 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Internal Infinispan-based implementation of the "update timestamps" cache region
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.timestamp;
|
|
@ -1,40 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.tm;
|
||||
|
||||
import java.util.Properties;
|
||||
import javax.transaction.TransactionManager;
|
||||
|
||||
import org.hibernate.boot.spi.SessionFactoryOptions;
|
||||
import org.hibernate.engine.transaction.jta.platform.spi.JtaPlatform;
|
||||
|
||||
/**
|
||||
* Hibernate transaction manager lookup class for Infinispan, so that
|
||||
* Hibernate's transaction manager can be hooked onto Infinispan.
|
||||
*
|
||||
* @author Galder Zamarreño
|
||||
* @since 3.5
|
||||
*/
|
||||
public class HibernateTransactionManagerLookup implements org.infinispan.transaction.lookup.TransactionManagerLookup {
|
||||
private final JtaPlatform jtaPlatform;
|
||||
|
||||
/**
|
||||
* Transaction manager lookup constructor.
|
||||
*
|
||||
* @param settings for the Hibernate application
|
||||
* @param properties for the Hibernate application
|
||||
*/
|
||||
public HibernateTransactionManagerLookup(SessionFactoryOptions settings, Properties properties) {
|
||||
this.jtaPlatform = settings != null ? settings.getServiceRegistry().getService( JtaPlatform.class ) : null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public TransactionManager getTransactionManager() throws Exception {
|
||||
return jtaPlatform == null ? null : jtaPlatform.retrieveTransactionManager();
|
||||
}
|
||||
|
||||
}
|
|
@ -1,11 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Internal bridging between Infinispan and Hibernate notions of talking to JTA
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.tm;
|
|
@ -1,80 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.util;
|
||||
|
||||
import org.infinispan.commands.CommandInvocationId;
|
||||
import org.infinispan.commands.write.InvalidateCommand;
|
||||
import org.infinispan.context.Flag;
|
||||
import org.infinispan.notifications.cachelistener.CacheNotifier;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.ObjectInput;
|
||||
import java.io.ObjectOutput;
|
||||
import java.util.Arrays;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
|
||||
/**
|
||||
* @author Radim Vansa <rvansa@redhat.com>
|
||||
*/
|
||||
public class BeginInvalidationCommand extends InvalidateCommand {
|
||||
private Object lockOwner;
|
||||
|
||||
public BeginInvalidationCommand() {
|
||||
}
|
||||
|
||||
public BeginInvalidationCommand(CacheNotifier notifier, Set<Flag> flags, CommandInvocationId commandInvocationId, Object[] keys, Object lockOwner) {
|
||||
super(notifier, flags, commandInvocationId, keys);
|
||||
this.lockOwner = lockOwner;
|
||||
}
|
||||
|
||||
public Object getLockOwner() {
|
||||
return lockOwner;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(ObjectOutput output) throws IOException {
|
||||
super.writeTo(output);
|
||||
output.writeObject(lockOwner);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
|
||||
super.readFrom(input);
|
||||
lockOwner = input.readObject();
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte getCommandId() {
|
||||
return CacheCommandIds.BEGIN_INVALIDATION;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (!super.equals(o)) {
|
||||
return false;
|
||||
}
|
||||
if (o instanceof BeginInvalidationCommand) {
|
||||
BeginInvalidationCommand bic = (BeginInvalidationCommand) o;
|
||||
return Objects.equals(lockOwner, bic.lockOwner);
|
||||
}
|
||||
else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return super.hashCode() + (lockOwner == null ? 0 : lockOwner.hashCode());
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "BeginInvalidateCommand{keys=" + Arrays.toString(keys) +
|
||||
", sessionTransactionId=" + lockOwner + '}';
|
||||
}
|
||||
}
|
|
@ -1,33 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.util;
|
||||
|
||||
import org.infinispan.commands.module.ExtendedModuleCommandFactory;
|
||||
import org.infinispan.commands.module.ModuleCommandExtensions;
|
||||
import org.infinispan.commands.module.ModuleCommandInitializer;
|
||||
|
||||
/**
|
||||
* Command extensions for second-level cache use case
|
||||
*
|
||||
* @author Galder Zamarreño
|
||||
* @since 4.0
|
||||
*/
|
||||
public class CacheCommandExtensions implements ModuleCommandExtensions {
|
||||
final CacheCommandFactory cacheCommandFactory = new CacheCommandFactory();
|
||||
final CacheCommandInitializer cacheCommandInitializer = new CacheCommandInitializer();
|
||||
|
||||
@Override
|
||||
public ExtendedModuleCommandFactory getModuleCommandFactory() {
|
||||
return cacheCommandFactory;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ModuleCommandInitializer getModuleCommandInitializer() {
|
||||
return cacheCommandInitializer;
|
||||
}
|
||||
|
||||
}
|
|
@ -1,94 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.util;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
|
||||
import org.hibernate.cache.infinispan.impl.BaseRegion;
|
||||
|
||||
import org.infinispan.commands.ReplicableCommand;
|
||||
import org.infinispan.commands.module.ExtendedModuleCommandFactory;
|
||||
import org.infinispan.commands.remote.CacheRpcCommand;
|
||||
|
||||
/**
|
||||
* Command factory
|
||||
*
|
||||
* @author Galder Zamarreño
|
||||
* @since 4.0
|
||||
*/
|
||||
public class CacheCommandFactory implements ExtendedModuleCommandFactory {
|
||||
|
||||
/**
|
||||
* Keeps track of regions to which second-level cache specific
|
||||
* commands have been plugged.
|
||||
*/
|
||||
private ConcurrentMap<String, BaseRegion> allRegions =
|
||||
new ConcurrentHashMap<String, BaseRegion>();
|
||||
|
||||
/**
|
||||
* Add region so that commands can be cleared on shutdown.
|
||||
*
|
||||
* @param region instance to keep track of
|
||||
*/
|
||||
public void addRegion(BaseRegion region) {
|
||||
allRegions.put( region.getName(), region );
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear all regions from this command factory.
|
||||
*
|
||||
* @param regions collection of regions to clear
|
||||
*/
|
||||
public void clearRegions(Collection<BaseRegion> regions) {
|
||||
regions.forEach( region -> allRegions.remove( region.getName() ) );
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<Byte, Class<? extends ReplicableCommand>> getModuleCommands() {
|
||||
final Map<Byte, Class<? extends ReplicableCommand>> map = new HashMap<Byte, Class<? extends ReplicableCommand>>( 3 );
|
||||
map.put( CacheCommandIds.EVICT_ALL, EvictAllCommand.class );
|
||||
map.put( CacheCommandIds.END_INVALIDATION, EndInvalidationCommand.class );
|
||||
map.put( CacheCommandIds.BEGIN_INVALIDATION, BeginInvalidationCommand.class );
|
||||
return map;
|
||||
}
|
||||
|
||||
@Override
|
||||
public CacheRpcCommand fromStream(byte commandId, Object[] args, String cacheName) {
|
||||
CacheRpcCommand c;
|
||||
switch ( commandId ) {
|
||||
case CacheCommandIds.EVICT_ALL:
|
||||
c = new EvictAllCommand( cacheName, allRegions.get( cacheName ) );
|
||||
break;
|
||||
case CacheCommandIds.END_INVALIDATION:
|
||||
c = new EndInvalidationCommand(cacheName);
|
||||
break;
|
||||
default:
|
||||
throw new IllegalArgumentException( "Not registered to handle command id " + commandId );
|
||||
}
|
||||
c.setParameters( commandId, args );
|
||||
return c;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ReplicableCommand fromStream(byte commandId, Object[] args) {
|
||||
ReplicableCommand c;
|
||||
switch ( commandId ) {
|
||||
case CacheCommandIds.BEGIN_INVALIDATION:
|
||||
c = new BeginInvalidationCommand();
|
||||
break;
|
||||
default:
|
||||
throw new IllegalArgumentException( "Not registered to handle command id " + commandId );
|
||||
}
|
||||
c.setParameters( commandId, args );
|
||||
return c;
|
||||
}
|
||||
|
||||
}
|
|
@ -1,30 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.util;
|
||||
|
||||
/**
|
||||
* Command id range assigned to Hibernate second level cache: 120 - 139
|
||||
*
|
||||
* @author Galder Zamarreño
|
||||
* @since 4.0
|
||||
*/
|
||||
public interface CacheCommandIds {
|
||||
/**
|
||||
* {@link EvictAllCommand} id
|
||||
*/
|
||||
byte EVICT_ALL = 120;
|
||||
|
||||
/**
|
||||
* {@link EndInvalidationCommand} id
|
||||
*/
|
||||
byte END_INVALIDATION = 121;
|
||||
|
||||
/**
|
||||
* {@link BeginInvalidationCommand} id
|
||||
*/
|
||||
byte BEGIN_INVALIDATION = 122;
|
||||
}
|
|
@ -1,90 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.util;
|
||||
|
||||
import org.hibernate.cache.infinispan.access.PutFromLoadValidator;
|
||||
import org.infinispan.commands.CommandInvocationId;
|
||||
import org.infinispan.commands.ReplicableCommand;
|
||||
import org.infinispan.commands.module.ModuleCommandInitializer;
|
||||
import org.infinispan.configuration.cache.Configuration;
|
||||
import org.infinispan.context.Flag;
|
||||
import org.infinispan.factories.annotations.Inject;
|
||||
import org.infinispan.interceptors.locking.ClusteringDependentLogic;
|
||||
import org.infinispan.notifications.cachelistener.CacheNotifier;
|
||||
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
/**
|
||||
* Command initializer
|
||||
*
|
||||
* @author Galder Zamarreño
|
||||
* @since 4.0
|
||||
*/
|
||||
public class CacheCommandInitializer implements ModuleCommandInitializer {
|
||||
|
||||
private final ConcurrentHashMap<String, PutFromLoadValidator> putFromLoadValidators
|
||||
= new ConcurrentHashMap<String, PutFromLoadValidator>();
|
||||
private CacheNotifier notifier;
|
||||
private Configuration configuration;
|
||||
private ClusteringDependentLogic clusteringDependentLogic;
|
||||
|
||||
@Inject
|
||||
public void injectDependencies(CacheNotifier notifier, Configuration configuration, ClusteringDependentLogic clusteringDependentLogic) {
|
||||
this.notifier = notifier;
|
||||
this.configuration = configuration;
|
||||
this.clusteringDependentLogic = clusteringDependentLogic;
|
||||
}
|
||||
|
||||
public void addPutFromLoadValidator(String cacheName, PutFromLoadValidator putFromLoadValidator) {
|
||||
// there could be two instances of PutFromLoadValidator bound to the same cache when
|
||||
// there are two JndiInfinispanRegionFactories bound to the same cacheManager via JNDI.
|
||||
// In that case, as putFromLoadValidator does not really own the pendingPuts cache,
|
||||
// it's safe to have more instances.
|
||||
putFromLoadValidators.put(cacheName, putFromLoadValidator);
|
||||
}
|
||||
|
||||
public PutFromLoadValidator removePutFromLoadValidator(String cacheName) {
|
||||
return putFromLoadValidators.remove(cacheName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Build an instance of {@link EvictAllCommand} for a given region.
|
||||
*
|
||||
* @param regionName name of region for {@link EvictAllCommand}
|
||||
* @return a new instance of {@link EvictAllCommand}
|
||||
*/
|
||||
public EvictAllCommand buildEvictAllCommand(String regionName) {
|
||||
// No need to pass region factory because no information on that object
|
||||
// is sent around the cluster. However, when the command factory builds
|
||||
// and evict all command remotely, it does need to initialize it with
|
||||
// the right region factory so that it can call it back.
|
||||
return new EvictAllCommand( regionName );
|
||||
}
|
||||
|
||||
public BeginInvalidationCommand buildBeginInvalidationCommand(Set<Flag> flags, Object[] keys, Object lockOwner) {
|
||||
return new BeginInvalidationCommand(notifier, flags, CommandInvocationId.generateId(clusteringDependentLogic.getAddress()), keys, lockOwner);
|
||||
}
|
||||
|
||||
public EndInvalidationCommand buildEndInvalidationCommand(String cacheName, Object[] keys, Object lockOwner) {
|
||||
return new EndInvalidationCommand( cacheName, keys, lockOwner );
|
||||
}
|
||||
|
||||
@Override
|
||||
public void initializeReplicableCommand(ReplicableCommand c, boolean isRemote) {
|
||||
switch (c.getCommandId()) {
|
||||
case CacheCommandIds.END_INVALIDATION:
|
||||
EndInvalidationCommand endInvalidationCommand = (EndInvalidationCommand) c;
|
||||
endInvalidationCommand.setPutFromLoadValidator(putFromLoadValidators.get(endInvalidationCommand.getCacheName()));
|
||||
break;
|
||||
case CacheCommandIds.BEGIN_INVALIDATION:
|
||||
BeginInvalidationCommand beginInvalidationCommand = (BeginInvalidationCommand) c;
|
||||
beginInvalidationCommand.init(notifier, configuration);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,515 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.cache.infinispan.util;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.Callable;
|
||||
import javax.transaction.Status;
|
||||
import javax.transaction.TransactionManager;
|
||||
|
||||
import org.infinispan.AdvancedCache;
|
||||
import org.infinispan.commons.util.CloseableIterable;
|
||||
import org.infinispan.commons.util.CloseableIterator;
|
||||
import org.infinispan.container.entries.CacheEntry;
|
||||
import org.infinispan.context.Flag;
|
||||
import org.infinispan.filter.AcceptAllKeyValueFilter;
|
||||
import org.infinispan.filter.Converter;
|
||||
import org.infinispan.filter.KeyValueFilter;
|
||||
import org.infinispan.filter.NullValueConverter;
|
||||
import org.infinispan.remoting.rpc.RpcManager;
|
||||
import org.infinispan.remoting.rpc.RpcOptions;
|
||||
|
||||
/**
|
||||
* Helper for dealing with Infinispan cache instances.
|
||||
*
|
||||
* @author Galder Zamarreño
|
||||
* @since 4.1
|
||||
*/
|
||||
public class Caches {
|
||||
|
||||
private Caches() {
|
||||
// Suppresses default constructor, ensuring non-instantiability.
|
||||
}
|
||||
|
||||
/**
|
||||
* Call an operation within a transaction. This method guarantees that the
|
||||
* right pattern is used to make sure that the transaction is always either
|
||||
* committed or rollback.
|
||||
*
|
||||
* @param cache instance whose transaction manager to use
|
||||
* @param c callable instance to run within a transaction
|
||||
* @param <T> type of callable return
|
||||
* @return returns whatever the callable returns
|
||||
* @throws Exception if any operation within the transaction fails
|
||||
*/
|
||||
public static <T> T withinTx(
|
||||
AdvancedCache cache,
|
||||
Callable<T> c) throws Exception {
|
||||
// Retrieve transaction manager
|
||||
return withinTx( cache.getTransactionManager(), c );
|
||||
}
|
||||
|
||||
/**
|
||||
* Call an operation within a transaction. This method guarantees that the
|
||||
* right pattern is used to make sure that the transaction is always either
|
||||
* committed or rollbacked.
|
||||
*
|
||||
* @param tm transaction manager
|
||||
* @param c callable instance to run within a transaction
|
||||
* @param <T> type of callable return
|
||||
* @return returns whatever the callable returns
|
||||
* @throws Exception if any operation within the transaction fails
|
||||
*/
|
||||
public static <T> T withinTx(
|
||||
TransactionManager tm,
|
||||
Callable<T> c) throws Exception {
|
||||
if ( tm == null ) {
|
||||
try {
|
||||
return c.call();
|
||||
}
|
||||
catch (Exception e) {
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
		else {
			tm.begin();
			try {
				return c.call();
			}
			catch (Exception e) {
				tm.setRollbackOnly();
				throw e;
			}
			finally {
				if ( tm.getStatus() == Status.STATUS_ACTIVE ) {
					tm.commit();
				}
				else {
					tm.rollback();
				}
			}
		}
	}

	public static void withinTx(TransactionManager tm, final Runnable runnable) throws Exception {
		withinTx(tm, new Callable<Void>() {
			@Override
			public Void call() throws Exception {
				runnable.run();
				return null;
			}
		});
	}

	/**
	 * Transform a given cache into a local cache
	 *
	 * @param cache to be transformed
	 * @return a cache that operates only in local-mode
	 */
	public static AdvancedCache localCache(AdvancedCache cache) {
		return cache.withFlags( Flag.CACHE_MODE_LOCAL );
	}

	/**
	 * Transform a given cache into a cache that ignores return values for
	 * operations returning previous values, i.e. {@link AdvancedCache#put(Object, Object)}
	 *
	 * @param cache to be transformed
	 * @return a cache that ignores return values
	 */
	public static AdvancedCache ignoreReturnValuesCache(AdvancedCache cache) {
		return cache.withFlags( Flag.SKIP_CACHE_LOAD, Flag.SKIP_REMOTE_LOOKUP, Flag.IGNORE_RETURN_VALUES );
	}

	/**
	 * Transform a given cache into a cache that ignores return values for
	 * operations returning previous values, i.e. {@link AdvancedCache#put(Object, Object)},
	 * adding an extra flag.
	 *
	 * @param cache to be transformed
	 * @param extraFlag to add to the returned cache
	 * @return a cache that ignores return values
	 */
	public static AdvancedCache ignoreReturnValuesCache(
			AdvancedCache cache, Flag extraFlag) {
		return cache.withFlags(
				Flag.SKIP_CACHE_LOAD, Flag.SKIP_REMOTE_LOOKUP, Flag.IGNORE_RETURN_VALUES, extraFlag
		);
	}

	/**
	 * Transform a given cache into a cache that writes cache entries without
	 * waiting for them to complete, adding an extra flag.
	 *
	 * @param cache to be transformed
	 * @param extraFlag to add to the returned cache
	 * @return a cache that writes asynchronously
	 */
	public static AdvancedCache asyncWriteCache(
			AdvancedCache cache,
			Flag extraFlag) {
		return cache.withFlags(
				Flag.SKIP_CACHE_LOAD,
				Flag.SKIP_REMOTE_LOOKUP,
				Flag.FORCE_ASYNCHRONOUS,
				extraFlag
		);
	}

	/**
	 * Transform a given cache into a cache that fails silently if cache writes fail.
	 *
	 * @param cache to be transformed
	 * @return a cache that fails silently if cache writes fail
	 */
	public static AdvancedCache failSilentWriteCache(AdvancedCache cache) {
		return cache.withFlags(
				Flag.FAIL_SILENTLY,
				Flag.ZERO_LOCK_ACQUISITION_TIMEOUT,
				Flag.SKIP_CACHE_LOAD,
				Flag.SKIP_REMOTE_LOOKUP
		);
	}

	/**
	 * Transform a given cache into a cache that fails silently if
	 * cache writes fail, adding an extra flag.
	 *
	 * @param cache to be transformed
	 * @param extraFlag to be added to returned cache
	 * @return a cache that fails silently if cache writes fail
	 */
	public static AdvancedCache failSilentWriteCache(
			AdvancedCache cache,
			Flag extraFlag) {
		return cache.withFlags(
				Flag.FAIL_SILENTLY,
				Flag.ZERO_LOCK_ACQUISITION_TIMEOUT,
				Flag.SKIP_CACHE_LOAD,
				Flag.SKIP_REMOTE_LOOKUP,
				extraFlag
		);
	}

	/**
	 * Transform a given cache into a cache that fails silently if
	 * cache reads fail.
	 *
	 * @param cache to be transformed
	 * @return a cache that fails silently if cache reads fail
	 */
	public static AdvancedCache failSilentReadCache(AdvancedCache cache) {
		return cache.withFlags(
				Flag.FAIL_SILENTLY,
				Flag.ZERO_LOCK_ACQUISITION_TIMEOUT
		);
	}

	/**
	 * Broadcast an evict-all command with the given cache instance.
	 *
	 * @param cache instance used to broadcast command
	 */
	public static void broadcastEvictAll(AdvancedCache cache) {
		final RpcManager rpcManager = cache.getRpcManager();
		if ( rpcManager != null ) {
			// Only broadcast evict all if it's clustered
			final CacheCommandInitializer factory = cache.getComponentRegistry()
					.getComponent( CacheCommandInitializer.class );
			final boolean isSync = isSynchronousCache( cache );

			final EvictAllCommand cmd = factory.buildEvictAllCommand( cache.getName() );
			final RpcOptions options = rpcManager.getDefaultRpcOptions( isSync );
			rpcManager.invokeRemotely( null, cmd, options );
		}
	}

	/**
	 * Indicates whether the given cache is configured with
	 * {@link org.infinispan.configuration.cache.CacheMode#INVALIDATION_ASYNC} or
	 * {@link org.infinispan.configuration.cache.CacheMode#INVALIDATION_SYNC}.
	 *
	 * @param cache to check for invalidation configuration
	 * @return true if the cache is configured with invalidation, false otherwise
	 */
	public static boolean isInvalidationCache(AdvancedCache cache) {
		return cache.getCacheConfiguration()
				.clustering().cacheMode().isInvalidation();
	}

	/**
	 * Indicates whether the given cache is configured with
	 * {@link org.infinispan.configuration.cache.CacheMode#REPL_SYNC},
	 * {@link org.infinispan.configuration.cache.CacheMode#INVALIDATION_SYNC}, or
	 * {@link org.infinispan.configuration.cache.CacheMode#DIST_SYNC}.
	 *
	 * @param cache to check for synchronous configuration
	 * @return true if the cache is configured with synchronous mode, false otherwise
	 */
	public static boolean isSynchronousCache(AdvancedCache cache) {
		return cache.getCacheConfiguration()
				.clustering().cacheMode().isSynchronous();
	}

	/**
	 * Indicates whether the given cache is configured to cluster its contents.
	 * A cache is considered to be clustered if it's configured with any cache mode
	 * except {@link org.infinispan.configuration.cache.CacheMode#LOCAL}
	 *
	 * @param cache to check whether it clusters its contents
	 * @return true if the cache is configured with clustering, false otherwise
	 */
	public static boolean isClustered(AdvancedCache cache) {
		return cache.getCacheConfiguration()
				.clustering().cacheMode().isClustered();
	}

	public static boolean isTransactionalCache(AdvancedCache cache) {
		return cache.getCacheConfiguration().transaction().transactionMode().isTransactional();
	}


	public static void removeAll(AdvancedCache cache) {
		CloseableIterator it = cache.keySet().iterator();
		try {
			while (it.hasNext()) {
				// Cannot use it.next(); it.remove() due to ISPN-5653
				cache.remove(it.next());
			}
		}
		finally {
			it.close();
		}
	}

	/**
	 * This interface is provided for convenient fluent use of CloseableIterable
	 */
	public interface CollectableCloseableIterable<T> extends CloseableIterable<T> {
		Set<T> toSet();
	}

	public interface MapCollectableCloseableIterable<K, V> extends CloseableIterable<CacheEntry<K, V>> {
		Map<K, V> toMap();
	}

	public static <K, V> CollectableCloseableIterable<K> keys(AdvancedCache<K, V> cache, KeyValueFilter<K, V> filter) {
		// HHH-10023: we can't use keySet()
		final CloseableIterable<CacheEntry<K, Void>> entryIterable = cache
				.filterEntries( filter )
				.converter( NullValueConverter.getInstance() );
		return new CollectableCloseableIterableImpl<K, Void, K>(entryIterable, Selector.KEY);
	}

	public static <K, V> CollectableCloseableIterable<V> values(AdvancedCache<K, V> cache, KeyValueFilter<K, V> filter) {
		if (cache.getCacheConfiguration().transaction().transactionMode().isTransactional()) {
			// Dummy read to enlist the LocalTransaction as workaround for ISPN-5676
			cache.containsKey(false);
		}
		// HHH-10023: we can't use values()
		final CloseableIterable<CacheEntry<K, V>> entryIterable = cache.filterEntries(filter);
		return new CollectableCloseableIterableImpl<K, V, V>(entryIterable, Selector.VALUE);
	}

	public static <K, V, T> CollectableCloseableIterable<T> values(AdvancedCache<K, V> cache, KeyValueFilter<K, V> filter, Converter<K, V, T> converter) {
		if (cache.getCacheConfiguration().transaction().transactionMode().isTransactional()) {
			// Dummy read to enlist the LocalTransaction as workaround for ISPN-5676
			cache.containsKey(false);
		}
		// HHH-10023: we can't use values()
		final CloseableIterable<CacheEntry<K, T>> entryIterable = cache.filterEntries(filter).converter(converter);
		return new CollectableCloseableIterableImpl<K, T, T>(entryIterable, Selector.VALUE);
	}

	public static <K, V> MapCollectableCloseableIterable<K, V> entrySet(AdvancedCache<K, V> cache) {
		return entrySet(cache, (KeyValueFilter<K, V>) AcceptAllKeyValueFilter.getInstance());
	}

	public static <K, V> MapCollectableCloseableIterable<K, V> entrySet(AdvancedCache<K, V> cache, KeyValueFilter<K, V> filter) {
		if (cache.getCacheConfiguration().transaction().transactionMode().isTransactional()) {
			// Dummy read to enlist the LocalTransaction as workaround for ISPN-5676
			cache.containsKey(false);
		}
		// HHH-10023: we can't use entrySet()
		final CloseableIterable<CacheEntry<K, V>> entryIterable = cache.filterEntries(filter);
		return new MapCollectableCloseableIterableImpl<K, V>(entryIterable);
	}

	public static <K, V, T> MapCollectableCloseableIterable<K, T> entrySet(AdvancedCache<K, V> cache, KeyValueFilter<K, V> filter, Converter<K, V, T> converter) {
		if (cache.getCacheConfiguration().transaction().transactionMode().isTransactional()) {
			// Dummy read to enlist the LocalTransaction as workaround for ISPN-5676
			cache.containsKey(false);
		}
		// HHH-10023: we can't use entrySet()
		final CloseableIterable<CacheEntry<K, T>> entryIterable = cache.filterEntries(filter).converter(converter);
		return new MapCollectableCloseableIterableImpl<K, T>(entryIterable);
	}

	/* Function<CacheEntry<K, V>, T> */
	private interface Selector<K, V, T> {
		Selector KEY = new Selector<Object, Void, Object>() {
			@Override
			public Object apply(CacheEntry<Object, Void> entry) {
				return entry.getKey();
			}
		};

		Selector VALUE = new Selector<Object, Object, Object>() {
			@Override
			public Object apply(CacheEntry<Object, Object> entry) {
				return entry.getValue();
			}
		};

		T apply(CacheEntry<K, V> entry);
	}

	private static class CollectableCloseableIterableImpl<K, V, T> implements CollectableCloseableIterable<T> {
		private final CloseableIterable<CacheEntry<K, V>> entryIterable;
		private final Selector<K, V, T> selector;

		public CollectableCloseableIterableImpl(CloseableIterable<CacheEntry<K, V>> entryIterable, Selector<K, V, T> selector) {
			this.entryIterable = entryIterable;
			this.selector = selector;
		}

		@Override
		public void close() {
			entryIterable.close();
		}

		@Override
		public CloseableIterator<T> iterator() {
			final CloseableIterator<CacheEntry<K, V>> entryIterator = entryIterable.iterator();
			return new CloseableIterator<T>() {
				@Override
				public void close() {
					entryIterator.close();
				}

				@Override
				public boolean hasNext() {
					return entryIterator.hasNext();
				}

				@Override
				public T next() {
					return selector.apply(entryIterator.next());
				}

				@Override
				public void remove() {
					throw new UnsupportedOperationException( "remove() not supported" );
				}
			};
		}

		@Override
		public String toString() {
			CloseableIterator<CacheEntry<K, V>> it = entryIterable.iterator();
			try {
				if (!it.hasNext()) {
					return "[]";
				}

				StringBuilder sb = new StringBuilder();
				sb.append('[');
				for (; ; ) {
					CacheEntry<K, V> entry = it.next();
					sb.append(selector.apply(entry));
					if (!it.hasNext()) {
						return sb.append(']').toString();
					}
					sb.append(',').append(' ');
				}
			}
			finally {
				it.close();
			}
		}

		@Override
		public Set toSet() {
			HashSet set = new HashSet();
			CloseableIterator it = iterator();
			try {
				while (it.hasNext()) {
					set.add(it.next());
				}
			}
			finally {
				it.close();
			}
			return set;
		}
	}

	private static class MapCollectableCloseableIterableImpl<K, V> implements MapCollectableCloseableIterable<K, V> {
		private final CloseableIterable<CacheEntry<K, V>> entryIterable;

		public MapCollectableCloseableIterableImpl(CloseableIterable<CacheEntry<K, V>> entryIterable) {
			this.entryIterable = entryIterable;
		}

		@Override
		public Map<K, V> toMap() {
			Map<K, V> map = new HashMap<K, V>();
			CloseableIterator<CacheEntry<K, V>> it = entryIterable.iterator();
			try {
				while (it.hasNext()) {
					CacheEntry<K, V> entry = it.next();
					V value = entry.getValue();
					if (value != null) {
						map.put(entry.getKey(), value);
					}
				}
				return map;
			}
			finally {
				it.close();
			}
		}

		@Override
		public String toString() {
			CloseableIterator<CacheEntry<K, V>> it = entryIterable.iterator();
			try {
				if (!it.hasNext()) {
					return "{}";
				}

				StringBuilder sb = new StringBuilder();
				sb.append('{');
				for (; ; ) {
					CacheEntry<K, V> entry = it.next();
					sb.append(entry.getKey()).append('=').append(entry.getValue());
					if (!it.hasNext()) {
						return sb.append('}').toString();
					}
					sb.append(',').append(' ');
				}
			}
			finally {
				it.close();
			}
		}

		@Override
		public void close() {
			entryIterable.close();
		}

		@Override
		public CloseableIterator<CacheEntry<K, V>> iterator() {
			return entryIterable.iterator();
		}
	}
}
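The flag-decorating helpers above are thin wrappers over AdvancedCache.withFlags(); a minimal usage sketch (not part of the removed sources — the cache name and key/value are hypothetical, the calls mirror the helpers shown above):

// Sketch only, assuming the Infinispan 8.x API this module compiled against.
AdvancedCache entityCache = cacheManager.getCache( "entity" ).getAdvancedCache();
// put that neither loads the previous value nor blocks on lock acquisition
AdvancedCache writeCache = Caches.failSilentWriteCache( entityCache );
Caches.withinTx( tm, () -> writeCache.put( key, value ) );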
@@ -1,121 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.cache.infinispan.util;

import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Arrays;

import org.hibernate.cache.infinispan.access.PutFromLoadValidator;

import org.infinispan.commands.remote.BaseRpcCommand;
import org.infinispan.commons.marshall.MarshallUtil;
import org.infinispan.context.InvocationContext;

/**
 * Sent in commit phase (after DB commit) to remote nodes in order to stop invalidating
 * putFromLoads.
 *
 * @author Radim Vansa <rvansa@redhat.com>
 */
public class EndInvalidationCommand extends BaseRpcCommand {
	private Object[] keys;
	private Object lockOwner;
	private PutFromLoadValidator putFromLoadValidator;

	public EndInvalidationCommand(String cacheName) {
		this(cacheName, null, null);
	}

	/**
	 * @param cacheName name of the cache to evict
	 */
	public EndInvalidationCommand(String cacheName, Object[] keys, Object lockOwner) {
		super(cacheName);
		this.keys = keys;
		this.lockOwner = lockOwner;
	}

	@Override
	public Object perform(InvocationContext ctx) throws Throwable {
		for (Object key : keys) {
			putFromLoadValidator.endInvalidatingKey(lockOwner, key);
		}
		return null;
	}

	@Override
	public byte getCommandId() {
		return CacheCommandIds.END_INVALIDATION;
	}

	@Override
	public void writeTo(ObjectOutput output) throws IOException {
		MarshallUtil.marshallArray(keys, output);
		output.writeObject(lockOwner);
	}

	@Override
	public void readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
		keys = MarshallUtil.unmarshallArray(input, Object[]::new);
		lockOwner = input.readObject();
	}

	@Override
	public boolean isReturnValueExpected() {
		return false;
	}

	@Override
	public boolean canBlock() {
		return true;
	}

	public void setPutFromLoadValidator(PutFromLoadValidator putFromLoadValidator) {
		this.putFromLoadValidator = putFromLoadValidator;
	}

	@Override
	public boolean equals(Object o) {
		if (this == o) {
			return true;
		}
		if (!(o instanceof EndInvalidationCommand)) {
			return false;
		}

		EndInvalidationCommand that = (EndInvalidationCommand) o;

		if (cacheName == null ? cacheName != null : !cacheName.equals(that.cacheName)) {
			return false;
		}
		if (!Arrays.equals(keys, that.keys)) {
			return false;
		}
		return !(lockOwner != null ? !lockOwner.equals(that.lockOwner) : that.lockOwner != null);

	}

	@Override
	public int hashCode() {
		int result = cacheName != null ? cacheName.hashCode() : 0;
		result = 31 * result + (keys != null ? Arrays.hashCode(keys) : 0);
		result = 31 * result + (lockOwner != null ? lockOwner.hashCode() : 0);
		return result;
	}

	@Override
	public String toString() {
		final StringBuilder sb = new StringBuilder("EndInvalidationCommand{");
		sb.append("cacheName=").append(cacheName);
		sb.append(", keys=").append(Arrays.toString(keys));
		sb.append(", sessionTransactionId=").append(lockOwner);
		sb.append('}');
		return sb.toString();
	}
}
@@ -1,75 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.cache.infinispan.util;

import org.hibernate.cache.infinispan.impl.BaseRegion;

import org.infinispan.commands.remote.BaseRpcCommand;
import org.infinispan.context.InvocationContext;

/**
 * Evict all command
 *
 * @author Galder Zamarreño
 * @since 4.0
 */
public class EvictAllCommand extends BaseRpcCommand {

	private final BaseRegion region;

	/**
	 * Evict all command constructor.
	 *
	 * @param regionName name of the region to evict
	 * @param region to evict
	 */
	public EvictAllCommand(String regionName, BaseRegion region) {
		// region name and cache names are the same...
		super( regionName );
		this.region = region;
	}

	/**
	 * Evict all command constructor.
	 *
	 * @param regionName name of the region to evict
	 */
	public EvictAllCommand(String regionName) {
		this( regionName, null );
	}

	@Override
	public Object perform(InvocationContext ctx) throws Throwable {
		// When a node is joining the cluster, it may receive an EvictAllCommand before the regions
		// are started up. It's safe to ignore such invalidation at this point since no data got in.
		if (region != null) {
			region.invalidateRegion();
		}
		return null;
	}

	@Override
	public byte getCommandId() {
		return CacheCommandIds.EVICT_ALL;
	}

	@Override
	public Object[] getParameters() {
		return new Object[0];
	}

	@Override
	public void setParameters(int commandId, Object[] parameters) {
		// No-op
	}

	@Override
	public boolean isReturnValueExpected() {
		return false;
	}

}
@@ -1,55 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.cache.infinispan.util;

import org.infinispan.commons.marshall.AdvancedExternalizer;

import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import java.util.UUID;

/**
 * @author Radim Vansa <rvansa@redhat.com>
 */
public class Externalizers {

	public final static int UUID = 1200;
	public final static int TOMBSTONE = 1201;
	public final static int EXCLUDE_TOMBSTONES_FILTER = 1202;
	public final static int TOMBSTONE_UPDATE = 1203;
	public final static int FUTURE_UPDATE = 1204;
	public final static int VALUE_EXTRACTOR = 1205;
	public final static int VERSIONED_ENTRY = 1206;
	public final static int EXCLUDE_EMPTY_EXTRACT_VALUE = 1207;

	public static class UUIDExternalizer implements AdvancedExternalizer<UUID> {

		@Override
		public Set<Class<? extends UUID>> getTypeClasses() {
			return Collections.<Class<? extends UUID>>singleton(UUID.class);
		}

		@Override
		public Integer getId() {
			return UUID;
		}

		@Override
		public void writeObject(ObjectOutput output, UUID uuid) throws IOException {
			output.writeLong(uuid.getMostSignificantBits());
			output.writeLong(uuid.getLeastSignificantBits());
		}

		@Override
		public UUID readObject(ObjectInput input) throws IOException, ClassNotFoundException {
			return new UUID(input.readLong(), input.readLong());
		}
	}
}
@@ -1,87 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.cache.infinispan.util;

import org.infinispan.commons.marshall.AdvancedExternalizer;

import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import java.util.UUID;

/**
 * Request to update the tombstone, coming from insert/update/remove operation.
 *
 * This object should *not* be stored in cache.
 *
 * @author Radim Vansa <rvansa@redhat.com>
 */
public class FutureUpdate {
	private final UUID uuid;
	private final long timestamp;
	private final Object value;

	public FutureUpdate(UUID uuid, long timestamp, Object value) {
		this.uuid = uuid;
		this.timestamp = timestamp;
		this.value = value;
	}

	@Override
	public String toString() {
		final StringBuilder sb = new StringBuilder("FutureUpdate{");
		sb.append("uuid=").append(uuid);
		sb.append(", timestamp=").append(timestamp);
		sb.append(", value=").append(value);
		sb.append('}');
		return sb.toString();
	}

	public UUID getUuid() {
		return uuid;
	}

	public Object getValue() {
		return value;
	}

	public long getTimestamp() {
		return timestamp;
	}

	public static class Externalizer implements AdvancedExternalizer<FutureUpdate> {

		@Override
		public void writeObject(ObjectOutput output, FutureUpdate object) throws IOException {
			output.writeLong(object.uuid.getMostSignificantBits());
			output.writeLong(object.uuid.getLeastSignificantBits());
			output.writeLong(object.timestamp);
			output.writeObject(object.value);
		}

		@Override
		public FutureUpdate readObject(ObjectInput input) throws IOException, ClassNotFoundException {
			long msb = input.readLong();
			long lsb = input.readLong();
			long timestamp = input.readLong();
			Object value = input.readObject();
			return new FutureUpdate(new UUID(msb, lsb), timestamp, value);
		}

		@Override
		public Set<Class<? extends FutureUpdate>> getTypeClasses() {
			return Collections.<Class<? extends FutureUpdate>>singleton(FutureUpdate.class);
		}

		@Override
		public Integer getId() {
			return Externalizers.FUTURE_UPDATE;
		}
	}
}
@@ -1,132 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.cache.infinispan.util;

import org.hibernate.cache.CacheException;
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
import org.hibernate.cache.infinispan.JndiInfinispanRegionFactory;
import org.jboss.logging.BasicLogger;
import org.jboss.logging.Logger;
import org.jboss.logging.annotations.Cause;
import org.jboss.logging.annotations.LogMessage;
import org.jboss.logging.annotations.Message;
import org.jboss.logging.annotations.MessageLogger;

import javax.naming.NamingException;
import javax.transaction.SystemException;

import static org.jboss.logging.Logger.Level.*;

/**
 * The jboss-logging {@link MessageLogger} for the hibernate-infinispan module. It reserves message ids ranging from
 * 25001 to 30000 inclusively.
 *
 * @author Radim Vansa <rvansa@redhat.com>
 */
@MessageLogger(projectCode = "HHH")
public interface InfinispanMessageLogger extends BasicLogger {
	// Workaround for JBLOGGING-120: cannot add static interface method
	class Provider {
		public static InfinispanMessageLogger getLog(Class clazz) {
			return Logger.getMessageLogger(InfinispanMessageLogger.class, clazz.getName());
		}
	}

	@Message(value = "Pending-puts cache must not be clustered!", id = 25001)
	CacheException pendingPutsMustNotBeClustered();

	@Message(value = "Pending-puts cache must not be transactional!", id = 25002)
	CacheException pendingPutsMustNotBeTransactional();

	@LogMessage(level = WARN)
	@Message(value = "Pending-puts cache configuration should be a template.", id = 25003)
	void pendingPutsShouldBeTemplate();

	@Message(value = "Pending-puts cache must have expiration.max-idle set", id = 25004)
	CacheException pendingPutsMustHaveMaxIdle();

	@LogMessage(level = WARN)
	@Message(value = "Property '" + InfinispanRegionFactory.INFINISPAN_USE_SYNCHRONIZATION_PROP + "' is deprecated; 2LC with transactional cache must always use synchronizations.", id = 25005)
	void propertyUseSynchronizationDeprecated();

	@LogMessage(level = ERROR)
	@Message(value = "Custom cache configuration '%s' was requested for type %s but it was not found!", id = 25006)
	void customConfigForTypeNotFound(String cacheName, String type);

	@LogMessage(level = ERROR)
	@Message(value = "Custom cache configuration '%s' was requested for region %s but it was not found - using configuration by type (%s).", id = 25007)
	void customConfigForRegionNotFound(String templateCacheName, String regionName, String type);

	@Message(value = "Timestamps cache must not use eviction!", id = 25008)
	CacheException timestampsMustNotUseEviction();

	@Message(value = "Unable to start region factory", id = 25009)
	CacheException unableToStart(@Cause Throwable t);

	@Message(value = "Unable to create default cache manager", id = 25010)
	CacheException unableToCreateCacheManager(@Cause Throwable t);

	@Message(value = "Infinispan custom cache command factory not installed (possibly because the classloader where Infinispan lives couldn't find the Hibernate Infinispan cache provider)", id = 25011)
	CacheException cannotInstallCommandFactory();

	@LogMessage(level = WARN)
	@Message(value = "Requesting TRANSACTIONAL cache concurrency strategy but the cache is not configured as transactional.", id = 25012)
	void transactionalStrategyNonTransactionalCache();

	@LogMessage(level = WARN)
	@Message(value = "Requesting READ_WRITE cache concurrency strategy but the cache was configured as transactional.", id = 25013)
	void readWriteStrategyTransactionalCache();

	@LogMessage(level = WARN)
	@Message(value = "Setting eviction on cache using tombstones can introduce inconsistencies!", id = 25014)
	void evictionWithTombstones();

	@LogMessage(level = ERROR)
	@Message(value = "Failure updating cache in afterCompletion, will retry", id = 25015)
	void failureInAfterCompletion(@Cause Exception e);

	@LogMessage(level = ERROR)
	@Message(value = "Failed to end invalidating pending putFromLoad calls for key %s from region %s; the key won't be cached until invalidation expires.", id = 25016)
	void failedEndInvalidating(Object key, String name);

	@Message(value = "Unable to retrieve CacheManager from JNDI [%s]", id = 25017)
	CacheException unableToRetrieveCmFromJndi(String jndiNamespace);

	@LogMessage(level = WARN)
	@Message(value = "Unable to release initial context", id = 25018)
	void unableToReleaseContext(@Cause NamingException ne);

	@LogMessage(level = WARN)
	@Message(value = "Use non-transactional query caches for best performance!", id = 25019)
	void useNonTransactionalQueryCache();

	@LogMessage(level = ERROR)
	@Message(value = "Unable to broadcast invalidations as a part of the prepare phase. Rolling back.", id = 25020)
	void unableToRollbackInvalidationsDuringPrepare(@Cause Throwable t);

	@Message(value = "Could not suspend transaction", id = 25021)
	CacheException cannotSuspendTx(@Cause SystemException se);

	@Message(value = "Could not resume transaction", id = 25022)
	CacheException cannotResumeTx(@Cause Exception e);

	@Message(value = "Unable to get current transaction", id = 25023)
	CacheException cannotGetCurrentTx(@Cause SystemException e);

	@Message(value = "Failed to invalidate pending putFromLoad calls for key %s from region %s", id = 25024)
	CacheException failedInvalidatePendingPut(Object key, String regionName);

	@LogMessage(level = ERROR)
	@Message(value = "Failed to invalidate pending putFromLoad calls for region %s", id = 25025)
	void failedInvalidateRegion(String regionName);

	@Message(value = "Property '" + JndiInfinispanRegionFactory.CACHE_MANAGER_RESOURCE_PROP + "' not set", id = 25026)
	CacheException propertyCacheManagerResourceNotSet();

	@Message(value = "Timestamp cache cannot be configured with invalidation", id = 25027)
	CacheException timestampsMustNotUseInvalidation();
}
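The static Provider class above is the workaround call sites used to obtain the typed logger; a minimal call-site sketch (the owning class name is illustrative, the methods are those declared on the interface above):

// Sketch only: how the removed code typically declared and used the message logger.
private static final InfinispanMessageLogger log =
		InfinispanMessageLogger.Provider.getLog( PutFromLoadValidator.class );

// @Message methods returning CacheException are thrown; @LogMessage methods just log.
throw log.pendingPutsMustNotBeClustered();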
@@ -1,72 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.cache.infinispan.util;

import java.sql.Connection;
import java.sql.SQLException;
import javax.transaction.Status;
import javax.transaction.Synchronization;

import org.hibernate.HibernateException;
import org.hibernate.jdbc.WorkExecutor;
import org.hibernate.jdbc.WorkExecutorVisitable;
import org.hibernate.resource.transaction.spi.TransactionCoordinator;

/**
 * @author Radim Vansa <rvansa@redhat.com>
 */
public abstract class InvocationAfterCompletion implements Synchronization {
	protected static final InfinispanMessageLogger log = InfinispanMessageLogger.Provider.getLog( InvocationAfterCompletion.class );

	protected final TransactionCoordinator tc;
	protected final boolean requiresTransaction;

	public InvocationAfterCompletion(TransactionCoordinator tc, boolean requiresTransaction) {
		this.tc = tc;
		this.requiresTransaction = requiresTransaction;
	}

	@Override
	public void beforeCompletion() {
	}

	@Override
	public void afterCompletion(int status) {
		switch (status) {
			case Status.STATUS_COMMITTING:
			case Status.STATUS_COMMITTED:
				invokeIsolated(true);
				break;
			default:
				// it would be nicer to react only on ROLLING_BACK and ROLLED_BACK statuses
				// but TransactionCoordinator gives us UNKNOWN on rollback
				invokeIsolated(false);
				break;
		}
	}

	protected void invokeIsolated(final boolean success) {
		try {
			// TODO: isolation without obtaining Connection -> needs HHH-9993
			tc.createIsolationDelegate().delegateWork(new WorkExecutorVisitable<Void>() {
				@Override
				public Void accept(WorkExecutor<Void> executor, Connection connection) throws SQLException {
					invoke(success);
					return null;
				}
			}, requiresTransaction);
		}
		catch (HibernateException e) {
			// silently fail any exceptions
			if (log.isTraceEnabled()) {
				log.trace("Exception during query cache update", e);
			}
		}
	}

	protected abstract void invoke(boolean success);
}
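The only contract a subclass has to fulfil is invoke(boolean); a minimal sketch of a hypothetical subclass (the class name and the Runnable field are illustrative, not taken from the removed sources):

// Sketch: run some cache update only when the surrounding transaction committed.
public class RunAfterCommit extends InvocationAfterCompletion {
	private final Runnable update;

	public RunAfterCommit(TransactionCoordinator tc, boolean requiresTransaction, Runnable update) {
		super( tc, requiresTransaction );
		this.update = update;
	}

	@Override
	protected void invoke(boolean success) {
		if ( success ) {
			update.run();
		}
	}
}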
@@ -1,30 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.cache.infinispan.util;

import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.factories.GlobalComponentRegistry;
import org.infinispan.lifecycle.AbstractModuleLifecycle;

import java.util.Map;

public class LifecycleCallbacks extends AbstractModuleLifecycle {

	@Override
	public void cacheManagerStarting(GlobalComponentRegistry gcr, GlobalConfiguration globalCfg) {
		Map<Integer, AdvancedExternalizer<?>> externalizerMap = globalCfg.serialization().advancedExternalizers();
		externalizerMap.put( Externalizers.UUID, new Externalizers.UUIDExternalizer() );
		externalizerMap.put( Externalizers.TOMBSTONE, new Tombstone.Externalizer() );
		externalizerMap.put( Externalizers.EXCLUDE_TOMBSTONES_FILTER, new Tombstone.ExcludeTombstonesFilterExternalizer() );
		externalizerMap.put( Externalizers.TOMBSTONE_UPDATE, new TombstoneUpdate.Externalizer() );
		externalizerMap.put( Externalizers.FUTURE_UPDATE, new FutureUpdate.Externalizer() );
		externalizerMap.put( Externalizers.VERSIONED_ENTRY, new VersionedEntry.Externalizer() );
		externalizerMap.put( Externalizers.EXCLUDE_EMPTY_EXTRACT_VALUE, new VersionedEntry.ExcludeEmptyExtractValueExternalizer() );
	}

}
@@ -1,19 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.cache.infinispan.util;

import org.infinispan.factories.components.ModuleMetadataFileFinder;

/**
 * @author Radim Vansa <rvansa@redhat.com>
 */
public class SecondLevelCacheMetadataModuleFinder implements ModuleMetadataFileFinder {
	@Override
	public String getMetadataFilename() {
		return "hibernate-infinispan-component-metadata.dat";
	}
}
@@ -1,218 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.cache.infinispan.util;

import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.filter.KeyValueFilter;
import org.infinispan.metadata.Metadata;

import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import java.util.UUID;

/**
 * This is used both as the storage in entry, and for efficiency also directly in the cache.put() commands.
 *
 * @author Radim Vansa <rvansa@redhat.com>
 */
public class Tombstone {
	public static final ExcludeTombstonesFilter EXCLUDE_TOMBSTONES = new ExcludeTombstonesFilter();

	// the format of data is repeated (timestamp, UUID.LSB, UUID.MSB)
	private final long[] data;

	public Tombstone(UUID uuid, long timestamp) {
		this.data = new long[] { timestamp, uuid.getLeastSignificantBits(), uuid.getMostSignificantBits() };
	}

	private Tombstone(long[] data) {
		this.data = data;
	}

	public long getLastTimestamp() {
		long max = data[0];
		for (int i = 3; i < data.length; i += 3) {
			max = Math.max(max, data[i]);
		}
		return max;
	}

	@Override
	public String toString() {
		final StringBuilder sb = new StringBuilder("Tombstone{");
		for (int i = 0; i < data.length; i += 3) {
			if (i != 0) {
				sb.append(", ");
			}
			sb.append(new UUID(data[i + 2], data[i + 1])).append('=').append(data[i]);
		}
		sb.append('}');
		return sb.toString();
	}

	public Tombstone merge(Tombstone update) {
		assert update != null;
		assert update.data.length == 3;
		int toRemove = 0;
		for (int i = 0; i < data.length; i += 3) {
			if (data[i] < update.data[0]) {
				toRemove += 3;
			}
			else if (update.data[1] == data[i + 1] && update.data[2] == data[i + 2]) {
				// UUID matches - second update during retry?
				toRemove += 3;
			}
		}
		if (data.length == toRemove) {
			// applying the update second time?
			return update;
		}
		else {
			long[] newData = new long[data.length - toRemove + 3]; // 3 for the update
			int j = 0;
			boolean uuidMatch = false;
			for (int i = 0; i < data.length; i += 3) {
				if (data[i] < update.data[0]) {
					// This is an old eviction
					continue;
				}
				else if (update.data[1] == data[i + 1] && update.data[2] == data[i + 2]) {
					// UUID matches
					System.arraycopy(update.data, 0, newData, j, 3);
					uuidMatch = true;
					j += 3;
				}
				else {
					System.arraycopy(data, i, newData, j, 3);
					j += 3;
				}
			}
			assert (uuidMatch && j == newData.length) || (!uuidMatch && j == newData.length - 3);
			if (!uuidMatch) {
				System.arraycopy(update.data, 0, newData, j, 3);
			}
			return new Tombstone(newData);
		}
	}

	public Object applyUpdate(UUID uuid, long timestamp, Object value) {
		int toRemove = 0;
		for (int i = 0; i < data.length; i += 3) {
			if (data[i] < timestamp) {
				toRemove += 3;
			}
			else if (uuid.getLeastSignificantBits() == data[i + 1] && uuid.getMostSignificantBits() == data[i + 2]) {
				toRemove += 3;
			}
		}
		if (data.length == toRemove) {
			if (value == null) {
				return new Tombstone(uuid, timestamp);
			}
			else {
				return value;
			}
		}
		else {
			long[] newData = new long[data.length - toRemove + 3]; // 3 for the update
			int j = 0;
			boolean uuidMatch = false;
			for (int i = 0; i < data.length; i += 3) {
				if (data[i] < timestamp) {
					// This is an old eviction
					continue;
				}
				else if (uuid.getLeastSignificantBits() == data[i + 1] && uuid.getMostSignificantBits() == data[i + 2]) {
					newData[j] = timestamp;
					newData[j + 1] = uuid.getLeastSignificantBits();
					newData[j + 2] = uuid.getMostSignificantBits();
					uuidMatch = true;
					j += 3;
				}
				else {
					System.arraycopy(data, i, newData, j, 3);
					j += 3;
				}
			}
			assert (uuidMatch && j == newData.length) || (!uuidMatch && j == newData.length - 3);
			if (!uuidMatch) {
				newData[j] = timestamp;
				newData[j + 1] = uuid.getLeastSignificantBits();
				newData[j + 2] = uuid.getMostSignificantBits();
			}
			return new Tombstone(newData);
		}
	}

	// Used only for testing purposes
	public int size() {
		return data.length / 3;
	}

	public static class Externalizer implements AdvancedExternalizer<Tombstone> {
		@Override
		public Set<Class<? extends Tombstone>> getTypeClasses() {
			return Collections.<Class<? extends Tombstone>>singleton(Tombstone.class);
		}

		@Override
		public Integer getId() {
			return Externalizers.TOMBSTONE;
		}

		@Override
		public void writeObject(ObjectOutput output, Tombstone tombstone) throws IOException {
			output.writeInt(tombstone.data.length);
			for (int i = 0; i < tombstone.data.length; ++i) {
				output.writeLong(tombstone.data[i]);
			}
		}

		@Override
		public Tombstone readObject(ObjectInput input) throws IOException, ClassNotFoundException {
			int length = input.readInt();
			long[] data = new long[length];
			for (int i = 0; i < data.length; ++i) {
				data[i] = input.readLong();
			}
			return new Tombstone(data);
		}
	}

	public static class ExcludeTombstonesFilter implements KeyValueFilter {
		private ExcludeTombstonesFilter() {}

		@Override
		public boolean accept(Object key, Object value, Metadata metadata) {
			return !(value instanceof Tombstone);
		}
	}

	public static class ExcludeTombstonesFilterExternalizer implements AdvancedExternalizer<ExcludeTombstonesFilter> {
		@Override
		public Set<Class<? extends ExcludeTombstonesFilter>> getTypeClasses() {
			return Collections.<Class<? extends ExcludeTombstonesFilter>>singleton(ExcludeTombstonesFilter.class);
		}

		@Override
		public Integer getId() {
			return Externalizers.EXCLUDE_TOMBSTONES_FILTER;
		}

		@Override
		public void writeObject(ObjectOutput output, ExcludeTombstonesFilter object) throws IOException {
		}

		@Override
		public ExcludeTombstonesFilter readObject(ObjectInput input) throws IOException, ClassNotFoundException {
			return EXCLUDE_TOMBSTONES;
		}
	}
}
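The merge() logic above works on (timestamp, UUID.LSB, UUID.MSB) triplets: triplets older than the incoming update are dropped, a triplet with the same UUID is replaced, and anything else is kept. A short illustrative sketch of that behaviour (values and UUIDs are arbitrary; the assertions follow from the code above, not from any removed test):

// Illustrative only: exercising Tombstone.merge with two distinct "sessions".
UUID s1 = UUID.randomUUID();
UUID s2 = UUID.randomUUID();
Tombstone t = new Tombstone( s1, 100L );
t = t.merge( new Tombstone( s2, 90L ) );   // older timestamp, different UUID: both triplets kept
assert t.size() == 2;
t = t.merge( new Tombstone( s2, 95L ) );   // same UUID as s2: its triplet is replaced, s1 kept
assert t.size() == 2 && t.getLastTimestamp() == 100L;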
@@ -1,75 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.cache.infinispan.util;

import org.infinispan.commons.marshall.AdvancedExternalizer;

import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;

/**
 * Request to update cache either as a result of putFromLoad (if {@link #getValue()} is non-null)
 * or evict (if it is null).
 *
 * This object should *not* be stored in cache.
 *
 * @author Radim Vansa <rvansa@redhat.com>
 */
public class TombstoneUpdate<T> {
	private final long timestamp;
	private final T value;

	public TombstoneUpdate(long timestamp, T value) {
		this.timestamp = timestamp;
		this.value = value;
	}

	public long getTimestamp() {
		return timestamp;
	}

	public T getValue() {
		return value;
	}

	@Override
	public String toString() {
		final StringBuilder sb = new StringBuilder("TombstoneUpdate{");
		sb.append("timestamp=").append(timestamp);
		sb.append(", value=").append(value);
		sb.append('}');
		return sb.toString();
	}

	public static class Externalizer implements AdvancedExternalizer<TombstoneUpdate> {
		@Override
		public Set<Class<? extends TombstoneUpdate>> getTypeClasses() {
			return Collections.singleton(TombstoneUpdate.class);
		}

		@Override
		public Integer getId() {
			return Externalizers.TOMBSTONE_UPDATE;
		}

		@Override
		public void writeObject(ObjectOutput output, TombstoneUpdate object) throws IOException {
			output.writeObject(object.getValue());
			output.writeLong(object.getTimestamp());
		}

		@Override
		public TombstoneUpdate readObject(ObjectInput input) throws IOException, ClassNotFoundException {
			Object value = input.readObject();
			long timestamp = input.readLong();
			return new TombstoneUpdate(timestamp, value);
		}
	}
}
@@ -1,122 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.cache.infinispan.util;

import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.filter.Converter;
import org.infinispan.filter.KeyValueFilter;
import org.infinispan.metadata.Metadata;

import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;

/**
 * @author Radim Vansa <rvansa@redhat.com>
 */
public class VersionedEntry {
	public static final ExcludeEmptyFilter EXCLUDE_EMPTY_EXTRACT_VALUE = new ExcludeEmptyFilter();
	private final Object value;
	private final Object version;
	private final long timestamp;

	public VersionedEntry(Object value, Object version, long timestamp) {
		this.value = value;
		this.version = version;
		this.timestamp = timestamp;
	}

	public Object getValue() {
		return value;
	}

	public Object getVersion() {
		return version;
	}

	public long getTimestamp() {
		return timestamp;
	}

	@Override
	public String toString() {
		final StringBuilder sb = new StringBuilder("VersionedEntry{");
		sb.append("value=").append(value);
		sb.append(", version=").append(version);
		sb.append(", timestamp=").append(timestamp);
		sb.append('}');
		return sb.toString();
	}

	private static class ExcludeEmptyFilter implements KeyValueFilter<Object, Object>, Converter<Object, Object, Object> {
		@Override
		public boolean accept(Object key, Object value, Metadata metadata) {
			if (value instanceof VersionedEntry) {
				return ((VersionedEntry) value).getValue() != null;
			}
			return true;
		}

		@Override
		public Object convert(Object key, Object value, Metadata metadata) {
			if (value instanceof VersionedEntry) {
				return ((VersionedEntry) value).getValue();
			}
			return value;
		}
	}

	public static class Externalizer implements AdvancedExternalizer<VersionedEntry> {
		@Override
		public Set<Class<? extends VersionedEntry>> getTypeClasses() {
			return Collections.<Class<? extends VersionedEntry>>singleton(VersionedEntry.class);
		}

		@Override
		public Integer getId() {
			return Externalizers.VERSIONED_ENTRY;
		}

		@Override
		public void writeObject(ObjectOutput output, VersionedEntry object) throws IOException {
			output.writeObject(object.value);
			output.writeObject(object.version);
			output.writeLong(object.timestamp);
		}

		@Override
		public VersionedEntry readObject(ObjectInput input) throws IOException, ClassNotFoundException {
			Object value = input.readObject();
			Object version = input.readObject();
			long timestamp = input.readLong();
			return new VersionedEntry(value, version, timestamp);
		}
	}

	public static class ExcludeEmptyExtractValueExternalizer implements AdvancedExternalizer<ExcludeEmptyFilter> {
		@Override
		public Set<Class<? extends ExcludeEmptyFilter>> getTypeClasses() {
			return Collections.<Class<? extends ExcludeEmptyFilter>>singleton(ExcludeEmptyFilter.class);
		}

		@Override
		public Integer getId() {
			return Externalizers.EXCLUDE_EMPTY_EXTRACT_VALUE;
		}

		@Override
		public void writeObject(ObjectOutput output, ExcludeEmptyFilter object) throws IOException {
		}

		@Override
		public ExcludeEmptyFilter readObject(ObjectInput input) throws IOException, ClassNotFoundException {
			return EXCLUDE_EMPTY_EXTRACT_VALUE;
		}
	}
}
@@ -1,11 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */

/**
 * Internal utilities for the Infinispan integration
 */
package org.hibernate.cache.infinispan.util;
@@ -1,13 +0,0 @@
#
# Hibernate, Relational Persistence for Idiomatic Java
#
# License: GNU Lesser General Public License (LGPL), version 2.1 or later.
# See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
#
#
# Hibernate, Relational Persistence for Idiomatic Java
#
# License: GNU Lesser General Public License (LGPL), version 2.1 or later.
# See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
#
org.hibernate.cache.infinispan.StrategyRegistrationProviderImpl
@@ -1,13 +0,0 @@
#
# Hibernate, Relational Persistence for Idiomatic Java
#
# License: GNU Lesser General Public License (LGPL), version 2.1 or later.
# See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
#
#
# Hibernate, Relational Persistence for Idiomatic Java
#
# License: GNU Lesser General Public License (LGPL), version 2.1 or later.
# See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
#
org.hibernate.cache.infinispan.util.CacheCommandExtensions
@@ -1 +0,0 @@
org.hibernate.cache.infinispan.util.SecondLevelCacheMetadataModuleFinder
@@ -1 +0,0 @@
org.hibernate.cache.infinispan.util.LifecycleCallbacks
@@ -1,16 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>

<!--
  ~ Hibernate, Relational Persistence for Idiomatic Java
  ~
  ~ License: GNU Lesser General Public License (LGPL), version 2.1 or later.
  ~ See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
  -->
<blueprint default-activation="eager"
           xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
           xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">

    <bean id="strategyRegistrationProvider" class="org.hibernate.cache.infinispan.StrategyRegistrationProviderImpl"/>
    <service ref="strategyRegistrationProvider" interface="org.hibernate.boot.registry.selector.StrategyRegistrationProvider"/>

</blueprint>
@@ -1,49 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
  ~ Hibernate, Relational Persistence for Idiomatic Java
  ~
  ~ License: GNU Lesser General Public License (LGPL), version 2.1 or later.
  ~ See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
  -->
<infinispan xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
            xmlns="urn:infinispan:config:8.2"
            xsi:schemaLocation="urn:infinispan:config:8.2 http://www.infinispan.org/schemas/infinispan-config-8.2.xsd">

    <!-- This configuration is suitable for non-clustered environments, where only single instance accesses the DB -->
    <cache-container name="SampleCacheManager" statistics="false" default-cache="the-default-cache" shutdown-hook="DEFAULT">
        <jmx duplicate-domains="true"/>

        <local-cache-configuration name="the-default-cache" statistics="false" />

        <!-- Default configuration is appropriate for entity/collection caching. -->
        <local-cache-configuration name="entity" simple-cache="true" statistics="false" statistics-available="false">
            <transaction mode="NONE" />
            <eviction size="10000" strategy="LRU"/>
            <expiration max-idle="100000" interval="5000"/>
        </local-cache-configuration>

        <!-- A config appropriate for query caching. Does not replicate queries. -->
        <local-cache-configuration name="local-query" simple-cache="true" statistics="false" statistics-available="false">
            <transaction mode="NONE" />
            <eviction size="10000" strategy="LRU"/>
            <expiration max-idle="100000" interval="5000"/>
        </local-cache-configuration>

        <local-cache-configuration name="timestamps" simple-cache="true" statistics="false" statistics-available="false">
            <locking concurrency-level="1000" acquire-timeout="15000"/>
            <!-- Explicitly non transactional -->
            <transaction mode="NONE"/>
            <!-- Don't ever evict modification timestamps -->
            <eviction strategy="NONE"/>
            <expiration interval="0"/>
        </local-cache-configuration>

        <!-- When providing custom configuration, always make this cache local and non-transactional.
             To avoid possible leaks, use expiration (max idle time). Optimize for speed.-->
        <local-cache-configuration name="pending-puts" simple-cache="true" statistics="false" statistics-available="false">
            <transaction mode="NONE"/>
            <expiration max-idle="60000" />
        </local-cache-configuration>
    </cache-container>

</infinispan>
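A configuration file like the one above was selected through Hibernate settings rather than referenced directly; a hedged sketch (the property keys are the ones this module is generally documented to use, and the resource path shown is an assumption, not taken from this diff):

// Sketch only: wiring the removed region factory to a custom Infinispan config.
Map<String, Object> settings = new HashMap<>();
settings.put( "hibernate.cache.use_second_level_cache", "true" );
settings.put( "hibernate.cache.region.factory_class",
		"org.hibernate.cache.infinispan.InfinispanRegionFactory" );
// assumed classpath location of a file such as the local configuration above
settings.put( "hibernate.cache.infinispan.cfg",
		"org/hibernate/cache/infinispan/builder/infinispan-configs-local.xml" );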
@@ -1,72 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
  ~ Hibernate, Relational Persistence for Idiomatic Java
  ~
  ~ License: GNU Lesser General Public License (LGPL), version 2.1 or later.
  ~ See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
  -->
<infinispan xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
            xmlns="urn:infinispan:config:8.2"
            xsi:schemaLocation="urn:infinispan:config:8.2 http://www.infinispan.org/schemas/infinispan-config-8.2.xsd">

    <jgroups>
        <stack-file name="hibernate-jgroups" path="${hibernate.cache.infinispan.jgroups_cfg:default-configs/default-jgroups-tcp.xml}"/>
    </jgroups>

    <cache-container name="SampleCacheManager" statistics="false" default-cache="the-default-cache" shutdown-hook="DEFAULT">
        <transport stack="hibernate-jgroups" cluster="infinispan-hibernate-cluster"/>
        <jmx duplicate-domains="true"/>

        <local-cache-configuration name="the-default-cache" statistics="false" />

        <!-- Default configuration is appropriate for entity/collection caching. -->
        <invalidation-cache-configuration name="entity" mode="SYNC" remote-timeout="20000" statistics="false" statistics-available="false">
            <locking concurrency-level="1000" acquire-timeout="15000"/>
            <transaction mode="NONE" />
            <eviction size="10000" strategy="LRU"/>
            <expiration max-idle="100000" interval="5000"/>
        </invalidation-cache-configuration>

        <replicated-cache-configuration name="replicated-entity" mode="SYNC" remote-timeout="20000" statistics="false" statistics-available="false">
            <locking concurrency-level="1000" acquire-timeout="15000"/>
            <transaction mode="NONE" />
            <expiration max-idle="100000" interval="5000"/>
        </replicated-cache-configuration>

        <!-- A config appropriate for query caching. Does not replicate queries. -->
        <local-cache-configuration name="local-query" statistics="false" statistics-available="false">
            <locking concurrency-level="1000" acquire-timeout="15000"/>
            <transaction mode="NONE" />
            <eviction size="10000" strategy="LRU"/>
            <expiration max-idle="100000" interval="5000"/>
        </local-cache-configuration>

        <!-- A query cache that replicates queries. Replication is asynchronous. -->
        <replicated-cache-configuration name="replicated-query" mode="ASYNC" statistics="false" statistics-available="false">
            <locking concurrency-level="1000" acquire-timeout="15000"/>
            <transaction mode="NONE" />
            <eviction size="10000" strategy="LRU"/>
            <expiration max-idle="100000" interval="5000"/>
        </replicated-cache-configuration>

        <!-- Optimized for timestamp caching. A clustered timestamp cache
             is required if query caching is used, even if the query cache
             itself is configured with CacheMode=LOCAL. -->
        <replicated-cache-configuration name="timestamps" mode="ASYNC" statistics="false" statistics-available="false">
            <locking concurrency-level="1000" acquire-timeout="15000"/>
            <!-- Explicitly non transactional -->
            <transaction mode="NONE"/>
            <!-- Don't ever evict modification timestamps -->
            <eviction strategy="NONE"/>
            <expiration interval="0"/>
        </replicated-cache-configuration>

        <!-- When providing custom configuration, always make this cache local and non-transactional.
             To avoid possible leaks, use expiration (max idle time). Optimize for speed.-->
        <local-cache-configuration name="pending-puts" simple-cache="true" statistics="false" statistics-available="false">
            <transaction mode="NONE"/>
            <expiration max-idle="60000" />
        </local-cache-configuration>
    </cache-container>

</infinispan>
@ -1,126 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.test.cache.infinispan;
|
||||
|
||||
import java.util.Properties;
|
||||
|
||||
import org.hibernate.boot.registry.StandardServiceRegistry;
|
||||
import org.hibernate.boot.registry.StandardServiceRegistryBuilder;
|
||||
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
|
||||
import org.hibernate.cache.internal.CacheDataDescriptionImpl;
|
||||
import org.hibernate.cache.spi.CacheDataDescription;
|
||||
import org.hibernate.cache.spi.RegionFactory;
|
||||
import org.hibernate.cache.spi.TransactionalDataRegion;
|
||||
import org.hibernate.cache.spi.access.AccessType;
|
||||
import org.hibernate.internal.util.compare.ComparableComparator;
|
||||
import org.hibernate.test.cache.infinispan.util.CacheTestUtil;
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
/**
|
||||
* Base class for tests of EntityRegion and CollectionRegion implementations.
|
||||
*
|
||||
* @author Galder Zamarreño
|
||||
* @since 3.5
|
||||
*/
|
||||
public abstract class AbstractEntityCollectionRegionTest extends AbstractRegionImplTest {
|
||||
protected static CacheDataDescription MUTABLE_NON_VERSIONED = new CacheDataDescriptionImpl(true, false, ComparableComparator.INSTANCE, null);
|
||||
|
||||
@Test
|
||||
public void testSupportedAccessTypes() throws Exception {
|
||||
supportedAccessTypeTest();
|
||||
}
|
||||
|
||||
private void supportedAccessTypeTest() throws Exception {
|
||||
StandardServiceRegistryBuilder ssrb = createStandardServiceRegistryBuilder();
|
||||
final StandardServiceRegistry registry = ssrb.build();
|
||||
try {
|
||||
InfinispanRegionFactory regionFactory = CacheTestUtil.startRegionFactory(
|
||||
registry,
|
||||
getCacheTestSupport()
|
||||
);
|
||||
supportedAccessTypeTest( regionFactory, CacheTestUtil.toProperties( ssrb.getSettings() ) );
|
||||
}
|
||||
finally {
|
||||
StandardServiceRegistryBuilder.destroy( registry );
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a Region using the given factory, and then ensure that it handles calls to
|
||||
* buildAccessStrategy as expected when all the various {@link AccessType}s are passed as
|
||||
* arguments.
|
||||
*/
|
||||
protected abstract void supportedAccessTypeTest(RegionFactory regionFactory, Properties properties);
|
||||
|
||||
@Test
|
||||
public void testIsTransactionAware() throws Exception {
|
||||
StandardServiceRegistryBuilder ssrb = CacheTestUtil.buildBaselineStandardServiceRegistryBuilder(
|
||||
"test",
|
||||
InfinispanRegionFactory.class,
|
||||
true,
|
||||
false,
|
||||
jtaPlatform
|
||||
);
|
||||
final StandardServiceRegistry registry = ssrb.build();
|
||||
try {
|
||||
Properties properties = CacheTestUtil.toProperties( ssrb.getSettings() );
|
||||
InfinispanRegionFactory regionFactory = CacheTestUtil.startRegionFactory(
|
||||
registry,
|
||||
getCacheTestSupport()
|
||||
);
|
||||
TransactionalDataRegion region = (TransactionalDataRegion) createRegion(
|
||||
regionFactory,
|
||||
"test/test",
|
||||
properties,
|
||||
getCacheDataDescription()
|
||||
);
|
||||
assertTrue( "Region is transaction-aware", region.isTransactionAware() );
|
||||
CacheTestUtil.stopRegionFactory( regionFactory, getCacheTestSupport() );
|
||||
}
|
||||
finally {
|
||||
StandardServiceRegistryBuilder.destroy( registry );
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetCacheDataDescription() throws Exception {
|
||||
StandardServiceRegistryBuilder ssrb = CacheTestUtil.buildBaselineStandardServiceRegistryBuilder(
|
||||
"test",
|
||||
InfinispanRegionFactory.class,
|
||||
true,
|
||||
false,
|
||||
jtaPlatform
|
||||
);
|
||||
final StandardServiceRegistry registry = ssrb.build();
|
||||
try {
|
||||
Properties properties = CacheTestUtil.toProperties( ssrb.getSettings() );
|
||||
InfinispanRegionFactory regionFactory = CacheTestUtil.startRegionFactory(
|
||||
registry,
|
||||
getCacheTestSupport()
|
||||
);
|
||||
TransactionalDataRegion region = (TransactionalDataRegion) createRegion(
|
||||
regionFactory,
|
||||
"test/test",
|
||||
properties,
|
||||
getCacheDataDescription()
|
||||
);
|
||||
CacheDataDescription cdd = region.getCacheDataDescription();
|
||||
assertNotNull( cdd );
|
||||
CacheDataDescription expected = getCacheDataDescription();
|
||||
assertEquals( expected.isMutable(), cdd.isMutable() );
|
||||
assertEquals( expected.isVersioned(), cdd.isVersioned() );
|
||||
assertEquals( expected.getVersionComparator(), cdd.getVersionComparator() );
|
||||
}
|
||||
finally {
|
||||
StandardServiceRegistryBuilder.destroy( registry );
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,74 +0,0 @@
|
|||
package org.hibernate.test.cache.infinispan;
|
||||
|
||||
import org.hibernate.cache.internal.CacheDataDescriptionImpl;
|
||||
import org.hibernate.cache.spi.CacheDataDescription;
|
||||
import org.hibernate.cache.spi.access.RegionAccessStrategy;
|
||||
import org.hibernate.cache.spi.access.SoftLock;
|
||||
import org.hibernate.engine.spi.SharedSessionContractImplementor;
|
||||
import org.hibernate.internal.util.compare.ComparableComparator;
|
||||
|
||||
import org.hibernate.test.cache.infinispan.util.TestingKeyFactory;
|
||||
import org.hibernate.testing.AfterClassOnce;
|
||||
import org.hibernate.testing.BeforeClassOnce;
|
||||
import org.infinispan.test.fwk.TestResourceTracker;
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.junit.Assert.assertNull;
|
||||
import static org.mockito.Mockito.mock;
|
||||
|
||||
/**
|
||||
* @author Radim Vansa <rvansa@redhat.com>
|
||||
*/
|
||||
public abstract class AbstractExtraAPITest<S extends RegionAccessStrategy> extends AbstractNonFunctionalTest {
|
||||
|
||||
public static final String REGION_NAME = "test/com.foo.test";
|
||||
public static final Object KEY = TestingKeyFactory.generateCollectionCacheKey( "KEY" );
|
||||
public static final CacheDataDescription CACHE_DATA_DESCRIPTION
|
||||
= new CacheDataDescriptionImpl(true, true, ComparableComparator.INSTANCE, null);
|
||||
protected static final SharedSessionContractImplementor SESSION = mock(SharedSessionContractImplementor.class);
|
||||
|
||||
protected S accessStrategy;
|
||||
protected NodeEnvironment environment;
|
||||
|
||||
@BeforeClassOnce
|
||||
public final void prepareLocalAccessStrategy() throws Exception {
|
||||
TestResourceTracker.testStarted(getClass().getSimpleName());
|
||||
environment = new NodeEnvironment( createStandardServiceRegistryBuilder() );
|
||||
environment.prepare();
|
||||
|
||||
accessStrategy = getAccessStrategy();
|
||||
}
|
||||
|
||||
protected abstract S getAccessStrategy();
|
||||
|
||||
@AfterClassOnce
|
||||
public final void releaseLocalAccessStrategy() throws Exception {
|
||||
if ( environment != null ) {
|
||||
environment.release();
|
||||
}
|
||||
TestResourceTracker.testFinished(getClass().getSimpleName());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testLockItem() {
|
||||
assertNull( accessStrategy.lockItem(SESSION, KEY, Integer.valueOf( 1 ) ) );
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testLockRegion() {
|
||||
assertNull( accessStrategy.lockRegion() );
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUnlockItem() {
|
||||
accessStrategy.unlockItem(SESSION, KEY, new MockSoftLock() );
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUnlockRegion() {
|
||||
accessStrategy.unlockItem(SESSION, KEY, new MockSoftLock() );
|
||||
}
|
||||
|
||||
public static class MockSoftLock implements SoftLock {
|
||||
}
|
||||
}
|
|
@ -1,226 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.test.cache.infinispan;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Properties;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.hibernate.SessionFactory;
|
||||
import org.hibernate.Transaction;
|
||||
import org.hibernate.boot.MetadataSources;
|
||||
import org.hibernate.boot.registry.StandardServiceRegistry;
|
||||
import org.hibernate.boot.registry.StandardServiceRegistryBuilder;
|
||||
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
|
||||
import org.hibernate.cache.infinispan.impl.BaseGeneralDataRegion;
|
||||
import org.hibernate.cache.infinispan.impl.BaseRegion;
|
||||
import org.hibernate.cache.spi.GeneralDataRegion;
|
||||
import org.hibernate.cache.spi.QueryResultsRegion;
|
||||
import org.hibernate.cache.spi.Region;
|
||||
import org.hibernate.cache.spi.RegionFactory;
|
||||
import org.hibernate.cache.spi.access.AccessType;
|
||||
import org.hibernate.cfg.AvailableSettings;
|
||||
import org.hibernate.engine.spi.SharedSessionContractImplementor;
|
||||
|
||||
import org.hibernate.test.cache.infinispan.util.CacheTestUtil;
|
||||
import org.hibernate.test.cache.infinispan.util.ExpectingInterceptor;
|
||||
import org.hibernate.test.cache.infinispan.util.TestInfinispanRegionFactory;
|
||||
import org.infinispan.commands.write.PutKeyValueCommand;
|
||||
import org.infinispan.commands.write.RemoveCommand;
|
||||
import org.infinispan.configuration.cache.CacheMode;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.infinispan.AdvancedCache;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
/**
|
||||
* Base class for tests of QueryResultsRegion and TimestampsRegion.
|
||||
*
|
||||
* @author Galder Zamarreño
|
||||
* @since 3.5
|
||||
*/
|
||||
public abstract class AbstractGeneralDataRegionTest extends AbstractRegionImplTest {
|
||||
protected static final String KEY = "Key";
|
||||
|
||||
protected static final String VALUE1 = "value1";
|
||||
protected static final String VALUE2 = "value2";
|
||||
protected static final String VALUE3 = "value3";
|
||||
|
||||
@Override
|
||||
public List<Object[]> getCacheModeParameters() {
|
||||
// the actual cache mode and access type is irrelevant for the general data regions
|
||||
return Arrays.<Object[]>asList(new Object[]{ CacheMode.INVALIDATION_SYNC, AccessType.TRANSACTIONAL });
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void putInRegion(Region region, Object key, Object value) {
|
||||
((GeneralDataRegion) region).put(null, key, value );
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void removeFromRegion(Region region, Object key) {
|
||||
((GeneralDataRegion) region).evict( key );
|
||||
}
|
||||
|
||||
protected interface SFRConsumer {
|
||||
void accept(List<SessionFactory> sessionFactories, List<GeneralDataRegion> regions) throws Exception;
|
||||
}
|
||||
|
||||
protected void withSessionFactoriesAndRegions(int num, SFRConsumer consumer) throws Exception {
|
||||
StandardServiceRegistryBuilder ssrb = createStandardServiceRegistryBuilder()
|
||||
.applySetting(AvailableSettings.CACHE_REGION_FACTORY, TestInfinispanRegionFactory.class.getName());
|
||||
Properties properties = CacheTestUtil.toProperties( ssrb.getSettings() );
|
||||
List<StandardServiceRegistry> registries = new ArrayList<>();
|
||||
List<SessionFactory> sessionFactories = new ArrayList<>();
|
||||
List<GeneralDataRegion> regions = new ArrayList<>();
|
||||
for (int i = 0; i < num; ++i) {
|
||||
StandardServiceRegistry registry = ssrb.build();
|
||||
registries.add(registry);
|
||||
|
||||
SessionFactory sessionFactory = new MetadataSources(registry).buildMetadata().buildSessionFactory();
|
||||
sessionFactories.add(sessionFactory);
|
||||
|
||||
InfinispanRegionFactory regionFactory = (InfinispanRegionFactory) registry.getService(RegionFactory.class);
|
||||
GeneralDataRegion region = (GeneralDataRegion) createRegion(
|
||||
regionFactory,
|
||||
getStandardRegionName( REGION_PREFIX ),
|
||||
properties,
|
||||
null
|
||||
);
|
||||
regions.add(region);
|
||||
}
|
||||
try {
|
||||
consumer.accept(sessionFactories, regions);
|
||||
} finally {
|
||||
for (SessionFactory sessionFactory : sessionFactories) {
|
||||
sessionFactory.close();
|
||||
}
|
||||
for (StandardServiceRegistry registry : registries) {
|
||||
StandardServiceRegistryBuilder.destroy( registry );
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testEvict() throws Exception {
|
||||
withSessionFactoriesAndRegions(2, ((sessionFactories, regions) -> {
|
||||
GeneralDataRegion localRegion = regions.get(0);
|
||||
GeneralDataRegion remoteRegion = regions.get(1);
|
||||
SharedSessionContractImplementor localSession = (SharedSessionContractImplementor) sessionFactories.get(0).openSession();
|
||||
SharedSessionContractImplementor remoteSession = (SharedSessionContractImplementor) sessionFactories.get(1).openSession();
|
||||
AdvancedCache localCache = ((BaseRegion) localRegion).getCache();
|
||||
AdvancedCache remoteCache = ((BaseRegion) remoteRegion).getCache();
|
||||
try {
|
||||
assertNull("local is clean", localRegion.get(localSession, KEY));
|
||||
assertNull("remote is clean", remoteRegion.get(remoteSession, KEY));
|
||||
|
||||
// If this node is backup owner, it will see the update once as originator and then when getting the value from primary
|
||||
boolean isLocalNodeBackupOwner = localCache.getDistributionManager().locate(KEY).indexOf(localCache.getCacheManager().getAddress()) > 0;
|
||||
CountDownLatch insertLatch = new CountDownLatch(isLocalNodeBackupOwner ? 3 : 2);
|
||||
ExpectingInterceptor.get(localCache).when((ctx, cmd) -> cmd instanceof PutKeyValueCommand).countDown(insertLatch);
|
||||
ExpectingInterceptor.get(remoteCache).when((ctx, cmd) -> cmd instanceof PutKeyValueCommand).countDown(insertLatch);
|
||||
|
||||
Transaction tx = localSession.getTransaction();
|
||||
tx.begin();
|
||||
try {
|
||||
localRegion.put(localSession, KEY, VALUE1);
|
||||
tx.commit();
|
||||
} catch (Exception e) {
|
||||
tx.rollback();
|
||||
throw e;
|
||||
}
|
||||
|
||||
assertTrue(insertLatch.await(2, TimeUnit.SECONDS));
|
||||
assertEquals(VALUE1, localRegion.get(localSession, KEY));
|
||||
assertEquals(VALUE1, remoteRegion.get(remoteSession, KEY));
|
||||
|
||||
CountDownLatch removeLatch = new CountDownLatch(isLocalNodeBackupOwner ? 3 : 2);
|
||||
ExpectingInterceptor.get(localCache).when((ctx, cmd) -> cmd instanceof RemoveCommand).countDown(removeLatch);
|
||||
ExpectingInterceptor.get(remoteCache).when((ctx, cmd) -> cmd instanceof RemoveCommand).countDown(removeLatch);
|
||||
|
||||
regionEvict(localRegion);
|
||||
|
||||
assertTrue(removeLatch.await(2, TimeUnit.SECONDS));
|
||||
assertEquals(null, localRegion.get(localSession, KEY));
|
||||
assertEquals(null, remoteRegion.get(remoteSession, KEY));
|
||||
} finally {
|
||||
localSession.close();
|
||||
remoteSession.close();
|
||||
|
||||
ExpectingInterceptor.cleanup(localCache, remoteCache);
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
protected void regionEvict(GeneralDataRegion region) throws Exception {
|
||||
region.evict(KEY);
|
||||
}
|
||||
|
||||
protected abstract String getStandardRegionName(String regionPrefix);
|
||||
|
||||
/**
|
||||
* Test method for {@link QueryResultsRegion#evictAll()}.
|
||||
* <p/>
|
||||
* FIXME add testing of the "immediately without regard for transaction isolation" bit in the
|
||||
* CollectionRegionAccessStrategy API.
|
||||
*/
|
||||
public void testEvictAll() throws Exception {
|
||||
withSessionFactoriesAndRegions(2, (sessionFactories, regions) -> {
|
||||
GeneralDataRegion localRegion = regions.get(0);
|
||||
GeneralDataRegion remoteRegion = regions.get(1);
|
||||
AdvancedCache localCache = ((BaseGeneralDataRegion) localRegion).getCache();
|
||||
AdvancedCache remoteCache = ((BaseGeneralDataRegion) remoteRegion).getCache();
|
||||
SharedSessionContractImplementor localSession = (SharedSessionContractImplementor) sessionFactories.get(0).openSession();
|
||||
SharedSessionContractImplementor remoteSession = (SharedSessionContractImplementor) sessionFactories.get(1).openSession();
|
||||
|
||||
try {
|
||||
Set localKeys = localCache.keySet();
|
||||
assertEquals( "No valid children in " + localKeys, 0, localKeys.size() );
|
||||
|
||||
Set remoteKeys = remoteCache.keySet();
|
||||
assertEquals( "No valid children in " + remoteKeys, 0, remoteKeys.size() );
|
||||
|
||||
assertNull( "local is clean", localRegion.get(null, KEY ) );
|
||||
assertNull( "remote is clean", remoteRegion.get(null, KEY ) );
|
||||
|
||||
localRegion.put(localSession, KEY, VALUE1);
|
||||
assertEquals( VALUE1, localRegion.get(null, KEY ) );
|
||||
|
||||
remoteRegion.put(remoteSession, KEY, VALUE1);
|
||||
assertEquals( VALUE1, remoteRegion.get(null, KEY ) );
|
||||
|
||||
localRegion.evictAll();
|
||||
|
||||
// This should re-establish the region root node in the optimistic case
|
||||
assertNull( localRegion.get(null, KEY ) );
|
||||
localKeys = localCache.keySet();
|
||||
assertEquals( "No valid children in " + localKeys, 0, localKeys.size() );
|
||||
|
||||
// Re-establishing the region root on the local node doesn't
|
||||
// propagate it to other nodes. Do a get on the remote node to re-establish
|
||||
// This only adds a node in the case of optimistic locking
|
||||
assertEquals( null, remoteRegion.get(null, KEY ) );
|
||||
remoteKeys = remoteCache.keySet();
|
||||
assertEquals( "No valid children in " + remoteKeys, 0, remoteKeys.size() );
|
||||
|
||||
assertEquals( "local is clean", null, localRegion.get(null, KEY ) );
|
||||
assertEquals( "remote is clean", null, remoteRegion.get(null, KEY ) );
|
||||
} finally {
|
||||
localSession.close();
|
||||
remoteSession.close();
|
||||
}
|
||||
|
||||
});
|
||||
}
|
||||
}
|
|
@ -1,203 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.test.cache.infinispan;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.Callable;
|
||||
import javax.transaction.TransactionManager;
|
||||
|
||||
import org.hibernate.Session;
|
||||
import org.hibernate.Transaction;
|
||||
import org.hibernate.boot.registry.StandardServiceRegistryBuilder;
|
||||
import org.hibernate.cache.infinispan.util.Caches;
|
||||
import org.hibernate.cache.spi.RegionFactory;
|
||||
import org.hibernate.cache.spi.access.AccessType;
|
||||
import org.hibernate.engine.spi.SharedSessionContractImplementor;
|
||||
import org.hibernate.engine.transaction.jta.platform.spi.JtaPlatform;
|
||||
import org.hibernate.resource.transaction.spi.TransactionStatus;
|
||||
|
||||
import org.hibernate.testing.junit4.BaseUnitTestCase;
|
||||
import org.hibernate.testing.junit4.CustomParameterized;
|
||||
import org.hibernate.test.cache.infinispan.util.BatchModeJtaPlatform;
|
||||
import org.hibernate.test.cache.infinispan.util.CacheTestSupport;
|
||||
import org.hibernate.test.cache.infinispan.util.CacheTestUtil;
|
||||
import org.hibernate.test.cache.infinispan.util.InfinispanTestingSetup;
|
||||
import org.hibernate.test.cache.infinispan.util.TestInfinispanRegionFactory;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Rule;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.junit.runners.Parameterized;
|
||||
|
||||
import org.infinispan.Cache;
|
||||
import org.infinispan.configuration.cache.CacheMode;
|
||||
|
||||
import org.jboss.logging.Logger;
|
||||
|
||||
/**
|
||||
* Base class for all non-functional tests of Infinispan integration.
|
||||
*
|
||||
* @author Galder Zamarreño
|
||||
* @since 3.5
|
||||
*/
|
||||
@RunWith(CustomParameterized.class)
|
||||
public abstract class AbstractNonFunctionalTest extends BaseUnitTestCase {
|
||||
|
||||
@Rule
|
||||
public InfinispanTestingSetup infinispanTestIdentifier = new InfinispanTestingSetup();
|
||||
|
||||
@CustomParameterized.Order(0)
|
||||
@Parameterized.Parameters(name = "{0}")
|
||||
public List<Object[]> getJtaParameters() {
|
||||
return Arrays.asList(
|
||||
new Object[] { "JTA", BatchModeJtaPlatform.class },
|
||||
new Object[] { "non-JTA", null });
|
||||
}
|
||||
|
||||
@CustomParameterized.Order(1)
|
||||
@Parameterized.Parameters(name = "{2},{3}")
|
||||
public List<Object[]> getCacheModeParameters() {
|
||||
ArrayList<Object[]> modes = new ArrayList<>();
|
||||
for (AccessType accessType : new AccessType[] {
|
||||
AccessType.TRANSACTIONAL,
|
||||
AccessType.READ_ONLY,
|
||||
AccessType.READ_WRITE
|
||||
}) {
|
||||
modes.add(new Object[]{CacheMode.INVALIDATION_SYNC, accessType});
|
||||
}
|
||||
for (AccessType accessType : new AccessType[] {
|
||||
AccessType.READ_ONLY,
|
||||
AccessType.READ_WRITE,
|
||||
AccessType.NONSTRICT_READ_WRITE
|
||||
}) {
|
||||
modes.add(new Object[]{CacheMode.REPL_SYNC, accessType});
|
||||
modes.add(new Object[]{CacheMode.DIST_SYNC, accessType});
|
||||
if (canUseLocalMode()) {
|
||||
modes.add(new Object[]{CacheMode.LOCAL, accessType});
|
||||
}
|
||||
}
|
||||
if (canUseLocalMode()) {
|
||||
modes.add(new Object[]{CacheMode.LOCAL, AccessType.TRANSACTIONAL});
|
||||
}
|
||||
return modes;
|
||||
}
|
||||
|
||||
@Parameterized.Parameter(0)
|
||||
public String mode;
|
||||
|
||||
@Parameterized.Parameter(1)
|
||||
public Class<? extends JtaPlatform> jtaPlatform;
|
||||
|
||||
@Parameterized.Parameter(2)
|
||||
public CacheMode cacheMode;
|
||||
|
||||
@Parameterized.Parameter(3)
|
||||
public AccessType accessType;
|
||||
|
||||
|
||||
public static final String REGION_PREFIX = "test";
|
||||
|
||||
private static final String PREFER_IPV4STACK = "java.net.preferIPv4Stack";
|
||||
private String preferIPv4Stack;
|
||||
private static final String JGROUPS_CFG_FILE = "hibernate.cache.infinispan.jgroups_cfg";
|
||||
private String jgroupsCfgFile;
|
||||
|
||||
private CacheTestSupport testSupport = new CacheTestSupport();
|
||||
|
||||
@Before
|
||||
public void prepareCacheSupport() throws Exception {
|
||||
preferIPv4Stack = System.getProperty(PREFER_IPV4STACK);
|
||||
System.setProperty(PREFER_IPV4STACK, "true");
|
||||
jgroupsCfgFile = System.getProperty(JGROUPS_CFG_FILE);
|
||||
System.setProperty(JGROUPS_CFG_FILE, "2lc-test-tcp.xml");
|
||||
|
||||
testSupport.setUp();
|
||||
}
|
||||
|
||||
@After
|
||||
public void releaseCachSupport() throws Exception {
|
||||
testSupport.tearDown();
|
||||
|
||||
if (preferIPv4Stack == null) {
|
||||
System.clearProperty(PREFER_IPV4STACK);
|
||||
} else {
|
||||
System.setProperty(PREFER_IPV4STACK, preferIPv4Stack);
|
||||
}
|
||||
|
||||
if (jgroupsCfgFile == null)
|
||||
System.clearProperty(JGROUPS_CFG_FILE);
|
||||
else
|
||||
System.setProperty(JGROUPS_CFG_FILE, jgroupsCfgFile);
|
||||
}
|
||||
|
||||
protected boolean canUseLocalMode() {
|
||||
return true;
|
||||
}
|
||||
|
||||
protected <T> T withTx(NodeEnvironment environment, SharedSessionContractImplementor session, Callable<T> callable) throws Exception {
|
||||
if (jtaPlatform != null) {
|
||||
TransactionManager tm = environment.getServiceRegistry().getService(JtaPlatform.class).retrieveTransactionManager();
|
||||
return Caches.withinTx(tm, callable);
|
||||
} else {
|
||||
Transaction transaction = ((Session) session).beginTransaction();
|
||||
boolean rollingBack = false;
|
||||
try {
|
||||
T retval = callable.call();
|
||||
if (transaction.getStatus() == TransactionStatus.ACTIVE) {
|
||||
transaction.commit();
|
||||
} else {
|
||||
rollingBack = true;
|
||||
transaction.rollback();
|
||||
}
|
||||
return retval;
|
||||
} catch (Exception e) {
|
||||
if (!rollingBack) {
|
||||
try {
|
||||
transaction.rollback();
|
||||
} catch (Exception suppressed) {
|
||||
e.addSuppressed(suppressed);
|
||||
}
|
||||
}
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected void registerCache(Cache cache) {
|
||||
testSupport.registerCache(cache);
|
||||
}
|
||||
|
||||
protected void unregisterCache(Cache cache) {
|
||||
testSupport.unregisterCache(cache);
|
||||
}
|
||||
|
||||
protected void registerFactory(RegionFactory factory) {
|
||||
testSupport.registerFactory(factory);
|
||||
}
|
||||
|
||||
protected void unregisterFactory(RegionFactory factory) {
|
||||
testSupport.unregisterFactory(factory);
|
||||
}
|
||||
|
||||
protected CacheTestSupport getCacheTestSupport() {
|
||||
return testSupport;
|
||||
}
|
||||
|
||||
protected StandardServiceRegistryBuilder createStandardServiceRegistryBuilder() {
|
||||
final StandardServiceRegistryBuilder ssrb = CacheTestUtil.buildBaselineStandardServiceRegistryBuilder(
|
||||
REGION_PREFIX, getRegionFactoryClass(), true, false, jtaPlatform);
|
||||
ssrb.applySetting(TestInfinispanRegionFactory.TRANSACTIONAL, accessType == AccessType.TRANSACTIONAL);
|
||||
ssrb.applySetting(TestInfinispanRegionFactory.CACHE_MODE, cacheMode);
|
||||
return ssrb;
|
||||
}
|
||||
|
||||
protected Class<? extends RegionFactory> getRegionFactoryClass() {
|
||||
return TestInfinispanRegionFactory.class;
|
||||
}
|
||||
}
|
|
@ -1,589 +0,0 @@
|
|||
package org.hibernate.test.cache.infinispan;
|
||||
|
||||
import java.sql.Connection;
|
||||
import java.sql.SQLException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
import javax.transaction.RollbackException;
|
||||
import javax.transaction.SystemException;
|
||||
|
||||
import org.hibernate.Session;
|
||||
import org.hibernate.Transaction;
|
||||
import org.hibernate.boot.registry.StandardServiceRegistryBuilder;
|
||||
import org.hibernate.cache.infinispan.access.PutFromLoadValidator;
|
||||
import org.hibernate.cache.infinispan.impl.BaseRegion;
|
||||
import org.hibernate.cache.infinispan.util.Caches;
|
||||
import org.hibernate.cache.infinispan.util.FutureUpdate;
|
||||
import org.hibernate.cache.infinispan.util.TombstoneUpdate;
|
||||
import org.hibernate.cache.internal.CacheDataDescriptionImpl;
|
||||
import org.hibernate.cache.spi.CacheDataDescription;
|
||||
import org.hibernate.cache.spi.access.AccessType;
|
||||
import org.hibernate.cache.spi.access.RegionAccessStrategy;
|
||||
import org.hibernate.cache.spi.access.SoftLock;
|
||||
import org.hibernate.engine.jdbc.connections.spi.JdbcConnectionAccess;
|
||||
import org.hibernate.engine.jdbc.spi.JdbcServices;
|
||||
import org.hibernate.engine.jdbc.spi.SqlExceptionHelper;
|
||||
import org.hibernate.engine.spi.SharedSessionContractImplementor;
|
||||
import org.hibernate.engine.transaction.internal.TransactionImpl;
|
||||
import org.hibernate.internal.util.compare.ComparableComparator;
|
||||
import org.hibernate.resource.jdbc.spi.JdbcSessionContext;
|
||||
import org.hibernate.resource.jdbc.spi.JdbcSessionOwner;
|
||||
import org.hibernate.resource.transaction.backend.jdbc.internal.JdbcResourceLocalTransactionCoordinatorBuilderImpl;
|
||||
import org.hibernate.resource.transaction.backend.jdbc.spi.JdbcResourceTransactionAccess;
|
||||
import org.hibernate.resource.transaction.spi.TransactionCoordinator;
|
||||
import org.hibernate.resource.transaction.spi.TransactionCoordinatorOwner;
|
||||
import org.hibernate.service.ServiceRegistry;
|
||||
|
||||
import org.hibernate.test.cache.infinispan.util.BatchModeJtaPlatform;
|
||||
import org.hibernate.test.cache.infinispan.util.BatchModeTransactionCoordinator;
|
||||
import org.hibernate.test.cache.infinispan.util.ExpectingInterceptor;
|
||||
import org.hibernate.test.cache.infinispan.util.JdbcResourceTransactionMock;
|
||||
import org.hibernate.test.cache.infinispan.util.TestInfinispanRegionFactory;
|
||||
import org.hibernate.test.cache.infinispan.util.TestSynchronization;
|
||||
import org.infinispan.Cache;
|
||||
import org.infinispan.commands.write.ClearCommand;
|
||||
import org.infinispan.test.TestingUtil;
|
||||
import org.jboss.logging.Logger;
|
||||
import org.hibernate.test.cache.infinispan.util.TestTimeService;
|
||||
import org.hibernate.testing.AfterClassOnce;
|
||||
import org.hibernate.testing.BeforeClassOnce;
|
||||
import org.infinispan.commands.write.InvalidateCommand;
|
||||
import org.infinispan.test.fwk.TestResourceTracker;
|
||||
import org.infinispan.AdvancedCache;
|
||||
import org.infinispan.commands.write.PutKeyValueCommand;
|
||||
import org.junit.After;
|
||||
import org.junit.Test;
|
||||
import junit.framework.AssertionFailedError;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.mockito.Matchers.any;
|
||||
import static org.mockito.Mockito.doAnswer;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.spy;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
/**
|
||||
* @author Radim Vansa <rvansa@redhat.com>
|
||||
*/
|
||||
public abstract class AbstractRegionAccessStrategyTest<R extends BaseRegion, S extends RegionAccessStrategy>
|
||||
extends AbstractNonFunctionalTest {
|
||||
protected final Logger log = Logger.getLogger(getClass());
|
||||
|
||||
public static final String REGION_NAME = "test/com.foo.test";
|
||||
public static final String KEY_BASE = "KEY";
|
||||
public static final String VALUE1 = "VALUE1";
|
||||
public static final String VALUE2 = "VALUE2";
|
||||
public static final CacheDataDescription CACHE_DATA_DESCRIPTION
|
||||
= new CacheDataDescriptionImpl(true, true, ComparableComparator.INSTANCE, null);
|
||||
|
||||
protected static final TestTimeService TIME_SERVICE = new TestTimeService();
|
||||
|
||||
protected NodeEnvironment localEnvironment;
|
||||
protected R localRegion;
|
||||
protected S localAccessStrategy;
|
||||
|
||||
protected NodeEnvironment remoteEnvironment;
|
||||
protected R remoteRegion;
|
||||
protected S remoteAccessStrategy;
|
||||
|
||||
protected boolean transactional;
|
||||
protected boolean invalidation;
|
||||
protected boolean synchronous;
|
||||
protected Exception node1Exception;
|
||||
protected Exception node2Exception;
|
||||
protected AssertionFailedError node1Failure;
|
||||
protected AssertionFailedError node2Failure;
|
||||
|
||||
protected List<Runnable> cleanup = new ArrayList<>();
|
||||
|
||||
@Override
|
||||
protected boolean canUseLocalMode() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@BeforeClassOnce
|
||||
public void prepareResources() throws Exception {
|
||||
TestResourceTracker.testStarted( getClass().getSimpleName() );
|
||||
// to mimic exactly the old code results, both environments here are exactly the same...
|
||||
StandardServiceRegistryBuilder ssrb = createStandardServiceRegistryBuilder();
|
||||
localEnvironment = new NodeEnvironment( ssrb );
|
||||
localEnvironment.prepare();
|
||||
|
||||
localRegion = getRegion(localEnvironment);
|
||||
localAccessStrategy = getAccessStrategy(localRegion);
|
||||
|
||||
transactional = Caches.isTransactionalCache(localRegion.getCache());
|
||||
invalidation = Caches.isInvalidationCache(localRegion.getCache());
|
||||
synchronous = Caches.isSynchronousCache(localRegion.getCache());
|
||||
|
||||
remoteEnvironment = new NodeEnvironment( ssrb );
|
||||
remoteEnvironment.prepare();
|
||||
|
||||
remoteRegion = getRegion(remoteEnvironment);
|
||||
remoteAccessStrategy = getAccessStrategy(remoteRegion);
|
||||
|
||||
waitForClusterToForm(localRegion.getCache(), remoteRegion.getCache());
|
||||
}
|
||||
|
||||
@After
|
||||
public void cleanup() {
|
||||
cleanup.forEach(Runnable::run);
|
||||
cleanup.clear();
|
||||
if (localRegion != null) localRegion.getCache().clear();
|
||||
if (remoteRegion != null) remoteRegion.getCache().clear();
|
||||
}
|
||||
|
||||
@AfterClassOnce
|
||||
public void releaseResources() throws Exception {
|
||||
try {
|
||||
if (localEnvironment != null) {
|
||||
localEnvironment.release();
|
||||
}
|
||||
}
|
||||
finally {
|
||||
if (remoteEnvironment != null) {
|
||||
remoteEnvironment.release();
|
||||
}
|
||||
}
|
||||
TestResourceTracker.testFinished(getClass().getSimpleName());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected StandardServiceRegistryBuilder createStandardServiceRegistryBuilder() {
|
||||
StandardServiceRegistryBuilder ssrb = super.createStandardServiceRegistryBuilder();
|
||||
ssrb.applySetting(TestInfinispanRegionFactory.TIME_SERVICE, TIME_SERVICE);
|
||||
return ssrb;
|
||||
}
|
||||
|
||||
/**
|
||||
* Simulate 2 nodes, both start, tx do a get, experience a cache miss, then
|
||||
* 'read from db.' First does a putFromLoad, then an update (or removal if it is a collection).
|
||||
* Second tries to do a putFromLoad with stale data (i.e. it took longer to read from the db).
|
||||
* Both commit their tx. Then both start a new tx and get. First should see
|
||||
* the updated data; second should either see the updated data
|
||||
* (isInvalidation() == false) or null (isInvalidation() == true).
|
||||
*
|
||||
* @param useMinimalAPI
|
||||
* @param isRemoval
|
||||
* @throws Exception
|
||||
*/
|
||||
protected void putFromLoadTest(final boolean useMinimalAPI, boolean isRemoval) throws Exception {
|
||||
|
||||
final Object KEY = generateNextKey();
|
||||
|
||||
final CountDownLatch writeLatch1 = new CountDownLatch(1);
|
||||
final CountDownLatch writeLatch2 = new CountDownLatch(1);
|
||||
final CountDownLatch completionLatch = new CountDownLatch(2);
|
||||
|
||||
Thread node1 = new Thread(() -> {
|
||||
try {
|
||||
SharedSessionContractImplementor session = mockedSession();
|
||||
withTx(localEnvironment, session, () -> {
|
||||
assertNull(localAccessStrategy.get(session, KEY, session.getTimestamp()));
|
||||
|
||||
writeLatch1.await();
|
||||
|
||||
if (useMinimalAPI) {
|
||||
localAccessStrategy.putFromLoad(session, KEY, VALUE1, session.getTimestamp(), 1, true);
|
||||
} else {
|
||||
localAccessStrategy.putFromLoad(session, KEY, VALUE1, session.getTimestamp(), 1);
|
||||
}
|
||||
|
||||
doUpdate(localAccessStrategy, session, KEY, VALUE2, 2);
|
||||
return null;
|
||||
});
|
||||
} catch (Exception e) {
|
||||
log.error("node1 caught exception", e);
|
||||
node1Exception = e;
|
||||
} catch (AssertionFailedError e) {
|
||||
node1Failure = e;
|
||||
} finally {
|
||||
// Let node2 write
|
||||
writeLatch2.countDown();
|
||||
completionLatch.countDown();
|
||||
}
|
||||
});
|
||||
|
||||
Thread node2 = new Thread(() -> {
|
||||
try {
|
||||
SharedSessionContractImplementor session = mockedSession();
|
||||
withTx(remoteEnvironment, session, () -> {
|
||||
|
||||
assertNull(remoteAccessStrategy.get(session, KEY, session.getTimestamp()));
|
||||
|
||||
// Let node1 write
|
||||
writeLatch1.countDown();
|
||||
// Wait for node1 to finish
|
||||
writeLatch2.await();
|
||||
|
||||
if (useMinimalAPI) {
|
||||
remoteAccessStrategy.putFromLoad(session, KEY, VALUE1, session.getTimestamp(), 1, true);
|
||||
} else {
|
||||
remoteAccessStrategy.putFromLoad(session, KEY, VALUE1, session.getTimestamp(), 1);
|
||||
}
|
||||
return null;
|
||||
});
|
||||
} catch (Exception e) {
|
||||
log.error("node2 caught exception", e);
|
||||
node2Exception = e;
|
||||
} catch (AssertionFailedError e) {
|
||||
node2Failure = e;
|
||||
} finally {
|
||||
completionLatch.countDown();
|
||||
}
|
||||
});
|
||||
|
||||
node1.setDaemon(true);
|
||||
node2.setDaemon(true);
|
||||
|
||||
CountDownLatch remoteUpdate = expectAfterUpdate();
|
||||
|
||||
node1.start();
|
||||
node2.start();
|
||||
|
||||
assertTrue("Threads completed", completionLatch.await(2, TimeUnit.SECONDS));
|
||||
|
||||
assertThreadsRanCleanly();
|
||||
assertTrue("Update was replicated", remoteUpdate.await(2, TimeUnit.SECONDS));
|
||||
|
||||
SharedSessionContractImplementor s1 = mockedSession();
|
||||
assertEquals( isRemoval ? null : VALUE2, localAccessStrategy.get(s1, KEY, s1.getTimestamp()));
|
||||
SharedSessionContractImplementor s2 = mockedSession();
|
||||
Object remoteValue = remoteAccessStrategy.get(s2, KEY, s2.getTimestamp());
|
||||
if (isUsingInvalidation() || isRemoval) {
|
||||
// invalidation command invalidates pending put
|
||||
assertNull(remoteValue);
|
||||
}
|
||||
else {
|
||||
// The node1 update is replicated, preventing the node2 PFER
|
||||
assertEquals( VALUE2, remoteValue);
|
||||
}
|
||||
}
|
||||
|
||||
protected CountDownLatch expectAfterUpdate() {
|
||||
return expectPutWithValue(value -> value instanceof FutureUpdate);
|
||||
}
|
||||
|
||||
protected CountDownLatch expectPutWithValue(Predicate<Object> valuePredicate) {
|
||||
if (!isUsingInvalidation() && accessType != AccessType.NONSTRICT_READ_WRITE) {
|
||||
CountDownLatch latch = new CountDownLatch(1);
|
||||
ExpectingInterceptor.get(remoteRegion.getCache())
|
||||
.when((ctx, cmd) -> cmd instanceof PutKeyValueCommand && valuePredicate.test(((PutKeyValueCommand) cmd).getValue()))
|
||||
.countDown(latch);
|
||||
cleanup.add(() -> ExpectingInterceptor.cleanup(remoteRegion.getCache()));
|
||||
return latch;
|
||||
} else {
|
||||
return new CountDownLatch(0);
|
||||
}
|
||||
}
|
||||
|
||||
protected CountDownLatch expectPutFromLoad() {
|
||||
return expectPutWithValue(value -> value instanceof TombstoneUpdate);
|
||||
}
|
||||
|
||||
protected abstract void doUpdate(S strategy, SharedSessionContractImplementor session, Object key, Object value, Object version) throws RollbackException, SystemException;
|
||||
|
||||
private interface SessionMock extends Session, SharedSessionContractImplementor {
|
||||
}
|
||||
|
||||
private interface NonJtaTransactionCoordinator extends TransactionCoordinatorOwner, JdbcResourceTransactionAccess {
|
||||
}
|
||||
|
||||
protected SharedSessionContractImplementor mockedSession() {
|
||||
SessionMock session = mock(SessionMock.class);
|
||||
when(session.isClosed()).thenReturn(false);
|
||||
when(session.getTimestamp()).thenReturn(TIME_SERVICE.wallClockTime());
|
||||
if (jtaPlatform == BatchModeJtaPlatform.class) {
|
||||
BatchModeTransactionCoordinator txCoord = new BatchModeTransactionCoordinator();
|
||||
when(session.getTransactionCoordinator()).thenReturn(txCoord);
|
||||
when(session.beginTransaction()).then(invocation -> {
|
||||
Transaction tx = txCoord.newTransaction();
|
||||
tx.begin();
|
||||
return tx;
|
||||
});
|
||||
} else if (jtaPlatform == null) {
|
||||
Connection connection = mock(Connection.class);
|
||||
JdbcConnectionAccess jdbcConnectionAccess = mock(JdbcConnectionAccess.class);
|
||||
try {
|
||||
when(jdbcConnectionAccess.obtainConnection()).thenReturn(connection);
|
||||
} catch (SQLException e) {
|
||||
// never thrown from mock
|
||||
}
|
||||
JdbcSessionOwner jdbcSessionOwner = mock(JdbcSessionOwner.class);
|
||||
when(jdbcSessionOwner.getJdbcConnectionAccess()).thenReturn(jdbcConnectionAccess);
|
||||
SqlExceptionHelper sqlExceptionHelper = mock(SqlExceptionHelper.class);
|
||||
JdbcServices jdbcServices = mock(JdbcServices.class);
|
||||
when(jdbcServices.getSqlExceptionHelper()).thenReturn(sqlExceptionHelper);
|
||||
ServiceRegistry serviceRegistry = mock(ServiceRegistry.class);
|
||||
when(serviceRegistry.getService(JdbcServices.class)).thenReturn(jdbcServices);
|
||||
JdbcSessionContext jdbcSessionContext = mock(JdbcSessionContext.class);
|
||||
when(jdbcSessionContext.getServiceRegistry()).thenReturn(serviceRegistry);
|
||||
when(jdbcSessionOwner.getJdbcSessionContext()).thenReturn(jdbcSessionContext);
|
||||
NonJtaTransactionCoordinator txOwner = mock(NonJtaTransactionCoordinator.class);
|
||||
when(txOwner.getResourceLocalTransaction()).thenReturn(new JdbcResourceTransactionMock());
|
||||
when(txOwner.getJdbcSessionOwner()).thenReturn(jdbcSessionOwner);
|
||||
when(txOwner.isActive()).thenReturn(true);
|
||||
TransactionCoordinator txCoord = JdbcResourceLocalTransactionCoordinatorBuilderImpl.INSTANCE
|
||||
.buildTransactionCoordinator(txOwner, null);
|
||||
when(session.getTransactionCoordinator()).thenReturn(txCoord);
|
||||
when(session.beginTransaction()).then(invocation -> {
|
||||
Transaction tx = new TransactionImpl(
|
||||
txCoord,
|
||||
session.getExceptionConverter(),
|
||||
session.getFactory().getSessionFactoryOptions().getJpaCompliance() );
|
||||
tx.begin();
|
||||
return tx;
|
||||
});
|
||||
} else {
|
||||
throw new IllegalStateException("Unknown JtaPlatform: " + jtaPlatform);
|
||||
}
|
||||
return session;
|
||||
}
|
||||
|
||||
protected abstract S getAccessStrategy(R region);
|
||||
|
||||
@Test
|
||||
public void testRemove() throws Exception {
|
||||
evictOrRemoveTest( false );
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testEvict() throws Exception {
|
||||
evictOrRemoveTest( true );
|
||||
}
|
||||
|
||||
protected abstract R getRegion(NodeEnvironment environment);
|
||||
|
||||
protected void waitForClusterToForm(Cache... caches) {
|
||||
TestingUtil.blockUntilViewsReceived(10000, Arrays.asList(caches));
|
||||
}
|
||||
|
||||
protected boolean isTransactional() {
|
||||
return transactional;
|
||||
}
|
||||
|
||||
protected boolean isUsingInvalidation() {
|
||||
return invalidation;
|
||||
}
|
||||
|
||||
protected boolean isSynchronous() {
|
||||
return synchronous;
|
||||
}
|
||||
|
||||
protected void evictOrRemoveTest(final boolean evict) throws Exception {
|
||||
final Object KEY = generateNextKey();
|
||||
assertEquals(0, localRegion.getCache().size());
|
||||
assertEquals(0, remoteRegion.getCache().size());
|
||||
|
||||
CountDownLatch localPutFromLoadLatch = expectRemotePutFromLoad(remoteRegion.getCache(), localRegion.getCache());
|
||||
CountDownLatch remotePutFromLoadLatch = expectRemotePutFromLoad(localRegion.getCache(), remoteRegion.getCache());
|
||||
|
||||
SharedSessionContractImplementor s1 = mockedSession();
|
||||
assertNull("local is clean", localAccessStrategy.get(s1, KEY, s1.getTimestamp()));
|
||||
SharedSessionContractImplementor s2 = mockedSession();
|
||||
assertNull("remote is clean", remoteAccessStrategy.get(s2, KEY, s2.getTimestamp()));
|
||||
|
||||
SharedSessionContractImplementor s3 = mockedSession();
|
||||
localAccessStrategy.putFromLoad(s3, KEY, VALUE1, s3.getTimestamp(), 1);
|
||||
SharedSessionContractImplementor s5 = mockedSession();
|
||||
remoteAccessStrategy.putFromLoad(s5, KEY, VALUE1, s5.getTimestamp(), 1);
|
||||
|
||||
// putFromLoad is applied on local node synchronously, but if there's a concurrent update
|
||||
// from the other node it can silently fail when acquiring the loc . Then we could try to read
|
||||
// before the update is fully applied.
|
||||
assertTrue(localPutFromLoadLatch.await(1, TimeUnit.SECONDS));
|
||||
assertTrue(remotePutFromLoadLatch.await(1, TimeUnit.SECONDS));
|
||||
|
||||
SharedSessionContractImplementor s4 = mockedSession();
|
||||
assertEquals(VALUE1, localAccessStrategy.get(s4, KEY, s4.getTimestamp()));
|
||||
SharedSessionContractImplementor s6 = mockedSession();
|
||||
assertEquals(VALUE1, remoteAccessStrategy.get(s6, KEY, s6.getTimestamp()));
|
||||
|
||||
SharedSessionContractImplementor session = mockedSession();
|
||||
withTx(localEnvironment, session, () -> {
|
||||
if (evict) {
|
||||
localAccessStrategy.evict(KEY);
|
||||
}
|
||||
else {
|
||||
doRemove(localAccessStrategy, session, KEY);
|
||||
}
|
||||
return null;
|
||||
});
|
||||
|
||||
SharedSessionContractImplementor s7 = mockedSession();
|
||||
assertNull(localAccessStrategy.get(s7, KEY, s7.getTimestamp()));
|
||||
assertEquals(0, localRegion.getCache().size());
|
||||
SharedSessionContractImplementor s8 = mockedSession();
|
||||
assertNull(remoteAccessStrategy.get(s8, KEY, s8.getTimestamp()));
|
||||
assertEquals(0, remoteRegion.getCache().size());
|
||||
}
|
||||
|
||||
protected void doRemove(S strategy, SharedSessionContractImplementor session, Object key) throws SystemException, RollbackException {
|
||||
SoftLock softLock = strategy.lockItem(session, key, null);
|
||||
strategy.remove(session, key);
|
||||
session.getTransactionCoordinator().getLocalSynchronizations().registerSynchronization(
|
||||
new TestSynchronization.UnlockItem(strategy, session, key, softLock));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRemoveAll() throws Exception {
|
||||
evictOrRemoveAllTest(false);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testEvictAll() throws Exception {
|
||||
evictOrRemoveAllTest(true);
|
||||
}
|
||||
|
||||
protected void assertThreadsRanCleanly() {
|
||||
if (node1Failure != null) {
|
||||
throw node1Failure;
|
||||
}
|
||||
if (node2Failure != null) {
|
||||
throw node2Failure;
|
||||
}
|
||||
|
||||
if (node1Exception != null) {
|
||||
log.error("node1 saw an exception", node1Exception);
|
||||
assertEquals("node1 saw no exceptions", null, node1Exception);
|
||||
}
|
||||
|
||||
if (node2Exception != null) {
|
||||
log.error("node2 saw an exception", node2Exception);
|
||||
assertEquals("node2 saw no exceptions", null, node2Exception);
|
||||
}
|
||||
}
|
||||
|
||||
protected abstract Object generateNextKey();
|
||||
|
||||
protected void evictOrRemoveAllTest(final boolean evict) throws Exception {
|
||||
final Object KEY = generateNextKey();
|
||||
assertEquals(0, localRegion.getCache().size());
|
||||
assertEquals(0, remoteRegion.getCache().size());
|
||||
SharedSessionContractImplementor s1 = mockedSession();
|
||||
assertNull("local is clean", localAccessStrategy.get(s1, KEY, s1.getTimestamp()));
|
||||
SharedSessionContractImplementor s2 = mockedSession();
|
||||
assertNull("remote is clean", remoteAccessStrategy.get(s2, KEY, s2.getTimestamp()));
|
||||
|
||||
CountDownLatch localPutFromLoadLatch = expectRemotePutFromLoad(remoteRegion.getCache(), localRegion.getCache());
|
||||
CountDownLatch remotePutFromLoadLatch = expectRemotePutFromLoad(localRegion.getCache(), remoteRegion.getCache());
|
||||
|
||||
SharedSessionContractImplementor s3 = mockedSession();
|
||||
localAccessStrategy.putFromLoad(s3, KEY, VALUE1, s3.getTimestamp(), 1);
|
||||
SharedSessionContractImplementor s5 = mockedSession();
|
||||
remoteAccessStrategy.putFromLoad(s5, KEY, VALUE1, s5.getTimestamp(), 1);
|
||||
|
||||
// putFromLoad is applied on local node synchronously, but if there's a concurrent update
|
||||
// from the other node it can silently fail when acquiring the loc . Then we could try to read
|
||||
// before the update is fully applied.
|
||||
assertTrue(localPutFromLoadLatch.await(1, TimeUnit.SECONDS));
|
||||
assertTrue(remotePutFromLoadLatch.await(1, TimeUnit.SECONDS));
|
||||
|
||||
SharedSessionContractImplementor s4 = mockedSession();
|
||||
SharedSessionContractImplementor s6 = mockedSession();
|
||||
assertEquals(VALUE1, localAccessStrategy.get(s4, KEY, s4.getTimestamp()));
|
||||
assertEquals(VALUE1, remoteAccessStrategy.get(s6, KEY, s6.getTimestamp()));
|
||||
|
||||
CountDownLatch endInvalidationLatch;
|
||||
if (invalidation && !evict) {
|
||||
// removeAll causes transactional remove commands which trigger EndInvalidationCommands on the remote side
|
||||
// if the cache is non-transactional, PutFromLoadValidator.registerRemoteInvalidations cannot find
|
||||
// current session nor register tx synchronization, so it falls back to simple InvalidationCommand.
|
||||
endInvalidationLatch = new CountDownLatch(1);
|
||||
if (transactional) {
|
||||
PutFromLoadValidator originalValidator = PutFromLoadValidator.removeFromCache(remoteRegion.getCache());
|
||||
assertEquals(PutFromLoadValidator.class, originalValidator.getClass());
|
||||
PutFromLoadValidator mockValidator = spy(originalValidator);
|
||||
doAnswer(invocation -> {
|
||||
try {
|
||||
return invocation.callRealMethod();
|
||||
} finally {
|
||||
endInvalidationLatch.countDown();
|
||||
}
|
||||
}).when(mockValidator).endInvalidatingKey(any(), any());
|
||||
PutFromLoadValidator.addToCache(remoteRegion.getCache(), mockValidator);
|
||||
cleanup.add(() -> {
|
||||
PutFromLoadValidator.removeFromCache(remoteRegion.getCache());
|
||||
PutFromLoadValidator.addToCache(remoteRegion.getCache(), originalValidator);
|
||||
});
|
||||
} else {
|
||||
ExpectingInterceptor.get(remoteRegion.getCache())
|
||||
.when((ctx, cmd) -> cmd instanceof InvalidateCommand || cmd instanceof ClearCommand)
|
||||
.countDown(endInvalidationLatch);
|
||||
cleanup.add(() -> ExpectingInterceptor.cleanup(remoteRegion.getCache()));
|
||||
}
|
||||
} else {
|
||||
endInvalidationLatch = new CountDownLatch(0);
|
||||
}
|
||||
|
||||
withTx(localEnvironment, mockedSession(), () -> {
|
||||
if (evict) {
|
||||
localAccessStrategy.evictAll();
|
||||
} else {
|
||||
SoftLock softLock = localAccessStrategy.lockRegion();
|
||||
localAccessStrategy.removeAll();
|
||||
localAccessStrategy.unlockRegion(softLock);
|
||||
}
|
||||
return null;
|
||||
});
|
||||
SharedSessionContractImplementor s7 = mockedSession();
|
||||
assertNull(localAccessStrategy.get(s7, KEY, s7.getTimestamp()));
|
||||
assertEquals(0, localRegion.getCache().size());
|
||||
|
||||
SharedSessionContractImplementor s8 = mockedSession();
|
||||
assertNull(remoteAccessStrategy.get(s8, KEY, s8.getTimestamp()));
|
||||
assertEquals(0, remoteRegion.getCache().size());
|
||||
|
||||
// Wait for async propagation of EndInvalidationCommand before executing naked put
|
||||
assertTrue(endInvalidationLatch.await(1, TimeUnit.SECONDS));
|
||||
TIME_SERVICE.advance(1);
|
||||
|
||||
CountDownLatch lastPutFromLoadLatch = expectRemotePutFromLoad(remoteRegion.getCache(), localRegion.getCache());
|
||||
|
||||
// Test whether the get above messes up the optimistic version
|
||||
SharedSessionContractImplementor s9 = mockedSession();
|
||||
assertTrue(remoteAccessStrategy.putFromLoad(s9, KEY, VALUE1, s9.getTimestamp(), 1));
|
||||
SharedSessionContractImplementor s10 = mockedSession();
|
||||
assertEquals(VALUE1, remoteAccessStrategy.get(s10, KEY, s10.getTimestamp()));
|
||||
assertEquals(1, remoteRegion.getCache().size());
|
||||
|
||||
assertTrue(lastPutFromLoadLatch.await(1, TimeUnit.SECONDS));
|
||||
|
||||
SharedSessionContractImplementor s11 = mockedSession();
|
||||
assertEquals((isUsingInvalidation() ? null : VALUE1), localAccessStrategy.get(s11, KEY, s11.getTimestamp()));
|
||||
SharedSessionContractImplementor s12 = mockedSession();
|
||||
assertEquals(VALUE1, remoteAccessStrategy.get(s12, KEY, s12.getTimestamp()));
|
||||
}
|
||||
|
||||
private CountDownLatch expectRemotePutFromLoad(AdvancedCache localCache, AdvancedCache remoteCache) {
|
||||
CountDownLatch putFromLoadLatch;
|
||||
if (!isUsingInvalidation()) {
|
||||
putFromLoadLatch = new CountDownLatch(1);
|
||||
// The command may fail to replicate if it can't acquire lock locally
|
||||
ExpectingInterceptor.Condition remoteCondition = ExpectingInterceptor.get(remoteCache)
|
||||
.when((ctx, cmd) -> !ctx.isOriginLocal() && cmd instanceof PutKeyValueCommand);
|
||||
ExpectingInterceptor.Condition localCondition = ExpectingInterceptor.get(localCache)
|
||||
.whenFails((ctx, cmd) -> ctx.isOriginLocal() && cmd instanceof PutKeyValueCommand);
|
||||
remoteCondition.run(() -> {
|
||||
localCondition.cancel();
|
||||
putFromLoadLatch.countDown();
|
||||
});
|
||||
localCondition.run(() -> {
|
||||
remoteCondition.cancel();
|
||||
putFromLoadLatch.countDown();
|
||||
});
|
||||
// just for case the test fails and does not remove the interceptor itself
|
||||
cleanup.add(() -> ExpectingInterceptor.cleanup(localCache, remoteCache));
|
||||
} else {
|
||||
putFromLoadLatch = new CountDownLatch(0);
|
||||
}
|
||||
return putFromLoadLatch;
|
||||
}
|
||||
}
|
|
@ -1,38 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.test.cache.infinispan;
|
||||
|
||||
import java.util.Properties;
|
||||
|
||||
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
|
||||
import org.hibernate.cache.internal.CacheDataDescriptionImpl;
|
||||
import org.hibernate.cache.spi.CacheDataDescription;
|
||||
import org.hibernate.cache.spi.Region;
|
||||
import org.hibernate.internal.util.compare.ComparableComparator;
|
||||
import org.infinispan.AdvancedCache;
|
||||
|
||||
/**
|
||||
* Base class for tests of Region implementations.
|
||||
*
|
||||
* @author Galder Zamarreño
|
||||
* @since 3.5
|
||||
*/
|
||||
public abstract class AbstractRegionImplTest extends AbstractNonFunctionalTest {
|
||||
|
||||
protected abstract AdvancedCache getInfinispanCache(InfinispanRegionFactory regionFactory);
|
||||
|
||||
protected abstract Region createRegion(InfinispanRegionFactory regionFactory, String regionName, Properties properties, CacheDataDescription cdd);
|
||||
|
||||
protected abstract void putInRegion(Region region, Object key, Object value);
|
||||
|
||||
protected abstract void removeFromRegion(Region region, Object key);
|
||||
|
||||
protected CacheDataDescription getCacheDataDescription() {
|
||||
return new CacheDataDescriptionImpl(true, true, ComparableComparator.INSTANCE, null);
|
||||
}
|
||||
|
||||
}
|
|
@@ -1,118 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.test.cache.infinispan;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;

import org.junit.Rule;
import org.junit.Test;

import org.hibernate.cache.internal.DefaultCacheKeysFactory;
import org.hibernate.cache.internal.SimpleCacheKeysFactory;
import org.hibernate.cache.spi.CacheKeysFactory;
import org.hibernate.cfg.Configuration;
import org.hibernate.cfg.Environment;
import org.hibernate.engine.spi.SessionFactoryImplementor;
import org.hibernate.jpa.AvailableSettings;
import org.hibernate.persister.entity.EntityPersister;
import org.hibernate.test.cache.infinispan.functional.entities.WithEmbeddedId;
import org.hibernate.test.cache.infinispan.functional.entities.PK;
import org.hibernate.test.cache.infinispan.functional.entities.WithSimpleId;
import org.hibernate.test.cache.infinispan.util.InfinispanTestingSetup;
import org.hibernate.test.cache.infinispan.util.TestInfinispanRegionFactory;
import org.hibernate.testing.TestForIssue;
import org.hibernate.testing.junit4.BaseUnitTestCase;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

/**
 * @author Gail Badner
 */
public class CacheKeySerializationTest extends BaseUnitTestCase {
   @Rule
   public InfinispanTestingSetup infinispanTestIdentifier = new InfinispanTestingSetup();

   private SessionFactoryImplementor getSessionFactory(String cacheKeysFactory) {
      Configuration configuration = new Configuration()
            .setProperty( Environment.USE_SECOND_LEVEL_CACHE, "true" )
            .setProperty( Environment.CACHE_REGION_FACTORY, TestInfinispanRegionFactory.class.getName() )
            .setProperty( Environment.DEFAULT_CACHE_CONCURRENCY_STRATEGY, "transactional" )
            .setProperty( AvailableSettings.SHARED_CACHE_MODE, "ALL" )
            .setProperty( Environment.HBM2DDL_AUTO, "create-drop" );
      if ( cacheKeysFactory != null ) {
         configuration.setProperty( Environment.CACHE_KEYS_FACTORY, cacheKeysFactory );
      }
      configuration.addAnnotatedClass( WithSimpleId.class );
      configuration.addAnnotatedClass( WithEmbeddedId.class );
      return (SessionFactoryImplementor) configuration.buildSessionFactory();
   }

   @Test
   @TestForIssue(jiraKey = "HHH-11202")
   public void testSimpleCacheKeySimpleId() throws Exception {
      testId( SimpleCacheKeysFactory.INSTANCE, WithSimpleId.class.getName(), 1L );
   }

   @Test
   @TestForIssue(jiraKey = "HHH-11202")
   public void testSimpleCacheKeyEmbeddedId() throws Exception {
      testId( SimpleCacheKeysFactory.INSTANCE, WithEmbeddedId.class.getName(), new PK( 1L ) );
   }

   @Test
   @TestForIssue(jiraKey = "HHH-11202")
   public void testDefaultCacheKeySimpleId() throws Exception {
      testId( DefaultCacheKeysFactory.INSTANCE, WithSimpleId.class.getName(), 1L );
   }

   @Test
   @TestForIssue(jiraKey = "HHH-11202")
   public void testDefaultCacheKeyEmbeddedId() throws Exception {
      testId( DefaultCacheKeysFactory.INSTANCE, WithEmbeddedId.class.getName(), new PK( 1L ) );
   }
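
   // testId(): builds the entity cache key with the given CacheKeysFactory, serializes and
   // deserializes it, and checks equals/hashCode symmetry plus id extraction via getEntityId().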
   private void testId(CacheKeysFactory cacheKeysFactory, String entityName, Object id) throws Exception {
      final SessionFactoryImplementor sessionFactory = getSessionFactory( cacheKeysFactory.getClass().getName() );
      final EntityPersister persister = sessionFactory.getEntityPersister( entityName );
      final Object key = cacheKeysFactory.createEntityKey(
            id,
            persister,
            sessionFactory,
            null
      );

      final ByteArrayOutputStream baos = new ByteArrayOutputStream();
      final ObjectOutputStream oos = new ObjectOutputStream( baos );
      oos.writeObject( key );

      final ObjectInputStream ois = new ObjectInputStream( new ByteArrayInputStream( baos.toByteArray() ) );
      final Object keyClone = ois.readObject();

      try {
         assertEquals( key, keyClone );
         assertEquals( keyClone, key );

         assertEquals( key.hashCode(), keyClone.hashCode() );

         final Object idClone = cacheKeysFactory.getEntityId( keyClone );

         assertEquals( id.hashCode(), idClone.hashCode() );
         assertEquals( id, idClone );
         assertEquals( idClone, id );
         assertTrue( persister.getIdentifierType().isEqual( id, idClone, sessionFactory ) );
         assertTrue( persister.getIdentifierType().isEqual( idClone, id, sessionFactory ) );
         sessionFactory.close();
      }
      finally {
         sessionFactory.close();
      }
   }
}
@@ -1,86 +0,0 @@
package org.hibernate.test.cache.infinispan;

import org.hibernate.SessionFactory;
import org.hibernate.cache.internal.DefaultCacheKeysFactory;
import org.hibernate.cache.internal.SimpleCacheKeysFactory;
import org.hibernate.cfg.Configuration;
import org.hibernate.cfg.Environment;
import org.hibernate.engine.spi.CacheImplementor;
import org.hibernate.test.cache.infinispan.functional.entities.Name;
import org.hibernate.test.cache.infinispan.functional.entities.Person;
import org.hibernate.test.cache.infinispan.util.InfinispanTestingSetup;
import org.hibernate.test.cache.infinispan.util.TestInfinispanRegionFactory;
import org.hibernate.testing.junit4.BaseUnitTestCase;
import org.infinispan.Cache;
import org.junit.Rule;
import org.junit.Test;

import java.util.Iterator;

import static org.hibernate.test.cache.infinispan.util.TxUtil.withTxSession;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

public class CacheKeysFactoryTest extends BaseUnitTestCase {
   @Rule
   public InfinispanTestingSetup infinispanTestIdentifier = new InfinispanTestingSetup();

   private SessionFactory getSessionFactory(String cacheKeysFactory) {
      Configuration configuration = new Configuration()
            .setProperty(Environment.USE_SECOND_LEVEL_CACHE, "true")
            .setProperty(Environment.CACHE_REGION_FACTORY, TestInfinispanRegionFactory.class.getName())
            .setProperty(Environment.DEFAULT_CACHE_CONCURRENCY_STRATEGY, "transactional")
            .setProperty(Environment.JPA_SHARED_CACHE_MODE, "ALL")
            .setProperty(Environment.HBM2DDL_AUTO, "create-drop");
      if (cacheKeysFactory != null) {
         configuration.setProperty(Environment.CACHE_KEYS_FACTORY, cacheKeysFactory);
      }
      configuration.addAnnotatedClass(Person.class);
      return configuration.buildSessionFactory();
   }

   @Test
   public void testNotSet() throws Exception {
      test(null, "CacheKeyImplementation");
   }

   @Test
   public void testDefault() throws Exception {
      test(DefaultCacheKeysFactory.SHORT_NAME, "CacheKeyImplementation");
   }

   @Test
   public void testDefaultClass() throws Exception {
      test(DefaultCacheKeysFactory.class.getName(), "CacheKeyImplementation");
   }

   @Test
   public void testSimple() throws Exception {
      test(SimpleCacheKeysFactory.SHORT_NAME, Name.class.getSimpleName());
   }

   @Test
   public void testSimpleClass() throws Exception {
      test(SimpleCacheKeysFactory.class.getName(), Name.class.getSimpleName());
   }
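
   // test(): persists a Person, inspects the raw keys in the Infinispan data container to
   // verify which cache-key class the configured CacheKeysFactory produced, then re-loads
   // the entity by its Name id to make sure the cached entry is still usable.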
   private void test(String cacheKeysFactory, String keyClassName) throws Exception {
      SessionFactory sessionFactory = getSessionFactory(cacheKeysFactory);
      withTxSession(false, sessionFactory, s -> {
         Person person = new Person("John", "Black", 39);
         s.persist(person);
      });

      TestInfinispanRegionFactory regionFactory = (TestInfinispanRegionFactory) ((CacheImplementor) sessionFactory.getCache()).getRegionFactory();
      Cache<Object, Object> cache = regionFactory.getCacheManager().getCache(Person.class.getName());
      Iterator<Object> iterator = cache.getAdvancedCache().getDataContainer().keySet().iterator();
      assertTrue(iterator.hasNext());
      Object key = iterator.next();
      assertEquals(keyClassName, key.getClass().getSimpleName());

      withTxSession(false, sessionFactory, s -> {
         Person person = s.load(Person.class, new Name("John", "Black"));
         assertEquals(39, person.getAge());
      });
   }
}
@@ -1,682 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.test.cache.infinispan;

import java.io.InputStream;
import java.util.Properties;
import java.util.function.BiConsumer;
import javax.transaction.TransactionManager;

import org.hibernate.boot.spi.SessionFactoryOptions;
import org.hibernate.cache.CacheException;
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
import org.hibernate.cache.infinispan.collection.CollectionRegionImpl;
import org.hibernate.cache.infinispan.entity.EntityRegionImpl;
import org.hibernate.cache.infinispan.query.QueryResultsRegionImpl;
import org.hibernate.cache.infinispan.timestamp.TimestampsRegionImpl;
import org.hibernate.cache.infinispan.tm.HibernateTransactionManagerLookup;
import org.hibernate.cache.internal.CacheDataDescriptionImpl;
import org.hibernate.cache.spi.CacheDataDescription;
import org.hibernate.cfg.Environment;
import org.hibernate.engine.transaction.jta.platform.internal.AbstractJtaPlatform;
import org.hibernate.engine.transaction.jta.platform.internal.JBossStandAloneJtaPlatform;
import org.hibernate.service.ServiceRegistry;

import org.hibernate.testing.ServiceRegistryBuilder;
import org.hibernate.test.cache.infinispan.util.CacheTestUtil;
import org.hibernate.test.cache.infinispan.util.InfinispanTestingSetup;
import org.infinispan.commons.util.FileLookupFactory;
import org.infinispan.configuration.parsing.ConfigurationBuilderHolder;
import org.infinispan.configuration.parsing.ParserRegistry;
import org.junit.Rule;
import org.junit.Test;

import org.infinispan.AdvancedCache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ClusteringConfigurationBuilder;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.eviction.EvictionStrategy;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.TestingUtil;
import org.infinispan.transaction.TransactionMode;

import static org.hibernate.cache.infinispan.InfinispanRegionFactory.DEF_PENDING_PUTS_RESOURCE;
import static org.hibernate.cache.infinispan.InfinispanRegionFactory.DEF_TIMESTAMPS_RESOURCE;
import static org.hibernate.cache.infinispan.InfinispanRegionFactory.DataType;
import static org.hibernate.cache.infinispan.InfinispanRegionFactory.INFINISPAN_CONFIG_RESOURCE_PROP;
import static org.hibernate.cache.infinispan.InfinispanRegionFactory.TIMESTAMPS_CACHE_RESOURCE_PROP;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

/**
 * InfinispanRegionFactoryTestCase.
 *
 * @author Galder Zamarreño
 * @since 3.5
 */
public class InfinispanRegionFactoryTestCase {
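   // CacheDataDescription fixtures for the region-building tests below: both non-versioned, one mutable and one immutable.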
   private static final CacheDataDescription MUTABLE_NON_VERSIONED = new CacheDataDescriptionImpl(true, false, null, null);
   private static final CacheDataDescription IMMUTABLE_NON_VERSIONED = new CacheDataDescriptionImpl(false, false, null, null);

   @Rule
   public InfinispanTestingSetup infinispanTestIdentifier = new InfinispanTestingSetup();

   @Test
   public void testConfigurationProcessing() {
      final String person = "com.acme.Person";
      final String addresses = "com.acme.Person.addresses";
      Properties p = createProperties();
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.cfg", "person-cache");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.eviction.strategy", "LRU");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.eviction.max_entries", "5000");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.expiration.wake_up_interval", "2000");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.expiration.lifespan", "60000");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.expiration.max_idle", "30000");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.addresses.cfg", "person-addresses-cache");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.addresses.expiration.lifespan", "120000");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.addresses.expiration.max_idle", "60000");
      p.setProperty("hibernate.cache.infinispan.query.cfg", "my-query-cache");
      p.setProperty("hibernate.cache.infinispan.query.eviction.strategy", "LIRS");
      p.setProperty("hibernate.cache.infinispan.query.expiration.wake_up_interval", "3000");
      p.setProperty("hibernate.cache.infinispan.query.eviction.max_entries", "10000");

      TestInfinispanRegionFactory factory = createRegionFactory(p);

      try {
         assertEquals("person-cache", factory.getBaseConfiguration(person));
         Configuration personOverride = factory.getConfigurationOverride(person);
         assertEquals(EvictionStrategy.LRU, personOverride.eviction().strategy());
         assertEquals(5000, personOverride.eviction().maxEntries());
         assertEquals(2000, personOverride.expiration().wakeUpInterval());
         assertEquals(60000, personOverride.expiration().lifespan());
         assertEquals(30000, personOverride.expiration().maxIdle());

         assertEquals("person-addresses-cache", factory.getBaseConfiguration(addresses));
         Configuration addressesOverride = factory.getConfigurationOverride(addresses);
         assertEquals(120000, addressesOverride.expiration().lifespan());
         assertEquals(60000, addressesOverride.expiration().maxIdle());

         assertEquals("my-query-cache", factory.getBaseConfiguration(DataType.QUERY));
         Configuration queryOverride = factory.getConfigurationOverride(DataType.QUERY);
         assertEquals(EvictionStrategy.LIRS, queryOverride.eviction().strategy());
         assertEquals(10000, queryOverride.eviction().maxEntries());
         assertEquals(3000, queryOverride.expiration().wakeUpInterval());
      } finally {
         factory.stop();
      }
   }
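
   // Property keys follow the pattern hibernate.cache.infinispan.<region-or-data-type>.<option>;
   // region-specific settings take precedence over the entity/collection/query/timestamps defaults.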
   @Test
   public void testBuildEntityCollectionRegionsPersonPlusEntityCollectionOverrides() {
      final String person = "com.acme.Person";
      final String address = "com.acme.Address";
      final String car = "com.acme.Car";
      final String addresses = "com.acme.Person.addresses";
      final String parts = "com.acme.Car.parts";
      Properties p = createProperties();
      // First option, cache defined for entity and overrides for generic entity data type and entity itself.
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.cfg", "person-cache");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.eviction.strategy", "LRU");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.eviction.max_entries", "5000");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.expiration.wake_up_interval", "2000");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.expiration.lifespan", "60000");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.expiration.max_idle", "30000");
      p.setProperty("hibernate.cache.infinispan.entity.cfg", "myentity-cache");
      p.setProperty("hibernate.cache.infinispan.entity.eviction.strategy", "LIRS");
      p.setProperty("hibernate.cache.infinispan.entity.expiration.wake_up_interval", "3000");
      p.setProperty("hibernate.cache.infinispan.entity.eviction.max_entries", "20000");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.addresses.cfg", "addresses-cache");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.addresses.eviction.strategy", "LIRS");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.addresses.eviction.max_entries", "5500");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.addresses.expiration.wake_up_interval", "2500");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.addresses.expiration.lifespan", "65000");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.addresses.expiration.max_idle", "35000");
      p.setProperty("hibernate.cache.infinispan.collection.cfg", "mycollection-cache");
      p.setProperty("hibernate.cache.infinispan.collection.eviction.strategy", "LRU");
      p.setProperty("hibernate.cache.infinispan.collection.expiration.wake_up_interval", "3500");
      p.setProperty("hibernate.cache.infinispan.collection.eviction.max_entries", "25000");
      TestInfinispanRegionFactory factory = createRegionFactory(p);
      try {
         EmbeddedCacheManager manager = factory.getCacheManager();
         assertFalse(manager.getCacheManagerConfiguration().globalJmxStatistics().enabled());
         assertNotNull(factory.getBaseConfiguration(person));
         assertFalse(isDefinedCache(factory, person));
         assertNotNull(factory.getBaseConfiguration(addresses));
         assertFalse(isDefinedCache(factory, addresses));
         assertNull(factory.getBaseConfiguration(address));
         assertNull(factory.getBaseConfiguration(parts));
         AdvancedCache cache;

         EntityRegionImpl region = (EntityRegionImpl) factory.buildEntityRegion(person, p, MUTABLE_NON_VERSIONED);
         assertTrue(isDefinedCache(factory, person));
         cache = region.getCache();
         Configuration cacheCfg = cache.getCacheConfiguration();
         assertEquals(EvictionStrategy.LRU, cacheCfg.eviction().strategy());
         assertEquals(2000, cacheCfg.expiration().wakeUpInterval());
         assertEquals(5000, cacheCfg.eviction().maxEntries());
         assertEquals(60000, cacheCfg.expiration().lifespan());
         assertEquals(30000, cacheCfg.expiration().maxIdle());
         assertFalse(cacheCfg.jmxStatistics().enabled());

         region = (EntityRegionImpl) factory.buildEntityRegion(address, p, MUTABLE_NON_VERSIONED);
         assertTrue(isDefinedCache(factory, person));
         cache = region.getCache();
         cacheCfg = cache.getCacheConfiguration();
         assertEquals(EvictionStrategy.LIRS, cacheCfg.eviction().strategy());
         assertEquals(3000, cacheCfg.expiration().wakeUpInterval());
         assertEquals(20000, cacheCfg.eviction().maxEntries());
         assertFalse(cacheCfg.jmxStatistics().enabled());

         region = (EntityRegionImpl) factory.buildEntityRegion(car, p, MUTABLE_NON_VERSIONED);
         assertTrue(isDefinedCache(factory, person));
         cache = region.getCache();
         cacheCfg = cache.getCacheConfiguration();
         assertEquals(EvictionStrategy.LIRS, cacheCfg.eviction().strategy());
         assertEquals(3000, cacheCfg.expiration().wakeUpInterval());
         assertEquals(20000, cacheCfg.eviction().maxEntries());
         assertFalse(cacheCfg.jmxStatistics().enabled());

         CollectionRegionImpl collectionRegion = (CollectionRegionImpl)
               factory.buildCollectionRegion(addresses, p, MUTABLE_NON_VERSIONED);
         assertTrue(isDefinedCache(factory, person));

         cache = collectionRegion.getCache();
         cacheCfg = cache.getCacheConfiguration();
         assertEquals(EvictionStrategy.LIRS, cacheCfg.eviction().strategy());
         assertEquals(2500, cacheCfg.expiration().wakeUpInterval());
         assertEquals(5500, cacheCfg.eviction().maxEntries());
         assertEquals(65000, cacheCfg.expiration().lifespan());
         assertEquals(35000, cacheCfg.expiration().maxIdle());
         assertFalse(cacheCfg.jmxStatistics().enabled());

         collectionRegion = (CollectionRegionImpl) factory.buildCollectionRegion(parts, p, MUTABLE_NON_VERSIONED);
         assertTrue(isDefinedCache(factory, addresses));
         cache = collectionRegion.getCache();
         cacheCfg = cache.getCacheConfiguration();
         assertEquals(EvictionStrategy.LRU, cacheCfg.eviction().strategy());
         assertEquals(3500, cacheCfg.expiration().wakeUpInterval());
         assertEquals(25000, cacheCfg.eviction().maxEntries());
         assertFalse(cacheCfg.jmxStatistics().enabled());

         collectionRegion = (CollectionRegionImpl) factory.buildCollectionRegion(parts, p, MUTABLE_NON_VERSIONED);
         assertTrue(isDefinedCache(factory, addresses));
         cache = collectionRegion.getCache();
         cacheCfg = cache.getCacheConfiguration();
         assertEquals(EvictionStrategy.LRU, cacheCfg.eviction().strategy());
         assertEquals(3500, cacheCfg.expiration().wakeUpInterval());
         assertEquals(25000, cacheCfg.eviction().maxEntries());
         assertFalse(cacheCfg.jmxStatistics().enabled());
      } finally {
         factory.stop();
      }
   }

   @Test
   public void testBuildEntityCollectionRegionOverridesOnly() {
      final String address = "com.acme.Address";
      final String personAddressses = "com.acme.Person.addresses";
      AdvancedCache cache;
      Properties p = createProperties();
      p.setProperty("hibernate.cache.infinispan.entity.eviction.strategy", "LIRS");
      p.setProperty("hibernate.cache.infinispan.entity.eviction.max_entries", "30000");
      p.setProperty("hibernate.cache.infinispan.entity.expiration.wake_up_interval", "3000");
      p.setProperty("hibernate.cache.infinispan.collection.eviction.strategy", "LRU");
      p.setProperty("hibernate.cache.infinispan.collection.eviction.max_entries", "35000");
      p.setProperty("hibernate.cache.infinispan.collection.expiration.wake_up_interval", "3500");
      TestInfinispanRegionFactory factory = createRegionFactory(p);
      try {
         factory.getCacheManager();
         EntityRegionImpl region = (EntityRegionImpl) factory.buildEntityRegion(address, p, MUTABLE_NON_VERSIONED);
         assertNull(factory.getBaseConfiguration(address));
         cache = region.getCache();
         Configuration cacheCfg = cache.getCacheConfiguration();
         assertEquals(EvictionStrategy.LIRS, cacheCfg.eviction().strategy());
         assertEquals(3000, cacheCfg.expiration().wakeUpInterval());
         assertEquals(30000, cacheCfg.eviction().maxEntries());
         // Max idle value comes from base XML configuration
         assertEquals(100000, cacheCfg.expiration().maxIdle());
         CollectionRegionImpl collectionRegion = (CollectionRegionImpl)
               factory.buildCollectionRegion(personAddressses, p, MUTABLE_NON_VERSIONED);
         assertNull(factory.getBaseConfiguration(personAddressses));
         cache = collectionRegion.getCache();
         cacheCfg = cache.getCacheConfiguration();
         assertEquals(EvictionStrategy.LRU, cacheCfg.eviction().strategy());
         assertEquals(3500, cacheCfg.expiration().wakeUpInterval());
         assertEquals(35000, cacheCfg.eviction().maxEntries());
         assertEquals(100000, cacheCfg.expiration().maxIdle());
      } finally {
         factory.stop();
      }
   }

   @Test
   public void testBuildEntityRegionPersonPlusEntityOverridesWithoutCfg() {
      final String person = "com.acme.Person";
      Properties p = createProperties();
      // Third option, no cache defined for entity and overrides for generic entity data type and entity itself.
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.eviction.strategy", "LRU");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.expiration.lifespan", "60000");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.expiration.max_idle", "30000");
      p.setProperty("hibernate.cache.infinispan.entity.cfg", "myentity-cache");
      p.setProperty("hibernate.cache.infinispan.entity.eviction.strategy", "FIFO");
      p.setProperty("hibernate.cache.infinispan.entity.eviction.max_entries", "10000");
      p.setProperty("hibernate.cache.infinispan.entity.expiration.wake_up_interval", "3000");
      TestInfinispanRegionFactory factory = createRegionFactory(p);
      try {
         factory.getCacheManager();
         assertFalse( isDefinedCache(factory, person) );
         EntityRegionImpl region = (EntityRegionImpl) factory.buildEntityRegion( person, p, MUTABLE_NON_VERSIONED );
         assertTrue( isDefinedCache(factory, person) );
         AdvancedCache cache = region.getCache();
         Configuration cacheCfg = cache.getCacheConfiguration();
         assertEquals(EvictionStrategy.LRU, cacheCfg.eviction().strategy());
         assertEquals(3000, cacheCfg.expiration().wakeUpInterval());
         assertEquals(10000, cacheCfg.eviction().maxEntries());
         assertEquals(60000, cacheCfg.expiration().lifespan());
         assertEquals(30000, cacheCfg.expiration().maxIdle());
      } finally {
         factory.stop();
      }
   }

   @Test
   public void testBuildImmutableEntityRegion() {
      AdvancedCache cache;
      Properties p = new Properties();
      TestInfinispanRegionFactory factory = createRegionFactory(p);
      try {
         factory.getCacheManager();
         EntityRegionImpl region = (EntityRegionImpl) factory.buildEntityRegion("com.acme.Address", p, IMMUTABLE_NON_VERSIONED);
         assertNull( factory.getBaseConfiguration( "com.acme.Address" ) );
         cache = region.getCache();
         Configuration cacheCfg = cache.getCacheConfiguration();
         assertEquals("Immutable entity should get non-transactional cache", TransactionMode.NON_TRANSACTIONAL, cacheCfg.transaction().transactionMode());
      } finally {
         factory.stop();
      }
   }

   @Test(expected = CacheException.class)
   public void testTimestampValidation() {
      final String timestamps = "org.hibernate.cache.spi.UpdateTimestampsCache";
      Properties p = createProperties();
      InputStream configStream = FileLookupFactory.newInstance().lookupFile(InfinispanRegionFactory.DEF_INFINISPAN_CONFIG_RESOURCE, getClass().getClassLoader());
      ConfigurationBuilderHolder cbh = new ParserRegistry().parse(configStream);
      DefaultCacheManager manager = new DefaultCacheManager(cbh, true);
      ConfigurationBuilder builder = new ConfigurationBuilder();
      builder.clustering().cacheMode(CacheMode.INVALIDATION_SYNC);
      manager.defineConfiguration( DEF_TIMESTAMPS_RESOURCE, builder.build() );
      try {
         InfinispanRegionFactory factory = createRegionFactory( manager, p, null );
         factory.start( CacheTestUtil.sfOptionsForStart(), p );
         TimestampsRegionImpl region = (TimestampsRegionImpl) factory.buildTimestampsRegion( timestamps, p );
         fail( "Should have failed saying that invalidation is not allowed for timestamp caches." );
      } finally {
         TestingUtil.killCacheManagers( manager );
      }
   }

   @Test
   public void testBuildDefaultTimestampsRegion() {
      final String timestamps = "org.hibernate.cache.spi.UpdateTimestampsCache";
      Properties p = createProperties();
      InfinispanRegionFactory factory = createRegionFactory(p);
      try {
         assertTrue(isDefinedCache(factory, DEF_TIMESTAMPS_RESOURCE));
         TimestampsRegionImpl region = (TimestampsRegionImpl) factory.buildTimestampsRegion(timestamps, p);
         AdvancedCache cache = region.getCache();
         assertEquals(timestamps, cache.getName());
         Configuration cacheCfg = cache.getCacheConfiguration();
         assertEquals( EvictionStrategy.NONE, cacheCfg.eviction().strategy() );
         assertEquals( CacheMode.REPL_ASYNC, cacheCfg.clustering().cacheMode() );
         assertFalse( cacheCfg.jmxStatistics().enabled() );
      } finally {
         factory.stop();
      }
   }
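
   // Region caches are defined lazily, so this checks whether a cache configuration for the
   // given name has already been registered with the underlying cache manager.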
   protected boolean isDefinedCache(InfinispanRegionFactory factory, String cacheName) {
      return factory.getCacheManager().getCacheConfiguration(cacheName) != null;
   }

   @Test
   public void testBuildDiffCacheNameTimestampsRegion() {
      final String timestamps = "org.hibernate.cache.spi.UpdateTimestampsCache";
      final String unrecommendedTimestamps = "unrecommended-timestamps";
      Properties p = createProperties();
      p.setProperty( TIMESTAMPS_CACHE_RESOURCE_PROP, unrecommendedTimestamps );
      TestInfinispanRegionFactory factory = createRegionFactory(p, (f, m) -> {
         ConfigurationBuilder builder = new ConfigurationBuilder();
         builder.clustering().stateTransfer().fetchInMemoryState(true);
         builder.clustering().cacheMode( CacheMode.REPL_SYNC );
         m.defineConfiguration(unrecommendedTimestamps, builder.build());
      });
      try {
         assertEquals(unrecommendedTimestamps, factory.getBaseConfiguration(DataType.TIMESTAMPS));
         TimestampsRegionImpl region = (TimestampsRegionImpl) factory.buildTimestampsRegion(timestamps, p);
         AdvancedCache cache = region.getCache();
         Configuration cacheCfg = cache.getCacheConfiguration();
         assertEquals(EvictionStrategy.NONE, cacheCfg.eviction().strategy());
         assertEquals(CacheMode.REPL_SYNC, cacheCfg.clustering().cacheMode());
         assertFalse( cacheCfg.storeAsBinary().enabled() );
         assertFalse(cacheCfg.jmxStatistics().enabled());
      } finally {
         factory.stop();
      }
   }

   @Test
   public void testBuildTimestampsRegionWithCacheNameOverride() {
      final String timestamps = "org.hibernate.cache.spi.UpdateTimestampsCache";
      final String myTimestampsCache = "mytimestamps-cache";
      Properties p = createProperties();
      p.setProperty(TIMESTAMPS_CACHE_RESOURCE_PROP, myTimestampsCache);
      InfinispanRegionFactory factory = createRegionFactory(p, (f, m) -> {
         ClusteringConfigurationBuilder builder = new ConfigurationBuilder().clustering().cacheMode(CacheMode.LOCAL);
         m.defineConfiguration(myTimestampsCache, builder.build());
      });
      try {
         TimestampsRegionImpl region = (TimestampsRegionImpl) factory.buildTimestampsRegion(timestamps, p);
         assertTrue(isDefinedCache(factory, timestamps));
         // default timestamps cache is async replicated
         assertEquals(CacheMode.LOCAL, region.getCache().getCacheConfiguration().clustering().cacheMode());
      } finally {
         factory.stop();
      }
   }

   @Test(expected = CacheException.class)
   public void testBuildTimestampsRegionWithFifoEvictionOverride() {
      final String timestamps = "org.hibernate.cache.spi.UpdateTimestampsCache";
      final String myTimestampsCache = "mytimestamps-cache";
      Properties p = createProperties();
      p.setProperty(TIMESTAMPS_CACHE_RESOURCE_PROP, myTimestampsCache);
      p.setProperty("hibernate.cache.infinispan.timestamps.eviction.strategy", "FIFO");
      p.setProperty("hibernate.cache.infinispan.timestamps.eviction.max_entries", "10000");
      p.setProperty("hibernate.cache.infinispan.timestamps.expiration.wake_up_interval", "3000");
      InfinispanRegionFactory factory = null;
      try {
         factory = createRegionFactory(p);
         factory.buildTimestampsRegion(timestamps, p);
      } finally {
         if (factory != null) factory.stop();
      }
   }

   @Test
   public void testBuildTimestampsRegionWithNoneEvictionOverride() {
      final String timestamps = "org.hibernate.cache.spi.UpdateTimestampsCache";
      final String timestampsNoEviction = "timestamps-no-eviction";
      Properties p = createProperties();
      p.setProperty("hibernate.cache.infinispan.timestamps.cfg", timestampsNoEviction);
      p.setProperty("hibernate.cache.infinispan.timestamps.eviction.strategy", "NONE");
      p.setProperty("hibernate.cache.infinispan.timestamps.eviction.max_entries", "0");
      p.setProperty("hibernate.cache.infinispan.timestamps.expiration.wake_up_interval", "3000");
      InfinispanRegionFactory factory = createRegionFactory(p);
      try {
         TimestampsRegionImpl region = (TimestampsRegionImpl) factory.buildTimestampsRegion( timestamps, p );
         assertTrue( isDefinedCache(factory, timestamps) );
         assertEquals(3000, region.getCache().getCacheConfiguration().expiration().wakeUpInterval());
      } finally {
         factory.stop();
      }
   }

   @Test
   public void testBuildQueryRegion() {
      final String query = "org.hibernate.cache.internal.StandardQueryCache";
      Properties p = createProperties();
      InfinispanRegionFactory factory = createRegionFactory(p);
      try {
         assertTrue(isDefinedCache(factory, "local-query"));
         QueryResultsRegionImpl region = (QueryResultsRegionImpl) factory.buildQueryResultsRegion(query, p);
         AdvancedCache cache = region.getCache();
         Configuration cacheCfg = cache.getCacheConfiguration();
         assertEquals( CacheMode.LOCAL, cacheCfg.clustering().cacheMode() );
         assertFalse( cacheCfg.jmxStatistics().enabled() );
      } finally {
         factory.stop();
      }
   }

   @Test
   public void testBuildQueryRegionWithCustomRegionName() {
      final String queryRegionName = "myquery";
      Properties p = createProperties();
      p.setProperty("hibernate.cache.infinispan.myquery.cfg", "timestamps-none-eviction");
      p.setProperty("hibernate.cache.infinispan.myquery.eviction.strategy", "LIRS");
      p.setProperty("hibernate.cache.infinispan.myquery.expiration.wake_up_interval", "2222");
      p.setProperty("hibernate.cache.infinispan.myquery.eviction.max_entries", "11111");
      TestInfinispanRegionFactory factory = createRegionFactory(p);
      try {
         assertTrue(isDefinedCache(factory, "local-query"));
         QueryResultsRegionImpl region = (QueryResultsRegionImpl) factory.buildQueryResultsRegion(queryRegionName, p);
         assertNotNull(factory.getBaseConfiguration(queryRegionName));
         assertTrue(isDefinedCache(factory, queryRegionName));
         AdvancedCache cache = region.getCache();
         Configuration cacheCfg = cache.getCacheConfiguration();
         assertEquals(EvictionStrategy.LIRS, cacheCfg.eviction().strategy());
         assertEquals(2222, cacheCfg.expiration().wakeUpInterval());
         assertEquals( 11111, cacheCfg.eviction().maxEntries() );
      } finally {
         factory.stop();
      }
   }

   @Test
   public void testEnableStatistics() {
      Properties p = createProperties();
      p.setProperty("hibernate.cache.infinispan.statistics", "true");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.expiration.lifespan", "60000");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.expiration.max_idle", "30000");
      p.setProperty("hibernate.cache.infinispan.entity.cfg", "myentity-cache");
      p.setProperty("hibernate.cache.infinispan.entity.eviction.strategy", "FIFO");
      p.setProperty("hibernate.cache.infinispan.entity.expiration.wake_up_interval", "3000");
      p.setProperty("hibernate.cache.infinispan.entity.eviction.max_entries", "10000");
      InfinispanRegionFactory factory = createRegionFactory(p);
      try {
         EmbeddedCacheManager manager = factory.getCacheManager();
         assertTrue(manager.getCacheManagerConfiguration().globalJmxStatistics().enabled());
         EntityRegionImpl region = (EntityRegionImpl) factory.buildEntityRegion("com.acme.Address", p, MUTABLE_NON_VERSIONED);
         AdvancedCache cache = region.getCache();
         assertTrue(cache.getCacheConfiguration().jmxStatistics().enabled());

         region = (EntityRegionImpl) factory.buildEntityRegion("com.acme.Person", p, MUTABLE_NON_VERSIONED);
         cache = region.getCache();
         assertTrue(cache.getCacheConfiguration().jmxStatistics().enabled());

         final String query = "org.hibernate.cache.internal.StandardQueryCache";
         QueryResultsRegionImpl queryRegion = (QueryResultsRegionImpl)
               factory.buildQueryResultsRegion(query, p);
         cache = queryRegion.getCache();
         assertTrue(cache.getCacheConfiguration().jmxStatistics().enabled());

         final String timestamps = "org.hibernate.cache.spi.UpdateTimestampsCache";
         ConfigurationBuilder builder = new ConfigurationBuilder();
         builder.clustering().stateTransfer().fetchInMemoryState(true);
         manager.defineConfiguration("timestamps", builder.build());
         TimestampsRegionImpl timestampsRegion = (TimestampsRegionImpl)
               factory.buildTimestampsRegion(timestamps, p);
         cache = timestampsRegion.getCache();
         assertTrue(cache.getCacheConfiguration().jmxStatistics().enabled());

         CollectionRegionImpl collectionRegion = (CollectionRegionImpl)
               factory.buildCollectionRegion("com.acme.Person.addresses", p, MUTABLE_NON_VERSIONED);
         cache = collectionRegion.getCache();
         assertTrue(cache.getCacheConfiguration().jmxStatistics().enabled());
      } finally {
         factory.stop();
      }
   }

   @Test
   public void testDisableStatistics() {
      Properties p = createProperties();
      p.setProperty("hibernate.cache.infinispan.statistics", "false");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.expiration.lifespan", "60000");
      p.setProperty("hibernate.cache.infinispan.com.acme.Person.expiration.max_idle", "30000");
      p.setProperty("hibernate.cache.infinispan.entity.cfg", "myentity-cache");
      p.setProperty("hibernate.cache.infinispan.entity.eviction.strategy", "FIFO");
      p.setProperty("hibernate.cache.infinispan.entity.expiration.wake_up_interval", "3000");
      p.setProperty("hibernate.cache.infinispan.entity.eviction.max_entries", "10000");
      InfinispanRegionFactory factory = createRegionFactory(p);
      try {
         EntityRegionImpl region = (EntityRegionImpl) factory.buildEntityRegion("com.acme.Address", p, MUTABLE_NON_VERSIONED);
         AdvancedCache cache = region.getCache();
         assertFalse( cache.getCacheConfiguration().jmxStatistics().enabled() );

         region = (EntityRegionImpl) factory.buildEntityRegion("com.acme.Person", p, MUTABLE_NON_VERSIONED);
         cache = region.getCache();
         assertFalse( cache.getCacheConfiguration().jmxStatistics().enabled() );

         final String query = "org.hibernate.cache.internal.StandardQueryCache";
         QueryResultsRegionImpl queryRegion = (QueryResultsRegionImpl) factory.buildQueryResultsRegion(query, p);
         cache = queryRegion.getCache();
         assertFalse( cache.getCacheConfiguration().jmxStatistics().enabled() );

         final String timestamps = "org.hibernate.cache.spi.UpdateTimestampsCache";
         ConfigurationBuilder builder = new ConfigurationBuilder();
         builder.clustering().stateTransfer().fetchInMemoryState(true);
         factory.getCacheManager().defineConfiguration( "timestamps", builder.build() );
         TimestampsRegionImpl timestampsRegion = (TimestampsRegionImpl)
               factory.buildTimestampsRegion(timestamps, p);
         cache = timestampsRegion.getCache();
         assertFalse( cache.getCacheConfiguration().jmxStatistics().enabled() );

         CollectionRegionImpl collectionRegion = (CollectionRegionImpl)
               factory.buildCollectionRegion("com.acme.Person.addresses", p, MUTABLE_NON_VERSIONED);
         cache = collectionRegion.getCache();
         assertFalse( cache.getCacheConfiguration().jmxStatistics().enabled() );
      } finally {
         factory.stop();
      }
   }
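
   // The pending-puts cache holds put-from-load bookkeeping; the default definition is expected
   // to be a local, non-transactional, expiring "simple" cache template (asserted below).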
   @Test
   public void testDefaultPendingPutsCache() {
      Properties p = createProperties();
      InfinispanRegionFactory factory = createRegionFactory(p);
      try {
         Configuration ppConfig = factory.getCacheManager().getCacheConfiguration(DEF_PENDING_PUTS_RESOURCE);

         assertTrue(ppConfig.isTemplate());
         assertFalse(ppConfig.clustering().cacheMode().isClustered());
         assertTrue(ppConfig.simpleCache());
         assertEquals(TransactionMode.NON_TRANSACTIONAL, ppConfig.transaction().transactionMode());
         assertEquals(60000, ppConfig.expiration().maxIdle());
         assertFalse(ppConfig.jmxStatistics().enabled());
         assertFalse(ppConfig.jmxStatistics().available());
      } finally {
         factory.stop();
      }
   }

   @Test
   public void testCustomPendingPutsCache() {
      Properties p = createProperties();
      p.setProperty(INFINISPAN_CONFIG_RESOURCE_PROP, "alternative-infinispan-configs.xml");
      InfinispanRegionFactory factory = createRegionFactory(p);
      try {
         Configuration ppConfig = factory.getCacheManager().getCacheConfiguration(DEF_PENDING_PUTS_RESOURCE);
         assertEquals(120000, ppConfig.expiration().maxIdle());
      } finally {
         factory.stop();
      }
   }
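
   // Helpers: build a TestInfinispanRegionFactory, optionally around a caller-provided cache
   // manager, and run an optional hook right after the cache manager is created.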
   private TestInfinispanRegionFactory createRegionFactory(Properties p) {
      return createRegionFactory(null, p, null);
   }

   private TestInfinispanRegionFactory createRegionFactory(Properties p,
         BiConsumer<TestInfinispanRegionFactory, EmbeddedCacheManager> hook) {
      return createRegionFactory(null, p, hook);
   }

   private TestInfinispanRegionFactory createRegionFactory(final EmbeddedCacheManager manager, Properties p,
         BiConsumer<TestInfinispanRegionFactory, EmbeddedCacheManager> hook) {
      final TestInfinispanRegionFactory factory = new TestInfinispanRegionFactory(manager, hook);
      factory.start( CacheTestUtil.sfOptionsForStart(), p );
      return factory;
   }

   private static Properties createProperties() {
      final Properties properties = new Properties();
      // If configured in the environment, add configuration file name to properties.
      final String cfgFileName =
            (String) Environment.getProperties().get( INFINISPAN_CONFIG_RESOURCE_PROP );
      if ( cfgFileName != null ) {
         properties.put( INFINISPAN_CONFIG_RESOURCE_PROP, cfgFileName );
      }
      return properties;
   }
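
   // Test subclass that exposes the resolved base configurations and overrides, and that wires
   // the TransactionManager from a standalone JBoss JTA platform for the transactional caches.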
   private static class TestInfinispanRegionFactory extends org.hibernate.test.cache.infinispan.util.TestInfinispanRegionFactory {
      private final EmbeddedCacheManager providedManager;
      private final BiConsumer<TestInfinispanRegionFactory, EmbeddedCacheManager> afterCacheManagerCreated;

      public TestInfinispanRegionFactory(EmbeddedCacheManager providedManager,
            BiConsumer<TestInfinispanRegionFactory, EmbeddedCacheManager> afterCacheManagerCreated) {
         super(new Properties());
         this.providedManager = providedManager;
         this.afterCacheManagerCreated = afterCacheManagerCreated;
      }

      @Override
      protected org.infinispan.transaction.lookup.TransactionManagerLookup createTransactionManagerLookup(SessionFactoryOptions settings, Properties properties) {
         return new HibernateTransactionManagerLookup(null, null) {
            @Override
            public TransactionManager getTransactionManager() throws Exception {
               AbstractJtaPlatform jta = new JBossStandAloneJtaPlatform();
               jta.injectServices(ServiceRegistryBuilder.buildServiceRegistry());
               return jta.getTransactionManager();
            }
         };
      }

      @Override
      protected EmbeddedCacheManager createCacheManager(Properties properties, ServiceRegistry serviceRegistry) throws CacheException {
         EmbeddedCacheManager m;
         if (providedManager != null)
            m = providedManager;
         else
            m = super.createCacheManager(properties, serviceRegistry);
         // since data type cache configuration templates are defined when cache manager is created,
         // we have to use hooks and set the configuration before the whole factory starts
         if (afterCacheManagerCreated != null) {
            afterCacheManagerCreated.accept(this, m);
         }
         return m;
      }

      /* Used for testing */
      public String getBaseConfiguration(String regionName) {
         return baseConfigurations.get(regionName);
      }

      /* Used for testing */
      public String getBaseConfiguration(DataType dataType) {
         return baseConfigurations.get(dataType.key);
      }

      /* Used for testing */
      public Configuration getConfigurationOverride(String regionName) {
         return configOverrides.get(regionName).build(false);
      }

      /* Used for testing */
      public Configuration getConfigurationOverride(DataType dataType) {
         return configOverrides.get(dataType.key).build(false);
      }
   }
}
@@ -1,39 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.test.cache.infinispan;

import org.hibernate.boot.registry.StandardServiceRegistry;
import org.hibernate.boot.registry.StandardServiceRegistryBuilder;
import org.hibernate.cache.infinispan.JndiInfinispanRegionFactory;
import org.hibernate.cache.spi.RegionFactory;
import org.hibernate.cfg.AvailableSettings;

import org.junit.Test;

import static org.hibernate.testing.junit4.ExtraAssertions.assertTyping;

/**
 * // TODO: Document this
 *
 * @author Galder Zamarreño
 * @since 3.5
 */
public class JndiInfinispanRegionFactoryTestCase {
   @Test
   public void testConstruction() {
      StandardServiceRegistry ssr = new StandardServiceRegistryBuilder()
            .applySetting( AvailableSettings.CACHE_REGION_FACTORY, JndiInfinispanRegionFactory.class.getName() )
            .build();
      try {
         RegionFactory regionFactory = ssr.getService( RegionFactory.class );
         assertTyping( JndiInfinispanRegionFactory.class, regionFactory );
      }
      finally {
         StandardServiceRegistryBuilder.destroy( ssr );
      }
   }
}
@@ -1,134 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.test.cache.infinispan;

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

import org.hibernate.boot.registry.StandardServiceRegistry;
import org.hibernate.boot.registry.StandardServiceRegistryBuilder;
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
import org.hibernate.cache.infinispan.collection.CollectionRegionImpl;
import org.hibernate.cache.infinispan.entity.EntityRegionImpl;
import org.hibernate.cache.spi.CacheDataDescription;

import org.hibernate.test.cache.infinispan.util.CacheTestUtil;

/**
 * Defines the environment for a node.
 *
 * @author Steve Ebersole
 */
public class NodeEnvironment {
   private final StandardServiceRegistryBuilder ssrb;
   private final Properties properties;

   private StandardServiceRegistry serviceRegistry;
   private InfinispanRegionFactory regionFactory;

   private Map<String, EntityRegionImpl> entityRegionMap;
   private Map<String, CollectionRegionImpl> collectionRegionMap;

   public NodeEnvironment(StandardServiceRegistryBuilder ssrb) {
      this.ssrb = ssrb;
      properties = CacheTestUtil.toProperties( ssrb.getSettings() );
   }

   public StandardServiceRegistry getServiceRegistry() {
      return serviceRegistry;
   }

   public EntityRegionImpl getEntityRegion(String name, CacheDataDescription cacheDataDescription) {
      if (entityRegionMap == null) {
         entityRegionMap = new HashMap<String, EntityRegionImpl>();
         return buildAndStoreEntityRegion(name, cacheDataDescription);
      }
      EntityRegionImpl region = entityRegionMap.get(name);
      if (region == null) {
         region = buildAndStoreEntityRegion(name, cacheDataDescription);
      }
      return region;
   }

   private EntityRegionImpl buildAndStoreEntityRegion(String name, CacheDataDescription cacheDataDescription) {
      EntityRegionImpl region = (EntityRegionImpl) regionFactory.buildEntityRegion(
            name,
            properties,
            cacheDataDescription
      );
      entityRegionMap.put(name, region);
      return region;
   }

   public CollectionRegionImpl getCollectionRegion(String name, CacheDataDescription cacheDataDescription) {
      if (collectionRegionMap == null) {
         collectionRegionMap = new HashMap<String, CollectionRegionImpl>();
         return buildAndStoreCollectionRegion(name, cacheDataDescription);
      }
      CollectionRegionImpl region = collectionRegionMap.get(name);
      if (region == null) {
         region = buildAndStoreCollectionRegion(name, cacheDataDescription);
         collectionRegionMap.put(name, region);
      }
      return region;
   }

   private CollectionRegionImpl buildAndStoreCollectionRegion(String name, CacheDataDescription cacheDataDescription) {
      CollectionRegionImpl region;
      region = (CollectionRegionImpl) regionFactory.buildCollectionRegion(
            name,
            properties,
            cacheDataDescription
      );
      return region;
   }
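
   // prepare() builds the service registry and starts the region factory; release() stops every
   // cached region, then the factory, and finally destroys the service registry.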
   public void prepare() throws Exception {
      serviceRegistry = ssrb.build();
      regionFactory = CacheTestUtil.startRegionFactory( serviceRegistry );
   }

   public void release() throws Exception {
      try {
         if (entityRegionMap != null) {
            for (EntityRegionImpl region : entityRegionMap.values()) {
               try {
                  region.getCache().stop();
               } catch (Exception e) {
                  // Ignore...
               }
            }
            entityRegionMap.clear();
         }
         if (collectionRegionMap != null) {
            for (CollectionRegionImpl reg : collectionRegionMap.values()) {
               try {
                  reg.getCache().stop();
               } catch (Exception e) {
                  // Ignore...
               }
            }
            collectionRegionMap.clear();
         }
      }
      finally {
         try {
            if (regionFactory != null) {
               // Currently the RegionFactory is shutdown by its registration
               // with the CacheTestSetup from CacheTestUtil when built
               regionFactory.stop();
            }
         }
         finally {
            if (serviceRegistry != null) {
               StandardServiceRegistryBuilder.destroy( serviceRegistry );
            }
         }
      }
   }
}
@@ -1,529 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.test.cache.infinispan.access;

import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import javax.transaction.TransactionManager;

import org.hibernate.cache.infinispan.InfinispanRegionFactory;
import org.hibernate.cache.infinispan.access.PutFromLoadValidator;
import org.hibernate.cache.infinispan.util.InfinispanMessageLogger;
import org.hibernate.engine.spi.SharedSessionContractImplementor;

import org.hibernate.test.cache.infinispan.util.TestInfinispanRegionFactory;
import org.hibernate.test.cache.infinispan.util.TestTimeService;
import org.hibernate.testing.AfterClassOnce;
import org.hibernate.testing.BeforeClassOnce;
import org.hibernate.testing.TestForIssue;
import org.hibernate.test.cache.infinispan.functional.cluster.DualNodeJtaTransactionManagerImpl;
import org.hibernate.test.cache.infinispan.util.CacheTestUtil;
import org.hibernate.testing.junit4.CustomRunner;
import org.infinispan.AdvancedCache;
import org.infinispan.test.fwk.TestResourceTracker;
import org.junit.After;
import org.junit.Test;

import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.junit.runner.RunWith;

import static org.infinispan.test.Exceptions.expectException;
import static org.infinispan.test.TestingUtil.withTx;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;

/**
 * Tests of {@link PutFromLoadValidator}.
 *
 * @author Brian Stansberry
 * @author Galder Zamarreño
 * @version $Revision: $
 */
@RunWith(CustomRunner.class)
public class PutFromLoadValidatorUnitTest {

   private static final InfinispanMessageLogger log = InfinispanMessageLogger.Provider.getLog(
         PutFromLoadValidatorUnitTest.class);
   private static final TestTimeService TIME_SERVICE = new TestTimeService();

   private Object KEY1 = "KEY1";

   private TransactionManager tm;
   private EmbeddedCacheManager cm;
   private AdvancedCache<Object, Object> cache;
   private List<Runnable> cleanup = new ArrayList<>();

   @BeforeClassOnce
   public void setUp() throws Exception {
      TestResourceTracker.testStarted(getClass().getSimpleName());
      tm = DualNodeJtaTransactionManagerImpl.getInstance("test");
      cm = TestCacheManagerFactory.createCacheManager(true);
      cache = cm.getCache().getAdvancedCache();
   }

   @AfterClassOnce
   public void stop() {
      tm = null;
      cm.stop();
      TestResourceTracker.testFinished(getClass().getSimpleName());
   }

   @After
   public void tearDown() throws Exception {
      cleanup.forEach(Runnable::run);
      cleanup.clear();
      try {
         DualNodeJtaTransactionManagerImpl.cleanupTransactions();
      }
      finally {
         DualNodeJtaTransactionManagerImpl.cleanupTransactionManagers();
      }
      cache.clear();
      cm.getCache(cache.getName() + "-" + InfinispanRegionFactory.DEF_PENDING_PUTS_RESOURCE).clear();
   }
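
   // Builds a region factory bound to the shared cache manager and to the test's TestTimeService
   // clock (TIME_SERVICE), so the tests control the timestamps seen by the validator.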
   private static InfinispanRegionFactory regionFactory(EmbeddedCacheManager cm) {
      Properties properties = new Properties();
      properties.put(TestInfinispanRegionFactory.TIME_SERVICE, TIME_SERVICE);
      InfinispanRegionFactory regionFactory = new TestInfinispanRegionFactory(properties);
      regionFactory.setCacheManager(cm);
      regionFactory.start(CacheTestUtil.sfOptionsForStart(), properties);
      return regionFactory;
   }

   @Test
   public void testNakedPut() throws Exception {
      nakedPutTest(false);
   }

   @Test
   public void testNakedPutTransactional() throws Exception {
      nakedPutTest(true);
   }

   private void nakedPutTest(final boolean transactional) throws Exception {
      PutFromLoadValidator testee = new PutFromLoadValidator(cache, regionFactory(cm));
      exec(transactional, new NakedPut(testee, true));
   }

   @Test
   public void testRegisteredPut() throws Exception {
      registeredPutTest(false);
   }

   @Test
   public void testRegisteredPutTransactional() throws Exception {
      registeredPutTest(true);
   }

   private void registeredPutTest(final boolean transactional) throws Exception {
      PutFromLoadValidator testee = new PutFromLoadValidator(cache, regionFactory(cm));
      exec(transactional, new RegularPut(testee));
   }

   @Test
   public void testNakedPutAfterKeyRemoval() throws Exception {
      nakedPutAfterRemovalTest(false, false);
   }

   @Test
   public void testNakedPutAfterKeyRemovalTransactional() throws Exception {
      nakedPutAfterRemovalTest(true, false);
   }

   @Test
   public void testNakedPutAfterRegionRemoval() throws Exception {
      nakedPutAfterRemovalTest(false, true);
   }

   @Test
   public void testNakedPutAfterRegionRemovalTransactional() throws Exception {
      nakedPutAfterRemovalTest(true, true);
   }

   private void nakedPutAfterRemovalTest(final boolean transactional,
         final boolean removeRegion) throws Exception {
      PutFromLoadValidator testee = new PutFromLoadValidator(cache, regionFactory(cm));
      Invalidation invalidation = new Invalidation(testee, removeRegion);
      // the naked put can succeed because it has txTimestamp after invalidation
      NakedPut nakedPut = new NakedPut(testee, true);
      exec(transactional, invalidation, nakedPut);
   }

   @Test
   public void testRegisteredPutAfterKeyRemoval() throws Exception {
      registeredPutAfterRemovalTest(false, false);
   }

   @Test
   public void testRegisteredPutAfterKeyRemovalTransactional() throws Exception {
      registeredPutAfterRemovalTest(true, false);
   }

   @Test
   public void testRegisteredPutAfterRegionRemoval() throws Exception {
      registeredPutAfterRemovalTest(false, true);
   }

   @Test
   public void testRegisteredPutAfterRegionRemovalTransactional() throws Exception {
      registeredPutAfterRemovalTest(true, true);
   }

   private void registeredPutAfterRemovalTest(final boolean transactional,
         final boolean removeRegion) throws Exception {
      PutFromLoadValidator testee = new PutFromLoadValidator(cache, regionFactory(cm));
      Invalidation invalidation = new Invalidation(testee, removeRegion);
      RegularPut regularPut = new RegularPut(testee);
      exec(transactional, invalidation, regularPut);
   }
|
||||
@Test
|
||||
public void testRegisteredPutWithInterveningKeyRemoval() throws Exception {
|
||||
registeredPutWithInterveningRemovalTest(false, false);
|
||||
}
|
||||
@Test
|
||||
public void testRegisteredPutWithInterveningKeyRemovalTransactional() throws Exception {
|
||||
registeredPutWithInterveningRemovalTest(true, false);
|
||||
}
|
||||
@Test
|
||||
public void testRegisteredPutWithInterveningRegionRemoval() throws Exception {
|
||||
registeredPutWithInterveningRemovalTest(false, true);
|
||||
}
|
||||
@Test
|
||||
public void testRegisteredPutWithInterveningRegionRemovalTransactional() throws Exception {
|
||||
registeredPutWithInterveningRemovalTest(true, true);
|
||||
}
|
||||
|
||||
private void registeredPutWithInterveningRemovalTest(
|
||||
final boolean transactional, final boolean removeRegion)
|
||||
throws Exception {
|
||||
PutFromLoadValidator testee = new PutFromLoadValidator(cache, regionFactory(cm));
|
||||
try {
|
||||
long txTimestamp = TIME_SERVICE.wallClockTime();
|
||||
if (transactional) {
|
||||
tm.begin();
|
||||
}
|
||||
SharedSessionContractImplementor session1 = mock(SharedSessionContractImplementor.class);
|
||||
SharedSessionContractImplementor session2 = mock(SharedSessionContractImplementor.class);
|
||||
testee.registerPendingPut(session1, KEY1, txTimestamp);
|
||||
if (removeRegion) {
|
||||
testee.beginInvalidatingRegion();
|
||||
} else {
|
||||
testee.beginInvalidatingKey(session2, KEY1);
|
||||
}
|
||||
|
||||
PutFromLoadValidator.Lock lock = testee.acquirePutFromLoadLock(session1, KEY1, txTimestamp);
|
||||
try {
|
||||
assertNull(lock);
|
||||
}
|
||||
finally {
|
||||
if (lock != null) {
|
||||
testee.releasePutFromLoadLock(KEY1, lock);
|
||||
}
|
||||
if (removeRegion) {
|
||||
testee.endInvalidatingRegion();
|
||||
} else {
|
||||
testee.endInvalidatingKey(session2, KEY1);
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMultipleRegistrations() throws Exception {
|
||||
multipleRegistrationtest(false);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMultipleRegistrationsTransactional() throws Exception {
|
||||
multipleRegistrationtest(true);
|
||||
}
|
||||
|
||||
private void multipleRegistrationtest(final boolean transactional) throws Exception {
|
||||
final PutFromLoadValidator testee = new PutFromLoadValidator(cache, regionFactory(cm));
|
||||
|
||||
final CountDownLatch registeredLatch = new CountDownLatch(3);
|
||||
final CountDownLatch finishedLatch = new CountDownLatch(3);
|
||||
final AtomicInteger success = new AtomicInteger();
|
||||
|
||||
Runnable r = () -> {
|
||||
try {
|
||||
long txTimestamp = TIME_SERVICE.wallClockTime();
|
||||
if (transactional) {
|
||||
tm.begin();
|
||||
}
|
||||
SharedSessionContractImplementor session = mock (SharedSessionContractImplementor.class);
|
||||
testee.registerPendingPut(session, KEY1, txTimestamp);
|
||||
registeredLatch.countDown();
|
||||
registeredLatch.await(5, TimeUnit.SECONDS);
|
||||
PutFromLoadValidator.Lock lock = testee.acquirePutFromLoadLock(session, KEY1, txTimestamp);
|
||||
if (lock != null) {
|
||||
try {
|
||||
log.trace("Put from load lock acquired for key = " + KEY1);
|
||||
success.incrementAndGet();
|
||||
} finally {
|
||||
testee.releasePutFromLoadLock(KEY1, lock);
|
||||
}
|
||||
} else {
|
||||
log.trace("Unable to acquired putFromLoad lock for key = " + KEY1);
|
||||
}
|
||||
finishedLatch.countDown();
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
};
|
||||
|
||||
ExecutorService executor = Executors.newFixedThreadPool(3);
|
||||
cleanup.add(() -> executor.shutdownNow());
|
||||
|
||||
// Start with a removal so the "isPutValid" calls will fail if
|
||||
// any of the concurrent activity isn't handled properly
|
||||
|
||||
testee.beginInvalidatingRegion();
|
||||
testee.endInvalidatingRegion();
|
||||
TIME_SERVICE.advance(1);
|
||||
|
||||
// Do the registration + isPutValid calls
|
||||
executor.execute(r);
|
||||
executor.execute(r);
|
||||
executor.execute(r);
|
||||
|
||||
assertTrue(finishedLatch.await(5, TimeUnit.SECONDS));
|
||||
|
||||
assertEquals("All threads succeeded", 3, success.get());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInvalidateKeyBlocksForInProgressPut() throws Exception {
|
||||
invalidationBlocksForInProgressPutTest(true);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInvalidateRegionBlocksForInProgressPut() throws Exception {
|
||||
invalidationBlocksForInProgressPutTest(false);
|
||||
}
|
||||
|
||||
private void invalidationBlocksForInProgressPutTest(final boolean keyOnly) throws Exception {
|
||||
final PutFromLoadValidator testee = new PutFromLoadValidator(cache, regionFactory(cm));
|
||||
final CountDownLatch removeLatch = new CountDownLatch(1);
|
||||
final CountDownLatch pferLatch = new CountDownLatch(1);
|
||||
final AtomicReference<Object> cache = new AtomicReference<>("INITIAL");
|
||||
|
||||
Callable<Boolean> pferCallable = () -> {
|
||||
long txTimestamp = TIME_SERVICE.wallClockTime();
|
||||
SharedSessionContractImplementor session = mock (SharedSessionContractImplementor.class);
|
||||
testee.registerPendingPut(session, KEY1, txTimestamp);
|
||||
PutFromLoadValidator.Lock lock = testee.acquirePutFromLoadLock(session, KEY1, txTimestamp);
|
||||
if (lock != null) {
|
||||
try {
|
||||
removeLatch.countDown();
|
||||
pferLatch.await();
|
||||
cache.set("PFER");
|
||||
return Boolean.TRUE;
|
||||
}
|
||||
finally {
|
||||
testee.releasePutFromLoadLock(KEY1, lock);
|
||||
}
|
||||
}
|
||||
return Boolean.FALSE;
|
||||
};
|
||||
|
||||
Callable<Void> invalidateCallable = () -> {
|
||||
removeLatch.await();
|
||||
if (keyOnly) {
|
||||
SharedSessionContractImplementor session = mock (SharedSessionContractImplementor.class);
|
||||
testee.beginInvalidatingKey(session, KEY1);
|
||||
} else {
|
||||
testee.beginInvalidatingRegion();
|
||||
}
|
||||
cache.set(null);
|
||||
return null;
|
||||
};
|
||||
|
||||
ExecutorService executor = Executors.newCachedThreadPool();
|
||||
cleanup.add(() -> executor.shutdownNow());
|
||||
Future<Boolean> pferFuture = executor.submit(pferCallable);
|
||||
Future<Void> invalidateFuture = executor.submit(invalidateCallable);
|
||||
|
||||
expectException(TimeoutException.class, () -> invalidateFuture.get(1, TimeUnit.SECONDS));
|
||||
|
||||
pferLatch.countDown();
|
||||
|
||||
assertTrue(pferFuture.get(5, TimeUnit.SECONDS));
|
||||
invalidateFuture.get(5, TimeUnit.SECONDS);
|
||||
|
||||
assertNull(cache.get());
|
||||
}
|
||||
|
||||
protected void exec(boolean transactional, Callable<?>... callables) {
|
||||
try {
|
||||
if (transactional) {
|
||||
for (Callable<?> c : callables) {
|
||||
withTx(tm, c);
|
||||
}
|
||||
} else {
|
||||
for (Callable<?> c : callables) {
|
||||
c.call();
|
||||
}
|
||||
}
|
||||
} catch (RuntimeException e) {
|
||||
throw e;
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
private class Invalidation implements Callable<Void> {
|
||||
private PutFromLoadValidator putFromLoadValidator;
|
||||
private boolean removeRegion;
|
||||
|
||||
public Invalidation(PutFromLoadValidator putFromLoadValidator, boolean removeRegion) {
|
||||
this.putFromLoadValidator = putFromLoadValidator;
|
||||
this.removeRegion = removeRegion;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Void call() throws Exception {
|
||||
if (removeRegion) {
|
||||
boolean success = putFromLoadValidator.beginInvalidatingRegion();
|
||||
assertTrue(success);
|
||||
putFromLoadValidator.endInvalidatingRegion();
|
||||
} else {
|
||||
SharedSessionContractImplementor session = mock (SharedSessionContractImplementor.class);
|
||||
boolean success = putFromLoadValidator.beginInvalidatingKey(session, KEY1);
|
||||
assertTrue(success);
|
||||
success = putFromLoadValidator.endInvalidatingKey(session, KEY1);
|
||||
assertTrue(success);
|
||||
}
|
||||
// if we go for the timestamp-based approach, invalidation in the same millisecond
|
||||
// as the registerPendingPut/acquirePutFromLoad lock results in failure.
|
||||
TIME_SERVICE.advance(1);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private class RegularPut implements Callable<Void> {
|
||||
private PutFromLoadValidator putFromLoadValidator;
|
||||
|
||||
public RegularPut(PutFromLoadValidator putFromLoadValidator) {
|
||||
this.putFromLoadValidator = putFromLoadValidator;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Void call() throws Exception {
|
||||
try {
|
||||
long txTimestamp = TIME_SERVICE.wallClockTime(); // this should be acquired before UserTransaction.begin()
|
||||
SharedSessionContractImplementor session = mock (SharedSessionContractImplementor.class);
|
||||
putFromLoadValidator.registerPendingPut(session, KEY1, txTimestamp);
|
||||
|
||||
PutFromLoadValidator.Lock lock = putFromLoadValidator.acquirePutFromLoadLock(session, KEY1, txTimestamp);
|
||||
try {
|
||||
assertNotNull(lock);
|
||||
} finally {
|
||||
if (lock != null) {
|
||||
putFromLoadValidator.releasePutFromLoadLock(KEY1, lock);
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private class NakedPut implements Callable<Void> {
|
||||
private final PutFromLoadValidator testee;
|
||||
private final boolean expectSuccess;
|
||||
|
||||
public NakedPut(PutFromLoadValidator testee, boolean expectSuccess) {
|
||||
this.testee = testee;
|
||||
this.expectSuccess = expectSuccess;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Void call() throws Exception {
|
||||
try {
|
||||
long txTimestamp = TIME_SERVICE.wallClockTime(); // this should be acquired before UserTransaction.begin()
|
||||
SharedSessionContractImplementor session = mock (SharedSessionContractImplementor.class);
|
||||
PutFromLoadValidator.Lock lock = testee.acquirePutFromLoadLock(session, KEY1, txTimestamp);
|
||||
try {
|
||||
if (expectSuccess) {
|
||||
assertNotNull(lock);
|
||||
} else {
|
||||
assertNull(lock);
|
||||
}
|
||||
}
|
||||
finally {
|
||||
if (lock != null) {
|
||||
testee.releasePutFromLoadLock(KEY1, lock);
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
@TestForIssue(jiraKey = "HHH-9928")
|
||||
public void testGetForNullReleasePuts() {
|
||||
ConfigurationBuilder cb = new ConfigurationBuilder();
|
||||
cb.simpleCache(true).expiration().maxIdle(500);
|
||||
Configuration ppCfg = cb.build();
|
||||
|
||||
InfinispanRegionFactory regionFactory = mock(InfinispanRegionFactory.class);
|
||||
doReturn(ppCfg).when(regionFactory).getPendingPutsCacheConfiguration();
|
||||
doAnswer(invocation -> TIME_SERVICE.wallClockTime()).when(regionFactory).nextTimestamp();
|
||||
|
||||
PutFromLoadValidator testee = new PutFromLoadValidator(cache, regionFactory, cm);
|
||||
|
||||
for (int i = 0; i < 100; ++i) {
|
||||
try {
|
||||
withTx(tm, () -> {
|
||||
SharedSessionContractImplementor session = mock (SharedSessionContractImplementor.class);
|
||||
testee.registerPendingPut(session, KEY1, 0);
|
||||
return null;
|
||||
});
|
||||
TIME_SERVICE.advance(10);
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
String ppName = cm.getCache().getName() + "-" + InfinispanRegionFactory.DEF_PENDING_PUTS_RESOURCE;
|
||||
Map ppCache = cm.getCache(ppName, false);
|
||||
assertNotNull(ppCache);
|
||||
Object pendingPutMap = ppCache.get(KEY1);
|
||||
assertNotNull(pendingPutMap);
|
||||
int size;
|
||||
try {
|
||||
Method sizeMethod = pendingPutMap.getClass().getMethod("size");
|
||||
sizeMethod.setAccessible(true);
|
||||
size = (Integer) sizeMethod.invoke(pendingPutMap);
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
// some of the pending puts should have expired by now
|
||||
assertTrue(size < 100);
|
||||
// but some are still registered
|
||||
assertTrue(size > 0);
|
||||
}
|
||||
}
@@ -1,26 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.test.cache.infinispan.collection;

import org.hibernate.cache.spi.access.AccessType;
import org.hibernate.cache.spi.access.CollectionRegionAccessStrategy;
import org.hibernate.test.cache.infinispan.AbstractExtraAPITest;
import org.hibernate.test.cache.infinispan.util.TestInfinispanRegionFactory;

import static org.junit.Assert.assertNull;
import static org.mockito.Mockito.mock;

/**
 * @author Galder Zamarreño
 * @since 3.5
 */
public class CollectionRegionAccessExtraAPITest extends AbstractExtraAPITest<CollectionRegionAccessStrategy> {
	@Override
	protected CollectionRegionAccessStrategy getAccessStrategy() {
		return environment.getCollectionRegion( REGION_NAME, CACHE_DATA_DESCRIPTION).buildAccessStrategy( accessType );
	}
}
@@ -1,152 +0,0 @@
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.test.cache.infinispan.collection;
|
||||
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.hibernate.cache.infinispan.access.AccessDelegate;
|
||||
import org.hibernate.cache.infinispan.access.NonTxInvalidationCacheAccessDelegate;
|
||||
import org.hibernate.cache.infinispan.access.PutFromLoadValidator;
|
||||
import org.hibernate.cache.infinispan.access.TxInvalidationCacheAccessDelegate;
|
||||
import org.hibernate.cache.infinispan.collection.CollectionRegionImpl;
|
||||
import org.hibernate.cache.spi.access.CollectionRegionAccessStrategy;
|
||||
import org.hibernate.cache.spi.access.SoftLock;
|
||||
import org.hibernate.engine.spi.SharedSessionContractImplementor;
|
||||
|
||||
import org.hibernate.test.cache.infinispan.AbstractRegionAccessStrategyTest;
|
||||
import org.hibernate.test.cache.infinispan.NodeEnvironment;
|
||||
import org.hibernate.test.cache.infinispan.util.TestSynchronization;
|
||||
import org.hibernate.test.cache.infinispan.util.TestingKeyFactory;
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.mockito.Matchers.any;
|
||||
import static org.mockito.Matchers.anyLong;
|
||||
import static org.mockito.Mockito.doAnswer;
|
||||
import static org.mockito.Mockito.spy;
|
||||
|
||||
/**
|
||||
* Base class for tests of CollectionRegionAccessStrategy impls.
|
||||
*
|
||||
* @author Galder Zamarreño
|
||||
* @since 3.5
|
||||
*/
|
||||
public class CollectionRegionAccessStrategyTest extends
|
||||
AbstractRegionAccessStrategyTest<CollectionRegionImpl, CollectionRegionAccessStrategy> {
|
||||
protected static int testCount;
|
||||
|
||||
@Override
|
||||
protected Object generateNextKey() {
|
||||
return TestingKeyFactory.generateCollectionCacheKey( KEY_BASE + testCount++ );
|
||||
}
|
||||
|
||||
@Override
|
||||
protected CollectionRegionImpl getRegion(NodeEnvironment environment) {
|
||||
return environment.getCollectionRegion( REGION_NAME, CACHE_DATA_DESCRIPTION );
|
||||
}
|
||||
|
||||
@Override
|
||||
protected CollectionRegionAccessStrategy getAccessStrategy(CollectionRegionImpl region) {
|
||||
return region.buildAccessStrategy( accessType );
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetRegion() {
|
||||
assertEquals( "Correct region", localRegion, localAccessStrategy.getRegion() );
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPutFromLoadRemoveDoesNotProduceStaleData() throws Exception {
|
||||
if (!cacheMode.isInvalidation()) {
|
||||
return;
|
||||
}
|
||||
final CountDownLatch pferLatch = new CountDownLatch( 1 );
|
||||
final CountDownLatch removeLatch = new CountDownLatch( 1 );
|
||||
// remove the interceptor inserted by the default PutFromLoadValidator; we're using a different one
|
||||
PutFromLoadValidator originalValidator = PutFromLoadValidator.removeFromCache(localRegion.getCache());
|
||||
PutFromLoadValidator mockValidator = spy(originalValidator);
|
||||
doAnswer(invocation -> {
|
||||
try {
|
||||
return invocation.callRealMethod();
|
||||
} finally {
|
||||
try {
|
||||
removeLatch.countDown();
|
||||
// the remove should be blocked because the putFromLoad lock has been acquired,
// and the remove can continue only after we've inserted the entry
|
||||
assertFalse(pferLatch.await( 2, TimeUnit.SECONDS ) );
|
||||
}
|
||||
catch (InterruptedException e) {
|
||||
log.debug( "Interrupted" );
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
catch (Exception e) {
|
||||
log.error( "Error", e );
|
||||
throw new RuntimeException( "Error", e );
|
||||
}
|
||||
}
|
||||
}).when(mockValidator).acquirePutFromLoadLock(any(), any(), anyLong());
|
||||
PutFromLoadValidator.addToCache(localRegion.getCache(), mockValidator);
|
||||
cleanup.add(() -> {
|
||||
PutFromLoadValidator.removeFromCache(localRegion.getCache());
|
||||
PutFromLoadValidator.addToCache(localRegion.getCache(), originalValidator);
|
||||
});
|
||||
|
||||
final AccessDelegate delegate = localRegion.getCache().getCacheConfiguration().transaction().transactionMode().isTransactional() ?
|
||||
new TxInvalidationCacheAccessDelegate(localRegion, mockValidator) :
|
||||
new NonTxInvalidationCacheAccessDelegate(localRegion, mockValidator);
|
||||
|
||||
ExecutorService executorService = Executors.newCachedThreadPool();
|
||||
cleanup.add(() -> executorService.shutdownNow());
|
||||
|
||||
final String KEY = "k1";
|
||||
Future<Void> pferFuture = executorService.submit(() -> {
|
||||
SharedSessionContractImplementor session = mockedSession();
|
||||
delegate.putFromLoad(session, KEY, "v1", session.getTimestamp(), null);
|
||||
return null;
|
||||
});
|
||||
|
||||
Future<Void> removeFuture = executorService.submit(() -> {
|
||||
removeLatch.await();
|
||||
SharedSessionContractImplementor session = mockedSession();
|
||||
withTx(localEnvironment, session, () -> {
|
||||
delegate.remove(session, KEY);
|
||||
return null;
|
||||
});
|
||||
pferLatch.countDown();
|
||||
return null;
|
||||
});
|
||||
|
||||
pferFuture.get();
|
||||
removeFuture.get();
|
||||
|
||||
assertFalse(localRegion.getCache().containsKey(KEY));
|
||||
assertFalse(remoteRegion.getCache().containsKey(KEY));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPutFromLoad() throws Exception {
|
||||
putFromLoadTest(false, true);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPutFromLoadMinimal() throws Exception {
|
||||
putFromLoadTest(true, true);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doUpdate(CollectionRegionAccessStrategy strategy, SharedSessionContractImplementor session, Object key, Object value, Object version) throws javax.transaction.RollbackException, javax.transaction.SystemException {
|
||||
SoftLock softLock = strategy.lockItem(session, key, version);
|
||||
strategy.remove(session, key);
|
||||
session.getTransactionCoordinator().getLocalSynchronizations().registerSynchronization(
|
||||
new TestSynchronization.UnlockItem(strategy, session, key, softLock));
|
||||
}
|
||||
}
@@ -1,57 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.test.cache.infinispan.collection;

import java.util.Properties;

import org.hibernate.cache.infinispan.InfinispanRegionFactory;
import org.hibernate.cache.spi.CacheDataDescription;
import org.hibernate.cache.spi.CollectionRegion;
import org.hibernate.cache.spi.Region;
import org.hibernate.cache.spi.RegionFactory;
import org.hibernate.cache.spi.access.AccessType;
import org.hibernate.cache.spi.access.CollectionRegionAccessStrategy;
import org.hibernate.test.cache.infinispan.AbstractEntityCollectionRegionTest;
import org.infinispan.AdvancedCache;

import static org.junit.Assert.assertNotNull;

/**
 * @author Galder Zamarreño
 */
public class CollectionRegionImplTest extends AbstractEntityCollectionRegionTest {
	protected static final String CACHE_NAME = "test";

	@Override
	protected void supportedAccessTypeTest(RegionFactory regionFactory, Properties properties) {
		CollectionRegion region = regionFactory.buildCollectionRegion(CACHE_NAME, properties, MUTABLE_NON_VERSIONED);
		assertNotNull(region.buildAccessStrategy(accessType));
		((InfinispanRegionFactory) regionFactory).getCacheManager().removeCache(CACHE_NAME);
	}

	@Override
	protected Region createRegion(InfinispanRegionFactory regionFactory, String regionName, Properties properties, CacheDataDescription cdd) {
		return regionFactory.buildCollectionRegion(regionName, properties, cdd);
	}

	@Override
	protected AdvancedCache getInfinispanCache(InfinispanRegionFactory regionFactory) {
		return regionFactory.getCacheManager().getCache(InfinispanRegionFactory.DEF_ENTITY_RESOURCE).getAdvancedCache();
	}

	@Override
	protected void putInRegion(Region region, Object key, Object value) {
		CollectionRegionAccessStrategy strategy = ((CollectionRegion) region).buildAccessStrategy(AccessType.TRANSACTIONAL);
		strategy.putFromLoad(null, key, value, region.nextTimestamp(), new Integer(1));
	}

	@Override
	protected void removeFromRegion(Region region, Object key) {
		((CollectionRegion) region).buildAccessStrategy(AccessType.TRANSACTIONAL).remove(null, key);
	}

}
@@ -1,369 +0,0 @@
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.test.cache.infinispan.entity;
|
||||
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.hibernate.cache.infinispan.entity.EntityRegionImpl;
|
||||
import org.hibernate.cache.spi.access.AccessType;
|
||||
import org.hibernate.cache.spi.access.EntityRegionAccessStrategy;
|
||||
import org.hibernate.cache.spi.access.SoftLock;
|
||||
import org.hibernate.engine.spi.SharedSessionContractImplementor;
|
||||
|
||||
import org.hibernate.test.cache.infinispan.AbstractRegionAccessStrategyTest;
|
||||
import org.hibernate.test.cache.infinispan.NodeEnvironment;
|
||||
import org.hibernate.test.cache.infinispan.util.TestSynchronization;
|
||||
import org.hibernate.test.cache.infinispan.util.TestingKeyFactory;
|
||||
import org.junit.Ignore;
|
||||
import org.junit.Test;
|
||||
import junit.framework.AssertionFailedError;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
/**
|
||||
* Base class for tests of EntityRegionAccessStrategy impls.
|
||||
*
|
||||
* @author Galder Zamarreño
|
||||
* @since 3.5
|
||||
*/
|
||||
public class EntityRegionAccessStrategyTest extends
|
||||
AbstractRegionAccessStrategyTest<EntityRegionImpl, EntityRegionAccessStrategy> {
|
||||
protected static int testCount;
|
||||
|
||||
@Override
|
||||
protected Object generateNextKey() {
|
||||
return TestingKeyFactory.generateEntityCacheKey( KEY_BASE + testCount++ );
|
||||
}
|
||||
|
||||
@Override
|
||||
protected EntityRegionImpl getRegion(NodeEnvironment environment) {
|
||||
return environment.getEntityRegion(REGION_NAME, CACHE_DATA_DESCRIPTION);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected EntityRegionAccessStrategy getAccessStrategy(EntityRegionImpl region) {
|
||||
return region.buildAccessStrategy( accessType );
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetRegion() {
|
||||
assertEquals("Correct region", localRegion, localAccessStrategy.getRegion());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPutFromLoad() throws Exception {
|
||||
if (accessType == AccessType.READ_ONLY) {
|
||||
putFromLoadTestReadOnly(false);
|
||||
} else {
|
||||
putFromLoadTest(false, false);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPutFromLoadMinimal() throws Exception {
|
||||
if (accessType == AccessType.READ_ONLY) {
|
||||
putFromLoadTestReadOnly(true);
|
||||
} else {
|
||||
putFromLoadTest(true, false);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInsert() throws Exception {
|
||||
final Object KEY = generateNextKey();
|
||||
|
||||
final CountDownLatch readLatch = new CountDownLatch(1);
|
||||
final CountDownLatch commitLatch = new CountDownLatch(1);
|
||||
final CountDownLatch completionLatch = new CountDownLatch(2);
|
||||
|
||||
CountDownLatch asyncInsertLatch = expectAfterUpdate();
|
||||
|
||||
Thread inserter = new Thread(() -> {
|
||||
try {
|
||||
SharedSessionContractImplementor session = mockedSession();
|
||||
withTx(localEnvironment, session, () -> {
|
||||
assertNull("Correct initial value", localAccessStrategy.get(session, KEY, session.getTimestamp()));
|
||||
|
||||
doInsert(localAccessStrategy, session, KEY, VALUE1, 1);
|
||||
|
||||
readLatch.countDown();
|
||||
commitLatch.await();
|
||||
return null;
|
||||
});
|
||||
} catch (Exception e) {
|
||||
log.error("node1 caught exception", e);
|
||||
node1Exception = e;
|
||||
} catch (AssertionFailedError e) {
|
||||
node1Failure = e;
|
||||
} finally {
|
||||
|
||||
completionLatch.countDown();
|
||||
}
|
||||
}, "testInsert-inserter");
|
||||
|
||||
Thread reader = new Thread(() -> {
|
||||
try {
|
||||
SharedSessionContractImplementor session = mockedSession();
|
||||
withTx(localEnvironment, session, () -> {
|
||||
readLatch.await();
|
||||
|
||||
assertNull("Correct initial value", localAccessStrategy.get(session, KEY, session.getTimestamp()));
|
||||
return null;
|
||||
});
|
||||
} catch (Exception e) {
|
||||
log.error("node1 caught exception", e);
|
||||
node1Exception = e;
|
||||
} catch (AssertionFailedError e) {
|
||||
node1Failure = e;
|
||||
} finally {
|
||||
commitLatch.countDown();
|
||||
completionLatch.countDown();
|
||||
}
|
||||
}, "testInsert-reader");
|
||||
|
||||
inserter.setDaemon(true);
|
||||
reader.setDaemon(true);
|
||||
inserter.start();
|
||||
reader.start();
|
||||
|
||||
assertTrue("Threads completed", completionLatch.await(10, TimeUnit.SECONDS));
|
||||
|
||||
assertThreadsRanCleanly();
|
||||
|
||||
SharedSessionContractImplementor s1 = mockedSession();
|
||||
assertEquals("Correct node1 value", VALUE1, localAccessStrategy.get(s1, KEY, s1.getTimestamp()));
|
||||
|
||||
assertTrue(asyncInsertLatch.await(10, TimeUnit.SECONDS));
|
||||
Object expected = isUsingInvalidation() ? null : VALUE1;
|
||||
SharedSessionContractImplementor s2 = mockedSession();
|
||||
assertEquals("Correct node2 value", expected, remoteAccessStrategy.get(s2, KEY, s2.getTimestamp()));
|
||||
}
|
||||
|
||||
protected void doInsert(EntityRegionAccessStrategy strategy, SharedSessionContractImplementor session, Object key, String value, Object version) {
|
||||
strategy.insert(session, key, value, null);
|
||||
session.getTransactionCoordinator().getLocalSynchronizations().registerSynchronization(
|
||||
new TestSynchronization.AfterInsert(strategy, session, key, value, version));
|
||||
}
|
||||
|
||||
protected void putFromLoadTestReadOnly(boolean minimal) throws Exception {
|
||||
final Object KEY = TestingKeyFactory.generateEntityCacheKey( KEY_BASE + testCount++ );
|
||||
|
||||
CountDownLatch remotePutFromLoadLatch = expectPutFromLoad();
|
||||
|
||||
SharedSessionContractImplementor session = mockedSession();
|
||||
withTx(localEnvironment, session, () -> {
|
||||
assertNull(localAccessStrategy.get(session, KEY, session.getTimestamp()));
|
||||
if (minimal)
|
||||
localAccessStrategy.putFromLoad(session, KEY, VALUE1, session.getTimestamp(), 1, true);
|
||||
else
|
||||
localAccessStrategy.putFromLoad(session, KEY, VALUE1, session.getTimestamp(), 1);
|
||||
return null;
|
||||
});
|
||||
|
||||
SharedSessionContractImplementor s2 = mockedSession();
|
||||
assertEquals(VALUE1, localAccessStrategy.get(s2, KEY, s2.getTimestamp()));
|
||||
SharedSessionContractImplementor s3 = mockedSession();
|
||||
Object expected;
|
||||
if (isUsingInvalidation()) {
|
||||
expected = null;
|
||||
} else {
|
||||
if (accessType != AccessType.NONSTRICT_READ_WRITE) {
|
||||
assertTrue(remotePutFromLoadLatch.await(2, TimeUnit.SECONDS));
|
||||
}
|
||||
expected = VALUE1;
|
||||
}
|
||||
assertEquals(expected, remoteAccessStrategy.get(s3, KEY, s3.getTimestamp()));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUpdate() throws Exception {
|
||||
if (accessType == AccessType.READ_ONLY) {
|
||||
return;
|
||||
}
|
||||
|
||||
final Object KEY = generateNextKey();
|
||||
|
||||
// Set up initial state
|
||||
SharedSessionContractImplementor s1 = mockedSession();
|
||||
localAccessStrategy.putFromLoad(s1, KEY, VALUE1, s1.getTimestamp(), 1);
|
||||
SharedSessionContractImplementor s2 = mockedSession();
|
||||
remoteAccessStrategy.putFromLoad(s2, KEY, VALUE1, s2.getTimestamp(), 1);
|
||||
|
||||
// both nodes are updated, so we don't have to wait for any async replication of putFromLoad
|
||||
CountDownLatch asyncUpdateLatch = expectAfterUpdate();
|
||||
|
||||
final CountDownLatch readLatch = new CountDownLatch(1);
|
||||
final CountDownLatch commitLatch = new CountDownLatch(1);
|
||||
final CountDownLatch completionLatch = new CountDownLatch(2);
|
||||
|
||||
Thread updater = new Thread(() -> {
|
||||
try {
|
||||
SharedSessionContractImplementor session = mockedSession();
|
||||
withTx(localEnvironment, session, () -> {
|
||||
log.debug("Transaction began, get initial value");
|
||||
assertEquals("Correct initial value", VALUE1, localAccessStrategy.get(session, KEY, session.getTimestamp()));
|
||||
log.debug("Now update value");
|
||||
doUpdate(localAccessStrategy, session, KEY, VALUE2, 2);
|
||||
log.debug("Notify the read latch");
|
||||
readLatch.countDown();
|
||||
log.debug("Await commit");
|
||||
commitLatch.await();
|
||||
return null;
|
||||
});
|
||||
} catch (Exception e) {
|
||||
log.error("node1 caught exception", e);
|
||||
node1Exception = e;
|
||||
} catch (AssertionFailedError e) {
|
||||
node1Failure = e;
|
||||
} finally {
|
||||
if (readLatch.getCount() > 0) {
|
||||
readLatch.countDown();
|
||||
}
|
||||
log.debug("Completion latch countdown");
|
||||
completionLatch.countDown();
|
||||
}
|
||||
}, "testUpdate-updater");
|
||||
|
||||
Thread reader = new Thread(() -> {
|
||||
try {
|
||||
SharedSessionContractImplementor session = mockedSession();
|
||||
withTx(localEnvironment, session, () -> {
|
||||
log.debug("Transaction began, await read latch");
|
||||
readLatch.await();
|
||||
log.debug("Read latch acquired, verify local access strategy");
|
||||
|
||||
// This won't block with MVCC and will read the old value (if transactional, as the transaction
// is not committed yet, or if non-strict, as we do the actual update only after the transaction),
// or null if non-transactional
|
||||
Object expected = isTransactional() || accessType == AccessType.NONSTRICT_READ_WRITE ? VALUE1 : null;
|
||||
assertEquals("Correct value", expected, localAccessStrategy.get(session, KEY, session.getTimestamp()));
|
||||
return null;
|
||||
});
|
||||
} catch (Exception e) {
|
||||
log.error("node1 caught exception", e);
|
||||
node1Exception = e;
|
||||
} catch (AssertionFailedError e) {
|
||||
node1Failure = e;
|
||||
} finally {
|
||||
commitLatch.countDown();
|
||||
log.debug("Completion latch countdown");
|
||||
completionLatch.countDown();
|
||||
}
|
||||
}, "testUpdate-reader");
|
||||
|
||||
updater.setDaemon(true);
|
||||
reader.setDaemon(true);
|
||||
updater.start();
|
||||
reader.start();
|
||||
|
||||
assertTrue(completionLatch.await(2, TimeUnit.SECONDS));
|
||||
|
||||
assertThreadsRanCleanly();
|
||||
|
||||
SharedSessionContractImplementor s3 = mockedSession();
|
||||
assertEquals("Correct node1 value", VALUE2, localAccessStrategy.get(s3, KEY, s3.getTimestamp()));
|
||||
assertTrue(asyncUpdateLatch.await(10, TimeUnit.SECONDS));
|
||||
Object expected = isUsingInvalidation() ? null : VALUE2;
|
||||
SharedSessionContractImplementor s4 = mockedSession();
|
||||
assertEquals("Correct node2 value", expected, remoteAccessStrategy.get(s4, KEY, s4.getTimestamp()));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doUpdate(EntityRegionAccessStrategy strategy, SharedSessionContractImplementor session, Object key, Object value, Object version) throws javax.transaction.RollbackException, javax.transaction.SystemException {
|
||||
SoftLock softLock = strategy.lockItem(session, key, null);
|
||||
strategy.update(session, key, value, null, null);
|
||||
session.getTransactionCoordinator().getLocalSynchronizations().registerSynchronization(
|
||||
new TestSynchronization.AfterUpdate(strategy, session, key, value, version, softLock));
|
||||
}
|
||||
|
||||
/**
 * This test fails in CI too often because it depends on a very short timeout. The behaviour is basically
 * non-testable: we want to make sure that the "Putter" is always progressing; however, it is sometimes
 * progressing in a different thread (on a different node), and sometimes even in a system thread, sending a
 * message over the network. Therefore even checking that some OOB/remote thread is in RUNNABLE/RUNNING state
 * is prone to spurious failure (and we can't grab the state of all threads atomically).
 */
|
||||
@Ignore
|
||||
@Test
|
||||
public void testContestedPutFromLoad() throws Exception {
|
||||
if (accessType == AccessType.READ_ONLY) {
|
||||
return;
|
||||
}
|
||||
|
||||
final Object KEY = TestingKeyFactory.generateEntityCacheKey(KEY_BASE + testCount++);
|
||||
|
||||
SharedSessionContractImplementor s1 = mockedSession();
|
||||
localAccessStrategy.putFromLoad(s1, KEY, VALUE1, s1.getTimestamp(), 1);
|
||||
|
||||
final CountDownLatch pferLatch = new CountDownLatch(1);
|
||||
final CountDownLatch pferCompletionLatch = new CountDownLatch(1);
|
||||
final CountDownLatch commitLatch = new CountDownLatch(1);
|
||||
final CountDownLatch completionLatch = new CountDownLatch(1);
|
||||
|
||||
Thread blocker = new Thread("Blocker") {
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
SharedSessionContractImplementor session = mockedSession();
|
||||
withTx(localEnvironment, session, () -> {
|
||||
assertEquals("Correct initial value", VALUE1, localAccessStrategy.get(session, KEY, session.getTimestamp()));
|
||||
|
||||
doUpdate(localAccessStrategy, session, KEY, VALUE2, 2);
|
||||
|
||||
pferLatch.countDown();
|
||||
commitLatch.await();
|
||||
return null;
|
||||
});
|
||||
} catch (Exception e) {
|
||||
log.error("node1 caught exception", e);
|
||||
node1Exception = e;
|
||||
} catch (AssertionFailedError e) {
|
||||
node1Failure = e;
|
||||
} finally {
|
||||
completionLatch.countDown();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
Thread putter = new Thread("Putter") {
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
SharedSessionContractImplementor session = mockedSession();
|
||||
withTx(localEnvironment, session, () -> {
|
||||
localAccessStrategy.putFromLoad(session, KEY, VALUE1, session.getTimestamp(), 1);
|
||||
return null;
|
||||
});
|
||||
} catch (Exception e) {
|
||||
log.error("node1 caught exception", e);
|
||||
node1Exception = e;
|
||||
} catch (AssertionFailedError e) {
|
||||
node1Failure = e;
|
||||
} finally {
|
||||
pferCompletionLatch.countDown();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
blocker.start();
|
||||
assertTrue("Active tx has done an update", pferLatch.await(1, TimeUnit.SECONDS));
|
||||
putter.start();
|
||||
assertTrue("putFromLoad returns promptly", pferCompletionLatch.await(10, TimeUnit.MILLISECONDS));
|
||||
|
||||
commitLatch.countDown();
|
||||
|
||||
assertTrue("Threads completed", completionLatch.await(1, TimeUnit.SECONDS));
|
||||
|
||||
assertThreadsRanCleanly();
|
||||
|
||||
SharedSessionContractImplementor session = mockedSession();
|
||||
assertEquals("Correct node1 value", VALUE2, localAccessStrategy.get(session, KEY, session.getTimestamp()));
|
||||
}
|
||||
}
@@ -1,51 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.test.cache.infinispan.entity;

import org.hibernate.cache.spi.access.AccessType;
import org.hibernate.cache.spi.access.EntityRegionAccessStrategy;
import org.hibernate.test.cache.infinispan.AbstractExtraAPITest;
import org.junit.Test;

import static org.junit.Assert.assertEquals;

/**
 * Tests for the "extra API" in EntityRegionAccessStrategy.
 * <p>
 * By "extra API" we mean those methods that are superfluous to the
 * function of the JBC integration, where the impl is a no-op or a static
 * false return value, UnsupportedOperationException, etc.
 *
 * @author Galder Zamarreño
 * @since 3.5
 */
public class EntityRegionExtraAPITest extends AbstractExtraAPITest<EntityRegionAccessStrategy> {
	public static final String VALUE1 = "VALUE1";
	public static final String VALUE2 = "VALUE2";

	@Override
	protected EntityRegionAccessStrategy getAccessStrategy() {
		return environment.getEntityRegion( REGION_NAME, CACHE_DATA_DESCRIPTION).buildAccessStrategy( accessType );
	}

	@Test
	@SuppressWarnings( {"UnnecessaryBoxing"})
	public void testAfterInsert() {
		boolean retval = accessStrategy.afterInsert(SESSION, KEY, VALUE1, Integer.valueOf( 1 ));
		assertEquals(accessType == AccessType.NONSTRICT_READ_WRITE, retval);
	}

	@Test
	@SuppressWarnings( {"UnnecessaryBoxing"})
	public void testAfterUpdate() {
		if (accessType == AccessType.READ_ONLY) {
			return;
		}
		boolean retval = accessStrategy.afterUpdate(SESSION, KEY, VALUE2, Integer.valueOf( 1 ), Integer.valueOf( 2 ), new MockSoftLock());
		assertEquals(accessType == AccessType.NONSTRICT_READ_WRITE, retval);
	}
}
@@ -1,61 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.test.cache.infinispan.entity;

import java.util.Properties;

import org.hibernate.cache.CacheException;
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
import org.hibernate.cache.spi.CacheDataDescription;
import org.hibernate.cache.spi.EntityRegion;
import org.hibernate.cache.spi.Region;
import org.hibernate.cache.spi.RegionFactory;
import org.hibernate.cache.spi.access.AccessType;
import org.hibernate.test.cache.infinispan.AbstractEntityCollectionRegionTest;
import org.infinispan.AdvancedCache;

import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.fail;

/**
 * Tests of EntityRegionImpl.
 *
 * @author Galder Zamarreño
 * @since 3.5
 */
public class EntityRegionImplTest extends AbstractEntityCollectionRegionTest {
	protected static final String CACHE_NAME = "test";

	@Override
	protected void supportedAccessTypeTest(RegionFactory regionFactory, Properties properties) {
		EntityRegion region = regionFactory.buildEntityRegion("test", properties, MUTABLE_NON_VERSIONED);
		assertNotNull(region.buildAccessStrategy(accessType));
		((InfinispanRegionFactory) regionFactory).getCacheManager().removeCache(CACHE_NAME);
	}

	@Override
	protected void putInRegion(Region region, Object key, Object value) {
		((EntityRegion) region).buildAccessStrategy(AccessType.TRANSACTIONAL).insert(null, key, value, 1);
	}

	@Override
	protected void removeFromRegion(Region region, Object key) {
		((EntityRegion) region).buildAccessStrategy(AccessType.TRANSACTIONAL).remove(null, key);
	}

	@Override
	protected Region createRegion(InfinispanRegionFactory regionFactory, String regionName, Properties properties, CacheDataDescription cdd) {
		return regionFactory.buildEntityRegion(regionName, properties, cdd);
	}

	@Override
	protected AdvancedCache getInfinispanCache(InfinispanRegionFactory regionFactory) {
		return regionFactory.getCacheManager().getCache(
				InfinispanRegionFactory.DEF_ENTITY_RESOURCE).getAdvancedCache();
	}

}
@@ -1,242 +0,0 @@
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.test.cache.infinispan.functional;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
import org.hibernate.Session;
|
||||
import org.hibernate.boot.Metadata;
|
||||
import org.hibernate.boot.spi.MetadataImplementor;
|
||||
import org.hibernate.cache.infinispan.util.FutureUpdate;
|
||||
import org.hibernate.cache.infinispan.util.InfinispanMessageLogger;
|
||||
import org.hibernate.cache.infinispan.util.TombstoneUpdate;
|
||||
import org.hibernate.cache.internal.SimpleCacheKeysFactory;
|
||||
import org.hibernate.cache.spi.RegionFactory;
|
||||
import org.hibernate.cache.spi.access.AccessType;
|
||||
import org.hibernate.cfg.AvailableSettings;
|
||||
import org.hibernate.cfg.Environment;
|
||||
import org.hibernate.engine.jdbc.connections.spi.ConnectionProvider;
|
||||
import org.hibernate.engine.transaction.jta.platform.spi.JtaPlatform;
|
||||
import org.hibernate.mapping.Column;
|
||||
import org.hibernate.mapping.PersistentClass;
|
||||
import org.hibernate.mapping.Property;
|
||||
import org.hibernate.mapping.RootClass;
|
||||
import org.hibernate.mapping.SimpleValue;
|
||||
import org.hibernate.resource.transaction.backend.jdbc.internal.JdbcResourceLocalTransactionCoordinatorBuilderImpl;
|
||||
import org.hibernate.resource.transaction.backend.jta.internal.JtaTransactionCoordinatorBuilderImpl;
|
||||
import org.hibernate.resource.transaction.spi.TransactionCoordinatorBuilder;
|
||||
|
||||
import org.hibernate.test.cache.infinispan.functional.cluster.DualNodeTest;
|
||||
import org.hibernate.test.cache.infinispan.util.ExpectingInterceptor;
|
||||
import org.hibernate.testing.BeforeClassOnce;
|
||||
import org.hibernate.testing.junit4.BaseNonConfigCoreFunctionalTestCase;
|
||||
import org.hibernate.testing.junit4.CustomParameterized;
|
||||
import org.hibernate.test.cache.infinispan.tm.JtaPlatformImpl;
|
||||
import org.hibernate.test.cache.infinispan.tm.XaConnectionProvider;
|
||||
import org.hibernate.test.cache.infinispan.util.InfinispanTestingSetup;
|
||||
import org.hibernate.test.cache.infinispan.util.TestInfinispanRegionFactory;
|
||||
import org.hibernate.test.cache.infinispan.util.TxUtil;
|
||||
import org.infinispan.AdvancedCache;
|
||||
import org.infinispan.commands.write.PutKeyValueCommand;
|
||||
import org.junit.After;
|
||||
import org.junit.ClassRule;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.junit.runners.Parameterized;
|
||||
|
||||
import org.infinispan.configuration.cache.CacheMode;
|
||||
|
||||
/**
|
||||
* @author Galder Zamarreño
|
||||
* @since 3.5
|
||||
*/
|
||||
@RunWith(CustomParameterized.class)
|
||||
public abstract class AbstractFunctionalTest extends BaseNonConfigCoreFunctionalTestCase {
|
||||
|
||||
protected final InfinispanMessageLogger log = InfinispanMessageLogger.Provider.getLog( DualNodeTest.class );
|
||||
|
||||
protected static final Object[] TRANSACTIONAL = new Object[]{"transactional", JtaPlatformImpl.class, JtaTransactionCoordinatorBuilderImpl.class, XaConnectionProvider.class, AccessType.TRANSACTIONAL, true, CacheMode.INVALIDATION_SYNC, false };
|
||||
protected static final Object[] READ_WRITE_INVALIDATION = new Object[]{"read-write", null, JdbcResourceLocalTransactionCoordinatorBuilderImpl.class, null, AccessType.READ_WRITE, false, CacheMode.INVALIDATION_SYNC, false };
|
||||
protected static final Object[] READ_ONLY_INVALIDATION = new Object[]{"read-only", null, JdbcResourceLocalTransactionCoordinatorBuilderImpl.class, null, AccessType.READ_ONLY, false, CacheMode.INVALIDATION_SYNC, false };
|
||||
protected static final Object[] READ_WRITE_REPLICATED = new Object[]{"read-write", null, JdbcResourceLocalTransactionCoordinatorBuilderImpl.class, null, AccessType.READ_WRITE, false, CacheMode.REPL_SYNC, false };
|
||||
protected static final Object[] READ_ONLY_REPLICATED = new Object[]{"read-only", null, JdbcResourceLocalTransactionCoordinatorBuilderImpl.class, null, AccessType.READ_ONLY, false, CacheMode.REPL_SYNC, false };
|
||||
protected static final Object[] READ_WRITE_DISTRIBUTED = new Object[]{"read-write", null, JdbcResourceLocalTransactionCoordinatorBuilderImpl.class, null, AccessType.READ_WRITE, false, CacheMode.DIST_SYNC, false };
|
||||
protected static final Object[] READ_ONLY_DISTRIBUTED = new Object[]{"read-only", null, JdbcResourceLocalTransactionCoordinatorBuilderImpl.class, null, AccessType.READ_ONLY, false, CacheMode.DIST_SYNC, false };
|
||||
protected static final Object[] NONSTRICT_REPLICATED = new Object[]{"nonstrict", null, JdbcResourceLocalTransactionCoordinatorBuilderImpl.class, null, AccessType.NONSTRICT_READ_WRITE, false, CacheMode.REPL_SYNC, true };
|
||||
protected static final Object[] NONSTRICT_DISTRIBUTED = new Object[]{"nonstrict", null, JdbcResourceLocalTransactionCoordinatorBuilderImpl.class, null, AccessType.NONSTRICT_READ_WRITE, false, CacheMode.DIST_SYNC, true };
|
||||
|
||||
// We need to use @ClassRule here since in @BeforeClassOnce startUp we're preparing the session factory,
// constructing the CacheManager along the way - and there we check that the test has the name already set
|
||||
@ClassRule
|
||||
public static final InfinispanTestingSetup infinispanTestIdentifier = new InfinispanTestingSetup();
|
||||
|
||||
@Parameterized.Parameter(value = 0)
|
||||
public String mode;
|
||||
|
||||
@Parameterized.Parameter(value = 1)
|
||||
public Class<? extends JtaPlatform> jtaPlatformClass;
|
||||
|
||||
@Parameterized.Parameter(value = 2)
|
||||
public Class<? extends TransactionCoordinatorBuilder> transactionCoordinatorBuilderClass;
|
||||
|
||||
@Parameterized.Parameter(value = 3)
|
||||
public Class<? extends ConnectionProvider> connectionProviderClass;
|
||||
|
||||
@Parameterized.Parameter(value = 4)
|
||||
public AccessType accessType;
|
||||
|
||||
@Parameterized.Parameter(value = 5)
|
||||
public boolean useTransactionalCache;
|
||||
|
||||
@Parameterized.Parameter(value = 6)
|
||||
public CacheMode cacheMode;
|
||||
|
||||
@Parameterized.Parameter(value = 7)
|
||||
public boolean addVersions;
|
||||
|
||||
protected boolean useJta;
|
||||
protected List<Runnable> cleanup = new ArrayList<>();
|
||||
|
||||
@CustomParameterized.Order(0)
|
||||
@Parameterized.Parameters(name = "{0}, {6}")
|
||||
public abstract List<Object[]> getParameters();
|
||||
|
||||
public List<Object[]> getParameters(boolean tx, boolean rw, boolean ro, boolean nonstrict) {
|
||||
ArrayList<Object[]> parameters = new ArrayList<>();
|
||||
if (tx) {
|
||||
parameters.add(TRANSACTIONAL);
|
||||
}
|
||||
if (rw) {
|
||||
parameters.add(READ_WRITE_INVALIDATION);
|
||||
parameters.add(READ_WRITE_REPLICATED);
|
||||
parameters.add(READ_WRITE_DISTRIBUTED);
|
||||
}
|
||||
if (ro) {
|
||||
parameters.add(READ_ONLY_INVALIDATION);
|
||||
parameters.add(READ_ONLY_REPLICATED);
|
||||
parameters.add(READ_ONLY_DISTRIBUTED);
|
||||
}
|
||||
if (nonstrict) {
|
||||
parameters.add(NONSTRICT_REPLICATED);
|
||||
parameters.add(NONSTRICT_DISTRIBUTED);
|
||||
}
|
||||
return parameters;
|
||||
}
|
||||
|
||||
@BeforeClassOnce
|
||||
public void setUseJta() {
|
||||
useJta = jtaPlatformClass != null;
|
||||
}
|
||||
|
||||
@After
|
||||
public void runCleanup() {
|
||||
cleanup.forEach(Runnable::run);
|
||||
cleanup.clear();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String[] getMappings() {
|
||||
return new String[] {
|
||||
"cache/infinispan/functional/entities/Item.hbm.xml",
|
||||
"cache/infinispan/functional/entities/Customer.hbm.xml",
|
||||
"cache/infinispan/functional/entities/Contact.hbm.xml"
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void afterMetadataBuilt(Metadata metadata) {
|
||||
if (addVersions) {
|
||||
for (PersistentClass clazz : metadata.getEntityBindings()) {
|
||||
if (clazz.getVersion() != null) {
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
clazz.getMappedClass().getMethod("getVersion");
|
||||
clazz.getMappedClass().getMethod("setVersion", long.class);
|
||||
} catch (NoSuchMethodException e) {
|
||||
continue;
|
||||
}
|
||||
RootClass rootClazz = clazz.getRootClass();
|
||||
Property versionProperty = new Property();
|
||||
versionProperty.setName("version");
|
||||
SimpleValue value = new SimpleValue((MetadataImplementor) metadata, rootClazz.getTable());
|
||||
value.setTypeName("long");
|
||||
Column column = new Column();
|
||||
column.setValue(value);
|
||||
column.setName("version");
|
||||
value.addColumn(column);
|
||||
rootClazz.getTable().addColumn(column);
|
||||
versionProperty.setValue(value);
|
||||
rootClazz.setVersion(versionProperty);
|
||||
rootClazz.addProperty(versionProperty);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getCacheConcurrencyStrategy() {
|
||||
return accessType.getExternalName();
|
||||
}
|
||||
|
||||
protected Class<? extends RegionFactory> getRegionFactoryClass() {
|
||||
return TestInfinispanRegionFactory.class;
|
||||
}
|
||||
|
||||
protected boolean getUseQueryCache() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
protected void addSettings(Map settings) {
|
||||
super.addSettings( settings );
|
||||
|
||||
settings.put( Environment.USE_SECOND_LEVEL_CACHE, "true" );
|
||||
settings.put( Environment.GENERATE_STATISTICS, "true" );
|
||||
settings.put( Environment.USE_QUERY_CACHE, String.valueOf( getUseQueryCache() ) );
|
||||
settings.put( Environment.CACHE_REGION_FACTORY, getRegionFactoryClass().getName() );
|
||||
settings.put( Environment.CACHE_KEYS_FACTORY, SimpleCacheKeysFactory.SHORT_NAME );
|
||||
settings.put( TestInfinispanRegionFactory.TRANSACTIONAL, useTransactionalCache );
|
||||
settings.put( TestInfinispanRegionFactory.CACHE_MODE, cacheMode);
|
||||
|
||||
if ( jtaPlatformClass != null ) {
|
||||
settings.put( AvailableSettings.JTA_PLATFORM, jtaPlatformClass.getName() );
|
||||
}
|
||||
settings.put( Environment.TRANSACTION_COORDINATOR_STRATEGY, transactionCoordinatorBuilderClass.getName() );
|
||||
if ( connectionProviderClass != null) {
|
||||
settings.put(Environment.CONNECTION_PROVIDER, connectionProviderClass.getName());
|
||||
}
|
||||
}
|
||||
|
||||
protected void markRollbackOnly(Session session) {
|
||||
TxUtil.markRollbackOnly(useJta, session);
|
||||
}
|
||||
|
||||
protected CountDownLatch expectAfterUpdate(AdvancedCache cache, int numUpdates) {
|
||||
return expectPutWithValue(cache, value -> value instanceof FutureUpdate, numUpdates);
|
||||
}
|
||||
|
||||
protected CountDownLatch expectEvict(AdvancedCache cache, int numUpdates) {
|
||||
return expectPutWithValue(cache, value -> value instanceof TombstoneUpdate && ((TombstoneUpdate) value).getValue() == null, numUpdates);
|
||||
}
|
||||
|
||||
protected CountDownLatch expectPutWithValue(AdvancedCache cache, Predicate<Object> valuePredicate, int numUpdates) {
|
||||
if (!cacheMode.isInvalidation() && accessType != AccessType.NONSTRICT_READ_WRITE) {
|
||||
CountDownLatch latch = new CountDownLatch(numUpdates);
|
||||
ExpectingInterceptor.get(cache)
|
||||
.when((ctx, cmd) -> cmd instanceof PutKeyValueCommand && valuePredicate.test(((PutKeyValueCommand) cmd).getValue()))
|
||||
.countDown(latch);
|
||||
cleanup.add(() -> ExpectingInterceptor.cleanup(cache));
|
||||
return latch;
|
||||
} else {
|
||||
return new CountDownLatch(0);
|
||||
}
|
||||
}
|
||||
}
@@ -1,229 +0,0 @@
package org.hibernate.test.cache.infinispan.functional;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.CyclicBarrier;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.ThreadFactory;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import javax.persistence.OptimisticLockException;
|
||||
import javax.persistence.PessimisticLockException;
|
||||
import org.hibernate.boot.registry.StandardServiceRegistryBuilder;
|
||||
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
|
||||
import org.hibernate.cache.infinispan.entity.EntityRegionImpl;
|
||||
import org.hibernate.cache.infinispan.util.Caches;
|
||||
import org.hibernate.cache.infinispan.util.InfinispanMessageLogger;
|
||||
import org.hibernate.cache.spi.Region;
|
||||
|
||||
import org.hibernate.cache.spi.entry.CacheEntry;
|
||||
import org.hibernate.cfg.AvailableSettings;
|
||||
import org.hibernate.testing.AfterClassOnce;
|
||||
import org.hibernate.testing.BeforeClassOnce;
|
||||
import org.hibernate.test.cache.infinispan.functional.entities.Item;
|
||||
import org.hibernate.test.cache.infinispan.util.TestInfinispanRegionFactory;
|
||||
import org.hibernate.test.cache.infinispan.util.TestTimeService;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
|
||||
import org.infinispan.AdvancedCache;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
/**
|
||||
* Common base for TombstoneTest and VersionedTest
|
||||
*
|
||||
* @author Radim Vansa <rvansa@redhat.com>
|
||||
*/
|
||||
public abstract class AbstractNonInvalidationTest extends SingleNodeTest {
|
||||
protected static final int WAIT_TIMEOUT = 2000;
|
||||
protected static final TestTimeService TIME_SERVICE = new TestTimeService();
|
||||
|
||||
protected long TIMEOUT;
|
||||
protected ExecutorService executor;
|
||||
protected InfinispanMessageLogger log = InfinispanMessageLogger.Provider.getLog(getClass());
|
||||
protected AdvancedCache entityCache;
|
||||
protected long itemId;
|
||||
protected Region region;
|
||||
protected long timeout;
|
||||
protected final List<Runnable> cleanup = new ArrayList<>();
|
||||
|
||||
@BeforeClassOnce
|
||||
public void setup() {
|
||||
executor = Executors.newCachedThreadPool(new ThreadFactory() {
|
||||
AtomicInteger counter = new AtomicInteger();
|
||||
|
||||
@Override
|
||||
public Thread newThread(Runnable r) {
|
||||
return new Thread(r, "Executor-" + counter.incrementAndGet());
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@AfterClassOnce
|
||||
public void shutdown() {
|
||||
executor.shutdown();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void configureStandardServiceRegistryBuilder(StandardServiceRegistryBuilder ssrb) {
|
||||
// This applies to manually set LOCK_TIMEOUT for H2 DB. AvailableSettings.JPA_LOCK_TIMEOUT
|
||||
// works only for queries, not for CRUDs, so we have to modify the connection URL.
|
||||
// Alternative could be executing SET LOCK_TIMEOUT 100 as a native query.
|
||||
String url = (String) ssrb.getSettings().get(AvailableSettings.URL);
|
||||
if (url != null && url.contains("LOCK_TIMEOUT")) {
|
||||
url = url.replaceAll("LOCK_TIMEOUT=[^;]*", "LOCK_TIMEOUT=100");
|
||||
}
|
||||
ssrb.applySetting(AvailableSettings.URL, url);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void startUp() {
|
||||
super.startUp();
|
||||
InfinispanRegionFactory regionFactory = (InfinispanRegionFactory) sessionFactory().getSettings().getRegionFactory();
|
||||
TIMEOUT = regionFactory.getPendingPutsCacheConfiguration().expiration().maxIdle();
|
||||
region = sessionFactory().getSecondLevelCacheRegion(Item.class.getName());
|
||||
entityCache = ((EntityRegionImpl) region).getCache();
|
||||
}
|
||||
|
||||
@Before
|
||||
public void insertAndClearCache() throws Exception {
|
||||
region = sessionFactory().getSecondLevelCacheRegion(Item.class.getName());
|
||||
entityCache = ((EntityRegionImpl) region).getCache();
|
||||
timeout = ((EntityRegionImpl) region).getRegionFactory().getPendingPutsCacheConfiguration().expiration().maxIdle();
|
||||
Item item = new Item("my item", "Original item");
|
||||
withTxSession(s -> s.persist(item));
|
||||
entityCache.clear();
|
||||
assertEquals("Cache is not empty", Collections.EMPTY_SET, entityCache.keySet());
|
||||
itemId = item.getId();
|
||||
log.info("Insert and clear finished");
|
||||
}
|
||||
|
||||
@After
|
||||
public void cleanup() throws Exception {
|
||||
cleanup.forEach(Runnable::run);
|
||||
cleanup.clear();
|
||||
withTxSession(s -> {
|
||||
s.createQuery("delete from Item").executeUpdate();
|
||||
});
|
||||
}
|
||||
|
||||
protected Future<Boolean> removeFlushWait(long id, CyclicBarrier loadBarrier, CountDownLatch preFlushLatch, CountDownLatch flushLatch, CountDownLatch commitLatch) throws Exception {
|
||||
return executor.submit(() -> withTxSessionApply(s -> {
|
||||
try {
|
||||
Item item = s.load(Item.class, id);
|
||||
item.getName(); // force load & putFromLoad before the barrier
|
||||
loadBarrier.await(WAIT_TIMEOUT, TimeUnit.SECONDS);
|
||||
s.delete(item);
|
||||
if (preFlushLatch != null) {
|
||||
awaitOrThrow(preFlushLatch);
|
||||
}
|
||||
s.flush();
|
||||
} catch (OptimisticLockException e) {
|
||||
log.info("Exception thrown: ", e);
|
||||
markRollbackOnly(s);
|
||||
return false;
|
||||
} catch (PessimisticLockException e) {
|
||||
log.info("Exception thrown: ", e);
|
||||
markRollbackOnly(s);
|
||||
return false;
|
||||
} finally {
|
||||
if (flushLatch != null) {
|
||||
flushLatch.countDown();
|
||||
}
|
||||
}
|
||||
awaitOrThrow(commitLatch);
|
||||
return true;
|
||||
}));
|
||||
}
|
||||
|
||||
protected Future<Boolean> updateFlushWait(long id, CyclicBarrier loadBarrier, CountDownLatch preFlushLatch, CountDownLatch flushLatch, CountDownLatch commitLatch) throws Exception {
|
||||
return executor.submit(() -> withTxSessionApply(s -> {
|
||||
try {
|
||||
Item item = s.load(Item.class, id);
|
||||
item.getName(); // force load & putFromLoad before the barrier
|
||||
if (loadBarrier != null) {
|
||||
loadBarrier.await(WAIT_TIMEOUT, TimeUnit.SECONDS);
|
||||
}
|
||||
item.setDescription("Updated item");
|
||||
s.update(item);
|
||||
if (preFlushLatch != null) {
|
||||
awaitOrThrow(preFlushLatch);
|
||||
}
|
||||
s.flush();
|
||||
} catch (OptimisticLockException e) {
|
||||
log.info("Exception thrown: ", e);
|
||||
markRollbackOnly(s);
|
||||
return false;
|
||||
} catch (PessimisticLockException | org.hibernate.PessimisticLockException e) {
|
||||
log.info("Exception thrown: ", e);
|
||||
markRollbackOnly(s);
|
||||
return false;
|
||||
} finally {
|
||||
if (flushLatch != null) {
|
||||
flushLatch.countDown();
|
||||
}
|
||||
}
|
||||
if (commitLatch != null) {
|
||||
awaitOrThrow(commitLatch);
|
||||
}
|
||||
return true;
|
||||
}));
|
||||
}
|
||||
|
||||
protected Future<Boolean> evictWait(long id, CyclicBarrier loadBarrier, CountDownLatch preEvictLatch, CountDownLatch postEvictLatch) throws Exception {
|
||||
return executor.submit(() -> {
|
||||
try {
|
||||
loadBarrier.await(WAIT_TIMEOUT, TimeUnit.SECONDS);
|
||||
if (preEvictLatch != null) {
|
||||
awaitOrThrow(preEvictLatch);
|
||||
}
|
||||
sessionFactory().getCache().evictEntity(Item.class, id);
|
||||
} finally {
|
||||
if (postEvictLatch != null) {
|
||||
postEvictLatch.countDown();
|
||||
}
|
||||
}
|
||||
return true;
|
||||
});
|
||||
}
|
||||
|
||||
protected void awaitOrThrow(CountDownLatch latch) throws InterruptedException, TimeoutException {
|
||||
if (!latch.await(WAIT_TIMEOUT, TimeUnit.SECONDS)) {
|
||||
throw new TimeoutException();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void addSettings(Map settings) {
|
||||
super.addSettings(settings);
|
||||
settings.put(TestInfinispanRegionFactory.TIME_SERVICE, TIME_SERVICE);
|
||||
}
|
||||
|
||||
protected void assertEmptyCache() {
|
||||
assertNull(entityCache.get(itemId)); // force expiration
|
||||
Map contents = Caches.entrySet(entityCache).toMap();
|
||||
assertEquals(Collections.EMPTY_MAP, contents);
|
||||
}
|
||||
|
||||
protected <T> T assertCacheContains(Class<T> expected) {
|
||||
Map contents = Caches.entrySet(entityCache).toMap();
|
||||
assertEquals("Cache does not have single element", 1, contents.size());
|
||||
Object value = contents.get(itemId);
|
||||
assertTrue(String.valueOf(value), expected.isInstance(value));
|
||||
return (T) value;
|
||||
}
|
||||
|
||||
protected Object assertSingleCacheEntry() {
|
||||
return assertCacheContains(CacheEntry.class);
|
||||
}
|
||||
}
|
|
@@ -1,224 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.test.cache.infinispan.functional;

import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.hibernate.FlushMode;
import org.hibernate.stat.SecondLevelCacheStatistics;

import org.hibernate.test.cache.infinispan.util.InfinispanTestingSetup;
import org.hibernate.test.cache.infinispan.functional.entities.Contact;
import org.hibernate.test.cache.infinispan.functional.entities.Customer;
import org.junit.ClassRule;
import org.junit.Test;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;

/**
 * BulkOperationsTestCase.
 *
 * @author Galder Zamarreño
 * @since 3.5
 */
public class BulkOperationsTest extends SingleNodeTest {
   @Override
   public List<Object[]> getParameters() {
      return getParameters(true, true, false, true);
   }

   @ClassRule
   public static final InfinispanTestingSetup infinispanTestIdentifier = new InfinispanTestingSetup();

   @Override
   public String[] getMappings() {
      return new String[] {
            "cache/infinispan/functional/entities/Contact.hbm.xml",
            "cache/infinispan/functional/entities/Customer.hbm.xml"
      };
   }

   @Test
   public void testBulkOperations() throws Throwable {
      boolean cleanedUp = false;
      try {
         createContacts();

         List<Integer> rhContacts = getContactsByCustomer( "Red Hat" );
         assertNotNull( "Red Hat contacts exist", rhContacts );
         assertEquals( "Created expected number of Red Hat contacts", 10, rhContacts.size() );

         SecondLevelCacheStatistics contactSlcs = sessionFactory()
               .getStatistics()
               .getSecondLevelCacheStatistics( Contact.class.getName() );
         assertEquals( 20, contactSlcs.getElementCountInMemory() );

         assertEquals( "Deleted all Red Hat contacts", 10, deleteContacts() );
         assertEquals( 0, contactSlcs.getElementCountInMemory() );

         List<Integer> jbContacts = getContactsByCustomer( "JBoss" );
         assertNotNull( "JBoss contacts exist", jbContacts );
         assertEquals( "JBoss contacts remain", 10, jbContacts.size() );

         for ( Integer id : rhContacts ) {
            assertNull( "Red Hat contact " + id + " cannot be retrieved", getContact( id ) );
         }
         rhContacts = getContactsByCustomer( "Red Hat" );
         if ( rhContacts != null ) {
            assertEquals( "No Red Hat contacts remain", 0, rhContacts.size() );
         }

         updateContacts( "Kabir", "Updated" );
         assertEquals( 0, contactSlcs.getElementCountInMemory() );
         for ( Integer id : jbContacts ) {
            Contact contact = getContact( id );
            assertNotNull( "JBoss contact " + id + " exists", contact );
            String expected = ("Kabir".equals( contact.getName() )) ? "Updated" : "2222";
            assertEquals( "JBoss contact " + id + " has correct TLF", expected, contact.getTlf() );
         }

         List<Integer> updated = getContactsByTLF( "Updated" );
         assertNotNull( "Got updated contacts", updated );
         assertEquals( "Updated contacts", 5, updated.size() );

         assertEquals( 10, contactSlcs.getElementCountInMemory() );
         updateContactsWithOneManual( "Kabir", "UpdatedAgain" );
         assertEquals( 0, contactSlcs.getElementCountInMemory() );
         for ( Integer id : jbContacts ) {
            Contact contact = getContact( id );
            assertNotNull( "JBoss contact " + id + " exists", contact );
            String expected = ("Kabir".equals( contact.getName() )) ? "UpdatedAgain" : "2222";
            assertEquals( "JBoss contact " + id + " has correct TLF", expected, contact.getTlf() );
         }

         updated = getContactsByTLF( "UpdatedAgain" );
         assertNotNull( "Got updated contacts", updated );
         assertEquals( "Updated contacts", 5, updated.size() );
      }
      catch (Throwable t) {
         cleanedUp = true;
         cleanup( true );
         throw t;
      }
      finally {
         // cleanup the db so we can run this test multiple times w/o restarting the cluster
         if ( !cleanedUp ) {
            cleanup( false );
         }
      }
   }

   public void createContacts() throws Exception {
      withTxSession(s -> {
         for ( int i = 0; i < 10; i++ ) {
            Customer c = createCustomer( i );
            s.persist(c);
         }
      });
   }

   public int deleteContacts() throws Exception {
      String deleteHQL = "delete Contact where customer in "
            + " (select customer FROM Customer as customer where customer.name = :cName)";

      int rowsAffected = withTxSessionApply(s ->
            s.createQuery( deleteHQL ).setFlushMode( FlushMode.AUTO )
                  .setParameter( "cName", "Red Hat" ).executeUpdate());
      return rowsAffected;
   }

   @SuppressWarnings( {"unchecked"})
   public List<Integer> getContactsByCustomer(String customerName) throws Exception {
      String selectHQL = "select contact.id from Contact contact"
            + " where contact.customer.name = :cName";

      return (List<Integer>) withTxSessionApply(s -> s.createQuery(selectHQL)
            .setFlushMode(FlushMode.AUTO)
            .setParameter("cName", customerName)
            .list());
   }

   @SuppressWarnings( {"unchecked"})
   public List<Integer> getContactsByTLF(String tlf) throws Exception {
      String selectHQL = "select contact.id from Contact contact"
            + " where contact.tlf = :cTLF";

      return (List<Integer>) withTxSessionApply(s -> s.createQuery(selectHQL)
            .setFlushMode(FlushMode.AUTO)
            .setParameter("cTLF", tlf)
            .list());
   }

   public int updateContacts(String name, String newTLF) throws Exception {
      String updateHQL = "update Contact set tlf = :cNewTLF where name = :cName";
      return withTxSessionApply(s -> s.createQuery( updateHQL )
            .setFlushMode( FlushMode.AUTO )
            .setParameter( "cNewTLF", newTLF )
            .setParameter( "cName", name )
            .executeUpdate());
   }

   public int updateContactsWithOneManual(String name, String newTLF) throws Exception {
      String queryHQL = "from Contact c where c.name = :cName";
      String updateHQL = "update Contact set tlf = :cNewTLF where name = :cName";
      return withTxSessionApply(s -> {
         List<Contact> list = s.createQuery(queryHQL).setParameter("cName", name).list();
         list.get(0).setTlf(newTLF);
         return s.createQuery(updateHQL)
               .setFlushMode(FlushMode.AUTO)
               .setParameter("cNewTLF", newTLF)
               .setParameter("cName", name)
               .executeUpdate();
      });
   }

   public Contact getContact(Integer id) throws Exception {
      return withTxSessionApply(s -> s.get( Contact.class, id ));
   }

   public void cleanup(boolean ignore) throws Exception {
      String deleteContactHQL = "delete from Contact";
      String deleteCustomerHQL = "delete from Customer";
      withTxSession(s -> {
         s.createQuery(deleteContactHQL).setFlushMode(FlushMode.AUTO).executeUpdate();
         s.createQuery(deleteCustomerHQL).setFlushMode(FlushMode.AUTO).executeUpdate();
      });
   }

   private Customer createCustomer(int id) throws Exception {
      System.out.println( "CREATE CUSTOMER " + id );
      try {
         Customer customer = new Customer();
         customer.setName( (id % 2 == 0) ? "JBoss" : "Red Hat" );
         Set<Contact> contacts = new HashSet<Contact>();

         Contact kabir = new Contact();
         kabir.setCustomer( customer );
         kabir.setName( "Kabir" );
         kabir.setTlf( "1111" );
         contacts.add( kabir );

         Contact bill = new Contact();
         bill.setCustomer( customer );
         bill.setName( "Bill" );
         bill.setTlf( "2222" );
         contacts.add( bill );

         customer.setContacts( contacts );

         return customer;
      }
      finally {
         System.out.println( "CREATE CUSTOMER " + id + " - END" );
      }
   }
}
@@ -1,448 +0,0 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.test.cache.infinispan.functional;
|
||||
|
||||
import java.io.PrintWriter;
|
||||
import java.io.StringWriter;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.CyclicBarrier;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.hibernate.FlushMode;
|
||||
import org.hibernate.LockMode;
|
||||
import org.hibernate.cache.infinispan.util.InfinispanMessageLogger;
|
||||
import org.hibernate.stat.SecondLevelCacheStatistics;
|
||||
|
||||
import org.hibernate.test.cache.infinispan.functional.entities.Contact;
|
||||
import org.hibernate.test.cache.infinispan.functional.entities.Customer;
|
||||
import org.hibernate.test.cache.infinispan.util.TestInfinispanRegionFactory;
|
||||
import org.hibernate.test.cache.infinispan.util.TestTimeService;
|
||||
import org.junit.Ignore;
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertNull;
|
||||
|
||||
/**
|
||||
* @author nikita_tovstoles@mba.berkeley.edu
|
||||
* @author Galder Zamarreño
|
||||
*/
|
||||
public class ConcurrentWriteTest extends SingleNodeTest {
|
||||
private static final InfinispanMessageLogger log = InfinispanMessageLogger.Provider.getLog( ConcurrentWriteTest.class );
|
||||
private static final boolean trace = log.isTraceEnabled();
|
||||
/**
|
||||
* when USER_COUNT==1, tests pass, when >4 tests fail
|
||||
*/
|
||||
private static final int USER_COUNT = 5;
|
||||
private static final int ITERATION_COUNT = 150;
|
||||
private static final int THINK_TIME_MILLIS = 10;
|
||||
private static final long LAUNCH_INTERVAL_MILLIS = 10;
|
||||
private static final Random random = new Random();
|
||||
private static final TestTimeService TIME_SERVICE = new TestTimeService();
|
||||
|
||||
/**
|
||||
* kill switch used to stop all users when one fails
|
||||
*/
|
||||
private static volatile boolean TERMINATE_ALL_USERS = false;
|
||||
|
||||
/**
|
||||
* collection of IDs of all customers participating in this test
|
||||
*/
|
||||
private Set<Integer> customerIDs = new HashSet<Integer>();
|
||||
|
||||
@Override
|
||||
public List<Object[]> getParameters() {
|
||||
return getParameters(true, true, false, true);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void prepareTest() throws Exception {
|
||||
super.prepareTest();
|
||||
TERMINATE_ALL_USERS = false;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void addSettings(Map settings) {
|
||||
super.addSettings(settings);
|
||||
settings.put(TestInfinispanRegionFactory.TIME_SERVICE, TIME_SERVICE);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void cleanupTest() throws Exception {
|
||||
try {
|
||||
super.cleanupTest();
|
||||
}
|
||||
finally {
|
||||
cleanup();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPingDb() throws Exception {
|
||||
withTxSession(s -> s.createQuery( "from " + Customer.class.getName() ).list());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSingleUser() throws Exception {
|
||||
// setup
|
||||
sessionFactory().getStatistics().clear();
|
||||
// wait a while to make sure that timestamp comparison works after invalidateRegion
|
||||
TIME_SERVICE.advance(1);
|
||||
|
||||
Customer customer = createCustomer( 0 );
|
||||
final Integer customerId = customer.getId();
|
||||
getCustomerIDs().add( customerId );
|
||||
|
||||
// wait a while to make sure that timestamp comparison works after collection remove (during insert)
|
||||
TIME_SERVICE.advance(1);
|
||||
|
||||
assertNull( "contact exists despite not being added", getFirstContact( customerId ) );
|
||||
|
||||
// check that cache was hit
|
||||
SecondLevelCacheStatistics customerSlcs = sessionFactory()
|
||||
.getStatistics()
|
||||
.getSecondLevelCacheStatistics( Customer.class.getName() );
|
||||
assertEquals( 1, customerSlcs.getPutCount() );
|
||||
assertEquals( 1, customerSlcs.getElementCountInMemory() );
|
||||
assertEquals( 1, customerSlcs.getEntries().size() );
|
||||
|
||||
log.infof( "Add contact to customer {0}", customerId );
|
||||
SecondLevelCacheStatistics contactsCollectionSlcs = sessionFactory()
|
||||
.getStatistics()
|
||||
.getSecondLevelCacheStatistics( Customer.class.getName() + ".contacts" );
|
||||
assertEquals( 1, contactsCollectionSlcs.getPutCount() );
|
||||
assertEquals( 1, contactsCollectionSlcs.getElementCountInMemory() );
|
||||
assertEquals( 1, contactsCollectionSlcs.getEntries().size() );
|
||||
|
||||
final Contact contact = addContact( customerId );
|
||||
assertNotNull( "contact returned by addContact is null", contact );
|
||||
assertEquals(
|
||||
"Customer.contacts cache was not invalidated after addContact", 0,
|
||||
contactsCollectionSlcs.getElementCountInMemory()
|
||||
);
|
||||
|
||||
assertNotNull( "Contact missing after successful add call", getFirstContact( customerId ) );
|
||||
|
||||
// read everyone's contacts
|
||||
readEveryonesFirstContact();
|
||||
|
||||
removeContact( customerId );
|
||||
assertNull( "contact still exists after successful remove call", getFirstContact( customerId ) );
|
||||
|
||||
}
|
||||
|
||||
// Ignoring the test as it's more of a stress-test: this should be enabled manually
|
||||
@Ignore
|
||||
@Test
|
||||
public void testManyUsers() throws Throwable {
|
||||
try {
|
||||
// setup - create users
|
||||
for ( int i = 0; i < USER_COUNT; i++ ) {
|
||||
Customer customer = createCustomer( 0 );
|
||||
getCustomerIDs().add( customer.getId() );
|
||||
}
|
||||
assertEquals( "failed to create enough Customers", USER_COUNT, getCustomerIDs().size() );
|
||||
|
||||
final ExecutorService executor = Executors.newFixedThreadPool( USER_COUNT );
|
||||
|
||||
CyclicBarrier barrier = new CyclicBarrier( USER_COUNT + 1 );
|
||||
List<Future<Void>> futures = new ArrayList<Future<Void>>( USER_COUNT );
|
||||
for ( Integer customerId : getCustomerIDs() ) {
|
||||
Future<Void> future = executor.submit( new UserRunner( customerId, barrier ) );
|
||||
futures.add( future );
|
||||
Thread.sleep( LAUNCH_INTERVAL_MILLIS ); // rampup
|
||||
}
|
||||
barrier.await( 2, TimeUnit.MINUTES ); // wait for all threads to finish
|
||||
log.info( "All threads finished, let's shutdown the executor and check whether any exceptions were reported" );
|
||||
for ( Future<Void> future : futures ) {
|
||||
future.get();
|
||||
}
|
||||
executor.shutdown();
|
||||
log.info( "All future gets checked" );
|
||||
}
|
||||
catch (Throwable t) {
|
||||
log.error( "Error running test", t );
|
||||
throw t;
|
||||
}
|
||||
}
|
||||
|
||||
public void cleanup() throws Exception {
|
||||
getCustomerIDs().clear();
|
||||
String deleteContactHQL = "delete from Contact";
|
||||
String deleteCustomerHQL = "delete from Customer";
|
||||
withTxSession(s -> {
|
||||
s.createQuery(deleteContactHQL).setFlushMode(FlushMode.AUTO).executeUpdate();
|
||||
s.createQuery(deleteCustomerHQL).setFlushMode(FlushMode.AUTO).executeUpdate();
|
||||
});
|
||||
}
|
||||
|
||||
private Customer createCustomer(int nameSuffix) throws Exception {
|
||||
return withTxSessionApply(s -> {
|
||||
Customer customer = new Customer();
|
||||
customer.setName( "customer_" + nameSuffix );
|
||||
customer.setContacts( new HashSet<Contact>() );
|
||||
s.persist( customer );
|
||||
return customer;
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* read first contact of every Customer participating in this test. this forces concurrent cache
|
||||
* writes of Customer.contacts Collection cache node
|
||||
*
|
||||
* @return who cares
|
||||
* @throws java.lang.Exception
|
||||
*/
|
||||
private void readEveryonesFirstContact() throws Exception {
|
||||
withTxSession(s -> {
|
||||
for ( Integer customerId : getCustomerIDs() ) {
|
||||
if ( TERMINATE_ALL_USERS ) {
|
||||
markRollbackOnly(s);
|
||||
return;
|
||||
}
|
||||
Customer customer = s.load( Customer.class, customerId );
|
||||
Set<Contact> contacts = customer.getContacts();
|
||||
if ( !contacts.isEmpty() ) {
|
||||
contacts.iterator().next();
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* -load existing Customer -get customer's contacts; return 1st one
|
||||
*
|
||||
* @param customerId
|
||||
* @return first Contact or null if customer has none
|
||||
*/
|
||||
private Contact getFirstContact(Integer customerId) throws Exception {
|
||||
assert customerId != null;
|
||||
return withTxSessionApply(s -> {
|
||||
Customer customer = s.load(Customer.class, customerId);
|
||||
Set<Contact> contacts = customer.getContacts();
|
||||
Contact firstContact = contacts.isEmpty() ? null : contacts.iterator().next();
|
||||
if (TERMINATE_ALL_USERS) {
|
||||
markRollbackOnly(s);
|
||||
}
|
||||
return firstContact;
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* -load existing Customer -create a new Contact and add to customer's contacts
|
||||
*
|
||||
* @param customerId
|
||||
* @return added Contact
|
||||
*/
|
||||
private Contact addContact(Integer customerId) throws Exception {
|
||||
assert customerId != null;
|
||||
return withTxSessionApply(s -> {
|
||||
final Customer customer = s.load(Customer.class, customerId);
|
||||
Contact contact = new Contact();
|
||||
contact.setName("contact name");
|
||||
contact.setTlf("wtf is tlf?");
|
||||
contact.setCustomer(customer);
|
||||
customer.getContacts().add(contact);
|
||||
// assuming contact is persisted via cascade from customer
|
||||
if (TERMINATE_ALL_USERS) {
|
||||
markRollbackOnly(s);
|
||||
}
|
||||
return contact;
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* remove existing 'contact' from customer's list of contacts
|
||||
*
|
||||
* @param customerId
|
||||
* @throws IllegalStateException
|
||||
* if customer does not own a contact
|
||||
*/
|
||||
private void removeContact(Integer customerId) throws Exception {
|
||||
assert customerId != null;
|
||||
|
||||
withTxSession(s -> {
|
||||
Customer customer = s.load( Customer.class, customerId );
|
||||
Set<Contact> contacts = customer.getContacts();
|
||||
if ( contacts.size() != 1 ) {
|
||||
throw new IllegalStateException(
|
||||
"can't remove contact: customer id=" + customerId
|
||||
+ " expected exactly 1 contact, " + "actual count=" + contacts.size()
|
||||
);
|
||||
}
|
||||
|
||||
Contact contact = contacts.iterator().next();
|
||||
// H2 version 1.3 (without MVCC) fails with deadlock on Contacts/Customers modification, therefore,
|
||||
// we have to enforce locking Contacts first
|
||||
s.lock(contact, LockMode.PESSIMISTIC_WRITE);
|
||||
contacts.remove( contact );
|
||||
contact.setCustomer( null );
|
||||
|
||||
// explicitly delete Contact because hbm has no 'DELETE_ORPHAN' cascade?
|
||||
// getEnvironment().getSessionFactory().getCurrentSession().delete(contact); //appears to
|
||||
// not be needed
|
||||
|
||||
// assuming contact is persisted via cascade from customer
|
||||
|
||||
if ( TERMINATE_ALL_USERS ) {
|
||||
markRollbackOnly(s);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the customerIDs
|
||||
*/
|
||||
public Set<Integer> getCustomerIDs() {
|
||||
return customerIDs;
|
||||
}
|
||||
|
||||
private String statusOfRunnersToString(Set<UserRunner> runners) {
|
||||
assert runners != null;
|
||||
|
||||
StringBuilder sb = new StringBuilder(
|
||||
"TEST CONFIG [userCount=" + USER_COUNT
|
||||
+ ", iterationsPerUser=" + ITERATION_COUNT + ", thinkTimeMillis="
|
||||
+ THINK_TIME_MILLIS + "] " + " STATE of UserRunners: "
|
||||
);
|
||||
|
||||
for ( UserRunner r : runners ) {
|
||||
sb.append( r.toString() ).append( System.lineSeparator() );
|
||||
}
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
class UserRunner implements Callable<Void> {
|
||||
private final CyclicBarrier barrier;
|
||||
final private Integer customerId;
|
||||
private int completedIterations = 0;
|
||||
private Throwable causeOfFailure;
|
||||
|
||||
public UserRunner(Integer cId, CyclicBarrier barrier) {
|
||||
assert cId != null;
|
||||
this.customerId = cId;
|
||||
this.barrier = barrier;
|
||||
}
|
||||
|
||||
private boolean contactExists() throws Exception {
|
||||
return getFirstContact( customerId ) != null;
|
||||
}
|
||||
|
||||
public Void call() throws Exception {
|
||||
// name this thread for easier log tracing
|
||||
Thread.currentThread().setName( "UserRunnerThread-" + getCustomerId() );
|
||||
log.info( "Wait for all executions paths to be ready to perform calls" );
|
||||
try {
|
||||
for ( int i = 0; i < ITERATION_COUNT && !TERMINATE_ALL_USERS; i++ ) {
|
||||
contactExists();
|
||||
if ( trace ) {
|
||||
log.trace( "Add contact for customer " + customerId );
|
||||
}
|
||||
addContact( customerId );
|
||||
if ( trace ) {
|
||||
log.trace( "Added contact" );
|
||||
}
|
||||
thinkRandomTime();
|
||||
contactExists();
|
||||
thinkRandomTime();
|
||||
if ( trace ) {
|
||||
log.trace( "Read all customers' first contact" );
|
||||
}
|
||||
// read everyone's contacts
|
||||
readEveryonesFirstContact();
|
||||
if ( trace ) {
|
||||
log.trace( "Read completed" );
|
||||
}
|
||||
thinkRandomTime();
|
||||
if ( trace ) {
|
||||
log.trace( "Remove contact of customer" + customerId );
|
||||
}
|
||||
removeContact( customerId );
|
||||
if ( trace ) {
|
||||
log.trace( "Removed contact" );
|
||||
}
|
||||
contactExists();
|
||||
thinkRandomTime();
|
||||
++completedIterations;
|
||||
if ( trace ) {
|
||||
log.tracef( "Iteration completed %d", completedIterations );
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (Throwable t) {
|
||||
TERMINATE_ALL_USERS = true;
|
||||
log.error( "Error", t );
|
||||
throw new Exception( t );
|
||||
}
|
||||
finally {
|
||||
log.info( "Wait for all execution paths to finish" );
|
||||
barrier.await();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
public boolean isSuccess() {
|
||||
return ITERATION_COUNT == getCompletedIterations();
|
||||
}
|
||||
|
||||
public int getCompletedIterations() {
|
||||
return completedIterations;
|
||||
}
|
||||
|
||||
public Throwable getCauseOfFailure() {
|
||||
return causeOfFailure;
|
||||
}
|
||||
|
||||
public Integer getCustomerId() {
|
||||
return customerId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return super.toString() + "[customerId=" + getCustomerId() + " iterationsCompleted="
|
||||
+ getCompletedIterations() + " completedAll=" + isSuccess() + " causeOfFailure="
|
||||
+ (this.causeOfFailure != null ? getStackTrace( causeOfFailure ) : "") + "] ";
|
||||
}
|
||||
}
|
||||
|
||||
public static String getStackTrace(Throwable throwable) {
|
||||
StringWriter sw = new StringWriter();
|
||||
PrintWriter pw = new PrintWriter( sw, true );
|
||||
throwable.printStackTrace( pw );
|
||||
return sw.getBuffer().toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* sleep between 0 and THINK_TIME_MILLIS.
|
||||
*
|
||||
* @throws RuntimeException if sleep is interrupted or TERMINATE_ALL_USERS flag was set to true in the
|
||||
* meantime
|
||||
*/
|
||||
private void thinkRandomTime() {
|
||||
try {
|
||||
Thread.sleep( random.nextInt( THINK_TIME_MILLIS ) );
|
||||
}
|
||||
catch (InterruptedException ex) {
|
||||
throw new RuntimeException( "sleep interrupted", ex );
|
||||
}
|
||||
|
||||
if ( TERMINATE_ALL_USERS ) {
|
||||
throw new RuntimeException( "told to terminate (because a UserRunner had failed)" );
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@@ -1,66 +0,0 @@
package org.hibernate.test.cache.infinispan.functional;

import org.hibernate.stat.Statistics;
import org.hibernate.test.cache.infinispan.functional.entities.Name;
import org.hibernate.test.cache.infinispan.functional.entities.Person;
import org.junit.Test;

import java.util.List;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

/**
 * Persons should be correctly indexed since we can use Type for comparison
 *
 * @author Radim Vansa <rvansa@redhat.com>
 */
public class EqualityTest extends SingleNodeTest {
   @Override
   public List<Object[]> getParameters() {
      return getParameters(true, true, true, true);
   }

   @Override
   protected Class[] getAnnotatedClasses() {
      return new Class[] { Person.class };
   }

   @Test
   public void testEqualityFromType() throws Exception {
      Person john = new Person("John", "Black", 26);
      Person peter = new Person("Peter", "White", 32);

      withTxSession(s -> {
         s.persist(john);
         s.persist(peter);
      });

      Statistics statistics = sessionFactory().getStatistics();
      statistics.clear();

      for (int i = 0; i < 5; ++i) {
         withTxSession(s -> {
            Person p1 = s.get(Person.class, john.getName());
            assertPersonEquals(john, p1);
            Person p2 = s.get(Person.class, peter.getName());
            assertPersonEquals(peter, p2);
            Person p3 = s.get(Person.class, new Name("Foo", "Bar"));
            assertNull(p3);
         });
      }

      assertTrue(statistics.getSecondLevelCacheHitCount() > 0);
      assertTrue(statistics.getSecondLevelCacheMissCount() > 0);
   }

   private static void assertPersonEquals(Person expected, Person person) {
      assertNotNull(person);
      assertNotNull(person.getName());
      assertEquals(expected.getName().getFirstName(), person.getName().getFirstName());
      assertEquals(expected.getName().getLastName(), person.getName().getLastName());
      assertEquals(expected.getAge(), person.getAge());
   }
}
@@ -1,275 +0,0 @@
|
|||
package org.hibernate.test.cache.infinispan.functional;
|
||||
|
||||
import java.lang.reflect.Method;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.Phaser;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
|
||||
import org.hibernate.PessimisticLockException;
|
||||
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
|
||||
import org.hibernate.cache.infinispan.entity.EntityRegionImpl;
|
||||
import org.hibernate.cache.infinispan.util.InfinispanMessageLogger;
|
||||
|
||||
import org.hibernate.testing.TestForIssue;
|
||||
import org.hibernate.test.cache.infinispan.functional.entities.Item;
|
||||
import org.hibernate.test.cache.infinispan.util.TestInfinispanRegionFactory;
|
||||
import org.junit.Ignore;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.infinispan.AdvancedCache;
|
||||
import org.infinispan.commands.read.GetKeyValueCommand;
|
||||
import org.infinispan.context.InvocationContext;
|
||||
import org.infinispan.interceptors.base.BaseCustomInterceptor;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
/**
|
||||
* Tests specific to invalidation mode caches
|
||||
*
|
||||
* @author Radim Vansa <rvansa@redhat.com>
|
||||
*/
|
||||
public class InvalidationTest extends SingleNodeTest {
|
||||
static final InfinispanMessageLogger log = InfinispanMessageLogger.Provider.getLog(ReadOnlyTest.class);
|
||||
|
||||
@Override
|
||||
public List<Object[]> getParameters() {
|
||||
return Arrays.asList(TRANSACTIONAL, READ_WRITE_INVALIDATION);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void addSettings(Map settings) {
|
||||
super.addSettings(settings);
|
||||
settings.put(TestInfinispanRegionFactory.PENDING_PUTS_SIMPLE, false);
|
||||
}
|
||||
|
||||
@Test
|
||||
@TestForIssue(jiraKey = "HHH-9868")
|
||||
public void testConcurrentRemoveAndPutFromLoad() throws Exception {
|
||||
|
||||
final Item item = new Item( "chris", "Chris's Item" );
|
||||
withTxSession(s -> {
|
||||
s.persist(item);
|
||||
});
|
||||
|
||||
Phaser deletePhaser = new Phaser(2);
|
||||
Phaser getPhaser = new Phaser(2);
|
||||
HookInterceptor hook = new HookInterceptor();
|
||||
|
||||
AdvancedCache pendingPutsCache = getPendingPutsCache(Item.class);
|
||||
pendingPutsCache.addInterceptor(hook, 0);
|
||||
AtomicBoolean getThreadBlockedInDB = new AtomicBoolean(false);
|
||||
|
||||
Thread deleteThread = new Thread(() -> {
|
||||
try {
|
||||
withTxSession(s -> {
|
||||
Item loadedItem = s.get(Item.class, item.getId());
|
||||
assertNotNull(loadedItem);
|
||||
arriveAndAwait(deletePhaser, 2000);
|
||||
arriveAndAwait(deletePhaser, 2000);
|
||||
log.trace("Item loaded");
|
||||
s.delete(loadedItem);
|
||||
s.flush();
|
||||
log.trace("Item deleted");
|
||||
// start get-thread here
|
||||
arriveAndAwait(deletePhaser, 2000);
|
||||
// we need longer timeout since in non-MVCC DBs the get thread
|
||||
// can be blocked
|
||||
arriveAndAwait(deletePhaser, 4000);
|
||||
});
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}, "delete-thread");
|
||||
Thread getThread = new Thread(() -> {
|
||||
try {
|
||||
withTxSession(s -> {
|
||||
// DB load should happen before the record is deleted,
|
||||
// putFromLoad should happen after deleteThread ends
|
||||
Item loadedItem = s.get(Item.class, item.getId());
|
||||
if (getThreadBlockedInDB.get()) {
|
||||
assertNull(loadedItem);
|
||||
} else {
|
||||
assertNotNull(loadedItem);
|
||||
}
|
||||
});
|
||||
} catch (PessimisticLockException e) {
|
||||
// If we end up here, database locks guard us against situation tested
|
||||
// in this case and HHH-9868 cannot happen.
|
||||
// (delete-thread has ITEMS table write-locked and we try to acquire read-lock)
|
||||
try {
|
||||
arriveAndAwait(getPhaser, 2000);
|
||||
arriveAndAwait(getPhaser, 2000);
|
||||
} catch (Exception e1) {
|
||||
throw new RuntimeException(e1);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}, "get-thread");
|
||||
|
||||
deleteThread.start();
|
||||
// deleteThread loads the entity
|
||||
arriveAndAwait(deletePhaser, 2000);
|
||||
withTx(() -> {
|
||||
sessionFactory().getCache().evictEntity(Item.class, item.getId());
|
||||
assertFalse(sessionFactory().getCache().containsEntity(Item.class, item.getId()));
|
||||
return null;
|
||||
});
|
||||
arriveAndAwait(deletePhaser, 2000);
|
||||
// delete thread invalidates PFER
|
||||
arriveAndAwait(deletePhaser, 2000);
|
||||
// get thread gets the entity from DB
|
||||
hook.block(getPhaser, getThread);
|
||||
getThread.start();
|
||||
try {
|
||||
arriveAndAwait(getPhaser, 2000);
|
||||
} catch (TimeoutException e) {
|
||||
getThreadBlockedInDB.set(true);
|
||||
}
|
||||
arriveAndAwait(deletePhaser, 2000);
|
||||
// delete thread finishes the remove from DB and cache
|
||||
deleteThread.join();
|
||||
hook.unblock();
|
||||
arriveAndAwait(getPhaser, 2000);
|
||||
// get thread puts the entry into cache
|
||||
getThread.join();
|
||||
|
||||
assertNoInvalidators(pendingPutsCache);
|
||||
|
||||
withTxSession(s -> {
|
||||
Item loadedItem = s.get(Item.class, item.getId());
|
||||
assertNull(loadedItem);
|
||||
});
|
||||
}
|
||||
|
||||
protected AdvancedCache getPendingPutsCache(Class<Item> entityClazz) {
|
||||
EntityRegionImpl region = (EntityRegionImpl) sessionFactory().getCache()
|
||||
.getEntityRegionAccess(entityClazz.getName()).getRegion();
|
||||
AdvancedCache entityCache = region.getCache();
|
||||
return (AdvancedCache) entityCache.getCacheManager().getCache(
|
||||
entityCache.getName() + "-" + InfinispanRegionFactory.DEF_PENDING_PUTS_RESOURCE).getAdvancedCache();
|
||||
}
|
||||
|
||||
protected static void arriveAndAwait(Phaser phaser, int timeout) throws TimeoutException, InterruptedException {
|
||||
phaser.awaitAdvanceInterruptibly(phaser.arrive(), timeout, TimeUnit.MILLISECONDS);
|
||||
}
|
||||
|
||||
@TestForIssue(jiraKey = "HHH-11304")
|
||||
@Test
|
||||
public void testFailedInsert() throws Exception {
|
||||
AdvancedCache pendingPutsCache = getPendingPutsCache(Item.class);
|
||||
assertNoInvalidators(pendingPutsCache);
|
||||
withTxSession(s -> {
|
||||
Item i = new Item("inserted", "bar");
|
||||
s.persist(i);
|
||||
s.flush();
|
||||
s.getTransaction().setRollbackOnly();
|
||||
});
|
||||
assertNoInvalidators(pendingPutsCache);
|
||||
}
|
||||
|
||||
@TestForIssue(jiraKey = "HHH-11304")
|
||||
@Test
|
||||
public void testFailedUpdate() throws Exception {
|
||||
AdvancedCache pendingPutsCache = getPendingPutsCache(Item.class);
|
||||
assertNoInvalidators(pendingPutsCache);
|
||||
final Item item = new Item("before-update", "bar");
|
||||
withTxSession(s -> s.persist(item));
|
||||
|
||||
withTxSession(s -> {
|
||||
Item item2 = s.load(Item.class, item.getId());
|
||||
assertEquals("before-update", item2.getName());
|
||||
item2.setName("after-update");
|
||||
s.persist(item2);
|
||||
s.flush();
|
||||
s.flush(); // workaround for HHH-11312
|
||||
s.getTransaction().setRollbackOnly();
|
||||
});
|
||||
assertNoInvalidators(pendingPutsCache);
|
||||
|
||||
withTxSession(s -> {
|
||||
Item item3 = s.load(Item.class, item.getId());
|
||||
assertEquals("before-update", item3.getName());
|
||||
s.remove(item3);
|
||||
});
|
||||
assertNoInvalidators(pendingPutsCache);
|
||||
}
|
||||
|
||||
@TestForIssue(jiraKey = "HHH-11304")
|
||||
@Test
|
||||
public void testFailedRemove() throws Exception {
|
||||
AdvancedCache pendingPutsCache = getPendingPutsCache(Item.class);
|
||||
assertNoInvalidators(pendingPutsCache);
|
||||
final Item item = new Item("before-remove", "bar");
|
||||
withTxSession(s -> s.persist(item));
|
||||
|
||||
withTxSession(s -> {
|
||||
Item item2 = s.load(Item.class, item.getId());
|
||||
assertEquals("before-remove", item2.getName());
|
||||
s.remove(item2);
|
||||
s.flush();
|
||||
s.getTransaction().setRollbackOnly();
|
||||
});
|
||||
assertNoInvalidators(pendingPutsCache);
|
||||
|
||||
withTxSession(s -> {
|
||||
Item item3 = s.load(Item.class, item.getId());
|
||||
assertEquals("before-remove", item3.getName());
|
||||
s.remove(item3);
|
||||
});
|
||||
assertNoInvalidators(pendingPutsCache);
|
||||
}
|
||||
|
||||
protected void assertNoInvalidators(AdvancedCache<Object, Object> pendingPutsCache) throws Exception {
|
||||
Method getInvalidators = null;
|
||||
for (Map.Entry<Object, Object> entry : pendingPutsCache.entrySet()) {
|
||||
if (getInvalidators == null) {
|
||||
getInvalidators = entry.getValue().getClass().getMethod("getInvalidators");
|
||||
getInvalidators.setAccessible(true);
|
||||
}
|
||||
Collection invalidators = (Collection) getInvalidators.invoke(entry.getValue());
|
||||
if (invalidators != null) {
|
||||
assertTrue("Invalidators on key " + entry.getKey() + ": " + invalidators, invalidators.isEmpty());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static class HookInterceptor extends BaseCustomInterceptor {
|
||||
Phaser phaser;
|
||||
Thread thread;
|
||||
|
||||
public synchronized void block(Phaser phaser, Thread thread) {
|
||||
this.phaser = phaser;
|
||||
this.thread = thread;
|
||||
}
|
||||
|
||||
public synchronized void unblock() {
|
||||
phaser = null;
|
||||
thread = null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object visitGetKeyValueCommand(InvocationContext ctx, GetKeyValueCommand command) throws Throwable {
|
||||
Phaser phaser;
|
||||
Thread thread;
|
||||
synchronized (this) {
|
||||
phaser = this.phaser;
|
||||
thread = this.thread;
|
||||
}
|
||||
if (phaser != null && Thread.currentThread() == thread) {
|
||||
arriveAndAwait(phaser, 2000);
|
||||
arriveAndAwait(phaser, 2000);
|
||||
}
|
||||
return super.visitGetKeyValueCommand(ctx, command);
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,180 +0,0 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */
package org.hibernate.test.cache.infinispan.functional;

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import javax.naming.Context;
import javax.naming.InitialContext;
import javax.naming.Name;
import javax.naming.NameNotFoundException;
import javax.naming.Reference;
import javax.naming.StringRefAddr;

import org.hibernate.boot.registry.StandardServiceRegistry;
import org.hibernate.cache.infinispan.InfinispanRegionFactory;
import org.hibernate.cache.infinispan.JndiInfinispanRegionFactory;
import org.hibernate.cache.infinispan.util.InfinispanMessageLogger;
import org.hibernate.cache.spi.RegionFactory;
import org.hibernate.cfg.Environment;
import org.hibernate.engine.config.spi.ConfigurationService;
import org.hibernate.engine.spi.SessionFactoryImplementor;
import org.hibernate.stat.Statistics;

import org.hibernate.test.cache.infinispan.functional.entities.Item;
import org.junit.Test;

import org.infinispan.Cache;
import org.infinispan.lifecycle.ComponentStatus;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;

import org.jboss.util.naming.NonSerializableFactory;

import org.jnp.server.Main;
import org.jnp.server.SingletonNamingServer;

import static org.junit.Assert.assertEquals;

/**
 * @author Galder Zamarreño
 */
public class JndiRegionFactoryTest extends SingleNodeTest {
   private static final InfinispanMessageLogger log = InfinispanMessageLogger.Provider.getLog( JndiRegionFactoryTest.class );
   private static final String JNDI_NAME = "java:CacheManager";
   private Main namingMain;
   private SingletonNamingServer namingServer;
   private Properties props;
   private boolean bindToJndi = true;
   private EmbeddedCacheManager manager;

   @Override
   public List<Object[]> getParameters() {
      return Collections.singletonList(READ_WRITE_INVALIDATION);
   }

   @Override
   protected Class<? extends RegionFactory> getRegionFactoryClass() {
      return JndiInfinispanRegionFactory.class;
   }

   @Override
   protected void cleanupTest() throws Exception {
      Context ctx = new InitialContext( props );
      unbind( JNDI_NAME, ctx );
      namingServer.destroy();
      namingMain.stop();
      manager.stop(); // Need to stop cos JNDI region factory does not stop it.
   }

   @Override
   protected void afterStandardServiceRegistryBuilt(StandardServiceRegistry ssr) {
      if ( bindToJndi ) {
         try {
            // Create an in-memory jndi
            namingServer = new SingletonNamingServer();
            namingMain = new Main();
            namingMain.setInstallGlobalService( true );
            namingMain.setPort( -1 );
            namingMain.start();
            props = new Properties();
            props.put( "java.naming.factory.initial", "org.jnp.interfaces.NamingContextFactory" );
            props.put( "java.naming.factory.url.pkgs", "org.jboss.naming:org.jnp.interfaces" );

            final String cfgFileName = (String) ssr.getService( ConfigurationService.class ).getSettings().get(
                  InfinispanRegionFactory.INFINISPAN_CONFIG_RESOURCE_PROP
            );
            manager = new DefaultCacheManager(
                  cfgFileName == null ? InfinispanRegionFactory.DEF_INFINISPAN_CONFIG_RESOURCE : cfgFileName,
                  false
            );
            Context ctx = new InitialContext( props );
            bind( JNDI_NAME, manager, EmbeddedCacheManager.class, ctx );
         }
         catch (Exception e) {
            throw new RuntimeException( "Failure to set up JNDI", e );
         }
      }
   }

   @Override
   @SuppressWarnings("unchecked")
   protected void addSettings(Map settings) {
      super.addSettings( settings );

      settings.put( JndiInfinispanRegionFactory.CACHE_MANAGER_RESOURCE_PROP, JNDI_NAME );
      settings.put( Environment.JNDI_CLASS, "org.jnp.interfaces.NamingContextFactory" );
      settings.put( "java.naming.factory.url.pkgs", "org.jboss.naming:org.jnp.interfaces" );
   }

   @Test
   public void testRedeployment() throws Exception {
      addEntityCheckCache( sessionFactory() );
      bindToJndi = false;
      rebuildSessionFactory();

      addEntityCheckCache( sessionFactory() );
      JndiInfinispanRegionFactory regionFactory = (JndiInfinispanRegionFactory) sessionFactory().getSettings().getRegionFactory();
      Cache cache = regionFactory.getCacheManager().getCache( Item.class.getName() );
      assertEquals( ComponentStatus.RUNNING, cache.getStatus() );
   }

   private void addEntityCheckCache(SessionFactoryImplementor sessionFactory) throws Exception {
      Item item = new Item( "chris", "Chris's Item" );
      withTxSession(s -> s.persist( item ));

      withTxSession(s -> {
         Item found = s.load(Item.class, item.getId());
         Statistics stats = sessionFactory.getStatistics();
         log.info(stats.toString());
         assertEquals(item.getDescription(), found.getDescription());
         assertEquals(0, stats.getSecondLevelCacheMissCount());
         assertEquals(1, stats.getSecondLevelCacheHitCount());
         s.delete(found);
      });
   }

   /**
    * Helper method that binds a non-serializable object to the JNDI tree.
    *
    * @param jndiName Name under which the object must be bound
    * @param who Object to bind in JNDI
    * @param classType Class type under which the bound object should appear
    * @param ctx Naming context under which we bind the object
    * @throws Exception Thrown if a naming exception occurs during binding
    */
   private void bind(String jndiName, Object who, Class<?> classType, Context ctx) throws Exception {
      // Ah ! This service isn't serializable, so we use a helper class
      NonSerializableFactory.bind( jndiName, who );
      Name n = ctx.getNameParser( "" ).parse( jndiName );
      while ( n.size() > 1 ) {
         String ctxName = n.get( 0 );
         try {
            ctx = (Context) ctx.lookup( ctxName );
         }
         catch (NameNotFoundException e) {
            log.debug( "creating Subcontext " + ctxName );
            ctx = ctx.createSubcontext( ctxName );
         }
         n = n.getSuffix( 1 );
      }

      // The helper class NonSerializableFactory uses address type nns, we go on to
      // use the helper class to bind the service object in JNDI
      StringRefAddr addr = new StringRefAddr( "nns", jndiName );
      Reference ref = new Reference( classType.getName(), addr, NonSerializableFactory.class.getName(), null );
      ctx.rebind( n.get( 0 ), ref );
   }

   private void unbind(String jndiName, Context ctx) throws Exception {
      NonSerializableFactory.unbind( jndiName );
      // ctx.unbind(jndiName);
   }

}