diff --git a/build.gradle b/build.gradle index 1bac304376..e99ad3b26c 100644 --- a/build.gradle +++ b/build.gradle @@ -63,9 +63,6 @@ libraries = [ jandex: 'org.jboss:jandex:1.0.3.Final', classmate: 'com.fasterxml:classmate:0.5.4', - // Jakarta commons-collections todo : get rid of commons-collections dependency - commons_collections: - 'commons-collections:commons-collections:3.2.1', // Dom4J dom4j: 'dom4j:dom4j:1.6.1@jar', diff --git a/hibernate-core/hibernate-core.gradle b/hibernate-core/hibernate-core.gradle index 99a98e867f..6c27302214 100644 --- a/hibernate-core/hibernate-core.gradle +++ b/hibernate-core/hibernate-core.gradle @@ -4,7 +4,6 @@ apply plugin: org.hibernate.build.gradle.inject.InjectionPlugin apply plugin: org.hibernate.build.gradle.testing.matrix.MatrixTestingPlugin dependencies { - compile( libraries.commons_collections ) compile( libraries.jta ) compile( libraries.dom4j ) { transitive = false diff --git a/hibernate-core/src/main/java/org/hibernate/cfg/AvailableSettings.java b/hibernate-core/src/main/java/org/hibernate/cfg/AvailableSettings.java index ad2ec11afa..6d70bc4949 100644 --- a/hibernate-core/src/main/java/org/hibernate/cfg/AvailableSettings.java +++ b/hibernate-core/src/main/java/org/hibernate/cfg/AvailableSettings.java @@ -452,15 +452,37 @@ public interface AvailableSettings { public static final String PREFER_POOLED_VALUES_LO = "hibernate.id.optimizer.pooled.prefer_lo"; /** - * The maximum number of strong references maintained by {@link org.hibernate.internal.util.collections.SoftLimitMRUCache}. Default is 128. + * The maximum number of strong references maintained by {@link org.hibernate.engine.query.spi.QueryPlanCache}. Default is 128. + * @deprecated in favor of {@link #QUERY_PLAN_CACHE_PARAMETER_METADATA_MAX_SIZE} */ + @Deprecated public static final String QUERY_PLAN_CACHE_MAX_STRONG_REFERENCES = "hibernate.query.plan_cache_max_strong_references"; /** - * The maximum number of soft references maintained by {@link org.hibernate.internal.util.collections.SoftLimitMRUCache}. Default is 2048. + * The maximum number of soft references maintained by {@link org.hibernate.engine.query.spi.QueryPlanCache}. Default is 2048. + * @deprecated in favor of {@link #QUERY_PLAN_CACHE_MAX_SIZE} */ + @Deprecated public static final String QUERY_PLAN_CACHE_MAX_SOFT_REFERENCES = "hibernate.query.plan_cache_max_soft_references"; + /** + * The maximum number of entries including: + * + * + * maintained by {@link org.hibernate.engine.query.spi.QueryPlanCache}. Default is 2048. + */ + public static final String QUERY_PLAN_CACHE_MAX_SIZE = "hibernate.query.plan_cache_max_size"; + + /** + * The maximum number of {@link org.hibernate.engine.query.spi.ParameterMetadata} maintained + * by {@link org.hibernate.engine.query.spi.QueryPlanCache}. Default is 128. + */ + public static final String QUERY_PLAN_CACHE_PARAMETER_METADATA_MAX_SIZE = "hibernate.query.plan_parameter_metadata_max_size"; + /** * Should we not use contextual LOB creation (aka based on {@link java.sql.Connection#createBlob()} et al). 
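The two new settings above cap the query-plan cache, which holds HQL, collection-filter and native-SQL plans, and the native-SQL parameter-metadata cache. A minimal sketch of wiring them up when bootstrapping a SessionFactory follows; the property keys come from this patch, while the class name, the chosen sizes and the bootstrap style are illustrative only. When the new keys are absent, the QueryPlanCache constructor later in this patch falls back to the deprecated hibernate.query.plan_cache_max_strong_references / hibernate.query.plan_cache_max_soft_references values, so existing configurations are still honoured.

    import java.util.Properties;

    import org.hibernate.cfg.AvailableSettings;
    import org.hibernate.cfg.Configuration;

    public class QueryPlanCacheSettingsExample {
        public static void main(String[] args) {
            // 1024 and 64 are arbitrary example sizes; the defaults are 2048 and 128.
            Properties props = new Properties();
            props.setProperty( AvailableSettings.QUERY_PLAN_CACHE_MAX_SIZE, "1024" );
            props.setProperty( AvailableSettings.QUERY_PLAN_CACHE_PARAMETER_METADATA_MAX_SIZE, "64" );

            Configuration cfg = new Configuration().addProperties( props );
            // cfg.buildSessionFactory() would then size QueryPlanCache from these values.
        }
    }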
*/ diff --git a/hibernate-core/src/main/java/org/hibernate/engine/internal/StatefulPersistenceContext.java b/hibernate-core/src/main/java/org/hibernate/engine/internal/StatefulPersistenceContext.java index eb7254de9e..2ba0961651 100644 --- a/hibernate-core/src/main/java/org/hibernate/engine/internal/StatefulPersistenceContext.java +++ b/hibernate-core/src/main/java/org/hibernate/engine/internal/StatefulPersistenceContext.java @@ -36,11 +36,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Map.Entry; - -import org.apache.commons.collections.map.AbstractReferenceMap; -import org.apache.commons.collections.map.ReferenceMap; import org.jboss.logging.Logger; - import org.hibernate.AssertionFailure; import org.hibernate.Hibernate; import org.hibernate.HibernateException; @@ -64,6 +60,7 @@ import org.hibernate.engine.spi.SessionImplementor; import org.hibernate.engine.spi.Status; import org.hibernate.internal.CoreMessageLogger; import org.hibernate.internal.util.MarkerObject; +import org.hibernate.internal.util.collections.ConcurrentReferenceHashMap; import org.hibernate.internal.util.collections.IdentityMap; import org.hibernate.persister.collection.CollectionPersister; import org.hibernate.persister.entity.EntityPersister; @@ -158,7 +155,7 @@ public class StatefulPersistenceContext implements PersistenceContext { entitiesByKey = new HashMap( INIT_COLL_SIZE ); entitiesByUniqueKey = new HashMap( INIT_COLL_SIZE ); //noinspection unchecked - proxiesByKey = (Map) new ReferenceMap( AbstractReferenceMap.HARD, AbstractReferenceMap.WEAK ); + proxiesByKey = new ConcurrentReferenceHashMap( INIT_COLL_SIZE, .75f, 1, ConcurrentReferenceHashMap.ReferenceType.STRONG, ConcurrentReferenceHashMap.ReferenceType.WEAK, null ); entitySnapshotsByKey = new HashMap( INIT_COLL_SIZE ); entityEntries = IdentityMap.instantiateSequenced( INIT_COLL_SIZE ); @@ -199,19 +196,14 @@ public class StatefulPersistenceContext implements PersistenceContext { @Override public void addUnownedCollection(CollectionKey key, PersistentCollection collection) { if (unownedCollections==null) { - unownedCollections = new HashMap(8); + unownedCollections = new HashMap(INIT_COLL_SIZE); } unownedCollections.put( key, collection ); } @Override public PersistentCollection useUnownedCollection(CollectionKey key) { - if ( unownedCollections == null ) { - return null; - } - else { - return unownedCollections.remove(key); - } + return ( unownedCollections == null ) ? 
null : unownedCollections.remove( key ); } @Override @@ -225,8 +217,11 @@ public class StatefulPersistenceContext implements PersistenceContext { @Override public void clear() { for ( Object o : proxiesByKey.values() ) { - final LazyInitializer li = ((HibernateProxy) o).getHibernateLazyInitializer(); - li.unsetSession(); + if ( o == null ) { + //entry may be GCd + continue; + } + ((HibernateProxy) o).getHibernateLazyInitializer().unsetSession(); } for ( Map.Entry aCollectionEntryArray : IdentityMap.concurrentEntries( collectionEntries ) ) { aCollectionEntryArray.getKey().unsetSession( getSession() ); @@ -733,14 +728,11 @@ public class StatefulPersistenceContext implements PersistenceContext { @Override public Object proxyFor(EntityPersister persister, EntityKey key, Object impl) throws HibernateException { - if ( !persister.hasProxy() ) return impl; - Object proxy = proxiesByKey.get(key); - if ( proxy != null ) { - return narrowProxy(proxy, persister, key, impl); - } - else { + if ( !persister.hasProxy() ) { return impl; } + Object proxy = proxiesByKey.get( key ); + return ( proxy != null ) ? narrowProxy( proxy, persister, key, impl ) : impl; } /** @@ -1574,7 +1566,14 @@ public class StatefulPersistenceContext implements PersistenceContext { count = ois.readInt(); if ( tracing ) LOG.trace("Starting deserialization of [" + count + "] proxiesByKey entries"); //noinspection unchecked - rtn.proxiesByKey = new ReferenceMap( AbstractReferenceMap.HARD, AbstractReferenceMap.WEAK, count < INIT_COLL_SIZE ? INIT_COLL_SIZE : count, .75f ); + rtn.proxiesByKey = new ConcurrentReferenceHashMap( + count < INIT_COLL_SIZE ? INIT_COLL_SIZE : count, + .75f, + 1, + ConcurrentReferenceHashMap.ReferenceType.STRONG, + ConcurrentReferenceHashMap.ReferenceType.WEAK, + null + ); for ( int i = 0; i < count; i++ ) { EntityKey ek = EntityKey.deserialize( ois, session ); Object proxy = ois.readObject(); diff --git a/hibernate-core/src/main/java/org/hibernate/engine/query/spi/HQLQueryPlan.java b/hibernate-core/src/main/java/org/hibernate/engine/query/spi/HQLQueryPlan.java index 805c8abaa5..b08406f972 100644 --- a/hibernate-core/src/main/java/org/hibernate/engine/query/spi/HQLQueryPlan.java +++ b/hibernate-core/src/main/java/org/hibernate/engine/query/spi/HQLQueryPlan.java @@ -25,6 +25,7 @@ package org.hibernate.engine.query.spi; import java.io.Serializable; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -46,6 +47,7 @@ import org.hibernate.hql.internal.QuerySplitter; import org.hibernate.hql.spi.FilterTranslator; import org.hibernate.hql.spi.ParameterTranslations; import org.hibernate.hql.spi.QueryTranslator; +import org.hibernate.hql.spi.QueryTranslatorFactory; import org.hibernate.internal.CoreMessageLogger; import org.hibernate.internal.util.collections.ArrayHelper; import org.hibernate.internal.util.collections.EmptyIterator; @@ -75,36 +77,38 @@ public class HQLQueryPlan implements Serializable { private final Set enabledFilterNames; private final boolean shallow; - public HQLQueryPlan(String hql, boolean shallow, Map enabledFilters, SessionFactoryImplementor factory) { this( hql, null, shallow, enabledFilters, factory ); } - protected HQLQueryPlan(String hql, String collectionRole, boolean shallow, Map enabledFilters, SessionFactoryImplementor factory) { + protected HQLQueryPlan(String hql, String collectionRole, boolean shallow, Map enabledFilters, SessionFactoryImplementor factory){ this.sourceQuery = hql; this.shallow = 
shallow; - Set copy = new HashSet(); copy.addAll( enabledFilters.keySet() ); this.enabledFilterNames = java.util.Collections.unmodifiableSet( copy ); - Set combinedQuerySpaces = new HashSet(); - String[] concreteQueryStrings = QuerySplitter.concreteQueries( hql, factory ); + final String[] concreteQueryStrings = QuerySplitter.concreteQueries( hql, factory ); final int length = concreteQueryStrings.length; - translators = new QueryTranslator[length]; - List sqlStringList = new ArrayList(); + this.translators = new QueryTranslator[length]; + + List sqlStringList = new ArrayList(); + Set combinedQuerySpaces = new HashSet(); + + final boolean hasCollectionRole = (collectionRole == null); + final Map querySubstitutions = factory.getSettings().getQuerySubstitutions(); + final QueryTranslatorFactory queryTranslatorFactory = factory.getSettings().getQueryTranslatorFactory(); + for ( int i=0; i 1 ) { - final int returns = translators[0].getReturnTypes().length; - returnMetadata = new ReturnMetadata( translators[0].getReturnAliases(), new Type[returns] ); - } - else { - returnMetadata = new ReturnMetadata( translators[0].getReturnAliases(), translators[0].getReturnTypes() ); - } + final Type[] types = ( length > 1 ) ? new Type[translators[0].getReturnTypes().length] : translators[0].getReturnTypes(); + returnMetadata = new ReturnMetadata( translators[0].getReturnAliases(), types ); } } } @@ -192,20 +191,19 @@ public class HQLQueryPlan implements Serializable { List combinedResults = new ArrayList(); IdentitySet distinction = new IdentitySet(); int includedCount = -1; - translator_loop: for ( int i = 0; i < translators.length; i++ ) { - List tmp = translators[i].list( session, queryParametersToUse ); + translator_loop: + for ( QueryTranslator translator : translators ) { + List tmp = translator.list( session, queryParametersToUse ); if ( needsLimit ) { // NOTE : firstRow is zero-based - int first = queryParameters.getRowSelection().getFirstRow() == null - ? 0 - : queryParameters.getRowSelection().getFirstRow().intValue(); - int max = queryParameters.getRowSelection().getMaxRows() == null - ? -1 - : queryParameters.getRowSelection().getMaxRows().intValue(); - final int size = tmp.size(); - for ( int x = 0; x < size; x++ ) { - final Object result = tmp.get( x ); - if ( ! distinction.add( result ) ) { + final int first = queryParameters.getRowSelection().getFirstRow() == null + ? 0 + : queryParameters.getRowSelection().getFirstRow(); + final int max = queryParameters.getRowSelection().getMaxRows() == null + ? -1 + : queryParameters.getRowSelection().getMaxRows(); + for ( final Object result : tmp ) { + if ( !distinction.add( result ) ) { continue; } includedCount++; @@ -239,14 +237,16 @@ public class HQLQueryPlan implements Serializable { Iterator[] results = null; boolean many = translators.length > 1; - if (many) { + if ( many ) { results = new Iterator[translators.length]; } Iterator result = null; for ( int i = 0; i < translators.length; i++ ) { result = translators[i].iterate( queryParameters, session ); - if (many) results[i] = result; + if ( many ) { + results[i] = result; + } } return many ? 
new JoinedIterator(results) : result; @@ -279,8 +279,8 @@ public class HQLQueryPlan implements Serializable { LOG.splitQueries( getSourceQuery(), translators.length ); } int result = 0; - for ( int i = 0; i < translators.length; i++ ) { - result += translators[i].executeUpdate( queryParameters, session ); + for ( QueryTranslator translator : translators ) { + result += translator.executeUpdate( queryParameters, session ); } return result; } @@ -289,7 +289,9 @@ public class HQLQueryPlan implements Serializable { long start = System.currentTimeMillis(); ParamLocationRecognizer recognizer = ParamLocationRecognizer.parseLocations( hql ); long end = System.currentTimeMillis(); - LOG.tracev( "HQL param location recognition took {0} mills ({1})", ( end - start ), hql ); + if ( LOG.isTraceEnabled() ) { + LOG.tracev( "HQL param location recognition took {0} mills ({1})", ( end - start ), hql ); + } int ordinalParamCount = parameterTranslations.getOrdinalParameterCount(); int[] locations = ArrayHelper.toIntArray( recognizer.getOrdinalParameterLocationList() ); @@ -309,7 +311,7 @@ public class HQLQueryPlan implements Serializable { } Iterator itr = recognizer.getNamedParameterDescriptionMap().entrySet().iterator(); - Map namedParamDescriptorMap = new HashMap(); + Map namedParamDescriptorMap = new HashMap(); while( itr.hasNext() ) { final Map.Entry entry = ( Map.Entry ) itr.next(); final String name = ( String ) entry.getKey(); @@ -328,7 +330,6 @@ public class HQLQueryPlan implements Serializable { return new ParameterMetadata( ordinalParamDescriptors, namedParamDescriptorMap ); } - public QueryTranslator[] getTranslators() { QueryTranslator[] copy = new QueryTranslator[translators.length]; System.arraycopy(translators, 0, copy, 0, copy.length); diff --git a/hibernate-core/src/main/java/org/hibernate/engine/query/spi/QueryPlanCache.java b/hibernate-core/src/main/java/org/hibernate/engine/query/spi/QueryPlanCache.java index 6588970fdb..3a179d6122 100644 --- a/hibernate-core/src/main/java/org/hibernate/engine/query/spi/QueryPlanCache.java +++ b/hibernate-core/src/main/java/org/hibernate/engine/query/spi/QueryPlanCache.java @@ -31,7 +31,6 @@ import java.util.HashSet; import java.util.Iterator; import java.util.Map; import java.util.Set; - import org.jboss.logging.Logger; import org.hibernate.MappingException; @@ -41,9 +40,8 @@ import org.hibernate.engine.query.spi.sql.NativeSQLQuerySpecification; import org.hibernate.engine.spi.SessionFactoryImplementor; import org.hibernate.internal.CoreMessageLogger; import org.hibernate.internal.FilterImpl; +import org.hibernate.internal.util.collections.BoundedConcurrentHashMap; import org.hibernate.internal.util.collections.CollectionHelper; -import org.hibernate.internal.util.collections.SimpleMRUCache; -import org.hibernate.internal.util.collections.SoftLimitMRUCache; import org.hibernate.internal.util.config.ConfigurationHelper; /** @@ -57,37 +55,64 @@ import org.hibernate.internal.util.config.ConfigurationHelper; public class QueryPlanCache implements Serializable { private static final CoreMessageLogger LOG = Logger.getMessageLogger(CoreMessageLogger.class, QueryPlanCache.class.getName()); + + private static final boolean isTraceEnabled = LOG.isTraceEnabled(); + /** + * The default strong reference count. + */ + public static final int DEFAULT_PARAMETER_METADATA_MAX_COUNT = 128; + /** + * The default soft reference count. 
+ */ + public static final int DEFAULT_QUERY_PLAN_MAX_COUNT = 2048; + private final SessionFactoryImplementor factory; + /** + * the cache of the actual plans... + */ + private final BoundedConcurrentHashMap queryPlanCache; /** * simple cache of param metadata based on query string. Ideally, the original "user-supplied query" * string should be used to obtain this metadata (i.e., not the para-list-expanded query string) to avoid * unnecessary cache entries. - *

* Used solely for caching param metadata for native-sql queries, see {@link #getSQLParameterMetadata} for a * discussion as to why... */ - private final SimpleMRUCache sqlParamMetadataCache; - - /** - * the cache of the actual plans... - */ - private final SoftLimitMRUCache planCache; - private SessionFactoryImplementor factory; - - public QueryPlanCache(SessionFactoryImplementor factory) { - int maxStrongReferenceCount = ConfigurationHelper.getInt( - Environment.QUERY_PLAN_CACHE_MAX_STRONG_REFERENCES, - factory.getProperties(), - SoftLimitMRUCache.DEFAULT_STRONG_REF_COUNT - ); - int maxSoftReferenceCount = ConfigurationHelper.getInt( - Environment.QUERY_PLAN_CACHE_MAX_SOFT_REFERENCES, - factory.getProperties(), - SoftLimitMRUCache.DEFAULT_SOFT_REF_COUNT - ); + private final BoundedConcurrentHashMap parameterMetadataCache; + public QueryPlanCache(final SessionFactoryImplementor factory) { this.factory = factory; - this.sqlParamMetadataCache = new SimpleMRUCache( maxStrongReferenceCount ); - this.planCache = new SoftLimitMRUCache( maxStrongReferenceCount, maxSoftReferenceCount ); + + Integer maxParameterMetadataCount = ConfigurationHelper.getInteger( + Environment.QUERY_PLAN_CACHE_PARAMETER_METADATA_MAX_SIZE, + factory.getProperties() + ); + if ( maxParameterMetadataCount == null ) { + maxParameterMetadataCount = ConfigurationHelper.getInt( + Environment.QUERY_PLAN_CACHE_MAX_STRONG_REFERENCES, + factory.getProperties(), + DEFAULT_PARAMETER_METADATA_MAX_COUNT + ); + } + Integer maxQueryPlanCount = ConfigurationHelper.getInteger( + Environment.QUERY_PLAN_CACHE_MAX_SIZE, + factory.getProperties() + ); + if ( maxQueryPlanCount == null ) { + maxQueryPlanCount = ConfigurationHelper.getInt( + Environment.QUERY_PLAN_CACHE_MAX_SOFT_REFERENCES, + factory.getProperties(), + DEFAULT_QUERY_PLAN_MAX_COUNT + ); + } + + queryPlanCache = new BoundedConcurrentHashMap( maxQueryPlanCount, 20, BoundedConcurrentHashMap.Eviction.LIRS ); + parameterMetadataCache = new BoundedConcurrentHashMap( + maxParameterMetadataCount, + 20, + BoundedConcurrentHashMap.Eviction.LIRS + ); + } /** @@ -100,79 +125,22 @@ public class QueryPlanCache implements Serializable { * @param query The query * @return The parameter metadata */ - public ParameterMetadata getSQLParameterMetadata(String query) { - ParameterMetadata metadata = ( ParameterMetadata ) sqlParamMetadataCache.get( query ); - if ( metadata == null ) { - metadata = buildNativeSQLParameterMetadata( query ); - sqlParamMetadataCache.put( query, metadata ); + public ParameterMetadata getSQLParameterMetadata(final String query) { + ParameterMetadata value = parameterMetadataCache.get( query ); + if ( value == null ) { + value = buildParameterMetadata( query ); + parameterMetadataCache.putIfAbsent( query, value ); } - return metadata; + return value; } - - public HQLQueryPlan getHQLQueryPlan(String queryString, boolean shallow, Map enabledFilters) - throws QueryException, MappingException { - HQLQueryPlanKey key = new HQLQueryPlanKey( queryString, shallow, enabledFilters ); - HQLQueryPlan plan = ( HQLQueryPlan ) planCache.get ( key ); - - if ( plan == null ) { - LOG.tracev( "Unable to locate HQL query plan in cache; generating ({0})", queryString ); - plan = new HQLQueryPlan(queryString, shallow, enabledFilters, factory ); - } - else { - LOG.tracev( "Located HQL query plan in cache ({0})", queryString ); - } - planCache.put( key, plan ); - - return plan; - } - - public FilterQueryPlan getFilterQueryPlan(String filterString, String collectionRole, boolean shallow, Map 
enabledFilters) - throws QueryException, MappingException { - FilterQueryPlanKey key = new FilterQueryPlanKey( filterString, collectionRole, shallow, enabledFilters ); - FilterQueryPlan plan = ( FilterQueryPlan ) planCache.get ( key ); - - if ( plan == null ) { - LOG.tracev( "Unable to locate collection-filter query plan in cache; generating ({0} : {1} )", - collectionRole, filterString ); - plan = new FilterQueryPlan( filterString, collectionRole, shallow, enabledFilters, factory ); - } - else { - LOG.tracev( "Located collection-filter query plan in cache ({0} : {1})", collectionRole, filterString ); - } - - planCache.put( key, plan ); - - return plan; - } - - public NativeSQLQueryPlan getNativeSQLQueryPlan(NativeSQLQuerySpecification spec) { - NativeSQLQueryPlan plan = ( NativeSQLQueryPlan ) planCache.get( spec ); - - if ( plan == null ) { - if ( LOG.isTraceEnabled() ) { - LOG.tracev( "Unable to locate native-sql query plan in cache; generating ({0})", spec.getQueryString() ); - } - plan = new NativeSQLQueryPlan( spec, factory ); - } - else { - if ( LOG.isTraceEnabled() ) { - LOG.tracev( "Located native-sql query plan in cache ({0})", spec.getQueryString() ); - } - } - - planCache.put( spec, plan ); - return plan; - } - - @SuppressWarnings({ "UnnecessaryUnboxing" }) - private ParameterMetadata buildNativeSQLParameterMetadata(String sqlString) { - ParamLocationRecognizer recognizer = ParamLocationRecognizer.parseLocations( sqlString ); - - OrdinalParameterDescriptor[] ordinalDescriptors = - new OrdinalParameterDescriptor[ recognizer.getOrdinalParameterLocationList().size() ]; - for ( int i = 0; i < recognizer.getOrdinalParameterLocationList().size(); i++ ) { + + private ParameterMetadata buildParameterMetadata(String query){ + ParamLocationRecognizer recognizer = ParamLocationRecognizer.parseLocations( query ); + final int size = recognizer.getOrdinalParameterLocationList().size(); + OrdinalParameterDescriptor[] ordinalDescriptors = new OrdinalParameterDescriptor[ size ]; + for ( int i = 0; i < size; i++ ) { final Integer position = ( Integer ) recognizer.getOrdinalParameterLocationList().get( i ); - ordinalDescriptors[i] = new OrdinalParameterDescriptor( i, null, position.intValue() ); + ordinalDescriptors[i] = new OrdinalParameterDescriptor( i, null, position ); } Iterator itr = recognizer.getNamedParameterDescriptionMap().entrySet().iterator(); @@ -184,13 +152,66 @@ public class QueryPlanCache implements Serializable { ( ParamLocationRecognizer.NamedParameterDescription ) entry.getValue(); namedParamDescriptorMap.put( name , - new NamedParameterDescriptor( name, null, description.buildPositionsArray(), description.isJpaStyle() ) + new NamedParameterDescriptor( name, null, description.buildPositionsArray(), description.isJpaStyle() ) ); } return new ParameterMetadata( ordinalDescriptors, namedParamDescriptorMap ); } + public HQLQueryPlan getHQLQueryPlan( String queryString, boolean shallow, Map enabledFilters) + throws QueryException, MappingException { + HQLQueryPlanKey key = new HQLQueryPlanKey( queryString, shallow, enabledFilters ); + HQLQueryPlan value = (HQLQueryPlan) queryPlanCache.get( key ); + if ( value == null ) { + if( isTraceEnabled ) LOG.tracev( "Unable to locate HQL query plan in cache; generating ({0})", queryString ); + value = new HQLQueryPlan( queryString, shallow, enabledFilters, factory ); + queryPlanCache.putIfAbsent( key, value ); + } else { + if( isTraceEnabled ) LOG.tracev( "Located HQL query plan in cache ({0})", queryString ); + } + return value; + } + + + + 
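The rewritten getHQLQueryPlan above, and getFilterQueryPlan / getNativeSQLQueryPlan just below, all share the same lock-free read-then-putIfAbsent idiom: probe the bounded map, build the plan on a miss, then publish it with putIfAbsent. Under contention two threads may build the same plan, but the map stays consistent without any synchronized block. A stripped-down sketch of the idiom, with placeholder names rather than Hibernate API:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    class MemoizingCache<K, V> {
        interface Factory<T, R> { R create(T key); }

        private final ConcurrentMap<K, V> cache = new ConcurrentHashMap<K, V>();

        V getOrCreate(K key, Factory<K, V> factory) {
            V value = cache.get( key );
            if ( value == null ) {
                value = factory.create( key );                 // may run twice under contention
                V previous = cache.putIfAbsent( key, value );
                if ( previous != null ) {
                    value = previous;                          // another thread published first
                }
            }
            return value;
        }
    }

QueryPlanCache itself simply returns the plan it has just built even when another thread's putIfAbsent wins the race; the two plans are equivalent, so which instance stays cached does not matter.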
public FilterQueryPlan getFilterQueryPlan(String filterString, String collectionRole, boolean shallow, Map enabledFilters) + throws QueryException, MappingException { + FilterQueryPlanKey key = new FilterQueryPlanKey( filterString, collectionRole, shallow, enabledFilters ); + FilterQueryPlan value = (FilterQueryPlan) queryPlanCache.get( key ); + if(value == null){ + if( isTraceEnabled ) LOG.tracev( "Unable to locate collection-filter query plan in cache; generating ({0} : {1} )", + collectionRole, filterString ); + value = new FilterQueryPlan( filterString, collectionRole, shallow, enabledFilters,factory ); + queryPlanCache.putIfAbsent( key, value ); + } else { + if( isTraceEnabled ) LOG.tracev( "Located collection-filter query plan in cache ({0} : {1})", collectionRole, filterString ); + } + return value; + } + + public NativeSQLQueryPlan getNativeSQLQueryPlan(final NativeSQLQuerySpecification spec) { + NativeSQLQueryPlan value = (NativeSQLQueryPlan) queryPlanCache.get( spec ); + if(value == null){ + if( isTraceEnabled ) LOG.tracev( "Unable to locate native-sql query plan in cache; generating ({0})", spec.getQueryString() ); + value = new NativeSQLQueryPlan( spec, factory); + queryPlanCache.putIfAbsent( spec, value ); + } else { + if( isTraceEnabled ) LOG.tracev( "Located native-sql query plan in cache ({0})", spec.getQueryString() ); + } + return value; + } + + + //clean up QueryPlanCache when Sessionfactory is closed + public void cleanup() { + if ( isTraceEnabled ) { + LOG.trace( "Cleaning QueryPlan Cache" ); + } + queryPlanCache.clear(); + parameterMetadataCache.clear(); + } + private static class HQLQueryPlanKey implements Serializable { private final String query; private final boolean shallow; @@ -200,8 +221,7 @@ public class QueryPlanCache implements Serializable { public HQLQueryPlanKey(String query, boolean shallow, Map enabledFilters) { this.query = query; this.shallow = shallow; - - if ( enabledFilters == null || enabledFilters.isEmpty() ) { + if ( CollectionHelper.isEmpty( enabledFilters ) ) { filterKeys = Collections.emptySet(); } else { @@ -314,13 +334,14 @@ public class QueryPlanCache implements Serializable { this.collectionRole = collectionRole; this.shallow = shallow; - if ( enabledFilters == null || enabledFilters.isEmpty() ) { - filterNames = Collections.emptySet(); + if ( CollectionHelper.isEmpty( enabledFilters ) ) { + this.filterNames = Collections.emptySet(); } else { Set tmp = new HashSet(); tmp.addAll( enabledFilters.keySet() ); this.filterNames = Collections.unmodifiableSet( tmp ); + } int hash = query.hashCode(); diff --git a/hibernate-core/src/main/java/org/hibernate/hql/internal/ast/QueryTranslatorImpl.java b/hibernate-core/src/main/java/org/hibernate/hql/internal/ast/QueryTranslatorImpl.java index fbf56021b7..15c6fc29e6 100644 --- a/hibernate-core/src/main/java/org/hibernate/hql/internal/ast/QueryTranslatorImpl.java +++ b/hibernate-core/src/main/java/org/hibernate/hql/internal/ast/QueryTranslatorImpl.java @@ -420,8 +420,8 @@ public class QueryTranslatorImpl implements FilterTranslator { return sql; } - public List collectSqlStrings() { - ArrayList list = new ArrayList(); + public List collectSqlStrings() { + ArrayList list = new ArrayList(); if ( isManipulationStatement() ) { String[] sqlStatements = statementExecutor.getSqlStatements(); for ( int i = 0; i < sqlStatements.length; i++ ) { diff --git a/hibernate-core/src/main/java/org/hibernate/hql/internal/classic/QueryTranslatorImpl.java 
b/hibernate-core/src/main/java/org/hibernate/hql/internal/classic/QueryTranslatorImpl.java index 9b6f7e27ef..72088efc90 100644 --- a/hibernate-core/src/main/java/org/hibernate/hql/internal/classic/QueryTranslatorImpl.java +++ b/hibernate-core/src/main/java/org/hibernate/hql/internal/classic/QueryTranslatorImpl.java @@ -268,7 +268,7 @@ public class QueryTranslatorImpl extends BasicLoader implements FilterTranslator return sqlString; } - public List collectSqlStrings() { + public List collectSqlStrings() { return ArrayHelper.toList( new String[] { sqlString } ); } diff --git a/hibernate-core/src/main/java/org/hibernate/hql/spi/QueryTranslator.java b/hibernate-core/src/main/java/org/hibernate/hql/spi/QueryTranslator.java index 0c81ccebf2..4caebc6f99 100644 --- a/hibernate-core/src/main/java/org/hibernate/hql/spi/QueryTranslator.java +++ b/hibernate-core/src/main/java/org/hibernate/hql/spi/QueryTranslator.java @@ -127,7 +127,7 @@ public interface QueryTranslator { */ String getSQLString(); - List collectSqlStrings(); + List collectSqlStrings(); /** * Returns the HQL string processed by the translator. diff --git a/hibernate-core/src/main/java/org/hibernate/internal/AbstractSessionImpl.java b/hibernate-core/src/main/java/org/hibernate/internal/AbstractSessionImpl.java index 7d32b782b6..cf2f29d475 100755 --- a/hibernate-core/src/main/java/org/hibernate/internal/AbstractSessionImpl.java +++ b/hibernate-core/src/main/java/org/hibernate/internal/AbstractSessionImpl.java @@ -42,6 +42,7 @@ import org.hibernate.engine.jdbc.LobCreationContext; import org.hibernate.engine.jdbc.spi.JdbcConnectionAccess; import org.hibernate.engine.query.spi.HQLQueryPlan; import org.hibernate.engine.query.spi.NativeSQLQueryPlan; +import org.hibernate.engine.query.spi.ParameterMetadata; import org.hibernate.engine.query.spi.sql.NativeSQLQuerySpecification; import org.hibernate.engine.spi.EntityKey; import org.hibernate.engine.spi.NamedQueryDefinition; @@ -148,10 +149,11 @@ public abstract class AbstractSessionImpl implements Serializable, SharedSession if ( nsqlqd==null ) { throw new MappingException( "Named query not known: " + queryName ); } + ParameterMetadata parameterMetadata = factory.getQueryPlanCache().getSQLParameterMetadata( nsqlqd.getQueryString() ); query = new SQLQueryImpl( nsqlqd, this, - factory.getQueryPlanCache().getSQLParameterMetadata( nsqlqd.getQueryString() ) + parameterMetadata ); query.setComment( "named native SQL query " + queryName ); nqd = nsqlqd; diff --git a/hibernate-core/src/main/java/org/hibernate/internal/SessionFactoryImpl.java b/hibernate-core/src/main/java/org/hibernate/internal/SessionFactoryImpl.java index f255499c5d..03352081e3 100644 --- a/hibernate-core/src/main/java/org/hibernate/internal/SessionFactoryImpl.java +++ b/hibernate-core/src/main/java/org/hibernate/internal/SessionFactoryImpl.java @@ -1135,6 +1135,8 @@ public final class SessionFactoryImpl catch ( MappingException e ) { errors.put( queryName, e ); } + + } if ( LOG.isDebugEnabled() ) { LOG.debugf( "Checking %s named SQL queries", namedSqlQueries.size() ); @@ -1176,6 +1178,7 @@ public final class SessionFactoryImpl catch ( MappingException e ) { errors.put( queryName, e ); } + } return errors; @@ -1265,11 +1268,15 @@ public final class SessionFactoryImpl } public Type[] getReturnTypes(String queryString) throws HibernateException { - return queryPlanCache.getHQLQueryPlan( queryString, false, CollectionHelper.EMPTY_MAP ).getReturnMetadata().getReturnTypes(); + return queryPlanCache.getHQLQueryPlan( queryString, false, 
CollectionHelper.EMPTY_MAP ) + .getReturnMetadata() + .getReturnTypes(); } public String[] getReturnAliases(String queryString) throws HibernateException { - return queryPlanCache.getHQLQueryPlan( queryString, false, CollectionHelper.EMPTY_MAP ).getReturnMetadata().getReturnAliases(); + return queryPlanCache.getHQLQueryPlan( queryString, false, CollectionHelper.EMPTY_MAP ) + .getReturnMetadata() + .getReturnAliases(); } public ClassMetadata getClassMetadata(Class persistentClass) throws HibernateException { @@ -1431,6 +1438,8 @@ public final class SessionFactoryImpl settings.getRegionFactory().stop(); + queryPlanCache.cleanup(); + if ( settings.isAutoDropSchema() ) { schemaExport.drop( false, true ); } diff --git a/hibernate-core/src/main/java/org/hibernate/internal/util/collections/BoundedConcurrentHashMap.java b/hibernate-core/src/main/java/org/hibernate/internal/util/collections/BoundedConcurrentHashMap.java new file mode 100644 index 0000000000..8c86d4fedc --- /dev/null +++ b/hibernate-core/src/main/java/org/hibernate/internal/util/collections/BoundedConcurrentHashMap.java @@ -0,0 +1,2450 @@ +/* + * JBoss, Home of Professional Open Source + * Copyright 2010 Red Hat Inc. and/or its affiliates and other + * contributors as indicated by the @author tags. All rights reserved. + * See the copyright.txt in the distribution for a full listing of + * individual contributors. + * + * This is free software; you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * This software is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this software; if not, write to the Free + * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA, or see the FSF site: http://www.fsf.org. + */ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/licenses/publicdomain + * + * Modified for https://jira.jboss.org/jira/browse/ISPN-299 + * Includes ideas described in http://portal.acm.org/citation.cfm?id=1547428 + * + */ + +package org.hibernate.internal.util.collections; +import java.io.IOException; +import java.io.Serializable; +import java.util.*; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.locks.ReentrantLock; + + + +import static java.util.Collections.singletonMap; +import static java.util.Collections.unmodifiableMap; + + +/** + * A hash table supporting full concurrency of retrievals and + * adjustable expected concurrency for updates. This class obeys the + * same functional specification as {@link Hashtable}, and + * includes versions of methods corresponding to each method of + * Hashtable. However, even though all operations are + * thread-safe, retrieval operations do not entail locking, + * and there is not any support for locking the entire table + * in a way that prevents all access. This class is fully + * interoperable with Hashtable in programs that rely on its + * thread safety but not on its synchronization details. + * + *

Retrieval operations (including get) generally do not + * block, so may overlap with update operations (including + * put and remove). Retrievals reflect the results + * of the most recently completed update operations holding + * upon their onset. For aggregate operations such as putAll + * and clear, concurrent retrievals may reflect insertion or + * removal of only some entries. Similarly, Iterators and + * Enumerations return elements reflecting the state of the hash table + * at some point at or since the creation of the iterator/enumeration. + * They do not throw {@link ConcurrentModificationException}. + * However, iterators are designed to be used by only one thread at a time. + * + *

The allowed concurrency among update operations is guided by + * the optional concurrencyLevel constructor argument + * (default 16), which is used as a hint for internal sizing. The + * table is internally partitioned to try to permit the indicated + * number of concurrent updates without contention. Because placement + * in hash tables is essentially random, the actual concurrency will + * vary. Ideally, you should choose a value to accommodate as many + * threads as will ever concurrently modify the table. Using a + * significantly higher value than you need can waste space and time, + * and a significantly lower value can lead to thread contention. But + * overestimates and underestimates within an order of magnitude do + * not usually have much noticeable impact. A value of one is + * appropriate when it is known that only one thread will modify and + * all others will only read. Also, resizing this or any other kind of + * hash table is a relatively slow operation, so, when possible, it is + * a good idea to provide estimates of expected table sizes in + * constructors. + * + *
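As a concrete illustration of the capacity and concurrencyLevel pairing described above, the sketch below mirrors how the QueryPlanCache changes earlier in this patch size their maps (capacity 2048, concurrency level 20, LIRS eviction). BoundedConcurrentHashMap is an internal Hibernate class, and the key/value types, sizes and class name here are placeholders, not a prescribed usage.

    import org.hibernate.internal.util.collections.BoundedConcurrentHashMap;
    import org.hibernate.internal.util.collections.BoundedConcurrentHashMap.Eviction;

    public class BoundedMapExample {
        public static void main(String[] args) {
            // A hard capacity of 2048 entries, 20-way segmenting, LIRS eviction;
            // entries beyond the capacity are evicted rather than soft-referenced.
            BoundedConcurrentHashMap<String, Object> cache =
                    new BoundedConcurrentHashMap<String, Object>( 2048, 20, Eviction.LIRS );

            cache.put( "from Cat", new Object() );
            Object plan = cache.get( "from Cat" );   // retrievals never lock
            System.out.println( plan != null );
        }
    }

Iteration over such a map is weakly consistent, as the surrounding Javadoc notes: readers never see a ConcurrentModificationException, they merely may or may not observe concurrent updates.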

This class and its views and iterators implement all of the + * optional methods of the {@link Map} and {@link Iterator} + * interfaces. + * + *

Like {@link Hashtable} but unlike {@link HashMap}, this class + * does not allow null to be used as a key or value. + * + *

This class is a member of the + * + * Java Collections Framework. + * + * @since 1.5 + * @author Doug Lea + * @param the type of keys maintained by this map + * @param the type of mapped values + */ +public class BoundedConcurrentHashMap extends AbstractMap + implements ConcurrentMap, Serializable { + private static final long serialVersionUID = 7249069246763182397L; + + /* + * The basic strategy is to subdivide the table among Segments, + * each of which itself is a concurrently readable hash table. + */ + + /* ---------------- Constants -------------- */ + + /** + * The default initial capacity for this table, + * used when not otherwise specified in a constructor. + */ + static final int DEFAULT_MAXIMUM_CAPACITY = 512; + + /** + * The default load factor for this table, used when not + * otherwise specified in a constructor. + */ + static final float DEFAULT_LOAD_FACTOR = 0.75f; + + /** + * The default concurrency level for this table, used when not + * otherwise specified in a constructor. + */ + static final int DEFAULT_CONCURRENCY_LEVEL = 16; + + /** + * The maximum capacity, used if a higher value is implicitly + * specified by either of the constructors with arguments. MUST + * be a power of two <= 1<<30 to ensure that entries are indexable + * using ints. + */ + static final int MAXIMUM_CAPACITY = 1 << 30; + + /** + * The maximum number of segments to allow; used to bound + * constructor arguments. + */ + static final int MAX_SEGMENTS = 1 << 16; // slightly conservative + + /** + * Number of unsynchronized retries in size and containsValue + * methods before resorting to locking. This is used to avoid + * unbounded retries if tables undergo continuous modification + * which would make it impossible to obtain an accurate result. + */ + static final int RETRIES_BEFORE_LOCK = 2; + + /* ---------------- Fields -------------- */ + + /** + * Mask value for indexing into segments. The upper bits of a + * key's hash code are used to choose the segment. + */ + final int segmentMask; + + /** + * Shift value for indexing within segments. + */ + final int segmentShift; + + /** + * The segments, each of which is a specialized hash table + */ + final Segment[] segments; + + transient Set keySet; + transient Set> entrySet; + transient Collection values; + + /* ---------------- Small Utilities -------------- */ + + /** + * Applies a supplemental hash function to a given hashCode, which + * defends against poor quality hash functions. This is critical + * because ConcurrentHashMap uses power-of-two length hash tables, + * that otherwise encounter collisions for hashCodes that do not + * differ in lower or upper bits. + */ + private static int hash(int h) { + // Spread bits to regularize both segment and index locations, + // using variant of single-word Wang/Jenkins hash. + h += h << 15 ^ 0xffffcd7d; + h ^= h >>> 10; + h += h << 3; + h ^= h >>> 6; + h += (h << 2) + (h << 14); + return h ^ h >>> 16; + } + + /** + * Returns the segment that should be used for key with given hash + * @param hash the hash code for the key + * @return the segment + */ + final Segment segmentFor(int hash) { + return segments[hash >>> segmentShift & segmentMask]; + } + + /* ---------------- Inner Classes -------------- */ + + /** + * ConcurrentHashMap list entry. Note that this is never exported + * out as a user-visible Map.Entry. 
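To make the two-level indexing concrete, the sketch below reuses the bit-spreading hash function shown above and derives a segment index the same way segmentFor does. The segmentShift of 28 and segmentMask of 15 are assumptions for the default concurrency level of 16 (16 segments), following the java.util.concurrent.ConcurrentHashMap layout this class derives from; the query string and class name are arbitrary.

    public class SegmentIndexDemo {
        // Same Wang/Jenkins-style bit spreader as BoundedConcurrentHashMap.hash(int)
        private static int hash(int h) {
            h += h << 15 ^ 0xffffcd7d;
            h ^= h >>> 10;
            h += h << 3;
            h ^= h >>> 6;
            h += (h << 2) + (h << 14);
            return h ^ h >>> 16;
        }

        public static void main(String[] args) {
            final int segmentShift = 28;   // assumed: 32 - log2(16 segments)
            final int segmentMask = 15;    // assumed: 16 segments - 1

            int h = hash( "from Cat where name = :name".hashCode() );
            // The top bits choose the segment, as in segmentFor(); the low bits of the
            // same spread hash later index the bucket table inside that segment.
            int segmentIndex = h >>> segmentShift & segmentMask;
            System.out.println( "hash " + h + " maps to segment " + segmentIndex );
        }
    }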
+ * + * Because the value field is volatile, not final, it is legal wrt + * the Java Memory Model for an unsynchronized reader to see null + * instead of initial value when read via a data race. Although a + * reordering leading to this is not likely to ever actually + * occur, the Segment.readValueUnderLock method is used as a + * backup in case a null (pre-initialized) value is ever seen in + * an unsynchronized access method. + */ + private static class HashEntry { + final K key; + final int hash; + volatile V value; + final HashEntry next; + + HashEntry(K key, int hash, HashEntry next, V value) { + this.key = key; + this.hash = hash; + this.next = next; + this.value = value; + } + + @Override + public int hashCode() { + int result = 17; + result = result * 31 + hash; + result = result * 31 + key.hashCode(); + return result; + } + + @Override + public boolean equals(Object o) { + // HashEntry is internal class, never leaks out of CHM, hence slight optimization + if (this == o) { + return true; + } + if (o == null) { + return false; + } + HashEntry other = (HashEntry) o; + return hash == other.hash && key.equals(other.key); + } + + @SuppressWarnings("unchecked") + static HashEntry[] newArray(int i) { + return new HashEntry[i]; + } + } + + private enum Recency { + HIR_RESIDENT, LIR_RESIDENT, HIR_NONRESIDENT + } + + public enum Eviction { + NONE { + @Override + public EvictionPolicy make(Segment s, int capacity, float lf) { + return new NullEvictionPolicy(); + } + }, + LRU { + @Override + public EvictionPolicy make(Segment s, int capacity, float lf) { + return new LRU(s,capacity,lf,capacity*10,lf); + } + }, + LIRS { + @Override + public EvictionPolicy make(Segment s, int capacity, float lf) { + return new LIRS(s,capacity,capacity*10,lf); + } + }; + + abstract EvictionPolicy make(Segment s, int capacity, float lf); + } + + public interface EvictionListener { + void onEntryEviction(Map evicted); + void onEntryChosenForEviction(V internalCacheEntry); + } + + static final class NullEvictionListener implements EvictionListener { + @Override + public void onEntryEviction(Map evicted) { + // Do nothing. + } + @Override + public void onEntryChosenForEviction(V internalCacheEntry) { + // Do nothing. + } + } + + public interface EvictionPolicy { + + public final static int MAX_BATCH_SIZE = 64; + + HashEntry createNewEntry(K key, int hash, HashEntry next, V value); + + /** + * Invokes eviction policy algorithm and returns set of evicted entries. + * + *

+ * Set cannot be null but could possibly be an empty set. + * + * @return set of evicted entries. + */ + Set> execute(); + + /** + * Invoked to notify EvictionPolicy implementation that there has been an attempt to access + * an entry in Segment, however that entry was not present in Segment. + * + * @param e + * accessed entry in Segment + * + * @return non null set of evicted entries. + */ + Set> onEntryMiss(HashEntry e); + + /** + * Invoked to notify EvictionPolicy implementation that an entry in Segment has been + * accessed. Returns true if batching threshold has been reached, false otherwise. + *

+ * Note that this method is potentially invoked without holding a lock on Segment. + * + * @return true if batching threshold has been reached, false otherwise. + * + * @param e + * accessed entry in Segment + */ + boolean onEntryHit(HashEntry e); + + /** + * Invoked to notify EvictionPolicy implementation that an entry e has been removed from + * Segment. + * + * @param e + * removed entry in Segment + */ + void onEntryRemove(HashEntry e); + + /** + * Invoked to notify EvictionPolicy implementation that all Segment entries have been + * cleared. + * + */ + void clear(); + + /** + * Returns type of eviction algorithm (strategy). + * + * @return type of eviction algorithm + */ + Eviction strategy(); + + /** + * Returns true if batching threshold has expired, false otherwise. + *

+ * Note that this method is potentially invoked without holding a lock on Segment. + * + * @return true if batching threshold has expired, false otherwise. + */ + boolean thresholdExpired(); + } + + static class NullEvictionPolicy implements EvictionPolicy { + + @Override + public void clear() { + // Do nothing. + } + + @Override + public Set> execute() { + return Collections.emptySet(); + } + + @Override + public boolean onEntryHit(HashEntry e) { + return false; + } + + @Override + public Set> onEntryMiss(HashEntry e) { + return Collections.emptySet(); + } + + @Override + public void onEntryRemove(HashEntry e) { + // Do nothing. + } + + @Override + public boolean thresholdExpired() { + return false; + } + + @Override + public Eviction strategy() { + return Eviction.NONE; + } + + @Override + public HashEntry createNewEntry(K key, int hash, HashEntry next, V value) { + return new HashEntry(key, hash, next, value); + } + } + + static final class LRU extends LinkedHashMap, V> implements EvictionPolicy { + + /** The serialVersionUID */ + private static final long serialVersionUID = -7645068174197717838L; + + private final ConcurrentLinkedQueue> accessQueue; + private final Segment segment; + private final int maxBatchQueueSize; + private final int trimDownSize; + private final float batchThresholdFactor; + private final Set> evicted; + + public LRU(Segment s, int capacity, float lf, int maxBatchSize, float batchThresholdFactor) { + super(capacity, lf, true); + this.segment = s; + this.trimDownSize = capacity; + this.maxBatchQueueSize = maxBatchSize > MAX_BATCH_SIZE ? MAX_BATCH_SIZE : maxBatchSize; + this.batchThresholdFactor = batchThresholdFactor; + this.accessQueue = new ConcurrentLinkedQueue>(); + this.evicted = new HashSet>(); + } + + @Override + public Set> execute() { + Set> evictedCopy = new HashSet>(); + for (HashEntry e : accessQueue) { + put(e, e.value); + } + evictedCopy.addAll(evicted); + accessQueue.clear(); + evicted.clear(); + return evictedCopy; + } + + @Override + public Set> onEntryMiss(HashEntry e) { + put(e, e.value); + if (!evicted.isEmpty()) { + Set> evictedCopy = new HashSet>(); + evictedCopy.addAll(evicted); + evicted.clear(); + return evictedCopy; + } else { + return Collections.emptySet(); + } + } + + /* + * Invoked without holding a lock on Segment + */ + @Override + public boolean onEntryHit(HashEntry e) { + accessQueue.add(e); + return accessQueue.size() >= maxBatchQueueSize * batchThresholdFactor; + } + + /* + * Invoked without holding a lock on Segment + */ + @Override + public boolean thresholdExpired() { + return accessQueue.size() >= maxBatchQueueSize; + } + + @Override + public void onEntryRemove(HashEntry e) { + remove(e); + // we could have multiple instances of e in accessQueue; remove them all + while (accessQueue.remove(e)) { + continue; + } + } + + @Override + public void clear() { + super.clear(); + accessQueue.clear(); + } + + @Override + public Eviction strategy() { + return Eviction.LRU; + } + + protected boolean isAboveThreshold(){ + return size() > trimDownSize; + } + + protected boolean removeEldestEntry(Map.Entry,V> eldest){ + boolean aboveThreshold = isAboveThreshold(); + if(aboveThreshold){ + HashEntry evictedEntry = eldest.getKey(); + segment.evictionListener.onEntryChosenForEviction(evictedEntry.value); + segment.remove(evictedEntry.key, evictedEntry.hash, null); + evicted.add(evictedEntry); + } + return aboveThreshold; + } + + @Override + public HashEntry createNewEntry(K key, int hash, HashEntry next, V value) { + return new 
HashEntry(key, hash, next, value); + } + } + + /** + * Adapted to Infinispan BoundedConcurrentHashMap using LIRS implementation ideas from Charles Fry (fry@google.com) + * See http://code.google.com/p/concurrentlinkedhashmap/source/browse/trunk/src/test/java/com/googlecode/concurrentlinkedhashmap/caches/LirsMap.java + * for original sources + * + */ + private static final class LIRSHashEntry extends HashEntry { + + // LIRS stack S + private LIRSHashEntry previousInStack; + private LIRSHashEntry nextInStack; + + // LIRS queue Q + private LIRSHashEntry previousInQueue; + private LIRSHashEntry nextInQueue; + volatile Recency state; + + LIRS owner; + + + LIRSHashEntry(LIRS owner, K key, int hash, HashEntry next, V value) { + super(key,hash,next,value); + this.owner = owner; + this.state = Recency.HIR_RESIDENT; + + // initially point everything back to self + this.previousInStack = this; + this.nextInStack = this; + this.previousInQueue = this; + this.nextInQueue = this; + } + + @Override + public int hashCode() { + int result = 17; + result = result * 31 + hash; + result = result * 31 + key.hashCode(); + return result; + } + + @Override + public boolean equals(Object o) { + // HashEntry is internal class, never leaks out of CHM, hence slight optimization + if (this == o) { + return true; + } + if (o == null) { + return false; + } + HashEntry other = (HashEntry) o; + return hash == other.hash && key.equals(other.key); + } + + /** + * Returns true if this entry is in the stack, false otherwise. + */ + public boolean inStack() { + return (nextInStack != null); + } + + /** + * Returns true if this entry is in the queue, false otherwise. + */ + public boolean inQueue() { + return (nextInQueue != null); + } + + /** + * Records a cache hit. + */ + public void hit(Set> evicted) { + switch (state) { + case LIR_RESIDENT: + hotHit(evicted); + break; + case HIR_RESIDENT: + coldHit(evicted); + break; + case HIR_NONRESIDENT: + throw new IllegalStateException("Can't hit a non-resident entry!"); + default: + throw new AssertionError("Hit with unknown status: " + state); + } + } + + /** + * Records a cache hit on a hot block. + */ + private void hotHit(Set> evicted) { + // See section 3.3 case 1: + // "Upon accessing an LIR block X: + // This access is guaranteed to be a hit in the cache." + + // "We move it to the top of stack S." + boolean onBottom = (owner.stackBottom() == this); + moveToStackTop(); + + // "If the LIR block is originally located in the bottom of the stack, + // we conduct a stack pruning." + if (onBottom) { + owner.pruneStack(evicted); + } + } + + /** + * Records a cache hit on a cold block. + */ + private void coldHit(Set> evicted) { + // See section 3.3 case 2: + // "Upon accessing an HIR resident block X: + // This is a hit in the cache." + + // "We move it to the top of stack S." + boolean inStack = inStack(); + moveToStackTop(); + + // "There are two cases for block X:" + if (inStack) { + // "(1) If X is in the stack S, we change its status to LIR." + hot(); + + // "This block is also removed from list Q." + removeFromQueue(); + + // "The LIR block in the bottom of S is moved to the end of list Q + // with its status changed to HIR." + owner.stackBottom().migrateToQueue(); + + // "A stack pruning is then conducted." + owner.pruneStack(evicted); + } else { + // "(2) If X is not in stack S, we leave its status in HIR and move + // it to the end of list Q." + moveToQueueEnd(); + } + } + + /** + * Records a cache miss. This is how new entries join the LIRS stack and + * queue. 
This is called both when a new entry is first created, and when a + * non-resident entry is re-computed. + */ + private Set> miss() { + Set> evicted = Collections.emptySet(); + if (owner.hotSize < owner.maximumHotSize) { + warmupMiss(); + } else { + evicted = new HashSet>(); + fullMiss(evicted); + } + + // now the missed item is in the cache + owner.size++; + return evicted; + } + + /** + * Records a miss when the hot entry set is not full. + */ + private void warmupMiss() { + // See section 3.3: + // "When LIR block set is not full, all the referenced blocks are + // given an LIR status until its size reaches L_lirs." + hot(); + moveToStackTop(); + } + + /** + * Records a miss when the hot entry set is full. + */ + private void fullMiss(Set> evicted) { + // See section 3.3 case 3: + // "Upon accessing an HIR non-resident block X: + // This is a miss." + + // This condition is unspecified in the paper, but appears to be + // necessary. + if (owner.size >= owner.maximumSize) { + // "We remove the HIR resident block at the front of list Q (it then + // becomes a non-resident block), and replace it out of the cache." + LIRSHashEntry evictedNode = owner.queueFront(); + evicted.add(evictedNode); + } + + // "Then we load the requested block X into the freed buffer and place + // it on the top of stack S." + boolean inStack = inStack(); + moveToStackTop(); + + // "There are two cases for block X:" + if (inStack) { + // "(1) If X is in stack S, we change its status to LIR and move the + // LIR block in the bottom of stack S to the end of list Q with its + // status changed to HIR. A stack pruning is then conducted. + hot(); + owner.stackBottom().migrateToQueue(); + owner.pruneStack(evicted); + } else { + // "(2) If X is not in stack S, we leave its status in HIR and place + // it in the end of list Q." + cold(); + } + } + + /** + * Marks this entry as hot. + */ + private void hot() { + if (state != Recency.LIR_RESIDENT) { + owner.hotSize++; + } + state = Recency.LIR_RESIDENT; + } + + /** + * Marks this entry as cold. + */ + private void cold() { + if (state == Recency.LIR_RESIDENT) { + owner.hotSize--; + } + state = Recency.HIR_RESIDENT; + moveToQueueEnd(); + } + + /** + * Marks this entry as non-resident. + */ + @SuppressWarnings("fallthrough") + private void nonResident() { + switch (state) { + case LIR_RESIDENT: + owner.hotSize--; + // fallthrough + case HIR_RESIDENT: + owner.size--; + break; + } + state = Recency.HIR_NONRESIDENT; + } + + /** + * Returns true if this entry is resident in the cache, false otherwise. + */ + public boolean isResident() { + return (state != Recency.HIR_NONRESIDENT); + } + + + /** + * Temporarily removes this entry from the stack, fixing up neighbor links. + * This entry's links remain unchanged, meaning that {@link #inStack()} will + * continue to return true. This should only be called if this node's links + * will be subsequently changed. + */ + private void tempRemoveFromStack() { + if (inStack()) { + previousInStack.nextInStack = nextInStack; + nextInStack.previousInStack = previousInStack; + } + } + + /** + * Removes this entry from the stack. + */ + private void removeFromStack() { + tempRemoveFromStack(); + previousInStack = null; + nextInStack = null; + } + + /** + * Inserts this entry before the specified existing entry in the stack. 
+ */ + private void addToStackBefore(LIRSHashEntry existingEntry) { + previousInStack = existingEntry.previousInStack; + nextInStack = existingEntry; + previousInStack.nextInStack = this; + nextInStack.previousInStack = this; + } + + /** + * Moves this entry to the top of the stack. + */ + private void moveToStackTop() { + tempRemoveFromStack(); + addToStackBefore(owner.header.nextInStack); + } + + /** + * Moves this entry to the bottom of the stack. + */ + private void moveToStackBottom() { + tempRemoveFromStack(); + addToStackBefore(owner.header); + } + + /** + * Temporarily removes this entry from the queue, fixing up neighbor links. + * This entry's links remain unchanged. This should only be called if this + * node's links will be subsequently changed. + */ + private void tempRemoveFromQueue() { + if (inQueue()) { + previousInQueue.nextInQueue = nextInQueue; + nextInQueue.previousInQueue = previousInQueue; + } + } + + /** + * Removes this entry from the queue. + */ + private void removeFromQueue() { + tempRemoveFromQueue(); + previousInQueue = null; + nextInQueue = null; + } + + /** + * Inserts this entry before the specified existing entry in the queue. + */ + private void addToQueueBefore(LIRSHashEntry existingEntry) { + previousInQueue = existingEntry.previousInQueue; + nextInQueue = existingEntry; + previousInQueue.nextInQueue = this; + nextInQueue.previousInQueue = this; + } + + /** + * Moves this entry to the end of the queue. + */ + private void moveToQueueEnd() { + tempRemoveFromQueue(); + addToQueueBefore(owner.header); + } + + + /** + * Moves this entry from the stack to the queue, marking it cold + * (as hot entries must remain in the stack). This should only be called + * on resident entries, as non-resident entries should not be made resident. + * The bottom entry on the queue is always hot due to stack pruning. + */ + private void migrateToQueue() { + removeFromStack(); + cold(); + } + + /** + * Moves this entry from the queue to the stack, marking it hot (as cold + * resident entries must remain in the queue). + */ + private void migrateToStack() { + removeFromQueue(); + if (!inStack()) { + moveToStackBottom(); + } + hot(); + } + + /** + * Evicts this entry, removing it from the queue and setting its status to + * cold non-resident. If the entry is already absent from the stack, it is + * removed from the backing map; otherwise it remains in order for its + * recency to be maintained. + */ + private void evict() { + removeFromQueue(); + removeFromStack(); + nonResident(); + owner = null; + } + + /** + * Removes this entry from the cache. This operation is not specified in + * the paper, which does not account for forced eviction. + */ + private V remove() { + boolean wasHot = (state == Recency.LIR_RESIDENT); + V result = value; + LIRSHashEntry end = owner != null? owner.queueEnd():null; + evict(); + + // attempt to maintain a constant number of hot entries + if (wasHot) { + if (end != null) { + end.migrateToStack(); + } + } + + return result; + } + } + + + static final class LIRS implements EvictionPolicy { + + /** + * The percentage of the cache which is dedicated to hot blocks. 
+ * See section 5.1 + */ + private static final float L_LIRS = 0.95f; + + /** The owning segment */ + private final Segment segment; + + /** + * The accessQueue for reducing lock contention + * See "BP-Wrapper: a system framework making any replacement algorithms + * (almost) lock contention free" + * + * http://www.cse.ohio-state.edu/hpcs/WWW/HTML/publications/abs09-1.html + * + * */ + private final ConcurrentLinkedQueue> accessQueue; + + /** + * The maxBatchQueueSize + * + * See "BP-Wrapper: a system framework making any replacement algorithms (almost) lock + * contention free" + * */ + private final int maxBatchQueueSize; + + /** The number of LIRS entries in a segment */ + private int size; + + private final float batchThresholdFactor; + + + /** + * This header encompasses two data structures: + * + *
+ * <ul>
+ * <li>The LIRS stack, S, which maintains recency information. All hot
+ * entries are on the stack. All cold and non-resident entries which are more
+ * recent than the least recent hot entry are also stored in the stack (the
+ * stack is always pruned such that the last entry is hot, and all entries
+ * accessed more recently than the last hot entry are present in the stack).
+ * The stack is ordered by recency, with its most recently accessed entry
+ * at the top, and its least recently accessed entry at the bottom.</li>
+ *
+ * <li>The LIRS queue, Q, which enqueues all cold entries for eviction. Cold
+ * entries (by definition in the queue) may be absent from the stack (due to
+ * pruning of the stack). Cold entries are added to the end of the queue
+ * and entries are evicted from the front of the queue.</li>
+ * </ul>
+ */ + private final LIRSHashEntry header = new LIRSHashEntry(null, null,0,null,null); + + /** The maximum number of hot entries (L_lirs in the paper). */ + private final int maximumHotSize; + + /** The maximum number of resident entries (L in the paper). */ + private final int maximumSize ; + + /** The actual number of hot entries. */ + private int hotSize = 0; + + + + public LIRS(Segment s, int capacity, int maxBatchSize, float batchThresholdFactor) { + this.segment = s; + this.maximumSize = capacity; + this.maximumHotSize = calculateLIRSize(capacity); + this.maxBatchQueueSize = maxBatchSize > MAX_BATCH_SIZE ? MAX_BATCH_SIZE : maxBatchSize; + this.batchThresholdFactor = batchThresholdFactor; + this.accessQueue = new ConcurrentLinkedQueue>(); + } + + private static int calculateLIRSize(int maximumSize) { + int result = (int) (L_LIRS * maximumSize); + return (result == maximumSize) ? maximumSize - 1 : result; + } + + @Override + public Set> execute() { + Set> evicted = new HashSet>(); + try { + for (LIRSHashEntry e : accessQueue) { + if(e.isResident()){ + e.hit(evicted); + } + } + removeFromSegment(evicted); + } finally { + accessQueue.clear(); + } + return evicted; + } + + /** + * Prunes HIR blocks in the bottom of the stack until an HOT block sits in + * the stack bottom. If pruned blocks were resident, then they + * remain in the queue; otherwise they are no longer referenced, and are thus + * removed from the backing map. + */ + private void pruneStack(Set> evicted) { + // See section 3.3: + // "We define an operation called "stack pruning" on the LIRS + // stack S, which removes the HIR blocks in the bottom of + // the stack until an LIR block sits in the stack bottom. This + // operation serves for two purposes: (1) We ensure the block in + // the bottom of the stack always belongs to the LIR block set. + // (2) After the LIR block in the bottom is removed, those HIR + // blocks contiguously located above it will not have chances to + // change their status from HIR to LIR, because their recencies + // are larger than the new maximum recency of LIR blocks." + LIRSHashEntry bottom = stackBottom(); + while (bottom != null && bottom.state != Recency.LIR_RESIDENT) { + bottom.removeFromStack(); + if (bottom.state == Recency.HIR_NONRESIDENT) { + evicted.add(bottom); + } + bottom = stackBottom(); + } + } + + @Override + public Set> onEntryMiss(HashEntry en) { + LIRSHashEntry e = (LIRSHashEntry) en; + Set> evicted = e.miss(); + removeFromSegment(evicted); + return evicted; + } + + private void removeFromSegment(Set> evicted) { + for (HashEntry e : evicted) { + ((LIRSHashEntry)e).evict(); + segment.evictionListener.onEntryChosenForEviction(e.value); + segment.remove(e.key, e.hash, null); + } + } + + /* + * Invoked without holding a lock on Segment + */ + @Override + public boolean onEntryHit(HashEntry e) { + accessQueue.add((LIRSHashEntry) e); + return accessQueue.size() >= maxBatchQueueSize * batchThresholdFactor; + } + + /* + * Invoked without holding a lock on Segment + */ + @Override + public boolean thresholdExpired() { + return accessQueue.size() >= maxBatchQueueSize; + } + + @Override + public void onEntryRemove(HashEntry e) { + + ((LIRSHashEntry)e).remove(); + // we could have multiple instances of e in accessQueue; remove them all + while (accessQueue.remove(e)) { + } + } + + @Override + public void clear() { + accessQueue.clear(); + } + + @Override + public Eviction strategy() { + return Eviction.LIRS; + } + + /** + * Returns the entry at the bottom of the stack. 
+ */ + private LIRSHashEntry stackBottom() { + LIRSHashEntry bottom = header.previousInStack; + return (bottom == header) ? null : bottom; + } + + /** + * Returns the entry at the front of the queue. + */ + private LIRSHashEntry queueFront() { + LIRSHashEntry front = header.nextInQueue; + return (front == header) ? null : front; + } + + /** + * Returns the entry at the end of the queue. + */ + private LIRSHashEntry queueEnd() { + LIRSHashEntry end = header.previousInQueue; + return (end == header) ? null : end; + } + + + @Override + public HashEntry createNewEntry(K key, int hash, HashEntry next, V value) { + return new LIRSHashEntry(this,key, hash, next, value); + } + } + + /** + * Segments are specialized versions of hash tables. This + * subclasses from ReentrantLock opportunistically, just to + * simplify some locking and avoid separate construction. + */ + static final class Segment extends ReentrantLock { + /* + * Segments maintain a table of entry lists that are ALWAYS + * kept in a consistent state, so can be read without locking. + * Next fields of nodes are immutable (final). All list + * additions are performed at the front of each bin. This + * makes it easy to check changes, and also fast to traverse. + * When nodes would otherwise be changed, new nodes are + * created to replace them. This works well for hash tables + * since the bin lists tend to be short. (The average length + * is less than two for the default load factor threshold.) + * + * Read operations can thus proceed without locking, but rely + * on selected uses of volatiles to ensure that completed + * write operations performed by other threads are + * noticed. For most purposes, the "count" field, tracking the + * number of elements, serves as that volatile variable + * ensuring visibility. This is convenient because this field + * needs to be read in many read operations anyway: + * + * - All (unsynchronized) read operations must first read the + * "count" field, and should not look at table entries if + * it is 0. + * + * - All (synchronized) write operations should write to + * the "count" field after structurally changing any bin. + * The operations must not take any action that could even + * momentarily cause a concurrent read operation to see + * inconsistent data. This is made easier by the nature of + * the read operations in Map. For example, no operation + * can reveal that the table has grown but the threshold + * has not yet been updated, so there are no atomicity + * requirements for this with respect to reads. + * + * As a guide, all critical volatile reads and writes to the + * count field are marked in code comments. + */ + + private static final long serialVersionUID = 2249069246763182397L; + + /** + * The number of elements in this segment's region. + */ + transient volatile int count; + + /** + * Number of updates that alter the size of the table. This is + * used during bulk-read methods to make sure they see a + * consistent snapshot: If modCounts change during a traversal + * of segments computing size or checking containsValue, then + * we might have an inconsistent view of state so (usually) + * must retry. + */ + transient int modCount; + + /** + * The table is rehashed when its size exceeds this threshold. + * (The value of this field is always (int)(capacity * + * loadFactor).) + */ + transient int threshold; + + /** + * The per-segment table. + */ + transient volatile HashEntry[] table; + + /** + * The load factor for the hash table. 
Even though this value + * is same for all segments, it is replicated to avoid needing + * links to outer object. + * @serial + */ + final float loadFactor; + + final int evictCap; + + transient final EvictionPolicy eviction; + + transient final EvictionListener evictionListener; + + Segment(int cap, int evictCap, float lf, Eviction es, EvictionListener listener) { + loadFactor = lf; + this.evictCap = evictCap; + eviction = es.make(this, evictCap, lf); + evictionListener = listener; + setTable(HashEntry. newArray(cap)); + } + + @SuppressWarnings("unchecked") + static Segment[] newArray(int i) { + return new Segment[i]; + } + + EvictionListener getEvictionListener() { + return evictionListener; + } + + /** + * Sets table to new HashEntry array. + * Call only while holding lock or in constructor. + */ + void setTable(HashEntry[] newTable) { + threshold = (int)(newTable.length * loadFactor); + table = newTable; + } + + /** + * Returns properly casted first entry of bin for given hash. + */ + HashEntry getFirst(int hash) { + HashEntry[] tab = table; + return tab[hash & tab.length - 1]; + } + + /** + * Reads value field of an entry under lock. Called if value + * field ever appears to be null. This is possible only if a + * compiler happens to reorder a HashEntry initialization with + * its table assignment, which is legal under memory model + * but is not known to ever occur. + */ + V readValueUnderLock(HashEntry e) { + lock(); + try { + return e.value; + } finally { + unlock(); + } + } + + /* Specialized implementations of map methods */ + + V get(Object key, int hash) { + int c = count; + if (c != 0) { // read-volatile + V result = null; + HashEntry e = getFirst(hash); + while (e != null) { + if (e.hash == hash && key.equals(e.key)) { + V v = e.value; + if (v != null) { + result = v; + break; + } else { + result = readValueUnderLock(e); // recheck + break; + } + } + e = e.next; + } + // a hit + if (result != null) { + if (eviction.onEntryHit(e)) { + Set> evicted = attemptEviction(false); + notifyEvictionListener(evicted); + } + } + return result; + } + return null; + } + + boolean containsKey(Object key, int hash) { + if (count != 0) { // read-volatile + HashEntry e = getFirst(hash); + while (e != null) { + if (e.hash == hash && key.equals(e.key)) { + return true; + } + e = e.next; + } + } + return false; + } + + boolean containsValue(Object value) { + if (count != 0) { // read-volatile + HashEntry[] tab = table; + int len = tab.length; + for (int i = 0 ; i < len; i++) { + for (HashEntry e = tab[i]; e != null; e = e.next) { + V v = e.value; + if (v == null) { + v = readValueUnderLock(e); + } + if (value.equals(v)) { + return true; + } + } + } + } + return false; + } + + boolean replace(K key, int hash, V oldValue, V newValue) { + lock(); + Set> evicted = null; + try { + HashEntry e = getFirst(hash); + while (e != null && (e.hash != hash || !key.equals(e.key))) { + e = e.next; + } + + boolean replaced = false; + if (e != null && oldValue.equals(e.value)) { + replaced = true; + e.value = newValue; + if (eviction.onEntryHit(e)) { + evicted = attemptEviction(true); + } + } + return replaced; + } finally { + unlock(); + notifyEvictionListener(evicted); + } + } + + V replace(K key, int hash, V newValue) { + lock(); + Set> evicted = null; + try { + HashEntry e = getFirst(hash); + while (e != null && (e.hash != hash || !key.equals(e.key))) { + e = e.next; + } + + V oldValue = null; + if (e != null) { + oldValue = e.value; + e.value = newValue; + if (eviction.onEntryHit(e)) { + evicted = 
attemptEviction(true); + } + } + return oldValue; + } finally { + unlock(); + notifyEvictionListener(evicted); + } + } + + V put(K key, int hash, V value, boolean onlyIfAbsent) { + lock(); + Set> evicted = null; + try { + int c = count; + if (c++ > threshold && eviction.strategy() == Eviction.NONE) { + rehash(); + } + HashEntry[] tab = table; + int index = hash & tab.length - 1; + HashEntry first = tab[index]; + HashEntry e = first; + while (e != null && (e.hash != hash || !key.equals(e.key))) { + e = e.next; + } + + V oldValue; + if (e != null) { + oldValue = e.value; + if (!onlyIfAbsent) { + e.value = value; + eviction.onEntryHit(e); + } + } else { + oldValue = null; + ++modCount; + count = c; // write-volatile + if (eviction.strategy() != Eviction.NONE) { + if (c > evictCap) { + // remove entries;lower count + evicted = eviction.execute(); + // re-read first + first = tab[index]; + } + // add a new entry + tab[index] = eviction.createNewEntry(key, hash, first, value); + // notify a miss + Set> newlyEvicted = eviction.onEntryMiss(tab[index]); + if (!newlyEvicted.isEmpty()) { + if (evicted != null) { + evicted.addAll(newlyEvicted); + } else { + evicted = newlyEvicted; + } + } + } else { + tab[index] = eviction.createNewEntry(key, hash, first, value); + } + } + return oldValue; + } finally { + unlock(); + notifyEvictionListener(evicted); + } + } + + void rehash() { + HashEntry[] oldTable = table; + int oldCapacity = oldTable.length; + if (oldCapacity >= MAXIMUM_CAPACITY) { + return; + } + + /* + * Reclassify nodes in each list to new Map. Because we are + * using power-of-two expansion, the elements from each bin + * must either stay at same index, or move with a power of two + * offset. We eliminate unnecessary node creation by catching + * cases where old nodes can be reused because their next + * fields won't change. Statistically, at the default + * threshold, only about one-sixth of them need cloning when + * a table doubles. The nodes they replace will be garbage + * collectable as soon as they are no longer referenced by any + * reader thread that may be in the midst of traversing table + * right now. + */ + + HashEntry[] newTable = HashEntry.newArray(oldCapacity<<1); + threshold = (int)(newTable.length * loadFactor); + int sizeMask = newTable.length - 1; + for (int i = 0; i < oldCapacity ; i++) { + // We need to guarantee that any existing reads of old Map can + // proceed. So we cannot yet null out each bin. + HashEntry e = oldTable[i]; + + if (e != null) { + HashEntry next = e.next; + int idx = e.hash & sizeMask; + + // Single node on list + if (next == null) { + newTable[idx] = e; + } else { + // Reuse trailing consecutive sequence at same slot + HashEntry lastRun = e; + int lastIdx = idx; + for (HashEntry last = next; + last != null; + last = last.next) { + int k = last.hash & sizeMask; + if (k != lastIdx) { + lastIdx = k; + lastRun = last; + } + } + newTable[lastIdx] = lastRun; + + // Clone all remaining nodes + for (HashEntry p = e; p != lastRun; p = p.next) { + int k = p.hash & sizeMask; + HashEntry n = newTable[k]; + newTable[k] = eviction.createNewEntry(p.key, p.hash, n, p.value); + } + } + } + } + table = newTable; + } + + /** + * Remove; match on key only if value null, else match both. 
+ */ + V remove(Object key, int hash, Object value) { + lock(); + try { + int c = count - 1; + HashEntry[] tab = table; + int index = hash & tab.length - 1; + HashEntry first = tab[index]; + HashEntry e = first; + while (e != null && (e.hash != hash || !key.equals(e.key))) { + e = e.next; + } + + V oldValue = null; + if (e != null) { + V v = e.value; + if (value == null || value.equals(v)) { + oldValue = v; + // All entries following removed node can stay + // in list, but all preceding ones need to be + // cloned. + ++modCount; + + // e was removed + eviction.onEntryRemove(e); + + HashEntry newFirst = e.next; + for (HashEntry p = first; p != e; p = p.next) { + // TODO A remove operation makes the map behave like all the other keys in the bucket were just added??? + // allow p to be GC-ed + eviction.onEntryRemove(p); + newFirst = eviction.createNewEntry(p.key, p.hash, newFirst, p.value); + // and notify eviction algorithm about new hash entries + eviction.onEntryMiss(newFirst); + } + + tab[index] = newFirst; + count = c; // write-volatile + } + } + return oldValue; + } finally { + unlock(); + } + } + + void clear() { + if (count != 0) { + lock(); + try { + HashEntry[] tab = table; + for (int i = 0; i < tab.length; i++) { + tab[i] = null; + } + ++modCount; + eviction.clear(); + count = 0; // write-volatile + } finally { + unlock(); + } + } + } + + private Set> attemptEviction(boolean lockedAlready) { + Set> evicted = null; + boolean obtainedLock = lockedAlready || tryLock(); + if (!obtainedLock && eviction.thresholdExpired()) { + lock(); + obtainedLock = true; + } + if (obtainedLock) { + try { + if (eviction.thresholdExpired()) { + evicted = eviction.execute(); + } + } finally { + if (!lockedAlready) { + unlock(); + } + } + } + return evicted; + } + + private void notifyEvictionListener(Set> evicted) { + // piggyback listener invocation on callers thread outside lock + if (evicted != null) { + Map evictedCopy; + if (evicted.size() == 1) { + HashEntry evictedEntry = evicted.iterator().next(); + evictedCopy = singletonMap(evictedEntry.key, evictedEntry.value); + } else { + evictedCopy = new HashMap(evicted.size()); + for (HashEntry he : evicted) { + evictedCopy.put(he.key, he.value); + } + evictedCopy = unmodifiableMap(evictedCopy); + } + evictionListener.onEntryEviction(evictedCopy); + } + } + } + + + /* ---------------- Public operations -------------- */ + + + /** + * Creates a new, empty map with the specified maximum capacity, load factor and concurrency + * level. + * + * @param capacity + * is the upper bound capacity for the number of elements in this map + * + * @param concurrencyLevel + * the estimated number of concurrently updating threads. The implementation performs + * internal sizing to try to accommodate this many threads. + * + * @param evictionStrategy + * the algorithm used to evict elements from this map + * + * @param evictionListener + * the evicton listener callback to be notified about evicted elements + * + * @throws IllegalArgumentException + * if the initial capacity is negative or the load factor or concurrencyLevel are + * nonpositive. 
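+ * <p>
+ * (Worked illustration, arguments assumed: {@code capacity = 2048} and
+ * {@code concurrencyLevel = 16} give 16 segments, so {@code segmentShift}
+ * becomes 28 and {@code segmentMask} becomes 15; each segment is then sized
+ * to the next power of two of 2048 / 16, i.e. 128 entries, which also acts
+ * as that segment's eviction capacity.)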
+ */ + public BoundedConcurrentHashMap(int capacity, int concurrencyLevel, + Eviction evictionStrategy, EvictionListener evictionListener) { + if (capacity < 0 || concurrencyLevel <= 0) { + throw new IllegalArgumentException(); + } + + concurrencyLevel = Math.min(capacity / 2, concurrencyLevel); // concurrencyLevel cannot be > capacity/2 + concurrencyLevel = Math.max(concurrencyLevel, 1); // concurrencyLevel cannot be less than 1 + + // minimum two elements per segment + if (capacity < concurrencyLevel * 2 && capacity != 1) { + throw new IllegalArgumentException("Maximum capacity has to be at least twice the concurrencyLevel"); + } + + if (evictionStrategy == null || evictionListener == null) { + throw new IllegalArgumentException(); + } + + if (concurrencyLevel > MAX_SEGMENTS) { + concurrencyLevel = MAX_SEGMENTS; + } + + // Find power-of-two sizes best matching arguments + int sshift = 0; + int ssize = 1; + while (ssize < concurrencyLevel) { + ++sshift; + ssize <<= 1; + } + segmentShift = 32 - sshift; + segmentMask = ssize - 1; + this.segments = Segment.newArray(ssize); + + if (capacity > MAXIMUM_CAPACITY) { + capacity = MAXIMUM_CAPACITY; + } + int c = capacity / ssize; + int cap = 1; + while (cap < c) { + cap <<= 1; + } + + for (int i = 0; i < this.segments.length; ++i) { + this.segments[i] = new Segment(cap, c, DEFAULT_LOAD_FACTOR, evictionStrategy, evictionListener); + } + } + + /** + * Creates a new, empty map with the specified maximum capacity, load factor, concurrency + * level and LRU eviction policy. + * + * @param capacity + * is the upper bound capacity for the number of elements in this map + * + * @param concurrencyLevel + * the estimated number of concurrently updating threads. The implementation performs + * internal sizing to try to accommodate this many threads. + * + * @throws IllegalArgumentException + * if the initial capacity is negative or the load factor or concurrencyLevel are + * nonpositive. + */ + public BoundedConcurrentHashMap(int capacity, int concurrencyLevel) { + this(capacity, concurrencyLevel, Eviction.LRU); + } + + /** + * Creates a new, empty map with the specified maximum capacity, load factor, concurrency + * level and eviction strategy. + * + * @param capacity + * is the upper bound capacity for the number of elements in this map + * + * @param concurrencyLevel + * the estimated number of concurrently updating threads. The implementation performs + * internal sizing to try to accommodate this many threads. + * + * @param evictionStrategy + * the algorithm used to evict elements from this map + * + * @throws IllegalArgumentException + * if the initial capacity is negative or the load factor or concurrencyLevel are + * nonpositive. + */ + public BoundedConcurrentHashMap(int capacity, int concurrencyLevel, Eviction evictionStrategy) { + this(capacity, concurrencyLevel, evictionStrategy, new NullEvictionListener()); + } + + /** + * Creates a new, empty map with the specified maximum capacity, default concurrency + * level and LRU eviction policy. 
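+ * <p>
+ * A minimal usage sketch; the key/value types and the bound are illustrative
+ * assumptions only:
+ * <pre>{@code
+ * BoundedConcurrentHashMap<String, Object> cache =
+ *         new BoundedConcurrentHashMap<String, Object>( 512 );
+ * cache.put( "k", new Object() );   // may trigger LRU eviction once the bound is reached
+ * Object v = cache.get( "k" );
+ * }</pre>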
+ * + * @param capacity + * is the upper bound capacity for the number of elements in this map + * + * + * @throws IllegalArgumentException if the initial capacity of + * elements is negative or the load factor is nonpositive + * + * @since 1.6 + */ + public BoundedConcurrentHashMap(int capacity) { + this(capacity, DEFAULT_CONCURRENCY_LEVEL); + } + + /** + * Creates a new, empty map with the default maximum capacity + */ + public BoundedConcurrentHashMap() { + this(DEFAULT_MAXIMUM_CAPACITY, DEFAULT_CONCURRENCY_LEVEL); + } + + /** + * Returns true if this map contains no key-value mappings. + * + * @return true if this map contains no key-value mappings + */ + @Override + public boolean isEmpty() { + final Segment[] segments = this.segments; + /* + * We keep track of per-segment modCounts to avoid ABA + * problems in which an element in one segment was added and + * in another removed during traversal, in which case the + * table was never actually empty at any point. Note the + * similar use of modCounts in the size() and containsValue() + * methods, which are the only other methods also susceptible + * to ABA problems. + */ + int[] mc = new int[segments.length]; + int mcsum = 0; + for (int i = 0; i < segments.length; ++i) { + if (segments[i].count != 0) { + return false; + } else { + mcsum += mc[i] = segments[i].modCount; + } + } + // If mcsum happens to be zero, then we know we got a snapshot + // before any modifications at all were made. This is + // probably common enough to bother tracking. + if (mcsum != 0) { + for (int i = 0; i < segments.length; ++i) { + if (segments[i].count != 0 || mc[i] != segments[i].modCount) { + return false; + } + } + } + return true; + } + + /** + * Returns the number of key-value mappings in this map. If the + * map contains more than Integer.MAX_VALUE elements, returns + * Integer.MAX_VALUE. + * + * @return the number of key-value mappings in this map + */ + @Override + public int size() { + final Segment[] segments = this.segments; + long sum = 0; + long check = 0; + int[] mc = new int[segments.length]; + // Try a few times to get accurate count. On failure due to + // continuous async changes in table, resort to locking. + for (int k = 0; k < RETRIES_BEFORE_LOCK; ++ k) { + check = 0; + sum = 0; + int mcsum = 0; + for (int i = 0; i < segments.length; ++ i) { + sum += segments[i].count; + mcsum += mc[i] = segments[i].modCount; + } + if (mcsum != 0) { + for (int i = 0; i < segments.length; ++ i) { + check += segments[i].count; + if (mc[i] != segments[i].modCount) { + check = -1; // force retry + break; + } + } + } + if (check == sum) { + break; + } + } + if (check != sum) { // Resort to locking all segments + sum = 0; + for (int i = 0; i < segments.length; ++ i) { + segments[i].lock(); + } + try { + for (int i = 0; i < segments.length; ++ i) { + sum += segments[i].count; + } + } finally { + for (int i = 0; i < segments.length; ++ i) { + segments[i].unlock(); + } + } + } + if (sum > Integer.MAX_VALUE) { + return Integer.MAX_VALUE; + } else { + return (int) sum; + } + } + + /** + * Returns the value to which the specified key is mapped, + * or {@code null} if this map contains no mapping for the key. + * + *
<p>
More formally, if this map contains a mapping from a key + * {@code k} to a value {@code v} such that {@code key.equals(k)}, + * then this method returns {@code v}; otherwise it returns + * {@code null}. (There can be at most one such mapping.) + * + * @throws NullPointerException if the specified key is null + */ + @Override + public V get(Object key) { + int hash = hash(key.hashCode()); + return segmentFor(hash).get(key, hash); + } + + /** + * Tests if the specified object is a key in this table. + * + * @param key possible key + * @return true if and only if the specified object + * is a key in this table, as determined by the + * equals method; false otherwise. + * @throws NullPointerException if the specified key is null + */ + @Override + public boolean containsKey(Object key) { + int hash = hash(key.hashCode()); + return segmentFor(hash).containsKey(key, hash); + } + + /** + * Returns true if this map maps one or more keys to the + * specified value. Note: This method requires a full internal + * traversal of the hash table, and so is much slower than + * method containsKey. + * + * @param value value whose presence in this map is to be tested + * @return true if this map maps one or more keys to the + * specified value + * @throws NullPointerException if the specified value is null + */ + @Override + public boolean containsValue(Object value) { + if (value == null) { + throw new NullPointerException(); + } + + // See explanation of modCount use above + + final Segment[] segments = this.segments; + int[] mc = new int[segments.length]; + + // Try a few times without locking + for (int k = 0; k < RETRIES_BEFORE_LOCK; ++ k) { + int mcsum = 0; + for (int i = 0; i < segments.length; ++ i) { + @SuppressWarnings("unused") + int c = segments[i].count; // read-volatile + mcsum += mc[i] = segments[i].modCount; + if (segments[i].containsValue(value)) { + return true; + } + } + boolean cleanSweep = true; + if (mcsum != 0) { + for (int i = 0; i < segments.length; ++ i) { + @SuppressWarnings("unused") + int c = segments[i].count; // read-volatile + if (mc[i] != segments[i].modCount) { + cleanSweep = false; + break; + } + } + } + if (cleanSweep) { + return false; + } + } + // Resort to locking all segments + for (int i = 0; i < segments.length; ++ i) { + segments[i].lock(); + } + boolean found = false; + try { + for (int i = 0; i < segments.length; ++ i) { + if (segments[i].containsValue(value)) { + found = true; + break; + } + } + } finally { + for (int i = 0; i < segments.length; ++ i) { + segments[i].unlock(); + } + } + return found; + } + + /** + * Legacy method testing if some key maps into the specified value + * in this table. This method is identical in functionality to + * {@link #containsValue}, and exists solely to ensure + * full compatibility with class {@link Hashtable}, + * which supported this method prior to introduction of the + * Java Collections framework. + + * @param value a value to search for + * @return true if and only if some key maps to the + * value argument in this table as + * determined by the equals method; + * false otherwise + * @throws NullPointerException if the specified value is null + */ + public boolean contains(Object value) { + return containsValue(value); + } + + /** + * Maps the specified key to the specified value in this table. + * Neither the key nor the value can be null. + * + *
<p>
The value can be retrieved by calling the get method + * with a key that is equal to the original key. + * + * @param key key with which the specified value is to be associated + * @param value value to be associated with the specified key + * @return the previous value associated with key, or + * null if there was no mapping for key + * @throws NullPointerException if the specified key or value is null + */ + @Override + public V put(K key, V value) { + if (value == null) { + throw new NullPointerException(); + } + int hash = hash(key.hashCode()); + return segmentFor(hash).put(key, hash, value, false); + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or null if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @Override + public V putIfAbsent(K key, V value) { + if (value == null) { + throw new NullPointerException(); + } + int hash = hash(key.hashCode()); + return segmentFor(hash).put(key, hash, value, true); + } + + /** + * Copies all of the mappings from the specified map to this one. + * These mappings replace any mappings that this map had for any of the + * keys currently in the specified map. + * + * @param m mappings to be stored in this map + */ + @Override + public void putAll(Map m) { + for (Map.Entry e: m.entrySet()) { + put(e.getKey(), e.getValue()); + } + } + + /** + * Removes the key (and its corresponding value) from this map. + * This method does nothing if the key is not in the map. + * + * @param key the key that needs to be removed + * @return the previous value associated with key, or + * null if there was no mapping for key + * @throws NullPointerException if the specified key is null + */ + @Override + public V remove(Object key) { + int hash = hash(key.hashCode()); + return segmentFor(hash).remove(key, hash, null); + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if the specified key is null + */ + @Override + public boolean remove(Object key, Object value) { + int hash = hash(key.hashCode()); + if (value == null) { + return false; + } + return segmentFor(hash).remove(key, hash, value) != null; + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if any of the arguments are null + */ + @Override + public boolean replace(K key, V oldValue, V newValue) { + if (oldValue == null || newValue == null) { + throw new NullPointerException(); + } + int hash = hash(key.hashCode()); + return segmentFor(hash).replace(key, hash, oldValue, newValue); + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or null if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @Override + public V replace(K key, V value) { + if (value == null) { + throw new NullPointerException(); + } + int hash = hash(key.hashCode()); + return segmentFor(hash).replace(key, hash, value); + } + + /** + * Removes all of the mappings from this map. + */ + @Override + public void clear() { + for (int i = 0; i < segments.length; ++ i) { + segments[i].clear(); + } + } + + /** + * Returns a {@link Set} view of the keys contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. The set supports element + * removal, which removes the corresponding mapping from this map, + * via the Iterator.remove, Set.remove, + * removeAll, retainAll, and clear + * operations. 
It does not support the add or + * addAll operations. + * + *
<p>
The view's iterator is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + @Override + public Set keySet() { + Set ks = keySet; + return ks != null? ks : (keySet = new KeySet()); + } + + /** + * Returns a {@link Collection} view of the values contained in this map. + * The collection is backed by the map, so changes to the map are + * reflected in the collection, and vice-versa. The collection + * supports element removal, which removes the corresponding + * mapping from this map, via the Iterator.remove, + * Collection.remove, removeAll, + * retainAll, and clear operations. It does not + * support the add or addAll operations. + * + *
<p>
The view's iterator is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + @Override + public Collection values() { + Collection vs = values; + return vs != null? vs : (values = new Values()); + } + + /** + * Returns a {@link Set} view of the mappings contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. The set supports element + * removal, which removes the corresponding mapping from the map, + * via the Iterator.remove, Set.remove, + * removeAll, retainAll, and clear + * operations. It does not support the add or + * addAll operations. + * + *
<p>
The view's iterator is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + @Override + public Set> entrySet() { + Set> es = entrySet; + return es != null? es : (entrySet = new EntrySet()); + } + + /** + * Returns an enumeration of the keys in this table. + * + * @return an enumeration of the keys in this table + * @see #keySet() + */ + public Enumeration keys() { + return new KeyIterator(); + } + + /** + * Returns an enumeration of the values in this table. + * + * @return an enumeration of the values in this table + * @see #values() + */ + public Enumeration elements() { + return new ValueIterator(); + } + + /* ---------------- Iterator Support -------------- */ + + abstract class HashIterator { + int nextSegmentIndex; + + int nextTableIndex; + + HashEntry[] currentTable; + + HashEntry nextEntry; + + HashEntry lastReturned; + + HashIterator() { + nextSegmentIndex = segments.length - 1; + nextTableIndex = -1; + advance(); + } + + public boolean hasMoreElements() { + return hasNext(); + } + + final void advance() { + if (nextEntry != null && (nextEntry = nextEntry.next) != null) { + return; + } + + while (nextTableIndex >= 0) { + if ((nextEntry = currentTable[nextTableIndex --]) != null) { + return; + } + } + + while (nextSegmentIndex >= 0) { + Segment seg = segments[nextSegmentIndex --]; + if (seg.count != 0) { + currentTable = seg.table; + for (int j = currentTable.length - 1; j >= 0; -- j) { + if ((nextEntry = currentTable[j]) != null) { + nextTableIndex = j - 1; + return; + } + } + } + } + } + + public boolean hasNext() { + return nextEntry != null; + } + + HashEntry nextEntry() { + if (nextEntry == null) { + throw new NoSuchElementException(); + } + lastReturned = nextEntry; + advance(); + return lastReturned; + } + + public void remove() { + if (lastReturned == null) { + throw new IllegalStateException(); + } + BoundedConcurrentHashMap.this.remove(lastReturned.key); + lastReturned = null; + } + } + + final class KeyIterator extends HashIterator implements Iterator, Enumeration { + @Override + public K next() { + return super.nextEntry().key; + } + + @Override + public K nextElement() { + return super.nextEntry().key; + } + } + + final class ValueIterator extends HashIterator implements Iterator, Enumeration { + @Override + public V next() { + return super.nextEntry().value; + } + + @Override + public V nextElement() { + return super.nextEntry().value; + } + } + + /** + * Custom Entry class used by EntryIterator.next(), that relays + * setValue changes to the underlying map. + */ + final class WriteThroughEntry extends AbstractMap.SimpleEntry { + + private static final long serialVersionUID = -7041346694785573824L; + + WriteThroughEntry(K k, V v) { + super(k, v); + } + + /** + * Set our entry's value and write through to the map. The + * value to return is somewhat arbitrary here. Since a + * WriteThroughEntry does not necessarily track asynchronous + * changes, the most recent "previous" value could be + * different from what we return (or could even have been + * removed in which case the put will re-establish). We do not + * and cannot guarantee more. 
+ */ + @Override + public V setValue(V value) { + if (value == null) { + throw new NullPointerException(); + } + V v = super.setValue(value); + BoundedConcurrentHashMap.this.put(getKey(), value); + return v; + } + } + + final class EntryIterator extends HashIterator implements Iterator> { + @Override + public Map.Entry next() { + HashEntry e = super.nextEntry(); + return new WriteThroughEntry(e.key, e.value); + } + } + + final class KeySet extends AbstractSet { + @Override + public Iterator iterator() { + return new KeyIterator(); + } + + @Override + public int size() { + return BoundedConcurrentHashMap.this.size(); + } + + @Override + public boolean isEmpty() { + return BoundedConcurrentHashMap.this.isEmpty(); + } + + @Override + public boolean contains(Object o) { + return BoundedConcurrentHashMap.this.containsKey(o); + } + + @Override + public boolean remove(Object o) { + return BoundedConcurrentHashMap.this.remove(o) != null; + } + + @Override + public void clear() { + BoundedConcurrentHashMap.this.clear(); + } + } + + final class Values extends AbstractCollection { + @Override + public Iterator iterator() { + return new ValueIterator(); + } + + @Override + public int size() { + return BoundedConcurrentHashMap.this.size(); + } + + @Override + public boolean isEmpty() { + return BoundedConcurrentHashMap.this.isEmpty(); + } + + @Override + public boolean contains(Object o) { + return BoundedConcurrentHashMap.this.containsValue(o); + } + + @Override + public void clear() { + BoundedConcurrentHashMap.this.clear(); + } + } + + final class EntrySet extends AbstractSet> { + @Override + public Iterator> iterator() { + return new EntryIterator(); + } + + @Override + public boolean contains(Object o) { + if (!(o instanceof Map.Entry)) { + return false; + } + Map.Entry e = (Map.Entry) o; + V v = BoundedConcurrentHashMap.this.get(e.getKey()); + return v != null && v.equals(e.getValue()); + } + + @Override + public boolean remove(Object o) { + if (!(o instanceof Map.Entry)) { + return false; + } + Map.Entry e = (Map.Entry) o; + return BoundedConcurrentHashMap.this.remove(e.getKey(), e.getValue()); + } + + @Override + public int size() { + return BoundedConcurrentHashMap.this.size(); + } + + @Override + public boolean isEmpty() { + return BoundedConcurrentHashMap.this.isEmpty(); + } + + @Override + public void clear() { + BoundedConcurrentHashMap.this.clear(); + } + } + + /* ---------------- Serialization Support -------------- */ + + /** + * Save the state of the ConcurrentHashMap instance to a + * stream (i.e., serialize it). + * @param s the stream + * @serialData + * the key (Object) and value (Object) + * for each key-value mapping, followed by a null pair. + * The key-value mappings are emitted in no particular order. + */ + private void writeObject(java.io.ObjectOutputStream s) throws IOException { + s.defaultWriteObject(); + + for (int k = 0; k < segments.length; ++ k) { + Segment seg = segments[k]; + seg.lock(); + try { + HashEntry[] tab = seg.table; + for (int i = 0; i < tab.length; ++ i) { + for (HashEntry e = tab[i]; e != null; e = e.next) { + s.writeObject(e.key); + s.writeObject(e.value); + } + } + } finally { + seg.unlock(); + } + } + s.writeObject(null); + s.writeObject(null); + } + + /** + * Reconstitute the ConcurrentHashMap instance from a + * stream (i.e., deserialize it). 
+ * @param s the stream + */ + @SuppressWarnings("unchecked") + private void readObject(java.io.ObjectInputStream s) throws IOException, + ClassNotFoundException { + s.defaultReadObject(); + + // Initialize each segment to be minimally sized, and let grow. + for (int i = 0; i < segments.length; ++ i) { + segments[i].setTable(new HashEntry[1]); + } + + // Read the keys and values, and put the mappings in the table + for (;;) { + K key = (K) s.readObject(); + V value = (V) s.readObject(); + if (key == null) { + break; + } + put(key, value); + } + } +} \ No newline at end of file diff --git a/hibernate-core/src/main/java/org/hibernate/internal/util/collections/ConcurrentReferenceHashMap.java b/hibernate-core/src/main/java/org/hibernate/internal/util/collections/ConcurrentReferenceHashMap.java new file mode 100644 index 0000000000..8d0ea6ac93 --- /dev/null +++ b/hibernate-core/src/main/java/org/hibernate/internal/util/collections/ConcurrentReferenceHashMap.java @@ -0,0 +1,1889 @@ +/* + * Hibernate, Relational Persistence for Idiomatic Java + * + * Copyright (c) 2008, Red Hat Middleware LLC or third-party contributors as + * indicated by the @author tags or express copyright attribution + * statements applied by the authors. All third-party contributions are + * distributed under license by Red Hat Middleware LLC. + * + * This copyrighted material is made available to anyone wishing to use, modify, + * copy, or redistribute it subject to the terms and conditions of the GNU + * Lesser General Public License, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this distribution; if not, write to: + * Free Software Foundation, Inc. + * 51 Franklin Street, Fifth Floor + * Boston, MA 02110-1301 USA + */ + +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/licenses/publicdomain + */ + +package org.hibernate.internal.util.collections; + +import java.io.IOException; +import java.io.Serializable; +import java.lang.ref.Reference; +import java.lang.ref.ReferenceQueue; +import java.lang.ref.SoftReference; +import java.lang.ref.WeakReference; +import java.util.AbstractCollection; +import java.util.AbstractMap; +import java.util.AbstractSet; +import java.util.Collection; +import java.util.ConcurrentModificationException; +import java.util.EnumSet; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.Hashtable; +import java.util.IdentityHashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Set; +import java.util.concurrent.locks.ReentrantLock; + +/** + * An advanced hash table supporting configurable garbage collection semantics + * of keys and values, optional referential-equality, full concurrency of + * retrievals, and adjustable expected concurrency for updates. + * + * This table is designed around specific advanced use-cases. If there is any + * doubt whether this table is for you, you most likely should be using + * {@link java.util.concurrent.ConcurrentHashMap} instead. + * + * This table supports strong, weak, and soft keys and values. 
By default keys + * are weak, and values are strong. Such a configuration offers similar behavior + * to {@link java.util.WeakHashMap}, entries of this table are periodically + * removed once their corresponding keys are no longer referenced outside of + * this table. In other words, this table will not prevent a key from being + * discarded by the garbage collector. Once a key has been discarded by the + * collector, the corresponding entry is no longer visible to this table; + * however, the entry may occupy space until a future table operation decides to + * reclaim it. For this reason, summary functions such as size and + * isEmpty might return a value greater than the observed number of + * entries. In order to support a high level of concurrency, stale entries are + * only reclaimed during blocking (usually mutating) operations. + * + * Enabling soft keys allows entries in this table to remain until their space + * is absolutely needed by the garbage collector. This is unlike weak keys which + * can be reclaimed as soon as they are no longer referenced by a normal strong + * reference. The primary use case for soft keys is a cache, which ideally + * occupies memory that is not in use for as long as possible. + * + * By default, values are held using a normal strong reference. This provides + * the commonly desired guarantee that a value will always have at least the + * same life-span as it's key. For this reason, care should be taken to ensure + * that a value never refers, either directly or indirectly, to its key, thereby + * preventing reclamation. If this is unavoidable, then it is recommended to use + * the same reference type in use for the key. However, it should be noted that + * non-strong values may disappear before their corresponding key. + * + * While this table does allow the use of both strong keys and values, it is + * recommended to use {@link java.util.concurrent.ConcurrentHashMap} for such a + * configuration, since it is optimized for that case. + * + * Just like {@link java.util.concurrent.ConcurrentHashMap}, this class obeys + * the same functional specification as {@link java.util.Hashtable}, and + * includes versions of methods corresponding to each method of + * Hashtable. However, even though all operations are thread-safe, + * retrieval operations do not entail locking, and there is + * not any support for locking the entire table in a way that + * prevents all access. This class is fully interoperable with + * Hashtable in programs that rely on its thread safety but not on + * its synchronization details. + * + *
<p>
+ * Retrieval operations (including get) generally do not block, so + * may overlap with update operations (including put and + * remove). Retrievals reflect the results of the most recently + * completed update operations holding upon their onset. For + * aggregate operations such as putAll and clear, + * concurrent retrievals may reflect insertion or removal of only some entries. + * Similarly, Iterators and Enumerations return elements reflecting the state of + * the hash table at some point at or since the creation of the + * iterator/enumeration. They do not throw + * {@link ConcurrentModificationException}. However, iterators are designed to + * be used by only one thread at a time. + * + *
<p>
+ * The allowed concurrency among update operations is guided by the optional + * concurrencyLevel constructor argument (default 16), + * which is used as a hint for internal sizing. The table is internally + * partitioned to try to permit the indicated number of concurrent updates + * without contention. Because placement in hash tables is essentially random, + * the actual concurrency will vary. Ideally, you should choose a value to + * accommodate as many threads as will ever concurrently modify the table. Using + * a significantly higher value than you need can waste space and time, and a + * significantly lower value can lead to thread contention. But overestimates + * and underestimates within an order of magnitude do not usually have much + * noticeable impact. A value of one is appropriate when it is known that only + * one thread will modify and all others will only read. Also, resizing this or + * any other kind of hash table is a relatively slow operation, so, when + * possible, it is a good idea to provide estimates of expected table sizes in + * constructors. + * + *
<p>
+ * This class and its views and iterators implement all of the optional + * methods of the {@link Map} and {@link Iterator} interfaces. + * + *
<p>
+ * Like {@link Hashtable} but unlike {@link HashMap}, this class does + * not allow null to be used as a key or value. + * + *
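+ * <p>
+ * A minimal construction sketch. The capacity, the key/value types and the
+ * six-argument constructor (initialCapacity, loadFactor, concurrencyLevel,
+ * keyType, valueType, options) used here are assumptions for illustration:
+ * <pre>{@code
+ * ConcurrentReferenceHashMap<String, Object> map =
+ *         new ConcurrentReferenceHashMap<String, Object>(
+ *                 16, 0.75f, 1,
+ *                 ReferenceType.STRONG, ReferenceType.WEAK, null );
+ * map.put( "k", new Object() );   // value held via a WeakReference
+ * Object v = map.get( "k" );      // null once the weak value has been collected
+ * }</pre>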
<p>
+ * This class is a member of the + * Java Collections Framework. + * + * @param the type of keys maintained by this map + * @param the type of mapped values + * + * @author Doug Lea + * @author Jason T. Greene + */ +public class ConcurrentReferenceHashMap extends AbstractMap + implements java.util.concurrent.ConcurrentMap, Serializable { + private static final long serialVersionUID = 7249069246763182397L; + + /* + * The basic strategy is to subdivide the table among Segments, + * each of which itself is a concurrently readable hash table. + */ + + /** + * An option specifying which Java reference type should be used to refer + * to a key and/or value. + */ + public static enum ReferenceType { + /** + * Indicates a normal Java strong reference should be used + */ + STRONG, + /** + * Indicates a {@link WeakReference} should be used + */ + WEAK, + /** + * Indicates a {@link SoftReference} should be used + */ + SOFT + } + + ; + + + public static enum Option { + /** + * Indicates that referential-equality (== instead of .equals()) should + * be used when locating keys. This offers similar behavior to {@link IdentityHashMap} + */ + IDENTITY_COMPARISONS + } + + ; + + /* ---------------- Constants -------------- */ + + static final ReferenceType DEFAULT_KEY_TYPE = ReferenceType.WEAK; + + static final ReferenceType DEFAULT_VALUE_TYPE = ReferenceType.STRONG; + + + /** + * The default initial capacity for this table, + * used when not otherwise specified in a constructor. + */ + static final int DEFAULT_INITIAL_CAPACITY = 16; + + /** + * The default load factor for this table, used when not + * otherwise specified in a constructor. + */ + static final float DEFAULT_LOAD_FACTOR = 0.75f; + + /** + * The default concurrency level for this table, used when not + * otherwise specified in a constructor. + */ + static final int DEFAULT_CONCURRENCY_LEVEL = 16; + + /** + * The maximum capacity, used if a higher value is implicitly + * specified by either of the constructors with arguments. MUST + * be a power of two <= 1<<30 to ensure that entries are indexable + * using ints. + */ + static final int MAXIMUM_CAPACITY = 1 << 30; + + /** + * The maximum number of segments to allow; used to bound + * constructor arguments. + */ + static final int MAX_SEGMENTS = 1 << 16; // slightly conservative + + /** + * Number of unsynchronized retries in size and containsValue + * methods before resorting to locking. This is used to avoid + * unbounded retries if tables undergo continuous modification + * which would make it impossible to obtain an accurate result. + */ + static final int RETRIES_BEFORE_LOCK = 2; + + /* ---------------- Fields -------------- */ + + /** + * Mask value for indexing into segments. The upper bits of a + * key's hash code are used to choose the segment. + */ + final int segmentMask; + + /** + * Shift value for indexing within segments. + */ + final int segmentShift; + + /** + * The segments, each of which is a specialized hash table + */ + final Segment[] segments; + + boolean identityComparisons; + + transient Set keySet; + transient Set> entrySet; + transient Collection values; + + /* ---------------- Small Utilities -------------- */ + + /** + * Applies a supplemental hash function to a given hashCode, which + * defends against poor quality hash functions. This is critical + * because ConcurrentReferenceHashMap uses power-of-two length hash tables, + * that otherwise encounter collisions for hashCodes that do not + * differ in lower or upper bits. 
+ */ + private static int hash(int h) { + // Spread bits to regularize both segment and index locations, + // using variant of single-word Wang/Jenkins hash. + h += ( h << 15 ) ^ 0xffffcd7d; + h ^= ( h >>> 10 ); + h += ( h << 3 ); + h ^= ( h >>> 6 ); + h += ( h << 2 ) + ( h << 14 ); + return h ^ ( h >>> 16 ); + } + + /** + * Returns the segment that should be used for key with given hash + * + * @param hash the hash code for the key + * + * @return the segment + */ + final Segment segmentFor(int hash) { + return segments[( hash >>> segmentShift ) & segmentMask]; + } + + private int hashOf(Object key) { + return hash( + identityComparisons ? + System.identityHashCode( key ) : key.hashCode() + ); + } + + /* ---------------- Inner Classes -------------- */ + + static interface KeyReference { + int keyHash(); + + Object keyRef(); + } + + /** + * A weak-key reference which stores the key hash needed for reclamation. + */ + static final class WeakKeyReference extends WeakReference implements KeyReference { + final int hash; + + WeakKeyReference(K key, int hash, ReferenceQueue refQueue) { + super( key, refQueue ); + this.hash = hash; + } + + public final int keyHash() { + return hash; + } + + public final Object keyRef() { + return this; + } + } + + /** + * A soft-key reference which stores the key hash needed for reclamation. + */ + static final class SoftKeyReference extends SoftReference implements KeyReference { + final int hash; + + SoftKeyReference(K key, int hash, ReferenceQueue refQueue) { + super( key, refQueue ); + this.hash = hash; + } + + public final int keyHash() { + return hash; + } + + public final Object keyRef() { + return this; + } + } + + static final class WeakValueReference extends WeakReference implements KeyReference { + final Object keyRef; + final int hash; + + WeakValueReference(V value, Object keyRef, int hash, ReferenceQueue refQueue) { + super( value, refQueue ); + this.keyRef = keyRef; + this.hash = hash; + } + + public final int keyHash() { + return hash; + } + + public final Object keyRef() { + return keyRef; + } + } + + static final class SoftValueReference extends SoftReference implements KeyReference { + final Object keyRef; + final int hash; + + SoftValueReference(V value, Object keyRef, int hash, ReferenceQueue refQueue) { + super( value, refQueue ); + this.keyRef = keyRef; + this.hash = hash; + } + + public final int keyHash() { + return hash; + } + + public final Object keyRef() { + return keyRef; + } + } + + /** + * ConcurrentReferenceHashMap list entry. Note that this is never exported + * out as a user-visible Map.Entry. + * + * Because the value field is volatile, not final, it is legal wrt + * the Java Memory Model for an unsynchronized reader to see null + * instead of initial value when read via a data race. Although a + * reordering leading to this is not likely to ever actually + * occur, the Segment.readValueUnderLock method is used as a + * backup in case a null (pre-initialized) value is ever seen in + * an unsynchronized access method. 
+ */ + static final class HashEntry { + final Object keyRef; + final int hash; + volatile Object valueRef; + final HashEntry next; + + HashEntry(K key, int hash, HashEntry next, V value, + ReferenceType keyType, ReferenceType valueType, + ReferenceQueue refQueue) { + this.hash = hash; + this.next = next; + this.keyRef = newKeyReference( key, keyType, refQueue ); + this.valueRef = newValueReference( value, valueType, refQueue ); + } + + final Object newKeyReference(K key, ReferenceType keyType, + ReferenceQueue refQueue) { + if ( keyType == ReferenceType.WEAK ) { + return new WeakKeyReference( key, hash, refQueue ); + } + if ( keyType == ReferenceType.SOFT ) { + return new SoftKeyReference( key, hash, refQueue ); + } + + return key; + } + + final Object newValueReference(V value, ReferenceType valueType, + ReferenceQueue refQueue) { + if ( valueType == ReferenceType.WEAK ) { + return new WeakValueReference( value, keyRef, hash, refQueue ); + } + if ( valueType == ReferenceType.SOFT ) { + return new SoftValueReference( value, keyRef, hash, refQueue ); + } + + return value; + } + + @SuppressWarnings("unchecked") + final K key() { + if ( keyRef instanceof KeyReference ) { + return ( (Reference) keyRef ).get(); + } + + return (K) keyRef; + } + + final V value() { + return dereferenceValue( valueRef ); + } + + @SuppressWarnings("unchecked") + final V dereferenceValue(Object value) { + if ( value instanceof KeyReference ) { + return ( (Reference) value ).get(); + } + + return (V) value; + } + + final void setValue(V value, ReferenceType valueType, ReferenceQueue refQueue) { + this.valueRef = newValueReference( value, valueType, refQueue ); + } + + @SuppressWarnings("unchecked") + static final HashEntry[] newArray(int i) { + return new HashEntry[i]; + } + } + + /** + * Segments are specialized versions of hash tables. This + * subclasses from ReentrantLock opportunistically, just to + * simplify some locking and avoid separate construction. + */ + static final class Segment extends ReentrantLock implements Serializable { + /* + * Segments maintain a table of entry lists that are ALWAYS + * kept in a consistent state, so can be read without locking. + * Next fields of nodes are immutable (final). All list + * additions are performed at the front of each bin. This + * makes it easy to check changes, and also fast to traverse. + * When nodes would otherwise be changed, new nodes are + * created to replace them. This works well for hash tables + * since the bin lists tend to be short. (The average length + * is less than two for the default load factor threshold.) + * + * Read operations can thus proceed without locking, but rely + * on selected uses of volatiles to ensure that completed + * write operations performed by other threads are + * noticed. For most purposes, the "count" field, tracking the + * number of elements, serves as that volatile variable + * ensuring visibility. This is convenient because this field + * needs to be read in many read operations anyway: + * + * - All (unsynchronized) read operations must first read the + * "count" field, and should not look at table entries if + * it is 0. + * + * - All (synchronized) write operations should write to + * the "count" field after structurally changing any bin. + * The operations must not take any action that could even + * momentarily cause a concurrent read operation to see + * inconsistent data. This is made easier by the nature of + * the read operations in Map. 
For example, no operation + * can reveal that the table has grown but the threshold + * has not yet been updated, so there are no atomicity + * requirements for this with respect to reads. + * + * As a guide, all critical volatile reads and writes to the + * count field are marked in code comments. + */ + + private static final long serialVersionUID = 2249069246763182397L; + + /** + * The number of elements in this segment's region. + */ + transient volatile int count; + + /** + * Number of updates that alter the size of the table. This is + * used during bulk-read methods to make sure they see a + * consistent snapshot: If modCounts change during a traversal + * of segments computing size or checking containsValue, then + * we might have an inconsistent view of state so (usually) + * must retry. + */ + transient int modCount; + + /** + * The table is rehashed when its size exceeds this threshold. + * (The value of this field is always (int)(capacity * + * loadFactor).) + */ + transient int threshold; + + /** + * The per-segment table. + */ + transient volatile HashEntry[] table; + + /** + * The load factor for the hash table. Even though this value + * is same for all segments, it is replicated to avoid needing + * links to outer object. + * + * @serial + */ + final float loadFactor; + + /** + * The collected weak-key reference queue for this segment. + * This should be (re)initialized whenever table is assigned, + */ + transient volatile ReferenceQueue refQueue; + + final ReferenceType keyType; + + final ReferenceType valueType; + + final boolean identityComparisons; + + Segment(int initialCapacity, float lf, ReferenceType keyType, + ReferenceType valueType, boolean identityComparisons) { + loadFactor = lf; + this.keyType = keyType; + this.valueType = valueType; + this.identityComparisons = identityComparisons; + setTable( HashEntry.newArray( initialCapacity ) ); + } + + @SuppressWarnings("unchecked") + static final Segment[] newArray(int i) { + return new Segment[i]; + } + + private boolean keyEq(Object src, Object dest) { + return identityComparisons ? src == dest : src.equals( dest ); + } + + /** + * Sets table to new HashEntry array. + * Call only while holding lock or in constructor. + */ + void setTable(HashEntry[] newTable) { + threshold = (int) ( newTable.length * loadFactor ); + table = newTable; + refQueue = new ReferenceQueue(); + } + + /** + * Returns properly casted first entry of bin for given hash. + */ + HashEntry getFirst(int hash) { + HashEntry[] tab = table; + return tab[hash & ( tab.length - 1 )]; + } + + HashEntry newHashEntry(K key, int hash, HashEntry next, V value) { + return new HashEntry( key, hash, next, value, keyType, valueType, refQueue ); + } + + /** + * Reads value field of an entry under lock. Called if value + * field ever appears to be null. This is possible only if a + * compiler happens to reorder a HashEntry initialization with + * its table assignment, which is legal under memory model + * but is not known to ever occur. 
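// Illustrative sketch (editorial aside, not part of the patch): the JDK
// mechanism behind the per-segment refQueue described above. A weak (or soft)
// reference registered with a ReferenceQueue is enqueued once its referent
// has been collected; removeStale() further down polls such a queue and
// purges the matching entries. System.gc() is only a best-effort hint here.
import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;

public class ReferenceQueueDemo {
    public static void main(String[] args) throws InterruptedException {
        ReferenceQueue<Object> refQueue = new ReferenceQueue<Object>();
        Object key = new Object();
        WeakReference<Object> ref = new WeakReference<Object>( key, refQueue );

        key = null;     // drop the last strong reference to the key
        System.gc();    // hint; reclamation timing is up to the JVM

        // blocks until the cleared reference is enqueued, or 5s elapse
        Reference<?> stale = refQueue.remove( 5000 );
        System.out.println( "enqueued the cleared reference: " + ( stale == ref ) );
        System.out.println( "referent after collection: " + ref.get() );  // null once cleared
    }
}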
+ */ + V readValueUnderLock(HashEntry e) { + lock(); + try { + removeStale(); + return e.value(); + } + finally { + unlock(); + } + } + + /* Specialized implementations of map methods */ + + V get(Object key, int hash) { + if ( count != 0 ) { // read-volatile + HashEntry e = getFirst( hash ); + while ( e != null ) { + if ( e.hash == hash && keyEq( key, e.key() ) ) { + Object opaque = e.valueRef; + if ( opaque != null ) { + return e.dereferenceValue( opaque ); + } + + return readValueUnderLock( e ); // recheck + } + e = e.next; + } + } + return null; + } + + boolean containsKey(Object key, int hash) { + if ( count != 0 ) { // read-volatile + HashEntry e = getFirst( hash ); + while ( e != null ) { + if ( e.hash == hash && keyEq( key, e.key() ) ) { + return true; + } + e = e.next; + } + } + return false; + } + + boolean containsValue(Object value) { + if ( count != 0 ) { // read-volatile + HashEntry[] tab = table; + int len = tab.length; + for ( int i = 0; i < len; i++ ) { + for ( HashEntry e = tab[i]; e != null; e = e.next ) { + Object opaque = e.valueRef; + V v; + + if ( opaque == null ) { + v = readValueUnderLock( e ); // recheck + } + else { + v = e.dereferenceValue( opaque ); + } + + if ( value.equals( v ) ) { + return true; + } + } + } + } + return false; + } + + boolean replace(K key, int hash, V oldValue, V newValue) { + lock(); + try { + removeStale(); + HashEntry e = getFirst( hash ); + while ( e != null && ( e.hash != hash || !keyEq( key, e.key() ) ) ) { + e = e.next; + } + + boolean replaced = false; + if ( e != null && oldValue.equals( e.value() ) ) { + replaced = true; + e.setValue( newValue, valueType, refQueue ); + } + return replaced; + } + finally { + unlock(); + } + } + + V replace(K key, int hash, V newValue) { + lock(); + try { + removeStale(); + HashEntry e = getFirst( hash ); + while ( e != null && ( e.hash != hash || !keyEq( key, e.key() ) ) ) { + e = e.next; + } + + V oldValue = null; + if ( e != null ) { + oldValue = e.value(); + e.setValue( newValue, valueType, refQueue ); + } + return oldValue; + } + finally { + unlock(); + } + } + + + V put(K key, int hash, V value, boolean onlyIfAbsent) { + lock(); + try { + removeStale(); + int c = count; + if ( c++ > threshold ) {// ensure capacity + int reduced = rehash(); + if ( reduced > 0 ) // adjust from possible weak cleanups + { + count = ( c -= reduced ) - 1; // write-volatile + } + } + + HashEntry[] tab = table; + int index = hash & ( tab.length - 1 ); + HashEntry first = tab[index]; + HashEntry e = first; + while ( e != null && ( e.hash != hash || !keyEq( key, e.key() ) ) ) { + e = e.next; + } + + V oldValue; + if ( e != null ) { + oldValue = e.value(); + if ( !onlyIfAbsent ) { + e.setValue( value, valueType, refQueue ); + } + } + else { + oldValue = null; + ++modCount; + tab[index] = newHashEntry( key, hash, first, value ); + count = c; // write-volatile + } + return oldValue; + } + finally { + unlock(); + } + } + + int rehash() { + HashEntry[] oldTable = table; + int oldCapacity = oldTable.length; + if ( oldCapacity >= MAXIMUM_CAPACITY ) { + return 0; + } + + /* + * Reclassify nodes in each list to new Map. Because we are + * using power-of-two expansion, the elements from each bin + * must either stay at same index, or move with a power of two + * offset. We eliminate unnecessary node creation by catching + * cases where old nodes can be reused because their next + * fields won't change. Statistically, at the default + * threshold, only about one-sixth of them need cloning when + * a table doubles. 
The nodes they replace will be garbage + * collectable as soon as they are no longer referenced by any + * reader thread that may be in the midst of traversing table + * right now. + */ + + HashEntry[] newTable = HashEntry.newArray( oldCapacity << 1 ); + threshold = (int) ( newTable.length * loadFactor ); + int sizeMask = newTable.length - 1; + int reduce = 0; + for ( int i = 0; i < oldCapacity; i++ ) { + // We need to guarantee that any existing reads of old Map can + // proceed. So we cannot yet null out each bin. + HashEntry e = oldTable[i]; + + if ( e != null ) { + HashEntry next = e.next; + int idx = e.hash & sizeMask; + + // Single node on list + if ( next == null ) { + newTable[idx] = e; + } + + else { + // Reuse trailing consecutive sequence at same slot + HashEntry lastRun = e; + int lastIdx = idx; + for ( HashEntry last = next; + last != null; + last = last.next ) { + int k = last.hash & sizeMask; + if ( k != lastIdx ) { + lastIdx = k; + lastRun = last; + } + } + newTable[lastIdx] = lastRun; + // Clone all remaining nodes + for ( HashEntry p = e; p != lastRun; p = p.next ) { + // Skip GC'd weak refs + K key = p.key(); + if ( key == null ) { + reduce++; + continue; + } + int k = p.hash & sizeMask; + HashEntry n = newTable[k]; + newTable[k] = newHashEntry( key, p.hash, n, p.value() ); + } + } + } + } + table = newTable; + return reduce; + } + + /** + * Remove; match on key only if value null, else match both. + */ + V remove(Object key, int hash, Object value, boolean refRemove) { + lock(); + try { + if ( !refRemove ) { + removeStale(); + } + int c = count - 1; + HashEntry[] tab = table; + int index = hash & ( tab.length - 1 ); + HashEntry first = tab[index]; + HashEntry e = first; + // a ref remove operation compares the Reference instance + while ( e != null && key != e.keyRef + && ( refRemove || hash != e.hash || !keyEq( key, e.key() ) ) ) { + e = e.next; + } + + V oldValue = null; + if ( e != null ) { + V v = e.value(); + if ( value == null || value.equals( v ) ) { + oldValue = v; + // All entries following removed node can stay + // in list, but all preceding ones need to be + // cloned. + ++modCount; + HashEntry newFirst = e.next; + for ( HashEntry p = first; p != e; p = p.next ) { + K pKey = p.key(); + if ( pKey == null ) { // Skip GC'd keys + c--; + continue; + } + + newFirst = newHashEntry( pKey, p.hash, newFirst, p.value() ); + } + tab[index] = newFirst; + count = c; // write-volatile + } + } + return oldValue; + } + finally { + unlock(); + } + } + + final void removeStale() { + KeyReference ref; + while ( ( ref = (KeyReference) refQueue.poll() ) != null ) { + remove( ref.keyRef(), ref.keyHash(), null, true ); + } + } + + void clear() { + if ( count != 0 ) { + lock(); + try { + HashEntry[] tab = table; + for ( int i = 0; i < tab.length; i++ ) { + tab[i] = null; + } + ++modCount; + // replace the reference queue to avoid unnecessary stale cleanups + refQueue = new ReferenceQueue(); + count = 0; // write-volatile + } + finally { + unlock(); + } + } + } + } + + + /* ---------------- Public operations -------------- */ + + /** + * Creates a new, empty map with the specified initial + * capacity, reference types, load factor and concurrency level. + * + * Behavioral changing options such as {@link Option#IDENTITY_COMPARISONS} + * can also be specified. + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements. + * @param loadFactor the load factor threshold, used to control resizing. 
+ * Resizing may be performed when the average number of elements per + * bin exceeds this threshold. + * @param concurrencyLevel the estimated number of concurrently + * updating threads. The implementation performs internal sizing + * to try to accommodate this many threads. + * @param keyType the reference type to use for keys + * @param valueType the reference type to use for values + * @param options the behavioral options + * + * @throws IllegalArgumentException if the initial capacity is + * negative or the load factor or concurrencyLevel are + * nonpositive. + */ + public ConcurrentReferenceHashMap(int initialCapacity, + float loadFactor, int concurrencyLevel, + ReferenceType keyType, ReferenceType valueType, + EnumSet
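// Illustrative usage sketch (editorial aside, not part of the patch) for the
// constructor documented above: a map with strongly referenced keys and
// weakly referenced values. Passing null for the trailing options argument
// (meaning "no behavioral options") and the entity-name key used below are
// assumptions of this demo; the constructor arguments follow the @param list
// in the javadoc above.
import org.hibernate.internal.util.collections.ConcurrentReferenceHashMap;
import org.hibernate.internal.util.collections.ConcurrentReferenceHashMap.ReferenceType;

public class WeakValueCacheSketch {
    public static void main(String[] args) {
        ConcurrentReferenceHashMap<String, Object> cache =
                new ConcurrentReferenceHashMap<String, Object>(
                        16,                    // initialCapacity
                        .75f,                  // loadFactor
                        1,                     // concurrencyLevel
                        ReferenceType.STRONG,  // keys held strongly
                        ReferenceType.WEAK,    // values held weakly
                        null                   // no behavioral options (assumed to be accepted)
                );

        Object value = new Object();
        cache.put( "com.example.Entity#1", value );
        System.out.println( cache.get( "com.example.Entity#1" ) == value );  // true while strongly reachable

        value = null;   // once unreachable, the weakly held value may be reclaimed
        System.gc();    // hint only
        System.out.println( cache.get( "com.example.Entity#1" ) );           // may print null after collection
    }
}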