From c654277e62a66cdf60f18bd3f33ac8672d748738 Mon Sep 17 00:00:00 2001
From: Steve Ebersole
+ *
+ * The Session is a factory for Criteria.
+ * Criterion instances are usually obtained via
+ * the factory methods on Restrictions. eg.
+ *
+ * List cats = session.createCriteria(Cat.class)
+ * .add( Restrictions.like("name", "Iz%") )
+ * .add( Restrictions.gt( "weight", new Float(minWeight) ) )
+ * .addOrder( Order.asc("age") )
+ * .list();
+ *
+ * You may navigate associations using createAlias() or
+ * createCriteria().
+ *
+ * List cats = session.createCriteria(Cat.class)
+ * .createCriteria("kittens")
+ * .add( Restrictions.like("name", "Iz%") )
+ * .list();
+ *
+ *
+ * List cats = session.createCriteria(Cat.class)
+ * .createAlias("kittens", "kit")
+ * .add( Restrictions.like("kit.name", "Iz%") )
+ * .list();
+ *
+ * You may specify projection and aggregation using Projection
+ * instances obtained via the factory methods on Projections.
+ *
+ * List cats = session.createCriteria(Cat.class)
+ * .setProjection( Projections.projectionList()
+ * .add( Projections.rowCount() )
+ * .add( Projections.avg("weight") )
+ * .add( Projections.max("weight") )
+ * .add( Projections.min("weight") )
+ * .add( Projections.groupProperty("color") )
+ * )
+ * .addOrder( Order.asc("color") )
+ * .list();
+ *
+ *
+ * @see Session#createCriteria(java.lang.Class)
+ * @see org.hibernate.criterion.Restrictions
+ * @see org.hibernate.criterion.Projections
+ * @see org.hibernate.criterion.Order
+ * @see org.hibernate.criterion.Criterion
+ * @see org.hibernate.criterion.Projection
+ * @see org.hibernate.criterion.DetachedCriteria a disconnected version of this API
+ * @author Gavin King
+ */
+public interface Criteria extends CriteriaSpecification {
+
+ /**
+ * Get the alias of the entity encapsulated by this criteria instance.
+ *
+ * @return The alias for the encapsulated entity.
+ */
+ public String getAlias();
+
+ /**
+ * Used to specify that the query results will be a projection (scalar in
+ * nature). Implicitly specifies the {@link #PROJECTION} result transformer.
+ *
+ * The individual components contained within the given
+ * {@link Projection projection} determine the overall "shape" of the
+ * query result.
+ *
+ * @param projection The projection representing the overall "shape" of the
+ * query results.
+ * @return this (for method chaining)
+ */
+ public Criteria setProjection(Projection projection);
+
+ /**
+ * Add a {@link Criterion restriction} to constrain the results to be
+ * retrieved.
+ *
+ * @param criterion The {@link Criterion criterion} object representing the
+ * restriction to be applied.
+ * @return this (for method chaining)
+ */
+ public Criteria add(Criterion criterion);
+
+ /**
+ * Add an {@link Order ordering} to the result set.
+ *
+ * @param order The {@link Order order} object representing an ordering
+ * to be applied to the results.
+ * @return this (for method chaining)
+ */
+ public Criteria addOrder(Order order);
+
+ /**
+ * Specify an association fetching strategy for an association or a
+ * collection of values.
+ *
+ * @param associationPath a dot separated property path
+ * @param mode The fetch mode for the referenced association
+ * @return this (for method chaining)
+ */
+ public Criteria setFetchMode(String associationPath, FetchMode mode) throws HibernateException;
+
+ /**
+ * Set the lock mode of the current entity
+ *
+ * @param lockMode The lock mode to be applied
+ * @return this (for method chaining)
+ */
+ public Criteria setLockMode(LockMode lockMode);
+
+ /**
+ * Set the lock mode of the aliased entity
+ *
+ * @param alias The previously assigned alias representing the entity to
+ * which the given lock mode should apply.
+ * @param lockMode The lock mode to be applied
+ * @return this (for method chaining)
+ */
+ public Criteria setLockMode(String alias, LockMode lockMode);
+
+ /**
+ * Join an association, assigning an alias to the joined association.
+ *
+ * Functionally equivalent to {@link #createAlias(String, String, int)} using
+ * {@link #INNER_JOIN} for the joinType.
+ *
+ * @param associationPath A dot-separated property path
+ * @param alias The alias to assign to the joined association (for later reference).
+ * @return this (for method chaining)
+ */
+ public Criteria createAlias(String associationPath, String alias) throws HibernateException;
+
+ /**
+ * Join an association using the specified join-type, assigning an alias
+ * to the joined association.
+ *
+ * The joinType is expected to be one of {@link #INNER_JOIN} (the default),
+ * {@link #FULL_JOIN}, or {@link #LEFT_JOIN}.
+ *
+ * @param associationPath A dot-separated property path
+ * @param alias The alias to assign to the joined association (for later reference).
+ * @param joinType The type of join to use.
+ * @return this (for method chaining)
+ */
+ public Criteria createAlias(String associationPath, String alias, int joinType) throws HibernateException;
+
+ /**
+ * Create a new Criteria, "rooted" at the associated entity.
+ *
+ * Functionally equivalent to {@link #createCriteria(String, int)} using
+ * {@link #INNER_JOIN} for the joinType.
+ *
+ * @param associationPath A dot-separated property path
+ * @return the created "sub criteria"
+ */
+ public Criteria createCriteria(String associationPath) throws HibernateException;
+
+ /**
+ * Create a new Criteria, "rooted" at the associated entity, using the
+ * specified join type.
+ *
+ * @param associationPath A dot-separated property path
+ * @param joinType The type of join to use.
+ * @return the created "sub criteria"
+ */
+ public Criteria createCriteria(String associationPath, int joinType) throws HibernateException;
+
+ /**
+ * Create a new Criteria, "rooted" at the associated entity,
+ * assigning the given alias.
+ *
+ * Functionally equivalent to {@link #createCriteria(String, String, int)} using
+ * {@link #INNER_JOIN} for the joinType.
+ *
+ * @param associationPath A dot-separated property path
+ * @param alias The alias to assign to the joined association (for later reference).
+ * @return the created "sub criteria"
+ */
+ public Criteria createCriteria(String associationPath, String alias) throws HibernateException;
+
+ /**
+ * Create a new Criteria, "rooted" at the associated entity,
+ * assigning the given alias and using the specified join type.
+ *
+ * @param associationPath A dot-separated property path
+ * @param alias The alias to assign to the joined association (for later reference).
+ * @param joinType The type of join to use.
+ * @return the created "sub criteria"
+ */
+ public Criteria createCriteria(String associationPath, String alias, int joinType) throws HibernateException;
+
+ /**
+ * Set a strategy for handling the query results. This determines the
+ * "shape" of the query result.
+ *
+ * @param resultTransformer The transformer to apply
+ * @return this (for method chaining)
+ *
+ * @see #ROOT_ENTITY
+ * @see #DISTINCT_ROOT_ENTITY
+ * @see #ALIAS_TO_ENTITY_MAP
+ * @see #PROJECTION
+ */
+ public Criteria setResultTransformer(ResultTransformer resultTransformer);
+
+ /**
+ * Set a limit upon the number of objects to be retrieved.
+ *
+ * @param maxResults the maximum number of results
+ * @return this (for method chaining)
+ */
+ public Criteria setMaxResults(int maxResults);
+
+ /**
+ * Set the first result to be retrieved.
+ *
+ * @param firstResult the first result to retrieve, numbered from 0
+ * @return this (for method chaining)
+ */
+ public Criteria setFirstResult(int firstResult);
+
+ /**
+ * Set a fetch size for the underlying JDBC query.
+ *
+ * @param fetchSize the fetch size
+ * @return this (for method chaining)
+ *
+ * @see java.sql.Statement#setFetchSize
+ */
+ public Criteria setFetchSize(int fetchSize);
+
+ /**
+ * Set a timeout for the underlying JDBC query.
+ *
+ * @param timeout The timeout value to apply.
+ * @return this (for method chaining)
+ *
+ * @see java.sql.Statement#setQueryTimeout
+ */
+ public Criteria setTimeout(int timeout);
+
+ /**
+ * Enable caching of this query result, provided query caching is enabled
+ * for the underlying session factory.
+ *
+ * @param cacheable Should the result be considered cacheable; default is
+ * to not cache (false).
+ * @return this (for method chaining)
+ */
+ public Criteria setCacheable(boolean cacheable);
+
+ /**
+ * Set the name of the cache region to use for query result caching.
+ *
+ * @param cacheRegion the name of a query cache region, or null
+ * for the default query cache
+ * @return this (for method chaining)
+ *
+ * @see #setCacheable
+ */
+ public Criteria setCacheRegion(String cacheRegion);
+
+ /**
+ * Add a comment to the generated SQL.
+ *
+ * @param comment a human-readable string
+ * @return this (for method chaining)
+ */
+ public Criteria setComment(String comment);
+
+ /**
+ * Override the flush mode for this particular query.
+ *
+ * @param flushMode The flush mode to use.
+ * @return this (for method chaining)
+ */
+ public Criteria setFlushMode(FlushMode flushMode);
+
+ /**
+ * Override the cache mode for this particular query.
+ *
+ * @param cacheMode The cache mode to use.
+ * @return this (for method chaining)
+ */
+ public Criteria setCacheMode(CacheMode cacheMode);
+
+ /**
+ * Get the results.
+ *
+ * @return The list of matched query results.
+ */
+ public List list() throws HibernateException;
+
+ /**
+ * Get the results as an instance of {@link ScrollableResults}
+ *
+ * @return The {@link ScrollableResults} representing the matched
+ * query results.
+ */
+ public ScrollableResults scroll() throws HibernateException;
+
+ /**
+ * Get the results as an instance of {@link ScrollableResults} based on the
+ * given scroll mode.
+ *
+ * @param scrollMode Indicates the type of underlying database cursor to
+ * request.
+ * @return The {@link ScrollableResults} representing the matched
+ * query results.
+ */
+ public ScrollableResults scroll(ScrollMode scrollMode) throws HibernateException;
+
+ /**
+ * Convenience method to return a single instance that matches
+ * the query, or null if the query returns no results.
+ *
+ * @return the single result or null
+ * @throws HibernateException if there is more than one matching result
+ */
+ public Object uniqueResult() throws HibernateException;
+
+}
\ No newline at end of file
diff --git a/src/org/hibernate/DuplicateMappingException.java b/src/org/hibernate/DuplicateMappingException.java
new file mode 100644
index 0000000000..ef936261d0
--- /dev/null
+++ b/src/org/hibernate/DuplicateMappingException.java
@@ -0,0 +1,32 @@
+package org.hibernate;
+
+/**
+ * Raised whenever a duplicate for a certain type occurs.
+ * Duplicate class, table, property name etc.
+ *
+ * @author Max Rydahl Andersen
+ *
+ */
+public class DuplicateMappingException extends MappingException {
+
+ private final String name;
+ private final String type;
+
+ public DuplicateMappingException(String customMessage, String type, String name) {
+ super(customMessage);
+ this.type=type;
+ this.name=name;
+ }
+
+ public DuplicateMappingException(String type, String name) {
+ this("Duplicate " + type + " mapping " + name, type, name);
+ }
+
+ public String getType() {
+ return type;
+ }
+
+ public String getName() {
+ return name;
+ }
+}
diff --git a/src/org/hibernate/EmptyInterceptor.java b/src/org/hibernate/EmptyInterceptor.java
new file mode 100755
index 0000000000..307d399523
--- /dev/null
+++ b/src/org/hibernate/EmptyInterceptor.java
@@ -0,0 +1,98 @@
+//$Id$
+package org.hibernate;
+
+import java.io.Serializable;
+import java.util.Iterator;
+
+import org.hibernate.type.Type;
+
+/**
+ * An interceptor that does nothing. May be used as a base class
+ * for application-defined custom interceptors.
+ *
+ * @author Gavin King
+ */
+public class EmptyInterceptor implements Interceptor, Serializable {
+
+ public static final Interceptor INSTANCE = new EmptyInterceptor();
+
+ protected EmptyInterceptor() {}
+
+ public void onDelete(
+ Object entity,
+ Serializable id,
+ Object[] state,
+ String[] propertyNames,
+ Type[] types) {}
+
+ public boolean onFlushDirty(
+ Object entity,
+ Serializable id,
+ Object[] currentState,
+ Object[] previousState,
+ String[] propertyNames,
+ Type[] types) {
+ return false;
+ }
+
+ public boolean onLoad(
+ Object entity,
+ Serializable id,
+ Object[] state,
+ String[] propertyNames,
+ Type[] types) {
+ return false;
+ }
+
+ public boolean onSave(
+ Object entity,
+ Serializable id,
+ Object[] state,
+ String[] propertyNames,
+ Type[] types) {
+ return false;
+ }
+
+ public void postFlush(Iterator entities) {}
+ public void preFlush(Iterator entities) {}
+
+ public Boolean isTransient(Object entity) {
+ return null;
+ }
+
+ public Object instantiate(String entityName, EntityMode entityMode, Serializable id) {
+ return null;
+ }
+
+ public int[] findDirty(Object entity,
+ Serializable id,
+ Object[] currentState,
+ Object[] previousState,
+ String[] propertyNames,
+ Type[] types) {
+ return null;
+ }
+
+ public String getEntityName(Object object) {
+ return null;
+ }
+
+ public Object getEntity(String entityName, Serializable id) {
+ return null;
+ }
+
+ public void afterTransactionBegin(Transaction tx) {}
+ public void afterTransactionCompletion(Transaction tx) {}
+ public void beforeTransactionCompletion(Transaction tx) {}
+
+ public String onPrepareStatement(String sql) {
+ return sql;
+ }
+
+ public void onCollectionRemove(Object collection, Serializable key) throws CallbackException {}
+
+ public void onCollectionRecreate(Object collection, Serializable key) throws CallbackException {}
+
+ public void onCollectionUpdate(Object collection, Serializable key) throws CallbackException {}
+
+}
\ No newline at end of file
diff --git a/src/org/hibernate/EntityMode.java b/src/org/hibernate/EntityMode.java
new file mode 100644
index 0000000000..b1d27e4ab4
--- /dev/null
+++ b/src/org/hibernate/EntityMode.java
@@ -0,0 +1,49 @@
+// $Id$
+package org.hibernate;
+
+import java.util.Map;
+import java.util.HashMap;
+import java.io.Serializable;
+
+/**
+ * Defines the representation modes available for entities.
+ *
+ * @author Steve Ebersole
+ */
+public class EntityMode implements Serializable {
+
+ private static final Map INSTANCES = new HashMap();
+
+ public static final EntityMode POJO = new EntityMode( "pojo" );
+ public static final EntityMode DOM4J = new EntityMode( "dom4j" );
+ public static final EntityMode MAP = new EntityMode( "dynamic-map" );
+
+ static {
+ INSTANCES.put( POJO.name, POJO );
+ INSTANCES.put( DOM4J.name, DOM4J );
+ INSTANCES.put( MAP.name, MAP );
+ }
+
+ private final String name;
+
+ public EntityMode(String name) {
+ this.name = name;
+ }
+
+ public String toString() {
+ return name;
+ }
+
+ private Object readResolve() {
+ return INSTANCES.get( name );
+ }
+
+ public static EntityMode parse(String name) {
+ EntityMode rtn = ( EntityMode ) INSTANCES.get( name );
+ if ( rtn == null ) {
+ // default is POJO
+ rtn = POJO;
+ }
+ return rtn;
+ }
+}
diff --git a/src/org/hibernate/FetchMode.java b/src/org/hibernate/FetchMode.java
new file mode 100644
index 0000000000..ae990bf5ac
--- /dev/null
+++ b/src/org/hibernate/FetchMode.java
@@ -0,0 +1,70 @@
+//$Id$
+package org.hibernate;
+
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Represents an association fetching strategy. This is used
+ * together with the Criteria API to specify runtime
+ * fetching strategies.
+ *
+ * For HQL queries, use the FETCH keyword instead.
+ *
+ * @see Criteria#setFetchMode(java.lang.String, FetchMode)
+ * @author Gavin King
+ */
+public final class FetchMode implements Serializable {
+ private final String name;
+ private static final Map INSTANCES = new HashMap();
+
+ private FetchMode(String name) {
+ this.name=name;
+ }
+ public String toString() {
+ return name;
+ }
+ /**
+ * Default to the setting configured in the mapping file.
+ */
+ public static final FetchMode DEFAULT = new FetchMode("DEFAULT");
+
+ /**
+ * Fetch using an outer join. Equivalent to fetch="join".
+ */
+ public static final FetchMode JOIN = new FetchMode("JOIN");
+ /**
+ * Fetch eagerly, using a separate select. Equivalent to
+ * fetch="select".
+ */
+ public static final FetchMode SELECT = new FetchMode("SELECT");
+
+ /**
+ * Fetch lazily. Equivalent to outer-join="false".
+ * @deprecated use FetchMode.SELECT
+ */
+ public static final FetchMode LAZY = SELECT;
+ /**
+ * Fetch eagerly, using an outer join. Equivalent to
+ * outer-join="true".
+ * @deprecated use FetchMode.JOIN
+ */
+ public static final FetchMode EAGER = JOIN;
+
+ static {
+ INSTANCES.put( JOIN.name, JOIN );
+ INSTANCES.put( SELECT.name, SELECT );
+ INSTANCES.put( DEFAULT.name, DEFAULT );
+ }
+
+ private Object readResolve() {
+ return INSTANCES.get(name);
+ }
+
+}
+
+
+
+
+
diff --git a/src/org/hibernate/Filter.java b/src/org/hibernate/Filter.java
new file mode 100644
index 0000000000..692ef2b5f9
--- /dev/null
+++ b/src/org/hibernate/Filter.java
@@ -0,0 +1,68 @@
+// $Id$
+package org.hibernate;
+
+import org.hibernate.engine.FilterDefinition;
+
+import java.util.Collection;
+
+/**
+ * Type definition of Filter. Filter defines the user's view into enabled dynamic filters,
+ * allowing them to set filter parameter values.
+ *
+ * @author Steve Ebersole
+ */
+public interface Filter {
+
+ /**
+ * Get the name of this filter.
+ *
+ * @return This filter's name.
+ */
+ public String getName();
+
+ /**
+ * Get the filter definition containing additional information about the
+ * filter (such as default-condition and expected parameter names/types).
+ *
+ * @return The filter definition
+ */
+ public FilterDefinition getFilterDefinition();
+
+
+ /**
+ * Set the named parameter's value for this filter.
+ *
+ * @param name The parameter's name.
+ * @param value The value to be applied.
+ * @return This FilterImpl instance (for method chaining).
+ */
+ public Filter setParameter(String name, Object value);
+
+ /**
+ * Set the named parameter's value list for this filter. Used
+ * in conjunction with IN-style filter criteria.
+ *
+ * @param name The parameter's name.
+ * @param values The values to be expanded into an SQL IN list.
+ * @return This FilterImpl instance (for method chaining).
+ */
+ public Filter setParameterList(String name, Collection values);
+
+ /**
+ * Set the named parameter's value list for this filter. Used
+ * in conjunction with IN-style filter criteria.
+ *
+ * @param name The parameter's name.
+ * @param values The values to be expanded into an SQL IN list.
+ * @return This FilterImpl instance (for method chaining).
+ */
+ public Filter setParameterList(String name, Object[] values);
+
+ /**
+ * Perform validation of the filter state. This is used to verify the
+ * state of the filter after its enablement and before its use.
+ *
+ * @throws HibernateException If the state is not currently valid.
+ */
+ public void validate() throws HibernateException;
+}
diff --git a/src/org/hibernate/FlushMode.java b/src/org/hibernate/FlushMode.java
new file mode 100644
index 0000000000..3520eaa0fd
--- /dev/null
+++ b/src/org/hibernate/FlushMode.java
@@ -0,0 +1,92 @@
+//$Id$
+package org.hibernate;
+
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Represents a flushing strategy. The flush process synchronizes
+ * database state with session state by detecting state changes
+ * and executing SQL statements.
+ *
+ * @see Session#setFlushMode(FlushMode)
+ * @see Query#setFlushMode(FlushMode)
+ * @see Criteria#setFlushMode(FlushMode)
+ *
+ * @author Gavin King
+ */
+public final class FlushMode implements Serializable {
+ private static final Map INSTANCES = new HashMap();
+
+ private final int level;
+ private final String name;
+
+ private FlushMode(int level, String name) {
+ this.level = level;
+ this.name = name;
+ }
+
+ public String toString() {
+ return name;
+ }
+
+ /**
+ * The {@link Session} is never flushed unless {@link Session#flush}
+ * is explicitly called by the application. This mode is very
+ * efficient for read only transactions.
+ *
+ * @deprecated use {@link #MANUAL} instead.
+ */
+ public static final FlushMode NEVER = new FlushMode( 0, "NEVER" );
+
+ /**
+ * The {@link Session} is only ever flushed when {@link Session#flush}
+ * is explicitly called by the application. This mode is very
+ * efficient for read only transactions.
+ */
+ public static final FlushMode MANUAL = new FlushMode( 0, "MANUAL" );
+
+ /**
+ * The {@link Session} is flushed when {@link Transaction#commit}
+ * is called.
+ */
+ public static final FlushMode COMMIT = new FlushMode(5, "COMMIT");
+
+ /**
+ * The {@link Session} is sometimes flushed before query execution
+ * in order to ensure that queries never return stale state. This
+ * is the default flush mode.
+ */
+ public static final FlushMode AUTO = new FlushMode(10, "AUTO");
+
+ /**
+ * The {@link Session} is flushed before every query. This is
+ * almost always unnecessary and inefficient.
+ */
+ public static final FlushMode ALWAYS = new FlushMode(20, "ALWAYS");
+
+ public boolean lessThan(FlushMode other) {
+ return this.level < other.level;
+ }
+
+ static {
+ INSTANCES.put( NEVER.name, NEVER );
+ INSTANCES.put( MANUAL.name, MANUAL );
+ INSTANCES.put( AUTO.name, AUTO );
+ INSTANCES.put( ALWAYS.name, ALWAYS );
+ INSTANCES.put( COMMIT.name, COMMIT );
+ }
+
+ private Object readResolve() {
+ return INSTANCES.get( name );
+ }
+
+}
diff --git a/src/org/hibernate/Interceptor.java b/src/org/hibernate/Interceptor.java
new file mode 100644
--- /dev/null
+++ b/src/org/hibernate/Interceptor.java
+//$Id$
+package org.hibernate;
+
+import java.io.Serializable;
+import java.util.Iterator;
+
+import org.hibernate.type.Type;
+
+/**
+ * Allows user code to inspect and/or change property values.
+ * Inspection occurs before property values are written and after they are read
+ * from the database.
+ *
+ * There might be a single instance of Interceptor for a SessionFactory, or a new instance
+ * might be specified for each Session. Whichever approach is used, the interceptor must be
+ * serializable if the Session is to be serializable. This means that SessionFactory-scoped
+ * interceptors should implement readResolve().
+ *
+ * The Session may not be invoked from a callback (nor may a callback cause a collection or proxy to
+ * be lazily initialized).
+ *
+ * Instead of implementing this interface directly, it is usually better to extend EmptyInterceptor
+ * and override only the callback methods of interest.
+ *
+ * @see SessionFactory#openSession(Interceptor)
+ * @see org.hibernate.cfg.Configuration#setInterceptor(Interceptor)
+ * @see EmptyInterceptor
+ * @author Gavin King
+ */
+public interface Interceptor {
+ /**
+ * Called just before an object is initialized. The interceptor may change the state, which will
+ * be propagated to the persistent object. Note that when this method is called, entity will be
+ * an empty uninitialized instance of the class.
+ *
+ * @return true if the user modified the state in any way.
+ */
+ public boolean onLoad(Object entity, Serializable id, Object[] state, String[] propertyNames, Type[] types) throws CallbackException;
+ /**
+ * Called when an object is detected to be dirty, during a flush. The interceptor may modify the detected
+ * currentState, which will be propagated to both the database and the persistent object.
+ * Note that not all flushes end in actual synchronization with the database, in which case the
+ * new currentState will be propagated to the object, but not necessarily (immediately) to
+ * the database. It is strongly recommended that the interceptor not modify the previousState.
+ *
+ * @return true if the user modified the currentState in any way.
+ */
+ public boolean onFlushDirty(Object entity, Serializable id, Object[] currentState, Object[] previousState, String[] propertyNames, Type[] types) throws CallbackException;
+ /**
+ * Called before an object is saved. The interceptor may modify the state, which will be used for
+ * the SQL INSERT and propagated to the persistent object.
+ *
+ * @return true if the user modified the state in any way.
+ */
+ public boolean onSave(Object entity, Serializable id, Object[] state, String[] propertyNames, Type[] types) throws CallbackException;
+ /**
+ * Called before an object is deleted. It is not recommended that the interceptor modify the state.
+ */
+ public void onDelete(Object entity, Serializable id, Object[] state, String[] propertyNames, Type[] types) throws CallbackException;
+ /**
+ * Called before a collection is (re)created.
+ */
+ public void onCollectionRecreate(Object collection, Serializable key) throws CallbackException;
+ /**
+ * Called before a collection is deleted.
+ */
+ public void onCollectionRemove(Object collection, Serializable key) throws CallbackException;
+ /**
+ * Called before a collection is updated.
+ */
+ public void onCollectionUpdate(Object collection, Serializable key) throws CallbackException;
+ /**
+ * Called before a flush
+ */
+ public void preFlush(Iterator entities) throws CallbackException;
+ /**
+ * Called after a flush that actually ends in execution of the SQL statements required to synchronize
+ * in-memory state with the database.
+ */
+ public void postFlush(Iterator entities) throws CallbackException;
+ /**
+ * Called to distinguish between transient and detached entities. The return value determines the
+ * state of the entity with respect to the current session.
+ *
+ *
+ * @param entity a transient or detached entity
+ * @return Boolean or null to choose default behaviour
+ */
+ public Boolean isTransient(Object entity);
+ /**
+ * Called from flush(). The return value determines whether the entity is updated
+ *
+ *
+ * @param entity a persistent entity
+ * @return array of dirty property indices or null to choose default behaviour
+ */
+ public int[] findDirty(Object entity, Serializable id, Object[] currentState, Object[] previousState, String[] propertyNames, Type[] types);
+ /**
+ * Instantiate the entity class. Return null to indicate that Hibernate should use
+ * the default constructor of the class. The identifier property of the returned instance
+ * should be initialized with the given identifier.
+ *
+ * @param entityName the name of the entity
+ * @param entityMode The type of entity instance to be returned.
+ * @param id the identifier of the new instance
+ * @return an instance of the class, or null to choose default behaviour
+ */
+ public Object instantiate(String entityName, EntityMode entityMode, Serializable id) throws CallbackException;
+
+ /**
+ * Get the entity name for a persistent or transient instance
+ * @param object an entity instance
+ * @return the name of the entity
+ */
+ public String getEntityName(Object object) throws CallbackException;
+
+ /**
+ * Get a fully loaded entity instance that is cached externally
+ * @param entityName the name of the entity
+ * @param id the instance identifier
+ * @return a fully initialized entity
+ * @throws CallbackException
+ */
+ public Object getEntity(String entityName, Serializable id) throws CallbackException;
+
+ /**
+ * Called when a Hibernate transaction is begun via the Hibernate Transaction
+ * API. Will not be called if transactions are being controlled via some other
+ * mechanism (CMT, for example).
+ */
+ public void afterTransactionBegin(Transaction tx);
+ /**
+ * Called before a transaction is committed (but not before rollback).
+ */
+ public void beforeTransactionCompletion(Transaction tx);
+ /**
+ * Called after a transaction is committed or rolled back.
+ */
+ public void afterTransactionCompletion(Transaction tx);
+
+ /**
+ * Called when sql string is being prepared.
+ * @param sql sql to be prepared
+ * @return original or modified sql
+ */
+ public String onPrepareStatement(String sql);
+}
diff --git a/src/org/hibernate/InvalidMappingException.java b/src/org/hibernate/InvalidMappingException.java
new file mode 100644
index 0000000000..23e6eef967
--- /dev/null
+++ b/src/org/hibernate/InvalidMappingException.java
@@ -0,0 +1,42 @@
+package org.hibernate;
+
+/**
+ * Thrown when a mapping is found to be invalid.
+ * Similar to MappingException, but this contains more info about the path and type of mapping (e.g. file, resource or url)
+ *
+ * @author Max Rydahl Andersen
+ *
+ */
+public class InvalidMappingException extends MappingException {
+
+ private final String path;
+ private final String type;
+
+ public InvalidMappingException(String customMessage, String type, String path, Throwable cause) {
+ super(customMessage, cause);
+ this.type=type;
+ this.path=path;
+ }
+
+ public InvalidMappingException(String customMessage, String type, String path) {
+ super(customMessage);
+ this.type=type;
+ this.path=path;
+ }
+
+ public InvalidMappingException(String type, String path) {
+ this("Could not parse mapping document from " + type + (path==null?"":" " + path), type, path);
+ }
+
+ public InvalidMappingException(String type, String path, Throwable cause) {
+ this("Could not parse mapping document from " + type + (path==null?"":" " + path), type, path, cause);
+ }
+
+ public String getType() {
+ return type;
+ }
+
+ public String getPath() {
+ return path;
+ }
+}
diff --git a/src/org/hibernate/JDBCException.java b/src/org/hibernate/JDBCException.java
new file mode 100644
index 0000000000..c258c975d7
--- /dev/null
+++ b/src/org/hibernate/JDBCException.java
@@ -0,0 +1,63 @@
+//$Id$
+package org.hibernate;
+
+import java.sql.SQLException;
+
+
+/**
+ * Wraps an SQLException. Indicates that an exception
+ * occurred during a JDBC call.
+ *
+ * @see java.sql.SQLException
+ * @author Gavin King
+ */
+public class JDBCException extends HibernateException {
+
+ private SQLException sqle;
+ private String sql;
+
+ public JDBCException(String string, SQLException root) {
+ super(string, root);
+ sqle=root;
+ }
+
+ public JDBCException(String string, SQLException root, String sql) {
+ this(string, root);
+ this.sql = sql;
+ }
+
+ /**
+ * Get the SQLState of the underlying SQLException.
+ * @see java.sql.SQLException
+ * @return String
+ */
+ public String getSQLState() {
+ return sqle.getSQLState();
+ }
+
+ /**
+ * Get the errorCode of the underlying SQLException.
+ * @see java.sql.SQLException
+ * @return int the error code
+ */
+ public int getErrorCode() {
+ return sqle.getErrorCode();
+ }
+
+ /**
+ * Get the underlying SQLException.
+ * @return SQLException
+ */
+ public SQLException getSQLException() {
+ return sqle;
+ }
+
+ /**
+ * Get the actual SQL statement that caused the exception
+ * (may be null)
+ */
+ public String getSQL() {
+ return sql;
+ }
+
+}
diff --git a/src/org/hibernate/LazyInitializationException.java b/src/org/hibernate/LazyInitializationException.java
new file mode 100644
index 0000000000..87dfea4314
--- /dev/null
+++ b/src/org/hibernate/LazyInitializationException.java
@@ -0,0 +1,28 @@
+//$Id$
+package org.hibernate;
+
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * Indicates access to unfetched data outside of a session context.
+ * For example, when an uninitialized proxy or collection is accessed
+ * after the session was closed.
+ *
+ * @see Hibernate#initialize(java.lang.Object)
+ * @see Hibernate#isInitialized(java.lang.Object)
+ * @author Gavin King
+ */
+public class LazyInitializationException extends HibernateException {
+
+ public LazyInitializationException(String msg) {
+ super(msg);
+ LogFactory.getLog(LazyInitializationException.class).error(msg, this);
+ }
+
+}
+
+
+
+
+
+
diff --git a/src/org/hibernate/LockMode.java b/src/org/hibernate/LockMode.java
new file mode 100644
index 0000000000..b144e6ec6c
--- /dev/null
+++ b/src/org/hibernate/LockMode.java
@@ -0,0 +1,106 @@
+//$Id$
+package org.hibernate;
+
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Instances represent a lock mode for a row of a relational
+ * database table. It is not intended that users spend much
+ * time worrying about locking since Hibernate usually
+ * obtains exactly the right lock level automatically.
+ * Some "advanced" users may wish to explicitly specify lock
+ * levels.
+ *
+ * @see Session#lock(Object,LockMode)
+ * @author Gavin King
+ */
+public final class LockMode implements Serializable {
+
+    /** Relative restrictiveness; a higher level locks more strictly. */
+    private final int level;
+
+    /** Canonical name, used by toString() and parse(). */
+    private final String name;
+
+    /** Registry of every instance, keyed by name, backing parse(). */
+    private static final Map INSTANCES = new HashMap();
+
+    private LockMode(int level, String name) {
+        this.level = level;
+        this.name = name;
+    }
+
+    /**
+     * No lock required. If an object is requested with this lock
+     * mode, a READ lock will be obtained if it is necessary to
+     * actually read the state from the database, rather than pull
+     * it from a cache.
+     *
+     * This is the "default" lock mode.
+     */
+    public static final LockMode NONE = new LockMode( 0, "NONE" );
+
+    /**
+     * A shared lock. Objects in this lock mode were read from
+     * the database in the current transaction, rather than being
+     * pulled from a cache.
+     */
+    public static final LockMode READ = new LockMode( 5, "READ" );
+
+    /**
+     * An upgrade lock. Objects loaded in this lock mode are
+     * materialized using an SQL select ... for update.
+     */
+    public static final LockMode UPGRADE = new LockMode( 10, "UPGRADE" );
+
+    /**
+     * Attempt to obtain an upgrade lock, using an Oracle-style
+     * select for update nowait. The semantics of this lock mode,
+     * once obtained, are the same as UPGRADE.
+     */
+    public static final LockMode UPGRADE_NOWAIT = new LockMode( 10, "UPGRADE_NOWAIT" );
+
+    /**
+     * A WRITE lock is obtained when an object is updated or inserted.
+     * This lock mode is for internal use only and is not a valid mode
+     * for load() or lock() (both of which throw exceptions if WRITE is
+     * specified).
+     */
+    public static final LockMode WRITE = new LockMode( 10, "WRITE" );
+
+    /**
+     * Similar to {@link #UPGRADE} except that, for versioned entities,
+     * it results in a forced version increment.
+     */
+    public static final LockMode FORCE = new LockMode( 15, "FORCE" );
+
+    static {
+        // Register every instance so parse()/readResolve() can resolve names.
+        INSTANCES.put( NONE.name, NONE );
+        INSTANCES.put( READ.name, READ );
+        INSTANCES.put( UPGRADE.name, UPGRADE );
+        INSTANCES.put( UPGRADE_NOWAIT.name, UPGRADE_NOWAIT );
+        INSTANCES.put( WRITE.name, WRITE );
+        INSTANCES.put( FORCE.name, FORCE );
+    }
+
+    /**
+     * Check if this lock mode is more restrictive than the given lock mode.
+     *
+     * @param mode LockMode to check
+     * @return true if this lock mode is more restrictive than given lock mode
+     */
+    public boolean greaterThan(LockMode mode) {
+        return level > mode.level;
+    }
+
+    /**
+     * Check if this lock mode is less restrictive than the given lock mode.
+     *
+     * @param mode LockMode to check
+     * @return true if this lock mode is less restrictive than given lock mode
+     */
+    public boolean lessThan(LockMode mode) {
+        return level < mode.level;
+    }
+
+    public String toString() {
+        return name;
+    }
+
+    /**
+     * Look up a LockMode by its canonical name.
+     *
+     * @param name the name, e.g. "UPGRADE"
+     * @return the singleton instance, or null if the name is unknown
+     */
+    public static LockMode parse(String name) {
+        return ( LockMode ) INSTANCES.get( name );
+    }
+
+    // Replace deserialized copies with the canonical singletons so that
+    // identity comparison (==) remains valid across serialization.
+    private Object readResolve() {
+        return parse( name );
+    }
+}
diff --git a/src/org/hibernate/MappingException.java b/src/org/hibernate/MappingException.java
new file mode 100644
index 0000000000..ce00409584
--- /dev/null
+++ b/src/org/hibernate/MappingException.java
@@ -0,0 +1,31 @@
+//$Id$
+package org.hibernate;
+
+/**
+ * An exception that usually occurs at configuration time, rather
+ * than runtime, as a result of something screwy in the O-R mappings.
+ *
+ * @author Gavin King
+ */
+
+public class MappingException extends HibernateException {
+
+    /**
+     * @param msg description of the mapping problem
+     * @param root underlying cause
+     */
+    public MappingException(String msg, Throwable root) {
+        super( msg, root );
+    }
+
+    /**
+     * @param root underlying cause
+     */
+    public MappingException(Throwable root) {
+        super( root );
+    }
+
+    /**
+     * @param s description of the mapping problem
+     */
+    public MappingException(String s) {
+        super( s );
+    }
+
+}
+
+
+
+
+
+
diff --git a/src/org/hibernate/MappingNotFoundException.java b/src/org/hibernate/MappingNotFoundException.java
new file mode 100644
index 0000000000..f5701098b5
--- /dev/null
+++ b/src/org/hibernate/MappingNotFoundException.java
@@ -0,0 +1,41 @@
+package org.hibernate;
+
+/**
+ * Thrown when a resource for a mapping could not be found.
+ *
+ * @author Max Rydahl Andersen
+ *
+ */
+public class MappingNotFoundException extends MappingException {
+
+    /** Path of the mapping resource that could not be located. */
+    private final String path;
+
+    /** Kind of resource that was searched for (e.g. "resource", "file"). */
+    private final String type;
+
+    public MappingNotFoundException(String customMessage, String type, String path, Throwable cause) {
+        super( customMessage, cause );
+        this.type = type;
+        this.path = path;
+    }
+
+    public MappingNotFoundException(String customMessage, String type, String path) {
+        super( customMessage );
+        this.type = type;
+        this.path = path;
+    }
+
+    /**
+     * Builds the default "type: path not found" message.
+     */
+    public MappingNotFoundException(String type, String path) {
+        this( type + ": " + path + " not found", type, path );
+    }
+
+    /**
+     * Builds the default "type: path not found" message, with a cause.
+     */
+    public MappingNotFoundException(String type, String path, Throwable cause) {
+        this( type + ": " + path + " not found", type, path, cause );
+    }
+
+    public String getType() {
+        return type;
+    }
+
+    public String getPath() {
+        return path;
+    }
+}
diff --git a/src/org/hibernate/NonUniqueObjectException.java b/src/org/hibernate/NonUniqueObjectException.java
new file mode 100644
index 0000000000..825b2e45fa
--- /dev/null
+++ b/src/org/hibernate/NonUniqueObjectException.java
@@ -0,0 +1,44 @@
+//$Id$
+package org.hibernate;
+
+import java.io.Serializable;
+
+import org.hibernate.pretty.MessageHelper;
+
+/**
+ * This exception is thrown when an operation would
+ * break session-scoped identity. This occurs if the
+ * user tries to associate two different instances of
+ * the same Java class with a particular identifier,
+ * in the scope of a single Session.
+ *
+ * @author Gavin King
+ */
+public class NonUniqueObjectException extends HibernateException {
+
+    /** Identifier shared by the two colliding instances. */
+    private final Serializable identifier;
+
+    /** Entity name of the colliding instances. */
+    private final String entityName;
+
+    public NonUniqueObjectException(String message, Serializable id, String clazz) {
+        super( message );
+        this.identifier = id;
+        this.entityName = clazz;
+    }
+
+    public NonUniqueObjectException(Serializable id, String clazz) {
+        this( "a different object with the same identifier value was already associated with the session", id, clazz );
+    }
+
+    public Serializable getIdentifier() {
+        return identifier;
+    }
+
+    public String getEntityName() {
+        return entityName;
+    }
+
+    /**
+     * Append the offending entity and identifier to the base message.
+     */
+    public String getMessage() {
+        return super.getMessage() + ": " + MessageHelper.infoString( entityName, identifier );
+    }
+
+}
diff --git a/src/org/hibernate/NonUniqueResultException.java b/src/org/hibernate/NonUniqueResultException.java
new file mode 100644
index 0000000000..dc50cef3c9
--- /dev/null
+++ b/src/org/hibernate/NonUniqueResultException.java
@@ -0,0 +1,17 @@
+//$Id$
+package org.hibernate;
+
+/**
+ * Thrown when the application calls Query.uniqueResult() and
+ * the query returned more than one result. Unlike all other Hibernate
+ * exceptions, this one is recoverable!
+ *
+ * @author Gavin King
+ */
+public class NonUniqueResultException extends HibernateException {
+
+    /**
+     * @param resultCount the number of rows the query actually returned
+     */
+    public NonUniqueResultException(int resultCount) {
+        super( "query did not return a unique result: " + resultCount );
+    }
+
+}
diff --git a/src/org/hibernate/ObjectDeletedException.java b/src/org/hibernate/ObjectDeletedException.java
new file mode 100644
index 0000000000..28492eeec2
--- /dev/null
+++ b/src/org/hibernate/ObjectDeletedException.java
@@ -0,0 +1,25 @@
+//$Id$
+package org.hibernate;
+
+import java.io.Serializable;
+
+/**
+ * Thrown when the user tries to do something illegal with a deleted
+ * object.
+ *
+ * @author Gavin King
+ */
+public class ObjectDeletedException extends UnresolvableObjectException {
+
+    /**
+     * @param message description of the illegal operation
+     * @param identifier identifier of the deleted entity
+     * @param clazz entity name of the deleted entity
+     */
+    public ObjectDeletedException(String message, Serializable identifier, String clazz) {
+        super( message, identifier, clazz );
+    }
+
+}
+
+
+
+
+
+
+
diff --git a/src/org/hibernate/ObjectNotFoundException.java b/src/org/hibernate/ObjectNotFoundException.java
new file mode 100644
index 0000000000..db31c3d51b
--- /dev/null
+++ b/src/org/hibernate/ObjectNotFoundException.java
@@ -0,0 +1,24 @@
+//$Id$
+package org.hibernate;
+
+import java.io.Serializable;
+
+/**
+ * Thrown when Session.load() fails to select a row with
+ * the given primary key (identifier value). This exception might not
+ * be thrown when load() is called, even if there was no
+ * row on the database, because load() returns a proxy if
+ * possible. Applications should use Session.get() to test if
+ * a row exists in the database.
+ *
+ * Like all Hibernate exceptions, this exception is considered
+ * unrecoverable.
+ *
+ * @author Gavin King
+ */
+public class ObjectNotFoundException extends UnresolvableObjectException {
+
+    /**
+     * @param identifier the identifier for which no row was found
+     * @param clazz the entity name
+     */
+    public ObjectNotFoundException(Serializable identifier, String clazz) {
+        super( identifier, clazz );
+    }
+
+}
diff --git a/src/org/hibernate/PersistentObjectException.java b/src/org/hibernate/PersistentObjectException.java
new file mode 100644
index 0000000000..2f9a27c1d2
--- /dev/null
+++ b/src/org/hibernate/PersistentObjectException.java
@@ -0,0 +1,21 @@
+//$Id$
+package org.hibernate;
+
+/**
+ * Thrown when the user passes a persistent instance to a Session
+ * method that expects a transient instance.
+ *
+ * @author Gavin King
+ */
+public class PersistentObjectException extends HibernateException {
+
+    /**
+     * @param s description of the offending call
+     */
+    public PersistentObjectException(String s) {
+        super( s );
+    }
+
+}
+
+
+
+
+
+
diff --git a/src/org/hibernate/PropertyAccessException.java b/src/org/hibernate/PropertyAccessException.java
new file mode 100644
index 0000000000..e16d0a723e
--- /dev/null
+++ b/src/org/hibernate/PropertyAccessException.java
@@ -0,0 +1,50 @@
+//$Id$
+package org.hibernate;
+
+import org.hibernate.util.StringHelper;
+
+/**
+ * A problem occurred accessing a property of an instance of a
+ * persistent class by reflection, or via CGLIB. There are a
+ * number of possible underlying causes, including: failure of a
+ * security check, an exception occurring inside the getter or setter,
+ * a nullable database column mapped to a primitive-type property, or
+ * a Hibernate type that is not castable to the property type.
+ *
+ * @author Gavin King
+ */
+public class PropertyAccessException extends HibernateException {
+
+ private final Class persistentClass;
+ private final String propertyName;
+ private final boolean wasSetter;
+
+ public PropertyAccessException(Throwable root, String s, boolean wasSetter, Class persistentClass, String propertyName) {
+ super(s, root);
+ this.persistentClass = persistentClass;
+ this.wasSetter = wasSetter;
+ this.propertyName = propertyName;
+ }
+
+ public Class getPersistentClass() {
+ return persistentClass;
+ }
+
+ public String getPropertyName() {
+ return propertyName;
+ }
+
+ public String getMessage() {
+ return super.getMessage() +
+ ( wasSetter ? " setter of " : " getter of ") +
+ StringHelper.qualify( persistentClass.getName(), propertyName );
+ }
+}
+
+
+
+
+
+
diff --git a/src/org/hibernate/PropertyNotFoundException.java b/src/org/hibernate/PropertyNotFoundException.java
new file mode 100644
index 0000000000..26467fb9bf
--- /dev/null
+++ b/src/org/hibernate/PropertyNotFoundException.java
@@ -0,0 +1,22 @@
+//$Id$
+package org.hibernate;
+
+/**
+ * Indicates that an expected getter or setter method could not be
+ * found on a class.
+ *
+ * @author Gavin King
+ */
+public class PropertyNotFoundException extends MappingException {
+
+    /**
+     * @param s description of the missing getter or setter
+     */
+    public PropertyNotFoundException(String s) {
+        super( s );
+    }
+
+}
+
+
+
+
+
+
diff --git a/src/org/hibernate/PropertyValueException.java b/src/org/hibernate/PropertyValueException.java
new file mode 100644
index 0000000000..27516a566c
--- /dev/null
+++ b/src/org/hibernate/PropertyValueException.java
@@ -0,0 +1,56 @@
+//$Id$
+package org.hibernate;
+
+import org.hibernate.util.StringHelper;
+
+/**
+ * Thrown when the (illegal) value of a property can not be persisted.
+ * There are two main causes: a property declared not-null is null,
+ * or an association references an unsaved transient instance.
+ *
+ * @author Gavin King
+ */
+public class PropertyValueException extends HibernateException {
+
+    /** Name of the entity owning the offending property. */
+    private final String entityName;
+
+    /** Name of the property whose value could not be persisted. */
+    private final String propertyName;
+
+    /**
+     * @param s description of the problem
+     * @param entityName the owning entity name
+     * @param propertyName the offending property name
+     */
+    public PropertyValueException(String s, String entityName, String propertyName) {
+        super(s);
+        this.entityName = entityName;
+        this.propertyName = propertyName;
+    }
+
+    public String getEntityName() {
+        return entityName;
+    }
+
+    public String getPropertyName() {
+        return propertyName;
+    }
+
+    /**
+     * Append the qualified property name to the base message.
+     */
+    public String getMessage() {
+        return super.getMessage() + ": " +
+                StringHelper.qualify(entityName, propertyName);
+    }
+
+    /**
+     * Return a well formed property path.
+     * Basically, it will return parent.child
+     *
+     * @param parent parent in path
+     * @param child child in path
+     * @return parent-child path
+     */
+    public static String buildPropertyPath(String parent, String child) {
+        // Plain concatenation; equivalent to (and clearer than) building
+        // the path through an intermediate StringBuffer.
+        return parent + '.' + child;
+    }
+}
+
+
+
+
+
+
diff --git a/src/org/hibernate/Query.java b/src/org/hibernate/Query.java
new file mode 100644
index 0000000000..ca3cc5c531
--- /dev/null
+++ b/src/org/hibernate/Query.java
@@ -0,0 +1,387 @@
+//$Id$
+package org.hibernate;
+
+import java.io.Serializable;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.Calendar;
+import java.util.Collection;
+import java.util.Date;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+import org.hibernate.transform.ResultTransformer;
+import org.hibernate.type.Type;
+
+/**
+ * An object-oriented representation of a Hibernate query. A Query
+ * instance is obtained by calling Session.createQuery(). This
+ * interface exposes some extra functionality beyond that provided by
+ * Session.iterate() and Session.find():
+ *
+ *
+ *
+ * Named query parameters are tokens of the form :name in the
+ * query string. A value is bound to the integer parameter
+ * :foo by calling
+ *
+ * setParameter("foo", foo, Hibernate.INTEGER);
+ *
+ * for example. A name may appear multiple times in the query string.
+ *
+ * JDBC-style ? parameters are also supported. To bind a
+ * value to a JDBC-style parameter use a set method that accepts an
+ * int positional argument (numbered from zero, contrary
+ * to JDBC).
+ *
+ * You may not mix and match JDBC-style parameters and named parameters
+ * in the same query.
+ *
+ * Queries are executed by calling list(), scroll() or
+ * iterate(). A query may be re-executed by subsequent invocations.
+ * Its lifespan is, however, bounded by the lifespan of the Session
+ * that created it.
+ *
+ * Implementors are not intended to be threadsafe.
+ *
+ * @see org.hibernate.Session#createQuery(java.lang.String)
+ * @see org.hibernate.ScrollableResults
+ * @author Gavin King
+ */
+public interface Query {
+ /**
+ * Get the query string.
+ *
+ * @return the query string
+ */
+ public String getQueryString();
+ /**
+ * Return the Hibernate types of the query result set.
+ * @return an array of types
+ */
+ public Type[] getReturnTypes() throws HibernateException;
+ /**
+ * Return the HQL select clause aliases (if any)
+ * @return an array of aliases as strings
+ */
+ public String[] getReturnAliases() throws HibernateException;
+ /**
+ * Return the names of all named parameters of the query.
+ * @return the parameter names, in no particular order
+ */
+ public String[] getNamedParameters() throws HibernateException;
+ /**
+ * Return the query results as an Iterator. If the query
+ * contains multiple results per row, the results are returned in
+ * an instance of Object[].
+ *
+ * Entities returned as results are initialized on demand. The first
+ * SQL query returns identifiers only.
+ *
+ * @return the result iterator
+ * @throws HibernateException
+ */
+ public Iterator iterate() throws HibernateException;
+ /**
+ * Return the query results as ScrollableResults. The
+ * scrollability of the returned results depends upon JDBC driver
+ * support for scrollable ResultSets.
+ *
+ * @see ScrollableResults
+ * @return the result iterator
+ * @throws HibernateException
+ */
+ public ScrollableResults scroll() throws HibernateException;
+ /**
+ * Return the query results as ScrollableResults. The
+ * scrollability of the returned results depends upon JDBC driver
+ * support for scrollable ResultSets.
+ *
+ * @see ScrollableResults
+ * @see ScrollMode
+ * @return the result iterator
+ * @throws HibernateException
+ */
+ public ScrollableResults scroll(ScrollMode scrollMode) throws HibernateException;
+ /**
+ * Return the query results as a List. If the query contains
+ * multiple results per row, the results are returned in an instance
+ * of Object[].
+ *
+ * @return the result list
+ * @throws HibernateException
+ */
+ public List list() throws HibernateException;
+ /**
+ * Convenience method to return a single instance that matches
+ * the query, or null if the query returns no results.
+ *
+ * @return the single result or null
+ * @throws NonUniqueResultException if there is more than one matching result
+ */
+ public Object uniqueResult() throws HibernateException;
+
+ /**
+ * Execute the update or delete statement.
+ *
+ * Session sess = factory.openSession(); + * Transaction tx; + * try { + * tx = sess.beginTransaction(); + * //do some work + * ... + * tx.commit(); + * } + * catch (Exception e) { + * if (tx!=null) tx.rollback(); + * throw e; + * } + * finally { + * sess.close(); + * } + *+ *
+ * Note that the second-level cache will be disabled if you + * supply a JDBC connection. Hibernate will not be able to track + * any statements you might have executed in the same transaction. + * Consider implementing your own ConnectionProvider. + * + * @param connection a connection provided by the application. + * @return Session + */ + public org.hibernate.classic.Session openSession(Connection connection); + + /** + * Create database connection and open a Session on it, specifying an + * interceptor. + * + * @param interceptor a session-scoped interceptor + * @return Session + * @throws HibernateException + */ + public org.hibernate.classic.Session openSession(Interceptor interceptor) throws HibernateException; + + /** + * Open a Session on the given connection, specifying an interceptor. + *
+ * Note that the second-level cache will be disabled if you + * supply a JDBC connection. Hibernate will not be able to track + * any statements you might have executed in the same transaction. + * Consider implementing your own ConnectionProvider. + * + * @param connection a connection provided by the application. + * @param interceptor a session-scoped interceptor + * @return Session + */ + public org.hibernate.classic.Session openSession(Connection connection, Interceptor interceptor); + + /** + * Create database connection and open a Session on it. + * + * @return Session + * @throws HibernateException + */ + public org.hibernate.classic.Session openSession() throws HibernateException; + + /** + * Obtains the current session. The definition of what exactly "current" + * means controlled by the {@link org.hibernate.context.CurrentSessionContext} impl configured + * for use. + *
+ * Note that for backwards compatibility, if a {@link org.hibernate.context.CurrentSessionContext} + * is not configured but a JTA {@link org.hibernate.transaction.TransactionManagerLookup} + * is configured this will default to the {@link org.hibernate.context.JTASessionContext} + * impl. + * + * @return The current session. + * @throws HibernateException Indicates an issue locating a suitable current session. + */ + public org.hibernate.classic.Session getCurrentSession() throws HibernateException; + + /** + * Get the ClassMetadata associated with the given entity class + * + * @see org.hibernate.metadata.ClassMetadata + */ + public ClassMetadata getClassMetadata(Class persistentClass) throws HibernateException; + + /** + * Get the ClassMetadata associated with the given entity name + * + * @see org.hibernate.metadata.ClassMetadata + * @since 3.0 + */ + public ClassMetadata getClassMetadata(String entityName) throws HibernateException; + + /** + * Get the CollectionMetadata associated with the named collection role + * + * @see org.hibernate.metadata.CollectionMetadata + */ + public CollectionMetadata getCollectionMetadata(String roleName) throws HibernateException; + + + /** + * Get all ClassMetadata as a Map from entityname String + * to metadata object + * + * @see org.hibernate.metadata.ClassMetadata + * @return a map from String an entity name to ClassMetaData + * @since 3.0 changed key from Class to String + */ + public Map getAllClassMetadata() throws HibernateException; + + /** + * Get all CollectionMetadata as a Map from role name + * to metadata object + * + * @see org.hibernate.metadata.CollectionMetadata + * @return a map from String to CollectionMetadata + */ + public Map getAllCollectionMetadata() throws HibernateException; + + /** + * Get the statistics for this session factory + */ + public Statistics getStatistics(); + + /** + * Destroy this SessionFactory and release all resources (caches, + * connection pools, etc). 
It is the responsibility of the application + * to ensure that there are no open Sessions before calling + * close(). + */ + public void close() throws HibernateException; + + /** + * Was this SessionFactory already closed? + */ + public boolean isClosed(); + + /** + * Evict all entries from the second-level cache. This method occurs outside + * of any transaction; it performs an immediate "hard" remove, so does not respect + * any transaction isolation semantics of the usage strategy. Use with care. + */ + public void evict(Class persistentClass) throws HibernateException; + /** + * Evict an entry from the second-level cache. This method occurs outside + * of any transaction; it performs an immediate "hard" remove, so does not respect + * any transaction isolation semantics of the usage strategy. Use with care. + */ + public void evict(Class persistentClass, Serializable id) throws HibernateException; + /** + * Evict all entries from the second-level cache. This method occurs outside + * of any transaction; it performs an immediate "hard" remove, so does not respect + * any transaction isolation semantics of the usage strategy. Use with care. + */ + public void evictEntity(String entityName) throws HibernateException; + /** + * Evict an entry from the second-level cache. This method occurs outside + * of any transaction; it performs an immediate "hard" remove, so does not respect + * any transaction isolation semantics of the usage strategy. Use with care. + */ + public void evictEntity(String entityName, Serializable id) throws HibernateException; + /** + * Evict all entries from the second-level cache. This method occurs outside + * of any transaction; it performs an immediate "hard" remove, so does not respect + * any transaction isolation semantics of the usage strategy. Use with care. + */ + public void evictCollection(String roleName) throws HibernateException; + /** + * Evict an entry from the second-level cache. 
This method occurs outside + * of any transaction; it performs an immediate "hard" remove, so does not respect + * any transaction isolation semantics of the usage strategy. Use with care. + */ + public void evictCollection(String roleName, Serializable id) throws HibernateException; + + /** + * Evict any query result sets cached in the default query cache region. + */ + public void evictQueries() throws HibernateException; + /** + * Evict any query result sets cached in the named query cache region. + */ + public void evictQueries(String cacheRegion) throws HibernateException; + /** + * Get a new stateless session. + */ + public StatelessSession openStatelessSession(); + /** + * Get a new stateless session for the given JDBC connection. + */ + public StatelessSession openStatelessSession(Connection connection); + + /** + * Obtain a set of the names of all filters defined on this SessionFactory. + * + * @return The set of filter names. + */ + public Set getDefinedFilterNames(); + + /** + * Obtain the definition of a filter by name. + * + * @param filterName The name of the filter for which to obtain the definition. + * @return The filter definition. + * @throws HibernateException If no filter defined with the given name. + */ + public FilterDefinition getFilterDefinition(String filterName) throws HibernateException; +} diff --git a/src/org/hibernate/StaleObjectStateException.java b/src/org/hibernate/StaleObjectStateException.java new file mode 100644 index 0000000000..851ab380c8 --- /dev/null +++ b/src/org/hibernate/StaleObjectStateException.java @@ -0,0 +1,45 @@ +//$Id$ +package org.hibernate; + +import java.io.Serializable; + +import org.hibernate.pretty.MessageHelper; + +/** + * A StaleStateException that carries information + * about a particular entity instance that was the source + * of the failure. 
+ * + * @author Gavin King + */ +public class StaleObjectStateException extends StaleStateException { + private final String entityName; + private final Serializable identifier; + + public StaleObjectStateException(String persistentClass, Serializable identifier) { + super("Row was updated or deleted by another transaction (or unsaved-value mapping was incorrect)"); + this.entityName = persistentClass; + this.identifier = identifier; + } + + public String getEntityName() { + return entityName; + } + + public Serializable getIdentifier() { + return identifier; + } + + public String getMessage() { + return super.getMessage() + ": " + + MessageHelper.infoString(entityName, identifier); + } + +} + + + + + + + diff --git a/src/org/hibernate/StaleStateException.java b/src/org/hibernate/StaleStateException.java new file mode 100755 index 0000000000..310cb294dc --- /dev/null +++ b/src/org/hibernate/StaleStateException.java @@ -0,0 +1,21 @@ +//$Id$ +package org.hibernate; + +/** + * Thrown when a version number or timestamp check failed, indicating that the + * Session contained stale data (when using long transactions + * with versioning). Also occurs if we try delete or update a row that does + * not exist.+ This package defines "actions" that are scheduled for + asycnchronous execution by the event listeners. +
+ + diff --git a/src/org/hibernate/bytecode/AbstractClassTransformerImpl.java b/src/org/hibernate/bytecode/AbstractClassTransformerImpl.java new file mode 100644 index 0000000000..ad457485a2 --- /dev/null +++ b/src/org/hibernate/bytecode/AbstractClassTransformerImpl.java @@ -0,0 +1,45 @@ +//$Id: $ +package org.hibernate.bytecode; + +import org.hibernate.bytecode.util.ClassFilter; +import org.hibernate.bytecode.util.FieldFilter; + +import java.security.ProtectionDomain; + +/** + * @author Emmanuel Bernard + * @author Steve Ebersole + */ +public abstract class AbstractClassTransformerImpl implements ClassTransformer { + + protected final ClassFilter classFilter; + protected final FieldFilter fieldFilter; + + protected AbstractClassTransformerImpl(ClassFilter classFilter, FieldFilter fieldFilter) { + this.classFilter = classFilter; + this.fieldFilter = fieldFilter; + } + + public byte[] transform( + ClassLoader loader, + String className, + Class classBeingRedefined, + ProtectionDomain protectionDomain, + byte[] classfileBuffer) { + // to be safe... + className = className.replace( '/', '.' 
); + if ( classFilter.shouldInstrumentClass( className ) ) { + return doTransform( loader, className, classBeingRedefined, protectionDomain, classfileBuffer ); + } + else { + return classfileBuffer; + } + } + + protected abstract byte[] doTransform( + ClassLoader loader, + String className, + Class classBeingRedefined, + ProtectionDomain protectionDomain, + byte[] classfileBuffer); +} diff --git a/src/org/hibernate/bytecode/BasicProxyFactory.java b/src/org/hibernate/bytecode/BasicProxyFactory.java new file mode 100644 index 0000000000..0bb5e582a9 --- /dev/null +++ b/src/org/hibernate/bytecode/BasicProxyFactory.java @@ -0,0 +1,10 @@ +package org.hibernate.bytecode; + +/** + * A proxy factory for "basic proxy" generation + * + * @author Steve Ebersole + */ +public interface BasicProxyFactory { + public Object getProxy(); +} diff --git a/src/org/hibernate/bytecode/BytecodeProvider.java b/src/org/hibernate/bytecode/BytecodeProvider.java new file mode 100644 index 0000000000..0f780d937a --- /dev/null +++ b/src/org/hibernate/bytecode/BytecodeProvider.java @@ -0,0 +1,49 @@ +package org.hibernate.bytecode; + +import org.hibernate.bytecode.util.ClassFilter; +import org.hibernate.bytecode.util.FieldFilter; + +/** + * Contract for providers of bytecode services to Hibernate. + * + * Bytecode requirements break down into basically 3 areasThis object provides methods that set/get multiple properties
+ * of a JavaBean at once. This class and its support classes have been
+ * developed for compatibility with cglib
+ * (http://cglib.sourceforge.net/).
+ *
+ * @author Muga Nishizawa
+ * @author modified by Shigeru Chiba
+ */
+public abstract class BulkAccessor implements Serializable {
+
+    /** The JavaBean class whose properties this accessor reads and writes. */
+    protected Class target;
+
+    /** Getter method names, parallel to {@link #types}. */
+    protected String[] getters;
+
+    /** Setter method names, parallel to {@link #types}. */
+    protected String[] setters;
+
+    /** Declared types of the accessed properties. */
+    protected Class[] types;
+
+    protected BulkAccessor() {
+    }
+
+    /**
+     * Obtains the values of properties of a given bean.
+     *
+     * @param bean JavaBean.
+     * @param values the obtained values are stored in this array.
+     */
+    public abstract void getPropertyValues(Object bean, Object[] values);
+
+    /**
+     * Sets properties of a given bean to specified values.
+     *
+     * @param bean JavaBean.
+     * @param values the values assigned to properties.
+     */
+    public abstract void setPropertyValues(Object bean, Object[] values);
+
+    /**
+     * Returns the values of properties of a given bean.
+     *
+     * @param bean JavaBean.
+     */
+    public Object[] getPropertyValues(Object bean) {
+        Object[] values = new Object[getters.length];
+        getPropertyValues( bean, values );
+        return values;
+    }
+
+    /**
+     * Returns the types of properties.
+     */
+    public Class[] getPropertyTypes() {
+        return ( Class[] ) types.clone();
+    }
+
+    /**
+     * Returns the getter names of the properties.
+     */
+    public String[] getGetters() {
+        return ( String[] ) getters.clone();
+    }
+
+    /**
+     * Returns the setter names of the properties.
+     */
+    public String[] getSetters() {
+        return ( String[] ) setters.clone();
+    }
+
+    /**
+     * Creates a new instance of BulkAccessor.
+     * The created instance provides methods for setting/getting
+     * specified properties at once.
+     *
+     * @param beanClass the class of the JavaBeans accessed
+     *                  through the created object.
+     * @param getters the names of getter methods for specified properties.
+     * @param setters the names of setter methods for specified properties.
+     * @param types the types of specified properties.
+     */
+    public static BulkAccessor create(
+            Class beanClass,
+            String[] getters,
+            String[] setters,
+            Class[] types) {
+        BulkAccessorFactory factory = new BulkAccessorFactory( beanClass, getters, setters, types );
+        return factory.create();
+    }
+}
diff --git a/src/org/hibernate/bytecode/javassist/BulkAccessorException.java b/src/org/hibernate/bytecode/javassist/BulkAccessorException.java
new file mode 100644
index 0000000000..497c282376
--- /dev/null
+++ b/src/org/hibernate/bytecode/javassist/BulkAccessorException.java
@@ -0,0 +1,78 @@
+package org.hibernate.bytecode.javassist;
+
+/**
+ * An exception thrown while generating a bulk accessor.
+ *
+ * @author Muga Nishizawa
+ * @author modified by Shigeru Chiba
+ */
+public class BulkAccessorException extends RuntimeException {
+ private Throwable myCause;
+
+ /**
+ * Gets the cause of this throwable, or null if none was recorded.
+ * Implemented locally for JDK 1.3 compatibility.
+ */
+ public Throwable getCause() {
+ return (myCause == this ? null : myCause);
+ }
+
+ /**
+ * Initializes the cause of this throwable.
+ * Implemented locally for JDK 1.3 compatibility.
+ */
+ public synchronized Throwable initCause(Throwable cause) {
+ myCause = cause;
+ return this;
+ }
+
+ private int index;
+
+ /**
+ * Constructs an exception with a detail message and no cause.
+ */
+ public BulkAccessorException(String message) {
+ super(message);
+ index = -1;
+ initCause(null);
+ }
+
+ /**
+ * Constructs an exception for a failure on a specific property.
+ *
+ * @param index the index of the property that causes an exception.
+ */
+ public BulkAccessorException(String message, int index) {
+ this(message + ": " + index);
+ this.index = index;
+ }
+
+ /**
+ * Constructs an exception with a detail message and a cause.
+ */
+ public BulkAccessorException(String message, Throwable cause) {
+ super(message);
+ index = -1;
+ initCause(cause);
+ }
+
+ /**
+ * Constructs an exception wrapping the cause of a failure on a specific property.
+ *
+ * @param index the index of the property that causes an exception.
+ */
+ public BulkAccessorException(Throwable cause, int index) {
+ this("Property " + index);
+ this.index = index;
+ initCause(cause);
+ }
+
+ /**
+ * Returns the index of the property that causes this exception.
+ *
+ * @return -1 if the index is not specified.
+ */
+ public int getIndex() {
+ return this.index;
+ }
+}
diff --git a/src/org/hibernate/bytecode/javassist/BulkAccessorFactory.java b/src/org/hibernate/bytecode/javassist/BulkAccessorFactory.java
new file mode 100644
index 0000000000..1821a8ac25
--- /dev/null
+++ b/src/org/hibernate/bytecode/javassist/BulkAccessorFactory.java
@@ -0,0 +1,388 @@
+package org.hibernate.bytecode.javassist;
+
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.security.ProtectionDomain;
+
+import javassist.CannotCompileException;
+import javassist.bytecode.AccessFlag;
+import javassist.bytecode.Bytecode;
+import javassist.bytecode.ClassFile;
+import javassist.bytecode.ConstPool;
+import javassist.bytecode.MethodInfo;
+import javassist.bytecode.Opcode;
+import javassist.util.proxy.FactoryHelper;
+import javassist.util.proxy.RuntimeSupport;
+
+/**
+ * A factory of bulk accessors.
+ *
+ * @author Muga Nishizawa
+ * @author modified by Shigeru Chiba
+ */
+class BulkAccessorFactory {
+ private static final String PACKAGE_NAME_PREFIX = "org.javassist.tmp.";
+ private static final String BULKACESSOR_CLASS_NAME = BulkAccessor.class.getName();
+ private static final String OBJECT_CLASS_NAME = Object.class.getName();
+ private static final String GENERATED_GETTER_NAME = "getPropertyValues";
+ private static final String GENERATED_SETTER_NAME = "setPropertyValues";
+ private static final String GET_SETTER_DESC = "(Ljava/lang/Object;[Ljava/lang/Object;)V";
+ private static final String THROWABLE_CLASS_NAME = Throwable.class.getName();
+ private static final String BULKEXCEPTION_CLASS_NAME = BulkAccessorException.class.getName();
+ // Suffix counter used to give each generated accessor class a unique name.
+ // NOTE(review): incremented in make() without synchronization -- confirm generation is single-threaded.
+ private static int counter = 0;
+
+ private Class targetBean;
+ private String[] getterNames;
+ private String[] setterNames;
+ private Class[] types;
+ // Optional directory to which generated class files are written (null = do not write).
+ public String writeDirectory;
+
+ /**
+ * Constructs a factory for the given bean class and its property accessors.
+ *
+ * @param target the class of the JavaBeans to be accessed.
+ * @param getterNames the getter method names, one per property.
+ * @param setterNames the setter method names, one per property.
+ * @param types the property types, one per property.
+ */
+ BulkAccessorFactory(
+ Class target,
+ String[] getterNames,
+ String[] setterNames,
+ Class[] types) {
+ this.targetBean = target;
+ this.getterNames = getterNames;
+ this.setterNames = setterNames;
+ this.types = types;
+ this.writeDirectory = null;
+ }
+
+ /**
+ * Generates, loads and instantiates a BulkAccessor subclass for the target bean.
+ * Any failure during generation or instantiation is rethrown as a
+ * BulkAccessorException carrying the original cause.
+ */
+ BulkAccessor create() {
+ Method[] getters = new Method[getterNames.length];
+ Method[] setters = new Method[setterNames.length];
+ findAccessors( targetBean, getterNames, setterNames, types, getters, setters );
+
+ Class beanClass;
+ try {
+ ClassFile classfile = make( getters, setters );
+ ClassLoader loader = this.getClassLoader();
+ if ( writeDirectory != null ) {
+ // debugging aid: dump the generated class file to disk
+ FactoryHelper.writeFile( classfile, writeDirectory );
+ }
+
+ beanClass = FactoryHelper.toClass( classfile, loader, getDomain() );
+ return ( BulkAccessor ) this.newInstance( beanClass );
+ }
+ catch ( Exception e ) {
+ throw new BulkAccessorException( e.getMessage(), e );
+ }
+ }
+
+ /**
+ * Returns the protection domain in which the generated class is defined:
+ * the target bean's domain when a target is set, otherwise this factory's own.
+ */
+ private ProtectionDomain getDomain() {
+ Class cl;
+ if ( this.targetBean != null ) {
+ cl = this.targetBean;
+ }
+ else {
+ cl = this.getClass();
+ }
+ return cl.getProtectionDomain();
+ }
+
+ /**
+ * Builds the ClassFile for the generated accessor: a public subclass of
+ * BulkAccessor with a default constructor and generated getPropertyValues /
+ * setPropertyValues implementations.
+ *
+ * @param getters the resolved getter methods.
+ * @param setters the resolved setter methods.
+ * @throws CannotCompileException if bytecode generation fails.
+ */
+ private ClassFile make(Method[] getters, Method[] setters) throws CannotCompileException {
+ String className = targetBean.getName();
+ // set the name of bulk accessor.
+ className = className + "_$$_bulkaccess_" + counter++;
+ if ( className.startsWith( "java." ) ) {
+ // user class loaders may not define classes in java.*;
+ // relocate the generated class under the tmp package prefix.
+ className = PACKAGE_NAME_PREFIX + className;
+ }
+
+ ClassFile classfile = new ClassFile( false, className, BULKACESSOR_CLASS_NAME );
+ classfile.setAccessFlags( AccessFlag.PUBLIC );
+ addDefaultConstructor( classfile );
+ addGetter( classfile, getters );
+ addSetter( classfile, setters );
+ return classfile;
+ }
+
+ /**
+ * Chooses the class loader used to define the generated class.
+ *
+ * NOTE(review): the condition returns the target bean's loader only when the
+ * target IS java.lang.Object, whose loader is null (bootstrap). This reads as
+ * if it were inverted -- confirm the intent before changing it.
+ */
+ private ClassLoader getClassLoader() {
+ if ( targetBean != null && targetBean.getName().equals( OBJECT_CLASS_NAME ) ) {
+ return targetBean.getClassLoader();
+ }
+ else {
+ return getClass().getClassLoader();
+ }
+ }
+
+ /**
+ * Instantiates the generated accessor class and copies the accessor metadata
+ * (target class, getter/setter names, property types) into its fields.
+ *
+ * @param type the generated BulkAccessor subclass.
+ * @throws Exception if the generated class cannot be instantiated.
+ */
+ private Object newInstance(Class type) throws Exception {
+ BulkAccessor instance = ( BulkAccessor ) type.newInstance();
+ instance.target = targetBean;
+ int len = getterNames.length;
+ instance.getters = new String[len];
+ instance.setters = new String[len];
+ instance.types = new Class[len];
+ for ( int i = 0; i < len; i++ ) {
+ instance.getters[i] = getterNames[i];
+ instance.setters[i] = setterNames[i];
+ instance.types[i] = types[i];
+ }
+
+ return instance;
+ }
+
+ /**
+ * Declares a constructor that takes no parameter.
+ *
+ * @param classfile the generated class to which the constructor is added.
+ * @throws CannotCompileException if the bytecode cannot be assembled.
+ */
+ private void addDefaultConstructor(ClassFile classfile) throws CannotCompileException {
+ ConstPool cp = classfile.getConstPool();
+ String cons_desc = "()V";
+ MethodInfo mi = new MethodInfo( cp, MethodInfo.nameInit, cons_desc );
+
+ Bytecode code = new Bytecode( cp, 0, 1 );
+ // aload_0
+ code.addAload( 0 );
+ // invokespecial -- delegate to the BulkAccessor super constructor
+ code.addInvokespecial( BulkAccessor.class.getName(), MethodInfo.nameInit, cons_desc );
+ // return
+ code.addOpcode( Opcode.RETURN );
+
+ mi.setCodeAttribute( code.toCodeAttribute() );
+ mi.setAccessFlags( AccessFlag.PUBLIC );
+ classfile.addMethod( mi );
+ }
+
+ /**
+ * Emits the generated getPropertyValues(Object, Object[]) implementation:
+ * it casts the bean to the target type, invokes each non-null getter and
+ * stores the (boxed, when primitive) result into the matching array slot.
+ *
+ * @param classfile the generated class to which the method is added.
+ * @param getters the resolved getter methods; null entries are skipped.
+ * @throws CannotCompileException if the bytecode cannot be assembled.
+ */
+ private void addGetter(ClassFile classfile, final Method[] getters) throws CannotCompileException {
+ ConstPool cp = classfile.getConstPool();
+ int target_type_index = cp.addClassInfo( this.targetBean.getName() );
+ String desc = GET_SETTER_DESC;
+ MethodInfo mi = new MethodInfo( cp, GENERATED_GETTER_NAME, desc );
+
+ Bytecode code = new Bytecode( cp, 6, 4 );
+ /* | this | bean | args | raw bean | */
+ // NOTE(review): getters.length >= 0 is always true; presumably "> 0" was meant.
+ // Harmless either way: with an empty array only a dead cast prologue is emitted.
+ if ( getters.length >= 0 ) {
+ // aload_1 // load bean
+ code.addAload( 1 );
+ // checkcast // cast bean
+ code.addCheckcast( this.targetBean.getName() );
+ // astore_3 // store bean
+ code.addAstore( 3 );
+ for ( int i = 0; i < getters.length; ++i ) {
+ if ( getters[i] != null ) {
+ Method getter = getters[i];
+ // aload_2 // args
+ code.addAload( 2 );
+ // iconst_i // continue to aastore
+ code.addIconst( i ); // growing stack is 1
+ Class returnType = getter.getReturnType();
+ int typeIndex = -1;
+ if ( returnType.isPrimitive() ) {
+ typeIndex = FactoryHelper.typeIndex( returnType );
+ // new // allocate the wrapper that will box the primitive result
+ code.addNew( FactoryHelper.wrapperTypes[typeIndex] );
+ // dup
+ code.addOpcode( Opcode.DUP );
+ }
+
+ // aload_3 // load the raw bean
+ code.addAload( 3 );
+ String getter_desc = RuntimeSupport.makeDescriptor( getter );
+ String getterName = getter.getName();
+ if ( this.targetBean.isInterface() ) {
+ // invokeinterface
+ code.addInvokeinterface( target_type_index, getterName, getter_desc, 1 );
+ }
+ else {
+ // invokevirtual
+ code.addInvokevirtual( target_type_index, getterName, getter_desc );
+ }
+
+ if ( typeIndex >= 0 ) { // is a primitive type
+ // invokespecial // call the wrapper constructor to box the value
+ code.addInvokespecial(
+ FactoryHelper.wrapperTypes[typeIndex],
+ MethodInfo.nameInit,
+ FactoryHelper.wrapperDesc[typeIndex]
+ );
+ }
+
+ // aastore // args
+ code.add( Opcode.AASTORE );
+ code.growStack( -3 );
+ }
+ }
+ }
+ // return
+ code.addOpcode( Opcode.RETURN );
+
+ mi.setCodeAttribute( code.toCodeAttribute() );
+ mi.setAccessFlags( AccessFlag.PUBLIC );
+ classfile.addMethod( mi );
+ }
+
+ private void addSetter(ClassFile classfile, final Method[] setters) throws CannotCompileException {
+ ConstPool cp = classfile.getConstPool();
+ int target_type_index = cp.addClassInfo( this.targetBean.getName() );
+ String desc = GET_SETTER_DESC;
+ MethodInfo mi = new MethodInfo( cp, GENERATED_SETTER_NAME, desc );
+
+ Bytecode code = new Bytecode( cp, 4, 6 );
+ /* | this | bean | args | i | raw bean | exception | */
+ if ( setters.length > 0 ) {
+ int start, end; // required to exception table
+ // iconst_0 // i
+ code.addIconst( 0 );
+ // istore_3 // store i
+ code.addIstore( 3 );
+ // aload_1 // load the bean
+ code.addAload( 1 );
+ // checkcast // cast the bean into a raw bean
+ code.addCheckcast( this.targetBean.getName() );
+ // astore 4 // store the raw bean
+ code.addAstore( 4 );
+ /* current stack len = 0 */
+ // start region to handling exception (BulkAccessorException)
+ start = code.currentPc();
+ int lastIndex = 0;
+ for ( int i = 0; i < setters.length; ++i ) {
+ if ( setters[i] != null ) {
+ int diff = i - lastIndex;
+ if ( diff > 0 ) {
+ // iinc 3, 1
+ code.addOpcode( Opcode.IINC );
+ code.add( 3 );
+ code.add( diff );
+ lastIndex = i;
+ }
+ }
+ /* current stack len = 0 */
+ // aload 4 // load the raw bean
+ code.addAload( 4 );
+ // aload_2 // load the args
+ code.addAload( 2 );
+ // iconst_i
+ code.addIconst( i );
+ // aaload
+ code.addOpcode( Opcode.AALOAD );
+ // checkcast
+ Class[] setterParamTypes = setters[i].getParameterTypes();
+ Class setterParamType = setterParamTypes[0];
+ if ( setterParamType.isPrimitive() ) {
+ // checkcast (case of primitive type)
+ // invokevirtual (case of primitive type)
+ this.addUnwrapper( classfile, code, setterParamType );
+ }
+ else {
+ // checkcast (case of reference type)
+ code.addCheckcast( setterParamType.getName() );
+ }
+ /* current stack len = 2 */
+ String rawSetterMethod_desc = RuntimeSupport.makeDescriptor( setters[i] );
+ if ( !this.targetBean.isInterface() ) {
+ // invokevirtual
+ code.addInvokevirtual( target_type_index, setters[i].getName(), rawSetterMethod_desc );
+ }
+ else {
+ // invokeinterface
+ Class[] params = setters[i].getParameterTypes();
+ int size;
+ if ( params[0].equals( Double.TYPE ) || params[0].equals( Long.TYPE ) ) {
+ size = 3;
+ }
+ else {
+ size = 2;
+ }
+
+ code.addInvokeinterface( target_type_index, setters[i].getName(), rawSetterMethod_desc, size );
+ }
+ }
+
+ // end region to handling exception (BulkAccessorException)
+ end = code.currentPc();
+ // return
+ code.addOpcode( Opcode.RETURN );
+ /* current stack len = 0 */
+ // register in exception table
+ int throwableType_index = cp.addClassInfo( THROWABLE_CLASS_NAME );
+ code.addExceptionHandler( start, end, code.currentPc(), throwableType_index );
+ // astore 5 // store exception
+ code.addAstore( 5 );
+ // new // BulkAccessorException
+ code.addNew( BULKEXCEPTION_CLASS_NAME );
+ // dup
+ code.addOpcode( Opcode.DUP );
+ // aload 5 // load exception
+ code.addAload( 5 );
+ // iload_3 // i
+ code.addIload( 3 );
+ // invokespecial // BulkAccessorException.
+
+
+ Currently, both CGLIB and Javassist are supported out-of-the-box.
+
+ Note that for field-level interception, simply plugging in a new {@link BytecodeProvider}
+ is not enough for Hibernate to be able to recognize new providers. You would additionally
+ need to make appropriate code changes to the {@link org.hibernate.intercept.Helper}
+ class. This is because the detection of these enhanced classes is needed in a static
+ environment (i.e. outside the scope of any {@link org.hibernate.SessionFactory}).
+
+ Note that in the current form the ability to specify a different bytecode provider
+ is actually considered a global setting (global to the JVM).
+ + diff --git a/src/org/hibernate/bytecode/util/BasicClassFilter.java b/src/org/hibernate/bytecode/util/BasicClassFilter.java new file mode 100644 index 0000000000..64e179aff7 --- /dev/null +++ b/src/org/hibernate/bytecode/util/BasicClassFilter.java @@ -0,0 +1,59 @@ +package org.hibernate.bytecode.util; + +import java.util.Set; +import java.util.HashSet; + +/** + * BasicClassFilter provides class filtering based on a series of packages to + * be included and/or a series of explicit class names to be included. If + * neither is specified, then no restrictions are applied. + * + * @author Steve Ebersole + */ +public class BasicClassFilter implements ClassFilter { + private final String[] includedPackages; + private final Set includedClassNames = new HashSet(); + private final boolean isAllEmpty; + + public BasicClassFilter() { + this( null, null ); + } + + public BasicClassFilter(String[] includedPackages, String[] includedClassNames) { + this.includedPackages = includedPackages; + if ( includedClassNames != null ) { + for ( int i = 0; i < includedClassNames.length; i++ ) { + this.includedClassNames.add( includedClassNames[i] ); + } + } + + isAllEmpty = ( this.includedPackages == null || this.includedPackages.length == 0 ) + && ( this.includedClassNames.isEmpty() ); + } + + public boolean shouldInstrumentClass(String className) { + if ( isAllEmpty ) { + return true; + } + else if ( includedClassNames.contains( className ) ) { + return true; + } + else if ( isInIncludedPackage( className ) ) { + return true; + } + else { + return false; + } + } + + private boolean isInIncludedPackage(String className) { + if ( includedPackages != null ) { + for ( int i = 0; i < includedPackages.length; i++ ) { + if ( className.startsWith( includedPackages[i] ) ) { + return true; + } + } + } + return false; + } +} diff --git a/src/org/hibernate/bytecode/util/ByteCodeHelper.java b/src/org/hibernate/bytecode/util/ByteCodeHelper.java new file mode 100644 index 0000000000..8ada73f7e9 --- 
/dev/null +++ b/src/org/hibernate/bytecode/util/ByteCodeHelper.java @@ -0,0 +1,78 @@ +package org.hibernate.bytecode.util; + +import java.io.InputStream; +import java.io.IOException; +import java.io.File; +import java.io.FileInputStream; +import java.io.ByteArrayOutputStream; +import java.io.BufferedInputStream; +import java.util.zip.ZipInputStream; + +/** + * A helper for reading byte code from various input sources. + * + * @author Steve Ebersole + */ +public class ByteCodeHelper { + private ByteCodeHelper() { + } + + /** + * Reads class byte array info from the given input stream. + * + * The stream is closed within this method! + * + * @param inputStream + * @return + * @throws IOException + */ + public static byte[] readByteCode(InputStream inputStream) throws IOException { + if ( inputStream == null ) { + throw new IOException( "null input stream" ); + } + + byte[] buffer = new byte[409600]; + byte[] classBytes = new byte[0]; + int r = 0; + + try { + r = inputStream.read( buffer ); + while ( r >= buffer.length ) { + byte[] temp = new byte[ classBytes.length + buffer.length ]; + System.arraycopy( classBytes, 0, temp, 0, classBytes.length ); + System.arraycopy( buffer, 0, temp, classBytes.length, buffer.length ); + classBytes = temp; + } + if ( r != -1 ) { + byte[] temp = new byte[ classBytes.length + r ]; + System.arraycopy( classBytes, 0, temp, 0, classBytes.length ); + System.arraycopy( buffer, 0, temp, classBytes.length, r ); + classBytes = temp; + } + } + finally { + try { + inputStream.close(); + } + catch (IOException ignore) { + // intentionally empty + } + } + + return classBytes; + } + + public static byte[] readByteCode(File file) throws IOException { + return ByteCodeHelper.readByteCode( new FileInputStream( file ) ); + } + + public static byte[] readByteCode(ZipInputStream zip) throws IOException { + ByteArrayOutputStream bout = new ByteArrayOutputStream(); + InputStream in = new BufferedInputStream( zip ); + int b; + while ( ( b = in.read() ) != 
-1 ) { + bout.write( b ); + } + return bout.toByteArray(); + } +} diff --git a/src/org/hibernate/bytecode/util/ClassDescriptor.java b/src/org/hibernate/bytecode/util/ClassDescriptor.java new file mode 100644 index 0000000000..5e2601aa4b --- /dev/null +++ b/src/org/hibernate/bytecode/util/ClassDescriptor.java @@ -0,0 +1,30 @@ +package org.hibernate.bytecode.util; + +/** + * Contract describing the information Hibernate needs in terms of instrumenting + * a class, either via ant task or dynamic classloader. + * + * @author Steve Ebersole + */ +public interface ClassDescriptor { + /** + * The name of the class. + * + * @return The class name. + */ + public String getName(); + + /** + * Determine if the class is already instrumented. + * + * @return True if already instrumented; false otherwise. + */ + public boolean isInstrumented(); + + /** + * The bytes making up the class' bytecode. + * + * @return The bytecode bytes. + */ + public byte[] getBytes(); +} diff --git a/src/org/hibernate/bytecode/util/ClassFilter.java b/src/org/hibernate/bytecode/util/ClassFilter.java new file mode 100644 index 0000000000..9418fe4bb1 --- /dev/null +++ b/src/org/hibernate/bytecode/util/ClassFilter.java @@ -0,0 +1,10 @@ +package org.hibernate.bytecode.util; + +/** + * Used to determine whether a class should be instrumented. + * + * @author Steve Ebersole + */ +public interface ClassFilter { + public boolean shouldInstrumentClass(String className); +} diff --git a/src/org/hibernate/bytecode/util/FieldFilter.java b/src/org/hibernate/bytecode/util/FieldFilter.java new file mode 100644 index 0000000000..6625120b1e --- /dev/null +++ b/src/org/hibernate/bytecode/util/FieldFilter.java @@ -0,0 +1,29 @@ +package org.hibernate.bytecode.util; + +/** + * Used to determine whether a field reference should be instrumented. + * + * @author Steve Ebersole + */ +public interface FieldFilter { + /** + * Should this field definition be instrumented? 
+ * + * @param className The name of the class currently being processed + * @param fieldName The name of the field being checked. + * @return True if we should instrument this field. + */ + public boolean shouldInstrumentField(String className, String fieldName); + + /** + * Should we instrument *access to* the given field. This differs from + * {@link #shouldInstrumentField} in that here we are talking about a particular usage of + * a field. + * + * @param transformingClassName The class currently being transformed. + * @param fieldOwnerClassName The name of the class owning this field being checked. + * @param fieldName The name of the field being checked. + * @return True if this access should be transformed. + */ + public boolean shouldTransformFieldAccess(String transformingClassName, String fieldOwnerClassName, String fieldName); +} diff --git a/src/org/hibernate/cache/AbstractJndiBoundCacheProvider.java b/src/org/hibernate/cache/AbstractJndiBoundCacheProvider.java new file mode 100644 index 0000000000..ba013736f1 --- /dev/null +++ b/src/org/hibernate/cache/AbstractJndiBoundCacheProvider.java @@ -0,0 +1,86 @@ +// $Id$ +package org.hibernate.cache; + +import java.util.Properties; + +import javax.naming.Context; +import javax.naming.InitialContext; +import javax.naming.NamingException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.hibernate.cfg.Environment; +import org.hibernate.util.NamingHelper; +import org.hibernate.util.StringHelper; + +/** + * Support for CacheProvider implementations which are backed by caches bound + * into JNDI namespace. + * + * @author Steve Ebersole + */ +public abstract class AbstractJndiBoundCacheProvider implements CacheProvider { + + private static final Log log = LogFactory.getLog( AbstractJndiBoundCacheProvider.class ); + private Object cache; + + protected void prepare(Properties properties) { + // Do nothing; subclasses may override. 
+ } + + protected void release() { + // Do nothing; subclasses may override. + } + + /** + * Callback to perform any necessary initialization of the underlying cache implementation during SessionFactory + * construction. + * + * @param properties current configuration settings. + */ + public final void start(Properties properties) throws CacheException { + String jndiNamespace = properties.getProperty( Environment.CACHE_NAMESPACE ); + if ( StringHelper.isEmpty( jndiNamespace ) ) { + throw new CacheException( "No JNDI namespace specified for cache" ); + } + cache = locateCache( jndiNamespace, NamingHelper.getJndiProperties( properties ) ); + prepare( properties ); + } + + /** + * Callback to perform any necessary cleanup of the underlying cache + * implementation during SessionFactory.close(). + */ + public final void stop() { + release(); + cache = null; + } + + private Object locateCache(String jndiNamespace, Properties jndiProperties) { + + Context ctx = null; + try { + ctx = new InitialContext( jndiProperties ); + return ctx.lookup( jndiNamespace ); + } + catch (NamingException ne) { + String msg = "Unable to retreive Cache from JNDI [" + jndiNamespace + "]"; + log.info( msg, ne ); + throw new CacheException( msg ); + } + finally { + if ( ctx != null ) { + try { + ctx.close(); + } + catch( NamingException ne ) { + log.info( "Unable to release initial context", ne ); + } + } + } + } + + public Object getCache() { + return cache; + } +} diff --git a/src/org/hibernate/cache/Cache.java b/src/org/hibernate/cache/Cache.java new file mode 100644 index 0000000000..186d00c674 --- /dev/null +++ b/src/org/hibernate/cache/Cache.java @@ -0,0 +1,106 @@ +//$Id$ +package org.hibernate.cache; + +import java.util.Map; + +/** + * Implementors define a caching algorithm. All implementors + * must be threadsafe. 
+ */ +public interface Cache { + /** + * Get an item from the cache + * @param key + * @return the cached object or null + * @throws CacheException + */ + public Object read(Object key) throws CacheException; + /** + * Get an item from the cache, nontransactionally + * @param key + * @return the cached object or null + * @throws CacheException + */ + public Object get(Object key) throws CacheException; + /** + * Add an item to the cache, nontransactionally, with + * failfast semantics + * @param key + * @param value + * @throws CacheException + */ + public void put(Object key, Object value) throws CacheException; + /** + * Add an item to the cache + * @param key + * @param value + * @throws CacheException + */ + public void update(Object key, Object value) throws CacheException; + /** + * Remove an item from the cache + */ + public void remove(Object key) throws CacheException; + /** + * Clear the cache + */ + public void clear() throws CacheException; + /** + * Clean up + */ + public void destroy() throws CacheException; + /** + * If this is a clustered cache, lock the item + */ + public void lock(Object key) throws CacheException; + /** + * If this is a clustered cache, unlock the item + */ + public void unlock(Object key) throws CacheException; + /** + * Generate a timestamp + */ + public long nextTimestamp(); + /** + * Get a reasonable "lock timeout" + */ + public int getTimeout(); + + /** + * Get the name of the cache region + */ + public String getRegionName(); + + /** + * The number of bytes is this cache region currently consuming in memory. + * + * @return The number of bytes consumed by this region; -1 if unknown or + * unsupported. + */ + public long getSizeInMemory(); + + /** + * The count of entries currently contained in the regions in-memory store. + * + * @return The count of entries in memory; -1 if unknown or unsupported. + */ + public long getElementCountInMemory(); + + /** + * The count of entries currently contained in the regions disk store. 
+ * + * @return The count of entries on disk; -1 if unknown or unsupported. + */ + public long getElementCountOnDisk(); + + /** + * optional operation + */ + public Map toMap(); +} + + + + + + diff --git a/src/org/hibernate/cache/CacheConcurrencyStrategy.java b/src/org/hibernate/cache/CacheConcurrencyStrategy.java new file mode 100644 index 0000000000..0870b8f50b --- /dev/null +++ b/src/org/hibernate/cache/CacheConcurrencyStrategy.java @@ -0,0 +1,177 @@ +//$Id$ +package org.hibernate.cache; + +import java.util.Comparator; + +/** + * Implementors manage transactional access to cached data. Transactions + * pass in a timestamp indicating transaction start time. Two different + * implementation patterns are provided for.hibernate.cache.provider_class=org.hibernate.cache.EhCacheProvider
+ * in Hibernate 3.x or later
+ *
+ * Taken from EhCache 0.9 distribution
+ * @author Greg Luck
+ * @author Emmanuel Bernard
+ */
+/**
+ * Cache Provider plugin for ehcache-1.2. New in this provider are ehcache support for multiple
+ * Hibernate session factories, each with its own ehcache configuration, and non Serializable keys and values.
+ * Ehcache-1.2 also has many other features such as cluster support and listeners, which can be used seamlessly simply
+ * by configurion in ehcache.xml.
+ *
+ * Use hibernate.cache.provider_class=org.hibernate.cache.EhCacheProvider
in the Hibernate configuration
+ * to enable this provider for Hibernate's second level cache.
+ *
+ * When configuring multiple ehcache CacheManagers, as you would where you have multiple Hibernate Configurations and
+ * multiple SessionFactories, specify in each Hibernate configuration the ehcache configuration using
+ * the property hibernate.cache.provider_configuration_file_resource_path
An example to set an ehcache configuration
+ * called ehcache-2.xml would be hibernate.cache.provider_configuration_file_resource_path=/ehcache-2.xml
. If the leading
+ * slash is not there one will be added. The configuration file will be looked for in the root of the classpath.
+ *
+ * Updated for ehcache-1.2. Note this provider requires ehcache-1.2.jar. Make sure ehcache-1.1.jar or earlier
+ * is not in the classpath or it will not work.
+ *
+ * See http://ehcache.sf.net for documentation on ehcache
+ *
+ *
+ * @author Greg Luck
+ * @author Emmanuel Bernard
+ */
+public class EhCacheProvider implements CacheProvider {
+
+ private static final Log log = LogFactory.getLog(EhCacheProvider.class);
+
+ private CacheManager manager;
+
+ /**
+ * Builds a Cache.
+ * + * Even though this method provides properties, they are not used. + * Properties for EHCache are specified in the ehcache.xml file. + * Configuration will be read from ehcache.xml for a cache declaration + * where the name attribute matches the name parameter in this builder. + * + * @param name the name of the cache. Must match a cache configured in ehcache.xml + * @param properties not used + * @return a newly built cache will be built and initialised + * @throws CacheException inter alia, if a cache of the same name already exists + */ + public Cache buildCache(String name, Properties properties) throws CacheException { + try { + net.sf.ehcache.Cache cache = manager.getCache(name); + if (cache == null) { + log.warn("Could not find configuration [" + name + "]; using defaults."); + manager.addCache(name); + cache = manager.getCache(name); + log.debug("started EHCache region: " + name); + } + return new EhCache(cache); + } + catch (net.sf.ehcache.CacheException e) { + throw new CacheException(e); + } + } + + /** + * Returns the next timestamp. + */ + public long nextTimestamp() { + return Timestamper.next(); + } + + /** + * Callback to perform any necessary initialization of the underlying cache implementation + * during SessionFactory construction. + * + * @param properties current configuration settings. + */ + public void start(Properties properties) throws CacheException { + if (manager != null) { + log.warn("Attempt to restart an already started EhCacheProvider. Use sessionFactory.close() " + + " between repeated calls to buildSessionFactory. Using previously created EhCacheProvider." 
+ + " If this behaviour is required, consider using net.sf.ehcache.hibernate.SingletonEhCacheProvider."); + return; + } + try { + String configurationResourceName = null; + if (properties != null) { + configurationResourceName = (String) properties.get( Environment.CACHE_PROVIDER_CONFIG ); + } + if ( StringHelper.isEmpty( configurationResourceName ) ) { + manager = new CacheManager(); + } else { + URL url = loadResource(configurationResourceName); + manager = new CacheManager(url); + } + } catch (net.sf.ehcache.CacheException e) { + //yukky! Don't you have subclasses for that! + //TODO race conditions can happen here + if (e.getMessage().startsWith("Cannot parseConfiguration CacheManager. Attempt to create a new instance of " + + "CacheManager using the diskStorePath")) { + throw new CacheException("Attempt to restart an already started EhCacheProvider. Use sessionFactory.close() " + + " between repeated calls to buildSessionFactory. Consider using net.sf.ehcache.hibernate.SingletonEhCacheProvider." + , e ); + } else { + throw e; + } + } + } + + private URL loadResource(String configurationResourceName) { + URL url = ConfigHelper.locateConfig( configurationResourceName ); + if (log.isDebugEnabled()) { + log.debug("Creating EhCacheProvider from a specified resource: " + + configurationResourceName + " Resolved to URL: " + url); + } + return url; + } + + /** + * Callback to perform any necessary cleanup of the underlying cache implementation + * during SessionFactory.close(). 
+ */ + public void stop() { + if (manager != null) { + manager.shutdown(); + manager = null; + } + } + + public boolean isMinimalPutsEnabledByDefault() { + return false; + } + +} diff --git a/src/org/hibernate/cache/FilterKey.java b/src/org/hibernate/cache/FilterKey.java new file mode 100755 index 0000000000..15d6b26423 --- /dev/null +++ b/src/org/hibernate/cache/FilterKey.java @@ -0,0 +1,70 @@ +//$Id$ +package org.hibernate.cache; + +import java.io.Serializable; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; + +import org.hibernate.EntityMode; +import org.hibernate.engine.TypedValue; +import org.hibernate.impl.FilterImpl; +import org.hibernate.type.Type; + +/** + * Allows cached queries to be keyed by enabled filters. + * + * @author Gavin King + */ +public final class FilterKey implements Serializable { + private String filterName; + private Map filterParameters = new HashMap(); + + public FilterKey(String name, Map params, Map types, EntityMode entityMode) { + filterName = name; + Iterator iter = params.entrySet().iterator(); + while ( iter.hasNext() ) { + Map.Entry me = (Map.Entry) iter.next(); + Type type = (Type) types.get( me.getKey() ); + filterParameters.put( me.getKey(), new TypedValue( type, me.getValue(), entityMode ) ); + } + } + + public int hashCode() { + int result = 13; + result = 37 * result + filterName.hashCode(); + result = 37 * result + filterParameters.hashCode(); + return result; + } + + public boolean equals(Object other) { + if ( !(other instanceof FilterKey) ) return false; + FilterKey that = (FilterKey) other; + if ( !that.filterName.equals(filterName) ) return false; + if ( !that.filterParameters.equals(filterParameters) ) return false; + return true; + } + + public String toString() { + return "FilterKey[" + filterName + filterParameters + ']'; + } + + public static Set createFilterKeys(Map enabledFilters, EntityMode entityMode) { + if ( 
enabledFilters.size()==0 ) return null; + Set result = new HashSet(); + Iterator iter = enabledFilters.values().iterator(); + while ( iter.hasNext() ) { + FilterImpl filter = (FilterImpl) iter.next(); + FilterKey key = new FilterKey( + filter.getName(), + filter.getParameters(), + filter.getFilterDefinition().getParameterTypes(), + entityMode + ); + result.add(key); + } + return result; + } +} diff --git a/src/org/hibernate/cache/HashtableCache.java b/src/org/hibernate/cache/HashtableCache.java new file mode 100644 index 0000000000..1cc65cc01a --- /dev/null +++ b/src/org/hibernate/cache/HashtableCache.java @@ -0,0 +1,90 @@ +//$Id$ +package org.hibernate.cache; + +import java.util.Collections; +import java.util.Hashtable; + +import java.util.Map; + +/** + * A lightweight implementation of the Cache interface + * @author Gavin King + */ +public class HashtableCache implements Cache { + + private final Map hashtable = new Hashtable(); + private final String regionName; + + public HashtableCache(String regionName) { + this.regionName = regionName; + } + + public String getRegionName() { + return regionName; + } + + public Object read(Object key) throws CacheException { + return hashtable.get(key); + } + + public Object get(Object key) throws CacheException { + return hashtable.get(key); + } + + public void update(Object key, Object value) throws CacheException { + put(key, value); + } + + public void put(Object key, Object value) throws CacheException { + hashtable.put(key, value); + } + + public void remove(Object key) throws CacheException { + hashtable.remove(key); + } + + public void clear() throws CacheException { + hashtable.clear(); + } + + public void destroy() throws CacheException { + + } + + public void lock(Object key) throws CacheException { + // local cache, so we use synchronization + } + + public void unlock(Object key) throws CacheException { + // local cache, so we use synchronization + } + + public long nextTimestamp() { + return Timestamper.next(); 
+ } + + public int getTimeout() { + return Timestamper.ONE_MS * 60000; //ie. 60 seconds + } + + public long getSizeInMemory() { + return -1; + } + + public long getElementCountInMemory() { + return hashtable.size(); + } + + public long getElementCountOnDisk() { + return 0; + } + + public Map toMap() { + return Collections.unmodifiableMap(hashtable); + } + + public String toString() { + return "HashtableCache(" + regionName + ')'; + } + +} diff --git a/src/org/hibernate/cache/HashtableCacheProvider.java b/src/org/hibernate/cache/HashtableCacheProvider.java new file mode 100644 index 0000000000..13ac176983 --- /dev/null +++ b/src/org/hibernate/cache/HashtableCacheProvider.java @@ -0,0 +1,42 @@ +//$Id$ +package org.hibernate.cache; + +import java.util.Properties; + +/** + * A simple in-memory Hashtable-based cache impl. + * + * @author Gavin King + */ +public class HashtableCacheProvider implements CacheProvider { + + public Cache buildCache(String regionName, Properties properties) throws CacheException { + return new HashtableCache( regionName ); + } + + public long nextTimestamp() { + return Timestamper.next(); + } + + /** + * Callback to perform any necessary initialization of the underlying cache implementation + * during SessionFactory construction. + * + * @param properties current configuration settings. + */ + public void start(Properties properties) throws CacheException { + } + + /** + * Callback to perform any necessary cleanup of the underlying cache implementation + * during SessionFactory.close(). 
+ */ + public void stop() { + } + + public boolean isMinimalPutsEnabledByDefault() { + return false; + } + +} + diff --git a/src/org/hibernate/cache/JndiBoundTreeCacheProvider.java b/src/org/hibernate/cache/JndiBoundTreeCacheProvider.java new file mode 100644 index 0000000000..cda5dd5111 --- /dev/null +++ b/src/org/hibernate/cache/JndiBoundTreeCacheProvider.java @@ -0,0 +1,63 @@ +// $Id$ +package org.hibernate.cache; + +import java.util.Properties; + +import javax.transaction.TransactionManager; + +import org.hibernate.transaction.TransactionManagerLookup; +import org.hibernate.transaction.TransactionManagerLookupFactory; + +/** + * Support for JBossCache (TreeCache), where the cache instance is available + * via JNDI lookup. + * + * @author Steve Ebersole + */ +public class JndiBoundTreeCacheProvider extends AbstractJndiBoundCacheProvider { + + private TransactionManager transactionManager; + + /** + * Construct a Cache representing the "region" within in the underlying cache + * provider. + * + * @param regionName the name of the cache region + * @param properties configuration settings + * + * @throws CacheException + */ + public Cache buildCache(String regionName, Properties properties) throws CacheException { + return new TreeCache( getTreeCacheInstance(), regionName, transactionManager ); + } + + public void prepare(Properties properties) throws CacheException { + TransactionManagerLookup transactionManagerLookup = TransactionManagerLookupFactory.getTransactionManagerLookup(properties); + if (transactionManagerLookup!=null) { + transactionManager = transactionManagerLookup.getTransactionManager(properties); + } + } + /** + * Generate a timestamp + */ + public long nextTimestamp() { + return System.currentTimeMillis() / 100; + } + + /** + * By default, should minimal-puts mode be enabled when using this cache. + *
+ * Since TreeCache is a clusterable cache and we are only getting a + * reference the instance from JNDI, safest to assume a clustered + * setup and return true here. + * + * @return True. + */ + public boolean isMinimalPutsEnabledByDefault() { + return true; + } + + public org.jboss.cache.TreeCache getTreeCacheInstance() { + return ( org.jboss.cache.TreeCache ) super.getCache(); + } +} diff --git a/src/org/hibernate/cache/NoCacheProvider.java b/src/org/hibernate/cache/NoCacheProvider.java new file mode 100644 index 0000000000..df77e55569 --- /dev/null +++ b/src/org/hibernate/cache/NoCacheProvider.java @@ -0,0 +1,58 @@ +// $Id$ +package org.hibernate.cache; + +import java.util.Properties; + +/** + * Implementation of NoCacheProvider. + * + * @author Steve Ebersole + */ +public class NoCacheProvider implements CacheProvider { + /** + * Configure the cache + * + * @param regionName the name of the cache region + * @param properties configuration settings + * + * @throws CacheException + */ + public Cache buildCache(String regionName, Properties properties) throws CacheException { + throw new NoCachingEnabledException(); + } + + /** + * Generate a timestamp + */ + public long nextTimestamp() { + // This, is used by SessionFactoryImpl to hand to the generated SessionImpl; + // was the only reason I could see that we cannot just use null as + // Settings.cacheProvider + return System.currentTimeMillis() / 100; + } + + /** + * Callback to perform any necessary initialization of the underlying cache implementation during SessionFactory + * construction. + * + * @param properties current configuration settings. + */ + public void start(Properties properties) throws CacheException { + // this is called by SessionFactory irregardless; we just disregard here; + // could also add a check to SessionFactory to only conditionally call start + } + + /** + * Callback to perform any necessary cleanup of the underlying cache implementation during SessionFactory.close(). 
+ */ + public void stop() { + // this is called by SessionFactory irregardless; we just disregard here; + // could also add a check to SessionFactory to only conditionally call stop + } + + public boolean isMinimalPutsEnabledByDefault() { + // this is called from SettingsFactory irregardless; trivial to simply disregard + return false; + } + +} diff --git a/src/org/hibernate/cache/NoCachingEnabledException.java b/src/org/hibernate/cache/NoCachingEnabledException.java new file mode 100644 index 0000000000..6b713d8653 --- /dev/null +++ b/src/org/hibernate/cache/NoCachingEnabledException.java @@ -0,0 +1,20 @@ +// $Id$ +package org.hibernate.cache; + +import org.hibernate.cfg.Environment; + +/** + * Implementation of NoCachingEnabledException. + * + * @author Steve Ebersole + */ +public class NoCachingEnabledException extends CacheException { + private static final String MSG = + "Second-level cache is not enabled for usage [" + + Environment.USE_SECOND_LEVEL_CACHE + + " | " + Environment.USE_QUERY_CACHE + "]"; + + public NoCachingEnabledException() { + super( MSG ); + } +} diff --git a/src/org/hibernate/cache/NonstrictReadWriteCache.java b/src/org/hibernate/cache/NonstrictReadWriteCache.java new file mode 100644 index 0000000000..1cc71080f7 --- /dev/null +++ b/src/org/hibernate/cache/NonstrictReadWriteCache.java @@ -0,0 +1,170 @@ +//$Id$ +package org.hibernate.cache; + +import java.util.Comparator; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +/** + * Caches data that is sometimes updated without ever locking the cache. + * If concurrent access to an item is possible, this concurrency strategy + * makes no guarantee that the item returned from the cache is the latest + * version available in the database. Configure your cache timeout accordingly! + * This is an "asynchronous" concurrency strategy. 
+ * + * @author Gavin King + * @see ReadWriteCache for a much stricter algorithm + */ +public class NonstrictReadWriteCache implements CacheConcurrencyStrategy { + + private Cache cache; + + private static final Log log = LogFactory.getLog( NonstrictReadWriteCache.class ); + + public NonstrictReadWriteCache() { + } + + public void setCache(Cache cache) { + this.cache = cache; + } + + public Cache getCache() { + return cache; + } + + /** + * Get the most recent version, if available. + */ + public Object get(Object key, long txTimestamp) throws CacheException { + if ( log.isDebugEnabled() ) { + log.debug( "Cache lookup: " + key ); + } + + Object result = cache.get( key ); + if ( result != null ) { + log.debug( "Cache hit" ); + } + else { + log.debug( "Cache miss" ); + } + return result; + } + + /** + * Add an item to the cache. + */ + public boolean put( + Object key, + Object value, + long txTimestamp, + Object version, + Comparator versionComparator, + boolean minimalPut) throws CacheException { + if ( minimalPut && cache.get( key ) != null ) { + if ( log.isDebugEnabled() ) { + log.debug( "item already cached: " + key ); + } + return false; + } + if ( log.isDebugEnabled() ) { + log.debug( "Caching: " + key ); + } + + cache.put( key, value ); + return true; + + } + + /** + * Do nothing. 
+ * + * @return null, no lock + */ + public SoftLock lock(Object key, Object version) throws CacheException { + return null; + } + + public void remove(Object key) throws CacheException { + if ( log.isDebugEnabled() ) { + log.debug( "Removing: " + key ); + } + cache.remove( key ); + } + + public void clear() throws CacheException { + if ( log.isDebugEnabled() ) { + log.debug( "Clearing" ); + } + cache.clear(); + } + + public void destroy() { + try { + cache.destroy(); + } + catch ( Exception e ) { + log.warn( "could not destroy cache", e ); + } + } + + /** + * Invalidate the item + */ + public void evict(Object key) throws CacheException { + if ( log.isDebugEnabled() ) { + log.debug( "Invalidating: " + key ); + } + + cache.remove( key ); + } + + /** + * Invalidate the item + */ + public boolean insert(Object key, Object value, Object currentVersion) { + return false; + } + + /** + * Do nothing. + */ + public boolean update(Object key, Object value, Object currentVersion, Object previousVersion) { + evict( key ); + return false; + } + + /** + * Invalidate the item (again, for safety). + */ + public void release(Object key, SoftLock lock) throws CacheException { + if ( log.isDebugEnabled() ) { + log.debug( "Invalidating (again): " + key ); + } + + cache.remove( key ); + } + + /** + * Invalidate the item (again, for safety). + */ + public boolean afterUpdate(Object key, Object value, Object version, SoftLock lock) throws CacheException { + release( key, lock ); + return false; + } + + /** + * Do nothing. 
+ */ + public boolean afterInsert(Object key, Object value, Object version) throws CacheException { + return false; + } + + public String getRegionName() { + return cache.getRegionName(); + } + + public String toString() { + return cache + "(nonstrict-read-write)"; + } +} diff --git a/src/org/hibernate/cache/OSCache.java b/src/org/hibernate/cache/OSCache.java new file mode 100644 index 0000000000..2802482aa8 --- /dev/null +++ b/src/org/hibernate/cache/OSCache.java @@ -0,0 +1,111 @@ +//$Id$ +package org.hibernate.cache; + +import java.util.Map; + +import com.opensymphony.oscache.base.NeedsRefreshException; +import com.opensymphony.oscache.general.GeneralCacheAdministrator; + +/** + * @author Mathias Bogaert + */ +public class OSCache implements Cache { + + /** + * The OSCache 2.0 cache administrator. + */ + private GeneralCacheAdministrator cache = new GeneralCacheAdministrator(); + + private final int refreshPeriod; + private final String cron; + private final String regionName; + + private String toString(Object key) { + return String.valueOf(key) + '.' 
+ regionName; + } + + public OSCache(int refreshPeriod, String cron, String region) { + this.refreshPeriod = refreshPeriod; + this.cron = cron; + this.regionName = region; + } + + public void setCacheCapacity(int cacheCapacity) { + cache.setCacheCapacity(cacheCapacity); + } + + public Object get(Object key) throws CacheException { + try { + return cache.getFromCache( toString(key), refreshPeriod, cron ); + } + catch (NeedsRefreshException e) { + cache.cancelUpdate( toString(key) ); + return null; + } + } + + public Object read(Object key) throws CacheException { + return get(key); + } + + public void update(Object key, Object value) throws CacheException { + put(key, value); + } + + public void put(Object key, Object value) throws CacheException { + cache.putInCache( toString(key), value ); + } + + public void remove(Object key) throws CacheException { + cache.flushEntry( toString(key) ); + } + + public void clear() throws CacheException { + cache.flushAll(); + } + + public void destroy() throws CacheException { + cache.destroy(); + } + + public void lock(Object key) throws CacheException { + // local cache, so we use synchronization + } + + public void unlock(Object key) throws CacheException { + // local cache, so we use synchronization + } + + public long nextTimestamp() { + return Timestamper.next(); + } + + public int getTimeout() { + return Timestamper.ONE_MS * 60000; //ie. 
60 seconds + } + + public String getRegionName() { + return regionName; + } + + public long getSizeInMemory() { + return -1; + } + + public long getElementCountInMemory() { + return -1; + } + + public long getElementCountOnDisk() { + return -1; + } + + public Map toMap() { + throw new UnsupportedOperationException(); + } + + public String toString() { + return "OSCache(" + regionName + ')'; + } + +} diff --git a/src/org/hibernate/cache/OSCacheProvider.java b/src/org/hibernate/cache/OSCacheProvider.java new file mode 100644 index 0000000000..ca3cf80ca8 --- /dev/null +++ b/src/org/hibernate/cache/OSCacheProvider.java @@ -0,0 +1,87 @@ +//$Id$ +package org.hibernate.cache; + +import java.util.Properties; + +import org.hibernate.util.PropertiesHelper; +import org.hibernate.util.StringHelper; + +import com.opensymphony.oscache.base.CacheEntry; +import com.opensymphony.oscache.base.Config; + +/** + * Support for OpenSymphony OSCache. This implementation assumes + * that identifiers have well-behaved toString() methods. + * + * @author Mathias Bogaert + */ +public class OSCacheProvider implements CacheProvider { + + /** + * The OSCache refresh period property suffix. + */ + public static final String OSCACHE_REFRESH_PERIOD = "refresh.period"; + /** + * The OSCache CRON expression property suffix. + */ + public static final String OSCACHE_CRON = "cron"; + /** + * The OSCache cache capacity property suffix. + */ + public static final String OSCACHE_CAPACITY = "capacity"; + + private static final Properties OSCACHE_PROPERTIES = new Config().getProperties(); + + /** + * Builds a new {@link Cache} instance, and gets it's properties from the OSCache {@link Config} + * which reads the properties file (oscache.properties
) from the classpath.
+ * If the file cannot be found or loaded, the defaults are used.
+ *
+ * @param region
+ * @param properties
+ * @return
+ * @throws CacheException
+ */
+ public Cache buildCache(String region, Properties properties) throws CacheException {
+
+ int refreshPeriod = PropertiesHelper.getInt(
+ StringHelper.qualify(region, OSCACHE_REFRESH_PERIOD),
+ OSCACHE_PROPERTIES,
+ CacheEntry.INDEFINITE_EXPIRY
+ );
+ String cron = OSCACHE_PROPERTIES.getProperty( StringHelper.qualify(region, OSCACHE_CRON) );
+
+ // construct the cache
+ final OSCache cache = new OSCache(refreshPeriod, cron, region);
+
+ Integer capacity = PropertiesHelper.getInteger( StringHelper.qualify(region, OSCACHE_CAPACITY), OSCACHE_PROPERTIES );
+ if ( capacity!=null ) cache.setCacheCapacity( capacity.intValue() );
+
+ return cache;
+ }
+
+ public long nextTimestamp() {
+ return Timestamper.next();
+ }
+
+ /**
+ * Callback to perform any necessary initialization of the underlying cache implementation
+ * during SessionFactory construction.
+ *
+ * @param properties current configuration settings.
+ */
+ public void start(Properties properties) throws CacheException {
+ }
+
+ /**
+ * Callback to perform any necessary cleanup of the underlying cache implementation
+ * during SessionFactory.close().
+ */
+ public void stop() {
+ }
+
+ public boolean isMinimalPutsEnabledByDefault() {
+ return false;
+ }
+
+}
diff --git a/src/org/hibernate/cache/OptimisticCache.java b/src/org/hibernate/cache/OptimisticCache.java
new file mode 100644
index 0000000000..7bcbcb24b8
--- /dev/null
+++ b/src/org/hibernate/cache/OptimisticCache.java
@@ -0,0 +1,64 @@
+package org.hibernate.cache;
+
+/**
+ * A contract for transactional cache implementations which support
+ * optimistic locking of items within the cache.
+ *
+ * The optimistic locking capabilities are only utilized for
+ * the entity cache regions.
+ *
+ * Unlike the methods on the {@link Cache} interface, all the methods
+ * here will only ever be called from access scenarios where versioned
+ * data is actually a possibility (i.e., entity data). Be sure to consult
+ * with {@link OptimisticCacheSource#isVersioned()} to determine whether
+ * versioning is actually in effect.
+ *
+ * @author Steve Ebersole
+ */
+public interface OptimisticCache extends Cache {
+ /**
+ * Indicates the "source" of the cached data. Currently this will
+ * only ever represent an {@link org.hibernate.persister.entity.EntityPersister}.
+ *
+ * Made available to the cache so that it can access certain information
+ * about versioning strategy.
+ *
+ * @param source The source.
+ */
+ public void setSource(OptimisticCacheSource source);
+
+ /**
+ * Called during {@link CacheConcurrencyStrategy#insert} processing for
+ * transactional strategies. Indicates we have just performed an insert
+ * into the DB and now need to cache that entity's data.
+ *
+ * @param key The cache key.
+ * @param value The data to be cached.
+ * @param currentVersion The entity's version; or null if not versioned.
+ */
+ public void writeInsert(Object key, Object value, Object currentVersion);
+
+ /**
+ * Called during {@link CacheConcurrencyStrategy#update} processing for
+ * transactional strategies. Indicates we have just performed an update
+ * against the DB and now need to cache the updated state.
+ *
+ * @param key The cache key.
+ * @param value The data to be cached.
+ * @param currentVersion The entity's current version
+ * @param previousVersion The entity's previous version (before the update);
+ * or null if not versioned.
+ */
+ public void writeUpdate(Object key, Object value, Object currentVersion, Object previousVersion);
+
+ /**
+ * Called during {@link CacheConcurrencyStrategy#put} processing for
+ * transactional strategies. Indicates we have just loaded an entity's
+ * state from the database and need it cached.
+ *
+ * @param key The cache key.
+ * @param value The data to be cached.
+ * @param currentVersion The entity's version; or null if not versioned.
+ */
+ public void writeLoad(Object key, Object value, Object currentVersion);
+}
diff --git a/src/org/hibernate/cache/OptimisticCacheSource.java b/src/org/hibernate/cache/OptimisticCacheSource.java
new file mode 100644
index 0000000000..ca01e4f8c7
--- /dev/null
+++ b/src/org/hibernate/cache/OptimisticCacheSource.java
@@ -0,0 +1,29 @@
+package org.hibernate.cache;
+
+import java.util.Comparator;
+
+/**
+ * Contract for sources of optimistically lockable data sent to the second level
+ * cache.
+ *
+ * Note currently {@link org.hibernate.persister.entity.EntityPersister}s are
+ * the only viable source.
+ *
+ * @author Steve Ebersole
+ */
+public interface OptimisticCacheSource {
+ /**
+ * Does this source represent versioned (i.e., and thus optimistically
+ * lockable) data?
+ *
+ * @return True if this source represents versioned data; false otherwise.
+ */
+ public boolean isVersioned();
+
+ /**
+ * Get the comparator used to compare two different version values together.
+ *
+ * @return An appropriate comparator.
+ */
+ public Comparator getVersionComparator();
+}
diff --git a/src/org/hibernate/cache/OptimisticTreeCache.java b/src/org/hibernate/cache/OptimisticTreeCache.java
new file mode 100644
index 0000000000..d31c0cfbc3
--- /dev/null
+++ b/src/org/hibernate/cache/OptimisticTreeCache.java
@@ -0,0 +1,329 @@
+//$Id$
+package org.hibernate.cache;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.Comparator;
+
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.jboss.cache.Fqn;
+import org.jboss.cache.optimistic.DataVersion;
+import org.jboss.cache.config.Option;
+import org.jboss.cache.lock.TimeoutException;
+
+/**
+ * Represents a particular region within the given JBossCache TreeCache
+ * utilizing TreeCache's optimistic locking capabilities.
+ *
+ * @see OptimisticTreeCacheProvider for more details
+ *
+ * @author Steve Ebersole
+ */
+public class OptimisticTreeCache implements OptimisticCache {
+
+ // todo : eventually merge this with TreeCache and just add optional opt-lock support there.
+
+ private static final Log log = LogFactory.getLog( OptimisticTreeCache.class);
+
+ private static final String ITEM = "item";
+
+ private org.jboss.cache.TreeCache cache;
+ private final String regionName;
+ private final Fqn regionFqn;
+ private OptimisticCacheSource source;
+
+ public OptimisticTreeCache(org.jboss.cache.TreeCache cache, String regionName)
+ throws CacheException {
+ this.cache = cache;
+ this.regionName = regionName;
+ this.regionFqn = Fqn.fromString( regionName.replace( '.', '/' ) );
+ }
+
+
+ // OptimisticCache impl ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ public void setSource(OptimisticCacheSource source) {
+ this.source = source;
+ }
+
+ public void writeInsert(Object key, Object value, Object currentVersion) {
+ writeUpdate( key, value, currentVersion, null );
+ }
+
+ public void writeUpdate(Object key, Object value, Object currentVersion, Object previousVersion) {
+ try {
+ Option option = new Option();
+ DataVersion dv = ( source != null && source.isVersioned() )
+ ? new DataVersionAdapter( currentVersion, previousVersion, source.getVersionComparator(), source.toString() )
+ : NonLockingDataVersion.INSTANCE;
+ option.setDataVersion( dv );
+ cache.put( new Fqn( regionFqn, key ), ITEM, value, option );
+ }
+ catch ( Exception e ) {
+ throw new CacheException( e );
+ }
+ }
+
+ public void writeLoad(Object key, Object value, Object currentVersion) {
+ try {
+ Option option = new Option();
+ option.setFailSilently( true );
+ option.setDataVersion( NonLockingDataVersion.INSTANCE );
+ cache.remove( new Fqn( regionFqn, key ), ITEM, option ); // fix: was the literal "ITEM", which never matched the ITEM constant ("item"), so the stale entry was not removed
+
+ option = new Option();
+ option.setFailSilently( true );
+ DataVersion dv = ( source != null && source.isVersioned() )
+ ? new DataVersionAdapter( currentVersion, currentVersion, source.getVersionComparator(), source.toString() )
+ : NonLockingDataVersion.INSTANCE;
+ option.setDataVersion( dv );
+ cache.put( new Fqn( regionFqn, key ), ITEM, value, option );
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+
+ // Cache impl ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ public Object get(Object key) throws CacheException {
+ try {
+ Option option = new Option();
+ option.setFailSilently( true );
+// option.setDataVersion( NonLockingDataVersion.INSTANCE );
+ return cache.get( new Fqn( regionFqn, key ), ITEM, option );
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+ public Object read(Object key) throws CacheException {
+ try {
+ return cache.get( new Fqn( regionFqn, key ), ITEM );
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+ public void update(Object key, Object value) throws CacheException {
+ try {
+ Option option = new Option();
+ option.setDataVersion( NonLockingDataVersion.INSTANCE );
+ cache.put( new Fqn( regionFqn, key ), ITEM, value, option );
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+ public void put(Object key, Object value) throws CacheException {
+ try {
+ log.trace( "performing put() into region [" + regionName + "]" );
+ // do the put outside the scope of the JTA txn
+ Option option = new Option();
+ option.setFailSilently( true );
+ option.setDataVersion( NonLockingDataVersion.INSTANCE );
+ cache.put( new Fqn( regionFqn, key ), ITEM, value, option );
+ }
+ catch (TimeoutException te) {
+ //ignore!
+ log.debug("ignoring write lock acquisition failure");
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+ public void remove(Object key) throws CacheException {
+ try {
+ // tree cache in optimistic mode seems to have as very difficult
+ // time with remove calls on non-existent nodes (NPEs)...
+ if ( cache.get( new Fqn( regionFqn, key ), ITEM ) != null ) {
+ Option option = new Option();
+ option.setDataVersion( NonLockingDataVersion.INSTANCE );
+ cache.remove( new Fqn( regionFqn, key ), option );
+ }
+ else {
+ log.trace( "skipping remove() call as the underlying node did not seem to exist" );
+ }
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+ public void clear() throws CacheException {
+ try {
+ Option option = new Option();
+ option.setDataVersion( NonLockingDataVersion.INSTANCE );
+ cache.remove( regionFqn, option );
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+ public void destroy() throws CacheException {
+ try {
+ Option option = new Option();
+ option.setCacheModeLocal( true );
+ option.setFailSilently( true );
+ option.setDataVersion( NonLockingDataVersion.INSTANCE );
+ cache.remove( regionFqn, option );
+ }
+ catch( Exception e ) {
+ throw new CacheException( e );
+ }
+ }
+
+ public void lock(Object key) throws CacheException {
+ throw new UnsupportedOperationException( "TreeCache is a fully transactional cache: " + regionName ); // fix: add ": " separator to match unlock()'s message format
+ }
+
+ public void unlock(Object key) throws CacheException {
+ throw new UnsupportedOperationException( "TreeCache is a fully transactional cache: " + regionName );
+ }
+
+ public long nextTimestamp() {
+ return System.currentTimeMillis() / 100;
+ }
+
+ public int getTimeout() {
+ return 600; //60 seconds
+ }
+
+ public String getRegionName() {
+ return regionName;
+ }
+
+ public long getSizeInMemory() {
+ return -1;
+ }
+
+ public long getElementCountInMemory() {
+ try {
+ Set children = cache.getChildrenNames( regionFqn );
+ return children == null ? 0 : children.size();
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+ public long getElementCountOnDisk() {
+ return 0;
+ }
+
+ public Map toMap() {
+ try {
+ Map result = new HashMap();
+ Set childrenNames = cache.getChildrenNames( regionFqn );
+ if (childrenNames != null) {
+ Iterator iter = childrenNames.iterator();
+ while ( iter.hasNext() ) {
+ Object key = iter.next();
+ result.put(
+ key,
+ cache.get( new Fqn( regionFqn, key ), ITEM )
+ );
+ }
+ }
+ return result;
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+ public String toString() {
+ return "OptimisticTreeCache(" + regionName + ')';
+ }
+
+ public static class DataVersionAdapter implements DataVersion {
+ private final Object currentVersion;
+ private final Object previousVersion;
+ private final Comparator versionComparator;
+ private final String sourceIdentifer;
+
+ public DataVersionAdapter(Object currentVersion, Object previousVersion, Comparator versionComparator, String sourceIdentifer) {
+ this.currentVersion = currentVersion;
+ this.previousVersion = previousVersion;
+ this.versionComparator = versionComparator;
+ this.sourceIdentifer = sourceIdentifer;
+ log.trace( "created " + this );
+ }
+
+ /**
+ * newerThan() call is dispatched against the DataVersion currently
+ * associated with the node; the passed dataVersion param is the
+ * DataVersion associated with the data we are trying to put into
+ * the node.
+ *
+ * we are expected to return true in the case where we (the current
+ * node DataVersion) are newer than the incoming value. Returning
+ * true here essentially means that an optimistic lock failure has
+ * occurred (because conversely, the value we are trying to put into
+ * the node is "older than" the value already there...)
+ */
+ public boolean newerThan(DataVersion dataVersion) {
+ log.trace( "checking [" + this + "] against [" + dataVersion + "]" );
+ if ( dataVersion instanceof CircumventChecksDataVersion ) {
+ log.trace( "skipping lock checks..." );
+ return false;
+ }
+ else if ( dataVersion instanceof NonLockingDataVersion ) {
+ // can happen because of the multiple ways Cache.remove()
+ // can be invoked :(
+ log.trace( "skipping lock checks..." );
+ return false;
+ }
+ DataVersionAdapter other = ( DataVersionAdapter ) dataVersion;
+ if ( other.previousVersion == null ) {
+ log.warn( "Unexpected optimistic lock check on inserting data" );
+ // work around the "feature" where tree cache is validating the
+ // inserted node during the next transaction. no idea...
+ if ( this == dataVersion ) {
+ log.trace( "skipping lock checks due to same DV instance" );
+ return false;
+ }
+ }
+ return versionComparator.compare( currentVersion, other.previousVersion ) >= 1;
+ }
+
+ public String toString() {
+ return super.toString() + " [current=" + currentVersion + ", previous=" + previousVersion + ", src=" + sourceIdentifer + "]";
+ }
+ }
+
+ /**
+ * Used in regions where no locking should ever occur. This includes query-caches,
+ * update-timestamps caches, collection caches, and entity caches where the entity
+ * is not versioned.
+ */
+ public static class NonLockingDataVersion implements DataVersion {
+ public static final DataVersion INSTANCE = new NonLockingDataVersion();
+ public boolean newerThan(DataVersion dataVersion) {
+ log.trace( "non locking lock check...");
+ return false;
+ }
+ }
+
+ /**
+ * Used to signal to a DataVersionAdapter to simply not perform any checks. This
+ * is currently needed for proper handling of remove() calls for entity cache regions
+ * (we do not know the version info...).
+ */
+ public static class CircumventChecksDataVersion implements DataVersion {
+ public static final DataVersion INSTANCE = new CircumventChecksDataVersion();
+ public boolean newerThan(DataVersion dataVersion) {
+ throw new CacheException( "optimistic locking checks should never happen on CircumventChecksDataVersion" );
+ }
+ }
+}
diff --git a/src/org/hibernate/cache/OptimisticTreeCacheProvider.java b/src/org/hibernate/cache/OptimisticTreeCacheProvider.java
new file mode 100644
index 0000000000..fd2cc7458c
--- /dev/null
+++ b/src/org/hibernate/cache/OptimisticTreeCacheProvider.java
@@ -0,0 +1,130 @@
+//$Id$
+package org.hibernate.cache;
+
+import java.util.Properties;
+import javax.transaction.TransactionManager;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.hibernate.cfg.Environment;
+import org.hibernate.transaction.TransactionManagerLookup;
+import org.hibernate.transaction.TransactionManagerLookupFactory;
+import org.jboss.cache.PropertyConfigurator;
+
+/**
+ * Support for a standalone JBossCache TreeCache instance utilizing TreeCache's
+ * optimistic locking capabilities. This capability was added in JBossCache
+ * version 1.3.0; as such this provider will only work with that version or
+ * higher.
+ *
+ * The TreeCache instance is configured via a local config resource. The
+ * resource to be used for configuration can be controlled by specifying a value
+ * for the {@link #CONFIG_RESOURCE} config property.
+ *
+ * @author Steve Ebersole
+ */
+public class OptimisticTreeCacheProvider implements CacheProvider {
+
+ /**
+	 * @deprecated use {@link Environment#CACHE_PROVIDER_CONFIG}
+ */
+ public static final String CONFIG_RESOURCE = "hibernate.cache.opt_tree_cache.config";
+ public static final String DEFAULT_CONFIG = "treecache.xml";
+
+ private static final String NODE_LOCKING_SCHEME = "OPTIMISTIC";
+ private static final Log log = LogFactory.getLog( OptimisticTreeCacheProvider.class );
+
+ private org.jboss.cache.TreeCache cache;
+
+ /**
+ * Construct and configure the Cache representation of a named cache region.
+ *
+ * @param regionName the name of the cache region
+ * @param properties configuration settings
+ * @return The Cache representation of the named cache region.
+ * @throws CacheException
+ * Indicates an error building the cache region.
+ */
+ public Cache buildCache(String regionName, Properties properties) throws CacheException {
+ return new OptimisticTreeCache( cache, regionName );
+ }
+
+ public long nextTimestamp() {
+ return System.currentTimeMillis() / 100;
+ }
+
+ /**
+ * Prepare the underlying JBossCache TreeCache instance.
+ *
+ * @param properties All current config settings.
+ * @throws CacheException
+ * Indicates a problem preparing cache for use.
+ */
+ public void start(Properties properties) {
+ String resource = properties.getProperty( Environment.CACHE_PROVIDER_CONFIG );
+ if (resource == null) {
+ resource = properties.getProperty( CONFIG_RESOURCE );
+ }
+ if ( resource == null ) {
+ resource = DEFAULT_CONFIG;
+ }
+ log.debug( "Configuring TreeCache from resource [" + resource + "]" );
+ try {
+ cache = new org.jboss.cache.TreeCache();
+ PropertyConfigurator config = new PropertyConfigurator();
+ config.configure( cache, resource );
+ TransactionManagerLookup transactionManagerLookup =
+ TransactionManagerLookupFactory.getTransactionManagerLookup( properties );
+ if ( transactionManagerLookup == null ) {
+ throw new CacheException(
+						"JBossCache only supports optimistic locking with a configured " +
+ "TransactionManagerLookup (" + Environment.TRANSACTION_MANAGER_STRATEGY + ")"
+ );
+ }
+ cache.setTransactionManagerLookup(
+ new TransactionManagerLookupAdaptor(
+ transactionManagerLookup,
+ properties
+ )
+ );
+ if ( ! NODE_LOCKING_SCHEME.equalsIgnoreCase( cache.getNodeLockingScheme() ) ) {
+ log.info( "Overriding node-locking-scheme to : " + NODE_LOCKING_SCHEME );
+ cache.setNodeLockingScheme( NODE_LOCKING_SCHEME );
+ }
+ cache.start();
+ }
+ catch ( Exception e ) {
+ throw new CacheException( e );
+ }
+ }
+
+ public void stop() {
+ if ( cache != null ) {
+ cache.stop();
+ cache.destroy();
+ cache = null;
+ }
+ }
+
+ public boolean isMinimalPutsEnabledByDefault() {
+ return true;
+ }
+
+ static final class TransactionManagerLookupAdaptor implements org.jboss.cache.TransactionManagerLookup {
+ private final TransactionManagerLookup tml;
+ private final Properties props;
+
+ TransactionManagerLookupAdaptor(TransactionManagerLookup tml, Properties props) {
+ this.tml = tml;
+ this.props = props;
+ }
+
+ public TransactionManager getTransactionManager() throws Exception {
+ return tml.getTransactionManager( props );
+ }
+ }
+
+ public org.jboss.cache.TreeCache getUnderlyingCache() {
+ return cache;
+ }
+}
diff --git a/src/org/hibernate/cache/QueryCache.java b/src/org/hibernate/cache/QueryCache.java
new file mode 100644
index 0000000000..0cc457cf31
--- /dev/null
+++ b/src/org/hibernate/cache/QueryCache.java
@@ -0,0 +1,33 @@
+//$Id$
+package org.hibernate.cache;
+
+import java.util.List;
+import java.util.Set;
+
+import org.hibernate.HibernateException;
+import org.hibernate.engine.SessionImplementor;
+import org.hibernate.type.Type;
+
+/**
+ * Defines the contract for caches capable of storing query results. These
+ * caches should only concern themselves with storing the matching result ids.
+ * The transactional semantics are necessarily less strict than the semantics
+ * of an item cache.
+ *
+ * @author Gavin King
+ */
+public interface QueryCache {
+
+ public void clear() throws CacheException;
+
+ public boolean put(QueryKey key, Type[] returnTypes, List result, boolean isNaturalKeyLookup, SessionImplementor session) throws HibernateException;
+
+ public List get(QueryKey key, Type[] returnTypes, boolean isNaturalKeyLookup, Set spaces, SessionImplementor session)
+ throws HibernateException;
+
+ public void destroy();
+
+ public Cache getCache();
+
+ public String getRegionName();
+}
diff --git a/src/org/hibernate/cache/QueryCacheFactory.java b/src/org/hibernate/cache/QueryCacheFactory.java
new file mode 100644
index 0000000000..9472364fa6
--- /dev/null
+++ b/src/org/hibernate/cache/QueryCacheFactory.java
@@ -0,0 +1,24 @@
+// $Id$
+package org.hibernate.cache;
+
+import org.hibernate.HibernateException;
+import org.hibernate.cfg.Settings;
+
+import java.util.Properties;
+
+/**
+ * Defines a factory for query cache instances. These factories are responsible for
+ * creating individual QueryCache instances.
+ *
+ * @author Steve Ebersole
+ */
+public interface QueryCacheFactory {
+
+ public QueryCache getQueryCache(
+ String regionName,
+ UpdateTimestampsCache updateTimestampsCache,
+ Settings settings,
+ Properties props)
+ throws HibernateException;
+
+}
diff --git a/src/org/hibernate/cache/QueryKey.java b/src/org/hibernate/cache/QueryKey.java
new file mode 100644
index 0000000000..5ccdcf48e2
--- /dev/null
+++ b/src/org/hibernate/cache/QueryKey.java
@@ -0,0 +1,117 @@
+//$Id$
+package org.hibernate.cache;
+
+import java.io.Serializable;
+import java.util.Map;
+import java.util.Set;
+
+import org.hibernate.EntityMode;
+import org.hibernate.engine.QueryParameters;
+import org.hibernate.engine.RowSelection;
+import org.hibernate.transform.ResultTransformer;
+import org.hibernate.type.Type;
+import org.hibernate.util.EqualsHelper;
+
+/**
+ * A key that identifies a particular query with bound parameter values
+ * @author Gavin King
+ */
+public class QueryKey implements Serializable {
+ private final String sqlQueryString;
+ private final Type[] types;
+ private final Object[] values;
+ private final Integer firstRow;
+ private final Integer maxRows;
+ private final Map namedParameters;
+ private final EntityMode entityMode;
+ private final Set filters;
+ private final int hashCode;
+
+ // the user provided resulttransformer, not the one used with "select new". Here to avoid mangling transformed/non-transformed results.
+ private final ResultTransformer customTransformer;
+
+ public QueryKey(String queryString, QueryParameters queryParameters, Set filters, EntityMode entityMode) {
+ this.sqlQueryString = queryString;
+ this.types = queryParameters.getPositionalParameterTypes();
+ this.values = queryParameters.getPositionalParameterValues();
+ RowSelection selection = queryParameters.getRowSelection();
+ if (selection!=null) {
+ firstRow = selection.getFirstRow();
+ maxRows = selection.getMaxRows();
+ }
+ else {
+ firstRow = null;
+ maxRows = null;
+ }
+ this.namedParameters = queryParameters.getNamedParameters();
+ this.entityMode = entityMode;
+ this.filters = filters;
+ this.customTransformer = queryParameters.getResultTransformer();
+ this.hashCode = getHashCode();
+ }
+
+ public boolean equals(Object other) {
+ QueryKey that = (QueryKey) other;
+ if ( !sqlQueryString.equals(that.sqlQueryString) ) return false;
+ if ( !EqualsHelper.equals(firstRow, that.firstRow) || !EqualsHelper.equals(maxRows, that.maxRows) ) return false;
+ if ( !EqualsHelper.equals(customTransformer, that.customTransformer) ) return false;
+ if (types==null) {
+ if (that.types!=null) return false;
+ }
+ else {
+ if (that.types==null) return false;
+ if ( types.length!=that.types.length ) return false;
+ for ( int i=0; i+ This package defines formats for disassembled state + kept in the second level cache. +
+ +