+ * Note that the second-level cache will be disabled if you
+ * supply a JDBC connection. Hibernate will not be able to track
+ * any statements you might have executed in the same transaction.
+ * Consider implementing your own ConnectionProvider.
+ *
+ * @param connection a connection provided by the application.
+ * @return Session
+ */
+ public org.hibernate.classic.Session openSession(Connection connection);
+
+ /**
+ * Create database connection and open a Session on it, specifying an
+ * interceptor.
+ *
+ * @param interceptor a session-scoped interceptor
+ * @return Session
+ * @throws HibernateException
+ */
+ public org.hibernate.classic.Session openSession(Interceptor interceptor) throws HibernateException;
+
+ /**
+ * Open a Session on the given connection, specifying an interceptor.
+ *
+ * Note that the second-level cache will be disabled if you
+ * supply a JDBC connection. Hibernate will not be able to track
+ * any statements you might have executed in the same transaction.
+ * Consider implementing your own ConnectionProvider.
+ *
+ * @param connection a connection provided by the application.
+ * @param interceptor a session-scoped interceptor
+ * @return Session
+ */
+ public org.hibernate.classic.Session openSession(Connection connection, Interceptor interceptor);
+
+ /**
+ * Create database connection and open a Session on it.
+ *
+ * @return Session
+ * @throws HibernateException
+ */
+ public org.hibernate.classic.Session openSession() throws HibernateException;
+
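+	// Illustrative usage only, not part of this diff: a hedged sketch of the
+	// overloads above, assuming a configured SessionFactory named 'factory'
+	// and an application-managed JDBC Connection named 'conn'.
+	//
+	//   Session s1 = factory.openSession();        // Hibernate obtains its own connection
+	//   Session s2 = factory.openSession( conn );  // caller-supplied connection; second-level cache disabled
+	//   s2.close();                                // the application remains responsible for 'conn'
+	//   s1.close();
+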
+ /**
+ * Obtains the current session. The definition of what exactly "current"
+ * means is controlled by the {@link org.hibernate.context.CurrentSessionContext} impl configured
+ * for use.
+ *
";
+
+ }
+}
diff --git a/src/org/hibernate/action/EntityAction.java b/src/org/hibernate/action/EntityAction.java
new file mode 100644
index 0000000000..47b53f24dd
--- /dev/null
+++ b/src/org/hibernate/action/EntityAction.java
@@ -0,0 +1,136 @@
+//$Id$
+package org.hibernate.action;
+
+import org.hibernate.AssertionFailure;
+import org.hibernate.engine.SessionImplementor;
+import org.hibernate.persister.entity.EntityPersister;
+import org.hibernate.pretty.MessageHelper;
+import org.hibernate.util.StringHelper;
+
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.Serializable;
+
+/**
+ * Base class for actions relating to insert/update/delete of an entity
+ * instance.
+ *
+ * @author Gavin King
+ */
+public abstract class EntityAction implements Executable, Serializable, Comparable {
+
+ private final String entityName;
+ private final Serializable id;
+ private final Object instance;
+ private final SessionImplementor session;
+
+ private transient EntityPersister persister;
+
+ /**
+ * Instantiate an action.
+ *
+ * @param session The session from which this action is coming.
+ * @param id The id of the entity
+	 * @param instance The entity instance
+ * @param persister The entity persister
+ */
+ protected EntityAction(SessionImplementor session, Serializable id, Object instance, EntityPersister persister) {
+ this.entityName = persister.getEntityName();
+ this.id = id;
+ this.instance = instance;
+ this.session = session;
+ this.persister = persister;
+ }
+
+ protected abstract boolean hasPostCommitEventListeners();
+
+ /**
+ * entity name accessor
+ *
+ * @return The entity name
+ */
+ public String getEntityName() {
+ return entityName;
+ }
+
+ /**
+ * entity id accessor
+ *
+ * @return The entity id
+ */
+ public final Serializable getId() {
+ if ( id instanceof DelayedPostInsertIdentifier ) {
+ return session.getPersistenceContext().getEntry( instance ).getId();
+ }
+ return id;
+ }
+
+ /**
+ * entity instance accessor
+ *
+ * @return The entity instance
+ */
+ public final Object getInstance() {
+ return instance;
+ }
+
+ /**
+ * originating session accessor
+ *
+ * @return The session from which this action originated.
+ */
+ public final SessionImplementor getSession() {
+ return session;
+ }
+
+ /**
+ * entity persister accessor
+ *
+ * @return The entity persister
+ */
+ public final EntityPersister getPersister() {
+ return persister;
+ }
+
+ public final Serializable[] getPropertySpaces() {
+ return persister.getPropertySpaces();
+ }
+
+ public void beforeExecutions() {
+ throw new AssertionFailure( "beforeExecutions() called for non-collection action" );
+ }
+
+ public boolean hasAfterTransactionCompletion() {
+ return persister.hasCache() || hasPostCommitEventListeners();
+ }
+
+ public String toString() {
+ return StringHelper.unqualify( getClass().getName() ) + MessageHelper.infoString( entityName, id );
+ }
+
+ public int compareTo(Object other) {
+ EntityAction action = ( EntityAction ) other;
+ //sort first by entity name
+ int roleComparison = entityName.compareTo( action.entityName );
+ if ( roleComparison != 0 ) {
+ return roleComparison;
+ }
+ else {
+ //then by id
+ return persister.getIdentifierType().compare( id, action.id, session.getEntityMode() );
+ }
+ }
+
+ /**
+ * Serialization...
+ *
+	 * @param ois The object stream
+ * @throws IOException Problem performing the default stream reading
+ * @throws ClassNotFoundException Problem performing the default stream reading
+ */
+ private void readObject(ObjectInputStream ois) throws IOException, ClassNotFoundException {
+ ois.defaultReadObject();
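+		// the persister field is transient, so after default deserialization
+		// re-resolve it from the owning session's factory by entity name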
+ persister = session.getFactory().getEntityPersister( entityName );
+ }
+}
+
diff --git a/src/org/hibernate/action/EntityDeleteAction.java b/src/org/hibernate/action/EntityDeleteAction.java
new file mode 100644
index 0000000000..50ad219627
--- /dev/null
+++ b/src/org/hibernate/action/EntityDeleteAction.java
@@ -0,0 +1,174 @@
+//$Id$
+package org.hibernate.action;
+
+import java.io.Serializable;
+
+import org.hibernate.AssertionFailure;
+import org.hibernate.HibernateException;
+import org.hibernate.cache.CacheKey;
+import org.hibernate.cache.CacheConcurrencyStrategy.SoftLock;
+import org.hibernate.engine.EntityEntry;
+import org.hibernate.engine.EntityKey;
+import org.hibernate.engine.PersistenceContext;
+import org.hibernate.engine.SessionImplementor;
+import org.hibernate.event.PostDeleteEvent;
+import org.hibernate.event.PostDeleteEventListener;
+import org.hibernate.event.PreDeleteEvent;
+import org.hibernate.event.PreDeleteEventListener;
+import org.hibernate.event.EventSource;
+import org.hibernate.persister.entity.EntityPersister;
+
+public final class EntityDeleteAction extends EntityAction {
+
+ private final Object version;
+ private SoftLock lock;
+ private final boolean isCascadeDeleteEnabled;
+ private final Object[] state;
+
+ public EntityDeleteAction(
+ final Serializable id,
+ final Object[] state,
+ final Object version,
+ final Object instance,
+ final EntityPersister persister,
+ final boolean isCascadeDeleteEnabled,
+ final SessionImplementor session) {
+ super( session, id, instance, persister );
+ this.version = version;
+ this.isCascadeDeleteEnabled = isCascadeDeleteEnabled;
+ this.state = state;
+ }
+
+ public void execute() throws HibernateException {
+ Serializable id = getId();
+ EntityPersister persister = getPersister();
+ SessionImplementor session = getSession();
+ Object instance = getInstance();
+
+ boolean veto = preDelete();
+
+ Object version = this.version;
+ if ( persister.isVersionPropertyGenerated() ) {
+ // we need to grab the version value from the entity, otherwise
+ // we have issues with generated-version entities that may have
+ // multiple actions queued during the same flush
+ version = persister.getVersion( instance, session.getEntityMode() );
+ }
+
+ final CacheKey ck;
+ if ( persister.hasCache() ) {
+ ck = new CacheKey(
+ id,
+ persister.getIdentifierType(),
+ persister.getRootEntityName(),
+ session.getEntityMode(),
+ session.getFactory()
+ );
+ lock = persister.getCache().lock(ck, version);
+ }
+ else {
+ ck = null;
+ }
+
+ if ( !isCascadeDeleteEnabled && !veto ) {
+ persister.delete( id, version, instance, session );
+ }
+
+ //postDelete:
+ // After actually deleting a row, record the fact that the instance no longer
+ // exists on the database (needed for identity-column key generation), and
+ // remove it from the session cache
+ final PersistenceContext persistenceContext = session.getPersistenceContext();
+ EntityEntry entry = persistenceContext.removeEntry( instance );
+ if ( entry == null ) {
+ throw new AssertionFailure( "possible nonthreadsafe access to session" );
+ }
+ entry.postDelete();
+
+ EntityKey key = new EntityKey( entry.getId(), entry.getPersister(), session.getEntityMode() );
+ persistenceContext.removeEntity(key);
+ persistenceContext.removeProxy(key);
+
+ if ( persister.hasCache() ) persister.getCache().evict(ck);
+
+ postDelete();
+
+ if ( getSession().getFactory().getStatistics().isStatisticsEnabled() && !veto ) {
+ getSession().getFactory().getStatisticsImplementor()
+ .deleteEntity( getPersister().getEntityName() );
+ }
+ }
+
+ private boolean preDelete() {
+ PreDeleteEventListener[] preListeners = getSession().getListeners()
+ .getPreDeleteEventListeners();
+ boolean veto = false;
+ if (preListeners.length>0) {
+ PreDeleteEvent preEvent = new PreDeleteEvent( getInstance(), getId(), state, getPersister() );
+ for ( int i = 0; i < preListeners.length; i++ ) {
+ veto = preListeners[i].onPreDelete(preEvent) || veto;
+ }
+ }
+ return veto;
+ }
+
+ private void postDelete() {
+ PostDeleteEventListener[] postListeners = getSession().getListeners()
+ .getPostDeleteEventListeners();
+ if (postListeners.length>0) {
+ PostDeleteEvent postEvent = new PostDeleteEvent(
+ getInstance(),
+ getId(),
+ state,
+ getPersister(),
+ (EventSource) getSession()
+ );
+ for ( int i = 0; i < postListeners.length; i++ ) {
+ postListeners[i].onPostDelete(postEvent);
+ }
+ }
+ }
+
+ private void postCommitDelete() {
+ PostDeleteEventListener[] postListeners = getSession().getListeners()
+ .getPostCommitDeleteEventListeners();
+ if (postListeners.length>0) {
+ PostDeleteEvent postEvent = new PostDeleteEvent(
+ getInstance(),
+ getId(),
+ state,
+ getPersister(),
+ (EventSource) getSession()
+ );
+ for ( int i = 0; i < postListeners.length; i++ ) {
+ postListeners[i].onPostDelete(postEvent);
+ }
+ }
+ }
+
+ public void afterTransactionCompletion(boolean success) throws HibernateException {
+ if ( getPersister().hasCache() ) {
+ final CacheKey ck = new CacheKey(
+ getId(),
+ getPersister().getIdentifierType(),
+ getPersister().getRootEntityName(),
+ getSession().getEntityMode(),
+ getSession().getFactory()
+ );
+ getPersister().getCache().release(ck, lock);
+ }
+ postCommitDelete();
+ }
+
+ protected boolean hasPostCommitEventListeners() {
+ return getSession().getListeners().getPostCommitDeleteEventListeners().length>0;
+ }
+
+}
+
+
+
+
+
+
+
diff --git a/src/org/hibernate/action/EntityIdentityInsertAction.java b/src/org/hibernate/action/EntityIdentityInsertAction.java
new file mode 100644
index 0000000000..59bf5cb054
--- /dev/null
+++ b/src/org/hibernate/action/EntityIdentityInsertAction.java
@@ -0,0 +1,159 @@
+//$Id$
+package org.hibernate.action;
+
+import java.io.Serializable;
+
+import org.hibernate.HibernateException;
+import org.hibernate.AssertionFailure;
+import org.hibernate.engine.SessionImplementor;
+import org.hibernate.engine.EntityKey;
+import org.hibernate.event.PostInsertEvent;
+import org.hibernate.event.PostInsertEventListener;
+import org.hibernate.event.PreInsertEvent;
+import org.hibernate.event.PreInsertEventListener;
+import org.hibernate.event.EventSource;
+import org.hibernate.persister.entity.EntityPersister;
+
+public final class EntityIdentityInsertAction extends EntityAction {
+ private final Object[] state;
+ private final boolean isDelayed;
+ private final EntityKey delayedEntityKey;
+ //private CacheEntry cacheEntry;
+ private Serializable generatedId;
+
+ public EntityIdentityInsertAction(
+ Object[] state,
+ Object instance,
+ EntityPersister persister,
+ SessionImplementor session,
+ boolean isDelayed) throws HibernateException {
+ super( session, null, instance, persister );
+ this.state = state;
+ this.isDelayed = isDelayed;
+ delayedEntityKey = isDelayed ? generateDelayedEntityKey() : null;
+ }
+
+ public void execute() throws HibernateException {
+
+ final EntityPersister persister = getPersister();
+ final SessionImplementor session = getSession();
+ final Object instance = getInstance();
+
+ boolean veto = preInsert();
+
+ // Don't need to lock the cache here, since if someone
+ // else inserted the same pk first, the insert would fail
+
+ if ( !veto ) {
+ generatedId = persister.insert( state, instance, session );
+ if ( persister.hasInsertGeneratedProperties() ) {
+ persister.processInsertGeneratedProperties( generatedId, instance, state, session );
+ }
+			//need to do this here rather than in the save event listener so that
+			//the post-insert events see an id-filled entity when IDENTITY is used (EJB3)
+ persister.setIdentifier( instance, generatedId, session.getEntityMode() );
+ }
+
+
+ //TODO: this bit actually has to be called after all cascades!
+ // but since identity insert is called *synchronously*,
+ // instead of asynchronously as other actions, it isn't
+ /*if ( persister.hasCache() && !persister.isCacheInvalidationRequired() ) {
+ cacheEntry = new CacheEntry(object, persister, session);
+ persister.getCache().insert(generatedId, cacheEntry);
+ }*/
+
+ postInsert();
+
+ if ( session.getFactory().getStatistics().isStatisticsEnabled() && !veto ) {
+ session.getFactory().getStatisticsImplementor()
+ .insertEntity( getPersister().getEntityName() );
+ }
+
+ }
+
+ private void postInsert() {
+ if ( isDelayed ) {
+ getSession().getPersistenceContext().replaceDelayedEntityIdentityInsertKeys( delayedEntityKey, generatedId );
+ }
+ PostInsertEventListener[] postListeners = getSession().getListeners()
+ .getPostInsertEventListeners();
+ if (postListeners.length>0) {
+ PostInsertEvent postEvent = new PostInsertEvent(
+ getInstance(),
+ generatedId,
+ state,
+ getPersister(),
+ (EventSource) getSession()
+ );
+ for ( int i = 0; i < postListeners.length; i++ ) {
+ postListeners[i].onPostInsert(postEvent);
+ }
+ }
+ }
+
+ private void postCommitInsert() {
+ PostInsertEventListener[] postListeners = getSession().getListeners()
+ .getPostCommitInsertEventListeners();
+ if (postListeners.length>0) {
+ PostInsertEvent postEvent = new PostInsertEvent(
+ getInstance(),
+ generatedId,
+ state,
+ getPersister(),
+ (EventSource) getSession()
+ );
+ for ( int i = 0; i < postListeners.length; i++ ) {
+ postListeners[i].onPostInsert(postEvent);
+ }
+ }
+ }
+
+ private boolean preInsert() {
+ PreInsertEventListener[] preListeners = getSession().getListeners()
+ .getPreInsertEventListeners();
+ boolean veto = false;
+ if (preListeners.length>0) {
+ PreInsertEvent preEvent = new PreInsertEvent( getInstance(), null, state, getPersister(), getSession() );
+ for ( int i = 0; i < preListeners.length; i++ ) {
+ veto = preListeners[i].onPreInsert(preEvent) || veto;
+ }
+ }
+ return veto;
+ }
+
+ //Make 100% certain that this is called before any subsequent ScheduledUpdate.afterTransactionCompletion()!!
+ public void afterTransactionCompletion(boolean success) throws HibernateException {
+ //TODO: reenable if we also fix the above todo
+ /*EntityPersister persister = getEntityPersister();
+ if ( success && persister.hasCache() && !persister.isCacheInvalidationRequired() ) {
+ persister.getCache().afterInsert( getGeneratedId(), cacheEntry );
+ }*/
+ postCommitInsert();
+ }
+
+ public boolean hasAfterTransactionCompletion() {
+ //TODO: simply remove this override
+ // if we fix the above todos
+ return hasPostCommitEventListeners();
+ }
+
+ protected boolean hasPostCommitEventListeners() {
+ return getSession().getListeners().getPostCommitInsertEventListeners().length>0;
+ }
+
+ public final Serializable getGeneratedId() {
+ return generatedId;
+ }
+
+ public EntityKey getDelayedEntityKey() {
+ return delayedEntityKey;
+ }
+
+ private synchronized EntityKey generateDelayedEntityKey() {
+ if ( !isDelayed ) {
+ throw new AssertionFailure( "cannot request delayed entity-key for non-delayed post-insert-id generation" );
+ }
+ return new EntityKey( new DelayedPostInsertIdentifier(), getPersister(), getSession().getEntityMode() );
+ }
+}
diff --git a/src/org/hibernate/action/EntityInsertAction.java b/src/org/hibernate/action/EntityInsertAction.java
new file mode 100644
index 0000000000..3c1acdc7d9
--- /dev/null
+++ b/src/org/hibernate/action/EntityInsertAction.java
@@ -0,0 +1,200 @@
+//$Id$
+package org.hibernate.action;
+
+import java.io.Serializable;
+
+import org.hibernate.AssertionFailure;
+import org.hibernate.HibernateException;
+import org.hibernate.cache.CacheKey;
+import org.hibernate.cache.entry.CacheEntry;
+import org.hibernate.engine.EntityEntry;
+import org.hibernate.engine.SessionFactoryImplementor;
+import org.hibernate.engine.SessionImplementor;
+import org.hibernate.engine.Versioning;
+import org.hibernate.event.PostInsertEvent;
+import org.hibernate.event.PostInsertEventListener;
+import org.hibernate.event.PreInsertEvent;
+import org.hibernate.event.PreInsertEventListener;
+import org.hibernate.event.EventSource;
+import org.hibernate.persister.entity.EntityPersister;
+
+public final class EntityInsertAction extends EntityAction {
+
+ private Object[] state;
+ private Object version;
+ private Object cacheEntry;
+
+ public EntityInsertAction(
+ Serializable id,
+ Object[] state,
+ Object instance,
+ Object version,
+ EntityPersister persister,
+ SessionImplementor session) throws HibernateException {
+ super( session, id, instance, persister );
+ this.state = state;
+ this.version = version;
+ }
+
+ public Object[] getState() {
+ return state;
+ }
+
+ public void execute() throws HibernateException {
+ EntityPersister persister = getPersister();
+ SessionImplementor session = getSession();
+ Object instance = getInstance();
+ Serializable id = getId();
+
+ boolean veto = preInsert();
+
+ // Don't need to lock the cache here, since if someone
+ // else inserted the same pk first, the insert would fail
+
+ if ( !veto ) {
+
+ persister.insert( id, state, instance, session );
+
+ EntityEntry entry = session.getPersistenceContext().getEntry( instance );
+ if ( entry == null ) {
+ throw new AssertionFailure( "possible nonthreadsafe access to session" );
+ }
+
+ entry.postInsert();
+
+ if ( persister.hasInsertGeneratedProperties() ) {
+ persister.processInsertGeneratedProperties( id, instance, state, session );
+ if ( persister.isVersionPropertyGenerated() ) {
+ version = Versioning.getVersion(state, persister);
+ }
+ entry.postUpdate(instance, state, version);
+ }
+
+ }
+
+ final SessionFactoryImplementor factory = getSession().getFactory();
+
+ if ( isCachePutEnabled( persister, session ) ) {
+
+ CacheEntry ce = new CacheEntry(
+ state,
+ persister,
+ persister.hasUninitializedLazyProperties( instance, session.getEntityMode() ),
+ version,
+ session,
+ instance
+ );
+
+ cacheEntry = persister.getCacheEntryStructure().structure(ce);
+ final CacheKey ck = new CacheKey(
+ id,
+ persister.getIdentifierType(),
+ persister.getRootEntityName(),
+ session.getEntityMode(),
+ session.getFactory()
+ );
+// boolean put = persister.getCache().insert(ck, cacheEntry);
+ boolean put = persister.getCache().insert( ck, cacheEntry, version );
+
+ if ( put && factory.getStatistics().isStatisticsEnabled() ) {
+ factory.getStatisticsImplementor()
+ .secondLevelCachePut( getPersister().getCache().getRegionName() );
+ }
+
+ }
+
+ postInsert();
+
+ if ( factory.getStatistics().isStatisticsEnabled() && !veto ) {
+ factory.getStatisticsImplementor()
+ .insertEntity( getPersister().getEntityName() );
+ }
+
+ }
+
+ private void postInsert() {
+ PostInsertEventListener[] postListeners = getSession().getListeners()
+ .getPostInsertEventListeners();
+ if ( postListeners.length > 0 ) {
+ PostInsertEvent postEvent = new PostInsertEvent(
+ getInstance(),
+ getId(),
+ state,
+ getPersister(),
+ (EventSource) getSession()
+ );
+ for ( int i = 0; i < postListeners.length; i++ ) {
+ postListeners[i].onPostInsert(postEvent);
+ }
+ }
+ }
+
+ private void postCommitInsert() {
+ PostInsertEventListener[] postListeners = getSession().getListeners()
+ .getPostCommitInsertEventListeners();
+ if ( postListeners.length > 0 ) {
+ PostInsertEvent postEvent = new PostInsertEvent(
+ getInstance(),
+ getId(),
+ state,
+ getPersister(),
+ (EventSource) getSession()
+ );
+ for ( int i = 0; i < postListeners.length; i++ ) {
+ postListeners[i].onPostInsert(postEvent);
+ }
+ }
+ }
+
+ private boolean preInsert() {
+ PreInsertEventListener[] preListeners = getSession().getListeners()
+ .getPreInsertEventListeners();
+ boolean veto = false;
+ if (preListeners.length>0) {
+ PreInsertEvent preEvent = new PreInsertEvent( getInstance(), getId(), state, getPersister(), getSession() );
+ for ( int i = 0; i < preListeners.length; i++ ) {
+ veto = preListeners[i].onPreInsert(preEvent) || veto;
+ }
+ }
+ return veto;
+ }
+
+ //Make 100% certain that this is called before any subsequent ScheduledUpdate.afterTransactionCompletion()!!
+ public void afterTransactionCompletion(boolean success) throws HibernateException {
+ EntityPersister persister = getPersister();
+ if ( success && isCachePutEnabled( persister, getSession() ) ) {
+ final CacheKey ck = new CacheKey(
+ getId(),
+ persister.getIdentifierType(),
+ persister.getRootEntityName(),
+ getSession().getEntityMode(),
+ getSession().getFactory()
+ );
+ boolean put = persister.getCache().afterInsert(ck, cacheEntry, version );
+
+ if ( put && getSession().getFactory().getStatistics().isStatisticsEnabled() ) {
+ getSession().getFactory().getStatisticsImplementor()
+ .secondLevelCachePut( getPersister().getCache().getRegionName() );
+ }
+ }
+ postCommitInsert();
+ }
+
+ protected boolean hasPostCommitEventListeners() {
+ return getSession().getListeners().getPostCommitInsertEventListeners().length>0;
+ }
+
+ private boolean isCachePutEnabled(EntityPersister persister, SessionImplementor session) {
+ return persister.hasCache() &&
+ !persister.isCacheInvalidationRequired() &&
+ session.getCacheMode().isPutEnabled();
+ }
+
+}
+
+
+
+
+
+
+
diff --git a/src/org/hibernate/action/EntityUpdateAction.java b/src/org/hibernate/action/EntityUpdateAction.java
new file mode 100644
index 0000000000..28c8e92b29
--- /dev/null
+++ b/src/org/hibernate/action/EntityUpdateAction.java
@@ -0,0 +1,261 @@
+//$Id$
+package org.hibernate.action;
+
+import java.io.Serializable;
+
+import org.hibernate.AssertionFailure;
+import org.hibernate.HibernateException;
+import org.hibernate.cache.CacheException;
+import org.hibernate.cache.CacheKey;
+import org.hibernate.cache.CacheConcurrencyStrategy.SoftLock;
+import org.hibernate.cache.entry.CacheEntry;
+import org.hibernate.engine.EntityEntry;
+import org.hibernate.engine.SessionFactoryImplementor;
+import org.hibernate.engine.SessionImplementor;
+import org.hibernate.engine.Status;
+import org.hibernate.engine.Versioning;
+import org.hibernate.event.PostUpdateEvent;
+import org.hibernate.event.PostUpdateEventListener;
+import org.hibernate.event.PreUpdateEvent;
+import org.hibernate.event.PreUpdateEventListener;
+import org.hibernate.event.EventSource;
+import org.hibernate.persister.entity.EntityPersister;
+import org.hibernate.type.TypeFactory;
+
+public final class EntityUpdateAction extends EntityAction {
+
+ private final Object[] state;
+ private final Object[] previousState;
+ private final Object previousVersion;
+ private Object nextVersion;
+ private final int[] dirtyFields;
+ private final boolean hasDirtyCollection;
+ private final Object rowId;
+ private Object cacheEntry;
+ private SoftLock lock;
+
+ public EntityUpdateAction(
+ final Serializable id,
+ final Object[] state,
+ final int[] dirtyProperties,
+ final boolean hasDirtyCollection,
+ final Object[] previousState,
+ final Object previousVersion,
+ final Object nextVersion,
+ final Object instance,
+ final Object rowId,
+ final EntityPersister persister,
+ final SessionImplementor session) throws HibernateException {
+ super( session, id, instance, persister );
+ this.state = state;
+ this.previousState = previousState;
+ this.previousVersion = previousVersion;
+ this.nextVersion = nextVersion;
+ this.dirtyFields = dirtyProperties;
+ this.hasDirtyCollection = hasDirtyCollection;
+ this.rowId = rowId;
+ }
+
+ public void execute() throws HibernateException {
+ Serializable id = getId();
+ EntityPersister persister = getPersister();
+ SessionImplementor session = getSession();
+ Object instance = getInstance();
+
+ boolean veto = preUpdate();
+
+ final SessionFactoryImplementor factory = getSession().getFactory();
+ Object previousVersion = this.previousVersion;
+ if ( persister.isVersionPropertyGenerated() ) {
+ // we need to grab the version value from the entity, otherwise
+ // we have issues with generated-version entities that may have
+ // multiple actions queued during the same flush
+ previousVersion = persister.getVersion( instance, session.getEntityMode() );
+ }
+
+ final CacheKey ck;
+ if ( persister.hasCache() ) {
+ ck = new CacheKey(
+ id,
+ persister.getIdentifierType(),
+ persister.getRootEntityName(),
+ session.getEntityMode(),
+ session.getFactory()
+ );
+ lock = persister.getCache().lock(ck, previousVersion);
+ }
+ else {
+ ck = null;
+ }
+
+ if ( !veto ) {
+ persister.update(
+ id,
+ state,
+ dirtyFields,
+ hasDirtyCollection,
+ previousState,
+ previousVersion,
+ instance,
+ rowId,
+ session
+ );
+ }
+
+ EntityEntry entry = getSession().getPersistenceContext().getEntry( instance );
+ if ( entry == null ) {
+ throw new AssertionFailure( "possible nonthreadsafe access to session" );
+ }
+
+ if ( entry.getStatus()==Status.MANAGED || persister.isVersionPropertyGenerated() ) {
+ // get the updated snapshot of the entity state by cloning current state;
+ // it is safe to copy in place, since by this time no-one else (should have)
+ // has a reference to the array
+ TypeFactory.deepCopy(
+ state,
+ persister.getPropertyTypes(),
+ persister.getPropertyCheckability(),
+ state,
+ session
+ );
+ if ( persister.hasUpdateGeneratedProperties() ) {
+				// this entity defines property generation, so process those generated
+ // values...
+ persister.processUpdateGeneratedProperties( id, instance, state, session );
+ if ( persister.isVersionPropertyGenerated() ) {
+ nextVersion = Versioning.getVersion( state, persister );
+ }
+ }
+ // have the entity entry perform post-update processing, passing it the
+ // update state and the new version (if one).
+ entry.postUpdate( instance, state, nextVersion );
+ }
+
+ if ( persister.hasCache() ) {
+ if ( persister.isCacheInvalidationRequired() || entry.getStatus()!=Status.MANAGED ) {
+ persister.getCache().evict(ck);
+ }
+ else {
+ //TODO: inefficient if that cache is just going to ignore the updated state!
+ CacheEntry ce = new CacheEntry(
+ state,
+ persister,
+ persister.hasUninitializedLazyProperties( instance, session.getEntityMode() ),
+ nextVersion,
+ getSession(),
+ instance
+ );
+ cacheEntry = persister.getCacheEntryStructure().structure(ce);
+// boolean put = persister.getCache().update(ck, cacheEntry);
+ boolean put = persister.getCache().update( ck, cacheEntry, nextVersion, previousVersion );
+
+ if ( put && factory.getStatistics().isStatisticsEnabled() ) {
+ factory.getStatisticsImplementor()
+ .secondLevelCachePut( getPersister().getCache().getRegionName() );
+ }
+ }
+ }
+
+ postUpdate();
+
+ if ( factory.getStatistics().isStatisticsEnabled() && !veto ) {
+ factory.getStatisticsImplementor()
+ .updateEntity( getPersister().getEntityName() );
+ }
+ }
+
+ private void postUpdate() {
+ PostUpdateEventListener[] postListeners = getSession().getListeners()
+ .getPostUpdateEventListeners();
+ if (postListeners.length>0) {
+ PostUpdateEvent postEvent = new PostUpdateEvent(
+ getInstance(),
+ getId(),
+ state,
+ previousState,
+ getPersister(),
+ (EventSource) getSession()
+ );
+ for ( int i = 0; i < postListeners.length; i++ ) {
+ postListeners[i].onPostUpdate(postEvent);
+ }
+ }
+ }
+
+ private void postCommitUpdate() {
+ PostUpdateEventListener[] postListeners = getSession().getListeners()
+ .getPostCommitUpdateEventListeners();
+ if (postListeners.length>0) {
+ PostUpdateEvent postEvent = new PostUpdateEvent(
+ getInstance(),
+ getId(),
+ state,
+ previousState,
+ getPersister(),
+ (EventSource) getSession()
+ );
+ for ( int i = 0; i < postListeners.length; i++ ) {
+ postListeners[i].onPostUpdate(postEvent);
+ }
+ }
+ }
+
+ private boolean preUpdate() {
+ PreUpdateEventListener[] preListeners = getSession().getListeners()
+ .getPreUpdateEventListeners();
+ boolean veto = false;
+ if (preListeners.length>0) {
+ PreUpdateEvent preEvent = new PreUpdateEvent(
+ getInstance(),
+ getId(),
+ state,
+ previousState,
+ getPersister(),
+ getSession()
+ );
+ for ( int i = 0; i < preListeners.length; i++ ) {
+ veto = preListeners[i].onPreUpdate(preEvent) || veto;
+ }
+ }
+ return veto;
+ }
+
+ public void afterTransactionCompletion(boolean success) throws CacheException {
+ EntityPersister persister = getPersister();
+ if ( persister.hasCache() ) {
+
+ final CacheKey ck = new CacheKey(
+ getId(),
+ persister.getIdentifierType(),
+ persister.getRootEntityName(),
+ getSession().getEntityMode(),
+ getSession().getFactory()
+ );
+
+ if ( success && cacheEntry!=null /*!persister.isCacheInvalidationRequired()*/ ) {
+ boolean put = persister.getCache().afterUpdate(ck, cacheEntry, nextVersion, lock );
+
+ if ( put && getSession().getFactory().getStatistics().isStatisticsEnabled() ) {
+ getSession().getFactory().getStatisticsImplementor()
+ .secondLevelCachePut( getPersister().getCache().getRegionName() );
+ }
+ }
+ else {
+ persister.getCache().release(ck, lock );
+ }
+ }
+ postCommitUpdate();
+ }
+
+ protected boolean hasPostCommitEventListeners() {
+ return getSession().getListeners().getPostCommitUpdateEventListeners().length>0;
+ }
+
+}
+
+
+
+
+
+
+
diff --git a/src/org/hibernate/action/Executable.java b/src/org/hibernate/action/Executable.java
new file mode 100644
index 0000000000..4860fa9f1e
--- /dev/null
+++ b/src/org/hibernate/action/Executable.java
@@ -0,0 +1,39 @@
+//$Id$
+package org.hibernate.action;
+
+import org.hibernate.HibernateException;
+
+import java.io.Serializable;
+
+/**
+ * An operation which may be scheduled for later execution.
+ * Usually, the operation is a database insert/update/delete,
+ * together with required second-level cache management.
+ *
+ * @author Gavin King
+ */
+public interface Executable {
+ /**
+ * Called before executing any actions
+ */
+ public void beforeExecutions() throws HibernateException;
+ /**
+ * Execute this action
+ */
+ public void execute() throws HibernateException;
+ /**
+ * Do we need to retain this instance until after the
+ * transaction completes?
+	 * @return false if this class defines a no-op
+	 * afterTransactionCompletion()
+ */
+ public boolean hasAfterTransactionCompletion();
+ /**
+ * Called after the transaction completes
+ */
+ public void afterTransactionCompletion(boolean success) throws HibernateException;
+ /**
+ * What spaces (tables) are affected by this action?
+ */
+ public Serializable[] getPropertySpaces();
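+
+	// Illustrative lifecycle only, not part of this diff: the action queue is
+	// expected to drive an Executable roughly as
+	//   beforeExecutions(); execute();
+	//   if ( hasAfterTransactionCompletion() ) afterTransactionCompletion( success );
+	// with getPropertySpaces() identifying the tables an execution affects.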
+}
diff --git a/src/org/hibernate/action/package.html b/src/org/hibernate/action/package.html
new file mode 100755
index 0000000000..c9dd1b09b3
--- /dev/null
+++ b/src/org/hibernate/action/package.html
@@ -0,0 +1,10 @@
+
+
+
+
+
+ This package defines "actions" that are scheduled for
+	asynchronous execution by the event listeners.
+
+
+
diff --git a/src/org/hibernate/bytecode/AbstractClassTransformerImpl.java b/src/org/hibernate/bytecode/AbstractClassTransformerImpl.java
new file mode 100644
index 0000000000..ad457485a2
--- /dev/null
+++ b/src/org/hibernate/bytecode/AbstractClassTransformerImpl.java
@@ -0,0 +1,45 @@
+//$Id: $
+package org.hibernate.bytecode;
+
+import org.hibernate.bytecode.util.ClassFilter;
+import org.hibernate.bytecode.util.FieldFilter;
+
+import java.security.ProtectionDomain;
+
+/**
+ * @author Emmanuel Bernard
+ * @author Steve Ebersole
+ */
+public abstract class AbstractClassTransformerImpl implements ClassTransformer {
+
+ protected final ClassFilter classFilter;
+ protected final FieldFilter fieldFilter;
+
+ protected AbstractClassTransformerImpl(ClassFilter classFilter, FieldFilter fieldFilter) {
+ this.classFilter = classFilter;
+ this.fieldFilter = fieldFilter;
+ }
+
+ public byte[] transform(
+ ClassLoader loader,
+ String className,
+ Class classBeingRedefined,
+ ProtectionDomain protectionDomain,
+ byte[] classfileBuffer) {
+ // to be safe...
+ className = className.replace( '/', '.' );
+ if ( classFilter.shouldInstrumentClass( className ) ) {
+ return doTransform( loader, className, classBeingRedefined, protectionDomain, classfileBuffer );
+ }
+ else {
+ return classfileBuffer;
+ }
+ }
+
+ protected abstract byte[] doTransform(
+ ClassLoader loader,
+ String className,
+ Class classBeingRedefined,
+ ProtectionDomain protectionDomain,
+ byte[] classfileBuffer);
+}
diff --git a/src/org/hibernate/bytecode/BasicProxyFactory.java b/src/org/hibernate/bytecode/BasicProxyFactory.java
new file mode 100644
index 0000000000..0bb5e582a9
--- /dev/null
+++ b/src/org/hibernate/bytecode/BasicProxyFactory.java
@@ -0,0 +1,10 @@
+package org.hibernate.bytecode;
+
+/**
+ * A proxy factory for "basic proxy" generation
+ *
+ * @author Steve Ebersole
+ */
+public interface BasicProxyFactory {
+ public Object getProxy();
+}
diff --git a/src/org/hibernate/bytecode/BytecodeProvider.java b/src/org/hibernate/bytecode/BytecodeProvider.java
new file mode 100644
index 0000000000..0f780d937a
--- /dev/null
+++ b/src/org/hibernate/bytecode/BytecodeProvider.java
@@ -0,0 +1,49 @@
+package org.hibernate.bytecode;
+
+import org.hibernate.bytecode.util.ClassFilter;
+import org.hibernate.bytecode.util.FieldFilter;
+
+/**
+ * Contract for providers of bytecode services to Hibernate.
+ *
+ * Bytecode requirements break down into basically 3 areas:
+ * - proxy generation (both for runtime-lazy-loading and basic proxy generation)
+ *   {@link #getProxyFactoryFactory()}
+ * - bean reflection optimization {@link #getReflectionOptimizer}
+ * - field-access instrumentation {@link #getTransformer}
+ *
+ * @author Steve Ebersole
+ */
+public interface BytecodeProvider {
+ /**
+ * Retrieve the specific factory for this provider capable of
+ * generating run-time proxies for lazy-loading purposes.
+ *
+	 * @return The provider-specific factory.
+ */
+ public ProxyFactoryFactory getProxyFactoryFactory();
+
+ /**
+ * Retrieve the ReflectionOptimizer delegate for this provider
+ * capable of generating reflection optimization components.
+ *
+ * @param clazz The class to be reflected upon.
+ * @param getterNames Names of all property getters to be accessed via reflection.
+ * @param setterNames Names of all property setters to be accessed via reflection.
+ * @param types The types of all properties to be accessed.
+ * @return The reflection optimization delegate.
+ */
+ public ReflectionOptimizer getReflectionOptimizer(Class clazz, String[] getterNames, String[] setterNames, Class[] types);
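+	// Illustrative usage only, not part of this diff: a hedged sketch using a
+	// hypothetical POJO 'Person' exposing name/age properties.
+	//
+	//   ReflectionOptimizer optimizer = provider.getReflectionOptimizer(
+	//       Person.class,
+	//       new String[] { "getName", "getAge" },
+	//       new String[] { "setName", "setAge" },
+	//       new Class[] { String.class, Integer.class }
+	//   );
+	//   Object person = optimizer.getInstantiationOptimizer().newInstance();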
+
+ /**
+ * Generate a ClassTransformer capable of performing bytecode manipulation.
+ *
+ * @param classFilter filter used to limit which classes are to be instrumented
+ * via this ClassTransformer.
+ * @param fieldFilter filter used to limit which fields are to be instrumented
+ * via this ClassTransformer.
+ * @return The appropriate ClassTransformer.
+ */
+ public ClassTransformer getTransformer(ClassFilter classFilter, FieldFilter fieldFilter);
+}
diff --git a/src/org/hibernate/bytecode/ClassTransformer.java b/src/org/hibernate/bytecode/ClassTransformer.java
new file mode 100644
index 0000000000..2f380cec4a
--- /dev/null
+++ b/src/org/hibernate/bytecode/ClassTransformer.java
@@ -0,0 +1,34 @@
+//$Id: $
+package org.hibernate.bytecode;
+
+import java.security.ProtectionDomain;
+
+/**
+ * A persistence provider provides an instance of this interface
+ * to the PersistenceUnitInfo.addTransformer method.
+ * The supplied transformer instance will get called to transform
+ * entity class files when they are loaded and redefined. The transformation
+ * occurs before the class is defined by the JVM.
+ *
+ * @author Bill Burke
+ * @author Emmanuel Bernard
+ */
+public interface ClassTransformer
+{
+ /**
+	 * Invoked when a class is being loaded or redefined to add hooks for persistence bytecode manipulation.
+	 *
+	 * @param loader the defining class loader of the class being transformed. It may be null if using the bootstrap loader
+ * @param classname The name of the class being transformed
+ * @param classBeingRedefined If an already loaded class is being redefined, then pass this as a parameter
+ * @param protectionDomain ProtectionDomain of the class being (re)-defined
+ * @param classfileBuffer The input byte buffer in class file format
+ * @return A well-formed class file that can be loaded
+ */
+ public byte[] transform(ClassLoader loader,
+ String classname,
+ Class classBeingRedefined,
+ ProtectionDomain protectionDomain,
+ byte[] classfileBuffer);
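+
+	// Illustrative only, not part of this diff: a transformer that performs no
+	// enhancement may simply return the classfileBuffer it was given; callers
+	// (see InstrumentedClassLoader) treat an unchanged buffer as "no
+	// transformation took place".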
+}
diff --git a/src/org/hibernate/bytecode/InstrumentedClassLoader.java b/src/org/hibernate/bytecode/InstrumentedClassLoader.java
new file mode 100644
index 0000000000..dc5aa70a2c
--- /dev/null
+++ b/src/org/hibernate/bytecode/InstrumentedClassLoader.java
@@ -0,0 +1,54 @@
+package org.hibernate.bytecode;
+
+import org.hibernate.bytecode.util.ByteCodeHelper;
+
+import java.io.InputStream;
+
+/**
+ * A specialized classloader which performs bytecode enhancement on class
+ * definitions as they are loaded into the classloader scope.
+ *
+ * @author Emmanuel Bernard
+ * @author Steve Ebersole
+ */
+public class InstrumentedClassLoader extends ClassLoader {
+
+ private ClassTransformer classTransformer;
+
+ public InstrumentedClassLoader(ClassLoader parent, ClassTransformer classTransformer) {
+ super( parent );
+ this.classTransformer = classTransformer;
+ }
+
+ public Class loadClass(String name) throws ClassNotFoundException {
+ if ( name.startsWith( "java." ) || classTransformer == null ) {
+ return getParent().loadClass( name );
+ }
+
+ Class c = findLoadedClass( name );
+ if ( c != null ) {
+ return c;
+ }
+
+ InputStream is = this.getResourceAsStream( name.replace( '.', '/' ) + ".class" );
+ if ( is == null ) {
+ throw new ClassNotFoundException( name + " not found" );
+ }
+
+ try {
+ byte[] originalBytecode = ByteCodeHelper.readByteCode( is );
+ byte[] transformedBytecode = classTransformer.transform( getParent(), name, null, null, originalBytecode );
+ if ( originalBytecode == transformedBytecode ) {
+ // no transformations took place, so handle it as we would a
+ // non-instrumented class
+ return getParent().loadClass( name );
+ }
+ else {
+ return defineClass( name, transformedBytecode, 0, transformedBytecode.length );
+ }
+ }
+ catch( Throwable t ) {
+ throw new ClassNotFoundException( name + " not found", t );
+ }
+ }
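+
+	// Illustrative usage only, not part of this diff: assuming an existing
+	// ClassTransformer named 'transformer' and a hypothetical entity class name:
+	//
+	//   ClassLoader loader = new InstrumentedClassLoader(
+	//       Thread.currentThread().getContextClassLoader(), transformer );
+	//   Class entityClass = loader.loadClass( "com.example.MyEntity" );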
+}
diff --git a/src/org/hibernate/bytecode/ProxyFactoryFactory.java b/src/org/hibernate/bytecode/ProxyFactoryFactory.java
new file mode 100644
index 0000000000..c44dc926c7
--- /dev/null
+++ b/src/org/hibernate/bytecode/ProxyFactoryFactory.java
@@ -0,0 +1,37 @@
+package org.hibernate.bytecode;
+
+import org.hibernate.proxy.ProxyFactory;
+
+/**
+ * An interface for factories of {@link ProxyFactory proxy factory} instances.
+ *
+ * Currently used to abstract away from the tuplizer whether we are using CGLIB or
+ * Javassist for lazy proxy generation.
+ *
+ * @author Steve Ebersole
+ */
+public interface ProxyFactoryFactory {
+ /**
+ * Build a proxy factory specifically for handling runtime
+ * lazy loading.
+ *
+ * @return The lazy-load proxy factory.
+ */
+ public ProxyFactory buildProxyFactory();
+
+ /**
+	 * Build a proxy factory for basic proxy concerns. The returned factory
+	 * should be capable of properly handling newInstance() calls.
+ *
+ * Should build basic proxies essentially equivalent to JDK proxies in
+ * terms of capabilities, but should be able to deal with abstract super
+ * classes in addition to proxy interfaces.
+ *
+ * Must pass in either superClass or interfaces (or both).
+ *
+ * @param superClass The abstract super class (or null if none).
+ * @param interfaces Interfaces to be proxied (or null if none).
+ * @return The proxy class
+ */
+ public BasicProxyFactory buildBasicProxyFactory(Class superClass, Class[] interfaces);
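+
+	// Illustrative usage only, not part of this diff: 'AbstractThing' and
+	// 'Named' are hypothetical stand-ins for an abstract superclass and a
+	// proxy interface.
+	//
+	//   BasicProxyFactory basic = factory.buildBasicProxyFactory(
+	//       AbstractThing.class, new Class[] { Named.class } );
+	//   Object proxy = basic.getProxy();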
+}
diff --git a/src/org/hibernate/bytecode/ReflectionOptimizer.java b/src/org/hibernate/bytecode/ReflectionOptimizer.java
new file mode 100644
index 0000000000..83d6b60153
--- /dev/null
+++ b/src/org/hibernate/bytecode/ReflectionOptimizer.java
@@ -0,0 +1,35 @@
+package org.hibernate.bytecode;
+
+/**
+ * Represents reflection optimization for a particular class.
+ *
+ * @author Steve Ebersole
+ */
+public interface ReflectionOptimizer {
+
+ public InstantiationOptimizer getInstantiationOptimizer();
+ public AccessOptimizer getAccessOptimizer();
+
+ /**
+ * Represents optimized entity instantiation.
+ */
+ public static interface InstantiationOptimizer {
+ /**
+ * Perform instantiation of an instance of the underlying class.
+ *
+ * @return The new instance.
+ */
+ public Object newInstance();
+ }
+
+ /**
+ * Represents optimized entity property access.
+ *
+ * @author Steve Ebersole
+ */
+ public interface AccessOptimizer {
+ public String[] getPropertyNames();
+ public Object[] getPropertyValues(Object object);
+ public void setPropertyValues(Object object, Object[] values);
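+
+		// Illustrative only, not part of this diff: a bulk copy of all mapped
+		// property values between two instances, assuming an AccessOptimizer
+		// named 'opt':
+		//   opt.setPropertyValues( target, opt.getPropertyValues( source ) );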
+ }
+}
diff --git a/src/org/hibernate/bytecode/cglib/AccessOptimizerAdapter.java b/src/org/hibernate/bytecode/cglib/AccessOptimizerAdapter.java
new file mode 100644
index 0000000000..092e4a414c
--- /dev/null
+++ b/src/org/hibernate/bytecode/cglib/AccessOptimizerAdapter.java
@@ -0,0 +1,102 @@
+package org.hibernate.bytecode.cglib;
+
+import org.hibernate.bytecode.ReflectionOptimizer;
+import org.hibernate.PropertyAccessException;
+import net.sf.cglib.beans.BulkBean;
+import net.sf.cglib.beans.BulkBeanException;
+import net.sf.cglib.reflect.FastClass;
+
+import java.io.Serializable;
+import java.io.ObjectOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+
+/**
+ * The {@link ReflectionOptimizer.AccessOptimizer} implementation for CGLIB
+ * which simply acts as an adapter to the {@link BulkBean} class.
+ *
+ * @author Steve Ebersole
+ */
+public class AccessOptimizerAdapter implements ReflectionOptimizer.AccessOptimizer, Serializable {
+
+ public static final String PROPERTY_GET_EXCEPTION =
+ "exception getting property value with CGLIB (set hibernate.bytecode.use_reflection_optimizer=false for more info)";
+
+ public static final String PROPERTY_SET_EXCEPTION =
+ "exception setting property value with CGLIB (set hibernate.bytecode.use_reflection_optimizer=false for more info)";
+
+ private Class mappedClass;
+ private BulkBean bulkBean;
+
+ public AccessOptimizerAdapter(BulkBean bulkBean, Class mappedClass) {
+ this.bulkBean = bulkBean;
+ this.mappedClass = mappedClass;
+ }
+
+ public String[] getPropertyNames() {
+ return bulkBean.getGetters();
+ }
+
+ public Object[] getPropertyValues(Object object) {
+ try {
+ return bulkBean.getPropertyValues( object );
+ }
+ catch ( Throwable t ) {
+ throw new PropertyAccessException(
+ t,
+ PROPERTY_GET_EXCEPTION,
+ false,
+ mappedClass,
+ getterName( t, bulkBean )
+ );
+ }
+ }
+
+ public void setPropertyValues(Object object, Object[] values) {
+ try {
+ bulkBean.setPropertyValues( object, values );
+ }
+ catch ( Throwable t ) {
+ throw new PropertyAccessException(
+ t,
+ PROPERTY_SET_EXCEPTION,
+ true,
+ mappedClass,
+ setterName( t, bulkBean )
+ );
+ }
+ }
+
+ private static String setterName(Throwable t, BulkBean optimizer) {
+ if ( t instanceof BulkBeanException ) {
+ return optimizer.getSetters()[( ( BulkBeanException ) t ).getIndex()];
+ }
+ else {
+ return "?";
+ }
+ }
+
+ private static String getterName(Throwable t, BulkBean optimizer) {
+ if ( t instanceof BulkBeanException ) {
+ return optimizer.getGetters()[( ( BulkBeanException ) t ).getIndex()];
+ }
+ else {
+ return "?";
+ }
+ }
+
+ private void writeObject(ObjectOutputStream out) throws IOException {
+ out.writeObject( mappedClass );
+ out.writeObject( bulkBean.getGetters() );
+ out.writeObject( bulkBean.getSetters() );
+ out.writeObject( bulkBean.getPropertyTypes() );
+ }
+
+ private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
+ Class beanClass = ( Class ) in.readObject();
+ String[] getters = ( String[] ) in.readObject();
+ String[] setters = ( String[] ) in.readObject();
+ Class[] types = ( Class[] ) in.readObject();
+ bulkBean = BulkBean.create( beanClass, getters, setters, types );
+ }
+}
diff --git a/src/org/hibernate/bytecode/cglib/BytecodeProviderImpl.java b/src/org/hibernate/bytecode/cglib/BytecodeProviderImpl.java
new file mode 100644
index 0000000000..aa21d75457
--- /dev/null
+++ b/src/org/hibernate/bytecode/cglib/BytecodeProviderImpl.java
@@ -0,0 +1,92 @@
+package org.hibernate.bytecode.cglib;
+
+import java.lang.reflect.Modifier;
+
+import net.sf.cglib.beans.BulkBean;
+import net.sf.cglib.beans.BulkBeanException;
+import net.sf.cglib.reflect.FastClass;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.hibernate.bytecode.BytecodeProvider;
+import org.hibernate.bytecode.ProxyFactoryFactory;
+import org.hibernate.bytecode.ReflectionOptimizer;
+import org.hibernate.bytecode.util.FieldFilter;
+import org.hibernate.util.StringHelper;
+
+/**
+ * Bytecode provider implementation for CGLIB.
+ *
+ * @author Steve Ebersole
+ */
+public class BytecodeProviderImpl implements BytecodeProvider {
+
+ private static final Log log = LogFactory.getLog( BytecodeProviderImpl.class );
+
+ public ProxyFactoryFactory getProxyFactoryFactory() {
+ return new ProxyFactoryFactoryImpl();
+ }
+
+ public ReflectionOptimizer getReflectionOptimizer(
+ Class clazz,
+ String[] getterNames,
+ String[] setterNames,
+ Class[] types) {
+ FastClass fastClass;
+ BulkBean bulkBean;
+ try {
+ fastClass = FastClass.create( clazz );
+ bulkBean = BulkBean.create( clazz, getterNames, setterNames, types );
+ if ( !clazz.isInterface() && !Modifier.isAbstract( clazz.getModifiers() ) ) {
+ if ( fastClass == null ) {
+ bulkBean = null;
+ }
+ else {
+ //test out the optimizer:
+ Object instance = fastClass.newInstance();
+ bulkBean.setPropertyValues( instance, bulkBean.getPropertyValues( instance ) );
+ }
+ }
+ }
+ catch( Throwable t ) {
+ fastClass = null;
+ bulkBean = null;
+ String message = "reflection optimizer disabled for: " +
+ clazz.getName() +
+ " [" +
+ StringHelper.unqualify( t.getClass().getName() ) +
+ ": " +
+ t.getMessage();
+
+ if (t instanceof BulkBeanException ) {
+ int index = ( (BulkBeanException) t ).getIndex();
+ if (index >= 0) {
+ message += " (property " + setterNames[index] + ")";
+ }
+ }
+
+ log.debug( message );
+ }
+
+ if ( fastClass != null && bulkBean != null ) {
+ return new ReflectionOptimizerImpl(
+ new InstantiationOptimizerAdapter( fastClass ),
+ new AccessOptimizerAdapter( bulkBean, clazz )
+ );
+ }
+ else {
+ return null;
+ }
+ }
+
+ public org.hibernate.bytecode.ClassTransformer getTransformer(org.hibernate.bytecode.util.ClassFilter classFilter, FieldFilter fieldFilter) {
+ return new CglibClassTransformer( classFilter, fieldFilter );
+ }
+
+}
diff --git a/src/org/hibernate/bytecode/cglib/CglibClassTransformer.java b/src/org/hibernate/bytecode/cglib/CglibClassTransformer.java
new file mode 100644
index 0000000000..9f8834ef54
--- /dev/null
+++ b/src/org/hibernate/bytecode/cglib/CglibClassTransformer.java
@@ -0,0 +1,121 @@
+//$Id: $
+package org.hibernate.bytecode.cglib;
+
+import java.security.ProtectionDomain;
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.ByteArrayOutputStream;
+
+import net.sf.cglib.transform.ClassTransformer;
+import net.sf.cglib.transform.TransformingClassGenerator;
+import net.sf.cglib.transform.ClassReaderGenerator;
+import net.sf.cglib.transform.impl.InterceptFieldEnabled;
+import net.sf.cglib.transform.impl.InterceptFieldFilter;
+import net.sf.cglib.transform.impl.InterceptFieldTransformer;
+import net.sf.cglib.core.ClassNameReader;
+import net.sf.cglib.core.DebuggingClassWriter;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.hibernate.bytecode.AbstractClassTransformerImpl;
+import org.hibernate.bytecode.util.FieldFilter;
+import org.hibernate.bytecode.util.ClassFilter;
+import org.hibernate.HibernateException;
+import org.objectweb.asm.Attribute;
+import org.objectweb.asm.Type;
+import org.objectweb.asm.ClassReader;
+import org.objectweb.asm.ClassWriter;
+import org.objectweb.asm.attrs.Attributes;
+
+/**
+ * Enhance the classes, allowing them to implement InterceptFieldEnabled.
+ * This interface is then used by Hibernate for some optimizations.
+ *
+ * @author Emmanuel Bernard
+ * @author Steve Ebersole
+ */
+public class CglibClassTransformer extends AbstractClassTransformerImpl {
+
+ private static Log log = LogFactory.getLog( CglibClassTransformer.class.getName() );
+
+ public CglibClassTransformer(ClassFilter classFilter, FieldFilter fieldFilter) {
+ super( classFilter, fieldFilter );
+ }
+
+ protected byte[] doTransform(
+ ClassLoader loader,
+ String className,
+ Class classBeingRedefined,
+ ProtectionDomain protectionDomain,
+ byte[] classfileBuffer) {
+ ClassReader reader;
+ try {
+ reader = new ClassReader( new ByteArrayInputStream( classfileBuffer ) );
+ }
+ catch (IOException e) {
+ log.error( "Unable to read class", e );
+ throw new HibernateException( "Unable to read class: " + e.getMessage() );
+ }
+
+ String[] names = ClassNameReader.getClassInfo( reader );
+ ClassWriter w = new DebuggingClassWriter( true );
+ ClassTransformer t = getClassTransformer( names );
+ if ( t != null ) {
+ if ( log.isDebugEnabled() ) {
+ log.debug( "Enhancing " + className );
+ }
+ ByteArrayOutputStream out;
+ byte[] result;
+ try {
+ reader = new ClassReader( new ByteArrayInputStream( classfileBuffer ) );
+ new TransformingClassGenerator(
+ new ClassReaderGenerator( reader, attributes(), skipDebug() ), t
+ ).generateClass( w );
+ out = new ByteArrayOutputStream();
+ out.write( w.toByteArray() );
+ result = out.toByteArray();
+ out.close();
+ }
+ catch (Exception e) {
+ log.error( "Unable to transform class", e );
+ throw new HibernateException( "Unable to transform class: " + e.getMessage() );
+ }
+ return result;
+ }
+ return classfileBuffer;
+ }
+
+
+ private Attribute[] attributes() {
+ return Attributes.getDefaultAttributes();
+ }
+
+ private boolean skipDebug() {
+ return false;
+ }
+
+ private ClassTransformer getClassTransformer(final String[] classInfo) {
+ if ( isAlreadyInstrumented( classInfo ) ) {
+ return null;
+ }
+ return new InterceptFieldTransformer(
+ new InterceptFieldFilter() {
+ public boolean acceptRead(Type owner, String name) {
+ return fieldFilter.shouldTransformFieldAccess( classInfo[0], owner.getClassName(), name );
+ }
+
+ public boolean acceptWrite(Type owner, String name) {
+ return fieldFilter.shouldTransformFieldAccess( classInfo[0], owner.getClassName(), name );
+ }
+ }
+ );
+ }
+
+ private boolean isAlreadyInstrumented(String[] classInfo) {
+ for ( int i = 1; i < classInfo.length; i++ ) {
+ if ( InterceptFieldEnabled.class.getName().equals( classInfo[i] ) ) {
+ return true;
+ }
+ }
+ return false;
+ }
+}
diff --git a/src/org/hibernate/bytecode/cglib/InstantiationOptimizerAdapter.java b/src/org/hibernate/bytecode/cglib/InstantiationOptimizerAdapter.java
new file mode 100644
index 0000000000..f4f0916db8
--- /dev/null
+++ b/src/org/hibernate/bytecode/cglib/InstantiationOptimizerAdapter.java
@@ -0,0 +1,46 @@
+package org.hibernate.bytecode.cglib;
+
+import org.hibernate.bytecode.ReflectionOptimizer;
+import net.sf.cglib.reflect.FastClass;
+import org.hibernate.InstantiationException;
+
+import java.io.Serializable;
+import java.io.ObjectOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+
+/**
+ * The {@link ReflectionOptimizer.InstantiationOptimizer} implementation for CGLIB
+ * which simply acts as an adapter to the {@link FastClass} class.
+ *
+ * @author Steve Ebersole
+ */
+public class InstantiationOptimizerAdapter implements ReflectionOptimizer.InstantiationOptimizer, Serializable {
+ private FastClass fastClass;
+
+ public InstantiationOptimizerAdapter(FastClass fastClass) {
+ this.fastClass = fastClass;
+ }
+
+ public Object newInstance() {
+ try {
+ return fastClass.newInstance();
+ }
+ catch ( Throwable t ) {
+ throw new InstantiationException(
+ "Could not instantiate entity with CGLIB optimizer: ",
+ fastClass.getJavaClass(),
+ t
+ );
+ }
+ }
+
+ private void writeObject(ObjectOutputStream out) throws IOException {
+ out.writeObject( fastClass.getJavaClass() );
+ }
+
+ private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
+ Class beanClass = ( Class ) in.readObject();
+ fastClass = FastClass.create( beanClass );
+ }
+}
diff --git a/src/org/hibernate/bytecode/cglib/ProxyFactoryFactoryImpl.java b/src/org/hibernate/bytecode/cglib/ProxyFactoryFactoryImpl.java
new file mode 100644
index 0000000000..4ff8e37ed4
--- /dev/null
+++ b/src/org/hibernate/bytecode/cglib/ProxyFactoryFactoryImpl.java
@@ -0,0 +1,141 @@
+package org.hibernate.bytecode.cglib;
+
+import org.hibernate.bytecode.ProxyFactoryFactory;
+import org.hibernate.bytecode.BasicProxyFactory;
+import org.hibernate.proxy.ProxyFactory;
+import org.hibernate.proxy.pojo.cglib.CGLIBProxyFactory;
+import org.hibernate.AssertionFailure;
+import org.hibernate.HibernateException;
+import net.sf.cglib.proxy.Enhancer;
+import net.sf.cglib.proxy.CallbackFilter;
+import net.sf.cglib.proxy.MethodInterceptor;
+import net.sf.cglib.proxy.MethodProxy;
+import net.sf.cglib.proxy.NoOp;
+import net.sf.cglib.proxy.Callback;
+import net.sf.cglib.proxy.Factory;
+
+import java.lang.reflect.Method;
+import java.util.HashMap;
+
+/**
+ * A factory for CGLIB-based {@link ProxyFactory} instances.
+ *
+ * @author Steve Ebersole
+ */
+public class ProxyFactoryFactoryImpl implements ProxyFactoryFactory {
+
+ /**
+ * Builds a CGLIB-based proxy factory.
+ *
+ * @return a new CGLIB-based proxy factory.
+ */
+ public ProxyFactory buildProxyFactory() {
+ return new CGLIBProxyFactory();
+ }
+
+ public BasicProxyFactory buildBasicProxyFactory(Class superClass, Class[] interfaces) {
+ return new BasicProxyFactoryImpl( superClass, interfaces );
+ }
+
+ public static class BasicProxyFactoryImpl implements BasicProxyFactory {
+ private final Class proxyClass;
+ private final Factory factory;
+
+ public BasicProxyFactoryImpl(Class superClass, Class[] interfaces) {
+ if ( superClass == null && ( interfaces == null || interfaces.length < 1 ) ) {
+ throw new AssertionFailure( "attempting to build proxy without any superclass or interfaces" );
+ }
+
+ Enhancer en = new Enhancer();
+ en.setUseCache( false );
+ en.setInterceptDuringConstruction( false );
+ en.setUseFactory( true );
+ en.setCallbackTypes( CALLBACK_TYPES );
+ en.setCallbackFilter( FINALIZE_FILTER );
+ if ( superClass != null ) {
+ en.setSuperclass( superClass );
+ }
+ if ( interfaces != null && interfaces.length > 0 ) {
+ en.setInterfaces( interfaces );
+ }
+ proxyClass = en.createClass();
+ try {
+ factory = ( Factory ) proxyClass.newInstance();
+ }
+ catch ( Throwable t ) {
+				throw new HibernateException( "Unable to build CGLIB Factory instance", t );
+ }
+ }
+
+ public Object getProxy() {
+ try {
+ return factory.newInstance(
+ new Callback[] { new PassThroughInterceptor( proxyClass.getName() ), NoOp.INSTANCE }
+ );
+ }
+ catch ( Throwable t ) {
+				throw new HibernateException( "Unable to instantiate proxy instance", t );
+ }
+ }
+ }
+
+ private static final CallbackFilter FINALIZE_FILTER = new CallbackFilter() {
+ public int accept(Method method) {
+ if ( method.getParameterTypes().length == 0 && method.getName().equals("finalize") ){
+ return 1;
+ }
+ else {
+ return 0;
+ }
+ }
+ };
+
+ private static final Class[] CALLBACK_TYPES = new Class[] { MethodInterceptor.class, NoOp.class };
+
+ private static class PassThroughInterceptor implements MethodInterceptor {
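+		// Backs the proxy with a simple property map: zero-argument get*/is*
+		// calls read from 'data' and single-argument set* calls write to it,
+		// keyed by the property name derived from the accessor method name.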
+ private HashMap data = new HashMap();
+ private final String proxiedClassName;
+
+ public PassThroughInterceptor(String proxiedClassName) {
+ this.proxiedClassName = proxiedClassName;
+ }
+
+ public Object intercept(
+ Object obj,
+ Method method,
+ Object[] args,
+ MethodProxy proxy) throws Throwable {
+ String name = method.getName();
+ if ( "toString".equals( name ) ) {
+ return proxiedClassName + "@" + System.identityHashCode( obj );
+ }
+ else if ( "equals".equals( name ) ) {
+ return args[0] instanceof Factory && ( ( Factory ) args[0] ).getCallback( 0 ) == this
+ ? Boolean.TRUE
+ : Boolean.FALSE;
+ }
+ else if ( "hashCode".equals( name ) ) {
+ return new Integer( System.identityHashCode( obj ) );
+ }
+ // Method.getReturnType() never returns null; void methods report void.class
+ boolean hasGetterSignature = method.getParameterTypes().length == 0 && method.getReturnType() != void.class;
+ boolean hasSetterSignature = method.getParameterTypes().length == 1 && method.getReturnType() == void.class;
+ if ( name.startsWith( "get" ) && hasGetterSignature ) {
+ String propName = name.substring( 3 );
+ return data.get( propName );
+ }
+ else if ( name.startsWith( "is" ) && hasGetterSignature ) {
+ String propName = name.substring( 2 );
+ return data.get( propName );
+ }
+ else if ( name.startsWith( "set" ) && hasSetterSignature) {
+ String propName = name.substring( 3 );
+ data.put( propName, args[0] );
+ return null;
+ }
+ else {
+ // todo : what else to do here?
+ return null;
+ }
+ }
+ }
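+ /*
+ * Illustrative usage sketch (the Widget bean is hypothetical, not part of
+ * this code base). The returned proxy behaves as a property bag backed by
+ * the interceptor's HashMap:
+ *
+ * BasicProxyFactory f = new ProxyFactoryFactoryImpl()
+ * .buildBasicProxyFactory( Widget.class, null );
+ * Widget w = ( Widget ) f.getProxy();
+ * w.setName( "test" ); // stored in the map under key "Name"
+ * assert "test".equals( w.getName() ); // read back from the map
+ */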
+}
diff --git a/src/org/hibernate/bytecode/cglib/ReflectionOptimizerImpl.java b/src/org/hibernate/bytecode/cglib/ReflectionOptimizerImpl.java
new file mode 100644
index 0000000000..e92c6ea1a8
--- /dev/null
+++ b/src/org/hibernate/bytecode/cglib/ReflectionOptimizerImpl.java
@@ -0,0 +1,34 @@
+package org.hibernate.bytecode.cglib;
+
+import org.hibernate.bytecode.ReflectionOptimizer;
+
+import java.io.Serializable;
+import java.io.ObjectOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+
+/**
+ * ReflectionOptimizer implementation for CGLIB.
+ *
+ * @author Steve Ebersole
+ */
+public class ReflectionOptimizerImpl implements ReflectionOptimizer, Serializable {
+ private transient InstantiationOptimizerAdapter instantiationOptimizer;
+ private transient AccessOptimizerAdapter accessOptimizer;
+
+ public ReflectionOptimizerImpl(
+ InstantiationOptimizerAdapter instantiationOptimizer,
+ AccessOptimizerAdapter accessOptimizer) {
+ this.instantiationOptimizer = instantiationOptimizer;
+ this.accessOptimizer = accessOptimizer;
+ }
+
+ public InstantiationOptimizer getInstantiationOptimizer() {
+ return instantiationOptimizer;
+ }
+
+ public AccessOptimizer getAccessOptimizer() {
+ return accessOptimizer;
+ }
+
+}
diff --git a/src/org/hibernate/bytecode/javassist/AccessOptimizerAdapter.java b/src/org/hibernate/bytecode/javassist/AccessOptimizerAdapter.java
new file mode 100644
index 0000000000..c1ee941122
--- /dev/null
+++ b/src/org/hibernate/bytecode/javassist/AccessOptimizerAdapter.java
@@ -0,0 +1,81 @@
+package org.hibernate.bytecode.javassist;
+
+import org.hibernate.bytecode.ReflectionOptimizer;
+import org.hibernate.PropertyAccessException;
+
+import java.io.Serializable;
+
+/**
+ * The {@link ReflectionOptimizer.AccessOptimizer} implementation for Javassist
+ * which simply acts as an adapter to the {@link BulkAccessor} class.
+ *
+ * @author Steve Ebersole
+ */
+public class AccessOptimizerAdapter implements ReflectionOptimizer.AccessOptimizer, Serializable {
+
+ public static final String PROPERTY_GET_EXCEPTION =
+ "exception getting property value with Javassist (set hibernate.bytecode.use_reflection_optimizer=false for more info)";
+
+ public static final String PROPERTY_SET_EXCEPTION =
+ "exception setting property value with Javassist (set hibernate.bytecode.use_reflection_optimizer=false for more info)";
+
+ private final BulkAccessor bulkAccessor;
+ private final Class mappedClass;
+
+ public AccessOptimizerAdapter(BulkAccessor bulkAccessor, Class mappedClass) {
+ this.bulkAccessor = bulkAccessor;
+ this.mappedClass = mappedClass;
+ }
+
+ public String[] getPropertyNames() {
+ return bulkAccessor.getGetters();
+ }
+
+ public Object[] getPropertyValues(Object object) {
+ try {
+ return bulkAccessor.getPropertyValues( object );
+ }
+ catch ( Throwable t ) {
+ throw new PropertyAccessException(
+ t,
+ PROPERTY_GET_EXCEPTION,
+ false,
+ mappedClass,
+ getterName( t, bulkAccessor )
+ );
+ }
+ }
+
+ public void setPropertyValues(Object object, Object[] values) {
+ try {
+ bulkAccessor.setPropertyValues( object, values );
+ }
+ catch ( Throwable t ) {
+ throw new PropertyAccessException(
+ t,
+ PROPERTY_SET_EXCEPTION,
+ true,
+ mappedClass,
+ setterName( t, bulkAccessor )
+ );
+ }
+ }
+
+ private static String setterName(Throwable t, BulkAccessor accessor) {
+ if (t instanceof BulkAccessorException ) {
+ return accessor.getSetters()[ ( (BulkAccessorException) t ).getIndex() ];
+ }
+ else {
+ return "?";
+ }
+ }
+
+ private static String getterName(Throwable t, BulkAccessor accessor) {
+ if (t instanceof BulkAccessorException ) {
+ return accessor.getGetters()[ ( (BulkAccessorException) t ).getIndex() ];
+ }
+ else {
+ return "?";
+ }
+ }
+}
diff --git a/src/org/hibernate/bytecode/javassist/BulkAccessor.java b/src/org/hibernate/bytecode/javassist/BulkAccessor.java
new file mode 100644
index 0000000000..a7a4d14ec3
--- /dev/null
+++ b/src/org/hibernate/bytecode/javassist/BulkAccessor.java
@@ -0,0 +1,92 @@
+package org.hibernate.bytecode.javassist;
+
+import java.io.Serializable;
+
+
+/**
+ * A JavaBean accessor.
+ *
+ * This object provides methods that set/get multiple properties
+ * of a JavaBean at once. This class and its support classes have been
+ * developed for compatibility with cglib
+ * (http://cglib.sourceforge.net/).
+ *
+ * @author Muga Nishizawa
+ * @author modified by Shigeru Chiba
+ */
+public abstract class BulkAccessor implements Serializable {
+ protected Class target;
+ protected String[] getters, setters;
+ protected Class[] types;
+
+ protected BulkAccessor() {
+ }
+
+ /**
+ * Obtains the values of properties of a given bean.
+ *
+ * @param bean JavaBean.
+ * @param values the obtained values are stored in this array.
+ */
+ public abstract void getPropertyValues(Object bean, Object[] values);
+
+ /**
+ * Sets properties of a given bean to specified values.
+ *
+ * @param bean JavaBean.
+ * @param values the values assigned to properties.
+ */
+ public abstract void setPropertyValues(Object bean, Object[] values);
+
+ /**
+ * Returns the values of properties of a given bean.
+ *
+ * @param bean JavaBean.
+ */
+ public Object[] getPropertyValues(Object bean) {
+ Object[] values = new Object[getters.length];
+ getPropertyValues( bean, values );
+ return values;
+ }
+
+ /**
+ * Returns the types of properties.
+ */
+ public Class[] getPropertyTypes() {
+ return ( Class[] ) types.clone();
+ }
+
+ /**
+ * Returns the getter names of the properties.
+ */
+ public String[] getGetters() {
+ return ( String[] ) getters.clone();
+ }
+
+ /**
+ * Returns the setter names of the properties.
+ */
+ public String[] getSetters() {
+ return ( String[] ) setters.clone();
+ }
+
+ /**
+ * Creates a new instance of BulkAccessor.
+ * The created instance provides methods for setting/getting
+ * specified properties at once.
+ *
+ * @param beanClass the class of the JavaBeans accessed
+ * through the created object.
+ * @param getters the names of getter methods for specified properties.
+ * @param setters the names of setter methods for specified properties.
+ * @param types the types of specified properties.
+ */
+ public static BulkAccessor create(
+ Class beanClass,
+ String[] getters,
+ String[] setters,
+ Class[] types) {
+ BulkAccessorFactory factory = new BulkAccessorFactory( beanClass, getters, setters, types );
+ return factory.create();
+ }
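+ /*
+ * Illustrative usage sketch (the Widget bean and its "name" property are
+ * hypothetical):
+ *
+ * BulkAccessor acc = BulkAccessor.create(
+ * Widget.class,
+ * new String[] { "getName" },
+ * new String[] { "setName" },
+ * new Class[] { String.class }
+ * );
+ * Widget w = new Widget();
+ * acc.setPropertyValues( w, new Object[] { "test" } ); // calls w.setName( "test" )
+ * Object[] vals = acc.getPropertyValues( w ); // calls w.getName()
+ */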
+}
diff --git a/src/org/hibernate/bytecode/javassist/BulkAccessorException.java b/src/org/hibernate/bytecode/javassist/BulkAccessorException.java
new file mode 100644
index 0000000000..497c282376
--- /dev/null
+++ b/src/org/hibernate/bytecode/javassist/BulkAccessorException.java
@@ -0,0 +1,78 @@
+package org.hibernate.bytecode.javassist;
+
+/**
+ * An exception thrown while generating a bulk accessor.
+ *
+ * @author Muga Nishizawa
+ * @author modified by Shigeru Chiba
+ */
+public class BulkAccessorException extends RuntimeException {
+ private Throwable myCause;
+
+ /**
+ * Gets the cause of this throwable.
+ * It is for JDK 1.3 compatibility.
+ */
+ public Throwable getCause() {
+ return (myCause == this ? null : myCause);
+ }
+
+ /**
+ * Initializes the cause of this throwable.
+ * It is for JDK 1.3 compatibility.
+ */
+ public synchronized Throwable initCause(Throwable cause) {
+ myCause = cause;
+ return this;
+ }
+
+ private int index;
+
+ /**
+ * Constructs an exception.
+ */
+ public BulkAccessorException(String message) {
+ super(message);
+ index = -1;
+ initCause(null);
+ }
+
+ /**
+ * Constructs an exception.
+ *
+ * @param index the index of the property that causes an exception.
+ */
+ public BulkAccessorException(String message, int index) {
+ this(message + ": " + index);
+ this.index = index;
+ }
+
+ /**
+ * Constructs an exception.
+ */
+ public BulkAccessorException(String message, Throwable cause) {
+ super(message);
+ index = -1;
+ initCause(cause);
+ }
+
+ /**
+ * Constructs an exception.
+ *
+ * @param index the index of the property that causes an exception.
+ */
+ public BulkAccessorException(Throwable cause, int index) {
+ this("Property " + index);
+ this.index = index;
+ initCause(cause);
+ }
+
+ /**
+ * Returns the index of the property that causes this exception.
+ *
+ * @return -1 if the index is not specified.
+ */
+ public int getIndex() {
+ return this.index;
+ }
+}
diff --git a/src/org/hibernate/bytecode/javassist/BulkAccessorFactory.java b/src/org/hibernate/bytecode/javassist/BulkAccessorFactory.java
new file mode 100644
index 0000000000..1821a8ac25
--- /dev/null
+++ b/src/org/hibernate/bytecode/javassist/BulkAccessorFactory.java
@@ -0,0 +1,388 @@
+package org.hibernate.bytecode.javassist;
+
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.security.ProtectionDomain;
+
+import javassist.CannotCompileException;
+import javassist.bytecode.AccessFlag;
+import javassist.bytecode.Bytecode;
+import javassist.bytecode.ClassFile;
+import javassist.bytecode.ConstPool;
+import javassist.bytecode.MethodInfo;
+import javassist.bytecode.Opcode;
+import javassist.util.proxy.FactoryHelper;
+import javassist.util.proxy.RuntimeSupport;
+
+/**
+ * A factory of bulk accessors.
+ *
+ * @author Muga Nishizawa
+ * @author modified by Shigeru Chiba
+ */
+class BulkAccessorFactory {
+ private static final String PACKAGE_NAME_PREFIX = "org.javassist.tmp.";
+ private static final String BULKACCESSOR_CLASS_NAME = BulkAccessor.class.getName();
+ private static final String OBJECT_CLASS_NAME = Object.class.getName();
+ private static final String GENERATED_GETTER_NAME = "getPropertyValues";
+ private static final String GENERATED_SETTER_NAME = "setPropertyValues";
+ private static final String GET_SETTER_DESC = "(Ljava/lang/Object;[Ljava/lang/Object;)V";
+ private static final String THROWABLE_CLASS_NAME = Throwable.class.getName();
+ private static final String BULKEXCEPTION_CLASS_NAME = BulkAccessorException.class.getName();
+ private static int counter = 0;
+
+ private Class targetBean;
+ private String[] getterNames;
+ private String[] setterNames;
+ private Class[] types;
+ public String writeDirectory;
+
+ BulkAccessorFactory(
+ Class target,
+ String[] getterNames,
+ String[] setterNames,
+ Class[] types) {
+ this.targetBean = target;
+ this.getterNames = getterNames;
+ this.setterNames = setterNames;
+ this.types = types;
+ this.writeDirectory = null;
+ }
+
+ BulkAccessor create() {
+ Method[] getters = new Method[getterNames.length];
+ Method[] setters = new Method[setterNames.length];
+ findAccessors( targetBean, getterNames, setterNames, types, getters, setters );
+
+ Class beanClass;
+ try {
+ ClassFile classfile = make( getters, setters );
+ ClassLoader loader = this.getClassLoader();
+ if ( writeDirectory != null ) {
+ FactoryHelper.writeFile( classfile, writeDirectory );
+ }
+
+ beanClass = FactoryHelper.toClass( classfile, loader, getDomain() );
+ return ( BulkAccessor ) this.newInstance( beanClass );
+ }
+ catch ( Exception e ) {
+ throw new BulkAccessorException( e.getMessage(), e );
+ }
+ }
+
+ private ProtectionDomain getDomain() {
+ Class cl;
+ if ( this.targetBean != null ) {
+ cl = this.targetBean;
+ }
+ else {
+ cl = this.getClass();
+ }
+ return cl.getProtectionDomain();
+ }
+
+ private ClassFile make(Method[] getters, Method[] setters) throws CannotCompileException {
+ String className = targetBean.getName();
+ // set the name of bulk accessor.
+ className = className + "_$$_bulkaccess_" + counter++;
+ if ( className.startsWith( "java." ) ) {
+ className = "org.javassist.tmp." + className;
+ }
+
+ ClassFile classfile = new ClassFile( false, className, BULKACCESSOR_CLASS_NAME );
+ classfile.setAccessFlags( AccessFlag.PUBLIC );
+ addDefaultConstructor( classfile );
+ addGetter( classfile, getters );
+ addSetter( classfile, setters );
+ return classfile;
+ }
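+ /*
+ * For a hypothetical Widget bean with a single String property "name",
+ * the class generated here is roughly equivalent to:
+ *
+ * public class Widget_$$_bulkaccess_0 extends BulkAccessor {
+ * public void getPropertyValues(Object bean, Object[] values) {
+ * Widget b = (Widget) bean;
+ * values[0] = b.getName();
+ * }
+ * public void setPropertyValues(Object bean, Object[] values) {
+ * Widget b = (Widget) bean;
+ * try {
+ * b.setName( (String) values[0] );
+ * }
+ * catch (Throwable t) {
+ * throw new BulkAccessorException( t, 0 ); // index of the failing property
+ * }
+ * }
+ * }
+ */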
+
+ private ClassLoader getClassLoader() {
+ if ( targetBean != null && targetBean.getName().equals( OBJECT_CLASS_NAME ) ) {
+ return targetBean.getClassLoader();
+ }
+ else {
+ return getClass().getClassLoader();
+ }
+ }
+
+ private Object newInstance(Class type) throws Exception {
+ BulkAccessor instance = ( BulkAccessor ) type.newInstance();
+ instance.target = targetBean;
+ int len = getterNames.length;
+ instance.getters = new String[len];
+ instance.setters = new String[len];
+ instance.types = new Class[len];
+ for ( int i = 0; i < len; i++ ) {
+ instance.getters[i] = getterNames[i];
+ instance.setters[i] = setterNames[i];
+ instance.types[i] = types[i];
+ }
+
+ return instance;
+ }
+
+ /**
+ * Declares a constructor that takes no parameter.
+ *
+ * @param classfile
+ * @throws CannotCompileException
+ */
+ private void addDefaultConstructor(ClassFile classfile) throws CannotCompileException {
+ ConstPool cp = classfile.getConstPool();
+ String cons_desc = "()V";
+ MethodInfo mi = new MethodInfo( cp, MethodInfo.nameInit, cons_desc );
+
+ Bytecode code = new Bytecode( cp, 0, 1 );
+ // aload_0
+ code.addAload( 0 );
+ // invokespecial
+ code.addInvokespecial( BulkAccessor.class.getName(), MethodInfo.nameInit, cons_desc );
+ // return
+ code.addOpcode( Opcode.RETURN );
+
+ mi.setCodeAttribute( code.toCodeAttribute() );
+ mi.setAccessFlags( AccessFlag.PUBLIC );
+ classfile.addMethod( mi );
+ }
+
+ private void addGetter(ClassFile classfile, final Method[] getters) throws CannotCompileException {
+ ConstPool cp = classfile.getConstPool();
+ int target_type_index = cp.addClassInfo( this.targetBean.getName() );
+ String desc = GET_SETTER_DESC;
+ MethodInfo mi = new MethodInfo( cp, GENERATED_GETTER_NAME, desc );
+
+ Bytecode code = new Bytecode( cp, 6, 4 );
+ /* | this | bean | args | raw bean | */
+ if ( getters.length > 0 ) {
+ // aload_1 // load bean
+ code.addAload( 1 );
+ // checkcast // cast bean
+ code.addCheckcast( this.targetBean.getName() );
+ // astore_3 // store bean
+ code.addAstore( 3 );
+ for ( int i = 0; i < getters.length; ++i ) {
+ if ( getters[i] != null ) {
+ Method getter = getters[i];
+ // aload_2 // args
+ code.addAload( 2 );
+ // iconst_i // continue to aastore
+ code.addIconst( i ); // growing stack is 1
+ Class returnType = getter.getReturnType();
+ int typeIndex = -1;
+ if ( returnType.isPrimitive() ) {
+ typeIndex = FactoryHelper.typeIndex( returnType );
+ // new
+ code.addNew( FactoryHelper.wrapperTypes[typeIndex] );
+ // dup
+ code.addOpcode( Opcode.DUP );
+ }
+
+ // aload_3 // load the raw bean
+ code.addAload( 3 );
+ String getter_desc = RuntimeSupport.makeDescriptor( getter );
+ String getterName = getter.getName();
+ if ( this.targetBean.isInterface() ) {
+ // invokeinterface
+ code.addInvokeinterface( target_type_index, getterName, getter_desc, 1 );
+ }
+ else {
+ // invokevirtual
+ code.addInvokevirtual( target_type_index, getterName, getter_desc );
+ }
+
+ if ( typeIndex >= 0 ) { // is a primitive type
+ // invokespecial
+ code.addInvokespecial(
+ FactoryHelper.wrapperTypes[typeIndex],
+ MethodInfo.nameInit,
+ FactoryHelper.wrapperDesc[typeIndex]
+ );
+ }
+
+ // aastore // args
+ code.add( Opcode.AASTORE );
+ code.growStack( -3 );
+ }
+ }
+ }
+ // return
+ code.addOpcode( Opcode.RETURN );
+
+ mi.setCodeAttribute( code.toCodeAttribute() );
+ mi.setAccessFlags( AccessFlag.PUBLIC );
+ classfile.addMethod( mi );
+ }
+
+ private void addSetter(ClassFile classfile, final Method[] setters) throws CannotCompileException {
+ ConstPool cp = classfile.getConstPool();
+ int target_type_index = cp.addClassInfo( this.targetBean.getName() );
+ String desc = GET_SETTER_DESC;
+ MethodInfo mi = new MethodInfo( cp, GENERATED_SETTER_NAME, desc );
+
+ Bytecode code = new Bytecode( cp, 4, 6 );
+ /* | this | bean | args | i | raw bean | exception | */
+ if ( setters.length > 0 ) {
+ int start, end; // required for the exception table
+ // iconst_0 // i
+ code.addIconst( 0 );
+ // istore_3 // store i
+ code.addIstore( 3 );
+ // aload_1 // load the bean
+ code.addAload( 1 );
+ // checkcast // cast the bean into a raw bean
+ code.addCheckcast( this.targetBean.getName() );
+ // astore 4 // store the raw bean
+ code.addAstore( 4 );
+ /* current stack len = 0 */
+ // start of the region covered by the exception handler (BulkAccessorException)
+ start = code.currentPc();
+ int lastIndex = 0;
+ for ( int i = 0; i < setters.length; ++i ) {
+ if ( setters[i] != null ) {
+ int diff = i - lastIndex;
+ if ( diff > 0 ) {
+ // iinc 3, 1
+ code.addOpcode( Opcode.IINC );
+ code.add( 3 );
+ code.add( diff );
+ lastIndex = i;
+ }
+ /* current stack len = 0 */
+ // aload 4 // load the raw bean
+ code.addAload( 4 );
+ // aload_2 // load the args
+ code.addAload( 2 );
+ // iconst_i
+ code.addIconst( i );
+ // aaload
+ code.addOpcode( Opcode.AALOAD );
+ // checkcast
+ Class[] setterParamTypes = setters[i].getParameterTypes();
+ Class setterParamType = setterParamTypes[0];
+ if ( setterParamType.isPrimitive() ) {
+ // checkcast (case of primitive type)
+ // invokevirtual (case of primitive type)
+ this.addUnwrapper( classfile, code, setterParamType );
+ }
+ else {
+ // checkcast (case of reference type)
+ code.addCheckcast( setterParamType.getName() );
+ }
+ /* current stack len = 2 */
+ String rawSetterMethod_desc = RuntimeSupport.makeDescriptor( setters[i] );
+ if ( !this.targetBean.isInterface() ) {
+ // invokevirtual
+ code.addInvokevirtual( target_type_index, setters[i].getName(), rawSetterMethod_desc );
+ }
+ else {
+ // invokeinterface
+ Class[] params = setters[i].getParameterTypes();
+ int size;
+ if ( params[0].equals( Double.TYPE ) || params[0].equals( Long.TYPE ) ) {
+ size = 3;
+ }
+ else {
+ size = 2;
+ }
+
+ code.addInvokeinterface( target_type_index, setters[i].getName(), rawSetterMethod_desc, size );
+ }
+ }
+ }
+
+ // end of the region covered by the exception handler (BulkAccessorException)
+ end = code.currentPc();
+ // return
+ code.addOpcode( Opcode.RETURN );
+ /* current stack len = 0 */
+ // register in exception table
+ int throwableType_index = cp.addClassInfo( THROWABLE_CLASS_NAME );
+ code.addExceptionHandler( start, end, code.currentPc(), throwableType_index );
+ // astore 5 // store exception
+ code.addAstore( 5 );
+ // new // BulkAccessorException
+ code.addNew( BULKEXCEPTION_CLASS_NAME );
+ // dup
+ code.addOpcode( Opcode.DUP );
+ // aload 5 // load exception
+ code.addAload( 5 );
+ // iload_3 // i
+ code.addIload( 3 );
+ // invokespecial // BulkAccessorException.
+ String cons_desc = "(Ljava/lang/Throwable;I)V";
+ code.addInvokespecial( BULKEXCEPTION_CLASS_NAME, MethodInfo.nameInit, cons_desc );
+ // athrow
+ code.addOpcode( Opcode.ATHROW );
+ }
+ else {
+ // return
+ code.addOpcode( Opcode.RETURN );
+ }
+
+ mi.setCodeAttribute( code.toCodeAttribute() );
+ mi.setAccessFlags( AccessFlag.PUBLIC );
+ classfile.addMethod( mi );
+ }
+
+ private void addUnwrapper(
+ ClassFile classfile,
+ Bytecode code,
+ Class type) {
+ int index = FactoryHelper.typeIndex( type );
+ String wrapperType = FactoryHelper.wrapperTypes[index];
+ // checkcast
+ code.addCheckcast( wrapperType );
+ // invokevirtual
+ code.addInvokevirtual( wrapperType, FactoryHelper.unwrapMethods[index], FactoryHelper.unwrapDesc[index] );
+ }
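+ // For example, for an int-typed property this emits:
+ // checkcast java/lang/Integer
+ // invokevirtual java/lang/Integer.intValue()I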
+
+ private static void findAccessors(
+ Class clazz,
+ String[] getterNames,
+ String[] setterNames,
+ Class[] types,
+ Method[] getters,
+ Method[] setters) {
+ int length = types.length;
+ if ( setterNames.length != length || getterNames.length != length ) {
+ throw new BulkAccessorException( "bad number of accessors" );
+ }
+
+ Class[] getParam = new Class[0];
+ Class[] setParam = new Class[1];
+ for ( int i = 0; i < length; i++ ) {
+ if ( getterNames[i] != null ) {
+ Method getter = findAccessor( clazz, getterNames[i], getParam, i );
+ if ( getter.getReturnType() != types[i] ) {
+ throw new BulkAccessorException( "wrong return type: " + getterNames[i], i );
+ }
+
+ getters[i] = getter;
+ }
+
+ if ( setterNames[i] != null ) {
+ setParam[0] = types[i];
+ setters[i] = findAccessor( clazz, setterNames[i], setParam, i );
+ }
+ }
+ }
+
+ private static Method findAccessor(
+ Class clazz,
+ String name,
+ Class[] params,
+ int index) throws BulkAccessorException {
+ try {
+ Method method = clazz.getDeclaredMethod( name, params );
+ if ( Modifier.isPrivate( method.getModifiers() ) ) {
+ throw new BulkAccessorException( "private property", index );
+ }
+
+ return method;
+ }
+ catch ( NoSuchMethodException e ) {
+ throw new BulkAccessorException( "cannot find an accessor", index );
+ }
+ }
+}
diff --git a/src/org/hibernate/bytecode/javassist/BytecodeProviderImpl.java b/src/org/hibernate/bytecode/javassist/BytecodeProviderImpl.java
new file mode 100644
index 0000000000..80d2ccb64f
--- /dev/null
+++ b/src/org/hibernate/bytecode/javassist/BytecodeProviderImpl.java
@@ -0,0 +1,84 @@
+package org.hibernate.bytecode.javassist;
+
+import java.lang.reflect.Modifier;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.hibernate.bytecode.BytecodeProvider;
+import org.hibernate.bytecode.ClassTransformer;
+import org.hibernate.bytecode.ProxyFactoryFactory;
+import org.hibernate.bytecode.ReflectionOptimizer;
+import org.hibernate.bytecode.util.ClassFilter;
+import org.hibernate.bytecode.util.FieldFilter;
+import org.hibernate.util.StringHelper;
+
+/**
+ * Bytecode provider implementation for Javassist.
+ *
+ * @author Steve Ebersole
+ */
+public class BytecodeProviderImpl implements BytecodeProvider {
+
+ private static final Log log = LogFactory.getLog( BytecodeProviderImpl.class );
+
+ public ProxyFactoryFactory getProxyFactoryFactory() {
+ return new ProxyFactoryFactoryImpl();
+ }
+
+ public ReflectionOptimizer getReflectionOptimizer(
+ Class clazz,
+ String[] getterNames,
+ String[] setterNames,
+ Class[] types) {
+ FastClass fastClass;
+ BulkAccessor bulkAccessor;
+ try {
+ fastClass = FastClass.create( clazz );
+ bulkAccessor = BulkAccessor.create( clazz, getterNames, setterNames, types );
+ if ( !clazz.isInterface() && !Modifier.isAbstract( clazz.getModifiers() ) ) {
+ if ( fastClass == null ) {
+ bulkAccessor = null;
+ }
+ else {
+ //test out the optimizer:
+ Object instance = fastClass.newInstance();
+ bulkAccessor.setPropertyValues( instance, bulkAccessor.getPropertyValues( instance ) );
+ }
+ }
+ }
+ catch ( Throwable t ) {
+ fastClass = null;
+ bulkAccessor = null;
+ String message = "reflection optimizer disabled for: " +
+ clazz.getName() +
+ " [" +
+ StringHelper.unqualify( t.getClass().getName() ) +
+ ": " +
+ t.getMessage();
+
+ if ( t instanceof BulkAccessorException ) {
+ int index = ( ( BulkAccessorException ) t ).getIndex();
+ if ( index >= 0 ) {
+ message += " (property " + setterNames[index] + ")";
+ }
+ }
+
+ log.debug( message );
+ }
+
+ if ( fastClass != null && bulkAccessor != null ) {
+ return new ReflectionOptimizerImpl(
+ new InstantiationOptimizerAdapter( fastClass ),
+ new AccessOptimizerAdapter( bulkAccessor, clazz )
+ );
+ }
+ else {
+ return null;
+ }
+ }
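+ /*
+ * Illustrative usage sketch (Widget and its accessors are hypothetical);
+ * this is roughly how the optimizer is consumed on the Hibernate side:
+ *
+ * ReflectionOptimizer opt = new BytecodeProviderImpl().getReflectionOptimizer(
+ * Widget.class,
+ * new String[] { "getName" },
+ * new String[] { "setName" },
+ * new Class[] { String.class }
+ * );
+ * if ( opt != null ) { // null means the optimizer was disabled for this class
+ * Object w = opt.getInstantiationOptimizer().newInstance();
+ * opt.getAccessOptimizer().setPropertyValues( w, new Object[] { "test" } );
+ * }
+ */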
+
+ public ClassTransformer getTransformer(ClassFilter classFilter, FieldFilter fieldFilter) {
+ return new JavassistClassTransformer( classFilter, fieldFilter );
+ }
+
+}
diff --git a/src/org/hibernate/bytecode/javassist/FastClass.java b/src/org/hibernate/bytecode/javassist/FastClass.java
new file mode 100644
index 0000000000..60ae94ce60
--- /dev/null
+++ b/src/org/hibernate/bytecode/javassist/FastClass.java
@@ -0,0 +1,170 @@
+package org.hibernate.bytecode.javassist;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.io.Serializable;
+
+/**
+ * @author Muga Nishizawa
+ */
+public class FastClass implements Serializable {
+
+ private static final Class[] EMPTY_CLASS_ARRAY = new Class[0];
+
+ private Class type;
+
+ private FastClass() {
+ }
+
+ private FastClass(Class type) {
+ this.type = type;
+ }
+
+ public Object invoke(
+ String name,
+ Class[] parameterTypes,
+ Object obj,
+ Object[] args) throws InvocationTargetException {
+ return this.invoke( this.getIndex( name, parameterTypes ), obj, args );
+ }
+
+ public Object invoke(
+ int index,
+ Object obj,
+ Object[] args) throws InvocationTargetException {
+ Method[] methods = this.type.getMethods();
+ try {
+ return methods[index].invoke( obj, args );
+ }
+ catch ( ArrayIndexOutOfBoundsException e ) {
+ throw new IllegalArgumentException(
+ "Cannot find matching method/constructor"
+ );
+ }
+ catch ( IllegalAccessException e ) {
+ throw new InvocationTargetException( e );
+ }
+ }
+
+ public Object newInstance() throws InvocationTargetException {
+ return this.newInstance( this.getIndex( EMPTY_CLASS_ARRAY ), null );
+ }
+
+ public Object newInstance(
+ Class[] parameterTypes,
+ Object[] args) throws InvocationTargetException {
+ return this.newInstance( this.getIndex( parameterTypes ), args );
+ }
+
+ public Object newInstance(
+ int index,
+ Object[] args) throws InvocationTargetException {
+ Constructor[] conss = this.type.getConstructors();
+ try {
+ return conss[index].newInstance( args );
+ }
+ catch ( ArrayIndexOutOfBoundsException e ) {
+ throw new IllegalArgumentException( "Cannot find matching method/constructor" );
+ }
+ catch ( InstantiationException e ) {
+ throw new InvocationTargetException( e );
+ }
+ catch ( IllegalAccessException e ) {
+ throw new InvocationTargetException( e );
+ }
+ }
+
+ public int getIndex(String name, Class[] parameterTypes) {
+ Method[] methods = this.type.getMethods();
+ boolean eq = true;
+ for ( int i = 0; i < methods.length; ++i ) {
+ if ( !Modifier.isPublic( methods[i].getModifiers() ) ) {
+ continue;
+ }
+ if ( !methods[i].getName().equals( name ) ) {
+ continue;
+ }
+ Class[] params = methods[i].getParameterTypes();
+ if ( params.length != parameterTypes.length ) {
+ continue;
+ }
+ eq = true;
+ for ( int j = 0; j < params.length; ++j ) {
+ if ( !params[j].equals( parameterTypes[j] ) ) {
+ eq = false;
+ break;
+ }
+ }
+ if ( eq ) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ public int getIndex(Class[] parameterTypes) {
+ Constructor[] conss = this.type.getConstructors();
+ boolean eq = true;
+ for ( int i = 0; i < conss.length; ++i ) {
+ if ( !Modifier.isPublic( conss[i].getModifiers() ) ) {
+ continue;
+ }
+ Class[] params = conss[i].getParameterTypes();
+ if ( params.length != parameterTypes.length ) {
+ continue;
+ }
+ eq = true;
+ for ( int j = 0; j < params.length; ++j ) {
+ if ( !params[j].equals( parameterTypes[j] ) ) {
+ eq = false;
+ break;
+ }
+ }
+ if ( eq ) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ public int getMaxIndex() {
+ Method[] methods = this.type.getMethods();
+ int count = 0;
+ for ( int i = 0; i < methods.length; ++i ) {
+ if ( Modifier.isPublic( methods[i].getModifiers() ) ) {
+ count++;
+ }
+ }
+ return count;
+ }
+
+ public String getName() {
+ return this.type.getName();
+ }
+
+ public Class getJavaClass() {
+ return this.type;
+ }
+
+ public String toString() {
+ return this.type.toString();
+ }
+
+ public int hashCode() {
+ return this.type.hashCode();
+ }
+
+ public boolean equals(Object o) {
+ if ( !( o instanceof FastClass ) ) {
+ return false;
+ }
+ return this.type.equals( ( ( FastClass ) o ).type );
+ }
+
+ public static FastClass create(Class type) {
+ FastClass fc = new FastClass( type );
+ return fc;
+ }
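+ /*
+ * Illustrative usage sketch (Widget is hypothetical). Note that unlike
+ * cglib's generated FastClass, this implementation simply delegates to
+ * java.lang.reflect behind a FastClass-compatible API:
+ *
+ * FastClass fc = FastClass.create( Widget.class );
+ * Object w = fc.newInstance();
+ * fc.invoke( "setName", new Class[] { String.class }, w, new Object[] { "test" } );
+ */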
+}
diff --git a/src/org/hibernate/bytecode/javassist/FieldFilter.java b/src/org/hibernate/bytecode/javassist/FieldFilter.java
new file mode 100644
index 0000000000..7a5ee0d5a6
--- /dev/null
+++ b/src/org/hibernate/bytecode/javassist/FieldFilter.java
@@ -0,0 +1,28 @@
+package org.hibernate.bytecode.javassist;
+
+/**
+ * Contract for deciding whether a given field should have read and/or write access intercepted.
+ *
+ * @author Muga Nishizawa
+ */
+public interface FieldFilter {
+ /**
+ * Should the given field be read intercepted?
+ *
+ * @param desc the type descriptor of the field.
+ * @param name the name of the field.
+ * @return true if the given field should be read intercepted; otherwise
+ * false.
+ */
+ boolean handleRead(String desc, String name);
+
+ /**
+ * Should the given field be write intercepted?
+ *
+ * @param desc the type descriptor of the field.
+ * @param name the name of the field.
+ * @return true if the given field should be write intercepted; otherwise
+ * false.
+ */
+ boolean handleWrite(String desc, String name);
+}
diff --git a/src/org/hibernate/bytecode/javassist/FieldHandled.java b/src/org/hibernate/bytecode/javassist/FieldHandled.java
new file mode 100644
index 0000000000..c25fbef8a6
--- /dev/null
+++ b/src/org/hibernate/bytecode/javassist/FieldHandled.java
@@ -0,0 +1,23 @@
+package org.hibernate.bytecode.javassist;
+
+/**
+ * Interface introduced to the enhanced class in order to be able to
+ * inject a {@link FieldHandler} to define the interception behavior.
+ *
+ * @author Muga Nishizawa
+ */
+public interface FieldHandled {
+ /**
+ * Inject the field interception handler to be used.
+ *
+ * @param handler The field interception handler.
+ */
+ public void setFieldHandler(FieldHandler handler);
+
+ /**
+ * Access to the current field interception handler.
+ *
+ * @return The current field interception handler.
+ */
+ public FieldHandler getFieldHandler();
+}
diff --git a/src/org/hibernate/bytecode/javassist/FieldHandler.java b/src/org/hibernate/bytecode/javassist/FieldHandler.java
new file mode 100644
index 0000000000..66ca6edcb8
--- /dev/null
+++ b/src/org/hibernate/bytecode/javassist/FieldHandler.java
@@ -0,0 +1,56 @@
+package org.hibernate.bytecode.javassist;
+
+/**
+ * The interface defining how interception of a field should be handled.
+ *
+ * @author Muga Nishizawa
+ */
+public interface FieldHandler {
+
+ /**
+ * Called to handle writing an int value to a given field.
+ *
+ * @param obj The object instance whose field is being written.
+ * @param name The name of the field being written
+ * @param oldValue The old field value
+ * @param newValue The new field value.
+ * @return The value to actually store into the field; the enhanced class writes this return value, not newValue directly.
+ */
+ int writeInt(Object obj, String name, int oldValue, int newValue);
+
+ char writeChar(Object obj, String name, char oldValue, char newValue);
+
+ byte writeByte(Object obj, String name, byte oldValue, byte newValue);
+
+ boolean writeBoolean(Object obj, String name, boolean oldValue,
+ boolean newValue);
+
+ short writeShort(Object obj, String name, short oldValue, short newValue);
+
+ float writeFloat(Object obj, String name, float oldValue, float newValue);
+
+ double writeDouble(Object obj, String name, double oldValue, double newValue);
+
+ long writeLong(Object obj, String name, long oldValue, long newValue);
+
+ Object writeObject(Object obj, String name, Object oldValue, Object newValue);
+
+ int readInt(Object obj, String name, int oldValue);
+
+ char readChar(Object obj, String name, char oldValue);
+
+ byte readByte(Object obj, String name, byte oldValue);
+
+ boolean readBoolean(Object obj, String name, boolean oldValue);
+
+ short readShort(Object obj, String name, short oldValue);
+
+ float readFloat(Object obj, String name, float oldValue);
+
+ double readDouble(Object obj, String name, double oldValue);
+
+ long readLong(Object obj, String name, long oldValue);
+
+ Object readObject(Object obj, String name, Object oldValue);
+
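+ /*
+ * A minimal no-op implementation sketch (illustrative): each write*()
+ * method returns the proposed newValue unchanged and each read*() method
+ * returns oldValue unchanged, e.g.
+ *
+ * public Object writeObject(Object obj, String name, Object oldValue, Object newValue) {
+ * return newValue; // the enhanced class stores whatever is returned
+ * }
+ * public Object readObject(Object obj, String name, Object oldValue) {
+ * return oldValue;
+ * }
+ */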
+}
diff --git a/src/org/hibernate/bytecode/javassist/FieldTransformer.java b/src/org/hibernate/bytecode/javassist/FieldTransformer.java
new file mode 100644
index 0000000000..2394cb41d0
--- /dev/null
+++ b/src/org/hibernate/bytecode/javassist/FieldTransformer.java
@@ -0,0 +1,592 @@
+package org.hibernate.bytecode.javassist;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+
+import javassist.CannotCompileException;
+import javassist.bytecode.AccessFlag;
+import javassist.bytecode.BadBytecode;
+import javassist.bytecode.Bytecode;
+import javassist.bytecode.ClassFile;
+import javassist.bytecode.CodeAttribute;
+import javassist.bytecode.CodeIterator;
+import javassist.bytecode.ConstPool;
+import javassist.bytecode.Descriptor;
+import javassist.bytecode.FieldInfo;
+import javassist.bytecode.MethodInfo;
+import javassist.bytecode.Opcode;
+
+/**
+ * The thing that handles actual class enhancement in regards to
+ * intercepting field accesses.
+ *
+ * @author Muga Nishizawa
+ */
+public class FieldTransformer {
+
+ private static final String EACH_READ_METHOD_PREFIX = "$javassist_read_";
+
+ private static final String EACH_WRITE_METHOD_PREFIX = "$javassist_write_";
+
+ private static final String FIELD_HANDLED_TYPE_NAME = FieldHandled.class
+ .getName();
+
+ private static final String HANDLER_FIELD_NAME = "$JAVASSIST_READ_WRITE_HANDLER";
+
+ private static final String FIELD_HANDLER_TYPE_NAME = FieldHandler.class
+ .getName();
+
+ private static final String HANDLER_FIELD_DESCRIPTOR = 'L' + FIELD_HANDLER_TYPE_NAME
+ .replace('.', '/') + ';';
+
+ private static final String GETFIELDHANDLER_METHOD_NAME = "getFieldHandler";
+
+ private static final String SETFIELDHANDLER_METHOD_NAME = "setFieldHandler";
+
+ private static final String GETFIELDHANDLER_METHOD_DESCRIPTOR = "()"
+ + HANDLER_FIELD_DESCRIPTOR;
+
+ private static final String SETFIELDHANDLER_METHOD_DESCRIPTOR = "("
+ + HANDLER_FIELD_DESCRIPTOR + ")V";
+
+ private FieldFilter filter;
+
+ private HashMap readableFields;
+
+ private HashMap writableFields;
+
+ public FieldTransformer() {
+ this(null);
+ }
+
+ public FieldTransformer(FieldFilter f) {
+ filter = f;
+ readableFields = new HashMap();
+ writableFields = new HashMap();
+ }
+
+ public void setFieldFilter(FieldFilter f) {
+ filter = f;
+ }
+
+ public void transform(File file) throws Exception {
+ DataInputStream in = new DataInputStream(new FileInputStream(file));
+ ClassFile classfile;
+ try {
+ classfile = new ClassFile(in);
+ } finally {
+ in.close();
+ }
+ transform(classfile);
+ DataOutputStream out = new DataOutputStream(new FileOutputStream(file));
+ try {
+ classfile.write(out);
+ } finally {
+ out.close();
+ }
+ }
+
+ public void transform(ClassFile classfile) throws Exception {
+ if (classfile.isInterface()) {
+ return;
+ }
+ try {
+ addFieldHandlerField(classfile);
+ addGetFieldHandlerMethod(classfile);
+ addSetFieldHandlerMethod(classfile);
+ addFieldHandledInterface(classfile);
+ addReadWriteMethods(classfile);
+ transformInvokevirtualsIntoPutAndGetfields(classfile);
+ } catch (CannotCompileException e) {
+ throw new RuntimeException(e.getMessage(), e);
+ }
+ }
+
+ private void addFieldHandlerField(ClassFile classfile)
+ throws CannotCompileException {
+ ConstPool cp = classfile.getConstPool();
+ FieldInfo finfo = new FieldInfo(cp, HANDLER_FIELD_NAME,
+ HANDLER_FIELD_DESCRIPTOR);
+ finfo.setAccessFlags(AccessFlag.PRIVATE | AccessFlag.TRANSIENT);
+ classfile.addField(finfo);
+ }
+
+ private void addGetFieldHandlerMethod(ClassFile classfile)
+ throws CannotCompileException {
+ ConstPool cp = classfile.getConstPool();
+ int this_class_index = cp.getThisClassInfo();
+ MethodInfo minfo = new MethodInfo(cp, GETFIELDHANDLER_METHOD_NAME,
+ GETFIELDHANDLER_METHOD_DESCRIPTOR);
+ /* local variable | this | */
+ Bytecode code = new Bytecode(cp, 2, 1);
+ // aload_0 // load this
+ code.addAload(0);
+ // getfield // get field "$JAVASSIST_CALLBACK" defined already
+ code.addOpcode(Opcode.GETFIELD);
+ int field_index = cp.addFieldrefInfo(this_class_index,
+ HANDLER_FIELD_NAME, HANDLER_FIELD_DESCRIPTOR);
+ code.addIndex(field_index);
+ // areturn // return the value of the field
+ code.addOpcode(Opcode.ARETURN);
+ minfo.setCodeAttribute(code.toCodeAttribute());
+ minfo.setAccessFlags(AccessFlag.PUBLIC);
+ classfile.addMethod(minfo);
+ }
+
+ private void addSetFieldHandlerMethod(ClassFile classfile)
+ throws CannotCompileException {
+ ConstPool cp = classfile.getConstPool();
+ int this_class_index = cp.getThisClassInfo();
+ MethodInfo minfo = new MethodInfo(cp, SETFIELDHANDLER_METHOD_NAME,
+ SETFIELDHANDLER_METHOD_DESCRIPTOR);
+ /* local variables | this | callback | */
+ Bytecode code = new Bytecode(cp, 3, 3);
+ // aload_0 // load this
+ code.addAload(0);
+ // aload_1 // load callback
+ code.addAload(1);
+ // putfield // put field "$JAVASSIST_CALLBACK" defined already
+ code.addOpcode(Opcode.PUTFIELD);
+ int field_index = cp.addFieldrefInfo(this_class_index,
+ HANDLER_FIELD_NAME, HANDLER_FIELD_DESCRIPTOR);
+ code.addIndex(field_index);
+ // return
+ code.addOpcode(Opcode.RETURN);
+ minfo.setCodeAttribute(code.toCodeAttribute());
+ minfo.setAccessFlags(AccessFlag.PUBLIC);
+ classfile.addMethod(minfo);
+ }
+
+ private void addFieldHandledInterface(ClassFile classfile) {
+ String[] interfaceNames = classfile.getInterfaces();
+ String[] newInterfaceNames = new String[interfaceNames.length + 1];
+ System.arraycopy(interfaceNames, 0, newInterfaceNames, 0,
+ interfaceNames.length);
+ newInterfaceNames[newInterfaceNames.length - 1] = FIELD_HANDLED_TYPE_NAME;
+ classfile.setInterfaces(newInterfaceNames);
+ }
+
+ private void addReadWriteMethods(ClassFile classfile)
+ throws CannotCompileException {
+ List fields = classfile.getFields();
+ for (Iterator field_iter = fields.iterator(); field_iter.hasNext();) {
+ FieldInfo finfo = (FieldInfo) field_iter.next();
+ if ((finfo.getAccessFlags() & AccessFlag.STATIC) == 0
+ && (!finfo.getName().equals(HANDLER_FIELD_NAME))) {
+ // case of non-static field
+ if (filter.handleRead(finfo.getDescriptor(), finfo
+ .getName())) {
+ addReadMethod(classfile, finfo);
+ readableFields.put(finfo.getName(), finfo
+ .getDescriptor());
+ }
+ if (filter.handleWrite(finfo.getDescriptor(), finfo
+ .getName())) {
+ addWriteMethod(classfile, finfo);
+ writableFields.put(finfo.getName(), finfo
+ .getDescriptor());
+ }
+ }
+ }
+ }
+
+ private void addReadMethod(ClassFile classfile, FieldInfo finfo)
+ throws CannotCompileException {
+ ConstPool cp = classfile.getConstPool();
+ int this_class_index = cp.getThisClassInfo();
+ String desc = "()" + finfo.getDescriptor();
+ MethodInfo minfo = new MethodInfo(cp, EACH_READ_METHOD_PREFIX
+ + finfo.getName(), desc);
+ /* local variables | target obj | each oldvalue | */
+ Bytecode code = new Bytecode(cp, 5, 3);
+ // aload_0
+ code.addAload(0);
+ // getfield // get each field
+ code.addOpcode(Opcode.GETFIELD);
+ int base_field_index = cp.addFieldrefInfo(this_class_index, finfo
+ .getName(), finfo.getDescriptor());
+ code.addIndex(base_field_index);
+ // aload_0
+ code.addAload(0);
+ // invokeinterface // invoke Enabled.getInterceptFieldCallback()
+ int enabled_class_index = cp.addClassInfo(FIELD_HANDLED_TYPE_NAME);
+ code.addInvokeinterface(enabled_class_index,
+ GETFIELDHANDLER_METHOD_NAME, GETFIELDHANDLER_METHOD_DESCRIPTOR,
+ 1);
+ // ifnonnull
+ code.addOpcode(Opcode.IFNONNULL);
+ code.addIndex(4);
+ // *return // each type
+ addTypeDependDataReturn(code, finfo.getDescriptor());
+ // *store_1 // each type
+ addTypeDependDataStore(code, finfo.getDescriptor(), 1);
+ // aload_0
+ code.addAload(0);
+ // invokeinterface // invoke Enabled.getInterceptFieldCallback()
+ code.addInvokeinterface(enabled_class_index,
+ GETFIELDHANDLER_METHOD_NAME, GETFIELDHANDLER_METHOD_DESCRIPTOR,
+ 1);
+ // aload_0
+ code.addAload(0);
+ // ldc // name of the field
+ code.addLdc(finfo.getName());
+ // *load_1 // each type
+ addTypeDependDataLoad(code, finfo.getDescriptor(), 1);
+ // invokeinterface // invoke Callback.read*() // each type
+ addInvokeFieldHandlerMethod(classfile, code, finfo.getDescriptor(),
+ true);
+ // *return // each type
+ addTypeDependDataReturn(code, finfo.getDescriptor());
+
+ minfo.setCodeAttribute(code.toCodeAttribute());
+ minfo.setAccessFlags(AccessFlag.PUBLIC);
+ classfile.addMethod(minfo);
+ }
+
+ private void addWriteMethod(ClassFile classfile, FieldInfo finfo)
+ throws CannotCompileException {
+ ConstPool cp = classfile.getConstPool();
+ int this_class_index = cp.getThisClassInfo();
+ String desc = "(" + finfo.getDescriptor() + ")V";
+ MethodInfo minfo = new MethodInfo(cp, EACH_WRITE_METHOD_PREFIX
+ + finfo.getName(), desc);
+ /* local variables | target obj | each oldvalue | */
+ Bytecode code = new Bytecode(cp, 6, 3);
+ // aload_0
+ code.addAload(0);
+ // invokeinterface // enabled.getInterceptFieldCallback()
+ int enabled_class_index = cp.addClassInfo(FIELD_HANDLED_TYPE_NAME);
+ code.addInvokeinterface(enabled_class_index,
+ GETFIELDHANDLER_METHOD_NAME, GETFIELDHANDLER_METHOD_DESCRIPTOR,
+ 1);
+ // ifnonnull (label1)
+ code.addOpcode(Opcode.IFNONNULL);
+ code.addIndex(9);
+ // aload_0
+ code.addAload(0);
+ // *load_1
+ addTypeDependDataLoad(code, finfo.getDescriptor(), 1);
+ // putfield
+ code.addOpcode(Opcode.PUTFIELD);
+ int base_field_index = cp.addFieldrefInfo(this_class_index, finfo
+ .getName(), finfo.getDescriptor());
+ code.addIndex(base_field_index);
+ code.growStack(-Descriptor.dataSize(finfo.getDescriptor()));
+ // return ;
+ code.addOpcode(Opcode.RETURN);
+ // aload_0
+ code.addAload(0);
+ // dup
+ code.addOpcode(Opcode.DUP);
+ // invokeinterface // enabled.getInterceptFieldCallback()
+ code.addInvokeinterface(enabled_class_index,
+ GETFIELDHANDLER_METHOD_NAME, GETFIELDHANDLER_METHOD_DESCRIPTOR,
+ 1);
+ // aload_0
+ code.addAload(0);
+ // ldc // field name
+ code.addLdc(finfo.getName());
+ // aload_0
+ code.addAload(0);
+ // getfield // old value of the field
+ code.addOpcode(Opcode.GETFIELD);
+ code.addIndex(base_field_index);
+ code.growStack(Descriptor.dataSize(finfo.getDescriptor()) - 1);
+ // *load_1
+ addTypeDependDataLoad(code, finfo.getDescriptor(), 1);
+ // invokeinterface // callback.write*(..)
+ addInvokeFieldHandlerMethod(classfile, code, finfo.getDescriptor(),
+ false);
+ // putfield // new value of the field
+ code.addOpcode(Opcode.PUTFIELD);
+ code.addIndex(base_field_index);
+ code.growStack(-Descriptor.dataSize(finfo.getDescriptor()));
+ // return
+ code.addOpcode(Opcode.RETURN);
+
+ minfo.setCodeAttribute(code.toCodeAttribute());
+ minfo.setAccessFlags(AccessFlag.PUBLIC);
+ classfile.addMethod(minfo);
+ }
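+ /*
+ * For a field "private String name" the two generated methods are roughly
+ * equivalent to the following Java (illustrative):
+ *
+ * public String $javassist_read_name() {
+ * if ( getFieldHandler() == null ) {
+ * return this.name;
+ * }
+ * return ( String ) getFieldHandler().readObject( this, "name", this.name );
+ * }
+ *
+ * public void $javassist_write_name(String newValue) {
+ * if ( getFieldHandler() == null ) {
+ * this.name = newValue;
+ * return;
+ * }
+ * this.name = ( String ) getFieldHandler().writeObject( this, "name", this.name, newValue );
+ * }
+ */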
+
+ private void transformInvokevirtualsIntoPutAndGetfields(ClassFile classfile)
+ throws CannotCompileException {
+ List methods = classfile.getMethods();
+ for (Iterator method_iter = methods.iterator(); method_iter.hasNext();) {
+ MethodInfo minfo = (MethodInfo) method_iter.next();
+ String methodName = minfo.getName();
+ if (methodName.startsWith(EACH_READ_METHOD_PREFIX)
+ || methodName.startsWith(EACH_WRITE_METHOD_PREFIX)
+ || methodName.equals(GETFIELDHANDLER_METHOD_NAME)
+ || methodName.equals(SETFIELDHANDLER_METHOD_NAME)) {
+ continue;
+ }
+ CodeAttribute codeAttr = minfo.getCodeAttribute();
+ if (codeAttr == null) {
+ // abstract or native methods carry no code attribute; skip to the next method
+ continue;
+ }
+ CodeIterator iter = codeAttr.iterator();
+ while (iter.hasNext()) {
+ try {
+ int pos = iter.next();
+ pos = transformInvokevirtualsIntoGetfields(classfile, iter,
+ pos);
+ pos = transformInvokevirtualsIntoPutfields(classfile, iter,
+ pos);
+
+ } catch (BadBytecode e) {
+ throw new CannotCompileException(e);
+ }
+ }
+ }
+ }
+
+ private int transformInvokevirtualsIntoGetfields(ClassFile classfile,
+ CodeIterator iter, int pos) {
+ ConstPool cp = classfile.getConstPool();
+ int c = iter.byteAt(pos);
+ if (c != Opcode.GETFIELD) {
+ return pos;
+ }
+ int index = iter.u16bitAt(pos + 1);
+ String fieldName = cp.getFieldrefName(index);
+ String className = cp.getFieldrefClassName(index);
+ if ((!classfile.getName().equals(className))
+ || (!readableFields.containsKey(fieldName))) {
+ return pos;
+ }
+ String desc = "()" + (String) readableFields.get(fieldName);
+ int read_method_index = cp.addMethodrefInfo(cp.getThisClassInfo(),
+ EACH_READ_METHOD_PREFIX + fieldName, desc);
+ iter.writeByte(Opcode.INVOKEVIRTUAL, pos);
+ iter.write16bit(read_method_index, pos + 1);
+ return pos;
+ }
+
+ private int transformInvokevirtualsIntoPutfields(ClassFile classfile,
+ CodeIterator iter, int pos) {
+ ConstPool cp = classfile.getConstPool();
+ int c = iter.byteAt(pos);
+ if (c != Opcode.PUTFIELD) {
+ return pos;
+ }
+ int index = iter.u16bitAt(pos + 1);
+ String fieldName = cp.getFieldrefName(index);
+ String className = cp.getFieldrefClassName(index);
+ if ((!classfile.getName().equals(className))
+ || (!writableFields.containsKey(fieldName))) {
+ return pos;
+ }
+ String desc = "(" + (String) writableFields.get(fieldName) + ")V";
+ int write_method_index = cp.addMethodrefInfo(cp.getThisClassInfo(),
+ EACH_WRITE_METHOD_PREFIX + fieldName, desc);
+ iter.writeByte(Opcode.INVOKEVIRTUAL, pos);
+ iter.write16bit(write_method_index, pos + 1);
+ return pos;
+ }
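+ // Both rewrites above rely on GETFIELD/PUTFIELD and INVOKEVIRTUAL having the
+ // same encoded width (one opcode byte plus a two-byte constant pool index),
+ // so instructions can be overwritten in place without shifting any code.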
+
+ private static void addInvokeFieldHandlerMethod(ClassFile classfile,
+ Bytecode code, String typeName, boolean isReadMethod) {
+ ConstPool cp = classfile.getConstPool();
+ // invokeinterface
+ int callback_type_index = cp.addClassInfo(FIELD_HANDLER_TYPE_NAME);
+ if ((typeName.charAt(0) == 'L')
+ && (typeName.charAt(typeName.length() - 1) == ';')
+ || (typeName.charAt(0) == '[')) {
+ // reference type
+ int indexOfL = typeName.indexOf('L');
+ String type;
+ if (indexOfL == 0) {
+ // not array
+ type = typeName.substring(1, typeName.length() - 1);
+ type = type.replace('/', '.');
+ } else if (indexOfL == -1) {
+ // array of primitive type
+ // do nothing
+ type = typeName;
+ } else {
+ // array of reference type
+ type = typeName.replace('/', '.');
+ }
+ if (isReadMethod) {
+ code.addInvokeinterface(
+ callback_type_index,
+ "readObject",
+ "(Ljava/lang/Object;Ljava/lang/String;Ljava/lang/Object;)Ljava/lang/Object;",
+ 4);
+ // checkcast
+ code.addCheckcast(type);
+ } else {
+ code.addInvokeinterface(
+ callback_type_index,
+ "writeObject",
+ "(Ljava/lang/Object;Ljava/lang/String;Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
+ 5);
+ // checkcast
+ code.addCheckcast(type);
+ }
+ } else if (typeName.equals("Z")) {
+ // boolean
+ if (isReadMethod) {
+ code.addInvokeinterface(callback_type_index, "readBoolean",
+ "(Ljava/lang/Object;Ljava/lang/String;Z)Z", 4);
+ } else {
+ code.addInvokeinterface(callback_type_index, "writeBoolean",
+ "(Ljava/lang/Object;Ljava/lang/String;ZZ)Z", 5);
+ }
+ } else if (typeName.equals("B")) {
+ // byte
+ if (isReadMethod) {
+ code.addInvokeinterface(callback_type_index, "readByte",
+ "(Ljava/lang/Object;Ljava/lang/String;B)B", 4);
+ } else {
+ code.addInvokeinterface(callback_type_index, "writeByte",
+ "(Ljava/lang/Object;Ljava/lang/String;BB)B", 5);
+ }
+ } else if (typeName.equals("C")) {
+ // char
+ if (isReadMethod) {
+ code.addInvokeinterface(callback_type_index, "readChar",
+ "(Ljava/lang/Object;Ljava/lang/String;C)C", 4);
+ } else {
+ code.addInvokeinterface(callback_type_index, "writeChar",
+ "(Ljava/lang/Object;Ljava/lang/String;CC)C", 5);
+ }
+ } else if (typeName.equals("I")) {
+ // int
+ if (isReadMethod) {
+ code.addInvokeinterface(callback_type_index, "readInt",
+ "(Ljava/lang/Object;Ljava/lang/String;I)I", 4);
+ } else {
+ code.addInvokeinterface(callback_type_index, "writeInt",
+ "(Ljava/lang/Object;Ljava/lang/String;II)I", 5);
+ }
+ } else if (typeName.equals("S")) {
+ // short
+ if (isReadMethod) {
+ code.addInvokeinterface(callback_type_index, "readShort",
+ "(Ljava/lang/Object;Ljava/lang/String;S)S", 4);
+ } else {
+ code.addInvokeinterface(callback_type_index, "writeShort",
+ "(Ljava/lang/Object;Ljava/lang/String;SS)S", 5);
+ }
+ } else if (typeName.equals("D")) {
+ // double
+ if (isReadMethod) {
+ code.addInvokeinterface(callback_type_index, "readDouble",
+ "(Ljava/lang/Object;Ljava/lang/String;D)D", 5);
+ } else {
+ code.addInvokeinterface(callback_type_index, "writeDouble",
+ "(Ljava/lang/Object;Ljava/lang/String;DD)D", 7);
+ }
+ } else if (typeName.equals("F")) {
+ // float
+ if (isReadMethod) {
+ code.addInvokeinterface(callback_type_index, "readFloat",
+ "(Ljava/lang/Object;Ljava/lang/String;F)F", 4);
+ } else {
+ code.addInvokeinterface(callback_type_index, "writeFloat",
+ "(Ljava/lang/Object;Ljava/lang/String;FF)F", 5);
+ }
+ } else if (typeName.equals("J")) {
+ // long
+ if (isReadMethod) {
+ code.addInvokeinterface(callback_type_index, "readLong",
+ "(Ljava/lang/Object;Ljava/lang/String;J)J", 5);
+ } else {
+ code.addInvokeinterface(callback_type_index, "writeLong",
+ "(Ljava/lang/Object;Ljava/lang/String;JJ)J", 7);
+ }
+ } else {
+ // bad type
+ throw new RuntimeException("bad type: " + typeName);
+ }
+ }
+
+ private static void addTypeDependDataLoad(Bytecode code, String typeName,
+ int i) {
+ if ((typeName.charAt(0) == 'L')
+ && (typeName.charAt(typeName.length() - 1) == ';')
+ || (typeName.charAt(0) == '[')) {
+ // reference type
+ code.addAload(i);
+ } else if (typeName.equals("Z") || typeName.equals("B")
+ || typeName.equals("C") || typeName.equals("I")
+ || typeName.equals("S")) {
+ // boolean, byte, char, int, short
+ code.addIload(i);
+ } else if (typeName.equals("D")) {
+ // double
+ code.addDload(i);
+ } else if (typeName.equals("F")) {
+ // float
+ code.addFload(i);
+ } else if (typeName.equals("J")) {
+ // long
+ code.addLload(i);
+ } else {
+ // bad type
+ throw new RuntimeException("bad type: " + typeName);
+ }
+ }
+
+ private static void addTypeDependDataStore(Bytecode code, String typeName,
+ int i) {
+ if ((typeName.charAt(0) == 'L')
+ && (typeName.charAt(typeName.length() - 1) == ';')
+ || (typeName.charAt(0) == '[')) {
+ // reference type
+ code.addAstore(i);
+ } else if (typeName.equals("Z") || typeName.equals("B")
+ || typeName.equals("C") || typeName.equals("I")
+ || typeName.equals("S")) {
+ // boolean, byte, char, int, short
+ code.addIstore(i);
+ } else if (typeName.equals("D")) {
+ // double
+ code.addDstore(i);
+ } else if (typeName.equals("F")) {
+ // float
+ code.addFstore(i);
+ } else if (typeName.equals("J")) {
+ // long
+ code.addLstore(i);
+ } else {
+ // bad type
+ throw new RuntimeException("bad type: " + typeName);
+ }
+ }
+
+ private static void addTypeDependDataReturn(Bytecode code, String typeName) {
+ if ((typeName.charAt(0) == 'L')
+ && (typeName.charAt(typeName.length() - 1) == ';')
+ || (typeName.charAt(0) == '[')) {
+ // reference type
+ code.addOpcode(Opcode.ARETURN);
+ } else if (typeName.equals("Z") || typeName.equals("B")
+ || typeName.equals("C") || typeName.equals("I")
+ || typeName.equals("S")) {
+ // boolean, byte, char, int, short
+ code.addOpcode(Opcode.IRETURN);
+ } else if (typeName.equals("D")) {
+ // double
+ code.addOpcode(Opcode.DRETURN);
+ } else if (typeName.equals("F")) {
+ // float
+ code.addOpcode(Opcode.FRETURN);
+ } else if (typeName.equals("J")) {
+ // long
+ code.addOpcode(Opcode.LRETURN);
+ } else {
+ // bad type
+ throw new RuntimeException("bad type: " + typeName);
+ }
+ }
+
+}
diff --git a/src/org/hibernate/bytecode/javassist/InstantiationOptimizerAdapter.java b/src/org/hibernate/bytecode/javassist/InstantiationOptimizerAdapter.java
new file mode 100644
index 0000000000..a8c59be5b9
--- /dev/null
+++ b/src/org/hibernate/bytecode/javassist/InstantiationOptimizerAdapter.java
@@ -0,0 +1,32 @@
+package org.hibernate.bytecode.javassist;
+
+import org.hibernate.bytecode.ReflectionOptimizer;
+import org.hibernate.InstantiationException;
+
+import java.io.Serializable;
+
+/**
+ * The {@link ReflectionOptimizer.InstantiationOptimizer} implementation for Javassist
+ * which simply acts as an adapter to the {@link FastClass} class.
+ *
+ * @author Steve Ebersole
+ */
+public class InstantiationOptimizerAdapter implements ReflectionOptimizer.InstantiationOptimizer, Serializable {
+ private final FastClass fastClass;
+
+ public InstantiationOptimizerAdapter(FastClass fastClass) {
+ this.fastClass = fastClass;
+ }
+
+ public Object newInstance() {
+ try {
+ return fastClass.newInstance();
+ }
+ catch ( Throwable t ) {
+ throw new InstantiationException(
+ "Could not instantiate entity with Javassist optimizer: ",
+ fastClass.getJavaClass(), t
+ );
+ }
+ }
+}
diff --git a/src/org/hibernate/bytecode/javassist/JavassistClassTransformer.java b/src/org/hibernate/bytecode/javassist/JavassistClassTransformer.java
new file mode 100644
index 0000000000..95f77747c8
--- /dev/null
+++ b/src/org/hibernate/bytecode/javassist/JavassistClassTransformer.java
@@ -0,0 +1,111 @@
+//$Id: $
+package org.hibernate.bytecode.javassist;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.security.ProtectionDomain;
+
+import javassist.bytecode.ClassFile;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.hibernate.HibernateException;
+import org.hibernate.bytecode.AbstractClassTransformerImpl;
+import org.hibernate.bytecode.util.ClassFilter;
+
+/**
+ * Enhances classes, allowing them to implement the {@link FieldHandled}
+ * interface, which Hibernate then uses for field-level interception.
+ *
+ * @author Emmanuel Bernard
+ * @author Steve Ebersole
+ */
+public class JavassistClassTransformer extends AbstractClassTransformerImpl {
+
+ private static Log log = LogFactory.getLog( JavassistClassTransformer.class.getName() );
+
+ public JavassistClassTransformer(ClassFilter classFilter, org.hibernate.bytecode.util.FieldFilter fieldFilter) {
+ super( classFilter, fieldFilter );
+ }
+
+ protected byte[] doTransform(
+ ClassLoader loader,
+ String className,
+ Class classBeingRedefined,
+ ProtectionDomain protectionDomain,
+ byte[] classfileBuffer) {
+ ClassFile classfile;
+ try {
+ // WARNING: classfile only
+ classfile = new ClassFile( new DataInputStream( new ByteArrayInputStream( classfileBuffer ) ) );
+ }
+ catch (IOException e) {
+ log.error( "Unable to build enhancement metamodel for " + className );
+ return classfileBuffer;
+ }
+ FieldTransformer transformer = getFieldTransformer( classfile );
+ if ( transformer != null ) {
+ if ( log.isDebugEnabled() ) {
+ log.debug( "Enhancing " + className );
+ }
+ DataOutputStream out = null;
+ try {
+ transformer.transform( classfile );
+ ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
+ out = new DataOutputStream( byteStream );
+ classfile.write( out );
+ return byteStream.toByteArray();
+ }
+ catch (Exception e) {
+ log.error( "Unable to transform class", e );
+ throw new HibernateException( "Unable to transform class: " + e.getMessage() );
+ }
+ finally {
+ try {
+ if ( out != null ) out.close();
+ }
+ catch (IOException e) {
+ //swallow
+ }
+ }
+ }
+ return classfileBuffer;
+ }
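+ /*
+ * Illustrative sketch of adapting this transformer to a JDK 5
+ * java.lang.instrument agent (assumes "transformer" was built with the
+ * desired class/field filters; error handling elided):
+ *
+ * final JavassistClassTransformer transformer = ...;
+ * instrumentation.addTransformer( new java.lang.instrument.ClassFileTransformer() {
+ * public byte[] transform(ClassLoader loader, String className, Class classBeingRedefined,
+ * ProtectionDomain protectionDomain, byte[] classfileBuffer) {
+ * return transformer.transform( loader, className, classBeingRedefined, protectionDomain, classfileBuffer );
+ * }
+ * } );
+ */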
+
+ protected FieldTransformer getFieldTransformer(final ClassFile classfile) {
+ if ( alreadyInstrumented( classfile ) ) {
+ return null;
+ }
+ return new FieldTransformer(
+ new FieldFilter() {
+ public boolean handleRead(String desc, String name) {
+ return fieldFilter.shouldInstrumentField( classfile.getName(), name );
+ }
+
+ public boolean handleWrite(String desc, String name) {
+ return fieldFilter.shouldInstrumentField( classfile.getName(), name );
+ }
+
+ public boolean handleReadAccess(String fieldOwnerClassName, String fieldName) {
+ return fieldFilter.shouldTransformFieldAccess( classfile.getName(), fieldOwnerClassName, fieldName );
+ }
+
+ public boolean handleWriteAccess(String fieldOwnerClassName, String fieldName) {
+ return fieldFilter.shouldTransformFieldAccess( classfile.getName(), fieldOwnerClassName, fieldName );
+ }
+ }
+ );
+ }
+
+ private boolean alreadyInstrumented(ClassFile classfile) {
+ String[] intfs = classfile.getInterfaces();
+ for ( int i = 0; i < intfs.length; i++ ) {
+ if ( FieldHandled.class.getName().equals( intfs[i] ) ) {
+ return true;
+ }
+ }
+ return false;
+ }
+}
diff --git a/src/org/hibernate/bytecode/javassist/ProxyFactoryFactoryImpl.java b/src/org/hibernate/bytecode/javassist/ProxyFactoryFactoryImpl.java
new file mode 100644
index 0000000000..13c4ff53a4
--- /dev/null
+++ b/src/org/hibernate/bytecode/javassist/ProxyFactoryFactoryImpl.java
@@ -0,0 +1,123 @@
+package org.hibernate.bytecode.javassist;
+
+import org.hibernate.bytecode.ProxyFactoryFactory;
+import org.hibernate.bytecode.BasicProxyFactory;
+import org.hibernate.proxy.ProxyFactory;
+import org.hibernate.proxy.pojo.javassist.JavassistProxyFactory;
+import org.hibernate.AssertionFailure;
+import org.hibernate.HibernateException;
+import javassist.util.proxy.MethodFilter;
+import javassist.util.proxy.ProxyObject;
+import javassist.util.proxy.MethodHandler;
+
+import java.lang.reflect.Method;
+import java.util.HashMap;
+
+/**
+ * A factory for Javassist-based {@link ProxyFactory} instances.
+ *
+ * @author Steve Ebersole
+ */
+public class ProxyFactoryFactoryImpl implements ProxyFactoryFactory {
+
+ /**
+ * Builds a Javassist-based proxy factory.
+ *
+ * @return a new Javassist-based proxy factory.
+ */
+ public ProxyFactory buildProxyFactory() {
+ return new JavassistProxyFactory();
+ }
+
+ public BasicProxyFactory buildBasicProxyFactory(Class superClass, Class[] interfaces) {
+ return new BasicProxyFactoryImpl( superClass, interfaces );
+ }
+
+ private static class BasicProxyFactoryImpl implements BasicProxyFactory {
+ private final Class proxyClass;
+
+ public BasicProxyFactoryImpl(Class superClass, Class[] interfaces) {
+ if ( superClass == null && ( interfaces == null || interfaces.length < 1 ) ) {
+ throw new AssertionFailure( "attempting to build proxy without any superclass or interfaces" );
+ }
+ javassist.util.proxy.ProxyFactory factory = new javassist.util.proxy.ProxyFactory();
+ factory.setFilter( FINALIZE_FILTER );
+ if ( superClass != null ) {
+ factory.setSuperclass( superClass );
+ }
+ if ( interfaces != null && interfaces.length > 0 ) {
+ factory.setInterfaces( interfaces );
+ }
+ proxyClass = factory.createClass();
+ }
+
+ public Object getProxy() {
+ try {
+ ProxyObject proxy = ( ProxyObject ) proxyClass.newInstance();
+ proxy.setHandler( new PassThroughHandler( proxy, proxyClass.getName() ) );
+ return proxy;
+ }
+ catch ( Throwable t ) {
+ throw new HibernateException( "Unable to instantiate proxy instance", t );
+ }
+ }
+
+ public boolean isInstance(Object object) {
+ return proxyClass.isInstance( object );
+ }
+ }
+
+ private static final MethodFilter FINALIZE_FILTER = new MethodFilter() {
+ public boolean isHandled(Method m) {
+ // skip finalize methods
+ return !( m.getParameterTypes().length == 0 && m.getName().equals( "finalize" ) );
+ }
+ };
+
+ private static class PassThroughHandler implements MethodHandler {
+ private HashMap data = new HashMap();
+ private final Object proxiedObject;
+ private final String proxiedClassName;
+
+ public PassThroughHandler(Object proxiedObject, String proxiedClassName) {
+ this.proxiedObject = proxiedObject;
+ this.proxiedClassName = proxiedClassName;
+ }
+
+ public Object invoke(
+ Object object,
+ Method method,
+ Method method1,
+ Object[] args) throws Exception {
+ String name = method.getName();
+ if ( "toString".equals( name ) ) {
+ return proxiedClassName + "@" + System.identityHashCode( object );
+ }
+ else if ( "equals".equals( name ) ) {
+ return proxiedObject == object ? Boolean.TRUE : Boolean.FALSE;
+ }
+ else if ( "hashCode".equals( name ) ) {
+ return new Integer( System.identityHashCode( object ) );
+ }
+ // note: java.lang.reflect returns void.class (never null) for void methods
+ boolean hasGetterSignature = method.getParameterTypes().length == 0 && method.getReturnType() != void.class;
+ boolean hasSetterSignature = method.getParameterTypes().length == 1 && method.getReturnType() == void.class;
+ if ( name.startsWith( "get" ) && hasGetterSignature ) {
+ String propName = name.substring( 3 );
+ return data.get( propName );
+ }
+ else if ( name.startsWith( "is" ) && hasGetterSignature ) {
+ String propName = name.substring( 2 );
+ return data.get( propName );
+ }
+ else if ( name.startsWith( "set" ) && hasSetterSignature) {
+ String propName = name.substring( 3 );
+ data.put( propName, args[0] );
+ return null;
+ }
+ else {
+ // todo : what else to do here?
+ return null;
+ }
+ }
+ }
+}
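
To make the PassThroughHandler's get/set convention concrete, here is a hedged sketch (the NamedThing interface is invented for illustration): a value stored through a setter is returned by the matching getter, keyed by the property name after the prefix.

import org.hibernate.bytecode.BasicProxyFactory;
import org.hibernate.bytecode.javassist.ProxyFactoryFactoryImpl;

public class BasicProxySketch {
	// A hypothetical bean-style interface; "setName" stores under key "Name",
	// "getName" reads it back.
	public interface NamedThing {
		String getName();
		void setName(String name);
	}

	public static void main(String[] args) {
		BasicProxyFactory factory = new ProxyFactoryFactoryImpl()
				.buildBasicProxyFactory( null, new Class[] { NamedThing.class } );
		NamedThing proxy = ( NamedThing ) factory.getProxy();
		proxy.setName( "widget" );
		System.out.println( proxy.getName() ); // prints "widget"
	}
}
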
diff --git a/src/org/hibernate/bytecode/javassist/ReflectionOptimizerImpl.java b/src/org/hibernate/bytecode/javassist/ReflectionOptimizerImpl.java
new file mode 100644
index 0000000000..ce553f20e7
--- /dev/null
+++ b/src/org/hibernate/bytecode/javassist/ReflectionOptimizerImpl.java
@@ -0,0 +1,32 @@
+package org.hibernate.bytecode.javassist;
+
+import org.hibernate.bytecode.ReflectionOptimizer;
+
+import java.io.Serializable;
+
+/**
+ * ReflectionOptimizer implementation for Javassist.
+ *
+ * @author Steve Ebersole
+ */
+public class ReflectionOptimizerImpl implements ReflectionOptimizer, Serializable {
+
+ private final InstantiationOptimizer instantiationOptimizer;
+ private final AccessOptimizer accessOptimizer;
+
+ public ReflectionOptimizerImpl(
+ InstantiationOptimizer instantiationOptimizer,
+ AccessOptimizer accessOptimizer) {
+ this.instantiationOptimizer = instantiationOptimizer;
+ this.accessOptimizer = accessOptimizer;
+ }
+
+ public InstantiationOptimizer getInstantiationOptimizer() {
+ return instantiationOptimizer;
+ }
+
+ public AccessOptimizer getAccessOptimizer() {
+ return accessOptimizer;
+ }
+
+}
diff --git a/src/org/hibernate/bytecode/javassist/TransformingClassLoader.java b/src/org/hibernate/bytecode/javassist/TransformingClassLoader.java
new file mode 100644
index 0000000000..2e373f8e73
--- /dev/null
+++ b/src/org/hibernate/bytecode/javassist/TransformingClassLoader.java
@@ -0,0 +1,57 @@
+package org.hibernate.bytecode.javassist;
+
+import javassist.ClassPool;
+import javassist.NotFoundException;
+import javassist.CtClass;
+import javassist.CannotCompileException;
+import org.hibernate.HibernateException;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+/**
+ * @author Steve Ebersole
+ */
+public class TransformingClassLoader extends ClassLoader {
+ private ClassLoader parent;
+ private ClassPool classPool;
+
+ /*package*/ TransformingClassLoader(ClassLoader parent, String[] classpath) {
+ this.parent = parent;
+ classPool = new ClassPool( true );
+ for ( int i = 0; i < classpath.length; i++ ) {
+ try {
+ classPool.appendClassPath( classpath[i] );
+ }
+ catch ( NotFoundException e ) {
+ throw new HibernateException(
+ "Unable to resolve requested classpath for transformation [" +
+ classpath[i] + "] : " + e.getMessage()
+ );
+ }
+ }
+ }
+
+ protected Class findClass(String name) throws ClassNotFoundException {
+ try {
+ CtClass cc = classPool.get( name );
+ // todo : modify the class definition if not already transformed...
+ byte[] b = cc.toBytecode();
+ return defineClass( name, b, 0, b.length );
+ }
+ catch ( NotFoundException e ) {
+ throw new ClassNotFoundException();
+ }
+ catch ( IOException e ) {
+ throw new ClassNotFoundException();
+ }
+ catch ( CannotCompileException e ) {
+ throw new ClassNotFoundException();
+ }
+ }
+
+ public void release() {
+ classPool = null;
+ parent = null;
+ }
+}
diff --git a/src/org/hibernate/bytecode/package.html b/src/org/hibernate/bytecode/package.html
new file mode 100644
index 0000000000..f9c8121809
--- /dev/null
+++ b/src/org/hibernate/bytecode/package.html
@@ -0,0 +1,38 @@
+ This package defines the API for plugging in bytecode libraries
+ for usage by Hibernate. Hibernate uses these bytecode libraries
+ in three scenarios:
+ - Reflection optimization - to speed up the performance of
+ POJO entity and component construction and field/property access
+ - Proxy generation - runtime building of proxies used for
+ deferred loading of lazy entities
+ - Field-level interception - build-time instrumentation of entity
+ classes for the purpose of intercepting field-level access (read/write)
+ for both lazy loading and dirty tracking.
+
+
+ Currently, both CGLIB and Javassist are supported out-of-the-box.
+
+
+ Note that for field-level interception, simply plugging in a new {@link BytecodeProvider}
+ is not enough for Hibernate to be able to recognize new providers. You would additionally
+ need to make appropriate code changes to the {@link org.hibernate.intercept.Helper}
+ class. This is because the detection of these enhanced classes is needed in a static
+ environment (i.e. outside the scope of any {@link org.hibernate.SessionFactory}).
+
+
+ Note that in the current form the ability to specify a different bytecode provider
+ is actually considered a global setting (global to the JVM).
+
+
+
diff --git a/src/org/hibernate/bytecode/util/BasicClassFilter.java b/src/org/hibernate/bytecode/util/BasicClassFilter.java
new file mode 100644
index 0000000000..64e179aff7
--- /dev/null
+++ b/src/org/hibernate/bytecode/util/BasicClassFilter.java
@@ -0,0 +1,59 @@
+package org.hibernate.bytecode.util;
+
+import java.util.Set;
+import java.util.HashSet;
+
+/**
+ * BasicClassFilter provides class filtering based on a series of packages to
+ * be included and/or a series of explicit class names to be included. If
+ * neither is specified, then no restrictions are applied.
+ *
+ * @author Steve Ebersole
+ */
+public class BasicClassFilter implements ClassFilter {
+ private final String[] includedPackages;
+ private final Set includedClassNames = new HashSet();
+ private final boolean isAllEmpty;
+
+ public BasicClassFilter() {
+ this( null, null );
+ }
+
+ public BasicClassFilter(String[] includedPackages, String[] includedClassNames) {
+ this.includedPackages = includedPackages;
+ if ( includedClassNames != null ) {
+ for ( int i = 0; i < includedClassNames.length; i++ ) {
+ this.includedClassNames.add( includedClassNames[i] );
+ }
+ }
+
+ isAllEmpty = ( this.includedPackages == null || this.includedPackages.length == 0 )
+ && ( this.includedClassNames.isEmpty() );
+ }
+
+ public boolean shouldInstrumentClass(String className) {
+ if ( isAllEmpty ) {
+ return true;
+ }
+ else if ( includedClassNames.contains( className ) ) {
+ return true;
+ }
+ else if ( isInIncludedPackage( className ) ) {
+ return true;
+ }
+ else {
+ return false;
+ }
+ }
+
+ private boolean isInIncludedPackage(String className) {
+ if ( includedPackages != null ) {
+ for ( int i = 0; i < includedPackages.length; i++ ) {
+ if ( className.startsWith( includedPackages[i] ) ) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+}
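
A quick illustration of the filtering rules described in the class javadoc; all package and class names below are made-up examples:

import org.hibernate.bytecode.util.BasicClassFilter;

public class ClassFilterSketch {
	public static void main(String[] args) {
		BasicClassFilter filter = new BasicClassFilter(
				new String[] { "org.example.model" },         // included packages
				new String[] { "org.example.misc.Widget" } ); // explicit class names
		System.out.println( filter.shouldInstrumentClass( "org.example.model.Person" ) ); // true: package prefix
		System.out.println( filter.shouldInstrumentClass( "org.example.misc.Widget" ) );  // true: explicit name
		System.out.println( filter.shouldInstrumentClass( "org.example.misc.Other" ) );   // false
		System.out.println( new BasicClassFilter().shouldInstrumentClass( "x.Y" ) );      // true: no restrictions
	}
}
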
diff --git a/src/org/hibernate/bytecode/util/ByteCodeHelper.java b/src/org/hibernate/bytecode/util/ByteCodeHelper.java
new file mode 100644
index 0000000000..8ada73f7e9
--- /dev/null
+++ b/src/org/hibernate/bytecode/util/ByteCodeHelper.java
@@ -0,0 +1,78 @@
+package org.hibernate.bytecode.util;
+
+import java.io.InputStream;
+import java.io.IOException;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.BufferedInputStream;
+import java.util.zip.ZipInputStream;
+
+/**
+ * A helper for reading byte code from various input sources.
+ *
+ * @author Steve Ebersole
+ */
+public class ByteCodeHelper {
+ private ByteCodeHelper() {
+ }
+
+ /**
+ * Reads the raw class bytes from the given input stream.
+ *
+ * The stream is closed within this method!
+ *
+ * @param inputStream The stream from which to read the class bytes.
+ * @return The bytes read from the stream.
+ * @throws IOException If the stream is null or cannot be read.
+ */
+ public static byte[] readByteCode(InputStream inputStream) throws IOException {
+ if ( inputStream == null ) {
+ throw new IOException( "null input stream" );
+ }
+
+ byte[] buffer = new byte[409600];
+ byte[] classBytes = new byte[0];
+ int r = 0;
+
+ try {
+ r = inputStream.read( buffer );
+ while ( r >= buffer.length ) {
+ byte[] temp = new byte[ classBytes.length + buffer.length ];
+ System.arraycopy( classBytes, 0, temp, 0, classBytes.length );
+ System.arraycopy( buffer, 0, temp, classBytes.length, buffer.length );
+ classBytes = temp;
+ // read the next chunk; without this the loop would never terminate
+ r = inputStream.read( buffer );
+ }
+ if ( r != -1 ) {
+ byte[] temp = new byte[ classBytes.length + r ];
+ System.arraycopy( classBytes, 0, temp, 0, classBytes.length );
+ System.arraycopy( buffer, 0, temp, classBytes.length, r );
+ classBytes = temp;
+ }
+ }
+ finally {
+ try {
+ inputStream.close();
+ }
+ catch (IOException ignore) {
+ // intentionally empty
+ }
+ }
+
+ return classBytes;
+ }
+
+ public static byte[] readByteCode(File file) throws IOException {
+ return ByteCodeHelper.readByteCode( new FileInputStream( file ) );
+ }
+
+ public static byte[] readByteCode(ZipInputStream zip) throws IOException {
+ ByteArrayOutputStream bout = new ByteArrayOutputStream();
+ InputStream in = new BufferedInputStream( zip );
+ int b;
+ while ( ( b = in.read() ) != -1 ) {
+ bout.write( b );
+ }
+ return bout.toByteArray();
+ }
+}
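
As a usage sketch, the stream-based overload can read a class' bytecode straight off the classpath; the dots-to-slashes resource naming below is standard JVM layout, not something the helper itself provides:

import java.io.IOException;
import java.io.InputStream;
import org.hibernate.bytecode.util.ByteCodeHelper;

public class ReadByteCodeSketch {
	public static byte[] bytecodeOf(Class clazz) throws IOException {
		String resource = clazz.getName().replace( '.', '/' ) + ".class";
		InputStream in = clazz.getClassLoader().getResourceAsStream( resource );
		return ByteCodeHelper.readByteCode( in ); // the helper closes the stream
	}
}
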
diff --git a/src/org/hibernate/bytecode/util/ClassDescriptor.java b/src/org/hibernate/bytecode/util/ClassDescriptor.java
new file mode 100644
index 0000000000..5e2601aa4b
--- /dev/null
+++ b/src/org/hibernate/bytecode/util/ClassDescriptor.java
@@ -0,0 +1,30 @@
+package org.hibernate.bytecode.util;
+
+/**
+ * Contract describing the information Hibernate needs in terms of instrumenting
+ * a class, either via an Ant task or a dynamic classloader.
+ *
+ * @author Steve Ebersole
+ */
+public interface ClassDescriptor {
+ /**
+ * The name of the class.
+ *
+ * @return The class name.
+ */
+ public String getName();
+
+ /**
+ * Determine if the class is already instrumented.
+ *
+ * @return True if already instrumented; false otherwise.
+ */
+ public boolean isInstrumented();
+
+ /**
+ * The bytes making up the class' bytecode.
+ *
+ * @return The bytecode bytes.
+ */
+ public byte[] getBytes();
+}
diff --git a/src/org/hibernate/bytecode/util/ClassFilter.java b/src/org/hibernate/bytecode/util/ClassFilter.java
new file mode 100644
index 0000000000..9418fe4bb1
--- /dev/null
+++ b/src/org/hibernate/bytecode/util/ClassFilter.java
@@ -0,0 +1,10 @@
+package org.hibernate.bytecode.util;
+
+/**
+ * Used to determine whether a class should be instrumented.
+ *
+ * @author Steve Ebersole
+ */
+public interface ClassFilter {
+ public boolean shouldInstrumentClass(String className);
+}
diff --git a/src/org/hibernate/bytecode/util/FieldFilter.java b/src/org/hibernate/bytecode/util/FieldFilter.java
new file mode 100644
index 0000000000..6625120b1e
--- /dev/null
+++ b/src/org/hibernate/bytecode/util/FieldFilter.java
@@ -0,0 +1,29 @@
+package org.hibernate.bytecode.util;
+
+/**
+ * Used to determine whether a field reference should be instrumented.
+ *
+ * @author Steve Ebersole
+ */
+public interface FieldFilter {
+ /**
+ * Should this field definition be instrumented?
+ *
+ * @param className The name of the class currently being processed
+ * @param fieldName The name of the field being checked.
+ * @return True if we should instrument this field.
+ */
+ public boolean shouldInstrumentField(String className, String fieldName);
+
+ /**
+ * Should we instrument *access to* the given field. This differs from
+ * {@link #shouldInstrumentField} in that here we are talking about a particular usage of
+ * a field.
+ *
+ * @param transformingClassName The class currently being transformed.
+ * @param fieldOwnerClassName The name of the class owning this field being checked.
+ * @param fieldName The name of the field being checked.
+ * @return True if this access should be transformed.
+ */
+ public boolean shouldTransformFieldAccess(String transformingClassName, String fieldOwnerClassName, String fieldName);
+}
diff --git a/src/org/hibernate/cache/AbstractJndiBoundCacheProvider.java b/src/org/hibernate/cache/AbstractJndiBoundCacheProvider.java
new file mode 100644
index 0000000000..ba013736f1
--- /dev/null
+++ b/src/org/hibernate/cache/AbstractJndiBoundCacheProvider.java
@@ -0,0 +1,86 @@
+// $Id$
+package org.hibernate.cache;
+
+import java.util.Properties;
+
+import javax.naming.Context;
+import javax.naming.InitialContext;
+import javax.naming.NamingException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.hibernate.cfg.Environment;
+import org.hibernate.util.NamingHelper;
+import org.hibernate.util.StringHelper;
+
+/**
+ * Support for CacheProvider implementations which are backed by caches bound
+ * into JNDI namespace.
+ *
+ * @author Steve Ebersole
+ */
+public abstract class AbstractJndiBoundCacheProvider implements CacheProvider {
+
+ private static final Log log = LogFactory.getLog( AbstractJndiBoundCacheProvider.class );
+ private Object cache;
+
+ protected void prepare(Properties properties) {
+ // Do nothing; subclasses may override.
+ }
+
+ protected void release() {
+ // Do nothing; subclasses may override.
+ }
+
+ /**
+ * Callback to perform any necessary initialization of the underlying cache implementation during SessionFactory
+ * construction.
+ *
+ * @param properties current configuration settings.
+ */
+ public final void start(Properties properties) throws CacheException {
+ String jndiNamespace = properties.getProperty( Environment.CACHE_NAMESPACE );
+ if ( StringHelper.isEmpty( jndiNamespace ) ) {
+ throw new CacheException( "No JNDI namespace specified for cache" );
+ }
+ cache = locateCache( jndiNamespace, NamingHelper.getJndiProperties( properties ) );
+ prepare( properties );
+ }
+
+ /**
+ * Callback to perform any necessary cleanup of the underlying cache
+ * implementation during SessionFactory.close().
+ */
+ public final void stop() {
+ release();
+ cache = null;
+ }
+
+ private Object locateCache(String jndiNamespace, Properties jndiProperties) {
+
+ Context ctx = null;
+ try {
+ ctx = new InitialContext( jndiProperties );
+ return ctx.lookup( jndiNamespace );
+ }
+ catch (NamingException ne) {
+ String msg = "Unable to retrieve Cache from JNDI [" + jndiNamespace + "]";
+ log.info( msg, ne );
+ throw new CacheException( msg, ne );
+ }
+ finally {
+ if ( ctx != null ) {
+ try {
+ ctx.close();
+ }
+ catch( NamingException ne ) {
+ log.info( "Unable to release initial context", ne );
+ }
+ }
+ }
+ }
+
+ public Object getCache() {
+ return cache;
+ }
+}
diff --git a/src/org/hibernate/cache/Cache.java b/src/org/hibernate/cache/Cache.java
new file mode 100644
index 0000000000..186d00c674
--- /dev/null
+++ b/src/org/hibernate/cache/Cache.java
@@ -0,0 +1,106 @@
+//$Id$
+package org.hibernate.cache;
+
+import java.util.Map;
+
+/**
+ * Implementors define a caching algorithm. All implementors
+ * must be threadsafe.
+ */
+public interface Cache {
+ /**
+ * Get an item from the cache
+ * @param key
+ * @return the cached object or null
+ * @throws CacheException
+ */
+ public Object read(Object key) throws CacheException;
+ /**
+ * Get an item from the cache, nontransactionally
+ * @param key
+ * @return the cached object or null
+ * @throws CacheException
+ */
+ public Object get(Object key) throws CacheException;
+ /**
+ * Add an item to the cache, nontransactionally, with
+ * failfast semantics
+ * @param key
+ * @param value
+ * @throws CacheException
+ */
+ public void put(Object key, Object value) throws CacheException;
+ /**
+ * Add an item to the cache
+ * @param key
+ * @param value
+ * @throws CacheException
+ */
+ public void update(Object key, Object value) throws CacheException;
+ /**
+ * Remove an item from the cache
+ */
+ public void remove(Object key) throws CacheException;
+ /**
+ * Clear the cache
+ */
+ public void clear() throws CacheException;
+ /**
+ * Clean up
+ */
+ public void destroy() throws CacheException;
+ /**
+ * If this is a clustered cache, lock the item
+ */
+ public void lock(Object key) throws CacheException;
+ /**
+ * If this is a clustered cache, unlock the item
+ */
+ public void unlock(Object key) throws CacheException;
+ /**
+ * Generate a timestamp
+ */
+ public long nextTimestamp();
+ /**
+ * Get a reasonable "lock timeout"
+ */
+ public int getTimeout();
+
+ /**
+ * Get the name of the cache region
+ */
+ public String getRegionName();
+
+ /**
+ * The number of bytes this cache region is currently consuming in memory.
+ *
+ * @return The number of bytes consumed by this region; -1 if unknown or
+ * unsupported.
+ */
+ public long getSizeInMemory();
+
+ /**
+ * The count of entries currently contained in the region's in-memory store.
+ *
+ * @return The count of entries in memory; -1 if unknown or unsupported.
+ */
+ public long getElementCountInMemory();
+
+ /**
+ * The count of entries currently contained in the region's disk store.
+ *
+ * @return The count of entries on disk; -1 if unknown or unsupported.
+ */
+ public long getElementCountOnDisk();
+
+ /**
+ * optional operation
+ */
+ public Map toMap();
+}
+
+
+
+
+
+
diff --git a/src/org/hibernate/cache/CacheConcurrencyStrategy.java b/src/org/hibernate/cache/CacheConcurrencyStrategy.java
new file mode 100644
index 0000000000..0870b8f50b
--- /dev/null
+++ b/src/org/hibernate/cache/CacheConcurrencyStrategy.java
@@ -0,0 +1,177 @@
+//$Id$
+package org.hibernate.cache;
+
+import java.util.Comparator;
+
+/**
+ * Implementors manage transactional access to cached data. Transactions
+ * pass in a timestamp indicating transaction start time. Two different
+ * implementation patterns are provided for.
+ * - A transaction-aware cache implementation might be wrapped by a
+ * "synchronous" concurrency strategy, where updates to the cache are written
+ * to the cache inside the transaction.
+ * - A non transaction-aware cache would be wrapped by an "asynchronous"
+ * concurrency strategy, where items are merely "soft locked" during the
+ * transaction and then updated during the "after transaction completion"
+ * phase; the soft lock is not an actual lock on the database row -
+ * only upon the cached representation of the item.
+ *
+ *
+ * In terms of entity caches, the expected call sequences are:
+ * - DELETES : {@link #lock} -> {@link #evict} -> {@link #release}
+ * - UPDATES : {@link #lock} -> {@link #update} -> {@link #afterUpdate}
+ * - INSERTS : {@link #insert} -> {@link #afterInsert}
+ *
+ *
+ * In terms of collection caches, all modification actions actually just
+ * invalidate the entry(s). The call sequence here is:
+ * {@link #lock} -> {@link #evict} -> {@link #release}
+ *
+ * Note that, for an asynchronous cache, cache invalidation must be a two
+ * step process (lock->release, or lock->afterUpdate), since this is the only
+ * way to guarantee consistency with the database for a nontransactional cache
+ * implementation. For a synchronous cache, cache invalidation is a single
+ * step process (evict, or update). Hence, this interface defines a three
+ * step process, to cater for both models.
+ *
+ * Note that query result caching does not go through a concurrency strategy; query results
+ * are managed directly against the underlying {@link Cache cache regions}.
+ */
+public interface CacheConcurrencyStrategy {
+
+ /**
+ * Attempt to retrieve an object from the cache. Mainly used in attempting
+ * to resolve entities/collections from the second level cache.
+ *
+ * @param key
+ * @param txTimestamp a timestamp prior to the transaction start time
+ * @return the cached object or null
+ * @throws CacheException
+ */
+ public Object get(Object key, long txTimestamp) throws CacheException;
+
+ /**
+ * Attempt to cache an object, after loading from the database.
+ *
+ * @param key
+ * @param value
+ * @param txTimestamp a timestamp prior to the transaction start time
+ * @param version the item version number
+ * @param versionComparator a comparator used to compare version numbers
+ * @param minimalPut indicates that the cache should avoid a put if the item is already cached
+ * @return true if the object was successfully cached
+ * @throws CacheException
+ */
+ public boolean put(
+ Object key,
+ Object value,
+ long txTimestamp,
+ Object version,
+ Comparator versionComparator,
+ boolean minimalPut)
+ throws CacheException;
+
+ /**
+ * We are going to attempt to update/delete the keyed object. This
+ * method is used by "asynchronous" concurrency strategies.
+ *
+ * The returned object must be passed back to release(), to release the
+ * lock. Concurrency strategies which do not support client-visible
+ * locks may silently return null.
+ *
+ * @param key
+ * @param version
+ * @throws CacheException
+ */
+ public SoftLock lock(Object key, Object version) throws CacheException;
+
+ /**
+ * Called after an item has become stale (before the transaction completes).
+ * This method is used by "synchronous" concurrency strategies.
+ */
+ public void evict(Object key) throws CacheException;
+
+ /**
+ * Called after an item has been updated (before the transaction completes),
+ * instead of calling evict().
+ * This method is used by "synchronous" concurrency strategies.
+ */
+ public boolean update(Object key, Object value, Object currentVersion, Object previousVersion) throws CacheException;
+
+ /**
+ * Called after an item has been inserted (before the transaction completes),
+ * instead of calling evict().
+ * This method is used by "synchronous" concurrency strategies.
+ */
+ public boolean insert(Object key, Object value, Object currentVersion) throws CacheException;
+
+
+ /**
+ * Called when we have finished the attempted update/delete (which may or
+ * may not have been successful), after transaction completion.
+ * This method is used by "asynchronous" concurrency strategies.
+ * @param key
+ * @throws CacheException
+ */
+ public void release(Object key, SoftLock lock) throws CacheException;
+ /**
+ * Called after an item has been updated (after the transaction completes),
+ * instead of calling release().
+ * This method is used by "asynchronous" concurrency strategies.
+ */
+ public boolean afterUpdate(Object key, Object value, Object version, SoftLock lock)
+ throws CacheException;
+ /**
+ * Called after an item has been inserted (after the transaction completes),
+ * instead of calling release().
+ * This method is used by "asynchronous" concurrency strategies.
+ */
+ public boolean afterInsert(Object key, Object value, Object version)
+ throws CacheException;
+
+
+ /**
+ * Evict an item from the cache immediately (without regard for transaction
+ * isolation).
+ * @param key
+ * @throws CacheException
+ */
+ public void remove(Object key) throws CacheException;
+ /**
+ * Evict all items from the cache immediately.
+ * @throws CacheException
+ */
+ public void clear() throws CacheException;
+ /**
+ * Clean up all resources.
+ */
+ public void destroy();
+ /**
+ * Set the underlying cache implementation.
+ * @param cache
+ */
+ public void setCache(Cache cache);
+
+ /**
+ * Marker interface, denoting a client-visible "soft lock"
+ * on a cached item.
+ * @author Gavin King
+ */
+ public static interface SoftLock {}
+
+ /**
+ * Get the cache region name
+ */
+ public String getRegionName();
+
+ /**
+ * Get the wrapped cache implementation
+ */
+ public Cache getCache();
+}
+
+
+
+
+
+
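
To make the documented entity UPDATE sequence concrete, the sketch below issues the three calls in order. In practice these calls come from Hibernate's internal entity actions around a database update, never from application code; the key/value/version arguments are placeholders.

import org.hibernate.cache.CacheConcurrencyStrategy;
import org.hibernate.cache.CacheConcurrencyStrategy.SoftLock;

public class UpdateSequenceSketch {
	public static void updateSequence(
			CacheConcurrencyStrategy strategy,
			Object key, Object newValue, Object newVersion, Object oldVersion) {
		// 1. before the database update: soft lock the cached item
		SoftLock lock = strategy.lock( key, oldVersion );
		// 2. during the transaction: only "synchronous" strategies act here
		strategy.update( key, newValue, newVersion, oldVersion );
		// 3. after transaction completion: "asynchronous" strategies re-cache
		// the new state and release the soft lock
		strategy.afterUpdate( key, newValue, newVersion, lock );
	}
}
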
diff --git a/src/org/hibernate/cache/CacheException.java b/src/org/hibernate/cache/CacheException.java
new file mode 100644
index 0000000000..3540974b9b
--- /dev/null
+++ b/src/org/hibernate/cache/CacheException.java
@@ -0,0 +1,23 @@
+//$Id$
+package org.hibernate.cache;
+
+import org.hibernate.HibernateException;
+
+/**
+ * Something went wrong in the cache
+ */
+public class CacheException extends HibernateException {
+
+ public CacheException(String s) {
+ super(s);
+ }
+
+ public CacheException(String s, Throwable e) {
+ super(s, e);
+ }
+
+ public CacheException(Throwable e) {
+ super(e);
+ }
+
+}
diff --git a/src/org/hibernate/cache/CacheFactory.java b/src/org/hibernate/cache/CacheFactory.java
new file mode 100644
index 0000000000..52bf67236a
--- /dev/null
+++ b/src/org/hibernate/cache/CacheFactory.java
@@ -0,0 +1,71 @@
+//$Id$
+package org.hibernate.cache;
+
+import java.util.Properties;
+
+import org.hibernate.HibernateException;
+import org.hibernate.MappingException;
+import org.hibernate.cfg.Settings;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * @author Gavin King
+ */
+public final class CacheFactory {
+
+ private static final Log log = LogFactory.getLog(CacheFactory.class);
+
+ private CacheFactory() {}
+
+ public static final String READ_ONLY = "read-only";
+ public static final String READ_WRITE = "read-write";
+ public static final String NONSTRICT_READ_WRITE = "nonstrict-read-write";
+ public static final String TRANSACTIONAL = "transactional";
+
+ public static CacheConcurrencyStrategy createCache(
+ final String concurrencyStrategy,
+ String regionName,
+ final boolean mutable,
+ final Settings settings,
+ final Properties properties)
+ throws HibernateException {
+
+ if ( concurrencyStrategy==null || !settings.isSecondLevelCacheEnabled() ) return null; //no cache
+
+ String prefix = settings.getCacheRegionPrefix();
+ if ( prefix!=null ) regionName = prefix + '.' + regionName;
+
+ if ( log.isDebugEnabled() ) log.debug("instantiating cache region: " + regionName + " usage strategy: " + concurrencyStrategy);
+
+ final CacheConcurrencyStrategy ccs;
+ if ( concurrencyStrategy.equals(READ_ONLY) ) {
+ if (mutable) log.warn( "read-only cache configured for mutable class: " + regionName );
+ ccs = new ReadOnlyCache();
+ }
+ else if ( concurrencyStrategy.equals(READ_WRITE) ) {
+ ccs = new ReadWriteCache();
+ }
+ else if ( concurrencyStrategy.equals(NONSTRICT_READ_WRITE) ) {
+ ccs = new NonstrictReadWriteCache();
+ }
+ else if ( concurrencyStrategy.equals(TRANSACTIONAL) ) {
+ ccs = new TransactionalCache();
+ }
+ else {
+ throw new MappingException("cache usage attribute should be read-write, read-only, nonstrict-read-write or transactional");
+ }
+
+ final Cache impl;
+ try {
+ impl = settings.getCacheProvider().buildCache(regionName, properties);
+ }
+ catch (CacheException e) {
+ throw new HibernateException( "Could not instantiate cache implementation", e );
+ }
+ ccs.setCache(impl);
+
+ return ccs;
+ }
+
+}
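
A hedged sketch of calling the factory, mirroring how a mapping's cache usage attribute is resolved; the Settings and Properties would come from the Configuration that is building the SessionFactory, and the entity name is a made-up example:

import java.util.Properties;
import org.hibernate.cache.CacheConcurrencyStrategy;
import org.hibernate.cache.CacheFactory;
import org.hibernate.cfg.Settings;

public class CreateCacheSketch {
	public static CacheConcurrencyStrategy personRegion(Settings settings, Properties properties) {
		return CacheFactory.createCache(
				CacheFactory.READ_WRITE,    // usage: "read-write"
				"org.example.model.Person", // region name; a configured prefix may be prepended
				true,                       // mutable class (read-only usage would log a warning)
				settings,
				properties );
	}
}
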
diff --git a/src/org/hibernate/cache/CacheKey.java b/src/org/hibernate/cache/CacheKey.java
new file mode 100755
index 0000000000..274915487d
--- /dev/null
+++ b/src/org/hibernate/cache/CacheKey.java
@@ -0,0 +1,72 @@
+//$Id$
+package org.hibernate.cache;
+
+import java.io.Serializable;
+
+import org.hibernate.EntityMode;
+import org.hibernate.engine.SessionFactoryImplementor;
+import org.hibernate.type.Type;
+
+/**
+ * Allows multiple entity classes / collection roles to be
+ * stored in the same cache region. Also allows for composite
+ * keys which do not properly implement equals()/hashCode().
+ *
+ * @author Gavin King
+ */
+public class CacheKey implements Serializable {
+ private final Serializable key;
+ private final Type type;
+ private final String entityOrRoleName;
+ private final EntityMode entityMode;
+ private final int hashCode;
+
+ /**
+ * Construct a new key for a collection or entity instance.
+ * Note that an entity name should always be the root entity
+ * name, not a subclass entity name.
+ *
+ * @param id The identifier associated with the cached data
+ * @param type The Hibernate type mapping
+ * @param entityOrRoleName The entity or collection-role name.
+ * @param entityMode The entity mode of the originating session
+ * @param factory The session factory for which we are caching
+ */
+ public CacheKey(
+ final Serializable id,
+ final Type type,
+ final String entityOrRoleName,
+ final EntityMode entityMode,
+ final SessionFactoryImplementor factory) {
+ this.key = id;
+ this.type = type;
+ this.entityOrRoleName = entityOrRoleName;
+ this.entityMode = entityMode;
+ hashCode = type.getHashCode( key, entityMode, factory );
+ }
+
+ //Mainly for OSCache
+ public String toString() {
+ return entityOrRoleName + '#' + key.toString();//"CacheKey#" + type.toString(key, sf);
+ }
+
+ public boolean equals(Object other) {
+ if ( !(other instanceof CacheKey) ) return false;
+ CacheKey that = (CacheKey) other;
+ return entityOrRoleName.equals( that.entityOrRoleName )
+ && type.isEqual( key, that.key, entityMode );
+ }
+
+ public int hashCode() {
+ return hashCode;
+ }
+
+ public Serializable getKey() {
+ return key;
+ }
+
+ public String getEntityOrRoleName() {
+ return entityOrRoleName;
+ }
+
+}
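
For orientation, session-level code builds these keys roughly as in the sketch below (a hedged example: the persister and session come from Hibernate internals, and the accessor names reflect this codebase's EntityPersister/SessionImplementor contracts):

import java.io.Serializable;
import org.hibernate.cache.CacheKey;
import org.hibernate.engine.SessionImplementor;
import org.hibernate.persister.entity.EntityPersister;

public class CacheKeySketch {
	public static CacheKey keyFor(SessionImplementor session, EntityPersister persister, Serializable id) {
		return new CacheKey(
				id,
				persister.getIdentifierType(), // hashing/equality delegate to this type
				persister.getRootEntityName(), // always the root name, never a subclass name
				session.getEntityMode(),
				session.getFactory() );
	}
}
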
diff --git a/src/org/hibernate/cache/CacheProvider.java b/src/org/hibernate/cache/CacheProvider.java
new file mode 100644
index 0000000000..bbfb7def54
--- /dev/null
+++ b/src/org/hibernate/cache/CacheProvider.java
@@ -0,0 +1,43 @@
+//$Id$
+package org.hibernate.cache;
+
+import java.util.Properties;
+
+/**
+ * Support for pluggable caches.
+ *
+ * @author Gavin King
+ */
+public interface CacheProvider {
+
+ /**
+ * Configure the cache
+ *
+ * @param regionName the name of the cache region
+ * @param properties configuration settings
+ * @throws CacheException
+ */
+ public Cache buildCache(String regionName, Properties properties) throws CacheException;
+
+ /**
+ * Generate a timestamp
+ */
+ public long nextTimestamp();
+
+ /**
+ * Callback to perform any necessary initialization of the underlying cache implementation
+ * during SessionFactory construction.
+ *
+ * @param properties current configuration settings.
+ */
+ public void start(Properties properties) throws CacheException;
+
+ /**
+ * Callback to perform any necessary cleanup of the underlying cache implementation
+ * during SessionFactory.close().
+ */
+ public void stop();
+
+ public boolean isMinimalPutsEnabledByDefault();
+
+}
diff --git a/src/org/hibernate/cache/EhCache.java b/src/org/hibernate/cache/EhCache.java
new file mode 100644
index 0000000000..981741810c
--- /dev/null
+++ b/src/org/hibernate/cache/EhCache.java
@@ -0,0 +1,275 @@
+//$Id$
+/**
+ * Copyright 2003-2006 Greg Luck, Jboss Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.hibernate.cache;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+import net.sf.ehcache.CacheManager;
+import net.sf.ehcache.Element;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * EHCache plugin for Hibernate
+ *
+ * EHCache uses a {@link net.sf.ehcache.store.MemoryStore} and a
+ * {@link net.sf.ehcache.store.DiskStore}.
+ * The {@link net.sf.ehcache.store.DiskStore} requires that both keys and values be {@link java.io.Serializable}.
+ * However, the MemoryStore does not, and in ehcache-1.2 non-Serializable objects are permitted. They are discarded
+ * if an attempt is made to overflow them to disk or to replicate them to remote cache peers.
+ *
+ * @author Greg Luck
+ * @author Emmanuel Bernard
+ */
+public class EhCache implements Cache {
+ private static final Log log = LogFactory.getLog( EhCache.class );
+
+ private static final int SIXTY_THOUSAND_MS = 60000;
+
+ private net.sf.ehcache.Cache cache;
+
+ /**
+ * Creates a new Hibernate pluggable cache backed by the given EhCache instance.
+ *
+ * @param cache The underlying EhCache instance to use.
+ */
+ public EhCache(net.sf.ehcache.Cache cache) {
+ this.cache = cache;
+ }
+
+ /**
+ * Gets a value of an element which matches the given key.
+ *
+ * @param key the key of the element to return.
+ * @return The value placed into the cache with an earlier put, or null if not found or expired
+ * @throws CacheException
+ */
+ public Object get(Object key) throws CacheException {
+ try {
+ if ( log.isDebugEnabled() ) {
+ log.debug( "key: " + key );
+ }
+ if ( key == null ) {
+ return null;
+ }
+ else {
+ Element element = cache.get( key );
+ if ( element == null ) {
+ if ( log.isDebugEnabled() ) {
+ log.debug( "Element for " + key + " is null" );
+ }
+ return null;
+ }
+ else {
+ return element.getObjectValue();
+ }
+ }
+ }
+ catch (net.sf.ehcache.CacheException e) {
+ throw new CacheException( e );
+ }
+ }
+
+ public Object read(Object key) throws CacheException {
+ return get( key );
+ }
+
+
+ /**
+ * Puts an object into the cache.
+ *
+ * @param key a key
+ * @param value a value
+ * @throws CacheException if the {@link CacheManager}
+ * is shutdown or another {@link Exception} occurs.
+ */
+ public void update(Object key, Object value) throws CacheException {
+ put( key, value );
+ }
+
+ /**
+ * Puts an object into the cache.
+ *
+ * @param key a key
+ * @param value a value
+ * @throws CacheException if the {@link CacheManager}
+ * is shutdown or another {@link Exception} occurs.
+ */
+ public void put(Object key, Object value) throws CacheException {
+ try {
+ Element element = new Element( key, value );
+ cache.put( element );
+ }
+ catch (IllegalArgumentException e) {
+ throw new CacheException( e );
+ }
+ catch (IllegalStateException e) {
+ throw new CacheException( e );
+ }
+ catch (net.sf.ehcache.CacheException e) {
+ throw new CacheException( e );
+ }
+
+ }
+
+ /**
+ * Removes the element which matches the key.
+ *
+ * If no element matches, nothing is removed and no Exception is thrown.
+ *
+ * @param key the key of the element to remove
+ * @throws CacheException
+ */
+ public void remove(Object key) throws CacheException {
+ try {
+ cache.remove( key );
+ }
+ catch (ClassCastException e) {
+ throw new CacheException( e );
+ }
+ catch (IllegalStateException e) {
+ throw new CacheException( e );
+ }
+ catch (net.sf.ehcache.CacheException e) {
+ throw new CacheException( e );
+ }
+ }
+
+ /**
+ * Remove all elements in the cache, but leave the cache
+ * in a usable state.
+ *
+ * @throws CacheException
+ */
+ public void clear() throws CacheException {
+ try {
+ cache.removeAll();
+ }
+ catch (IllegalStateException e) {
+ throw new CacheException( e );
+ }
+ catch (net.sf.ehcache.CacheException e) {
+ throw new CacheException( e );
+ }
+ }
+
+ /**
+ * Remove the cache and make it unusable.
+ *
+ * @throws CacheException
+ */
+ public void destroy() throws CacheException {
+ try {
+ cache.getCacheManager().removeCache( cache.getName() );
+ }
+ catch (IllegalStateException e) {
+ throw new CacheException( e );
+ }
+ catch (net.sf.ehcache.CacheException e) {
+ throw new CacheException( e );
+ }
+ }
+
+ /**
+ * Calls to this method should perform their own synchronization.
+ * It is provided for distributed caches. Because EHCache is not distributed
+ * this method does nothing.
+ */
+ public void lock(Object key) throws CacheException {
+ }
+
+ /**
+ * Calls to this method should perform their own synchronization.
+ * It is provided for distributed caches. Because EHCache is not distributed
+ * this method does nothing.
+ */
+ public void unlock(Object key) throws CacheException {
+ }
+
+ /**
+ * Gets the next timestamp.
+ */
+ public long nextTimestamp() {
+ return Timestamper.next();
+ }
+
+ /**
+ * Returns the lock timeout for this cache.
+ */
+ public int getTimeout() {
+ // 60 second lock timeout
+ return Timestamper.ONE_MS * SIXTY_THOUSAND_MS;
+ }
+
+ public String getRegionName() {
+ return cache.getName();
+ }
+
+ /**
+ * Warning: This method can be very expensive to run. Allow approximately 1 second
+ * per 1MB of entries. Running this method could create liveness problems
+ * because the object lock is held for a long period.
+ *
+ *
+ * @return the approximate size of memory ehcache is using for the MemoryStore for this cache
+ */
+ public long getSizeInMemory() {
+ try {
+ return cache.calculateInMemorySize();
+ }
+ catch (Throwable t) {
+ return -1;
+ }
+ }
+
+ public long getElementCountInMemory() {
+ try {
+ return cache.getMemoryStoreSize();
+ }
+ catch (net.sf.ehcache.CacheException ce) {
+ throw new CacheException( ce );
+ }
+ }
+
+ public long getElementCountOnDisk() {
+ return cache.getDiskStoreSize();
+ }
+
+ public Map toMap() {
+ try {
+ Map result = new HashMap();
+ Iterator iter = cache.getKeys().iterator();
+ while ( iter.hasNext() ) {
+ Object key = iter.next();
+ result.put( key, cache.get( key ).getObjectValue() );
+ }
+ return result;
+ }
+ catch (Exception e) {
+ throw new CacheException( e );
+ }
+ }
+
+ public String toString() {
+ return "EHCache(" + getRegionName() + ')';
+ }
+
+}
\ No newline at end of file
diff --git a/src/org/hibernate/cache/EhCacheProvider.java b/src/org/hibernate/cache/EhCacheProvider.java
new file mode 100644
index 0000000000..7258fa98f9
--- /dev/null
+++ b/src/org/hibernate/cache/EhCacheProvider.java
@@ -0,0 +1,167 @@
+//$Id$
+/**
+ * Copyright 2003-2006 Greg Luck, Jboss Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.hibernate.cache;
+
+import java.util.Properties;
+import java.net.URL;
+
+import net.sf.ehcache.CacheManager;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.hibernate.cfg.Environment;
+import org.hibernate.util.StringHelper;
+import org.hibernate.util.ConfigHelper;
+
+/**
+ * Cache Provider plugin for ehcache-1.2. New in this provider are ehcache support for multiple
+ * Hibernate session factories, each with its own ehcache configuration, and non Serializable keys and values.
+ * Ehcache-1.2 also has many other features such as cluster support and listeners, which can be used seamlessly simply
+ * by configuration in ehcache.xml.
+ *
+ * Use hibernate.cache.provider_class=org.hibernate.cache.EhCacheProvider in the Hibernate configuration
+ * to enable this provider for Hibernate's second level cache.
+ *
+ * When configuring multiple ehcache CacheManagers, as you would where you have multiple Hibernate Configurations and
+ * multiple SessionFactories, specify in each Hibernate configuration the ehcache configuration using
+ * the property hibernate.cache.provider_configuration_file_resource_path. An example to set an ehcache
+ * configuration called ehcache-2.xml would be hibernate.cache.provider_configuration_file_resource_path=/ehcache-2.xml.
+ * If the leading slash is not there one will be added. The configuration file will be looked for in the root of the classpath.
+ *
+ * Updated for ehcache-1.2. Note this provider requires ehcache-1.2.jar. Make sure ehcache-1.1.jar or earlier
+ * is not in the classpath or it will not work.
+ *
+ * See http://ehcache.sf.net for documentation on ehcache
+ *
+ *
+ * @author Greg Luck
+ * @author Emmanuel Bernard
+ */
+public class EhCacheProvider implements CacheProvider {
+
+ private static final Log log = LogFactory.getLog(EhCacheProvider.class);
+
+ private CacheManager manager;
+
+ /**
+ * Builds a Cache.
+ *
+ * Even though this method provides properties, they are not used.
+ * Properties for EHCache are specified in the ehcache.xml file.
+ * Configuration will be read from ehcache.xml for a cache declaration
+ * where the name attribute matches the name parameter in this builder.
+ *
+ * @param name the name of the cache. Must match a cache configured in ehcache.xml
+ * @param properties not used
+ * @return a newly built and initialised cache
+ * @throws CacheException inter alia, if a cache of the same name already exists
+ */
+ public Cache buildCache(String name, Properties properties) throws CacheException {
+ try {
+ net.sf.ehcache.Cache cache = manager.getCache(name);
+ if (cache == null) {
+ log.warn("Could not find configuration [" + name + "]; using defaults.");
+ manager.addCache(name);
+ cache = manager.getCache(name);
+ log.debug("started EHCache region: " + name);
+ }
+ return new EhCache(cache);
+ }
+ catch (net.sf.ehcache.CacheException e) {
+ throw new CacheException(e);
+ }
+ }
+
+ /**
+ * Returns the next timestamp.
+ */
+ public long nextTimestamp() {
+ return Timestamper.next();
+ }
+
+ /**
+ * Callback to perform any necessary initialization of the underlying cache implementation
+ * during SessionFactory construction.
+ *
+ * @param properties current configuration settings.
+ */
+ public void start(Properties properties) throws CacheException {
+ if (manager != null) {
+ log.warn("Attempt to restart an already started EhCacheProvider. Use sessionFactory.close() " +
+ " between repeated calls to buildSessionFactory. Using previously created EhCacheProvider." +
+ " If this behaviour is required, consider using net.sf.ehcache.hibernate.SingletonEhCacheProvider.");
+ return;
+ }
+ try {
+ String configurationResourceName = null;
+ if (properties != null) {
+ configurationResourceName = (String) properties.get( Environment.CACHE_PROVIDER_CONFIG );
+ }
+ if ( StringHelper.isEmpty( configurationResourceName ) ) {
+ manager = new CacheManager();
+ } else {
+ URL url = loadResource(configurationResourceName);
+ manager = new CacheManager(url);
+ }
+ } catch (net.sf.ehcache.CacheException e) {
+ //yukky! Don't you have subclasses for that!
+ //TODO race conditions can happen here
+ if (e.getMessage().startsWith("Cannot parseConfiguration CacheManager. Attempt to create a new instance of " +
+ "CacheManager using the diskStorePath")) {
+ throw new CacheException("Attempt to restart an already started EhCacheProvider. Use sessionFactory.close() " +
+ " between repeated calls to buildSessionFactory. Consider using net.sf.ehcache.hibernate.SingletonEhCacheProvider."
+ , e );
+ } else {
+ throw e;
+ }
+ }
+ }
+
+ private URL loadResource(String configurationResourceName) {
+ URL url = ConfigHelper.locateConfig( configurationResourceName );
+ if (log.isDebugEnabled()) {
+ log.debug("Creating EhCacheProvider from a specified resource: "
+ + configurationResourceName + " Resolved to URL: " + url);
+ }
+ return url;
+ }
+
+ /**
+ * Callback to perform any necessary cleanup of the underlying cache implementation
+ * during SessionFactory.close().
+ */
+ public void stop() {
+ if (manager != null) {
+ manager.shutdown();
+ manager = null;
+ }
+ }
+
+ public boolean isMinimalPutsEnabledByDefault() {
+ return false;
+ }
+
+}
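
The configuration the class javadoc describes, expressed programmatically as a sketch; the same two keys can equally live in hibernate.properties or hibernate.cfg.xml, and /ehcache-2.xml is a hypothetical resource path:

import java.util.Properties;

public class EhCacheConfigSketch {
	public static Properties secondLevelCacheSettings() {
		Properties props = new Properties();
		props.setProperty( "hibernate.cache.provider_class",
				"org.hibernate.cache.EhCacheProvider" );
		props.setProperty( "hibernate.cache.provider_configuration_file_resource_path",
				"/ehcache-2.xml" ); // resolved against the classpath root
		return props;
	}
}
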
diff --git a/src/org/hibernate/cache/FilterKey.java b/src/org/hibernate/cache/FilterKey.java
new file mode 100755
index 0000000000..15d6b26423
--- /dev/null
+++ b/src/org/hibernate/cache/FilterKey.java
@@ -0,0 +1,70 @@
+//$Id$
+package org.hibernate.cache;
+
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+import org.hibernate.EntityMode;
+import org.hibernate.engine.TypedValue;
+import org.hibernate.impl.FilterImpl;
+import org.hibernate.type.Type;
+
+/**
+ * Allows cached queries to be keyed by enabled filters.
+ *
+ * @author Gavin King
+ */
+public final class FilterKey implements Serializable {
+ private String filterName;
+ private Map filterParameters = new HashMap();
+
+ public FilterKey(String name, Map params, Map types, EntityMode entityMode) {
+ filterName = name;
+ Iterator iter = params.entrySet().iterator();
+ while ( iter.hasNext() ) {
+ Map.Entry me = (Map.Entry) iter.next();
+ Type type = (Type) types.get( me.getKey() );
+ filterParameters.put( me.getKey(), new TypedValue( type, me.getValue(), entityMode ) );
+ }
+ }
+
+ public int hashCode() {
+ int result = 13;
+ result = 37 * result + filterName.hashCode();
+ result = 37 * result + filterParameters.hashCode();
+ return result;
+ }
+
+ public boolean equals(Object other) {
+ if ( !(other instanceof FilterKey) ) return false;
+ FilterKey that = (FilterKey) other;
+ if ( !that.filterName.equals(filterName) ) return false;
+ if ( !that.filterParameters.equals(filterParameters) ) return false;
+ return true;
+ }
+
+ public String toString() {
+ return "FilterKey[" + filterName + filterParameters + ']';
+ }
+
+ public static Set createFilterKeys(Map enabledFilters, EntityMode entityMode) {
+ if ( enabledFilters.size()==0 ) return null;
+ Set result = new HashSet();
+ Iterator iter = enabledFilters.values().iterator();
+ while ( iter.hasNext() ) {
+ FilterImpl filter = (FilterImpl) iter.next();
+ FilterKey key = new FilterKey(
+ filter.getName(),
+ filter.getParameters(),
+ filter.getFilterDefinition().getParameterTypes(),
+ entityMode
+ );
+ result.add(key);
+ }
+ return result;
+ }
+}
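
A sketch of how the static factory is meant to be driven from a session's enabled-filter map (hedged: the accessors assume this codebase's SessionImplementor contract); the null return in the no-filter case keeps unfiltered query-cache keys small:

import java.util.Set;
import org.hibernate.cache.FilterKey;
import org.hibernate.engine.SessionImplementor;

public class FilterKeySketch {
	public static Set filterKeysFor(SessionImplementor session) {
		// null when no filters are enabled
		return FilterKey.createFilterKeys( session.getEnabledFilters(), session.getEntityMode() );
	}
}
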
diff --git a/src/org/hibernate/cache/HashtableCache.java b/src/org/hibernate/cache/HashtableCache.java
new file mode 100644
index 0000000000..1cc65cc01a
--- /dev/null
+++ b/src/org/hibernate/cache/HashtableCache.java
@@ -0,0 +1,90 @@
+//$Id$
+package org.hibernate.cache;
+
+import java.util.Collections;
+import java.util.Hashtable;
+
+import java.util.Map;
+
+/**
+ * A lightweight implementation of the Cache interface
+ * @author Gavin King
+ */
+public class HashtableCache implements Cache {
+
+ private final Map hashtable = new Hashtable();
+ private final String regionName;
+
+ public HashtableCache(String regionName) {
+ this.regionName = regionName;
+ }
+
+ public String getRegionName() {
+ return regionName;
+ }
+
+ public Object read(Object key) throws CacheException {
+ return hashtable.get(key);
+ }
+
+ public Object get(Object key) throws CacheException {
+ return hashtable.get(key);
+ }
+
+ public void update(Object key, Object value) throws CacheException {
+ put(key, value);
+ }
+
+ public void put(Object key, Object value) throws CacheException {
+ hashtable.put(key, value);
+ }
+
+ public void remove(Object key) throws CacheException {
+ hashtable.remove(key);
+ }
+
+ public void clear() throws CacheException {
+ hashtable.clear();
+ }
+
+ public void destroy() throws CacheException {
+
+ }
+
+ public void lock(Object key) throws CacheException {
+ // local cache, so we use synchronization
+ }
+
+ public void unlock(Object key) throws CacheException {
+ // local cache, so we use synchronization
+ }
+
+ public long nextTimestamp() {
+ return Timestamper.next();
+ }
+
+ public int getTimeout() {
+ return Timestamper.ONE_MS * 60000; //ie. 60 seconds
+ }
+
+ public long getSizeInMemory() {
+ return -1;
+ }
+
+ public long getElementCountInMemory() {
+ return hashtable.size();
+ }
+
+ public long getElementCountOnDisk() {
+ return 0;
+ }
+
+ public Map toMap() {
+ return Collections.unmodifiableMap(hashtable);
+ }
+
+ public String toString() {
+ return "HashtableCache(" + regionName + ')';
+ }
+
+}
diff --git a/src/org/hibernate/cache/HashtableCacheProvider.java b/src/org/hibernate/cache/HashtableCacheProvider.java
new file mode 100644
index 0000000000..13ac176983
--- /dev/null
+++ b/src/org/hibernate/cache/HashtableCacheProvider.java
@@ -0,0 +1,42 @@
+//$Id$
+package org.hibernate.cache;
+
+import java.util.Properties;
+
+/**
+ * A simple in-memory Hashtable-based cache impl.
+ *
+ * @author Gavin King
+ */
+public class HashtableCacheProvider implements CacheProvider {
+
+ public Cache buildCache(String regionName, Properties properties) throws CacheException {
+ return new HashtableCache( regionName );
+ }
+
+ public long nextTimestamp() {
+ return Timestamper.next();
+ }
+
+ /**
+ * Callback to perform any necessary initialization of the underlying cache implementation
+ * during SessionFactory construction.
+ *
+ * @param properties current configuration settings.
+ */
+ public void start(Properties properties) throws CacheException {
+ }
+
+ /**
+ * Callback to perform any necessary cleanup of the underlying cache implementation
+ * during SessionFactory.close().
+ */
+ public void stop() {
+ }
+
+ public boolean isMinimalPutsEnabledByDefault() {
+ return false;
+ }
+
+}
+
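
A minimal end-to-end sketch using this in-memory provider, handy for tests; every call below is defined in this changeset:

import java.util.Properties;
import org.hibernate.cache.Cache;
import org.hibernate.cache.HashtableCacheProvider;

public class HashtableCacheSketch {
	public static void main(String[] args) {
		HashtableCacheProvider provider = new HashtableCacheProvider();
		provider.start( new Properties() ); // no-op, shown for the lifecycle
		Cache region = provider.buildCache( "demo-region", new Properties() );
		region.put( "key", "value" );
		System.out.println( region.get( "key" ) ); // prints "value"
		provider.stop(); // also a no-op for this provider
	}
}
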
diff --git a/src/org/hibernate/cache/JndiBoundTreeCacheProvider.java b/src/org/hibernate/cache/JndiBoundTreeCacheProvider.java
new file mode 100644
index 0000000000..cda5dd5111
--- /dev/null
+++ b/src/org/hibernate/cache/JndiBoundTreeCacheProvider.java
@@ -0,0 +1,63 @@
+// $Id$
+package org.hibernate.cache;
+
+import java.util.Properties;
+
+import javax.transaction.TransactionManager;
+
+import org.hibernate.transaction.TransactionManagerLookup;
+import org.hibernate.transaction.TransactionManagerLookupFactory;
+
+/**
+ * Support for JBossCache (TreeCache), where the cache instance is available
+ * via JNDI lookup.
+ *
+ * @author Steve Ebersole
+ */
+public class JndiBoundTreeCacheProvider extends AbstractJndiBoundCacheProvider {
+
+ private TransactionManager transactionManager;
+
+ /**
+ * Construct a Cache representing the "region" within the underlying cache
+ * provider.
+ *
+ * @param regionName the name of the cache region
+ * @param properties configuration settings
+ *
+ * @throws CacheException
+ */
+ public Cache buildCache(String regionName, Properties properties) throws CacheException {
+ return new TreeCache( getTreeCacheInstance(), regionName, transactionManager );
+ }
+
+ public void prepare(Properties properties) throws CacheException {
+ TransactionManagerLookup transactionManagerLookup = TransactionManagerLookupFactory.getTransactionManagerLookup(properties);
+ if (transactionManagerLookup!=null) {
+ transactionManager = transactionManagerLookup.getTransactionManager(properties);
+ }
+ }
+ /**
+ * Generate a timestamp
+ */
+ public long nextTimestamp() {
+ return System.currentTimeMillis() / 100;
+ }
+
+ /**
+ * By default, should minimal-puts mode be enabled when using this cache.
+ *
+ * Since TreeCache is a clusterable cache and we are only getting a
+ * reference to the instance from JNDI, it is safest to assume a clustered
+ * setup and return true here.
+ *
+ * @return True.
+ */
+ public boolean isMinimalPutsEnabledByDefault() {
+ return true;
+ }
+
+ public org.jboss.cache.TreeCache getTreeCacheInstance() {
+ return ( org.jboss.cache.TreeCache ) super.getCache();
+ }
+}
diff --git a/src/org/hibernate/cache/NoCacheProvider.java b/src/org/hibernate/cache/NoCacheProvider.java
new file mode 100644
index 0000000000..df77e55569
--- /dev/null
+++ b/src/org/hibernate/cache/NoCacheProvider.java
@@ -0,0 +1,58 @@
+// $Id$
+package org.hibernate.cache;
+
+import java.util.Properties;
+
+/**
+ * A placeholder CacheProvider used when the second-level cache is disabled;
+ * building a cache region fails fast.
+ *
+ * @author Steve Ebersole
+ */
+public class NoCacheProvider implements CacheProvider {
+ /**
+ * Configure the cache
+ *
+ * @param regionName the name of the cache region
+ * @param properties configuration settings
+ *
+ * @throws CacheException
+ */
+ public Cache buildCache(String regionName, Properties properties) throws CacheException {
+ throw new NoCachingEnabledException();
+ }
+
+ /**
+ * Generate a timestamp
+ */
+ public long nextTimestamp() {
+ // This is used by SessionFactoryImpl to hand to the generated SessionImpl;
+ // it was the only reason I could see that we cannot just use null as
+ // Settings.cacheProvider
+ return System.currentTimeMillis() / 100;
+ }
+
+ /**
+ * Callback to perform any necessary initialization of the underlying cache implementation during SessionFactory
+ * construction.
+ *
+ * @param properties current configuration settings.
+ */
+ public void start(Properties properties) throws CacheException {
+ // this is called by SessionFactory regardless; we just disregard here;
+ // could also add a check to SessionFactory to only conditionally call start
+ }
+
+ /**
+ * Callback to perform any necessary cleanup of the underlying cache implementation during SessionFactory.close().
+ */
+ public void stop() {
+ // this is called by SessionFactory regardless; we just disregard here;
+ // could also add a check to SessionFactory to only conditionally call stop
+ }
+
+ public boolean isMinimalPutsEnabledByDefault() {
+ // this is called from SettingsFactory regardless; trivial to simply disregard
+ return false;
+ }
+
+}
diff --git a/src/org/hibernate/cache/NoCachingEnabledException.java b/src/org/hibernate/cache/NoCachingEnabledException.java
new file mode 100644
index 0000000000..6b713d8653
--- /dev/null
+++ b/src/org/hibernate/cache/NoCachingEnabledException.java
@@ -0,0 +1,20 @@
+// $Id$
+package org.hibernate.cache;
+
+import org.hibernate.cfg.Environment;
+
+/**
+ * Thrown when a cache region is requested even though the second-level cache is not enabled.
+ *
+ * @author Steve Ebersole
+ */
+public class NoCachingEnabledException extends CacheException {
+ private static final String MSG =
+ "Second-level cache is not enabled for usage [" +
+ Environment.USE_SECOND_LEVEL_CACHE +
+ " | " + Environment.USE_QUERY_CACHE + "]";
+
+ public NoCachingEnabledException() {
+ super( MSG );
+ }
+}
diff --git a/src/org/hibernate/cache/NonstrictReadWriteCache.java b/src/org/hibernate/cache/NonstrictReadWriteCache.java
new file mode 100644
index 0000000000..1cc71080f7
--- /dev/null
+++ b/src/org/hibernate/cache/NonstrictReadWriteCache.java
@@ -0,0 +1,170 @@
+//$Id$
+package org.hibernate.cache;
+
+import java.util.Comparator;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * Caches data that is sometimes updated without ever locking the cache.
+ * If concurrent access to an item is possible, this concurrency strategy
+ * makes no guarantee that the item returned from the cache is the latest
+ * version available in the database. Configure your cache timeout accordingly!
+ * This is an "asynchronous" concurrency strategy.
+ *
+ * @author Gavin King
+ * @see ReadWriteCache for a much stricter algorithm
+ */
+public class NonstrictReadWriteCache implements CacheConcurrencyStrategy {
+
+ private Cache cache;
+
+ private static final Log log = LogFactory.getLog( NonstrictReadWriteCache.class );
+
+ public NonstrictReadWriteCache() {
+ }
+
+ public void setCache(Cache cache) {
+ this.cache = cache;
+ }
+
+ public Cache getCache() {
+ return cache;
+ }
+
+ /**
+ * Get the most recent version, if available.
+ */
+ public Object get(Object key, long txTimestamp) throws CacheException {
+ if ( log.isDebugEnabled() ) {
+ log.debug( "Cache lookup: " + key );
+ }
+
+ Object result = cache.get( key );
+ if ( result != null ) {
+ log.debug( "Cache hit" );
+ }
+ else {
+ log.debug( "Cache miss" );
+ }
+ return result;
+ }
+
+ /**
+ * Add an item to the cache.
+ */
+ public boolean put(
+ Object key,
+ Object value,
+ long txTimestamp,
+ Object version,
+ Comparator versionComparator,
+ boolean minimalPut) throws CacheException {
+ if ( minimalPut && cache.get( key ) != null ) {
+ if ( log.isDebugEnabled() ) {
+ log.debug( "item already cached: " + key );
+ }
+ return false;
+ }
+ if ( log.isDebugEnabled() ) {
+ log.debug( "Caching: " + key );
+ }
+
+ cache.put( key, value );
+ return true;
+
+ }
+
+ /**
+ * Do nothing.
+ *
+ * @return null, no lock
+ */
+ public SoftLock lock(Object key, Object version) throws CacheException {
+ return null;
+ }
+
+ public void remove(Object key) throws CacheException {
+ if ( log.isDebugEnabled() ) {
+ log.debug( "Removing: " + key );
+ }
+ cache.remove( key );
+ }
+
+ public void clear() throws CacheException {
+ if ( log.isDebugEnabled() ) {
+ log.debug( "Clearing" );
+ }
+ cache.clear();
+ }
+
+ public void destroy() {
+ try {
+ cache.destroy();
+ }
+ catch ( Exception e ) {
+ log.warn( "could not destroy cache", e );
+ }
+ }
+
+ /**
+ * Invalidate the item
+ */
+ public void evict(Object key) throws CacheException {
+ if ( log.isDebugEnabled() ) {
+ log.debug( "Invalidating: " + key );
+ }
+
+ cache.remove( key );
+ }
+
+ /**
+ * Do nothing.
+ */
+ public boolean insert(Object key, Object value, Object currentVersion) {
+ return false;
+ }
+
+ /**
+ * Invalidate the item.
+ */
+ public boolean update(Object key, Object value, Object currentVersion, Object previousVersion) {
+ evict( key );
+ return false;
+ }
+
+ /**
+ * Invalidate the item (again, for safety).
+ */
+ public void release(Object key, SoftLock lock) throws CacheException {
+ if ( log.isDebugEnabled() ) {
+ log.debug( "Invalidating (again): " + key );
+ }
+
+ cache.remove( key );
+ }
+
+ /**
+ * Invalidate the item (again, for safety).
+ */
+ public boolean afterUpdate(Object key, Object value, Object version, SoftLock lock) throws CacheException {
+ release( key, lock );
+ return false;
+ }
+
+ /**
+ * Do nothing.
+ */
+ public boolean afterInsert(Object key, Object value, Object version) throws CacheException {
+ return false;
+ }
+
+ public String getRegionName() {
+ return cache.getRegionName();
+ }
+
+ public String toString() {
+ return cache + "(nonstrict-read-write)";
+ }
+}
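// Editor's sketch (not part of the patch): the nonstrict-read-write contract in
// action. Writes never lock the cache; update() merely invalidates, so a read
// that follows an update misses and falls through to the database.
// HashMapCache is a hypothetical single-VM Cache used purely for illustration.
import java.util.HashMap;
import java.util.Map;

import org.hibernate.cache.Cache;
import org.hibernate.cache.NonstrictReadWriteCache;
import org.hibernate.cache.Timestamper;

class HashMapCache implements Cache {
	private final Map map = new HashMap();
	public Object get(Object key) { return map.get( key ); }
	public Object read(Object key) { return map.get( key ); }
	public void update(Object key, Object value) { map.put( key, value ); }
	public void put(Object key, Object value) { map.put( key, value ); }
	public void remove(Object key) { map.remove( key ); }
	public void clear() { map.clear(); }
	public void destroy() { map.clear(); }
	public void lock(Object key) { /* single VM; nothing to lock */ }
	public void unlock(Object key) { /* single VM; nothing to unlock */ }
	public long nextTimestamp() { return Timestamper.next(); }
	public int getTimeout() { return Timestamper.ONE_MS * 60000; }
	public String getRegionName() { return "demo"; }
	public long getSizeInMemory() { return -1; }
	public long getElementCountInMemory() { return map.size(); }
	public long getElementCountOnDisk() { return -1; }
	public Map toMap() { return map; }
}

public class NonstrictDemo {
	public static void main(String[] args) {
		NonstrictReadWriteCache strategy = new NonstrictReadWriteCache();
		strategy.setCache( new HashMapCache() );
		long tx = Timestamper.next();
		strategy.put( "item#1", "state", tx, null, null, false ); // cached
		strategy.update( "item#1", "new state", null, null );     // evicts only
		System.out.println( strategy.get( "item#1", tx ) );       // prints null
	}
}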
diff --git a/src/org/hibernate/cache/OSCache.java b/src/org/hibernate/cache/OSCache.java
new file mode 100644
index 0000000000..2802482aa8
--- /dev/null
+++ b/src/org/hibernate/cache/OSCache.java
@@ -0,0 +1,111 @@
+//$Id$
+package org.hibernate.cache;
+
+import java.util.Map;
+
+import com.opensymphony.oscache.base.NeedsRefreshException;
+import com.opensymphony.oscache.general.GeneralCacheAdministrator;
+
+/**
+ * @author Mathias Bogaert
+ */
+public class OSCache implements Cache {
+
+ /**
+ * The OSCache 2.0 cache administrator.
+ */
+ private GeneralCacheAdministrator cache = new GeneralCacheAdministrator();
+
+ private final int refreshPeriod;
+ private final String cron;
+ private final String regionName;
+
+ private String toString(Object key) {
+ return String.valueOf(key) + '.' + regionName;
+ }
+
+ public OSCache(int refreshPeriod, String cron, String region) {
+ this.refreshPeriod = refreshPeriod;
+ this.cron = cron;
+ this.regionName = region;
+ }
+
+ public void setCacheCapacity(int cacheCapacity) {
+ cache.setCacheCapacity(cacheCapacity);
+ }
+
+ public Object get(Object key) throws CacheException {
+ try {
+ return cache.getFromCache( toString(key), refreshPeriod, cron );
+ }
+ catch (NeedsRefreshException e) {
+ cache.cancelUpdate( toString(key) );
+ return null;
+ }
+ }
+
+ public Object read(Object key) throws CacheException {
+ return get(key);
+ }
+
+ public void update(Object key, Object value) throws CacheException {
+ put(key, value);
+ }
+
+ public void put(Object key, Object value) throws CacheException {
+ cache.putInCache( toString(key), value );
+ }
+
+ public void remove(Object key) throws CacheException {
+ cache.flushEntry( toString(key) );
+ }
+
+ public void clear() throws CacheException {
+ cache.flushAll();
+ }
+
+ public void destroy() throws CacheException {
+ cache.destroy();
+ }
+
+ public void lock(Object key) throws CacheException {
+ // local cache, so we use synchronization
+ }
+
+ public void unlock(Object key) throws CacheException {
+ // local cache, so we use synchronization
+ }
+
+ public long nextTimestamp() {
+ return Timestamper.next();
+ }
+
+ public int getTimeout() {
+ return Timestamper.ONE_MS * 60000; //i.e. 60 seconds
+ }
+
+ public String getRegionName() {
+ return regionName;
+ }
+
+ public long getSizeInMemory() {
+ return -1;
+ }
+
+ public long getElementCountInMemory() {
+ return -1;
+ }
+
+ public long getElementCountOnDisk() {
+ return -1;
+ }
+
+ public Map toMap() {
+ throw new UnsupportedOperationException();
+ }
+
+ public String toString() {
+ return "OSCache(" + regionName + ')';
+ }
+
+}
diff --git a/src/org/hibernate/cache/OSCacheProvider.java b/src/org/hibernate/cache/OSCacheProvider.java
new file mode 100644
index 0000000000..ca3cf80ca8
--- /dev/null
+++ b/src/org/hibernate/cache/OSCacheProvider.java
@@ -0,0 +1,87 @@
+//$Id$
+package org.hibernate.cache;
+
+import java.util.Properties;
+
+import org.hibernate.util.PropertiesHelper;
+import org.hibernate.util.StringHelper;
+
+import com.opensymphony.oscache.base.CacheEntry;
+import com.opensymphony.oscache.base.Config;
+
+/**
+ * Support for OpenSymphony OSCache. This implementation assumes
+ * that identifiers have well-behaved toString() methods.
+ *
+ * @author Mathias Bogaert
+ */
+public class OSCacheProvider implements CacheProvider {
+
+ /**
+ * The OSCache refresh period property suffix.
+ */
+ public static final String OSCACHE_REFRESH_PERIOD = "refresh.period";
+ /**
+ * The OSCache CRON expression property suffix.
+ */
+ public static final String OSCACHE_CRON = "cron";
+ /**
+ * The OSCache cache capacity property suffix.
+ */
+ public static final String OSCACHE_CAPACITY = "capacity";
+
+ private static final Properties OSCACHE_PROPERTIES = new Config().getProperties();
+
+ /**
+ * Builds a new {@link Cache} instance, and gets its properties from the OSCache {@link Config}
+ * which reads the properties file (oscache.properties) from the classpath.
+ * If the file cannot be found or loaded, the defaults are used.
+ *
+ * @param region the name of the cache region
+ * @param properties Hibernate configuration settings (unused; OSCache reads its own config)
+ * @return a new OSCache wrapping the named region
+ * @throws CacheException
+ */
+ public Cache buildCache(String region, Properties properties) throws CacheException {
+
+ int refreshPeriod = PropertiesHelper.getInt(
+ StringHelper.qualify(region, OSCACHE_REFRESH_PERIOD),
+ OSCACHE_PROPERTIES,
+ CacheEntry.INDEFINITE_EXPIRY
+ );
+ String cron = OSCACHE_PROPERTIES.getProperty( StringHelper.qualify(region, OSCACHE_CRON) );
+
+ // construct the cache
+ final OSCache cache = new OSCache(refreshPeriod, cron, region);
+
+ Integer capacity = PropertiesHelper.getInteger( StringHelper.qualify(region, OSCACHE_CAPACITY), OSCACHE_PROPERTIES );
+ if ( capacity!=null ) cache.setCacheCapacity( capacity.intValue() );
+
+ return cache;
+ }
+
+ public long nextTimestamp() {
+ return Timestamper.next();
+ }
+
+ /**
+ * Callback to perform any necessary initialization of the underlying cache implementation
+ * during SessionFactory construction.
+ *
+ * @param properties current configuration settings.
+ */
+ public void start(Properties properties) throws CacheException {
+ }
+
+ /**
+ * Callback to perform any necessary cleanup of the underlying cache implementation
+ * during SessionFactory.close().
+ */
+ public void stop() {
+ }
+
+ public boolean isMinimalPutsEnabledByDefault() {
+ return false;
+ }
+
+}
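// Editor's sketch (not part of the patch): how per-region settings are looked
// up in oscache.properties. The region name "com.example.Item" is hypothetical;
// StringHelper.qualify(prefix, name) simply joins the two with a '.'.
import org.hibernate.cache.OSCacheProvider;
import org.hibernate.util.StringHelper;

public class OSCacheKeyDemo {
	public static void main(String[] args) {
		String region = "com.example.Item";
		// keys consulted by buildCache() for this region:
		System.out.println( StringHelper.qualify( region, OSCacheProvider.OSCACHE_REFRESH_PERIOD ) );
		// -> com.example.Item.refresh.period
		System.out.println( StringHelper.qualify( region, OSCacheProvider.OSCACHE_CRON ) );
		// -> com.example.Item.cron
		System.out.println( StringHelper.qualify( region, OSCacheProvider.OSCACHE_CAPACITY ) );
		// -> com.example.Item.capacity
	}
}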
diff --git a/src/org/hibernate/cache/OptimisticCache.java b/src/org/hibernate/cache/OptimisticCache.java
new file mode 100644
index 0000000000..7bcbcb24b8
--- /dev/null
+++ b/src/org/hibernate/cache/OptimisticCache.java
@@ -0,0 +1,64 @@
+package org.hibernate.cache;
+
+/**
+ * A contract for transactional cache implementations which support
+ * optimistic locking of items within the cache.
+ *
+ * The optimistic locking capabilities are only utilized for
+ * the entity cache regions.
+ *
+ * Unlike the methods on the {@link Cache} interface, all the methods
+ * here will only ever be called from access scenarios where versioned
+ * data is actually a possibility (i.e., entity data). Be sure to consult
+ * with {@link OptimisticCacheSource#isVersioned()} to determine whether
+ * versioning is actually in effect.
+ *
+ * @author Steve Ebersole
+ */
+public interface OptimisticCache extends Cache {
+ /**
+ * Indicates the "source" of the cached data. Currently this will
+ * only ever represent an {@link org.hibernate.persister.entity.EntityPersister}.
+ *
+ * Made available to the cache so that it can access certain information
+ * about versioning strategy.
+ *
+ * @param source The source.
+ */
+ public void setSource(OptimisticCacheSource source);
+
+ /**
+ * Called during {@link CacheConcurrencyStrategy#insert} processing for
+ * transactional strategies. Indicates we have just performed an insert
+ * into the DB and now need to cache that entity's data.
+ *
+ * @param key The cache key.
+ * @param value The data to be cached.
+ * @param currentVersion The entity's version; or null if not versioned.
+ */
+ public void writeInsert(Object key, Object value, Object currentVersion);
+
+ /**
+ * Called during {@link CacheConcurrencyStrategy#update} processing for
+ * transactional strategies. Indicates we have just performed an update
+ * against the DB and now need to cache the updated state.
+ *
+ * @param key The cache key.
+ * @param value The data to be cached.
+ * @param currentVersion The entity's current version
+ * @param previousVersion The entity's previous version (before the update);
+ * or null if not versioned.
+ */
+ public void writeUpdate(Object key, Object value, Object currentVersion, Object previousVersion);
+
+ /**
+ * Called during {@link CacheConcurrencyStrategy#put} processing for
+ * transactional strategies. Indicates we have just loaded an entity's
+ * state from the database and need it cached.
+ *
+ * @param key The cache key.
+ * @param value The data to be cached.
+ * @param currentVersion The entity's version; or null if not versioned.
+ */
+ public void writeLoad(Object key, Object value, Object currentVersion);
+}
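// Editor's sketch (not part of the patch): the probe-and-dispatch idiom that
// concurrency strategies (see TransactionalCache later in this patch) use with
// this optional contract: test for OptimisticCache before writing versioned data.
import org.hibernate.cache.Cache;
import org.hibernate.cache.OptimisticCache;

public class OptimisticDispatchDemo {
	static void cacheLoadedState(Cache cache, Object key, Object value, Object version) {
		if ( cache instanceof OptimisticCache ) {
			// versioned write; the cache can set up optimistic lock checks
			( (OptimisticCache) cache ).writeLoad( key, value, version );
		}
		else {
			// plain write; no version bookkeeping
			cache.put( key, value );
		}
	}
}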
diff --git a/src/org/hibernate/cache/OptimisticCacheSource.java b/src/org/hibernate/cache/OptimisticCacheSource.java
new file mode 100644
index 0000000000..ca01e4f8c7
--- /dev/null
+++ b/src/org/hibernate/cache/OptimisticCacheSource.java
@@ -0,0 +1,29 @@
+package org.hibernate.cache;
+
+import java.util.Comparator;
+
+/**
+ * Contract for sources of optimistically lockable data sent to the second level
+ * cache.
+ *
+ * Note currently {@link org.hibernate.persister.entity.EntityPersister}s are
+ * the only viable source.
+ *
+ * @author Steve Ebersole
+ */
+public interface OptimisticCacheSource {
+ /**
+ * Does this source represent versioned (and thus optimistically
+ * lockable) data?
+ *
+ * @return True if this source represents versioned data; false otherwise.
+ */
+ public boolean isVersioned();
+
+ /**
+ * Get the comparator used to compare two different version values together.
+ *
+ * @return An appropriate comparator.
+ */
+ public Comparator getVersionComparator();
+}
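// Editor's sketch (not part of the patch): a trivial OptimisticCacheSource
// whose versions are Integers. In practice the only source is an entity
// persister; this stand-in just shows what the contract asks for.
import java.util.Comparator;

import org.hibernate.cache.OptimisticCacheSource;

public class IntegerVersionSource implements OptimisticCacheSource {
	public boolean isVersioned() {
		return true;
	}
	public Comparator getVersionComparator() {
		// order two Integer versions numerically
		return new Comparator() {
			public int compare(Object o1, Object o2) {
				return ( (Integer) o1 ).compareTo( (Integer) o2 );
			}
		};
	}
}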
diff --git a/src/org/hibernate/cache/OptimisticTreeCache.java b/src/org/hibernate/cache/OptimisticTreeCache.java
new file mode 100644
index 0000000000..d31c0cfbc3
--- /dev/null
+++ b/src/org/hibernate/cache/OptimisticTreeCache.java
@@ -0,0 +1,329 @@
+//$Id$
+package org.hibernate.cache;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.Comparator;
+
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.jboss.cache.Fqn;
+import org.jboss.cache.optimistic.DataVersion;
+import org.jboss.cache.config.Option;
+import org.jboss.cache.lock.TimeoutException;
+
+/**
+ * Represents a particular region within the given JBossCache TreeCache
+ * utilizing TreeCache's optimistic locking capabilities.
+ *
+ * @see OptimisticTreeCacheProvider for more details
+ *
+ * @author Steve Ebersole
+ */
+public class OptimisticTreeCache implements OptimisticCache {
+
+ // todo : eventually merge this with TreeCache and just add optional opt-lock support there.
+
+ private static final Log log = LogFactory.getLog( OptimisticTreeCache.class);
+
+ private static final String ITEM = "item";
+
+ private org.jboss.cache.TreeCache cache;
+ private final String regionName;
+ private final Fqn regionFqn;
+ private OptimisticCacheSource source;
+
+ public OptimisticTreeCache(org.jboss.cache.TreeCache cache, String regionName)
+ throws CacheException {
+ this.cache = cache;
+ this.regionName = regionName;
+ this.regionFqn = Fqn.fromString( regionName.replace( '.', '/' ) );
+ }
+
+
+ // OptimisticCache impl ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ public void setSource(OptimisticCacheSource source) {
+ this.source = source;
+ }
+
+ public void writeInsert(Object key, Object value, Object currentVersion) {
+ writeUpdate( key, value, currentVersion, null );
+ }
+
+ public void writeUpdate(Object key, Object value, Object currentVersion, Object previousVersion) {
+ try {
+ Option option = new Option();
+ DataVersion dv = ( source != null && source.isVersioned() )
+ ? new DataVersionAdapter( currentVersion, previousVersion, source.getVersionComparator(), source.toString() )
+ : NonLockingDataVersion.INSTANCE;
+ option.setDataVersion( dv );
+ cache.put( new Fqn( regionFqn, key ), ITEM, value, option );
+ }
+ catch ( Exception e ) {
+ throw new CacheException( e );
+ }
+ }
+
+ public void writeLoad(Object key, Object value, Object currentVersion) {
+ try {
+ Option option = new Option();
+ option.setFailSilently( true );
+ option.setDataVersion( NonLockingDataVersion.INSTANCE );
+ cache.remove( new Fqn( regionFqn, key ), "ITEM", option );
+
+ option = new Option();
+ option.setFailSilently( true );
+ DataVersion dv = ( source != null && source.isVersioned() )
+ ? new DataVersionAdapter( currentVersion, currentVersion, source.getVersionComparator(), source.toString() )
+ : NonLockingDataVersion.INSTANCE;
+ option.setDataVersion( dv );
+ cache.put( new Fqn( regionFqn, key ), ITEM, value, option );
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+
+ // Cache impl ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ public Object get(Object key) throws CacheException {
+ try {
+ Option option = new Option();
+ option.setFailSilently( true );
+// option.setDataVersion( NonLockingDataVersion.INSTANCE );
+ return cache.get( new Fqn( regionFqn, key ), ITEM, option );
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+ public Object read(Object key) throws CacheException {
+ try {
+ return cache.get( new Fqn( regionFqn, key ), ITEM );
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+ public void update(Object key, Object value) throws CacheException {
+ try {
+ Option option = new Option();
+ option.setDataVersion( NonLockingDataVersion.INSTANCE );
+ cache.put( new Fqn( regionFqn, key ), ITEM, value, option );
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+ public void put(Object key, Object value) throws CacheException {
+ try {
+ log.trace( "performing put() into region [" + regionName + "]" );
+ // do the put outside the scope of the JTA txn
+ Option option = new Option();
+ option.setFailSilently( true );
+ option.setDataVersion( NonLockingDataVersion.INSTANCE );
+ cache.put( new Fqn( regionFqn, key ), ITEM, value, option );
+ }
+ catch (TimeoutException te) {
+ //ignore!
+ log.debug("ignoring write lock acquisition failure");
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+ public void remove(Object key) throws CacheException {
+ try {
+ // tree cache in optimistic mode seems to have a very difficult
+ // time with remove calls on non-existent nodes (NPEs)...
+ if ( cache.get( new Fqn( regionFqn, key ), ITEM ) != null ) {
+ Option option = new Option();
+ option.setDataVersion( NonLockingDataVersion.INSTANCE );
+ cache.remove( new Fqn( regionFqn, key ), option );
+ }
+ else {
+ log.trace( "skipping remove() call as the underlying node did not seem to exist" );
+ }
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+ public void clear() throws CacheException {
+ try {
+ Option option = new Option();
+ option.setDataVersion( NonLockingDataVersion.INSTANCE );
+ cache.remove( regionFqn, option );
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+ public void destroy() throws CacheException {
+ try {
+ Option option = new Option();
+ option.setCacheModeLocal( true );
+ option.setFailSilently( true );
+ option.setDataVersion( NonLockingDataVersion.INSTANCE );
+ cache.remove( regionFqn, option );
+ }
+ catch( Exception e ) {
+ throw new CacheException( e );
+ }
+ }
+
+ public void lock(Object key) throws CacheException {
+ throw new UnsupportedOperationException( "TreeCache is a fully transactional cache" + regionName );
+ }
+
+ public void unlock(Object key) throws CacheException {
+ throw new UnsupportedOperationException( "TreeCache is a fully transactional cache: " + regionName );
+ }
+
+ public long nextTimestamp() {
+ return System.currentTimeMillis() / 100;
+ }
+
+ public int getTimeout() {
+ return 600; //60 seconds
+ }
+
+ public String getRegionName() {
+ return regionName;
+ }
+
+ public long getSizeInMemory() {
+ return -1;
+ }
+
+ public long getElementCountInMemory() {
+ try {
+ Set children = cache.getChildrenNames( regionFqn );
+ return children == null ? 0 : children.size();
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+ public long getElementCountOnDisk() {
+ return 0;
+ }
+
+ public Map toMap() {
+ try {
+ Map result = new HashMap();
+ Set childrenNames = cache.getChildrenNames( regionFqn );
+ if (childrenNames != null) {
+ Iterator iter = childrenNames.iterator();
+ while ( iter.hasNext() ) {
+ Object key = iter.next();
+ result.put(
+ key,
+ cache.get( new Fqn( regionFqn, key ), ITEM )
+ );
+ }
+ }
+ return result;
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+ public String toString() {
+ return "OptimisticTreeCache(" + regionName + ')';
+ }
+
+ public static class DataVersionAdapter implements DataVersion {
+ private final Object currentVersion;
+ private final Object previousVersion;
+ private final Comparator versionComparator;
+ private final String sourceIdentifier;
+
+ public DataVersionAdapter(Object currentVersion, Object previousVersion, Comparator versionComparator, String sourceIdentifier) {
+ this.currentVersion = currentVersion;
+ this.previousVersion = previousVersion;
+ this.versionComparator = versionComparator;
+ this.sourceIdentifier = sourceIdentifier;
+ log.trace( "created " + this );
+ }
+
+ /**
+ * newerThan() call is dispatched against the DataVersion currently
+ * associated with the node; the passed dataVersion param is the
+ * DataVersion associated with the data we are trying to put into
+ * the node.
+ *
+ * We are expected to return true in the case where we (the current
+ * node DataVersion) are newer than the incoming value. Returning
+ * true here essentially means that an optimistic lock failure has
+ * occurred (because, conversely, the value we are trying to put into
+ * the node is "older than" the value already there...)
+ */
+ public boolean newerThan(DataVersion dataVersion) {
+ log.trace( "checking [" + this + "] against [" + dataVersion + "]" );
+ if ( dataVersion instanceof CircumventChecksDataVersion ) {
+ log.trace( "skipping lock checks..." );
+ return false;
+ }
+ else if ( dataVersion instanceof NonLockingDataVersion ) {
+ // can happen because of the multiple ways Cache.remove()
+ // can be invoked :(
+ log.trace( "skipping lock checks..." );
+ return false;
+ }
+ DataVersionAdapter other = ( DataVersionAdapter ) dataVersion;
+ if ( other.previousVersion == null ) {
+ log.warn( "Unexpected optimistic lock check on inserting data" );
+ // work around the "feature" where tree cache is validating the
+ // inserted node during the next transaction. no idea...
+ if ( this == dataVersion ) {
+ log.trace( "skipping lock checks due to same DV instance" );
+ return false;
+ }
+ }
+ return versionComparator.compare( currentVersion, other.previousVersion ) >= 1;
+ }
+
+ public String toString() {
+ return super.toString() + " [current=" + currentVersion + ", previous=" + previousVersion + ", src=" + sourceIdentifer + "]";
+ }
+ }
+
+ /**
+ * Used in regions where no locking should ever occur. This includes query-caches,
+ * update-timestamps caches, collection caches, and entity caches where the entity
+ * is not versioned.
+ */
+ public static class NonLockingDataVersion implements DataVersion {
+ public static final DataVersion INSTANCE = new NonLockingDataVersion();
+ public boolean newerThan(DataVersion dataVersion) {
+ log.trace( "non locking lock check...");
+ return false;
+ }
+ }
+
+ /**
+ * Used to signal to a DataVersionAdapter to simply not perform any checks. This
+ * is currently needed for proper handling of remove() calls for entity cache regions
+ * (we do not know the version info...).
+ */
+ public static class CircumventChecksDataVersion implements DataVersion {
+ public static final DataVersion INSTANCE = new CircumventChecksDataVersion();
+ public boolean newerThan(DataVersion dataVersion) {
+ throw new CacheException( "optimistic locking checks should never happen on CircumventChecksDataVersion" );
+ }
+ }
+}
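// Editor's sketch (not part of the patch): the newerThan() contract in
// isolation. The adapter holding the node's current version reports an
// optimistic lock failure (returns true) when the incoming write was computed
// against an older version. The versions and the "demo" source id are made up.
import java.util.Comparator;

import org.hibernate.cache.OptimisticTreeCache.DataVersionAdapter;

public class NewerThanDemo {
	public static void main(String[] args) {
		Comparator byInt = new Comparator() {
			public int compare(Object a, Object b) {
				return ( (Integer) a ).compareTo( (Integer) b );
			}
		};
		// the node already holds version 5 (its previous version was 4)
		DataVersionAdapter inNode = new DataVersionAdapter(
				new Integer( 5 ), new Integer( 4 ), byInt, "demo" );
		// the incoming write is also based on version 4 -> stale
		DataVersionAdapter incoming = new DataVersionAdapter(
				new Integer( 5 ), new Integer( 4 ), byInt, "demo" );
		// true == reject the write (optimistic lock failure)
		System.out.println( inNode.newerThan( incoming ) );
	}
}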
diff --git a/src/org/hibernate/cache/OptimisticTreeCacheProvider.java b/src/org/hibernate/cache/OptimisticTreeCacheProvider.java
new file mode 100644
index 0000000000..fd2cc7458c
--- /dev/null
+++ b/src/org/hibernate/cache/OptimisticTreeCacheProvider.java
@@ -0,0 +1,130 @@
+//$Id$
+package org.hibernate.cache;
+
+import java.util.Properties;
+import javax.transaction.TransactionManager;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.hibernate.cfg.Environment;
+import org.hibernate.transaction.TransactionManagerLookup;
+import org.hibernate.transaction.TransactionManagerLookupFactory;
+import org.jboss.cache.PropertyConfigurator;
+
+/**
+ * Support for a standalone JBossCache TreeCache instance utilizing TreeCache's
+ * optimistic locking capabilities. This capability was added in JBossCache
+ * version 1.3.0; as such this provider will only work with that version or
+ * higher.
+ *
+ * The TreeCache instance is configured via a local config resource. The
+ * resource to be used for configuration can be controlled by specifying a value
+ * for the {@link #CONFIG_RESOURCE} config property.
+ *
+ * @author Steve Ebersole
+ */
+public class OptimisticTreeCacheProvider implements CacheProvider {
+
+ /**
+ * @deprecated use {@link Environment#CACHE_PROVIDER_CONFIG}
+ */
+ public static final String CONFIG_RESOURCE = "hibernate.cache.opt_tree_cache.config";
+ public static final String DEFAULT_CONFIG = "treecache.xml";
+
+ private static final String NODE_LOCKING_SCHEME = "OPTIMISTIC";
+ private static final Log log = LogFactory.getLog( OptimisticTreeCacheProvider.class );
+
+ private org.jboss.cache.TreeCache cache;
+
+ /**
+ * Construct and configure the Cache representation of a named cache region.
+ *
+ * @param regionName the name of the cache region
+ * @param properties configuration settings
+ * @return The Cache representation of the named cache region.
+ * @throws CacheException
+ * Indicates an error building the cache region.
+ */
+ public Cache buildCache(String regionName, Properties properties) throws CacheException {
+ return new OptimisticTreeCache( cache, regionName );
+ }
+
+ public long nextTimestamp() {
+ return System.currentTimeMillis() / 100;
+ }
+
+ /**
+ * Prepare the underlying JBossCache TreeCache instance.
+ *
+ * @param properties All current config settings.
+ * @throws CacheException
+ * Indicates a problem preparing cache for use.
+ */
+ public void start(Properties properties) {
+ String resource = properties.getProperty( Environment.CACHE_PROVIDER_CONFIG );
+ if (resource == null) {
+ resource = properties.getProperty( CONFIG_RESOURCE );
+ }
+ if ( resource == null ) {
+ resource = DEFAULT_CONFIG;
+ }
+ log.debug( "Configuring TreeCache from resource [" + resource + "]" );
+ try {
+ cache = new org.jboss.cache.TreeCache();
+ PropertyConfigurator config = new PropertyConfigurator();
+ config.configure( cache, resource );
+ TransactionManagerLookup transactionManagerLookup =
+ TransactionManagerLookupFactory.getTransactionManagerLookup( properties );
+ if ( transactionManagerLookup == null ) {
+ throw new CacheException(
+ "JBossCache only supports optimisitc locking with a configured " +
+ "TransactionManagerLookup (" + Environment.TRANSACTION_MANAGER_STRATEGY + ")"
+ );
+ }
+ cache.setTransactionManagerLookup(
+ new TransactionManagerLookupAdaptor(
+ transactionManagerLookup,
+ properties
+ )
+ );
+ if ( ! NODE_LOCKING_SCHEME.equalsIgnoreCase( cache.getNodeLockingScheme() ) ) {
+ log.info( "Overriding node-locking-scheme to : " + NODE_LOCKING_SCHEME );
+ cache.setNodeLockingScheme( NODE_LOCKING_SCHEME );
+ }
+ cache.start();
+ }
+ catch ( Exception e ) {
+ throw new CacheException( e );
+ }
+ }
+
+ public void stop() {
+ if ( cache != null ) {
+ cache.stop();
+ cache.destroy();
+ cache = null;
+ }
+ }
+
+ public boolean isMinimalPutsEnabledByDefault() {
+ return true;
+ }
+
+ static final class TransactionManagerLookupAdaptor implements org.jboss.cache.TransactionManagerLookup {
+ private final TransactionManagerLookup tml;
+ private final Properties props;
+
+ TransactionManagerLookupAdaptor(TransactionManagerLookup tml, Properties props) {
+ this.tml = tml;
+ this.props = props;
+ }
+
+ public TransactionManager getTransactionManager() throws Exception {
+ return tml.getTransactionManager( props );
+ }
+ }
+
+ public org.jboss.cache.TreeCache getUnderlyingCache() {
+ return cache;
+ }
+}
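// Editor's sketch (not part of the patch): wiring up this provider. The
// resource name "my-treecache.xml" and the JBoss lookup class are placeholder
// choices; the essential point is that a TransactionManagerLookup must be
// configured or start() throws CacheException (see above).
import java.util.Properties;

import org.hibernate.cache.OptimisticTreeCacheProvider;
import org.hibernate.cfg.Environment;

public class OptimisticProviderConfigDemo {
	public static void main(String[] args) {
		Properties props = new Properties();
		props.setProperty( Environment.CACHE_PROVIDER,
				"org.hibernate.cache.OptimisticTreeCacheProvider" );
		props.setProperty( Environment.CACHE_PROVIDER_CONFIG, "my-treecache.xml" );
		props.setProperty( Environment.TRANSACTION_MANAGER_STRATEGY,
				"org.hibernate.transaction.JBossTransactionManagerLookup" );
		// start() reads the config resource and starts the underlying TreeCache
		new OptimisticTreeCacheProvider().start( props );
	}
}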
diff --git a/src/org/hibernate/cache/QueryCache.java b/src/org/hibernate/cache/QueryCache.java
new file mode 100644
index 0000000000..0cc457cf31
--- /dev/null
+++ b/src/org/hibernate/cache/QueryCache.java
@@ -0,0 +1,33 @@
+//$Id$
+package org.hibernate.cache;
+
+import java.util.List;
+import java.util.Set;
+
+import org.hibernate.HibernateException;
+import org.hibernate.engine.SessionImplementor;
+import org.hibernate.type.Type;
+
+/**
+ * Defines the contract for caches capable of storing query results. These
+ * caches should only concern themselves with storing the matching result ids.
+ * The transactional semantics are necessarily less strict than the semantics
+ * of an item cache.
+ *
+ * @author Gavin King
+ */
+public interface QueryCache {
+
+ public void clear() throws CacheException;
+
+ public boolean put(QueryKey key, Type[] returnTypes, List result, boolean isNaturalKeyLookup, SessionImplementor session) throws HibernateException;
+
+ public List get(QueryKey key, Type[] returnTypes, boolean isNaturalKeyLookup, Set spaces, SessionImplementor session)
+ throws HibernateException;
+
+ public void destroy();
+
+ public Cache getCache();
+
+ public String getRegionName();
+}
diff --git a/src/org/hibernate/cache/QueryCacheFactory.java b/src/org/hibernate/cache/QueryCacheFactory.java
new file mode 100644
index 0000000000..9472364fa6
--- /dev/null
+++ b/src/org/hibernate/cache/QueryCacheFactory.java
@@ -0,0 +1,24 @@
+// $Id$
+package org.hibernate.cache;
+
+import org.hibernate.HibernateException;
+import org.hibernate.cfg.Settings;
+
+import java.util.Properties;
+
+/**
+ * Defines a factory for query cache instances. These factories are responsible for
+ * creating individual QueryCache instances.
+ *
+ * @author Steve Ebersole
+ */
+public interface QueryCacheFactory {
+
+ public QueryCache getQueryCache(
+ String regionName,
+ UpdateTimestampsCache updateTimestampsCache,
+ Settings settings,
+ Properties props)
+ throws HibernateException;
+
+}
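// Editor's sketch (not part of the patch): a minimal QueryCacheFactory that
// mirrors the default behavior by handing back the StandardQueryCache defined
// later in this patch.
import java.util.Properties;

import org.hibernate.HibernateException;
import org.hibernate.cache.QueryCache;
import org.hibernate.cache.QueryCacheFactory;
import org.hibernate.cache.StandardQueryCache;
import org.hibernate.cache.UpdateTimestampsCache;
import org.hibernate.cfg.Settings;

public class SimpleQueryCacheFactory implements QueryCacheFactory {
	public QueryCache getQueryCache(
			String regionName,
			UpdateTimestampsCache updateTimestampsCache,
			Settings settings,
			Properties props) throws HibernateException {
		return new StandardQueryCache( settings, props, updateTimestampsCache, regionName );
	}
}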
diff --git a/src/org/hibernate/cache/QueryKey.java b/src/org/hibernate/cache/QueryKey.java
new file mode 100644
index 0000000000..5ccdcf48e2
--- /dev/null
+++ b/src/org/hibernate/cache/QueryKey.java
@@ -0,0 +1,117 @@
+//$Id$
+package org.hibernate.cache;
+
+import java.io.Serializable;
+import java.util.Map;
+import java.util.Set;
+
+import org.hibernate.EntityMode;
+import org.hibernate.engine.QueryParameters;
+import org.hibernate.engine.RowSelection;
+import org.hibernate.transform.ResultTransformer;
+import org.hibernate.type.Type;
+import org.hibernate.util.EqualsHelper;
+
+/**
+ * A key that identifies a particular query with bound parameter values
+ * @author Gavin King
+ */
+public class QueryKey implements Serializable {
+ private final String sqlQueryString;
+ private final Type[] types;
+ private final Object[] values;
+ private final Integer firstRow;
+ private final Integer maxRows;
+ private final Map namedParameters;
+ private final EntityMode entityMode;
+ private final Set filters;
+ private final int hashCode;
+
+ // the user-provided ResultTransformer, not the one used with "select new". Stored here to avoid mixing transformed and non-transformed results.
+ private final ResultTransformer customTransformer;
+
+ public QueryKey(String queryString, QueryParameters queryParameters, Set filters, EntityMode entityMode) {
+ this.sqlQueryString = queryString;
+ this.types = queryParameters.getPositionalParameterTypes();
+ this.values = queryParameters.getPositionalParameterValues();
+ RowSelection selection = queryParameters.getRowSelection();
+ if (selection!=null) {
+ firstRow = selection.getFirstRow();
+ maxRows = selection.getMaxRows();
+ }
+ else {
+ firstRow = null;
+ maxRows = null;
+ }
+ this.namedParameters = queryParameters.getNamedParameters();
+ this.entityMode = entityMode;
+ this.filters = filters;
+ this.customTransformer = queryParameters.getResultTransformer();
+ this.hashCode = getHashCode();
+ }
+
+ public boolean equals(Object other) {
+ QueryKey that = (QueryKey) other;
+ if ( !sqlQueryString.equals(that.sqlQueryString) ) return false;
+ if ( !EqualsHelper.equals(firstRow, that.firstRow) || !EqualsHelper.equals(maxRows, that.maxRows) ) return false;
+ if ( !EqualsHelper.equals(customTransformer, that.customTransformer) ) return false;
+ if (types==null) {
+ if (that.types!=null) return false;
+ }
+ else {
+ if (that.types==null) return false;
+ if ( types.length!=that.types.length ) return false;
+ for ( int i=0; i<types.length; i++ ) {

[... remainder of QueryKey.java lost in extraction ...]

diff --git a/src/org/hibernate/cache/ReadWriteCache.java b/src/org/hibernate/cache/ReadWriteCache.java
new file mode 100644
--- /dev/null
+++ b/src/org/hibernate/cache/ReadWriteCache.java
+//$Id$
+package org.hibernate.cache;
+
+import java.io.Serializable;
+import java.util.Comparator;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * Caches data that is sometimes updated while maintaining the semantics of
+ * "read committed" isolation level. If the database is set to "repeatable
+ * read", this concurrency strategy almost maintains the semantics.
+ * Repeatable read isolation is compromised in the case of concurrent writes.
+ * This is an "asynchronous" concurrency strategy.
+ *
+ * If this strategy is used in a cluster, the underlying cache implementation
+ * must support distributed hard locks (which are held only momentarily). This
+ * strategy also assumes that the underlying cache implementation does not do
+ * asynchronous replication and that state has been fully replicated as soon
+ * as the lock is released.
+ *
+ * @see NonstrictReadWriteCache for a faster algorithm
+ * @see CacheConcurrencyStrategy
+ */
+public class ReadWriteCache implements CacheConcurrencyStrategy {
+
+ private static final Log log = LogFactory.getLog(ReadWriteCache.class);
+
+ private Cache cache;
+ private int nextLockId;
+
+ public ReadWriteCache() {}
+
+ public void setCache(Cache cache) {
+ this.cache=cache;
+ }
+
+ public Cache getCache() {
+ return cache;
+ }
+
+ public String getRegionName() {
+ return cache.getRegionName();
+ }
+
+ /**
+ * Generate an id for a new lock. Uniqueness per cache instance is very
+ * desirable but not absolutely critical. Must be called from one of the
+ * synchronized methods of this class.
+ */
+ private int nextLockId() {
+ if (nextLockId==Integer.MAX_VALUE) nextLockId = Integer.MIN_VALUE;
+ return nextLockId++;
+ }
+
+ /**
+ * Do not return an item whose timestamp is later than the current
+ * transaction timestamp. (Otherwise we might compromise repeatable
+ * read unnecessarily.) Do not return an item which is soft-locked.
+ * Always go straight to the database instead.
+ *
+ * Note that since reading an item from the cache does not actually
+ * go to the database, it is possible to see a kind of phantom read
+ * due to the underlying row being updated after we have read it
+ * from the cache. This would not be possible in a lock-based
+ * implementation of repeatable read isolation. It is also possible
+ * to overwrite changes made and committed by another transaction
+ * after the current transaction read the item from the cache. This
+ * problem would be caught by the update-time version-checking, if
+ * the data is versioned or timestamped.
+ */
+ public synchronized Object get(Object key, long txTimestamp) throws CacheException {
+
+ if ( log.isTraceEnabled() ) log.trace("Cache lookup: " + key);
+
+ /*try {
+ cache.lock(key);*/
+
+ Lockable lockable = (Lockable) cache.get(key);
+
+ boolean gettable = lockable!=null && lockable.isGettable(txTimestamp);
+
+ if (gettable) {
+ if ( log.isTraceEnabled() ) log.trace("Cache hit: " + key);
+ return ( (Item) lockable ).getValue();
+ }
+ else {
+ if ( log.isTraceEnabled() ) {
+ if (lockable==null) {
+ log.trace("Cache miss: " + key);
+ }
+ else {
+ log.trace("Cached item was locked: " + key);
+ }
+ }
+ return null;
+ }
+ /*}
+ finally {
+ cache.unlock(key);
+ }*/
+ }
+
+ /**
+ * Stop any other transactions reading or writing this item to/from
+ * the cache. Send them straight to the database instead. (The lock
+ * does time out eventually.) This implementation tracks concurrent
+ * locks of transactions which simultaneously attempt to write to an
+ * item.
+ */
+ public synchronized SoftLock lock(Object key, Object version) throws CacheException {
+ if ( log.isTraceEnabled() ) log.trace("Invalidating: " + key);
+
+ try {
+ cache.lock(key);
+
+ Lockable lockable = (Lockable) cache.get(key);
+ long timeout = cache.nextTimestamp() + cache.getTimeout();
+ final Lock lock = (lockable==null) ?
+ new Lock( timeout, nextLockId(), version ) :
+ lockable.lock( timeout, nextLockId() );
+ cache.update(key, lock);
+ return lock;
+ }
+ finally {
+ cache.unlock(key);
+ }
+
+ }
+
+ /**
+ * Do not add an item to the cache unless the current transaction
+ * timestamp is later than the timestamp at which the item was
+ * invalidated. (Otherwise, a stale item might be re-added if the
+ * database is operating in repeatable read isolation mode.)
+ * For versioned data, don't add the item unless it is the later
+ * version.
+ */
+ public synchronized boolean put(
+ Object key,
+ Object value,
+ long txTimestamp,
+ Object version,
+ Comparator versionComparator,
+ boolean minimalPut)
+ throws CacheException {
+ if ( log.isTraceEnabled() ) log.trace("Caching: " + key);
+
+ try {
+ cache.lock(key);
+
+ Lockable lockable = (Lockable) cache.get(key);
+
+ boolean puttable = lockable==null ||
+ lockable.isPuttable(txTimestamp, version, versionComparator);
+
+ if (puttable) {
+ cache.put( key, new Item( value, version, cache.nextTimestamp() ) );
+ if ( log.isTraceEnabled() ) log.trace("Cached: " + key);
+ return true;
+ }
+ else {
+ if ( log.isTraceEnabled() ) {
+ if ( lockable.isLock() ) {
+ log.trace("Item was locked: " + key);
+ }
+ else {
+ log.trace("Item was already cached: " + key);
+ }
+ }
+ return false;
+ }
+ }
+ finally {
+ cache.unlock(key);
+ }
+ }
+
+ /**
+ * decrement a lock and put it back in the cache
+ */
+ private void decrementLock(Object key, Lock lock) throws CacheException {
+ //decrement the lock
+ lock.unlock( cache.nextTimestamp() );
+ cache.update(key, lock);
+ }
+
+ /**
+ * Release the soft lock on the item. Other transactions may now
+ * re-cache the item (assuming that no other transaction holds a
+ * simultaneous lock).
+ */
+ public synchronized void release(Object key, SoftLock clientLock) throws CacheException {
+ if ( log.isTraceEnabled() ) log.trace("Releasing: " + key);
+
+ try {
+ cache.lock(key);
+
+ Lockable lockable = (Lockable) cache.get(key);
+ if ( isUnlockable(clientLock, lockable) ) {
+ decrementLock(key, (Lock) lockable);
+ }
+ else {
+ handleLockExpiry(key);
+ }
+ }
+ finally {
+ cache.unlock(key);
+ }
+ }
+
+ void handleLockExpiry(Object key) throws CacheException {
+ log.warn("An item was expired by the cache while it was locked (increase your cache timeout): " + key);
+ long ts = cache.nextTimestamp() + cache.getTimeout();
+ // create new lock that times out immediately
+ Lock lock = new Lock( ts, nextLockId(), null );
+ lock.unlock(ts);
+ cache.update(key, lock);
+ }
+
+ public void clear() throws CacheException {
+ cache.clear();
+ }
+
+ public void remove(Object key) throws CacheException {
+ cache.remove(key);
+ }
+
+ public void destroy() {
+ try {
+ cache.destroy();
+ }
+ catch (Exception e) {
+ log.warn("could not destroy cache", e);
+ }
+ }
+
+ /**
+ * Re-cache the updated state, if and only if there are
+ * no other concurrent soft locks. Release our lock.
+ */
+ public synchronized boolean afterUpdate(Object key, Object value, Object version, SoftLock clientLock)
+ throws CacheException {
+
+ if ( log.isTraceEnabled() ) log.trace("Updating: " + key);
+
+ try {
+ cache.lock(key);
+
+ Lockable lockable = (Lockable) cache.get(key);
+ if ( isUnlockable(clientLock, lockable) ) {
+ Lock lock = (Lock) lockable;
+ if ( lock.wasLockedConcurrently() ) {
+ // just decrement the lock, don't recache
+ // (we don't know which transaction won)
+ decrementLock(key, lock);
+ return false;
+ }
+ else {
+ //recache the updated state
+ cache.update( key, new Item( value, version, cache.nextTimestamp() ) );
+ if ( log.isTraceEnabled() ) log.trace("Updated: " + key);
+ return true;
+ }
+ }
+ else {
+ handleLockExpiry(key);
+ return false;
+ }
+
+ }
+ finally {
+ cache.unlock(key);
+ }
+ }
+
+ /**
+ * Add the new item to the cache, checking that no other transaction has
+ * accessed the item.
+ */
+ public synchronized boolean afterInsert(Object key, Object value, Object version)
+ throws CacheException {
+
+ if ( log.isTraceEnabled() ) log.trace("Inserting: " + key);
+ try {
+ cache.lock(key);
+
+ Lockable lockable = (Lockable) cache.get(key);
+ if (lockable==null) {
+ cache.update( key, new Item( value, version, cache.nextTimestamp() ) );
+ if ( log.isTraceEnabled() ) log.trace("Inserted: " + key);
+ return true;
+ }
+ else {
+ return false;
+ }
+ }
+ finally {
+ cache.unlock(key);
+ }
+ }
+
+ /**
+ * Do nothing.
+ */
+ public void evict(Object key) throws CacheException {
+ // noop
+ }
+
+ /**
+ * Do nothing.
+ */
+ public boolean insert(Object key, Object value, Object currentVersion) {
+ return false;
+ }
+
+ /**
+ * Do nothing.
+ */
+ public boolean update(Object key, Object value, Object currentVersion, Object previousVersion) {
+ return false;
+ }
+
+ /**
+ * Is the client's lock commensurate with the item in the cache?
+ * If it is not, we know that the cache expired the original
+ * lock.
+ */
+ private boolean isUnlockable(SoftLock clientLock, Lockable myLock)
+ throws CacheException {
+ //null clientLock is remotely possible but will never happen in practice
+ return myLock!=null &&
+ myLock.isLock() &&
+ clientLock!=null &&
+ ( (Lock) clientLock ).getId()==( (Lock) myLock ).getId();
+ }
+
+ public static interface Lockable {
+ public Lock lock(long timeout, int id);
+ public boolean isLock();
+ public boolean isGettable(long txTimestamp);
+ public boolean isPuttable(long txTimestamp, Object newVersion, Comparator comparator);
+ }
+
+ /**
+ * An item of cached data, timestamped with the time it was cached.
+ * @see ReadWriteCache
+ */
+ public static final class Item implements Serializable, Lockable {
+
+ private final long freshTimestamp;
+ private final Object value;
+ private final Object version;
+
+ public Item(Object value, Object version, long currentTimestamp) {
+ this.value = value;
+ this.version = version;
+ freshTimestamp = currentTimestamp;
+ }
+ /**
+ * The timestamp on the cached data
+ */
+ public long getFreshTimestamp() {
+ return freshTimestamp;
+ }
+ /**
+ * The actual cached data
+ */
+ public Object getValue() {
+ return value;
+ }
+
+ /**
+ * Lock the item
+ */
+ public Lock lock(long timeout, int id) {
+ return new Lock(timeout, id, version);
+ }
+ /**
+ * Not a lock!
+ */
+ public boolean isLock() {
+ return false;
+ }
+ /**
+ * Is this item visible to the timestamped
+ * transaction?
+ */
+ public boolean isGettable(long txTimestamp) {
+ return freshTimestamp < txTimestamp;
+ }
+
+ /**
+ * Don't overwrite already cached items
+ */
+ public boolean isPuttable(long txTimestamp, Object newVersion, Comparator comparator) {
+ // we really could refresh the item if it
+ // is not a lock, but it might be slower
+ //return freshTimestamp < txTimestamp
+ return version!=null && comparator.compare(version, newVersion) < 0;
+ }
+
+ public String toString() {
+ return "Item{version=" + version +
+ ",freshTimestamp=" + freshTimestamp;
+ }
+ }
+
+ /**
+ * A soft lock which supports concurrent locking,
+ * timestamped with the time it was released
+ * @author Gavin King
+ */
+ public static final class Lock implements Serializable, Lockable, SoftLock {
+ private long unlockTimestamp = -1;
+ private int multiplicity = 1;
+ private boolean concurrentLock = false;
+ private long timeout;
+ private final int id;
+ private final Object version;
+
+ public Lock(long timeout, int id, Object version) {
+ this.timeout = timeout;
+ this.id = id;
+ this.version = version;
+ }
+
+ public long getUnlockTimestamp() {
+ return unlockTimestamp;
+ }
+ /**
+ * Increment the lock, setting the
+ * new lock timeout
+ */
+ public Lock lock(long timeout, int id) {
+ concurrentLock = true;
+ multiplicity++;
+ this.timeout = timeout;
+ return this;
+ }
+ /**
+ * Decrement the lock, setting the unlock
+ * timestamp if now unlocked
+ * @param currentTimestamp
+ */
+ public void unlock(long currentTimestamp) {
+ if ( --multiplicity == 0 ) {
+ unlockTimestamp = currentTimestamp;
+ }
+ }
+
+ /**
+ * Can the timestamped transaction re-cache this
+ * locked item now?
+ */
+ public boolean isPuttable(long txTimestamp, Object newVersion, Comparator comparator) {
+ if (timeout < txTimestamp) return true;
+ if (multiplicity>0) return false;
+ return version==null ?
+ unlockTimestamp < txTimestamp :
+ comparator.compare(version, newVersion) < 0; //by requiring <, we rely on lock timeout in the case of an unsuccessful update!
+ }
+
+ /**
+ * Was this lock held concurrently by multiple
+ * transactions?
+ */
+ public boolean wasLockedConcurrently() {
+ return concurrentLock;
+ }
+ /**
+ * Yes, this is a lock
+ */
+ public boolean isLock() {
+ return true;
+ }
+ /**
+ * locks are not returned to the client!
+ */
+ public boolean isGettable(long txTimestamp) {
+ return false;
+ }
+
+ public int getId() { return id; }
+
+ public String toString() {
+ return "Lock{id=" + id +
+ ",version=" + version +
+ ",multiplicity=" + multiplicity +
+ ",unlockTimestamp=" + unlockTimestamp;
+ }
+
+ }
+
+ public String toString() {
+ return cache + "(read-write)";
+ }
+
+}
+
+
+
+
+
+
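// Editor's sketch (not part of the patch): the soft-lock lifecycle an updating
// transaction drives against this strategy. HashMapCache is the hypothetical
// single-VM Cache from the earlier sketch; the versions here are plain Integers.
import org.hibernate.cache.ReadWriteCache;
import org.hibernate.cache.SoftLock;

public class ReadWriteLifecycleDemo {
	public static void main(String[] args) {
		ReadWriteCache strategy = new ReadWriteCache();
		strategy.setCache( new HashMapCache() );
		long tx = strategy.getCache().nextTimestamp();

		strategy.put( "item#1", "v1-state", tx, new Integer( 1 ), null, false );

		// 1. before writing to the database, soft-lock the cached item
		SoftLock lock = strategy.lock( "item#1", new Integer( 1 ) );

		// 2. while locked, readers are sent straight to the database
		System.out.println( strategy.get( "item#1", tx ) ); // prints null

		// 3. after the transaction commits, recache the new state and release the lock
		strategy.afterUpdate( "item#1", "v2-state", new Integer( 2 ), lock );
	}
}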
diff --git a/src/org/hibernate/cache/StandardQueryCache.java b/src/org/hibernate/cache/StandardQueryCache.java
new file mode 100644
index 0000000000..cdb7693076
--- /dev/null
+++ b/src/org/hibernate/cache/StandardQueryCache.java
@@ -0,0 +1,182 @@
+//$Id$
+package org.hibernate.cache;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.hibernate.HibernateException;
+import org.hibernate.UnresolvableObjectException;
+import org.hibernate.cfg.Settings;
+import org.hibernate.engine.SessionImplementor;
+import org.hibernate.type.Type;
+import org.hibernate.type.TypeFactory;
+
+/**
+ * The standard implementation of the Hibernate QueryCache interface. This
+ * implementation is very good at recognizing stale query results and
+ * re-running queries when it detects this condition, recaching the new
+ * results.
+ *
+ * @author Gavin King
+ */
+public class StandardQueryCache implements QueryCache {
+
+ private static final Log log = LogFactory.getLog( StandardQueryCache.class );
+
+ private Cache queryCache;
+ private UpdateTimestampsCache updateTimestampsCache;
+ private final String regionName;
+
+ public void clear() throws CacheException {
+ queryCache.clear();
+ }
+
+ public StandardQueryCache(
+ final Settings settings,
+ final Properties props,
+ final UpdateTimestampsCache updateTimestampsCache,
+ String regionName) throws HibernateException {
+ if ( regionName == null ) {
+ regionName = StandardQueryCache.class.getName();
+ }
+ String prefix = settings.getCacheRegionPrefix();
+ if ( prefix != null ) {
+ regionName = prefix + '.' + regionName;
+ }
+ log.info( "starting query cache at region: " + regionName );
+
+ this.queryCache = settings.getCacheProvider().buildCache(regionName, props);
+ this.updateTimestampsCache = updateTimestampsCache;
+ this.regionName = regionName;
+ }
+
+ public boolean put(
+ QueryKey key,
+ Type[] returnTypes,
+ List result,
+ boolean isNaturalKeyLookup,
+ SessionImplementor session) throws HibernateException {
+
+ if ( isNaturalKeyLookup && result.size()==0 ) {
+ return false;
+ }
+ else {
+ Long ts = new Long( session.getTimestamp() );
+
+ if ( log.isDebugEnabled() ) {
+ log.debug( "caching query results in region: " + regionName + "; timestamp=" + ts );
+ }
+
+ List cacheable = new ArrayList( result.size()+1 );
+ cacheable.add( ts );
+ for ( int i=0; i<result.size(); i++ ) {

[... remainder of StandardQueryCache.java lost in extraction ...]

diff --git a/src/org/hibernate/cache/SwarmCache.java b/src/org/hibernate/cache/SwarmCache.java
new file mode 100644
--- /dev/null
+++ b/src/org/hibernate/cache/SwarmCache.java

[... opening of SwarmCache.java (package, imports, class declaration, fields, constructor) lost in extraction ...]

+ /**
+ * Get an item from the cache
+ * @param key the cache key
+ * @return the cached object or null
+ * @throws CacheException
+ */
+ public Object get(Object key) throws CacheException {
+ if (key instanceof Serializable) {
+ return cache.get( (Serializable) key );
+ }
+ else {
+ throw new CacheException("Keys must implement Serializable");
+ }
+ }
+
+ public Object read(Object key) throws CacheException {
+ return get(key);
+ }
+
+ /**
+ * Add an item to the cache
+ * @param key
+ * @param value
+ * @throws CacheException
+ */
+ public void update(Object key, Object value) throws CacheException {
+ put(key, value);
+ }
+
+ /**
+ * Add an item to the cache
+ * @param key
+ * @param value
+ * @throws CacheException
+ */
+ public void put(Object key, Object value) throws CacheException {
+ if (key instanceof Serializable) {
+ cache.put( (Serializable) key, value );
+ }
+ else {
+ throw new CacheException("Keys must implement Serializable");
+ }
+ }
+
+ /**
+ * Remove an item from the cache
+ */
+ public void remove(Object key) throws CacheException {
+ if (key instanceof Serializable) {
+ cache.clear( (Serializable) key );
+ }
+ else {
+ throw new CacheException("Keys must implement Serializable");
+ }
+ }
+
+ /**
+ * Clear the cache
+ */
+ public void clear() throws CacheException {
+ cache.clearAll();
+ }
+
+ /**
+ * Clean up
+ */
+ public void destroy() throws CacheException {
+ cache.clearAll();
+ }
+
+ /**
+ * If this is a clustered cache, lock the item
+ */
+ public void lock(Object key) throws CacheException {
+ throw new UnsupportedOperationException("SwarmCache does not support locking (use nonstrict-read-write)");
+ }
+
+ /**
+ * If this is a clustered cache, unlock the item
+ */
+ public void unlock(Object key) throws CacheException {
+ throw new UnsupportedOperationException("SwarmCache does not support locking (use nonstrict-read-write)");
+ }
+
+ /**
+ * Generate a (coarse) timestamp
+ */
+ public long nextTimestamp() {
+ return System.currentTimeMillis() / 100;
+ }
+
+ /**
+ * Get a reasonable "lock timeout"
+ */
+ public int getTimeout() {
+ return 600;
+ }
+
+ public String getRegionName() {
+ return regionName;
+ }
+
+ public long getSizeInMemory() {
+ return -1;
+ }
+
+ public long getElementCountInMemory() {
+ return -1;
+ }
+
+ public long getElementCountOnDisk() {
+ return -1;
+ }
+
+ public Map toMap() {
+ throw new UnsupportedOperationException();
+ }
+
+ public String toString() {
+ return "SwarmCache(" + regionName + ')';
+ }
+
+}
diff --git a/src/org/hibernate/cache/SwarmCacheProvider.java b/src/org/hibernate/cache/SwarmCacheProvider.java
new file mode 100644
index 0000000000..38c53db5d5
--- /dev/null
+++ b/src/org/hibernate/cache/SwarmCacheProvider.java
@@ -0,0 +1,58 @@
+//$Id$
+package org.hibernate.cache;
+
+import net.sf.swarmcache.CacheConfiguration;
+import net.sf.swarmcache.CacheConfigurationManager;
+import net.sf.swarmcache.CacheFactory;
+import net.sf.swarmcache.ObjectCache;
+
+import java.util.Properties;
+
+/**
+ * Support for SwarmCache replicated cache. SwarmCache does not support
+ * locking, so strict "read-write" semantics are unsupported.
+ * @author Jason Carreira
+ */
+public class SwarmCacheProvider implements CacheProvider {
+
+ private CacheFactory factory;
+
+ public Cache buildCache(String regionName, Properties properties) throws CacheException {
+ ObjectCache cache = factory.createCache(regionName);
+ if (cache==null) {
+ throw new CacheException("SwarmCache did not create a cache: " + regionName);
+ }
+ return new SwarmCache(cache, regionName);
+ }
+
+ public long nextTimestamp() {
+ return System.currentTimeMillis() / 100;
+ }
+
+ /**
+ * Callback to perform any necessary initialization of the underlying cache implementation
+ * during SessionFactory construction.
+ *
+ * @param properties current configuration settings.
+ */
+ public void start(Properties properties) throws CacheException {
+ CacheConfiguration config = CacheConfigurationManager.getConfig(properties);
+ factory = new CacheFactory(config);
+ }
+
+ /**
+ * Callback to perform any necessary cleanup of the underlying cache implementation
+ * during SessionFactory.close().
+ */
+ public void stop() {
+ if (factory != null) {
+ factory.shutdown();
+ factory = null;
+ }
+ }
+
+ public boolean isMinimalPutsEnabledByDefault() {
+ return true;
+ }
+
+}
diff --git a/src/org/hibernate/cache/Timestamper.java b/src/org/hibernate/cache/Timestamper.java
new file mode 100644
index 0000000000..6aca831d02
--- /dev/null
+++ b/src/org/hibernate/cache/Timestamper.java
@@ -0,0 +1,37 @@
+//$Id$
+package org.hibernate.cache;
+
+/**
+ * Generates increasing identifiers (in a single VM only).
+ * Not valid across multiple VMs. Identifiers are not necessarily
+ * strictly increasing, but usually are.
+ */
+public final class Timestamper {
+ private static short counter = 0;
+ private static long time;
+ private static final int BIN_DIGITS = 12;
+ public static final short ONE_MS = 1<<BIN_DIGITS;

[... remainder of Timestamper.java lost in extraction ...]

diff --git a/src/org/hibernate/cache/TransactionalCache.java b/src/org/hibernate/cache/TransactionalCache.java
new file mode 100644
--- /dev/null
+++ b/src/org/hibernate/cache/TransactionalCache.java
+//$Id$
+package org.hibernate.cache;
+
+import java.util.Comparator;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * Support for fully transactional cache implementations like
+ * JBoss TreeCache. Note that this might be a less scalable
+ * concurrency strategy than ReadWriteCache. This is
+ * a "synchronous" concurrency strategy.
+ *
+ * @author Gavin King
+ */
+public class TransactionalCache implements CacheConcurrencyStrategy {
+
+ private static final Log log = LogFactory.getLog( TransactionalCache.class );
+
+ private Cache cache;
+
+ public String getRegionName() {
+ return cache.getRegionName();
+ }
+
+ public Object get(Object key, long txTimestamp) throws CacheException {
+ if ( log.isDebugEnabled() ) {
+ log.debug( "cache lookup: " + key );
+ }
+ Object result = cache.read( key );
+ if ( log.isDebugEnabled() ) {
+ log.debug( result == null ? "cache miss" : "cache hit" );
+ }
+ return result;
+ }
+
+ public boolean put(
+ Object key,
+ Object value,
+ long txTimestamp,
+ Object version,
+ Comparator versionComparator,
+ boolean minimalPut) throws CacheException {
+ if ( minimalPut && cache.read( key ) != null ) {
+ if ( log.isDebugEnabled() ) {
+ log.debug( "item already cached: " + key );
+ }
+ return false;
+ }
+ if ( log.isDebugEnabled() ) {
+ log.debug( "caching: " + key );
+ }
+ if ( cache instanceof OptimisticCache ) {
+ ( ( OptimisticCache ) cache ).writeLoad( key, value, version );
+ }
+ else {
+ cache.put( key, value );
+ }
+ return true;
+ }
+
+ /**
+ * Do nothing, returning null.
+ */
+ public SoftLock lock(Object key, Object version) throws CacheException {
+ //noop
+ return null;
+ }
+
+ /**
+ * Do nothing.
+ */
+ public void release(Object key, SoftLock clientLock) throws CacheException {
+ //noop
+ }
+
+ public boolean update(
+ Object key,
+ Object value,
+ Object currentVersion,
+ Object previousVersion) throws CacheException {
+ if ( log.isDebugEnabled() ) {
+ log.debug( "updating: " + key );
+ }
+ if ( cache instanceof OptimisticCache ) {
+ ( ( OptimisticCache ) cache ).writeUpdate( key, value, currentVersion, previousVersion );
+ }
+ else {
+ cache.update( key, value );
+ }
+ return true;
+ }
+
+ public boolean insert(
+ Object key,
+ Object value,
+ Object currentVersion) throws CacheException {
+ if ( log.isDebugEnabled() ) {
+ log.debug( "inserting: " + key );
+ }
+ if ( cache instanceof OptimisticCache ) {
+ ( ( OptimisticCache ) cache ).writeInsert( key, value, currentVersion );
+ }
+ else {
+ cache.update( key, value );
+ }
+ return true;
+ }
+
+ public void evict(Object key) throws CacheException {
+ cache.remove( key );
+ }
+
+ public void remove(Object key) throws CacheException {
+ if ( log.isDebugEnabled() ) {
+ log.debug( "removing: " + key );
+ }
+ cache.remove( key );
+ }
+
+ public void clear() throws CacheException {
+ log.debug( "clearing" );
+ cache.clear();
+ }
+
+ public void destroy() {
+ try {
+ cache.destroy();
+ }
+ catch ( Exception e ) {
+ log.warn( "could not destroy cache", e );
+ }
+ }
+
+ public void setCache(Cache cache) {
+ this.cache = cache;
+ }
+
+ public Cache getCache() {
+ return cache;
+ }
+
+ /**
+ * Do nothing.
+ */
+ public boolean afterInsert(
+ Object key,
+ Object value,
+ Object version) throws CacheException {
+ return false;
+ }
+
+ /**
+ * Do nothing.
+ */
+ public boolean afterUpdate(
+ Object key,
+ Object value,
+ Object version,
+ SoftLock clientLock) throws CacheException {
+ return false;
+ }
+
+ public String toString() {
+ return cache + "(transactional)";
+ }
+
+}
diff --git a/src/org/hibernate/cache/TreeCache.java b/src/org/hibernate/cache/TreeCache.java
new file mode 100644
index 0000000000..a89599ac9c
--- /dev/null
+++ b/src/org/hibernate/cache/TreeCache.java
@@ -0,0 +1,205 @@
+//$Id$
+package org.hibernate.cache;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+import javax.transaction.SystemException;
+import javax.transaction.Transaction;
+import javax.transaction.TransactionManager;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.jboss.cache.Fqn;
+import org.jboss.cache.lock.TimeoutException;
+
+/**
+ * Represents a particular region within the given JBossCache TreeCache.
+ *
+ * @author Gavin King
+ */
+public class TreeCache implements Cache {
+
+ private static final Log log = LogFactory.getLog(TreeCache.class);
+
+ private static final String ITEM = "item";
+
+ private org.jboss.cache.TreeCache cache;
+ private final String regionName;
+ private final Fqn regionFqn;
+ private final TransactionManager transactionManager;
+
+ public TreeCache(org.jboss.cache.TreeCache cache, String regionName, TransactionManager transactionManager)
+ throws CacheException {
+ this.cache = cache;
+ this.regionName = regionName;
+ this.regionFqn = Fqn.fromString( regionName.replace( '.', '/' ) );
+ this.transactionManager = transactionManager;
+ }
+
+ public Object get(Object key) throws CacheException {
+ Transaction tx = suspend();
+ try {
+ return read(key);
+ }
+ finally {
+ resume( tx );
+ }
+ }
+
+ public Object read(Object key) throws CacheException {
+ try {
+ return cache.get( new Fqn( regionFqn, key ), ITEM );
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+ public void update(Object key, Object value) throws CacheException {
+ try {
+ cache.put( new Fqn( regionFqn, key ), ITEM, value );
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+ public void put(Object key, Object value) throws CacheException {
+ Transaction tx = suspend();
+ try {
+ //do the failfast put outside the scope of the JTA txn
+ cache.putFailFast( new Fqn( regionFqn, key ), ITEM, value, 0 );
+ }
+ catch (TimeoutException te) {
+ //ignore!
+ log.debug("ignoring write lock acquisition failure");
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ finally {
+ resume( tx );
+ }
+ }
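+
+ /*
+ * Why suspend around put()? The fail-fast put that populates the cache on a
+ * read miss must not join the caller's JTA transaction: if the write lock
+ * cannot be acquired immediately, the put is simply skipped rather than
+ * blocking (or deadlocking) the surrounding transaction.
+ */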
+
+ private void resume(Transaction tx) {
+ try {
+ if (tx!=null) transactionManager.resume(tx);
+ }
+ catch (Exception e) {
+ throw new CacheException("Could not resume transaction", e);
+ }
+ }
+
+ private Transaction suspend() {
+ Transaction tx = null;
+ try {
+ if ( transactionManager!=null ) {
+ tx = transactionManager.suspend();
+ }
+ }
+ catch (SystemException se) {
+ throw new CacheException("Could not suspend transaction", se);
+ }
+ return tx;
+ }
+
+ public void remove(Object key) throws CacheException {
+ try {
+ cache.remove( new Fqn( regionFqn, key ) );
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+ public void clear() throws CacheException {
+ try {
+ cache.remove( regionFqn );
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+ public void destroy() throws CacheException {
+ try {
+ // NOTE : evict() operates locally only (i.e., it does not propagate
+ // to other nodes in the potential cluster). This is exactly what is
+ // needed here: destroy() is called while a SessionFactory is shutting
+ // down, so these removals should not be propagated.
+ cache.evict( regionFqn );
+ }
+ catch( Exception e ) {
+ throw new CacheException( e );
+ }
+ }
+
+ public void lock(Object key) throws CacheException {
+ throw new UnsupportedOperationException( "TreeCache is a fully transactional cache" + regionName );
+ }
+
+ public void unlock(Object key) throws CacheException {
+ throw new UnsupportedOperationException( "TreeCache is a fully transactional cache: " + regionName );
+ }
+
+ public long nextTimestamp() {
+ return System.currentTimeMillis() / 100; // 100ms resolution
+ }
+
+ public int getTimeout() {
+ return 600; // 600 ticks of 100ms = 60 seconds
+ }
+
+ public String getRegionName() {
+ return regionName;
+ }
+
+ public long getSizeInMemory() {
+ return -1;
+ }
+
+ public long getElementCountInMemory() {
+ try {
+ Set children = cache.getChildrenNames( regionFqn );
+ return children == null ? 0 : children.size();
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+ public long getElementCountOnDisk() {
+ return 0;
+ }
+
+ public Map toMap() {
+ try {
+ Map result = new HashMap();
+ Set childrenNames = cache.getChildrenNames( regionFqn );
+ if (childrenNames != null) {
+ Iterator iter = childrenNames.iterator();
+ while ( iter.hasNext() ) {
+ Object key = iter.next();
+ result.put(
+ key,
+ cache.get( new Fqn( regionFqn, key ), ITEM )
+ );
+ }
+ }
+ return result;
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+ public String toString() {
+ return "TreeCache(" + regionName + ')';
+ }
+
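+ /*
+ * Mapping sketch (hypothetical region and key): a region named "com.acme.Item"
+ * becomes the node /com/acme/Item in the shared tree, and key 42 is stored
+ * beneath it, so the value lives at /com/acme/Item/42 under the attribute
+ * name "item".
+ */
+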
+}
diff --git a/src/org/hibernate/cache/TreeCacheProvider.java b/src/org/hibernate/cache/TreeCacheProvider.java
new file mode 100644
index 0000000000..b73737e5a7
--- /dev/null
+++ b/src/org/hibernate/cache/TreeCacheProvider.java
@@ -0,0 +1,109 @@
+//$Id$
+package org.hibernate.cache;
+
+import java.util.Properties;
+import javax.transaction.TransactionManager;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.hibernate.transaction.TransactionManagerLookup;
+import org.hibernate.transaction.TransactionManagerLookupFactory;
+import org.hibernate.cfg.Environment;
+import org.jboss.cache.PropertyConfigurator;
+
+/**
+ * Support for a standalone JBossCache (TreeCache) instance. The JBossCache is configured
+ * via a local config resource.
+ *
+ * @author Gavin King
+ */
+public class TreeCacheProvider implements CacheProvider {
+
+ /**
+ * @deprecated use {@link org.hibernate.cfg.Environment#CACHE_PROVIDER_CONFIG}
+ */
+ public static final String CONFIG_RESOURCE = "hibernate.cache.tree_cache.config";
+ public static final String DEFAULT_CONFIG = "treecache.xml";
+
+ private static final Log log = LogFactory.getLog( TreeCacheProvider.class );
+
+ private org.jboss.cache.TreeCache cache;
+ private TransactionManager transactionManager;
+
+ /**
+ * Construct and configure the Cache representation of a named cache region.
+ *
+ * @param regionName the name of the cache region
+ * @param properties configuration settings
+ * @return The Cache representation of the named cache region.
+ * @throws CacheException Indicates an error building the cache region.
+ */
+ public Cache buildCache(String regionName, Properties properties) throws CacheException {
+ return new TreeCache(cache, regionName, transactionManager);
+ }
+
+ public long nextTimestamp() {
+ return System.currentTimeMillis() / 100; // 100ms resolution
+ }
+
+ /**
+ * Prepare the underlying JBossCache TreeCache instance.
+ *
+ * @param properties All current config settings.
+ *
+ * @throws CacheException Indicates a problem preparing cache for use.
+ */
+ public void start(Properties properties) {
+ String resource = properties.getProperty( Environment.CACHE_PROVIDER_CONFIG );
+
+ if ( resource == null ) {
+ resource = properties.getProperty( CONFIG_RESOURCE );
+ }
+ if ( resource == null ) {
+ resource = DEFAULT_CONFIG;
+ }
+ log.debug( "Configuring TreeCache from resource [" + resource + "]" );
+ try {
+ cache = new org.jboss.cache.TreeCache();
+ PropertyConfigurator config = new PropertyConfigurator();
+ config.configure( cache, resource );
+ TransactionManagerLookup transactionManagerLookup = TransactionManagerLookupFactory.getTransactionManagerLookup(properties);
+ if (transactionManagerLookup!=null) {
+ cache.setTransactionManagerLookup( new TransactionManagerLookupAdaptor(transactionManagerLookup, properties) );
+ transactionManager = transactionManagerLookup.getTransactionManager(properties);
+ }
+ cache.start();
+ }
+ catch (Exception e) {
+ throw new CacheException(e);
+ }
+ }
+
+ public void stop() {
+ if (cache!=null) {
+ cache.stop();
+ cache.destroy();
+ cache=null;
+ }
+ }
+
+ public boolean isMinimalPutsEnabledByDefault() {
+ return true;
+ }
+
+ static final class TransactionManagerLookupAdaptor implements org.jboss.cache.TransactionManagerLookup {
+ private final TransactionManagerLookup tml;
+ private final Properties props;
+ TransactionManagerLookupAdaptor(TransactionManagerLookup tml, Properties props) {
+ this.tml=tml;
+ this.props=props;
+ }
+ public TransactionManager getTransactionManager() throws Exception {
+ return tml.getTransactionManager(props);
+ }
+ }
+
+ public org.jboss.cache.TreeCache getUnderlyingCache() {
+ return cache;
+ }
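+
+ /*
+ * Configuration sketch, assuming the standard property names behind
+ * Environment.CACHE_PROVIDER_CONFIG (in hibernate.properties or hibernate.cfg.xml):
+ *
+ *   hibernate.cache.provider_class = org.hibernate.cache.TreeCacheProvider
+ *   hibernate.cache.provider_configuration_file_resource_path = treecache.xml
+ */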
+}
diff --git a/src/org/hibernate/cache/UpdateTimestampsCache.java b/src/org/hibernate/cache/UpdateTimestampsCache.java
new file mode 100644
index 0000000000..d45fec29d3
--- /dev/null
+++ b/src/org/hibernate/cache/UpdateTimestampsCache.java
@@ -0,0 +1,117 @@
+//$Id$
+package org.hibernate.cache;
+
+import java.io.Serializable;
+import java.util.Iterator;
+import java.util.Properties;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.hibernate.HibernateException;
+import org.hibernate.cfg.Settings;
+
+/**
+ * Tracks the timestamps of the most recent updates to particular tables. It is
+ * important that the cache timeout of the underlying cache implementation be set
+ * to a higher value than the timeouts of any of the query caches. In fact, we
+ * recommend that the underlying cache not be configured for expiry at all.
+ * Note, in particular, that an LRU cache expiry policy is never appropriate.
+ *
+ * @author Gavin King
+ * @author Mikheil Kapanadze
+ */
+public class UpdateTimestampsCache {
+
+ public static final String REGION_NAME = UpdateTimestampsCache.class.getName();
+
+ private static final Log log = LogFactory.getLog(UpdateTimestampsCache.class);
+
+ private Cache updateTimestamps;
+ private final String regionName;
+
+ public void clear() throws CacheException {
+ updateTimestamps.clear();
+ }
+
+ public UpdateTimestampsCache(Settings settings, Properties props) throws HibernateException {
+ String prefix = settings.getCacheRegionPrefix();
+ regionName = prefix == null ? REGION_NAME : prefix + '.' + REGION_NAME;
+ log.info( "starting update timestamps cache at region: " + regionName );
+ this.updateTimestamps = settings.getCacheProvider().buildCache( regionName, props );
+ }
+
+ public synchronized void preinvalidate(Serializable[] spaces) throws CacheException {
+ //TODO: to handle concurrent writes correctly, this should return a Lock to the client
+ Long ts = new Long( updateTimestamps.nextTimestamp() + updateTimestamps.getTimeout() );
+ for ( int i=0; i<spaces.length; i++ ) {
+ updateTimestamps.put( spaces[i], ts );
+ }
+ //TODO: return new Lock(ts);
+ }
+
+ public synchronized void invalidate(Serializable[] spaces) throws CacheException {
+ //TODO: to handle concurrent writes correctly, the client should pass in a Lock
+ Long ts = new Long( updateTimestamps.nextTimestamp() );
+ for ( int i=0; i<spaces.length; i++ ) {
+ if ( log.isDebugEnabled() ) {
+ log.debug( "Invalidating space [" + spaces[i] + "], timestamp: " + ts );
+ }
+ updateTimestamps.put( spaces[i], ts );
+ }
+ }
+
+ public synchronized boolean isUpToDate(Set spaces, Long timestamp) throws HibernateException {
+ Iterator iter = spaces.iterator();
+ while ( iter.hasNext() ) {
+ Serializable space = (Serializable) iter.next();
+ Long lastUpdate = (Long) updateTimestamps.get( space );
+ if ( lastUpdate == null ) {
+ //the last update timestamp was lost from the cache
+ //(or there were no updates since startup!)
+ //assume the worst: the query result may be stale
+ return false;
+ }
+ else {
+ if ( lastUpdate.longValue() >= timestamp.longValue() ) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ public void destroy() {
+ try {
+ updateTimestamps.destroy();
+ }
+ catch (Exception e) {
+ log.warn("could not destroy UpdateTimestamps cache", e);
+ }
+ }
+
+ public Cache getCache() {
+ return updateTimestamps;
+ }
+
+ public String getRegionName() {
+ return regionName;
+ }
+
+ public String toString() {
+ return "UpdateTimestampeCache";
+ }
+
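+ /*
+ * Protocol sketch (illustrative; variable names are hypothetical): the query
+ * cache compares a cached result's timestamp against the last invalidation
+ * of every table space the query reads:
+ *
+ *   timestampsCache.preinvalidate( spaces );   // before the DML executes
+ *   // ... DML runs, transaction completes ...
+ *   timestampsCache.invalidate( spaces );      // after completion
+ *   boolean fresh = timestampsCache.isUpToDate( spaceSet, resultTimestamp );
+ */
+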
+}
diff --git a/src/org/hibernate/cache/entry/CacheEntry.java b/src/org/hibernate/cache/entry/CacheEntry.java
new file mode 100644
index 0000000000..0f0451611a
--- /dev/null
+++ b/src/org/hibernate/cache/entry/CacheEntry.java
@@ -0,0 +1,143 @@
+//$Id$
+package org.hibernate.cache.entry;
+
+import java.io.Serializable;
+
+import org.hibernate.AssertionFailure;
+import org.hibernate.HibernateException;
+import org.hibernate.Interceptor;
+import org.hibernate.engine.SessionImplementor;
+import org.hibernate.event.EventSource;
+import org.hibernate.event.PreLoadEvent;
+import org.hibernate.event.PreLoadEventListener;
+import org.hibernate.persister.entity.EntityPersister;
+import org.hibernate.type.TypeFactory;
+import org.hibernate.util.ArrayHelper;
+
+/**
+ * A cached instance of a persistent class
+ *
+ * @author Gavin King
+ */
+public final class CacheEntry implements Serializable {
+
+ private final Serializable[] disassembledState;
+ private final String subclass;
+ private final boolean lazyPropertiesAreUnfetched;
+ private final Object version;
+
+ public String getSubclass() {
+ return subclass;
+ }
+
+ public boolean areLazyPropertiesUnfetched() {
+ return lazyPropertiesAreUnfetched;
+ }
+
+ public CacheEntry(
+ final Object[] state,
+ final EntityPersister persister,
+ final boolean unfetched,
+ final Object version,
+ final SessionImplementor session,
+ final Object owner)
+ throws HibernateException {
+ //disassembled state gets put in a new array (we write to cache by value!)
+ this.disassembledState = TypeFactory.disassemble(
+ state,
+ persister.getPropertyTypes(),
+ persister.isLazyPropertiesCacheable() ?
+ null : persister.getPropertyLaziness(),
+ session,
+ owner
+ );
+ subclass = persister.getEntityName();
+ lazyPropertiesAreUnfetched = unfetched || !persister.isLazyPropertiesCacheable();
+ this.version = version;
+ }
+
+ public Object getVersion() {
+ return version;
+ }
+
+ CacheEntry(Serializable[] state, String subclass, boolean unfetched, Object version) {
+ this.disassembledState = state;
+ this.subclass = subclass;
+ this.lazyPropertiesAreUnfetched = unfetched;
+ this.version = version;
+ }
+
+ public Object[] assemble(
+ final Object instance,
+ final Serializable id,
+ final EntityPersister persister,
+ final Interceptor interceptor,
+ final EventSource session)
+ throws HibernateException {
+
+ if ( !persister.getEntityName().equals(subclass) ) {
+ throw new AssertionFailure("Tried to assemble a different subclass instance");
+ }
+
+ return assemble(disassembledState, instance, id, persister, interceptor, session);
+
+ }
+
+ private static Object[] assemble(
+ final Serializable[] values,
+ final Object result,
+ final Serializable id,
+ final EntityPersister persister,
+ final Interceptor interceptor,
+ final EventSource session)
+ throws HibernateException {
+
+ //assembled state gets put in a new array (we read from cache by value!)
+ Object[] assembledProps = TypeFactory.assemble(
+ values,
+ persister.getPropertyTypes(),
+ session, result
+ );
+
+ //persister.setIdentifier(result, id); //before calling interceptor, for consistency with normal load
+
+ //TODO: reuse the PreLoadEvent
+ PreLoadEvent preLoadEvent = new PreLoadEvent( session )
+ .setEntity(result)
+ .setState(assembledProps)
+ .setId(id)
+ .setPersister(persister);
+
+ PreLoadEventListener[] listeners = session.getListeners().getPreLoadEventListeners();
+ for ( int i = 0; i < listeners.length; i++ ) {
+ listeners[i].onPreLoad(preLoadEvent);
+ }
+
+ persister.setPropertyValues(
+ result,
+ assembledProps,
+ session.getEntityMode()
+ );
+
+ return assembledProps;
+ }
+
+ public Serializable[] getDisassembledState() {
+ // todo: this was added to support initializing an entity's EntityEntry snapshot during reattach;
+ // this should be refactored to instead expose a method to assemble an EntityEntry based on this
+ // state for return.
+ return disassembledState;
+ }
+
+ public String toString() {
+ return "CacheEntry(" + subclass + ')' +
+ ArrayHelper.toString(disassembledState);
+ }
+
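+ /*
+ * Usage sketch (hypothetical call sites): state goes into the cache by value
+ * and comes back out by value, so cached entries never alias live entities:
+ *
+ *   CacheEntry entry = new CacheEntry( state, persister, false, version, session, owner );
+ *   Object[] hydrated = entry.assemble( instance, id, persister, interceptor, eventSource );
+ */
+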
+}
+
+
+
+
+
+
diff --git a/src/org/hibernate/cache/entry/CacheEntryStructure.java b/src/org/hibernate/cache/entry/CacheEntryStructure.java
new file mode 100755
index 0000000000..7dc0f331ba
--- /dev/null
+++ b/src/org/hibernate/cache/entry/CacheEntryStructure.java
@@ -0,0 +1,14 @@
+//$Id$
+package org.hibernate.cache.entry;
+
+import org.hibernate.engine.SessionFactoryImplementor;
+
+
+
+/**
+ * @author Gavin King
+ */
+public interface CacheEntryStructure {
+ public Object structure(Object item);
+ public Object destructure(Object map, SessionFactoryImplementor factory);
+}
diff --git a/src/org/hibernate/cache/entry/CollectionCacheEntry.java b/src/org/hibernate/cache/entry/CollectionCacheEntry.java
new file mode 100755
index 0000000000..155d922fd9
--- /dev/null
+++ b/src/org/hibernate/cache/entry/CollectionCacheEntry.java
@@ -0,0 +1,43 @@
+//$Id$
+package org.hibernate.cache.entry;
+
+import java.io.Serializable;
+
+import org.hibernate.collection.PersistentCollection;
+import org.hibernate.persister.collection.CollectionPersister;
+import org.hibernate.util.ArrayHelper;
+
+/**
+ * @author Gavin King
+ */
+public class CollectionCacheEntry implements Serializable {
+
+ private final Serializable state;
+
+ public Serializable[] getState() {
+ //TODO: assumes all collections disassemble to an array!
+ return (Serializable[]) state;
+ }
+
+ public CollectionCacheEntry(PersistentCollection collection, CollectionPersister persister) {
+ this.state = collection.disassemble(persister);
+ }
+
+ CollectionCacheEntry(Serializable state) {
+ this.state = state;
+ }
+
+ public void assemble(
+ final PersistentCollection collection,
+ final CollectionPersister persister,
+ final Object owner
+ ) {
+ collection.initializeFromCache(persister, state, owner);
+ collection.afterInitialize();
+ }
+
+ public String toString() {
+ return "CollectionCacheEntry" + ArrayHelper.toString( getState() );
+ }
+
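+ /*
+ * Usage sketch (hypothetical call sites): a collection is flattened to its
+ * disassembled elements on the way into the cache and re-initialized from
+ * them on the way out:
+ *
+ *   CollectionCacheEntry entry = new CollectionCacheEntry( collection, persister );
+ *   entry.assemble( newCollection, persister, owner );
+ */
+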
+}
diff --git a/src/org/hibernate/cache/entry/StructuredCacheEntry.java b/src/org/hibernate/cache/entry/StructuredCacheEntry.java
new file mode 100755
index 0000000000..596e60dc44
--- /dev/null
+++ b/src/org/hibernate/cache/entry/StructuredCacheEntry.java
@@ -0,0 +1,48 @@
+//$Id$
+package org.hibernate.cache.entry;
+
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.hibernate.engine.SessionFactoryImplementor;
+import org.hibernate.persister.entity.EntityPersister;
+
+/**
+ * @author Gavin King
+ */
+public class StructuredCacheEntry implements CacheEntryStructure {
+
+ private EntityPersister persister;
+
+ public StructuredCacheEntry(EntityPersister persister) {
+ this.persister = persister;
+ }
+
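+ /*
+ * Structured form sketch (hypothetical values): instead of an opaque
+ * CacheEntry, the entry is stored as a Map so cache tooling can inspect it:
+ *
+ *   { "_subclass"="Item", "_version"=3, "_lazyPropertiesUnfetched"=false,
+ *     "name"="widget", "price"=42 }
+ */
+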
+ public Object destructure(Object item, SessionFactoryImplementor factory) {
+ Map map = (Map) item;
+ boolean lazyPropertiesUnfetched = ( (Boolean) map.get("_lazyPropertiesUnfetched") ).booleanValue();
+ String subclass = (String) map.get("_subclass");
+ Object version = map.get("_version");
+ EntityPersister subclassPersister = factory.getEntityPersister(subclass);
+ String[] names = subclassPersister.getPropertyNames();
+ Serializable[] state = new Serializable[names.length];
+ for ( int i=0; i<names.length; i++ ) {
+ state[i] = (Serializable) map.get( names[i] );
+ }
+ return new CacheEntry( state, subclass, lazyPropertiesUnfetched, version );
+ }
+
+ public Object structure(Object item) {
+ CacheEntry entry = (CacheEntry) item;
+ String[] names = persister.getPropertyNames();
+ Map map = new HashMap( names.length + 2 );
+ map.put( "_subclass", entry.getSubclass() );
+ map.put( "_version", entry.getVersion() );
+ map.put( "_lazyPropertiesUnfetched", entry.areLazyPropertiesUnfetched() ? Boolean.TRUE : Boolean.FALSE );
+ for ( int i=0; i<names.length; i++ ) {
+ map.put( names[i], entry.getDisassembledState()[i] );
+ }
+ return map;
+ }
+
+}
diff --git a/src/org/hibernate/cache/entry/package.html b/src/org/hibernate/cache/entry/package.html
new file mode 100755
--- /dev/null
+++ b/src/org/hibernate/cache/entry/package.html
@@ -0,0 +1,9 @@
+<html>
+<head></head>
+<body>
+<p>
+This package defines formats for disassembled state
+kept in the second level cache.
+</p>
+</body>
+</html>