HHH-11318 - Fix Infinispan Javadoc documentation typos

Revert after/before -> afterQuery/beforeQuery in documentation
Radim Vansa 2016-12-05 11:38:43 +01:00 committed by Vlad Mihalcea
parent eaf5b10cf1
commit ed804d500e
27 changed files with 68 additions and 68 deletions


@@ -104,11 +104,11 @@ public class TypeOverrides implements Serializable {
    }

    /**
-    * Maximum lifespan of a cache entry, afterQuery which the entry is expired
+    * Maximum lifespan of a cache entry, after which the entry is expired
     * cluster-wide, in milliseconds. -1 means the entries never expire.
     *
     * @param expirationLifespan long representing the maximum lifespan,
-    *                           in milliseconds, for a cached entry beforeQuery
+    *                           in milliseconds, for a cached entry before
     *                           it's expired
     */
    public void setExpirationLifespan(long expirationLifespan) {
@@ -126,7 +126,7 @@ public class TypeOverrides implements Serializable {
     * cluster-wide. -1 means the entries never expire.
     *
     * @param expirationMaxIdle long representing the maximum idle time, in
-    *                          milliseconds, for a cached entry beforeQuery it's
+    *                          milliseconds, for a cached entry before it's
     *                          expired
     */
    public void setExpirationMaxIdle(long expirationMaxIdle) {
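For context, the two setters touched above control per-type entry expiration. A minimal sketch of how they might be used follows; the lifespan/max-idle values are illustrative assumptions, and in practice these overrides are usually populated from hibernate.cache.infinispan.* configuration properties rather than set by hand:

import org.hibernate.cache.infinispan.TypeOverrides;

public class ExpirationOverridesExample {
    public static void main(String[] args) {
        // Assumption: TypeOverrides lives in org.hibernate.cache.infinispan; values are illustrative only.
        TypeOverrides overrides = new TypeOverrides();
        overrides.setExpirationLifespan( 60000 ); // expire cluster-wide 60 s after the entry was cached
        overrides.setExpirationMaxIdle( 30000 );  // also expire after 30 s without being accessed
    }
}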


@@ -23,7 +23,7 @@ public interface AccessDelegate {
    Object get(SharedSessionContractImplementor session, Object key, long txTimestamp) throws CacheException;

    /**
-    * Attempt to cache an object, afterQuery loading from the database.
+    * Attempt to cache an object, after loading from the database.
     *
     * @param session Current session
     * @param key The item key
@@ -35,7 +35,7 @@ public interface AccessDelegate {
    boolean putFromLoad(SharedSessionContractImplementor session, Object key, Object value, long txTimestamp, Object version);

    /**
-    * Attempt to cache an object, afterQuery loading from the database, explicitly
+    * Attempt to cache an object, after loading from the database, explicitly
     * specifying the minimalPut behavior.
     *
     * @param session Current session.
@@ -51,7 +51,7 @@ public interface AccessDelegate {
          throws CacheException;

    /**
-    * Called afterQuery an item has been inserted (beforeQuery the transaction completes),
+    * Called after an item has been inserted (before the transaction completes),
     * instead of calling evict().
     *
     * @param session Current session
@@ -64,7 +64,7 @@ public interface AccessDelegate {
    boolean insert(SharedSessionContractImplementor session, Object key, Object value, Object version) throws CacheException;

    /**
-    * Called afterQuery an item has been updated (beforeQuery the transaction completes),
+    * Called after an item has been updated (before the transaction completes),
     * instead of calling evict().
     *
     * @param session Current session
@@ -79,7 +79,7 @@ public interface AccessDelegate {
          throws CacheException;

    /**
-    * Called afterQuery an item has become stale (beforeQuery the transaction completes).
+    * Called after an item has become stale (before the transaction completes).
     *
     * @param session Current session
     * @param key The key of the item to remove
@@ -113,7 +113,7 @@ public interface AccessDelegate {
    /**
     * Called when we have finished the attempted update/delete (which may or
-    * may not have been successful), afterQuery transaction completion. This method
+    * may not have been successful), after transaction completion. This method
     * is used by "asynchronous" concurrency strategies.
     *
     *
@@ -124,7 +124,7 @@ public interface AccessDelegate {
    void unlockItem(SharedSessionContractImplementor session, Object key) throws CacheException;

    /**
-    * Called afterQuery an item has been inserted (afterQuery the transaction completes),
+    * Called after an item has been inserted (after the transaction completes),
     * instead of calling release().
     * This method is used by "asynchronous" concurrency strategies.
     *
@@ -139,7 +139,7 @@ public interface AccessDelegate {
    boolean afterInsert(SharedSessionContractImplementor session, Object key, Object value, Object version);

    /**
-    * Called afterQuery an item has been updated (afterQuery the transaction completes),
+    * Called after an item has been updated (after the transaction completes),
     * instead of calling release(). This method is used by "asynchronous"
     * concurrency strategies.
     *
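Read together, the corrected Javadoc describes a two-phase protocol: insert/update are called before the transaction completes, while afterInsert/afterUpdate (and unlockItem for update/delete) run after completion. A minimal sketch of that ordering for an insert, using only the signatures visible in this hunk (the delegate instance and its arguments are assumed to be supplied by the caller):

import org.hibernate.cache.CacheException;
import org.hibernate.cache.infinispan.access.AccessDelegate;
import org.hibernate.engine.spi.SharedSessionContractImplementor;

// Sketch only: illustrates the documented call order for an entity insert, not a real caller.
public final class AccessDelegateInsertFlow {
    public static void insertFlow(AccessDelegate delegate, SharedSessionContractImplementor session,
            Object key, Object value, Object version) throws CacheException {
        // after the row has been inserted in the database, before the transaction completes
        delegate.insert( session, key, value, version );

        // ... the transaction commits here ...

        // after the transaction completes; used by "asynchronous" concurrency strategies
        delegate.afterInsert( session, key, value, version );
    }
}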


@@ -71,7 +71,7 @@ public abstract class InvalidationCacheAccessDelegate implements AccessDelegate
    }

    /**
-    * Attempt to cache an object, afterQuery loading from the database, explicitly
+    * Attempt to cache an object, after loading from the database, explicitly
     * specifying the minimalPut behavior.
     *
     * @param session Current session


@@ -9,7 +9,7 @@ package org.hibernate.cache.infinispan.access;
 import java.util.UUID;

 /**
- * Synchronization that should release the locks afterQuery invalidation is complete.
+ * Synchronization that should release the locks after invalidation is complete.
  *
  * @author Radim Vansa <rvansa@redhat.com>
  */


@@ -24,9 +24,9 @@ import org.infinispan.configuration.cache.Configuration;
 import org.infinispan.context.Flag;

 /**
- * Access delegate that relaxes the consistency a bit: stale reads are prohibited only afterQuery the transaction
+ * Access delegate that relaxes the consistency a bit: stale reads are prohibited only after the transaction
  * commits. This should also be able to work with async caches, and that would allow the replication delay
- * even afterQuery the commit.
+ * even after the commit.
  *
  * @author Radim Vansa <rvansa@redhat.com>
  */
@@ -80,7 +80,7 @@ public class NonStrictAccessDelegate implements AccessDelegate {
    public boolean putFromLoad(SharedSessionContractImplementor session, Object key, Object value, long txTimestamp, Object version, boolean minimalPutOverride) throws CacheException {
       long lastRegionInvalidation = region.getLastRegionInvalidation();
       if (txTimestamp < lastRegionInvalidation) {
-         log.tracef("putFromLoad not executed since tx started at %d, beforeQuery last region invalidation finished = %d", txTimestamp, lastRegionInvalidation);
+         log.tracef("putFromLoad not executed since tx started at %d, before last region invalidation finished = %d", txTimestamp, lastRegionInvalidation);
          return false;
       }
       assert version != null;


@@ -35,7 +35,7 @@ public class NonTxInvalidationCacheAccessDelegate extends InvalidationCacheAcces
       }

       // We need to be invalidating even for regular writes; if we were not and the write was followed by eviction
-      // (or any other invalidation), naked put that was started afterQuery the eviction ended but beforeQuery this insert
+      // (or any other invalidation), naked put that was started after the eviction ended but before this insert
       // ended could insert the stale entry into the cache (since the entry was removed by eviction).
       if ( !putValidator.beginInvalidatingWithPFER(session, key, value)) {
          throw log.failedInvalidatePendingPut(key, region.getName());
@@ -59,7 +59,7 @@ public class NonTxInvalidationCacheAccessDelegate extends InvalidationCacheAcces
       // be informed of the change.
       // We need to be invalidating even for regular writes; if we were not and the write was followed by eviction
-      // (or any other invalidation), naked put that was started afterQuery the eviction ended but beforeQuery this update
+      // (or any other invalidation), naked put that was started after the eviction ended but before this update
       // ended could insert the stale entry into the cache (since the entry was removed by eviction).
       if ( !putValidator.beginInvalidatingWithPFER(session, key, value)) {
          throw log.failedInvalidatePendingPut(key, region.getName());
@@ -76,7 +76,7 @@ public class NonTxInvalidationCacheAccessDelegate extends InvalidationCacheAcces
    protected boolean isCommitted(SharedSessionContractImplementor session) {
       if (session.isClosed()) {
-         // If the session has been closed beforeQuery transaction ends, so we cannot find out
+         // If the session has been closed before transaction ends, so we cannot find out
          // if the transaction was successful and if we can do the PFER.
          // As this can happen only in JTA environment, we can query the TransactionManager
          // directly here.


@@ -27,7 +27,7 @@ import java.util.List;
 /**
  * Non-transactional counterpart of {@link TxPutFromLoadInterceptor}.
  * Invokes {@link PutFromLoadValidator#beginInvalidatingKey(Object, Object)} for each invalidation from
- * remote node ({@link BeginInvalidationCommand} and sends {@link EndInvalidationCommand} afterQuery the transaction
+ * remote node ({@link BeginInvalidationCommand} and sends {@link EndInvalidationCommand} after the transaction
  * is complete, with help of {@link InvalidationSynchronization};
  *
  * @author Radim Vansa &lt;rvansa@redhat.com&gt;


@@ -73,10 +73,10 @@ import org.infinispan.manager.EmbeddedCacheManager;
  * This class also supports the concept of "naked puts", which are calls to
  * {@link #acquirePutFromLoadLock(SharedSessionContractImplementor, Object, long)} without a preceding {@link #registerPendingPut(SharedSessionContractImplementor, Object, long)}.
  * Besides not acquiring lock in {@link #registerPendingPut(SharedSessionContractImplementor, Object, long)} this can happen when collection
- * elements are loaded afterQuery the collection has not been found in the cache, where the elements
+ * elements are loaded after the collection has not been found in the cache, where the elements
  * don't have their own table but can be listed as 'select ... from Element where collection_id = ...'.
  * Naked puts are handled according to txTimestamp obtained by calling {@link RegionFactory#nextTimestamp()}
- * beforeQuery the transaction is started. The timestamp is compared with timestamp of last invalidation end time
+ * before the transaction is started. The timestamp is compared with timestamp of last invalidation end time
  * and the write to the cache is denied if it is lower or equal.
  * </p>
  *
@@ -88,7 +88,7 @@ public class PutFromLoadValidator {
    private static final boolean trace = log.isTraceEnabled();

    /**
-    * Period afterQuery which ongoing invalidation is removed. Value is retrieved from cache configuration.
+    * Period after which ongoing invalidation is removed. Value is retrieved from cache configuration.
     */
    private final long expirationPeriod;
@@ -109,7 +109,7 @@ public class PutFromLoadValidator {
    private final NonTxPutFromLoadInterceptor nonTxPutFromLoadInterceptor;

    /**
-    * The time of the last call to {@link #endInvalidatingRegion()}. Puts from transactions started afterQuery
+    * The time of the last call to {@link #endInvalidatingRegion()}. Puts from transactions started after
     * this timestamp are denied.
     */
    private volatile long regionInvalidationTimestamp = Long.MIN_VALUE;
@@ -165,7 +165,7 @@ public class PutFromLoadValidator {
       List<CommandInterceptor> interceptorChain = cache.getInterceptorChain();
       log.debug("Interceptor chain was: " + interceptorChain);
       int position = 0;
-      // add interceptor beforeQuery uses exact match, not instanceof match
+      // add interceptor before uses exact match, not instanceof match
       int invalidationPosition = 0;
       int entryWrappingPosition = 0;
       for (CommandInterceptor ci : interceptorChain) {
@@ -184,7 +184,7 @@ public class PutFromLoadValidator {
          cache.getComponentRegistry().registerComponent(txInvalidationInterceptor, TxInvalidationInterceptor.class);
          cache.addInterceptor(txInvalidationInterceptor, invalidationPosition);

-         // Note that invalidation does *NOT* acquire locks; therefore, we have to start invalidating beforeQuery
+         // Note that invalidation does *NOT* acquire locks; therefore, we have to start invalidating before
          // wrapping the entry, since if putFromLoad was invoked between wrap and beginInvalidatingKey, the invalidation
          // would not commit the entry removal (as during wrap the entry was not in cache)
          TxPutFromLoadInterceptor txPutFromLoadInterceptor = new TxPutFromLoadInterceptor(this, cache.getName());
@@ -309,7 +309,7 @@ public class PutFromLoadValidator {
         // we need this check since registerPendingPut (creating new pp) can get between invalidation
         // and naked put caused by the invalidation
         else if (pending.lastInvalidationEnd != Long.MIN_VALUE) {
-           // if this transaction started afterQuery last invalidation we can continue
+           // if this transaction started after last invalidation we can continue
            valid = txTimestamp > pending.lastInvalidationEnd;
         }
         else {
@@ -401,7 +401,7 @@ public class PutFromLoadValidator {
    * Invalidates all {@link #registerPendingPut(SharedSessionContractImplementor, Object, long) previously registered pending puts} ensuring a subsequent call to
    * {@link #acquirePutFromLoadLock(SharedSessionContractImplementor, Object, long)} will return <code>false</code>. <p> This method will block until any
    * concurrent thread that has {@link #acquirePutFromLoadLock(SharedSessionContractImplementor, Object, long) acquired the putFromLoad lock} for the any key has
-   * released the lock. This allows the caller to be certain the putFromLoad will not execute afterQuery this method returns,
+   * released the lock. This allows the caller to be certain the putFromLoad will not execute after this method returns,
    * possibly caching stale data. </p>
    *
    * @return <code>true</code> if the invalidation was successful; <code>false</code> if a problem occurred (which the
@@ -422,9 +422,9 @@ public class PutFromLoadValidator {
      try {
         // Acquire the lock for each entry to ensure any ongoing
-        // work associated with it is completed beforeQuery we return
+        // work associated with it is completed before we return
         // We cannot erase the map: if there was ongoing invalidation and we removed it, registerPendingPut
-        // started afterQuery that would have no way of finding out that the entity *is* invalidated (it was
+        // started after that would have no way of finding out that the entity *is* invalidated (it was
         // removed from the cache and now the DB is about to be updated).
         for (Iterator<PendingPutMap> it = pendingPuts.values().iterator(); it.hasNext(); ) {
            PendingPutMap entry = it.next();
@@ -533,7 +533,7 @@ public class PutFromLoadValidator {
    * and disables further registrations ensuring a subsequent call to {@link #acquirePutFromLoadLock(SharedSessionContractImplementor, Object, long)}
    * will return <code>false</code>. <p> This method will block until any concurrent thread that has
    * {@link #acquirePutFromLoadLock(SharedSessionContractImplementor, Object, long) acquired the putFromLoad lock} for the given key
-   * has released the lock. This allows the caller to be certain the putFromLoad will not execute afterQuery this method
+   * has released the lock. This allows the caller to be certain the putFromLoad will not execute after this method
    * returns, possibly caching stale data. </p>
    * After this transaction completes, {@link #endInvalidatingKey(Object, Object)} needs to be called }
    *
@@ -585,7 +585,7 @@ public class PutFromLoadValidator {
    }

    /**
-    * Called afterQuery the transaction completes, allowing caching of entries. It is possible that this method
+    * Called after the transaction completes, allowing caching of entries. It is possible that this method
     * is called without previous invocation of {@link #beginInvalidatingKey(Object, Object)}, then it should be a no-op.
     *
     * @param lockOwner owner of the invalidation - transaction or thread
@@ -792,7 +792,7 @@ public class PutFromLoadValidator {
    * are not accessed frequently; when these are accessed, we have to do the housekeeping
    * internally to prevent unlimited growth of the map.
    * The pending puts will get their timestamps when the map reaches {@link #GC_THRESHOLD}
-   * entries; afterQuery expiration period these will be removed completely either through
+   * entries; after expiration period these will be removed completely either through
    * invalidation or when we try to register next pending put.
    */
   private void gc() {
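To make the pending-put protocol described above concrete, here is a minimal sketch built only from the methods referenced in this file; it assumes acquirePutFromLoadLock returns null when the put must be denied, and it omits the matching lock-release call that follows the cache write:

import org.hibernate.cache.infinispan.access.PutFromLoadValidator;
import org.hibernate.engine.spi.SharedSessionContractImplementor;

// Sketch only: the flow a putFromLoad caller is expected to follow.
public final class PendingPutFlow {
    public static boolean tryCacheAfterLoad(PutFromLoadValidator validator,
            SharedSessionContractImplementor session, Object key, long txTimestamp) {
        // register right after the cache miss; txTimestamp must be taken before the transaction starts
        validator.registerPendingPut( session, key, txTimestamp );

        // ... the entity is loaded from the database here ...

        PutFromLoadValidator.Lock lock = validator.acquirePutFromLoadLock( session, key, txTimestamp );
        if ( lock == null ) {
            // an invalidation ran in between; caching the loaded value could publish stale data
            return false;
        }
        // the cache write (e.g. putForExternalRead) would go here, followed by releasing the lock
        return true;
    }
}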


@@ -79,7 +79,7 @@ public class TombstoneAccessDelegate implements AccessDelegate {
    public boolean putFromLoad(SharedSessionContractImplementor session, Object key, Object value, long txTimestamp, Object version, boolean minimalPutOverride) throws CacheException {
       long lastRegionInvalidation = region.getLastRegionInvalidation();
       if (txTimestamp < lastRegionInvalidation) {
-         log.tracef("putFromLoad not executed since tx started at %d, beforeQuery last region invalidation finished = %d", txTimestamp, lastRegionInvalidation);
+         log.tracef("putFromLoad not executed since tx started at %d, before last region invalidation finished = %d", txTimestamp, lastRegionInvalidation);
          return false;
       }
       if (minimalPutOverride) {
@@ -88,7 +88,7 @@ public class TombstoneAccessDelegate implements AccessDelegate {
             Tombstone tombstone = (Tombstone) prev;
             long lastTimestamp = tombstone.getLastTimestamp();
             if (txTimestamp <= lastTimestamp) {
-               log.tracef("putFromLoad not executed since tx started at %d, beforeQuery last invalidation finished = %d", txTimestamp, lastTimestamp);
+               log.tracef("putFromLoad not executed since tx started at %d, before last invalidation finished = %d", txTimestamp, lastTimestamp);
                return false;
             }
          }


@@ -29,7 +29,7 @@ public class TxInvalidationCacheAccessDelegate extends InvalidationCacheAccessDe
       }

       // We need to be invalidating even for regular writes; if we were not and the write was followed by eviction
-      // (or any other invalidation), naked put that was started afterQuery the eviction ended but beforeQuery this insert
+      // (or any other invalidation), naked put that was started after the eviction ended but before this insert
       // ended could insert the stale entry into the cache (since the entry was removed by eviction).
       if ( !putValidator.beginInvalidatingKey(session, key)) {
          throw log.failedInvalidatePendingPut(key, region.getName());
@@ -53,7 +53,7 @@ public class TxInvalidationCacheAccessDelegate extends InvalidationCacheAccessDe
       // be informed of the change.
       // We need to be invalidating even for regular writes; if we were not and the write was followed by eviction
-      // (or any other invalidation), naked put that was started afterQuery the eviction ended but beforeQuery this update
+      // (or any other invalidation), naked put that was started after the eviction ended but before this update
       // ended could insert the stale entry into the cache (since the entry was removed by eviction).
       if ( !putValidator.beginInvalidatingKey(session, key)) {
          log.failedInvalidatePendingPut(key, region.getName());


@@ -91,7 +91,7 @@ public class TxInvalidationInterceptor extends BaseInvalidationInterceptor {
    public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) throws Throwable {
       Object retval = invokeNextInterceptor( ctx, command );
       log.tracef( "Entering InvalidationInterceptor's prepare phase. Ctx flags are empty" );
-      // fetch the modifications beforeQuery the transaction is committed (and thus removed from the txTable)
+      // fetch the modifications before the transaction is committed (and thus removed from the txTable)
       if ( shouldInvokeRemoteTxCommand( ctx ) ) {
          if ( ctx.getTransaction() == null ) {
             throw new IllegalStateException( "We must have an associated transaction" );


@@ -33,8 +33,8 @@ import org.infinispan.statetransfer.StateTransferManager;
 /**
  * Intercepts transactions in Infinispan, calling {@link PutFromLoadValidator#beginInvalidatingKey(Object, Object)}
- * beforeQuery locks are acquired (and the entry is invalidated) and sends {@link EndInvalidationCommand} to release
- * invalidation throught {@link PutFromLoadValidator#endInvalidatingKey(Object, Object)} afterQuery the transaction
+ * before locks are acquired (and the entry is invalidated) and sends {@link EndInvalidationCommand} to release
+ * invalidation throught {@link PutFromLoadValidator#endInvalidatingKey(Object, Object)} after the transaction
  * is committed.
  *
  * @author Radim Vansa &lt;rvansa@redhat.com&gt;
@@ -68,14 +68,14 @@ class TxPutFromLoadInterceptor extends BaseRpcInterceptor {
    }

    // We need to intercept PrepareCommand, not InvalidateCommand since the interception takes
-   // place beforeQuery EntryWrappingInterceptor and the PrepareCommand is multiplexed into InvalidateCommands
+   // place before EntryWrappingInterceptor and the PrepareCommand is multiplexed into InvalidateCommands
    // as part of EntryWrappingInterceptor
    @Override
    public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) throws Throwable {
       if (ctx.isOriginLocal()) {
          // We can't wait to commit phase to remove the entry locally (invalidations are processed in 1pc
          // on remote nodes, so only local case matters here). The problem is that while the entry is locked
-         // reads still can take place and we can read outdated collection afterQuery reading updated entity
+         // reads still can take place and we can read outdated collection after reading updated entity
          // owning this collection from DB; when this happens, the version lock on entity cannot protect
          // us against concurrent modification of the collection. Therefore, we need to remove the entry
          // here (even without lock!) and let possible update happen in commit phase.


@@ -14,7 +14,7 @@ import org.infinispan.commands.remote.BaseRpcCommand;
 import org.infinispan.context.InvocationContext;

 /**
- * Sent in commit phase (afterQuery DB commit) to remote nodes in order to stop invalidating
+ * Sent in commit phase (after DB commit) to remote nodes in order to stop invalidating
  * putFromLoads.
  *
  * @author Radim Vansa &lt;rvansa@redhat.com&gt;


@@ -646,7 +646,7 @@ public class InfinispanRegionFactoryTestCase {
         else
            m = super.createCacheManager(properties, serviceRegistry);
         // since data type cache configuration templates are defined when cache manager is created,
-        // we have to use hooks and set the configuration beforeQuery the whole factory starts
+        // we have to use hooks and set the configuration before the whole factory starts
         if (afterCacheManagerCreated != null) {
            afterCacheManagerCreated.accept(this, m);
         }


@@ -159,7 +159,7 @@ public class PutFromLoadValidatorUnitTest {
         public void call() {
            PutFromLoadValidator testee = new PutFromLoadValidator(cm.getCache().getAdvancedCache(), regionFactory(cm));
            Invalidation invalidation = new Invalidation(testee, removeRegion);
-           // the naked put can succeed because it has txTimestamp afterQuery invalidation
+           // the naked put can succeed because it has txTimestamp after invalidation
            NakedPut nakedPut = new NakedPut(testee, true);
            exec(transactional, invalidation, nakedPut);
         }
@@ -469,7 +469,7 @@ public class PutFromLoadValidatorUnitTest {
         @Override
         public Void call() throws Exception {
            try {
-              long txTimestamp = System.currentTimeMillis(); // this should be acquired beforeQuery UserTransaction.begin()
+              long txTimestamp = System.currentTimeMillis(); // this should be acquired before UserTransaction.begin()
               SharedSessionContractImplementor session = mock (SharedSessionContractImplementor.class);
               putFromLoadValidator.registerPendingPut(session, KEY1, txTimestamp);
@@ -500,7 +500,7 @@ public class PutFromLoadValidatorUnitTest {
         @Override
         public Void call() throws Exception {
            try {
-              long txTimestamp = System.currentTimeMillis(); // this should be acquired beforeQuery UserTransaction.begin()
+              long txTimestamp = System.currentTimeMillis(); // this should be acquired before UserTransaction.begin()
               SharedSessionContractImplementor session = mock (SharedSessionContractImplementor.class);
               PutFromLoadValidator.Lock lock = testee.acquirePutFromLoadLock(session, KEY1, txTimestamp);
               try {


@@ -149,7 +149,7 @@ public class CollectionRegionAccessStrategyTest extends
         try {
            removeLatch.countDown();
            // the remove should be blocked because the putFromLoad has been acquired
-           // and the remove can continue only afterQuery we've inserted the entry
+           // and the remove can continue only after we've inserted the entry
            assertFalse(pferLatch.await( 2, TimeUnit.SECONDS ) );
         }
         catch (InterruptedException e) {


@@ -316,7 +316,7 @@ public class EntityRegionAccessStrategyTest extends
            log.debug("Read latch acquired, verify local access strategy");

            // This won't block w/ mvc and will read the old value (if transactional as the transaction
-           // is not being committed yet, or if non-strict as we do the actual update only afterQuery transaction)
+           // is not being committed yet, or if non-strict as we do the actual update only after transaction)
            // or null if non-transactional
            Object expected = isTransactional() || accessType == AccessType.NONSTRICT_READ_WRITE ? VALUE1 : null;
            assertEquals("Correct value", expected, localAccessStrategy.get(session, KEY, session.getTimestamp()));


@@ -112,7 +112,7 @@ public abstract class AbstractNonInvalidationTest extends SingleNodeTest {
      return executor.submit(() -> withTxSessionApply(s -> {
         try {
            Item item = s.load(Item.class, id);
-           item.getName(); // force load & putFromLoad beforeQuery the barrier
+           item.getName(); // force load & putFromLoad before the barrier
            loadBarrier.await(WAIT_TIMEOUT, TimeUnit.SECONDS);
            s.delete(item);
            if (preFlushLatch != null) {
@@ -141,7 +141,7 @@ public abstract class AbstractNonInvalidationTest extends SingleNodeTest {
      return executor.submit(() -> withTxSessionApply(s -> {
         try {
            Item item = s.load(Item.class, id);
-           item.getName(); // force load & putFromLoad beforeQuery the barrier
+           item.getName(); // force load & putFromLoad before the barrier
            loadBarrier.await(WAIT_TIMEOUT, TimeUnit.SECONDS);
            item.setDescription("Updated item");
            s.update(item);


@@ -89,14 +89,14 @@ public class ConcurrentWriteTest extends SingleNodeTest {
    public void testSingleUser() throws Exception {
       // setup
       sessionFactory().getStatistics().clear();
-      // wait a while to make sure that timestamp comparison works afterQuery invalidateRegion
+      // wait a while to make sure that timestamp comparison works after invalidateRegion
       Thread.sleep(1);

       Customer customer = createCustomer( 0 );
       final Integer customerId = customer.getId();
       getCustomerIDs().add( customerId );

-      // wait a while to make sure that timestamp comparison works afterQuery collection remove (during insert)
+      // wait a while to make sure that timestamp comparison works after collection remove (during insert)
       Thread.sleep(1);

       assertNull( "contact exists despite not being added", getFirstContact( customerId ) );
@@ -120,17 +120,17 @@ public class ConcurrentWriteTest extends SingleNodeTest {
       final Contact contact = addContact( customerId );
       assertNotNull( "contact returned by addContact is null", contact );
       assertEquals(
-            "Customer.contacts cache was not invalidated afterQuery addContact", 0,
+            "Customer.contacts cache was not invalidated after addContact", 0,
             contactsCollectionSlcs.getElementCountInMemory()
       );

-      assertNotNull( "Contact missing afterQuery successful add call", getFirstContact( customerId ) );
+      assertNotNull( "Contact missing after successful add call", getFirstContact( customerId ) );

       // read everyone's contacts
       readEveryonesFirstContact();

       removeContact( customerId );
-      assertNull( "contact still exists afterQuery successful remove call", getFirstContact( customerId ) );
+      assertNull( "contact still exists after successful remove call", getFirstContact( customerId ) );
    }


@@ -91,8 +91,8 @@ public class InvalidationTest extends SingleNodeTest {
      Thread getThread = new Thread(() -> {
         try {
            withTxSession(s -> {
-              // DB load should happen beforeQuery the record is deleted,
-              // putFromLoad should happen afterQuery deleteThread ends
+              // DB load should happen before the record is deleted,
+              // putFromLoad should happen after deleteThread ends
               Item loadedItem = s.get(Item.class, item.getId());
               if (getThreadBlockedInDB.get()) {
                  assertNull(loadedItem);


@@ -477,7 +477,7 @@ public class ReadWriteTest extends ReadOnlyTest {
    public void testNaturalIdCached() throws Exception {
       saveSomeCitizens();

-      // Clear the cache beforeQuery the transaction begins
+      // Clear the cache before the transaction begins
       ReadWriteTest.this.cleanupCache();
       Thread.sleep(10);
@@ -533,7 +533,7 @@ public class ReadWriteTest extends ReadOnlyTest {
      assertEquals( "NaturalId Cache Puts", 2, stats.getNaturalIdCachePutCount() );
      assertEquals( "NaturalId Cache Queries", 0, stats.getNaturalIdQueryExecutionCount() );

-     //Try NaturalIdLoadAccess afterQuery insert
+     //Try NaturalIdLoadAccess after insert
      final Citizen citizen = withTxSessionApply(s -> {
         State france = ReadWriteTest.this.getState(s, "Ile de France");
         NaturalIdLoadAccess<Citizen> naturalIdLoader = s.byNaturalId(Citizen.class);
@@ -575,7 +575,7 @@ public class ReadWriteTest extends ReadOnlyTest {
         markRollbackOnly(s);
      });

-     // Try NaturalIdLoadAccess afterQuery load
+     // Try NaturalIdLoadAccess after load
      withTxSession(s -> {
         State france = ReadWriteTest.this.getState(s, "Ile de France");
         NaturalIdLoadAccess naturalIdLoader = s.byNaturalId(Citizen.class);


@@ -50,7 +50,7 @@ public class TombstoneTest extends AbstractNonInvalidationTest {
      first.get(WAIT_TIMEOUT, TimeUnit.SECONDS);
      second.get(WAIT_TIMEOUT, TimeUnit.SECONDS);

-     // afterQuery commit, the tombstone should still be in memory for some time (though, updatable)
+     // after commit, the tombstone should still be in memory for some time (though, updatable)
      contents = Caches.entrySet(entityCache).toMap();
      assertEquals(1, contents.size());
      assertEquals(Tombstone.class, contents.get(itemId).getClass());


@@ -45,7 +45,7 @@ import static org.junit.Assert.assertTrue;
  * Tests specific to versioned entries -based caches.
  * Similar to {@link TombstoneTest} but some cases have been removed since
  * we are modifying the cache only once, therefore some sequences of operations
- * would fail beforeQuery touching the cache.
+ * would fail before touching the cache.
  *
  * @author Radim Vansa &lt;rvansa@redhat.com&gt;
  */


@@ -106,7 +106,7 @@ public class SessionRefreshTest extends DualNodeTest {
      acct0 = dao0.getAccountWithRefresh( id );
      assertNotNull( acct0 );
      assertEquals( DualNodeTest.REMOTE, acct0.getBranch() );
-     log.debug( "Contents afterQuery refreshing in remote: " + TestingUtil.printCache( localCache ) );
+     log.debug( "Contents after refreshing in remote: " + TestingUtil.printCache( localCache ) );

      // Double check with a brand new session, in case the other session
      // for some reason bypassed the 2nd level cache
@@ -114,6 +114,6 @@ public class SessionRefreshTest extends DualNodeTest {
      Account acct0A = dao0A.getAccount( id );
      assertNotNull( acct0A );
      assertEquals( DualNodeTest.REMOTE, acct0A.getBranch() );
-     log.debug( "Contents afterQuery creating a new session: " + TestingUtil.printCache( localCache ) );
+     log.debug( "Contents after creating a new session: " + TestingUtil.printCache( localCache ) );
   }
 }


@@ -688,7 +688,7 @@ public abstract class CorrectnessTestCase {
            throw e;
         }
      }
-     // cannot close beforeQuery XA commit since force increment requires open connection
+     // cannot close before XA commit since force increment requires open connection
      // s.close();
   }


@@ -186,7 +186,7 @@ public class CacheTestUtil {
    /**
     * Periodically calls callable and compares returned value with expected value. If the value matches to expected,
     * the method returns. If callable throws an exception, this is propagated. If the returned value does not match to
-    * expected beforeQuery timeout, {@link TimeoutException} is thrown.
+    * expected before timeout, {@link TimeoutException} is thrown.
     * @param expected
     * @param callable
     * @param timeout If non-positive, there is no limit.
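The method signature itself is cut off in this hunk, so the following is a hypothetical stand-alone equivalent of the poll-until-match helper the Javadoc describes (the class and method names and the 50 ms poll interval are made up for illustration, not taken from CacheTestUtil):

import java.util.concurrent.Callable;
import java.util.concurrent.TimeoutException;

// Hypothetical equivalent of the helper documented above, not the actual CacheTestUtil method.
public final class AwaitValue {
    public static <T> void assertEventuallyEquals(T expected, Callable<T> callable, long timeoutMillis)
            throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        do {
            if ( expected.equals( callable.call() ) ) {
                return; // the returned value matched the expectation
            }
            Thread.sleep( 50 ); // poll interval chosen arbitrarily
        } while ( timeoutMillis <= 0 || System.currentTimeMillis() < deadline );
        throw new TimeoutException( "Value did not match " + expected + " within " + timeoutMillis + " ms" );
    }
}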


@@ -28,7 +28,7 @@ public final class TxUtil {
      Caches.withinTx(tm, () -> {
         withSession(sb, s -> {
            consumer.accept(s);
-           // we need to flush the session beforeQuery close when running with JTA transactions
+           // we need to flush the session before close when running with JTA transactions
            s.flush();
         });
         return null;