code format and simple improvement

Strong Liu 2012-02-03 05:34:39 +08:00
parent 13c9dbfc21
commit a512ede38b
6 changed files with 37 additions and 58 deletions

hibernate-core/src/main/java/org/hibernate/LockMode.java

@@ -94,8 +94,8 @@ public enum LockMode {
 	OPTIMISTIC( 6 ),
 	/**
-	 * Optimisticly assume that transaction will not experience contention for entities.
-	 * The entity version will be verified and incremented near the transaction end.
+	 * Optimisticly assume that transaction will not experience contention for
+	 * entities. The entity version will be verified and incremented near the transaction end.
 	 */
 	OPTIMISTIC_FORCE_INCREMENT( 7 ),

hibernate-core/src/main/java/org/hibernate/cache/internal/StandardQueryCache.java

@@ -93,23 +93,24 @@ public class StandardQueryCache implements QueryCache {
 			List result,
 			boolean isNaturalKeyLookup,
 			SessionImplementor session) throws HibernateException {
-		if ( isNaturalKeyLookup && result.size() == 0 ) {
+		if ( isNaturalKeyLookup && result.isEmpty() ) {
 			return false;
 		}
-		Long ts = session.getFactory().getSettings().getRegionFactory().nextTimestamp();
+		long ts = cacheRegion.nextTimestamp();
 		LOG.debugf( "Caching query results in region: %s; timestamp=%s", cacheRegion.getName(), ts );
 		List cacheable = new ArrayList( result.size() + 1 );
 		logCachedResultDetails( key, null, returnTypes, cacheable );
 		cacheable.add( ts );
+		final boolean singleResult = returnTypes.length == 1;
 		for ( Object aResult : result ) {
-			if ( returnTypes.length == 1 ) {
-				cacheable.add( returnTypes[0].disassemble( aResult, session, null ) );
-			}
-			else {
-				cacheable.add( TypeHelper.disassemble( (Object[]) aResult, returnTypes, null, session, null ) );
-			}
+			Serializable cacheItem = singleResult ? returnTypes[0].disassemble(
+					aResult,
+					session,
+					null
+			) : TypeHelper.disassemble( (Object[]) aResult, returnTypes, null, session, null );
+			cacheable.add( cacheItem );
 			logCachedResultRowDetails( returnTypes, aResult );
 		}
@@ -141,8 +142,9 @@ public class StandardQueryCache implements QueryCache {
 		}
 		LOG.debug( "Returning cached query results" );
+		final boolean singleResult = returnTypes.length == 1;
 		for ( int i = 1; i < cacheable.size(); i++ ) {
-			if ( returnTypes.length == 1 ) {
+			if ( singleResult ) {
 				returnTypes[0].beforeAssemble( (Serializable) cacheable.get( i ), session );
 			}
 			else {
@@ -152,7 +154,7 @@ public class StandardQueryCache implements QueryCache {
 		List result = new ArrayList( cacheable.size() - 1 );
 		for ( int i = 1; i < cacheable.size(); i++ ) {
 			try {
-				if ( returnTypes.length == 1 ) {
+				if ( singleResult ) {
 					result.add( returnTypes[0].assemble( (Serializable) cacheable.get( i ), session, null ) );
 				}
 				else {
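
All three StandardQueryCache hunks above depend on one layout invariant: the cacheable List stores the creation timestamp at index 0 and one disassembled row per element after it, which is why every read loop starts at i = 1. A minimal standalone sketch of that layout (names are assumed for illustration; this is not the Hibernate API):

import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;

class QueryCacheLayoutSketch {
	// element 0: creation timestamp; elements 1..n: one disassembled row each
	static List<Object> toCacheable(long ts, List<Serializable> disassembledRows) {
		List<Object> cacheable = new ArrayList<>( disassembledRows.size() + 1 );
		cacheable.add( ts );
		cacheable.addAll( disassembledRows );
		return cacheable;
	}

	static List<Serializable> fromCacheable(List<Object> cacheable) {
		List<Serializable> rows = new ArrayList<>( cacheable.size() - 1 );
		for ( int i = 1; i < cacheable.size(); i++ ) { // skip the timestamp slot
			rows.add( (Serializable) cacheable.get( i ) );
		}
		return rows;
	}
}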

hibernate-core/src/main/java/org/hibernate/cache/spi/UpdateTimestampsCache.java

@@ -75,7 +75,7 @@ public class UpdateTimestampsCache {
 		try {
 			Long ts = region.nextTimestamp() + region.getTimeout();
 			for ( Serializable space : spaces ) {
-				LOG.debugf( "Pre-invalidating space [%s]", space );
+				LOG.debugf( "Pre-invalidating space [%s], timestamp: %s", space, ts );
 				//put() has nowait semantics, is this really appropriate?
 				//note that it needs to be async replication, never local or sync
 				region.put( space, ts );
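
The timestamp added to the log line is the interesting value: pre-invalidation stamps each query space with nextTimestamp() plus the region timeout, i.e. a point in the future, so any result cached before the transaction completes reads as stale. A rough standalone sketch of that freshness protocol (assumed names, not Hibernate's API):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class TimestampsSketch {
	private final Map<String, Long> lastUpdate = new ConcurrentHashMap<>();

	// Stamp the space "into the future" so in-flight readers treat it as
	// dirty until the real invalidation arrives at transaction end.
	void preInvalidate(String space, long now, long timeout) {
		lastUpdate.put( space, now + timeout );
	}

	// A result cached at resultTs is fresh only if no space it depends on
	// was updated (or pre-invalidated) at or after that time.
	boolean isUpToDate(Iterable<String> spaces, long resultTs) {
		for ( String space : spaces ) {
			Long updated = lastUpdate.get( space );
			if ( updated != null && updated >= resultTs ) {
				return false;
			}
		}
		return true;
	}
}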

hibernate-core/src/main/java/org/hibernate/internal/util/collections/…

@@ -27,8 +27,6 @@ package org.hibernate.internal.util.collections;
 import java.io.IOException;
 import java.io.Serializable;
 import org.apache.commons.collections.map.LRUMap;
 /**
  * Cache following a "Most Recently Used" (MRU) algorithm for maintaining a
  * bounded in-memory size; the "Least Recently Used" (LRU) entry is the first
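
For orientation, the javadoc above describes a bounded MRU cache whose least recently used entry is evicted first; the import suggests the class delegates to the commons-collections LRUMap. The same contract can be sketched in a few lines with an access-ordered LinkedHashMap (illustration only):

import java.util.LinkedHashMap;
import java.util.Map;

class MRUCacheSketch<K, V> extends LinkedHashMap<K, V> {
	private final int maxEntries;

	MRUCacheSketch(int maxEntries) {
		super( 16, 0.75f, true ); // true = access order: gets count as "use"
		this.maxEntries = maxEntries;
	}

	@Override
	protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
		return size() > maxEntries; // eldest == least recently used entry
	}
}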

hibernate-core/src/main/java/org/hibernate/loader/Loader.java

@@ -238,7 +238,7 @@ public abstract class Loader {
 			return sql;
 		}
 		else {
-			return new StringBuffer( comment.length() + sql.length() + 5 )
+			return new StringBuilder( comment.length() + sql.length() + 5 )
 					.append( "/* " )
 					.append( comment )
 					.append( " */ " )
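
The StringBuffer-to-StringBuilder swap is safe here because the builder never escapes the method: StringBuilder is the unsynchronized drop-in replacement, and a thread-confined local needs no locking. The helper boils down to this (standalone sketch, hypothetical name):

class SqlCommentSketch {
	static String prependComment(String sql, String comment) {
		// sized up front to avoid reallocation; grows automatically if short
		return new StringBuilder( comment.length() + sql.length() + 5 )
				.append( "/* " )
				.append( comment )
				.append( " */ " )
				.append( sql )
				.toString();
	}
}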
@@ -819,7 +819,7 @@ public abstract class Loader {
 		final RowSelection selection = queryParameters.getRowSelection();
 		final int maxRows = hasMaxRows( selection ) ?
-				selection.getMaxRows().intValue() :
+				selection.getMaxRows() :
 				Integer.MAX_VALUE;
 		final int entitySpan = getEntityPersisters().length;
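
This hunk and the similar .intValue() removals further down rely on auto-unboxing, which the compiler has inserted automatically since Java 5; the one hazard is a null wrapper, which is exactly what guards like hasMaxRows( selection ) rule out. A tiny illustration:

class UnboxingSketch {
	public static void main(String[] args) {
		Integer boxed = Integer.valueOf( 42 );
		int plain = boxed;      // compiler emits boxed.intValue() for you
		System.out.println( plain );
		Integer absent = null;
		// int boom = absent;   // would throw NullPointerException: hence the null guards
	}
}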
@@ -841,18 +841,12 @@ public abstract class Loader {
 		final List results = new ArrayList();
 		try {
 			handleEmptyCollections( queryParameters.getCollectionKeys(), rs, session );
 			EntityKey[] keys = new EntityKey[entitySpan]; //we can reuse it for each row
 			LOG.trace( "Processing result set" );
 			int count;
 			for ( count = 0; count < maxRows && rs.next(); count++ ) {
 				LOG.debugf( "Result set row: %s", count );
 				Object result = getRowFromResultSet(
 						rs,
 						session,
@@ -865,12 +859,10 @@ public abstract class Loader {
 						forcedResultTransformer
 				);
 				results.add( result );
 				if ( createSubselects ) {
 					subselectResultKeys.add(keys);
 					keys = new EntityKey[entitySpan]; //can't reuse in this case
 				}
 			}
 			LOG.tracev( "Done processing result set ({0} rows)", count );
@@ -879,12 +871,9 @@ public abstract class Loader {
 		finally {
 			st.close();
 		}
 		initializeEntitiesAndCollections( hydratedObjects, rs, session, queryParameters.isReadOnly( session ) );
 		if ( createSubselects ) createSubselects( subselectResultKeys, queryParameters, session );
-		return results; //getResultList(results);
+		return results;
 	}
@@ -1662,12 +1651,7 @@ public abstract class Loader {
 	}
 	private static int getFirstRow(RowSelection selection) {
-		if ( selection == null || selection.getFirstRow() == null ) {
-			return 0;
-		}
-		else {
-			return selection.getFirstRow().intValue();
-		}
+		return ( selection == null || selection.getFirstRow() == null ) ? 0 : selection.getFirstRow();
 	}
 	private int interpretFirstRow(int zeroBasedFirstResult) {
@@ -1733,10 +1717,7 @@ public abstract class Loader {
 		sql = preprocessSQL( sql, queryParameters, dialect );
-		PreparedStatement st = null;
-		st = session.getTransactionCoordinator().getJdbcCoordinator().getStatementPreparer().prepareQueryStatement(
+		PreparedStatement st = session.getTransactionCoordinator().getJdbcCoordinator().getStatementPreparer().prepareQueryStatement(
 				sql,
 				callable,
 				scrollMode
@@ -1765,10 +1746,10 @@ public abstract class Loader {
 		if ( selection != null ) {
 			if ( selection.getTimeout() != null ) {
-				st.setQueryTimeout( selection.getTimeout().intValue() );
+				st.setQueryTimeout( selection.getTimeout() );
 			}
 			if ( selection.getFetchSize() != null ) {
-				st.setFetchSize( selection.getFetchSize().intValue() );
+				st.setFetchSize( selection.getFetchSize() );
 			}
 		}
@@ -1776,9 +1757,17 @@ public abstract class Loader {
 		LockOptions lockOptions = queryParameters.getLockOptions();
 		if ( lockOptions != null ) {
 			if ( lockOptions.getTimeOut() != LockOptions.WAIT_FOREVER ) {
-				if (!dialect.supportsLockTimeouts()) LOG.debugf("Lock timeout [%s] requested but dialect reported to not support lock timeouts",
-						lockOptions.getTimeOut());
-				else if (dialect.isLockTimeoutParameterized()) st.setInt(col++, lockOptions.getTimeOut());
+				if ( !dialect.supportsLockTimeouts() ) {
+					if ( LOG.isDebugEnabled() ) {
+						LOG.debugf(
+								"Lock timeout [%s] requested but dialect reported to not support lock timeouts",
+								lockOptions.getTimeOut()
+						);
+					}
+				}
+				else if ( dialect.isLockTimeoutParameterized() ) {
+					st.setInt( col++, lockOptions.getTimeOut() );
+				}
 			}
 		}
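
Beyond the brace cleanup, the new isDebugEnabled() check means the format string and its boxed int argument are never built when debug logging is off, which matters on a per-query code path. The same guard pattern with JDK logging, as a self-contained illustration:

import java.util.logging.Level;
import java.util.logging.Logger;

class LogGuardSketch {
	private static final Logger LOG = Logger.getLogger( LogGuardSketch.class.getName() );

	static void reportUnsupportedTimeout(int timeout) {
		if ( LOG.isLoggable( Level.FINE ) ) {
			// formatting and boxing happen only on this branch
			LOG.fine( String.format( "Lock timeout [%d] requested but dialect does not support lock timeouts", timeout ) );
		}
	}
}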
@@ -1807,13 +1796,8 @@ public abstract class Loader {
 	 */
 	private static int getMaxOrLimit(final RowSelection selection, final Dialect dialect) {
 		final int firstRow = dialect.convertToFirstRowValue( getFirstRow( selection ) );
-		final int lastRow = selection.getMaxRows().intValue();
-		if ( dialect.useMaxForLimit() ) {
-			return lastRow + firstRow;
-		}
-		else {
-			return lastRow;
-		}
+		final int lastRow = selection.getMaxRows();
+		return dialect.useMaxForLimit() ? lastRow + firstRow : lastRow;
 	}
 	/**
@@ -1854,7 +1838,7 @@ public abstract class Loader {
 			final PreparedStatement st,
 			final RowSelection selection) throws SQLException {
 		if ( hasMaxRows( selection ) ) {
-			st.setMaxRows( selection.getMaxRows() + interpretFirstRow( getFirstRow( selection ) ) );
 		}
 	}
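
The two limit hunks above share one piece of arithmetic: a dialect whose limit clause names the index of the last row to return (useMaxForLimit()) must receive first row plus page size, while a dialect expecting a plain row count receives the page size alone. A worked sketch with hypothetical values:

class LimitSketch {
	static int maxOrLimit(boolean useMaxForLimit, int firstRow, int maxRows) {
		return useMaxForLimit ? maxRows + firstRow : maxRows;
	}

	public static void main(String[] args) {
		System.out.println( maxOrLimit( true, 10, 20 ) );  // 30: "rows up to index 30"
		System.out.println( maxOrLimit( false, 10, 20 ) ); // 20: "20 rows from the offset"
	}
}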

hibernate-core/src/main/java/org/hibernate/persister/entity/AbstractPropertyMapping.java

@@ -292,11 +292,6 @@ public abstract class AbstractPropertyMapping implements PropertyMapping {
 	}
 	private static String extendPath(String path, String property) {
-		if ( path==null || "".equals(path) ) {
-			return property;
-		}
-		else {
-			return StringHelper.qualify(path, property);
-		}
+		return StringHelper.isEmpty( path ) ? property : StringHelper.qualify( path, property );
 	}
 }
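
The condensed extendPath folds the null and empty-string cases that the old if/else handled separately; behavior is unchanged. A standalone equivalent with StringHelper.isEmpty and StringHelper.qualify inlined for illustration:

class PathSketch {
	static String extendPath(String path, String property) {
		return ( path == null || path.isEmpty() ) ? property : path + '.' + property;
	}

	public static void main(String[] args) {
		System.out.println( extendPath( null, "name" ) );      // name
		System.out.println( extendPath( "", "name" ) );        // name
		System.out.println( extendPath( "address", "city" ) ); // address.city
	}
}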