From 39b0774ae36f81643a0026b39db5a799220d302c Mon Sep 17 00:00:00 2001
From: Steve Ebersole
Date: Wed, 18 Aug 2010 17:05:44 +0000
Subject: [PATCH] HHH-5469 - HHH-3659 is only half done, due to HHH-4989 (i.e. no HQL performance log when running Java 5)

git-svn-id: https://svn.jboss.org/repos/hibernate/core/trunk@20161 1b8cb986-b30d-0410-93ca-fae66ebed9b2
---
 .../stat/ConcurrentStatisticsImpl.java | 424 ++++++++++--------
 1 file changed, 239 insertions(+), 185 deletions(-)

diff --git a/core/src/main/java/org/hibernate/stat/ConcurrentStatisticsImpl.java b/core/src/main/java/org/hibernate/stat/ConcurrentStatisticsImpl.java
index dbcf774920..fbe912a89c 100644
--- a/core/src/main/java/org/hibernate/stat/ConcurrentStatisticsImpl.java
+++ b/core/src/main/java/org/hibernate/stat/ConcurrentStatisticsImpl.java
@@ -1,3 +1,26 @@
+/*
+ * Hibernate, Relational Persistence for Idiomatic Java
+ *
+ * Copyright (c) 2010, Red Hat Inc. or third-party contributors as
+ * indicated by the @author tags or express copyright attribution
+ * statements applied by the authors. All third-party contributions are
+ * distributed under license by Red Hat Inc.
+ *
+ * This copyrighted material is made available to anyone wishing to use, modify,
+ * copy, or redistribute it subject to the terms and conditions of the GNU
+ * Lesser General Public License, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this distribution; if not, write to:
+ * Free Software Foundation, Inc.
+ * 51 Franklin Street, Fifth Floor
+ * Boston, MA 02110-1301 USA
+ */
 package org.hibernate.stat;
 
 import java.util.concurrent.ConcurrentHashMap;
@@ -6,58 +29,60 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.hibernate.cache.Region;
 import org.hibernate.engine.SessionFactoryImplementor;
 import org.hibernate.util.ArrayHelper;
 
 /**
+ * Implementation of {@link Statistics}, as well as {@link StatisticsImplementor}, based on the
+ * {@link java.util.concurrent} package introduced in Java 5.
+ *
  * @author Alex Snaps
- * @see org.hibernate.stat.Statistics
  */
+@SuppressWarnings({ "unchecked" })
 public class ConcurrentStatisticsImpl implements Statistics, StatisticsImplementor {
+    private static final Logger log = LoggerFactory.getLogger( ConcurrentStatisticsImpl.class );
+    private static final Logger PERF_LOG = LoggerFactory.getLogger( Statistics.class );
 
-    //TODO: we should provide some way to get keys of collection of statistics to make it easier to retrieve from a GUI perspective
+    private SessionFactoryImplementor sessionFactory;
 
-    private static final Logger log = LoggerFactory.getLogger(ConcurrentStatisticsImpl.class);
+    private volatile boolean isStatisticsEnabled;
+    private volatile long startTime;
+    private AtomicLong sessionOpenCount = new AtomicLong();
+    private AtomicLong sessionCloseCount = new AtomicLong();
+    private AtomicLong flushCount = new AtomicLong();
+    private AtomicLong connectCount = new AtomicLong();
 
-    private SessionFactoryImplementor sessionFactory;
+    private AtomicLong prepareStatementCount = new AtomicLong();
+    private AtomicLong closeStatementCount = new AtomicLong();
 
-    private volatile boolean isStatisticsEnabled;
-    private volatile long startTime;
-    private AtomicLong sessionOpenCount = new AtomicLong();
-    private AtomicLong sessionCloseCount = new AtomicLong();
-    private AtomicLong flushCount = new AtomicLong();
-    private AtomicLong connectCount = new AtomicLong();
+    private AtomicLong entityLoadCount = new AtomicLong();
+    private AtomicLong entityUpdateCount = new AtomicLong();
+    private AtomicLong entityInsertCount = new AtomicLong();
+    private AtomicLong entityDeleteCount = new AtomicLong();
+    private AtomicLong entityFetchCount = new AtomicLong();
+    private AtomicLong collectionLoadCount = new AtomicLong();
+    private AtomicLong collectionUpdateCount = new AtomicLong();
+    private AtomicLong collectionRemoveCount = new AtomicLong();
+    private AtomicLong collectionRecreateCount = new AtomicLong();
+    private AtomicLong collectionFetchCount = new AtomicLong();
 
-    private AtomicLong prepareStatementCount = new AtomicLong();
-    private AtomicLong closeStatementCount = new AtomicLong();
+    private AtomicLong secondLevelCacheHitCount = new AtomicLong();
+    private AtomicLong secondLevelCacheMissCount = new AtomicLong();
+    private AtomicLong secondLevelCachePutCount = new AtomicLong();
 
-    private AtomicLong entityLoadCount = new AtomicLong();
-    private AtomicLong entityUpdateCount = new AtomicLong();
-    private AtomicLong entityInsertCount = new AtomicLong();
-    private AtomicLong entityDeleteCount = new AtomicLong();
-    private AtomicLong entityFetchCount = new AtomicLong();
-    private AtomicLong collectionLoadCount = new AtomicLong();
-    private AtomicLong collectionUpdateCount = new AtomicLong();
-    private AtomicLong collectionRemoveCount = new AtomicLong();
-    private AtomicLong collectionRecreateCount = new AtomicLong();
-    private AtomicLong collectionFetchCount = new AtomicLong();
+    private AtomicLong queryExecutionCount = new AtomicLong();
+    private AtomicLong queryExecutionMaxTime = new AtomicLong();
+    private volatile String queryExecutionMaxTimeQueryString;
+    private AtomicLong queryCacheHitCount = new AtomicLong();
+    private AtomicLong queryCacheMissCount = new AtomicLong();
+    private AtomicLong queryCachePutCount = new AtomicLong();
 
-    private AtomicLong secondLevelCacheHitCount = new AtomicLong();
-    private AtomicLong secondLevelCacheMissCount = new AtomicLong();
-    private AtomicLong secondLevelCachePutCount = new AtomicLong();
+    private AtomicLong committedTransactionCount = new AtomicLong();
+    private AtomicLong transactionCount = new AtomicLong();
 
-    private AtomicLong queryExecutionCount = new AtomicLong();
-    private AtomicLong queryExecutionMaxTime = new AtomicLong();
-    private volatile String queryExecutionMaxTimeQueryString;
-    private AtomicLong queryCacheHitCount = new AtomicLong();
-    private AtomicLong queryCacheMissCount = new AtomicLong();
-    private AtomicLong queryCachePutCount = new AtomicLong();
-
-    private AtomicLong commitedTransactionCount = new AtomicLong();
-    private AtomicLong transactionCount = new AtomicLong();
-
-    private AtomicLong optimisticFailureCount = new AtomicLong();
+    private AtomicLong optimisticFailureCount = new AtomicLong();
 
     /**
      * second level cache statistics per region
@@ -76,6 +101,7 @@ public class ConcurrentStatisticsImpl implements Statistics, StatisticsImplement
      */
     private final ConcurrentMap queryStatistics = new ConcurrentHashMap();
 
+    @SuppressWarnings({ "UnusedDeclaration" })
     public ConcurrentStatisticsImpl() {
         clear();
     }
@@ -89,41 +115,41 @@ public ConcurrentStatisticsImpl(SessionFactoryImplementor sessionFactory) {
      * reset all statistics
      */
     public void clear() {
-        secondLevelCacheHitCount.set(0);
-        secondLevelCacheMissCount.set(0);
-        secondLevelCachePutCount.set(0);
+        secondLevelCacheHitCount.set( 0 );
+        secondLevelCacheMissCount.set( 0 );
+        secondLevelCachePutCount.set( 0 );
 
-        sessionCloseCount.set(0);
-        sessionOpenCount.set(0);
-        flushCount.set(0);
-        connectCount.set(0);
+        sessionCloseCount.set( 0 );
+        sessionOpenCount.set( 0 );
+        flushCount.set( 0 );
+        connectCount.set( 0 );
 
-        prepareStatementCount.set(0);
-        closeStatementCount.set(0);
+        prepareStatementCount.set( 0 );
+        closeStatementCount.set( 0 );
 
-        entityDeleteCount.set(0);
-        entityInsertCount.set(0);
-        entityUpdateCount.set(0);
-        entityLoadCount.set(0);
-        entityFetchCount.set(0);
+        entityDeleteCount.set( 0 );
+        entityInsertCount.set( 0 );
+        entityUpdateCount.set( 0 );
+        entityLoadCount.set( 0 );
+        entityFetchCount.set( 0 );
 
-        collectionRemoveCount.set(0);
-        collectionUpdateCount.set(0);
-        collectionRecreateCount.set(0);
-        collectionLoadCount.set(0);
-        collectionFetchCount.set(0);
+        collectionRemoveCount.set( 0 );
+        collectionUpdateCount.set( 0 );
+        collectionRecreateCount.set( 0 );
+        collectionLoadCount.set( 0 );
+        collectionFetchCount.set( 0 );
 
-        queryExecutionCount.set(0);
-        queryCacheHitCount.set(0);
-        queryExecutionMaxTime.set(0);
+        queryExecutionCount.set( 0 );
+        queryCacheHitCount.set( 0 );
+        queryExecutionMaxTime.set( 0 );
         queryExecutionMaxTimeQueryString = null;
-        queryCacheMissCount.set(0);
-        queryCachePutCount.set(0);
+        queryCacheMissCount.set( 0 );
+        queryCachePutCount.set( 0 );
 
-        transactionCount.set(0);
-        commitedTransactionCount.set(0);
+        transactionCount.set( 0 );
+        committedTransactionCount.set( 0 );
 
-        optimisticFailureCount.set(0);
+        optimisticFailureCount.set( 0 );
 
         secondLevelCacheStatistics.clear();
         entityStatistics.clear();
@@ -151,26 +177,29 @@ public void connect() {
 
     public void loadEntity(String entityName) {
         entityLoadCount.getAndIncrement();
-        ((ConcurrentEntityStatisticsImpl) getEntityStatistics(entityName)).incrementLoadCount();
+        ( (ConcurrentEntityStatisticsImpl) getEntityStatistics( entityName ) ).incrementLoadCount();
     }
 
     public void fetchEntity(String entityName) {
         entityFetchCount.getAndIncrement();
-        ((ConcurrentEntityStatisticsImpl) getEntityStatistics(entityName)).incrementFetchCount();
+        ( (ConcurrentEntityStatisticsImpl) getEntityStatistics( entityName ) ).incrementFetchCount();
     }
 
     /**
     * find entity statistics per name
     *
     * @param entityName entity name
+    *
     * @return EntityStatistics object
     */
    public EntityStatistics getEntityStatistics(String entityName) {
-        ConcurrentEntityStatisticsImpl es = (ConcurrentEntityStatisticsImpl) entityStatistics.get(entityName);
-        if (es == null) {
-            es = new ConcurrentEntityStatisticsImpl(entityName);
+        ConcurrentEntityStatisticsImpl es = (ConcurrentEntityStatisticsImpl) entityStatistics.get( entityName );
+        if ( es == null ) {
+            es = new ConcurrentEntityStatisticsImpl( entityName );
             ConcurrentEntityStatisticsImpl previous;
-            if ((previous = (ConcurrentEntityStatisticsImpl) entityStatistics.putIfAbsent(entityName, es)) != null) {
+            if ( ( previous = (ConcurrentEntityStatisticsImpl) entityStatistics.putIfAbsent(
+                    entityName, es
+            ) ) != null ) {
                 es = previous;
             }
         }
@@ -179,19 +208,19 @@ public EntityStatistics getEntityStatistics(String entityName) {
 
     public void updateEntity(String entityName) {
         entityUpdateCount.getAndIncrement();
-        ConcurrentEntityStatisticsImpl es = (ConcurrentEntityStatisticsImpl) getEntityStatistics(entityName);
+        ConcurrentEntityStatisticsImpl es = (ConcurrentEntityStatisticsImpl) getEntityStatistics( entityName );
         es.incrementUpdateCount();
     }
 
     public void insertEntity(String entityName) {
         entityInsertCount.getAndIncrement();
-        ConcurrentEntityStatisticsImpl es = (ConcurrentEntityStatisticsImpl) getEntityStatistics(entityName);
+        ConcurrentEntityStatisticsImpl es = (ConcurrentEntityStatisticsImpl) getEntityStatistics( entityName );
         es.incrementInsertCount();
     }
 
     public void deleteEntity(String entityName) {
         entityDeleteCount.getAndIncrement();
-        ConcurrentEntityStatisticsImpl es = (ConcurrentEntityStatisticsImpl) getEntityStatistics(entityName);
+        ConcurrentEntityStatisticsImpl es = (ConcurrentEntityStatisticsImpl) getEntityStatistics( entityName );
         es.incrementDeleteCount();
     }
@@ -199,14 +228,17 @@ public void deleteEntity(String entityName) {
     * Get collection statistics per role
     *
     * @param role collection role
+    *
     * @return CollectionStatistics
     */
    public CollectionStatistics getCollectionStatistics(String role) {
-        ConcurrentCollectionStatisticsImpl cs = (ConcurrentCollectionStatisticsImpl) collectionStatistics.get(role);
-        if (cs == null) {
-            cs = new ConcurrentCollectionStatisticsImpl(role);
+        ConcurrentCollectionStatisticsImpl cs = (ConcurrentCollectionStatisticsImpl) collectionStatistics.get( role );
+        if ( cs == null ) {
+            cs = new ConcurrentCollectionStatisticsImpl( role );
             ConcurrentCollectionStatisticsImpl previous;
-            if ((previous = (ConcurrentCollectionStatisticsImpl) collectionStatistics.putIfAbsent(role, cs)) != null) {
+            if ( ( previous = (ConcurrentCollectionStatisticsImpl) collectionStatistics.putIfAbsent(
+                    role, cs
+            ) ) != null ) {
                 cs = previous;
             }
         }
@@ -215,49 +247,52 @@ public CollectionStatistics getCollectionStatistics(String role) {
 
     public void loadCollection(String role) {
         collectionLoadCount.getAndIncrement();
-        ((ConcurrentCollectionStatisticsImpl) getCollectionStatistics(role)).incrementLoadCount();
+        ( (ConcurrentCollectionStatisticsImpl) getCollectionStatistics( role ) ).incrementLoadCount();
     }
 
     public void fetchCollection(String role) {
         collectionFetchCount.getAndIncrement();
-        ((ConcurrentCollectionStatisticsImpl) getCollectionStatistics(role)).incrementFetchCount();
+        ( (ConcurrentCollectionStatisticsImpl) getCollectionStatistics( role ) ).incrementFetchCount();
     }
 
     public void updateCollection(String role) {
         collectionUpdateCount.getAndIncrement();
-        ((ConcurrentCollectionStatisticsImpl) getCollectionStatistics(role)).incrementUpdateCount();
+        ( (ConcurrentCollectionStatisticsImpl) getCollectionStatistics( role ) ).incrementUpdateCount();
     }
 
     public void recreateCollection(String role) {
         collectionRecreateCount.getAndIncrement();
-        ((ConcurrentCollectionStatisticsImpl) getCollectionStatistics(role)).incrementRecreateCount();
+        ( (ConcurrentCollectionStatisticsImpl) getCollectionStatistics( role ) ).incrementRecreateCount();
     }
 
     public void removeCollection(String role) {
         collectionRemoveCount.getAndIncrement();
-        ((ConcurrentCollectionStatisticsImpl) getCollectionStatistics(role)).incrementRemoveCount();
+        ( (ConcurrentCollectionStatisticsImpl) getCollectionStatistics( role ) ).incrementRemoveCount();
     }
 
     /**
     * Second level cache statistics per region
     *
     * @param regionName region name
+    *
     * @return SecondLevelCacheStatistics
     */
    public SecondLevelCacheStatistics getSecondLevelCacheStatistics(String regionName) {
        ConcurrentSecondLevelCacheStatisticsImpl slcs
-                = (ConcurrentSecondLevelCacheStatisticsImpl) secondLevelCacheStatistics.get(regionName);
-        if (slcs == null) {
-            if (sessionFactory == null) {
+                = (ConcurrentSecondLevelCacheStatisticsImpl) secondLevelCacheStatistics.get( regionName );
+        if ( slcs == null ) {
+            if ( sessionFactory == null ) {
                 return null;
             }
-            Region region = sessionFactory.getSecondLevelCacheRegion(regionName);
-            if (region == null) {
+            Region region = sessionFactory.getSecondLevelCacheRegion( regionName );
+            if ( region == null ) {
                 return null;
             }
-            slcs = new ConcurrentSecondLevelCacheStatisticsImpl(region);
+            slcs = new ConcurrentSecondLevelCacheStatisticsImpl( region );
             ConcurrentSecondLevelCacheStatisticsImpl previous;
-            if ((previous = (ConcurrentSecondLevelCacheStatisticsImpl) secondLevelCacheStatistics.putIfAbsent(regionName, slcs)) != null) {
+            if ( ( previous = (ConcurrentSecondLevelCacheStatisticsImpl) secondLevelCacheStatistics.putIfAbsent(
+                    regionName, slcs
+            ) ) != null ) {
                 slcs = previous;
             }
         }
@@ -266,60 +301,71 @@ public SecondLevelCacheStatistics getSecondLevelCacheStatistics(String regionNam
 
     public void secondLevelCachePut(String regionName) {
         secondLevelCachePutCount.getAndIncrement();
-        ((ConcurrentSecondLevelCacheStatisticsImpl) getSecondLevelCacheStatistics(regionName)).incrementPutCount();
+        ( (ConcurrentSecondLevelCacheStatisticsImpl) getSecondLevelCacheStatistics( regionName ) ).incrementPutCount();
     }
 
     public void secondLevelCacheHit(String regionName) {
         secondLevelCacheHitCount.getAndIncrement();
-        ((ConcurrentSecondLevelCacheStatisticsImpl) getSecondLevelCacheStatistics(regionName)).incrementHitCount();
+        ( (ConcurrentSecondLevelCacheStatisticsImpl) getSecondLevelCacheStatistics( regionName ) ).incrementHitCount();
     }
 
     public void secondLevelCacheMiss(String regionName) {
         secondLevelCacheMissCount.getAndIncrement();
-        ((ConcurrentSecondLevelCacheStatisticsImpl) getSecondLevelCacheStatistics(regionName)).incrementMissCount();
+        ( (ConcurrentSecondLevelCacheStatisticsImpl) getSecondLevelCacheStatistics( regionName ) ).incrementMissCount();
     }
 
+    @SuppressWarnings({ "UnnecessaryBoxing" })
     public void queryExecuted(String hql, int rows, long time) {
+        PERF_LOG.info( "HQL: {}, time: {}ms, rows: {}", new Object[] {hql, Long.valueOf( time ), Long.valueOf(rows)} );
         queryExecutionCount.getAndIncrement();
         boolean isLongestQuery = false;
-        for (long old = queryExecutionMaxTime.get(); (time > old) && (isLongestQuery = !queryExecutionMaxTime.compareAndSet(old, time)); old = queryExecutionMaxTime.get())
-            ;
-        if (isLongestQuery) {
+        for ( long old = queryExecutionMaxTime.get();
+                ( time > old ) && ( isLongestQuery = !queryExecutionMaxTime.compareAndSet( old, time ) );
+                old = queryExecutionMaxTime.get() ) {
+            // nothing to do here given the odd loop structure...
+        }
+        if ( isLongestQuery ) {
             queryExecutionMaxTimeQueryString = hql;
         }
-        if (hql != null) {
-            ConcurrentQueryStatisticsImpl qs = (ConcurrentQueryStatisticsImpl) getQueryStatistics(hql);
-            qs.executed(rows, time);
+        if ( hql != null ) {
+            ConcurrentQueryStatisticsImpl qs = (ConcurrentQueryStatisticsImpl) getQueryStatistics( hql );
+            qs.executed( rows, time );
         }
     }
 
     public void queryCacheHit(String hql, String regionName) {
         queryCacheHitCount.getAndIncrement();
-        if (hql != null) {
-            ConcurrentQueryStatisticsImpl qs = (ConcurrentQueryStatisticsImpl) getQueryStatistics(hql);
+        if ( hql != null ) {
+            ConcurrentQueryStatisticsImpl qs = (ConcurrentQueryStatisticsImpl) getQueryStatistics( hql );
             qs.incrementCacheHitCount();
         }
-        ConcurrentSecondLevelCacheStatisticsImpl slcs = (ConcurrentSecondLevelCacheStatisticsImpl) getSecondLevelCacheStatistics(regionName);
+        ConcurrentSecondLevelCacheStatisticsImpl slcs = (ConcurrentSecondLevelCacheStatisticsImpl) getSecondLevelCacheStatistics(
+                regionName
+        );
         slcs.incrementHitCount();
     }
 
     public void queryCacheMiss(String hql, String regionName) {
         queryCacheMissCount.getAndIncrement();
-        if (hql != null) {
-            ConcurrentQueryStatisticsImpl qs = (ConcurrentQueryStatisticsImpl) getQueryStatistics(hql);
+        if ( hql != null ) {
+            ConcurrentQueryStatisticsImpl qs = (ConcurrentQueryStatisticsImpl) getQueryStatistics( hql );
            qs.incrementCacheMissCount();
         }
-        ConcurrentSecondLevelCacheStatisticsImpl slcs = (ConcurrentSecondLevelCacheStatisticsImpl) getSecondLevelCacheStatistics(regionName);
+        ConcurrentSecondLevelCacheStatisticsImpl slcs = (ConcurrentSecondLevelCacheStatisticsImpl) getSecondLevelCacheStatistics(
+                regionName
+        );
         slcs.incrementMissCount();
     }
 
     public void queryCachePut(String hql, String regionName) {
         queryCachePutCount.getAndIncrement();
-        if (hql != null) {
-            ConcurrentQueryStatisticsImpl qs = (ConcurrentQueryStatisticsImpl) getQueryStatistics(hql);
+        if ( hql != null ) {
+            ConcurrentQueryStatisticsImpl qs = (ConcurrentQueryStatisticsImpl) getQueryStatistics( hql );
             qs.incrementCachePutCount();
         }
-        ConcurrentSecondLevelCacheStatisticsImpl slcs = (ConcurrentSecondLevelCacheStatisticsImpl) getSecondLevelCacheStatistics(regionName);
+        ConcurrentSecondLevelCacheStatisticsImpl slcs = (ConcurrentSecondLevelCacheStatisticsImpl) getSecondLevelCacheStatistics(
+                regionName
+        );
         slcs.incrementPutCount();
     }
@@ -327,14 +373,17 @@ public void queryCachePut(String hql, String regionName) {
     * Query statistics from query string (HQL or SQL)
     *
     * @param queryString query string
+    *
     * @return QueryStatistics
     */
    public QueryStatistics getQueryStatistics(String queryString) {
-        ConcurrentQueryStatisticsImpl qs = (ConcurrentQueryStatisticsImpl) queryStatistics.get(queryString);
-        if (qs == null) {
-            qs = new ConcurrentQueryStatisticsImpl(queryString);
+        ConcurrentQueryStatisticsImpl qs = (ConcurrentQueryStatisticsImpl) queryStatistics.get( queryString );
+        if ( qs == null ) {
+            qs = new ConcurrentQueryStatisticsImpl( queryString );
             ConcurrentQueryStatisticsImpl previous;
-            if ((previous = (ConcurrentQueryStatisticsImpl) queryStatistics.putIfAbsent(queryString, qs)) != null) {
+            if ( ( previous = (ConcurrentQueryStatisticsImpl) queryStatistics.putIfAbsent(
+                    queryString, qs
+            ) ) != null ) {
                 qs = previous;
             }
         }
@@ -488,35 +537,35 @@ public long getStartTime() {
     * log in info level the main statistics
     */
    public void logSummary() {
-        log.info("Logging statistics....");
-        log.info("start time: " + startTime);
-        log.info("sessions opened: " + sessionOpenCount);
-        log.info("sessions closed: " + sessionCloseCount);
-        log.info("transactions: " + transactionCount);
-        log.info("successful transactions: " + commitedTransactionCount);
-        log.info("optimistic lock failures: " + optimisticFailureCount);
-        log.info("flushes: " + flushCount);
-        log.info("connections obtained: " + connectCount);
-        log.info("statements prepared: " + prepareStatementCount);
-        log.info("statements closed: " + closeStatementCount);
-        log.info("second level cache puts: " + secondLevelCachePutCount);
-        log.info("second level cache hits: " + secondLevelCacheHitCount);
-        log.info("second level cache misses: " + secondLevelCacheMissCount);
-        log.info("entities loaded: " + entityLoadCount);
-        log.info("entities updated: " + entityUpdateCount);
-        log.info("entities inserted: " + entityInsertCount);
-        log.info("entities deleted: " + entityDeleteCount);
-        log.info("entities fetched (minimize this): " + entityFetchCount);
-        log.info("collections loaded: " + collectionLoadCount);
-        log.info("collections updated: " + collectionUpdateCount);
-        log.info("collections removed: " + collectionRemoveCount);
-        log.info("collections recreated: " + collectionRecreateCount);
-        log.info("collections fetched (minimize this): " + collectionFetchCount);
-        log.info("queries executed to database: " + queryExecutionCount);
-        log.info("query cache puts: " + queryCachePutCount);
-        log.info("query cache hits: " + queryCacheHitCount);
-        log.info("query cache misses: " + queryCacheMissCount);
-        log.info("max query time: " + queryExecutionMaxTime + "ms");
+        log.info( "Logging statistics...." );
+        log.info( "start time: " + startTime );
+        log.info( "sessions opened: " + sessionOpenCount );
+        log.info( "sessions closed: " + sessionCloseCount );
+        log.info( "transactions: " + transactionCount );
+        log.info( "successful transactions: " + committedTransactionCount );
+        log.info( "optimistic lock failures: " + optimisticFailureCount );
+        log.info( "flushes: " + flushCount );
+        log.info( "connections obtained: " + connectCount );
+        log.info( "statements prepared: " + prepareStatementCount );
+        log.info( "statements closed: " + closeStatementCount );
+        log.info( "second level cache puts: " + secondLevelCachePutCount );
+        log.info( "second level cache hits: " + secondLevelCacheHitCount );
+        log.info( "second level cache misses: " + secondLevelCacheMissCount );
+        log.info( "entities loaded: " + entityLoadCount );
+        log.info( "entities updated: " + entityUpdateCount );
+        log.info( "entities inserted: " + entityInsertCount );
+        log.info( "entities deleted: " + entityDeleteCount );
+        log.info( "entities fetched (minimize this): " + entityFetchCount );
+        log.info( "collections loaded: " + collectionLoadCount );
+        log.info( "collections updated: " + collectionUpdateCount );
+        log.info( "collections removed: " + collectionRemoveCount );
+        log.info( "collections recreated: " + collectionRecreateCount );
+        log.info( "collections fetched (minimize this): " + collectionFetchCount );
+        log.info( "queries executed to database: " + queryExecutionCount );
+        log.info( "query cache puts: " + queryCachePutCount );
+        log.info( "query cache hits: " + queryCacheHitCount );
+        log.info( "query cache misses: " + queryCacheMissCount );
+        log.info( "max query time: " + queryExecutionMaxTime + "ms" );
     }
 
     /**
@@ -545,17 +594,18 @@ public long getQueryExecutionMaxTime() {
     * Get all executed query strings
     */
    public String[] getQueries() {
-        return ArrayHelper.toStringArray(queryStatistics.keySet());
+        return ArrayHelper.toStringArray( queryStatistics.keySet() );
     }
 
     /**
     * Get the names of all entities
     */
    public String[] getEntityNames() {
-        if (sessionFactory == null) {
-            return ArrayHelper.toStringArray(entityStatistics.keySet());
-        } else {
-            return ArrayHelper.toStringArray(sessionFactory.getAllClassMetadata().keySet());
+        if ( sessionFactory == null ) {
+            return ArrayHelper.toStringArray( entityStatistics.keySet() );
+        }
+        else {
+            return ArrayHelper.toStringArray( sessionFactory.getAllClassMetadata().keySet() );
         }
     }
@@ -563,10 +613,11 @@ public String[] getEntityNames() {
     * Get the names of all collection roles
     */
    public String[] getCollectionRoleNames() {
-        if (sessionFactory == null) {
-            return ArrayHelper.toStringArray(collectionStatistics.keySet());
-        } else {
-            return ArrayHelper.toStringArray(sessionFactory.getAllCollectionMetadata().keySet());
+        if ( sessionFactory == null ) {
+            return ArrayHelper.toStringArray( collectionStatistics.keySet() );
+        }
+        else {
+            return ArrayHelper.toStringArray( sessionFactory.getAllCollectionMetadata().keySet() );
         }
     }
@@ -574,20 +625,23 @@ public String[] getCollectionRoleNames() {
     * Get all second-level cache region names
     */
    public String[] getSecondLevelCacheRegionNames() {
-        if (sessionFactory == null) {
-            return ArrayHelper.toStringArray(secondLevelCacheStatistics.keySet());
-        } else {
-            return ArrayHelper.toStringArray(sessionFactory.getAllSecondLevelCacheRegions().keySet());
+        if ( sessionFactory == null ) {
+            return ArrayHelper.toStringArray( secondLevelCacheStatistics.keySet() );
+        }
+        else {
+            return ArrayHelper.toStringArray( sessionFactory.getAllSecondLevelCacheRegions().keySet() );
         }
     }
 
     public void endTransaction(boolean success) {
         transactionCount.getAndIncrement();
-        if (success) commitedTransactionCount.getAndIncrement();
+        if ( success ) {
+            committedTransactionCount.getAndIncrement();
+        }
     }
 
     public long getSuccessfulTransactionCount() {
-        return commitedTransactionCount.get();
+        return committedTransactionCount.get();
     }
 
     public long getTransactionCount() {
@@ -612,7 +666,7 @@ public long getPrepareStatementCount() {
 
     public void optimisticFailure(String entityName) {
         optimisticFailureCount.getAndIncrement();
-        ((ConcurrentEntityStatisticsImpl) getEntityStatistics(entityName)).incrementOptimisticFailureCount();
+        ( (ConcurrentEntityStatisticsImpl) getEntityStatistics( entityName ) ).incrementOptimisticFailureCount();
     }
 
     public long getOptimisticFailureCount() {
@@ -621,36 +675,36 @@ public long getOptimisticFailureCount() {
 
     public String toString() {
         return new StringBuilder()
-                .append("Statistics[")
-                .append("start time=").append(startTime)
-                .append(",sessions opened=").append(sessionOpenCount)
-                .append(",sessions closed=").append(sessionCloseCount)
-                .append(",transactions=").append(transactionCount)
-                .append(",successful transactions=").append(commitedTransactionCount)
-                .append(",optimistic lock failures=").append(optimisticFailureCount)
-                .append(",flushes=").append(flushCount)
-                .append(",connections obtained=").append(connectCount)
-                .append(",statements prepared=").append(prepareStatementCount)
-                .append(",statements closed=").append(closeStatementCount)
-                .append(",second level cache puts=").append(secondLevelCachePutCount)
-                .append(",second level cache hits=").append(secondLevelCacheHitCount)
-                .append(",second level cache misses=").append(secondLevelCacheMissCount)
-                .append(",entities loaded=").append(entityLoadCount)
-                .append(",entities updated=").append(entityUpdateCount)
-                .append(",entities inserted=").append(entityInsertCount)
-                .append(",entities deleted=").append(entityDeleteCount)
-                .append(",entities fetched=").append(entityFetchCount)
-                .append(",collections loaded=").append(collectionLoadCount)
-                .append(",collections updated=").append(collectionUpdateCount)
-                .append(",collections removed=").append(collectionRemoveCount)
-                .append(",collections recreated=").append(collectionRecreateCount)
-                .append(",collections fetched=").append(collectionFetchCount)
-                .append(",queries executed to database=").append(queryExecutionCount)
-                .append(",query cache puts=").append(queryCachePutCount)
-                .append(",query cache hits=").append(queryCacheHitCount)
-                .append(",query cache misses=").append(queryCacheMissCount)
-                .append(",max query time=").append(queryExecutionMaxTime)
-                .append(']')
+                .append( "Statistics[" )
+                .append( "start time=" ).append( startTime )
+                .append( ",sessions opened=" ).append( sessionOpenCount )
+                .append( ",sessions closed=" ).append( sessionCloseCount )
+                .append( ",transactions=" ).append( transactionCount )
+                .append( ",successful transactions=" ).append( committedTransactionCount )
+                .append( ",optimistic lock failures=" ).append( optimisticFailureCount )
+                .append( ",flushes=" ).append( flushCount )
+                .append( ",connections obtained=" ).append( connectCount )
+                .append( ",statements prepared=" ).append( prepareStatementCount )
+                .append( ",statements closed=" ).append( closeStatementCount )
+                .append( ",second level cache puts=" ).append( secondLevelCachePutCount )
+                .append( ",second level cache hits=" ).append( secondLevelCacheHitCount )
+                .append( ",second level cache misses=" ).append( secondLevelCacheMissCount )
+                .append( ",entities loaded=" ).append( entityLoadCount )
+                .append( ",entities updated=" ).append( entityUpdateCount )
+                .append( ",entities inserted=" ).append( entityInsertCount )
+                .append( ",entities deleted=" ).append( entityDeleteCount )
+                .append( ",entities fetched=" ).append( entityFetchCount )
+                .append( ",collections loaded=" ).append( collectionLoadCount )
+                .append( ",collections updated=" ).append( collectionUpdateCount )
+                .append( ",collections removed=" ).append( collectionRemoveCount )
+                .append( ",collections recreated=" ).append( collectionRecreateCount )
+                .append( ",collections fetched=" ).append( collectionFetchCount )
+                .append( ",queries executed to database=" ).append( queryExecutionCount )
+                .append( ",query cache puts=" ).append( queryCachePutCount )
+                .append( ",query cache hits=" ).append( queryCacheHitCount )
+                .append( ",query cache misses=" ).append( queryCacheMissCount )
+                .append( ",max query time=" ).append( queryExecutionMaxTime )
+                .append( ']' )
                 .toString();
     }