diff --git a/contrib/memory/src/java/org/apache/lucene/index/memory/AnalyzerUtil.java b/contrib/memory/src/java/org/apache/lucene/index/memory/AnalyzerUtil.java
index 4a8c9d8af22..858d30a1e31 100644
--- a/contrib/memory/src/java/org/apache/lucene/index/memory/AnalyzerUtil.java
+++ b/contrib/memory/src/java/org/apache/lucene/index/memory/AnalyzerUtil.java
@@ -207,7 +207,9 @@ public class AnalyzerUtil {
    * token stream, and delivers those cached tokens on subsequent matching calls to
    * <code>tokenStream(String fieldName, Reader reader)</code>.
    * <p>
-   * This can help improve performance in the presence of expensive Analyzer / TokenFilter chains.
+   * If Analyzer / TokenFilter chains are expensive in terms of I/O or CPU, such caching can
+   * help improve performance if the same document is added to multiple Lucene indexes,
+   * because the text analysis phase need not be performed more than once.
    * <p>
    * Caveats:
    * 2) Caching the tokens of large Lucene documents can lead to out of memory exceptions.
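
Note (not part of the patch itself): a minimal usage sketch of the scenario the new wording describes, i.e. analyzing a document once and replaying the cached tokens when the same document is added to several indexes. It assumes the contrib-memory API of this era (AnalyzerUtil.getTokenCachingAnalyzer(Analyzer)) and Lucene 2.x core classes (StandardAnalyzer, RAMDirectory, the IndexWriter(Directory, Analyzer, boolean) constructor); class name and field values below are illustrative only.

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.memory.AnalyzerUtil;
import org.apache.lucene.store.RAMDirectory;

public class TokenCachingExample {
  public static void main(String[] args) throws Exception {
    // Wrap a potentially expensive child analyzer; tokens generated for a field
    // are cached and replayed on subsequent matching tokenStream() calls.
    Analyzer cachingAnalyzer =
        AnalyzerUtil.getTokenCachingAnalyzer(new StandardAnalyzer());

    Document doc = new Document();
    doc.add(new Field("content", "some large body of text",
        Field.Store.NO, Field.Index.TOKENIZED));

    IndexWriter writerA = new IndexWriter(new RAMDirectory(), cachingAnalyzer, true);
    IndexWriter writerB = new IndexWriter(new RAMDirectory(), cachingAnalyzer, true);

    // The same document goes into two indexes, but with the caching wrapper
    // the text analysis phase is performed only once, not twice.
    writerA.addDocument(doc);
    writerB.addDocument(doc);

    writerA.close();
    writerB.close();
  }
}

Per the Caveats in the Javadoc above, this only pays off when the analysis cost outweighs the memory spent on cached tokens; caching the tokens of very large documents can lead to out of memory exceptions.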