From b8387ec2417f019338ea42fdc3d5e097047b57eb Mon Sep 17 00:00:00 2001 From: Wolfgang Hoschek Date: Thu, 23 Nov 2006 00:25:53 +0000 Subject: [PATCH] javadoc git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@478406 13f79535-47bb-0310-9956-ffa450edef68 --- .../src/java/org/apache/lucene/index/memory/AnalyzerUtil.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/contrib/memory/src/java/org/apache/lucene/index/memory/AnalyzerUtil.java b/contrib/memory/src/java/org/apache/lucene/index/memory/AnalyzerUtil.java index 4a8c9d8af22..858d30a1e31 100644 --- a/contrib/memory/src/java/org/apache/lucene/index/memory/AnalyzerUtil.java +++ b/contrib/memory/src/java/org/apache/lucene/index/memory/AnalyzerUtil.java @@ -207,7 +207,9 @@ public class AnalyzerUtil { * token stream, and delivers those cached tokens on subsequent matching calls to * tokenStream(String fieldName, Reader reader). *

- * This can help improve performance in the presence of expensive Analyzer / TokenFilter chains. + * If Analyzer / TokenFilter chains are expensive in terms of I/O or CPU, such caching can + * help improve performance when the same document is added to multiple Lucene indexes, + * because the text analysis phase need not be performed more than once. *

* Caveats: * 2) Caching the tokens of large Lucene documents can lead to out of memory exceptions.