Merge pull request #15090 from mikemccand/improve_completion_stats

Pull Fields instance once from LeafReader in completion stats.

Closes #6593
Michael McCandless 2015-11-29 06:17:53 -05:00
commit fc6a2f318a
1 changed file with 17 additions and 8 deletions
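The change itself is small: the Fields instance is now pulled once from the LeafReader and reused, instead of being re-fetched via atomicReader.fields() on every loop iteration and again for each terms lookup. In sketch form (condensed from the diff below, unrelated statements elided):

// Before: Fields looked up from the LeafReader twice per field
for (String fieldName : atomicReader.fields()) {
    Terms terms = atomicReader.fields().terms(fieldName);
    // ...
}

// After: the Fields instance is pulled once and reused
Fields fields = atomicReader.fields();
for (String fieldName : fields) {
    Terms terms = fields.terms(fieldName);
    // ...
}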

org/elasticsearch/search/suggest/completion/CompletionFieldStats.java

@@ -20,6 +20,8 @@
 package org.elasticsearch.search.suggest.completion;
 
 import com.carrotsearch.hppc.ObjectLongHashMap;
+
+import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
@@ -32,28 +34,35 @@ import java.io.IOException;
 
 public class CompletionFieldStats {
 
-    public static CompletionStats completionStats(IndexReader indexReader, String ... fields) {
+    /**
+     * Returns total in-heap bytes used by all suggesters. This method has CPU cost <code>O(numIndexedFields)</code>.
+     *
+     * @param fieldNamePatterns if non-null, any completion field name matching any of these patterns will break out its in-heap bytes
+     *                          separately in the returned {@link CompletionStats}
+     */
+    public static CompletionStats completionStats(IndexReader indexReader, String ... fieldNamePatterns) {
         long sizeInBytes = 0;
         ObjectLongHashMap<String> completionFields = null;
-        if (fields != null && fields.length > 0) {
-            completionFields = new ObjectLongHashMap<>(fields.length);
+        if (fieldNamePatterns != null && fieldNamePatterns.length > 0) {
+            completionFields = new ObjectLongHashMap<>(fieldNamePatterns.length);
         }
         for (LeafReaderContext atomicReaderContext : indexReader.leaves()) {
             LeafReader atomicReader = atomicReaderContext.reader();
             try {
-                for (String fieldName : atomicReader.fields()) {
-                    Terms terms = atomicReader.fields().terms(fieldName);
+                Fields fields = atomicReader.fields();
+                for (String fieldName : fields) {
+                    Terms terms = fields.terms(fieldName);
                     if (terms instanceof CompletionTerms) {
                         // TODO: currently we load up the suggester for reporting its size
                         long fstSize = ((CompletionTerms) terms).suggester().ramBytesUsed();
-                        if (fields != null && fields.length > 0 && Regex.simpleMatch(fields, fieldName)) {
+                        if (fieldNamePatterns != null && fieldNamePatterns.length > 0 && Regex.simpleMatch(fieldNamePatterns, fieldName)) {
                             completionFields.addTo(fieldName, fstSize);
                         }
                         sizeInBytes += fstSize;
                     }
                 }
-            } catch (IOException ignored) {
-                throw new ElasticsearchException(ignored);
+            } catch (IOException ioe) {
+                throw new ElasticsearchException(ioe);
             }
         }
         return new CompletionStats(sizeInBytes, completionFields);
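
For context, a minimal caller sketch for the updated method. The index path, the DirectoryReader setup, the class name CompletionStatsExample, and the "my_suggest*" pattern are illustrative assumptions, not part of this commit:

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.elasticsearch.search.suggest.completion.CompletionFieldStats;
import org.elasticsearch.search.suggest.completion.CompletionStats;

import java.nio.file.Paths;

public class CompletionStatsExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical index location; any directory holding a Lucene index works.
        try (Directory dir = FSDirectory.open(Paths.get("/path/to/index"));
             DirectoryReader reader = DirectoryReader.open(dir)) {

            // No patterns: only the total in-heap size of all suggesters is reported.
            CompletionStats totals = CompletionFieldStats.completionStats(reader);

            // With a pattern, matching completion fields are also broken out
            // individually in the returned stats ("my_suggest*" is hypothetical).
            CompletionStats perField = CompletionFieldStats.completionStats(reader, "my_suggest*");

            System.out.println("total suggester bytes: " + totals.getSizeInBytes());
        }
    }
}

Note that only fields whose Terms are CompletionTerms (i.e. fields indexed with the completion postings format) contribute to the reported size; on an index with no completion fields the totals are simply zero.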