mirror of https://github.com/apache/lucene.git
LUCENE-5666: support the 2 crazy instances of insanity that are too hard for me to fix :(
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene5666@1594492 13f79535-47bb-0310-9956-ffa450edef68
parent 7b63afc003
commit 69d59acc31
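The two "instances of insanity" are Solr features that still need a string-ordinal (FieldCache-style) view of a single-valued numeric field: the ord()/rord() function queries and grouped faceting (group.facet). Both call sites in the hunks below guard on the same schema check before falling back to the new Insanity helper. A minimal sketch of that guard, pulled out into a hypothetical needsInsanity helper (the class and method names here are made up for illustration; the condition itself is taken from the patch):

    import org.apache.solr.schema.SchemaField;

    class InsanityGuardSketch {
      /** Mirrors the schema check added at both call sites below (illustration only). */
      static boolean needsInsanity(SchemaField sf) {
        // a single-valued numeric field without docValues has no string-ordinal view,
        // so the caller must build one explicitly via Insanity.wrapInsanity(...)
        return sf != null
            && sf.hasDocValues() == false
            && sf.multiValued() == false
            && sf.getType().getNumericType() != null;
      }
    }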
@@ -38,6 +38,7 @@ import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.Fields;
@@ -48,6 +49,8 @@ import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.FilterCollector;
+import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
@@ -83,6 +86,7 @@ import org.apache.solr.search.DocIterator;
 import org.apache.solr.search.DocSet;
 import org.apache.solr.search.Grouping;
 import org.apache.solr.search.HashDocSet;
+import org.apache.solr.search.Insanity;
 import org.apache.solr.search.QParser;
 import org.apache.solr.search.QueryParsing;
 import org.apache.solr.search.SolrIndexSearcher;
@@ -446,7 +450,7 @@ public class SimpleFacets {
                                  String sort,
                                  String prefix) throws IOException {
     GroupingSpecification groupingSpecification = rb.getGroupingSpec();
-    String groupField = groupingSpecification != null ? groupingSpecification.getFields()[0] : null;
+    final String groupField = groupingSpecification != null ? groupingSpecification.getFields()[0] : null;
     if (groupField == null) {
       throw new SolrException (
           SolrException.ErrorCode.BAD_REQUEST,
@@ -455,8 +459,24 @@
     }
 
     BytesRef prefixBR = prefix != null ? new BytesRef(prefix) : null;
-    TermGroupFacetCollector collector = TermGroupFacetCollector.createTermGroupFacetCollector(groupField, field, multiToken, prefixBR, 128);
-    searcher.search(new MatchAllDocsQuery(), base.getTopFilter(), collector);
+    final TermGroupFacetCollector collector = TermGroupFacetCollector.createTermGroupFacetCollector(groupField, field, multiToken, prefixBR, 128);
+
+    SchemaField sf = searcher.getSchema().getFieldOrNull(groupField);
+
+    if (sf != null && sf.hasDocValues() == false && sf.multiValued() == false && sf.getType().getNumericType() != null) {
+      // it's a single-valued numeric field: we must currently create insanity :(
+      // there isn't a GroupedFacetCollector that works on numerics right now...
+      searcher.search(new MatchAllDocsQuery(), base.getTopFilter(), new FilterCollector(collector) {
+        @Override
+        public LeafCollector getLeafCollector(AtomicReaderContext context) throws IOException {
+          AtomicReader insane = Insanity.wrapInsanity(context.reader(), groupField);
+          return in.getLeafCollector(insane.getContext());
+        }
+      });
+    } else {
+      searcher.search(new MatchAllDocsQuery(), base.getTopFilter(), collector);
+    }
+
     boolean orderByCount = sort.equals(FacetParams.FACET_SORT_COUNT) || sort.equals(FacetParams.FACET_SORT_COUNT_LEGACY);
     TermGroupFacetCollector.GroupedFacetResult result
       = collector.mergeSegmentResults(limit < 0 ? Integer.MAX_VALUE :
@@ -0,0 +1,129 @@
package org.apache.solr.search;

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;

import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.FilterAtomicReader;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.uninverting.UninvertingReader;

/**
 * Lucene 5.0 removes "accidental" insanity, so you must explicitly
 * create it.
 * <p>
 * This class creates insanity for two specific situations:
 * <ul>
 *   <li>calling {@code ord} or {@code rord} functions on a single-valued numeric field.
 *   <li>doing grouped faceting ({@code group.facet}) on a single-valued numeric field.
 * </ul>
 */
@Deprecated
public class Insanity {

  /**
   * Returns a view over {@code sane} where {@code insaneField} is a string
   * instead of a numeric.
   */
  public static AtomicReader wrapInsanity(AtomicReader sane, String insaneField) {
    return new UninvertingReader(new InsaneReader(sane, insaneField),
                                 Collections.singletonMap(insaneField, UninvertingReader.Type.SORTED));
  }

  /** Hides the proper numeric dv type for the field */
  private static class InsaneReader extends FilterAtomicReader {
    final String insaneField;
    final FieldInfos fieldInfos;

    InsaneReader(AtomicReader in, String insaneField) {
      super(in);
      this.insaneField = insaneField;
      ArrayList<FieldInfo> filteredInfos = new ArrayList<>();
      for (FieldInfo fi : in.getFieldInfos()) {
        if (fi.name.equals(insaneField)) {
          filteredInfos.add(new FieldInfo(fi.name, fi.isIndexed(), fi.number, fi.hasVectors(), fi.omitsNorms(),
                                          fi.hasPayloads(), fi.getIndexOptions(), null, fi.getNormType(), null));
        } else {
          filteredInfos.add(fi);
        }
      }
      fieldInfos = new FieldInfos(filteredInfos.toArray(new FieldInfo[filteredInfos.size()]));
    }

    @Override
    public NumericDocValues getNumericDocValues(String field) throws IOException {
      if (insaneField.equals(field)) {
        return null;
      } else {
        return in.getNumericDocValues(field);
      }
    }

    @Override
    public BinaryDocValues getBinaryDocValues(String field) throws IOException {
      if (insaneField.equals(field)) {
        return null;
      } else {
        return in.getBinaryDocValues(field);
      }
    }

    @Override
    public SortedDocValues getSortedDocValues(String field) throws IOException {
      if (insaneField.equals(field)) {
        return null;
      } else {
        return in.getSortedDocValues(field);
      }
    }

    @Override
    public SortedSetDocValues getSortedSetDocValues(String field) throws IOException {
      if (insaneField.equals(field)) {
        return null;
      } else {
        return in.getSortedSetDocValues(field);
      }
    }

    @Override
    public FieldInfos getFieldInfos() {
      return fieldInfos;
    }

    // important to override these, so fieldcaches are shared on what we wrap

    @Override
    public Object getCoreCacheKey() {
      return in.getCoreCacheKey();
    }

    @Override
    public Object getCombinedCoreAndDeletesKey() {
      return in.getCombinedCoreAndDeletesKey();
    }
  }
}
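A minimal sketch of how a caller uses the new org.apache.solr.search.Insanity class above, assuming a hypothetical indexed, single-valued numeric field with no docValues; relying on getSortedDocValues of the returned UninvertingReader is an assumption about that class's usual uninversion behavior, not something this patch adds:

    import java.io.IOException;

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.SortedDocValues;
    import org.apache.solr.search.Insanity;

    class InsanityUsageSketch {
      /** Illustration only: view a single-valued numeric field as string ordinals for one segment. */
      static SortedDocValues stringOrdsFor(AtomicReaderContext leaf, String numericField) throws IOException {
        // InsaneReader hides the field's numeric docValues and strips its docValues type,
        // so the outer UninvertingReader treats it as an ordinary indexed string field.
        AtomicReader insane = Insanity.wrapInsanity(leaf.reader(), numericField);
        return insane.getSortedDocValues(numericField);
      }
    }

Per the "important to override these" comment in InsaneReader, delegating getCoreCacheKey and getCombinedCoreAndDeletesKey keeps the uninverted entries keyed on the original segment, so repeated wrapping does not duplicate cache memory.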
@@ -18,12 +18,14 @@
 package org.apache.solr.search.function;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.Map;
 
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.ReaderUtil;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
 import org.apache.lucene.index.SortedDocValues;
@@ -33,6 +35,8 @@ import org.apache.lucene.queries.function.docvalues.IntDocValues;
 import org.apache.lucene.search.SortedSetSelector;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueInt;
+import org.apache.solr.schema.SchemaField;
+import org.apache.solr.search.Insanity;
 import org.apache.solr.search.SolrIndexSearcher;
 
 /**
@@ -72,8 +76,21 @@ public class OrdFieldSource extends ValueSource {
     final AtomicReader r;
     Object o = context.get("searcher");
     if (o instanceof SolrIndexSearcher) {
-      // reuse ordinalmap
-      r = ((SolrIndexSearcher)o).getAtomicReader();
+      SolrIndexSearcher is = (SolrIndexSearcher) o;
+      SchemaField sf = is.getSchema().getFieldOrNull(field);
+      if (sf != null && sf.hasDocValues() == false && sf.multiValued() == false && sf.getType().getNumericType() != null) {
+        // it's a single-valued numeric field: we must currently create insanity :(
+        List<AtomicReaderContext> leaves = is.getIndexReader().leaves();
+        AtomicReader insaneLeaves[] = new AtomicReader[leaves.size()];
+        int upto = 0;
+        for (AtomicReaderContext raw : leaves) {
+          insaneLeaves[upto++] = Insanity.wrapInsanity(raw.reader(), field);
+        }
+        r = SlowCompositeReaderWrapper.wrap(new MultiReader(insaneLeaves));
+      } else {
+        // reuse ordinalmap
+        r = ((SolrIndexSearcher)o).getAtomicReader();
+      }
     } else {
       IndexReader topReader = ReaderUtil.getTopLevelContext(readerContext).reader();
       r = SlowCompositeReaderWrapper.wrap(topReader);
@@ -18,6 +18,7 @@
 package org.apache.solr.search.function;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.Map;
 
 import org.apache.lucene.index.AtomicReader;
@@ -25,6 +26,7 @@ import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.CompositeReader;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.ReaderUtil;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
 import org.apache.lucene.index.SortedDocValues;
@@ -32,6 +34,8 @@ import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.docvalues.IntDocValues;
 import org.apache.lucene.search.SortedSetSelector;
+import org.apache.solr.schema.SchemaField;
+import org.apache.solr.search.Insanity;
 import org.apache.solr.search.SolrIndexSearcher;
 
 /**
@@ -73,8 +77,21 @@ public class ReverseOrdFieldSource extends ValueSource {
     final AtomicReader r;
     Object o = context.get("searcher");
     if (o instanceof SolrIndexSearcher) {
-      // reuse ordinalmap
-      r = ((SolrIndexSearcher)o).getAtomicReader();
+      SolrIndexSearcher is = (SolrIndexSearcher) o;
+      SchemaField sf = is.getSchema().getFieldOrNull(field);
+      if (sf != null && sf.hasDocValues() == false && sf.multiValued() == false && sf.getType().getNumericType() != null) {
+        // it's a single-valued numeric field: we must currently create insanity :(
+        List<AtomicReaderContext> leaves = is.getIndexReader().leaves();
+        AtomicReader insaneLeaves[] = new AtomicReader[leaves.size()];
+        int upto = 0;
+        for (AtomicReaderContext raw : leaves) {
+          insaneLeaves[upto++] = Insanity.wrapInsanity(raw.reader(), field);
+        }
+        r = SlowCompositeReaderWrapper.wrap(new MultiReader(insaneLeaves));
+      } else {
+        // reuse ordinalmap
+        r = ((SolrIndexSearcher)o).getAtomicReader();
+      }
     } else {
       IndexReader topReader = ReaderUtil.getTopLevelContext(readerContext).reader();
       r = SlowCompositeReaderWrapper.wrap(topReader);
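OrdFieldSource and ReverseOrdFieldSource add the same leaf-wrapping block. A sketch of that shared pattern, pulled into a hypothetical helper (class and method names invented here) to show its shape: every segment reader is wrapped, then the wrapped leaves are merged back into one atomic view so ord()/rord() see index-wide ordinals. This rebuilds an ordinal map for the wrapped view rather than reusing the searcher's top-level atomic reader, which is why the sane path keeps the "reuse ordinalmap" branch.

    import java.io.IOException;
    import java.util.List;

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.MultiReader;
    import org.apache.lucene.index.SlowCompositeReaderWrapper;
    import org.apache.solr.search.Insanity;
    import org.apache.solr.search.SolrIndexSearcher;

    class GlobalOrdInsanitySketch {
      /** Illustration only: top-level string-ordinal view of a single-valued numeric field. */
      static AtomicReader insaneTopLevelReader(SolrIndexSearcher searcher, String field) throws IOException {
        List<AtomicReaderContext> leaves = searcher.getIndexReader().leaves();
        AtomicReader[] insaneLeaves = new AtomicReader[leaves.size()];
        int upto = 0;
        for (AtomicReaderContext raw : leaves) {
          // hide the numeric docValues view per segment, uninvert the field as SORTED
          insaneLeaves[upto++] = Insanity.wrapInsanity(raw.reader(), field);
        }
        // merge the wrapped leaves into one atomic view so ord/rord get global ordinals
        return SlowCompositeReaderWrapper.wrap(new MultiReader(insaneLeaves));
      }
    }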