mirror of https://github.com/apache/lucene.git
LUCENE-2858: Port Java to new API, share slow reader as SolrIndexSearcher.getAtomicReader()
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene2858@1237812 13f79535-47bb-0310-9956-ffa450edef68
commit 10614f9b26
parent 0d7055bf3d

@@ -18,6 +18,7 @@ package org.apache.solr.core;
 
 import java.io.IOException;
 
+import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.store.Directory;
 import org.apache.solr.common.util.NamedList;
@@ -60,6 +61,6 @@ public abstract class IndexReaderFactory implements NamedListInitializedPlugin {
    * @return An IndexReader instance
    * @throws IOException
    */
-  public abstract IndexReader newReader(Directory indexDir)
+  public abstract DirectoryReader newReader(Directory indexDir)
       throws IOException;
 }

@@ -19,7 +19,7 @@ package org.apache.solr.core;
 
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.index.IndexDeletionPolicy;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.store.Directory;
@@ -758,7 +758,7 @@ public final class SolrCore implements SolrInfoMBean {
     }
 
     try {
-      updateHandler.close();
+      if (updateHandler != null) updateHandler.close();
     } catch (Throwable e) {
       SolrException.log(log,e);
     }
@@ -1082,17 +1082,17 @@ public final class SolrCore implements SolrInfoMBean {
       if (newestSearcher != null && solrConfig.reopenReaders
           && (nrt || indexDirFile.equals(newIndexDirFile))) {
 
-        IndexReader newReader;
-        IndexReader currentReader = newestSearcher.get().getIndexReader();
+        DirectoryReader newReader;
+        DirectoryReader currentReader = newestSearcher.get().getIndexReader();
 
         if (updateHandlerReopens) {
           // SolrCore.verbose("start reopen from",previousSearcher,"writer=",writer);
           IndexWriter writer = getUpdateHandler().getSolrCoreState().getIndexWriter(this);
-          newReader = IndexReader.openIfChanged(currentReader, writer, true);
+          newReader = DirectoryReader.openIfChanged(currentReader, writer, true);
 
         } else {
           // verbose("start reopen without writer, reader=", currentReader);
-          newReader = IndexReader.openIfChanged(currentReader);
+          newReader = DirectoryReader.openIfChanged(currentReader);
           // verbose("reopen result", newReader);
         }
 

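A note on the reopen calls above: in the new API the static openIfChanged helpers live on DirectoryReader and return null when the index is unchanged, in which case the caller keeps the old reader. A minimal sketch of the same pattern, assuming the Lucene 4.0-era API shown in this diff (the helper class and method names are illustrative, not part of the patch):

    import java.io.IOException;

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;

    // Illustrative reopen helper. openIfChanged() returns a reader
    // reflecting the latest index state, or null if nothing changed.
    final class ReopenSketch {
      static DirectoryReader reopen(DirectoryReader current, IndexWriter writer, boolean nrt)
          throws IOException {
        DirectoryReader changed = nrt
            ? DirectoryReader.openIfChanged(current, writer, true) // NRT view, applying deletes
            : DirectoryReader.openIfChanged(current);              // re-read from the Directory
        return changed != null ? changed : current;
      }
    }
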
@@ -18,7 +18,7 @@ package org.apache.solr.core;
 
 import java.io.IOException;
 
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.store.Directory;
 
 /**
@@ -30,7 +30,7 @@ import org.apache.lucene.store.Directory;
 public class StandardIndexReaderFactory extends IndexReaderFactory {
 
   @Override
-  public IndexReader newReader(Directory indexDir) throws IOException {
-    return IndexReader.open(indexDir, termInfosIndexDivisor);
+  public DirectoryReader newReader(Directory indexDir) throws IOException {
+    return DirectoryReader.open(indexDir, termInfosIndexDivisor);
   }
 }

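The factory change above is the opening move of the port: newReader() now returns the concrete DirectoryReader type, so consumers downstream (searchers, replication, core admin) get Directory-backed operations such as directory(), getIndexCommit(), and reopen without casting. A custom factory in the same style, assuming the Lucene 4.0-era API shown in this diff (the class name is hypothetical):

    package org.apache.solr.core;

    import java.io.IOException;

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.store.Directory;

    // Hypothetical subclass mirroring StandardIndexReaderFactory above:
    // DirectoryReader.open() replaces the old IndexReader.open().
    public class ExampleReaderFactory extends IndexReaderFactory {
      @Override
      public DirectoryReader newReader(Directory indexDir) throws IOException {
        return DirectoryReader.open(indexDir, termInfosIndexDivisor);
      }
    }
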
@@ -43,6 +43,7 @@ import org.apache.commons.io.IOUtils;
 import org.apache.lucene.index.IndexCommit;
 import org.apache.lucene.index.IndexDeletionPolicy;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.DirectoryReader;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
@@ -844,11 +845,11 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAware {
       replicateOnStart = true;
       RefCounted<SolrIndexSearcher> s = core.getNewestSearcher(false);
       try {
-        IndexReader reader = s==null ? null : s.get().getIndexReader();
+        DirectoryReader reader = s==null ? null : s.get().getIndexReader();
         if (reader!=null && reader.getIndexCommit() != null && reader.getIndexCommit().getGeneration() != 1L) {
           try {
             if(replicateOnOptimize){
-              Collection<IndexCommit> commits = IndexReader.listCommits(reader.directory());
+              Collection<IndexCommit> commits = DirectoryReader.listCommits(reader.directory());
               for (IndexCommit ic : commits) {
                 if(ic.getSegmentCount() == 1){
                   if(indexCommitPoint == null || indexCommitPoint.getGeneration() < ic.getGeneration()) indexCommitPoint = ic;

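Replication picks what to ship by inspecting commit points, which in the new API are listed through DirectoryReader rather than IndexReader. A small sketch of that inspection loop, assuming the Lucene 4.0-era API shown above (the helper class and method are illustrative):

    import java.io.IOException;

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexCommit;
    import org.apache.lucene.store.Directory;

    final class CommitScanSketch {
      // Picks the newest single-segment commit, as the replicateOnOptimize
      // branch above does; returns null when none qualifies.
      static IndexCommit newestOptimizedCommit(Directory dir) throws IOException {
        IndexCommit best = null;
        for (IndexCommit ic : DirectoryReader.listCommits(dir)) {
          if (ic.getSegmentCount() == 1
              && (best == null || best.getGeneration() < ic.getGeneration())) {
            best = ic;
          }
        }
        return best;
      }
    }
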
@@ -25,7 +25,7 @@ import java.util.List;
 import java.util.Properties;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.IOUtils;
@@ -218,7 +218,7 @@ public class CoreAdminHandler extends RequestHandlerBase {
     SolrCore[] sourceCores = null;
     RefCounted<SolrIndexSearcher>[] searchers = null;
     // stores readers created from indexDir param values
-    IndexReader[] readersToBeClosed = null;
+    DirectoryReader[] readersToBeClosed = null;
     Directory[] dirsToBeReleased = null;
     if (core != null) {
       try {
@@ -239,22 +239,22 @@ public class CoreAdminHandler extends RequestHandlerBase {
             sourceCores[i] = srcCore;
           }
         } else {
-          readersToBeClosed = new IndexReader[dirNames.length];
+          readersToBeClosed = new DirectoryReader[dirNames.length];
           dirsToBeReleased = new Directory[dirNames.length];
           DirectoryFactory dirFactory = core.getDirectoryFactory();
           for (int i = 0; i < dirNames.length; i++) {
             Directory dir = dirFactory.get(dirNames[i], core.getSolrConfig().mainIndexConfig.lockType);
             dirsToBeReleased[i] = dir;
             // TODO: why doesn't this use the IR factory? what is going on here?
-            readersToBeClosed[i] = IndexReader.open(dir);
+            readersToBeClosed[i] = DirectoryReader.open(dir);
           }
         }
 
-        IndexReader[] readers = null;
+        DirectoryReader[] readers = null;
         if (readersToBeClosed != null) {
           readers = readersToBeClosed;
         } else {
-          readers = new IndexReader[sourceCores.length];
+          readers = new DirectoryReader[sourceCores.length];
           searchers = new RefCounted[sourceCores.length];
           for (int i = 0; i < sourceCores.length; i++) {
             SolrCore solrCore = sourceCores[i];

@@ -90,7 +90,7 @@ public class LukeRequestHandler extends RequestHandlerBase
   {
     IndexSchema schema = req.getSchema();
     SolrIndexSearcher searcher = req.getSearcher();
-    IndexReader reader = searcher.getIndexReader();
+    DirectoryReader reader = searcher.getIndexReader();
     SolrParams params = req.getParams();
     int numTerms = params.getInt( NUMTERMS, DEFAULT_COUNT );
 
@@ -287,17 +287,17 @@ public class LukeRequestHandler extends RequestHandlerBase
       final SolrIndexSearcher searcher, final Set<String> fields, final int numTerms, Map<String,TopTermQueue> ttinfo)
       throws Exception {
 
-    IndexReader reader = searcher.getIndexReader();
+    AtomicReader reader = searcher.getAtomicReader();
     IndexSchema schema = searcher.getSchema();
 
     Set<String> fieldNames = new TreeSet<String>();
-    for(FieldInfo fieldInfo : ReaderUtil.getMergedFieldInfos(reader)) {
+    for(FieldInfo fieldInfo : reader.getFieldInfos()) {
       fieldNames.add(fieldInfo.name);
     }
 
     // Walk the term enum and keep a priority queue for each map in our set
     SimpleOrderedMap<Object> finfo = new SimpleOrderedMap<Object>();
-    Fields theFields = MultiFields.getFields(reader);
+    Fields theFields = reader.fields();
 
     for (String fieldName : fieldNames) {
       if (fields != null && ! fields.contains(fieldName)) {
@@ -328,8 +328,7 @@ public class LukeRequestHandler extends RequestHandlerBase
     Document doc = null;
     if (topTerms != null && topTerms.getTopTermInfo() != null) {
       Term term = topTerms.getTopTermInfo().term;
-      DocsEnum docsEnum = MultiFields.getTermDocsEnum(reader,
-          MultiFields.getLiveDocs(reader),
+      DocsEnum docsEnum = reader.termDocsEnum(reader.getLiveDocs(),
           term.field(),
           new BytesRef(term.text()),
           false);
@@ -498,10 +497,10 @@ public class LukeRequestHandler extends RequestHandlerBase
       v.add( f.getName() );
       typeusemap.put( ft.getTypeName(), v );
     }
-  public static SimpleOrderedMap<Object> getIndexInfo(IndexReader reader, boolean countTerms) throws IOException {
+  public static SimpleOrderedMap<Object> getIndexInfo(DirectoryReader reader, boolean countTerms) throws IOException {
     return getIndexInfo(reader, countTerms ? 1 : 0, null, null);
   }
-  public static SimpleOrderedMap<Object> getIndexInfo( IndexReader reader, int numTerms,
+  public static SimpleOrderedMap<Object> getIndexInfo( DirectoryReader reader, int numTerms,
       Map<String, TopTermQueue> topTerms,
       Set<String> fieldList) throws IOException {
     Directory dir = reader.directory();

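LukeRequestHandler needs one index-wide view of fields, live docs, and postings, which the composite/atomic split no longer gives it directly: a DirectoryReader is a composite of per-segment readers. The commit routes such callers through SolrIndexSearcher.getAtomicReader(), a slow merged view. A sketch of the equivalent wrapping, assuming the Lucene 4.0-era API used in this diff (the class is illustrative):

    import java.io.IOException;

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.FieldInfo;
    import org.apache.lucene.index.SlowCompositeReaderWrapper;

    final class MergedViewSketch {
      // Wraps a composite reader into a single AtomicReader; merged field
      // infos then come from getFieldInfos() instead of the old
      // ReaderUtil.getMergedFieldInfos()/MultiFields helpers.
      static void listFields(DirectoryReader composite) throws IOException {
        AtomicReader merged = SlowCompositeReaderWrapper.wrap(composite);
        for (FieldInfo fi : merged.getFieldInfos()) {
          System.out.println(fi.name);
        }
      }
    }
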
@@ -19,7 +19,7 @@ package org.apache.solr.handler.component;
 
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queryparser.classic.ParseException;

@@ -21,7 +21,7 @@ import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.index.*;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.search.*;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -535,13 +535,13 @@ public class QueryElevationComponent extends SearchComponent implements SolrCoreAware {
     public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
       //convert the ids to Lucene doc ids, the ordSet and termValues needs to be the same size as the number of elevation docs we have
       ordSet.clear();
-      Fields fields = context.reader.fields();
+      Fields fields = context.reader().fields();
       if (fields == null) return this;
       Terms terms = fields.terms(fieldname);
       if (terms == null) return this;
       termsEnum = terms.iterator(termsEnum);
       BytesRef term = new BytesRef();
-      Bits liveDocs = context.reader.getLiveDocs();
+      Bits liveDocs = context.reader().getLiveDocs();
 
       for (String id : elevations.ids) {
         term.copyChars(id);

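The context.reader to context.reader() change repeated throughout this commit reflects AtomicReaderContext becoming a top-level class whose reader is exposed through an accessor typed as AtomicReader, instead of a public IndexReader field on the old nested IndexReader.AtomicReaderContext. A compact sketch of per-segment access through the new accessor, assuming the Lucene 4.0-era API (the class is illustrative):

    import java.io.IOException;

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.util.Bits;

    final class SegmentAccessSketch {
      static int countLive(AtomicReaderContext context) throws IOException {
        AtomicReader reader = context.reader(); // accessor, not a field
        Bits liveDocs = reader.getLiveDocs();   // null means no deletions
        if (liveDocs == null) return reader.maxDoc();
        int n = 0;
        for (int i = 0; i < reader.maxDoc(); i++) {
          if (liveDocs.get(i)) n++;
        }
        return n;
      }
    }
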
@@ -23,7 +23,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.lucene.index.SlowMultiReaderWrapper;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.util.BytesRef;
 import org.apache.solr.common.SolrException;
@@ -253,7 +252,7 @@ class SimpleStats {
 
     FieldCache.DocTermsIndex si;
     try {
-      si = FieldCache.DEFAULT.getTermsIndex(new SlowMultiReaderWrapper(searcher.getIndexReader()), fieldName);
+      si = FieldCache.DEFAULT.getTermsIndex(searcher.getAtomicReader(), fieldName);
     }
     catch (IOException e) {
       throw new RuntimeException( "failed to open field cache for: "+fieldName, e );
@@ -275,7 +274,7 @@ class SimpleStats {
             + "[" + facetFieldType + "]");
       }
       try {
-        facetTermsIndex = FieldCache.DEFAULT.getTermsIndex(new SlowMultiReaderWrapper(searcher.getIndexReader()), facetField);
+        facetTermsIndex = FieldCache.DEFAULT.getTermsIndex(searcher.getAtomicReader(), facetField);
       }
       catch (IOException e) {
         throw new RuntimeException( "failed to open field cache for: "

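Every FieldCache call in this commit switches from wrapping the top-level reader ad hoc (a fresh SlowMultiReaderWrapper per call) to the single shared wrapper held by the searcher. That matters because FieldCache entries are keyed by reader instance, so repeatedly creating new wrappers around the same underlying reader would populate duplicate cache entries. A sketch of the shared-instance pattern, assuming the Lucene 4.0-era API shown in this diff (the holder class is illustrative):

    import java.io.IOException;

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.SlowCompositeReaderWrapper;
    import org.apache.lucene.search.FieldCache;

    final class SharedAtomicViewSketch {
      private final AtomicReader atomicReader; // one wrapper per searcher lifetime

      SharedAtomicViewSketch(DirectoryReader reader) throws IOException {
        this.atomicReader = SlowCompositeReaderWrapper.wrap(reader);
      }

      FieldCache.DocTermsIndex termsIndex(String field) throws IOException {
        // Same reader instance every call, so one cache entry per field.
        return FieldCache.DEFAULT.getTermsIndex(atomicReader, field);
      }
    }
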
@@ -13,8 +13,6 @@ import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.FieldsEnum;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.StoredFieldVisitor.Status;
 import org.apache.lucene.index.StoredFieldVisitor;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
@@ -390,13 +388,7 @@ public class TermVectorComponent extends SearchComponent implements SolrCoreAware {
   private static int getDocFreq(IndexReader reader, String field, BytesRef term) {
     int result = 1;
     try {
-      Terms terms = MultiFields.getTerms(reader, field);
-      if (terms != null) {
-        TermsEnum termsEnum = terms.iterator(null);
-        if (termsEnum.seekExact(term, true)) {
-          result = termsEnum.docFreq();
-        }
-      }
+      result = reader.docFreq(field, term);
     } catch (IOException e) {
       throw new RuntimeException(e);
     }

@@ -117,8 +117,8 @@ public class TermsComponent extends SearchComponent {
     boolean raw = params.getBool(TermsParams.TERMS_RAW, false);
 
 
-    final IndexReader indexReader = rb.req.getSearcher().getTopReaderContext().reader;
-    Fields lfields = MultiFields.getFields(indexReader);
+    final AtomicReader indexReader = rb.req.getSearcher().getAtomicReader();
+    Fields lfields = indexReader.fields();
 
     for (String field : fields) {
       NamedList<Integer> fieldTerms = new NamedList<Integer>();

@@ -17,7 +17,7 @@
 
 package org.apache.solr.request;
 
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.DocIdSetIterator;
@@ -235,7 +235,7 @@ class PerSegmentSingleValuedFaceting {
     BytesRef tempBR = new BytesRef();
 
     void countTerms() throws IOException {
-      si = FieldCache.DEFAULT.getTermsIndex(context.reader, fieldName);
+      si = FieldCache.DEFAULT.getTermsIndex(context.reader(), fieldName);
       // SolrCore.log.info("reader= " + reader + " FC=" + System.identityHashCode(si));
 
       if (prefix!=null) {

@@ -408,7 +408,7 @@ public class SimpleFacets {
     FieldType ft = searcher.getSchema().getFieldType(fieldName);
     NamedList<Integer> res = new NamedList<Integer>();
 
-    FieldCache.DocTermsIndex si = FieldCache.DEFAULT.getTermsIndex(new SlowMultiReaderWrapper(searcher.getIndexReader()), fieldName);
+    FieldCache.DocTermsIndex si = FieldCache.DEFAULT.getTermsIndex(searcher.getAtomicReader(), fieldName);
 
     final BytesRef prefixRef;
     if (prefix == null) {
@@ -609,7 +609,7 @@ public class SimpleFacets {
 
 
     IndexSchema schema = searcher.getSchema();
-    IndexReader r = searcher.getIndexReader();
+    AtomicReader r = searcher.getAtomicReader();
     FieldType ft = schema.getFieldType(field);
 
     boolean sortByCount = sort.equals("count") || sort.equals("true");
@@ -627,7 +627,7 @@ public class SimpleFacets {
       startTermBytes = new BytesRef(indexedPrefix);
     }
 
-    Fields fields = MultiFields.getFields(r);
+    Fields fields = r.fields();
     Terms terms = fields==null ? null : fields.terms(field);
     TermsEnum termsEnum = null;
     SolrIndexSearcher.DocsEnumState deState = null;
@@ -673,7 +673,7 @@ public class SimpleFacets {
         if (deState==null) {
           deState = new SolrIndexSearcher.DocsEnumState();
           deState.fieldName = field;
-          deState.liveDocs = MultiFields.getLiveDocs(r);
+          deState.liveDocs = r.getLiveDocs();
           deState.termsEnum = termsEnum;
           deState.docsEnum = docsEnum;
         }

@@ -19,7 +19,6 @@ package org.apache.solr.request;
 
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.index.DocTermOrds;
-import org.apache.lucene.index.SlowMultiReaderWrapper;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.TermQuery;
@@ -175,7 +174,7 @@ public class UnInvertedField extends DocTermOrds {
     final String prefix = TrieField.getMainValuePrefix(searcher.getSchema().getFieldType(field));
     this.searcher = searcher;
     try {
-      uninvert(new SlowMultiReaderWrapper(searcher.getIndexReader()), prefix == null ? null : new BytesRef(prefix));
+      uninvert(searcher.getAtomicReader(), prefix == null ? null : new BytesRef(prefix));
     } catch (IllegalStateException ise) {
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, ise.getMessage());
     }
@@ -227,7 +226,7 @@ public class UnInvertedField extends DocTermOrds {
     int startTerm = 0;
     int endTerm = numTermsInField;  // one past the end
 
-    TermsEnum te = getOrdTermsEnum(new SlowMultiReaderWrapper(searcher.getIndexReader()));
+    TermsEnum te = getOrdTermsEnum(searcher.getAtomicReader());
     if (prefix != null && prefix.length() > 0) {
       final BytesRef prefixBr = new BytesRef(prefix);
       if (te.seekCeil(prefixBr, true) == TermsEnum.SeekStatus.END) {
@@ -485,7 +484,7 @@ public class UnInvertedField extends DocTermOrds {
       for (String f : facet) {
        SchemaField facet_sf = searcher.getSchema().getField(f);
        try {
-         si = FieldCache.DEFAULT.getTermsIndex(new SlowMultiReaderWrapper(searcher.getIndexReader()), f);
+         si = FieldCache.DEFAULT.getTermsIndex(searcher.getAtomicReader(), f);
        }
        catch (IOException e) {
          throw new RuntimeException("failed to open field cache for: " + f, e);
@@ -497,7 +496,7 @@ public class UnInvertedField extends DocTermOrds {
     final int[] index = this.index;
     final int[] counts = new int[numTermsInField];//keep track of the number of times we see each word in the field for all the documents in the docset
 
-    TermsEnum te = getOrdTermsEnum(new SlowMultiReaderWrapper(searcher.getIndexReader()));
+    TermsEnum te = getOrdTermsEnum(searcher.getAtomicReader());
 
     boolean doNegative = false;
     if (finfo.length == 0) {

@@ -19,6 +19,7 @@ package org.apache.solr.response.transform;
 import java.io.IOException;
 import java.util.Map;
 
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
@@ -76,7 +77,7 @@ public class ValueSourceAugmenter extends DocTransformer
 
   Map fcontext;
   SolrIndexSearcher searcher;
-  IndexReader.AtomicReaderContext[] readerContexts;
+  AtomicReaderContext[] readerContexts;
   FunctionValues docValuesArr[];
 
 
@@ -88,7 +89,7 @@ public class ValueSourceAugmenter extends DocTransformer
 
       // TODO: calculate this stuff just once across diff functions
       int idx = ReaderUtil.subIndex(docid, readerContexts);
-      IndexReader.AtomicReaderContext rcontext = readerContexts[idx];
+      AtomicReaderContext rcontext = readerContexts[idx];
       FunctionValues values = docValuesArr[idx];
       if (values == null) {
         docValuesArr[idx] = values = valueSource.getValues(fcontext, rcontext);

@@ -18,6 +18,7 @@
 package org.apache.solr.schema;
 
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.SortField;
@@ -170,8 +171,8 @@ class BoolFieldSource extends ValueSource {
 
 
   @Override
-  public FunctionValues getValues(Map context, IndexReader.AtomicReaderContext readerContext) throws IOException {
-    final FieldCache.DocTermsIndex sindex = FieldCache.DEFAULT.getTermsIndex(readerContext.reader, field);
+  public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
+    final FieldCache.DocTermsIndex sindex = FieldCache.DEFAULT.getTermsIndex(readerContext.reader(), field);
 
     // figure out what ord maps to true
     int nord = sindex.numOrd();

@@ -17,7 +17,7 @@
 
 package org.apache.solr.schema;
 
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SortField;

@@ -19,7 +19,7 @@ package org.apache.solr.schema;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.valuesource.VectorValueSource;
@@ -373,7 +373,7 @@ class SpatialDistanceQuery extends Query {
 
     @Override
     public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
-      return ((SpatialScorer)scorer(context, true, true, context.reader.getLiveDocs())).explain(doc);
+      return ((SpatialScorer)scorer(context, true, true, context.reader().getLiveDocs())).explain(doc);
     }
   }
 
@@ -405,7 +405,7 @@ class SpatialDistanceQuery extends Query {
       super(w);
       this.weight = w;
      this.qWeight = qWeight;
-     this.reader = readerContext.reader;
+     this.reader = readerContext.reader();
      this.maxDoc = reader.maxDoc();
      this.liveDocs = acceptDocs;
      latVals = latSource.getValues(weight.latContext, readerContext);

@@ -21,8 +21,8 @@ import java.io.IOException;
 import java.util.Map;
 
 import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.docvalues.IntDocValues;
@@ -80,7 +80,7 @@ public class RandomSortField extends FieldType {
    * Using dynamic fields, you can force the random order to change
    */
   private static int getSeed(String fieldName, AtomicReaderContext context) {
-    final IndexReader top = ReaderUtil.getTopLevelContext(context).reader;
+    final DirectoryReader top = (DirectoryReader) ReaderUtil.getTopLevelContext(context).reader();
     // calling getVersion() on a segment will currently give you a null pointer exception, so
     // we use the top-level reader.
     return fieldName.hashCode() + context.docBase + (int)top.getVersion();

@@ -28,7 +28,7 @@ import org.apache.lucene.util.UnicodeUtil;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueDouble;
 import org.apache.solr.search.QParser;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexableField;
 import org.apache.solr.util.NumberUtils;
 import org.apache.solr.response.TextResponseWriter;

@@ -28,7 +28,7 @@ import org.apache.lucene.util.UnicodeUtil;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueFloat;
 import org.apache.solr.search.QParser;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexableField;
 import org.apache.solr.util.NumberUtils;
 import org.apache.solr.response.TextResponseWriter;

@@ -28,7 +28,7 @@ import org.apache.lucene.util.UnicodeUtil;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueInt;
 import org.apache.solr.search.QParser;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexableField;
 import org.apache.solr.util.NumberUtils;
 import org.apache.solr.response.TextResponseWriter;

@@ -28,7 +28,7 @@ import org.apache.lucene.util.UnicodeUtil;
 import org.apache.lucene.util.mutable.MutableValue;
 import org.apache.lucene.util.mutable.MutableValueLong;
 import org.apache.solr.search.QParser;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexableField;
 import org.apache.solr.util.NumberUtils;
 import org.apache.solr.response.TextResponseWriter;

@@ -17,7 +17,7 @@
 
 package org.apache.solr.schema;
 
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.docvalues.StringIndexDocValues;
 import org.apache.lucene.queries.function.valuesource.FieldCacheSource;

@@ -18,6 +18,7 @@
 package org.apache.solr.search;
 
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.search.BitsFilteredDocIdSet;
 import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.Filter;
@@ -247,8 +248,8 @@ public class BitDocSet extends DocSetBase {
 
     return new Filter() {
       @Override
-      public DocIdSet getDocIdSet(final IndexReader.AtomicReaderContext context, final Bits acceptDocs) throws IOException {
-        IndexReader reader = context.reader;
+      public DocIdSet getDocIdSet(final AtomicReaderContext context, final Bits acceptDocs) throws IOException {
+        IndexReader reader = context.reader();
 
         if (context.isTopLevel) {
           return BitsFilteredDocIdSet.wrap(bs, acceptDocs);

@@ -19,6 +19,7 @@ package org.apache.solr.search;
 
 
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.Scorer;
 
@@ -31,7 +32,7 @@ public class DelegatingCollector extends Collector {
 
   protected Collector delegate;
   protected Scorer scorer;
-  protected IndexReader.AtomicReaderContext context;
+  protected AtomicReaderContext context;
   protected int docBase;
 
   public Collector getDelegate() {
@@ -62,7 +63,7 @@ public class DelegatingCollector extends Collector {
   }
 
   @Override
-  public void setNextReader(IndexReader.AtomicReaderContext context) throws IOException {
+  public void setNextReader(AtomicReaderContext context) throws IOException {
     this.context = context;
     this.docBase = context.docBase;
     delegate.setNextReader(context);

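Collectors are the other big client of AtomicReaderContext: the search loop calls setNextReader() once per segment, and per-document ids arrive segment-relative, so docBase is needed to map them to index-wide ids. A minimal collector against the new signature, assuming the Lucene 4.0-era Collector API used throughout this commit (the class is illustrative):

    import java.io.IOException;

    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.search.Collector;
    import org.apache.lucene.search.Scorer;

    // Illustrative: counts hits while tracking the per-segment doc base,
    // mirroring DelegatingCollector/DocSetCollector in this commit.
    final class CountingCollectorSketch extends Collector {
      int count;
      int docBase;

      @Override
      public void setScorer(Scorer scorer) throws IOException {}

      @Override
      public void collect(int doc) throws IOException {
        // docBase + doc would be the index-wide document id.
        count++;
      }

      @Override
      public void setNextReader(AtomicReaderContext context) throws IOException {
        docBase = context.docBase;
      }

      @Override
      public boolean acceptsDocsOutOfOrder() {
        return true;
      }
    }
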
@@ -25,7 +25,7 @@ import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.BitsFilteredDocIdSet;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 
 import java.io.IOException;
 
@@ -273,7 +273,7 @@ abstract class DocSetBase implements DocSet {
     return new Filter() {
       @Override
       public DocIdSet getDocIdSet(final AtomicReaderContext context, final Bits acceptDocs) throws IOException {
-        IndexReader reader = context.reader;
+        IndexReader reader = context.reader();
 
         if (context.isTopLevel) {
           return BitsFilteredDocIdSet.wrap(bs, acceptDocs);

@@ -1,95 +1,96 @@
 package org.apache.solr.search;
 
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
  * The ASF licenses this file to You under the Apache License, Version 2.0
  * (the "License"); you may not use this file except in compliance with
  * the License.  You may obtain a copy of the License at
  *
  *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.util.OpenBitSet;
 
 import java.io.IOException;
 
 /**
  *
  */
 
 public class DocSetCollector extends Collector {
   int pos=0;
   OpenBitSet bits;
   final int maxDoc;
   final int smallSetSize;
   int base;
 
   // in case there aren't that many hits, we may not want a very sparse
   // bit array.  Optimistically collect the first few docs in an array
   // in case there are only a few.
   final int[] scratch;
 
   public DocSetCollector(int smallSetSize, int maxDoc) {
     this.smallSetSize = smallSetSize;
     this.maxDoc = maxDoc;
     this.scratch = new int[smallSetSize];
   }
 
   @Override
   public void collect(int doc) throws IOException {
     doc += base;
     // optimistically collect the first docs in an array
     // in case the total number will be small enough to represent
     // as a small set like SortedIntDocSet instead...
     // Storing in this array will be quicker to convert
     // than scanning through a potentially huge bit vector.
     // FUTURE: when search methods all start returning docs in order, maybe
     // we could have a ListDocSet() and use the collected array directly.
     if (pos < scratch.length) {
       scratch[pos]=doc;
     } else {
       // this conditional could be removed if BitSet was preallocated, but that
       // would take up more memory, and add more GC time...
       if (bits==null) bits = new OpenBitSet(maxDoc);
       bits.fastSet(doc);
     }
 
     pos++;
   }
 
   public DocSet getDocSet() {
     if (pos<=scratch.length) {
       // assumes docs were collected in sorted order!
       return new SortedIntDocSet(scratch, pos);
     } else {
       // set the bits for ids that were collected in the array
       for (int i=0; i<scratch.length; i++) bits.fastSet(scratch[i]);
       return new BitDocSet(bits,pos);
     }
   }
 
   @Override
   public void setScorer(Scorer scorer) throws IOException {
   }
 
   @Override
-  public void setNextReader(IndexReader.AtomicReaderContext context) throws IOException {
+  public void setNextReader(AtomicReaderContext context) throws IOException {
     this.base = context.docBase;
   }
 
   @Override
   public boolean acceptsDocsOutOfOrder() {
     return false;
   }
 }

@@ -1,67 +1,68 @@
 package org.apache.solr.search;
 
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.util.OpenBitSet;
 
 import java.io.IOException;
 
 /**
  *
  */
 public class DocSetDelegateCollector extends DocSetCollector {
   final Collector collector;
 
   public DocSetDelegateCollector(int smallSetSize, int maxDoc, Collector collector) {
     super(smallSetSize, maxDoc);
     this.collector = collector;
   }
 
   @Override
   public void collect(int doc) throws IOException {
     collector.collect(doc);
 
     doc += base;
     // optimistically collect the first docs in an array
     // in case the total number will be small enough to represent
     // as a small set like SortedIntDocSet instead...
     // Storing in this array will be quicker to convert
     // than scanning through a potentially huge bit vector.
     // FUTURE: when search methods all start returning docs in order, maybe
     // we could have a ListDocSet() and use the collected array directly.
     if (pos < scratch.length) {
       scratch[pos]=doc;
     } else {
       // this conditional could be removed if BitSet was preallocated, but that
       // would take up more memory, and add more GC time...
       if (bits==null) bits = new OpenBitSet(maxDoc);
       bits.fastSet(doc);
     }
 
     pos++;
   }
 
   @Override
   public DocSet getDocSet() {
     if (pos<=scratch.length) {
       // assumes docs were collected in sorted order!
       return new SortedIntDocSet(scratch, pos);
     } else {
       // set the bits for ids that were collected in the array
       for (int i=0; i<scratch.length; i++) bits.fastSet(scratch[i]);
       return new BitDocSet(bits,pos);
     }
   }
 
   @Override
   public void setScorer(Scorer scorer) throws IOException {
     collector.setScorer(scorer);
   }
 
   @Override
-  public void setNextReader(IndexReader.AtomicReaderContext context) throws IOException {
+  public void setNextReader(AtomicReaderContext context) throws IOException {
     collector.setNextReader(context);
     this.base = context.docBase;
   }
 }

@@ -1,69 +1,70 @@
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
  * The ASF licenses this file to You under the Apache License, Version 2.0
  * (the "License"); you may not use this file except in compliance with
  * the License.  You may obtain a copy of the License at
  *
  *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
 package org.apache.solr.search;
 
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.ValueSourceScorer;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.solr.search.function.ValueSourceRangeFilter;
 
 import java.io.IOException;
 import java.util.Map;
 
 // This class works as either a normal constant score query, or as a PostFilter using a collector
 public class FunctionRangeQuery extends SolrConstantScoreQuery implements PostFilter {
   final ValueSourceRangeFilter rangeFilt;
 
   public FunctionRangeQuery(ValueSourceRangeFilter filter) {
     super(filter);
     this.rangeFilt = filter;
   }
 
   @Override
   public DelegatingCollector getFilterCollector(IndexSearcher searcher) {
     Map fcontext = ValueSource.newContext(searcher);
     return new FunctionRangeCollector(fcontext);
   }
 
   class FunctionRangeCollector extends DelegatingCollector {
     final Map fcontext;
     ValueSourceScorer scorer;
     int maxdoc;
 
     public FunctionRangeCollector(Map fcontext) {
       this.fcontext = fcontext;
     }
 
     @Override
     public void collect(int doc) throws IOException {
       if (doc<maxdoc && scorer.matches(doc)) {
         delegate.collect(doc);
       }
     }
 
     @Override
-    public void setNextReader(IndexReader.AtomicReaderContext context) throws IOException {
-      maxdoc = context.reader.maxDoc();
+    public void setNextReader(AtomicReaderContext context) throws IOException {
+      maxdoc = context.reader().maxDoc();
       FunctionValues dv = rangeFilt.getValueSource().getValues(fcontext, context);
-      scorer = dv.getRangeScorer(context.reader, rangeFilt.getLowerVal(), rangeFilt.getUpperVal(), rangeFilt.isIncludeLower(), rangeFilt.isIncludeUpper());
+      scorer = dv.getRangeScorer(context.reader(), rangeFilt.getLowerVal(), rangeFilt.getUpperVal(), rangeFilt.isIncludeLower(), rangeFilt.isIncludeUpper());
       super.setNextReader(context);
     }
   }
 }

@@ -188,7 +188,7 @@ class JoinQuery extends Query {
 
 
     @Override
-    public Scorer scorer(IndexReader.AtomicReaderContext context, boolean scoreDocsInOrder,
+    public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
         boolean topScorer, Bits acceptDocs) throws IOException {
       if (filter == null) {
         boolean debug = rb != null && rb.isDebug();
@@ -261,8 +261,8 @@ class JoinQuery extends Query {
         fastForRandomSet = new HashDocSet(sset.getDocs(), 0, sset.size());
       }
 
-      Fields fromFields = MultiFields.getFields(fromSearcher.getIndexReader());
-      Fields toFields = fromSearcher==toSearcher ? fromFields : MultiFields.getFields(toSearcher.getIndexReader());
+      Fields fromFields = fromSearcher.getAtomicReader().fields();
+      Fields toFields = fromSearcher==toSearcher ? fromFields : toSearcher.getAtomicReader().fields();
       if (fromFields == null) return DocSet.EMPTY;
       Terms terms = fromFields.terms(fromField);
       Terms toTerms = toFields.terms(toField);
@@ -284,8 +284,8 @@ class JoinQuery extends Query {
         }
       }
 
-      Bits fromLiveDocs = MultiFields.getLiveDocs(fromSearcher.getIndexReader());
-      Bits toLiveDocs = fromSearcher == toSearcher ? fromLiveDocs : MultiFields.getLiveDocs(toSearcher.getIndexReader());
+      Bits fromLiveDocs = fromSearcher.getAtomicReader().getLiveDocs();
+      Bits toLiveDocs = fromSearcher == toSearcher ? fromLiveDocs : toSearcher.getAtomicReader().getLiveDocs();
 
       fromDeState = new SolrIndexSearcher.DocsEnumState();
       fromDeState.fieldName = fromField;
@@ -456,8 +456,8 @@ class JoinQuery extends Query {
     }
 
     @Override
-    public Explanation explain(IndexReader.AtomicReaderContext context, int doc) throws IOException {
-      Scorer scorer = scorer(context, true, false, context.reader.getLiveDocs());
+    public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
+      Scorer scorer = scorer(context, true, false, context.reader().getLiveDocs());
       boolean exists = scorer.advance(doc) == doc;
 
       ComplexExplanation result = new ComplexExplanation();

@@ -17,8 +17,8 @@
 
 package org.apache.solr.search;
 
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.FieldComparator;
 import org.apache.lucene.search.FieldComparatorSource;
@@ -118,7 +118,7 @@ class TermOrdValComparator_SML extends FieldComparator<Comparable> {
 
     @Override
     public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
-      return TermOrdValComparator_SML.createComparator(context.reader, this);
+      return TermOrdValComparator_SML.createComparator(context.reader(), this);
     }
 
     // Base class for specialized (per bit width of the
@@ -159,7 +159,7 @@ class TermOrdValComparator_SML extends FieldComparator<Comparable> {
 
     @Override
     public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
-      return TermOrdValComparator_SML.createComparator(context.reader, parent);
+      return TermOrdValComparator_SML.createComparator(context.reader(), parent);
     }
 
     @Override
@@ -432,7 +432,7 @@ class TermOrdValComparator_SML extends FieldComparator<Comparable> {
     }
   }
 
-  public static FieldComparator createComparator(IndexReader reader, TermOrdValComparator_SML parent) throws IOException {
+  public static FieldComparator createComparator(AtomicReader reader, TermOrdValComparator_SML parent) throws IOException {
     parent.termsIndex = FieldCache.DEFAULT.getTermsIndex(reader, parent.field);
     final PackedInts.Reader docToOrd = parent.termsIndex.getDocToOrd();
     PerSegmentComparator perSegComp = null;

@@ -4,7 +4,7 @@ import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.*;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.solr.common.SolrException;
 
 import java.io.IOException;
@@ -127,7 +127,7 @@ public class SolrConstantScoreQuery extends ConstantScoreQuery implements ExtendedQuery {
     @Override
     public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
 
-      ConstantScorer cs = new ConstantScorer(context, this, queryWeight, context.reader.getLiveDocs());
+      ConstantScorer cs = new ConstantScorer(context, this, queryWeight, context.reader().getLiveDocs());
       boolean exists = cs.docIdSetIterator.advance(doc) == doc;
 
       ComplexExplanation result = new ComplexExplanation();

@@ -22,8 +22,8 @@ import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 
 import java.util.Map;
 import java.io.IOException;

@ -31,7 +31,7 @@ import org.apache.lucene.document.LazyDocument;
|
|||
import org.apache.lucene.document.NumericField;
|
||||
import org.apache.lucene.document.TextField;
|
||||
import org.apache.lucene.index.*;
|
||||
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
|
||||
import org.apache.lucene.index.AtomicReaderContext;
|
||||
import org.apache.lucene.search.*;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.FSDirectory;
|
||||
|
@ -81,7 +81,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
|
|||
private long openTime = System.currentTimeMillis();
|
||||
private long registerTime = 0;
|
||||
private long warmupTime = 0;
|
||||
private final IndexReader reader;
|
||||
private final DirectoryReader reader;
|
||||
private final boolean closeReader;
|
||||
|
||||
private final int queryResultWindowSize;
|
||||
|
@ -108,16 +108,19 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
|
|||
private final Collection<String> fieldNames;
|
||||
private Collection<String> storedHighlightFieldNames;
|
||||
private DirectoryFactory directoryFactory;
|
||||
|
||||
private final AtomicReader atomicReader;
|
||||
|
||||
public SolrIndexSearcher(SolrCore core, String path, IndexSchema schema, SolrIndexConfig config, String name, boolean enableCache, DirectoryFactory directoryFactory) throws IOException {
|
||||
// we don't need to reserve the directory because we get it from the factory
|
||||
this(core, schema,name, core.getIndexReaderFactory().newReader(directoryFactory.get(path, config.lockType)), true, enableCache, false, directoryFactory);
|
||||
}
|
||||
|
||||
public SolrIndexSearcher(SolrCore core, IndexSchema schema, String name, IndexReader r, boolean closeReader, boolean enableCache, boolean reserveDirectory, DirectoryFactory directoryFactory) {
|
||||
public SolrIndexSearcher(SolrCore core, IndexSchema schema, String name, DirectoryReader r, boolean closeReader, boolean enableCache, boolean reserveDirectory, DirectoryFactory directoryFactory) throws IOException {
|
||||
super(r);
|
||||
this.directoryFactory = directoryFactory;
|
||||
this.reader = getIndexReader();
|
||||
this.reader = r;
|
||||
this.atomicReader = SlowCompositeReaderWrapper.wrap(r);
|
||||
this.core = core;
|
||||
this.schema = schema;
|
||||
this.name = "Searcher@" + Integer.toHexString(hashCode()) + (name!=null ? " "+name : "");
|
||||
|
@ -184,7 +187,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
|
|||
optimizer = solrConfig.filtOptEnabled ? new LuceneQueryOptimizer(solrConfig.filtOptCacheSize,solrConfig.filtOptThreshold) : null;
|
||||
|
||||
fieldNames = new HashSet<String>();
|
||||
for(FieldInfo fieldInfo : ReaderUtil.getMergedFieldInfos(r)) {
|
||||
for(FieldInfo fieldInfo : atomicReader.getFieldInfos()) {
|
||||
fieldNames.add(fieldInfo.name);
|
||||
}
|
||||
|
||||
|
@ -208,6 +211,16 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
|
|||
public final int docFreq(Term term) throws IOException {
|
||||
return reader.docFreq(term);
|
||||
}
|
||||
|
||||
public final AtomicReader getAtomicReader() {
|
||||
return atomicReader;
|
||||
}
|
||||
|
||||
@Override
|
||||
public final DirectoryReader getIndexReader() {
|
||||
assert reader == super.getIndexReader();
|
||||
return reader;
|
||||
}
|
||||
|
||||
/** Register sub-objects such as caches
|
||||
*/
|
||||
|
@ -556,7 +569,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
|
|||
* @return the first document number containing the term
|
||||
*/
|
||||
public int getFirstMatch(Term t) throws IOException {
|
||||
Fields fields = MultiFields.getFields(reader);
|
||||
Fields fields = atomicReader.fields();
|
||||
if (fields == null) return -1;
|
||||
Terms terms = fields.terms(t.field());
|
||||
if (terms == null) return -1;
|
||||
|
@@ -565,7 +578,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
     if (!termsEnum.seekExact(termBytes, false)) {
       return -1;
     }
-    DocsEnum docs = termsEnum.docs(MultiFields.getLiveDocs(reader), null, false);
+    DocsEnum docs = termsEnum.docs(atomicReader.getLiveDocs(), null, false);
     if (docs == null) return -1;
     int id = docs.nextDoc();
     return id == DocIdSetIterator.NO_MORE_DOCS ? -1 : id;
@@ -582,7 +595,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
 
     for (int i=0; i<leaves.length; i++) {
       final AtomicReaderContext leaf = leaves[i];
-      final IndexReader reader = leaf.reader;
+      final AtomicReader reader = leaf.reader();
 
       final Fields fields = reader.fields();
       if (fields == null) continue;
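The same leaf.reader() migration repeats in the hunks below; a generic sketch of the per-segment loop, with illustrative names only:

    import java.io.IOException;
    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.Fields;

    class PerLeafSketch {
      static void walk(AtomicReaderContext[] leaves) throws IOException {
        for (AtomicReaderContext leaf : leaves) {
          AtomicReader reader = leaf.reader();   // was: leaf.reader (public field)
          Fields fields = reader.fields();
          if (fields == null) continue;          // segment has no postings
          // per-segment doc ids are leaf-relative; add leaf.docBase for global ids
        }
      }
    }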
@@ -736,7 +749,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
 
     for (int i=0; i<leaves.length; i++) {
       final AtomicReaderContext leaf = leaves[i];
-      final IndexReader reader = leaf.reader;
+      final AtomicReader reader = leaf.reader();
       final Bits liveDocs = reader.getLiveDocs();   // TODO: the filter may already only have liveDocs...
       DocIdSet idSet = null;
       if (pf.filter != null) {
@@ -968,7 +981,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
 
     for (int i=0; i<leaves.length; i++) {
       final AtomicReaderContext leaf = leaves[i];
-      final IndexReader reader = leaf.reader;
+      final AtomicReader reader = leaf.reader();
       collector.setNextReader(leaf);
       Fields fields = reader.fields();
       Terms terms = fields.terms(t.field());
@@ -979,7 +992,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
       if (terms != null) {
         final TermsEnum termsEnum = terms.iterator(null);
         if (termsEnum.seekExact(termBytes, false)) {
-          docsEnum = termsEnum.docs(MultiFields.getLiveDocs(reader), null, false);
+          docsEnum = termsEnum.docs(liveDocs, null, false);
         }
       }
 
@@ -1768,7 +1781,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
       while (doc>=end) {
         AtomicReaderContext leaf = leafContexts[readerIndex++];
         base = leaf.docBase;
-        end = base + leaf.reader.maxDoc();
+        end = base + leaf.reader().maxDoc();
         topCollector.setNextReader(leaf);
         // we should never need to set the scorer given the settings for the collector
       }
@@ -2173,7 +2186,7 @@ class FilterImpl extends Filter {
         iterators.add(iter);
       }
       for (Weight w : weights) {
-        Scorer scorer = w.scorer(context, true, false, context.reader.getLiveDocs());
+        Scorer scorer = w.scorer(context, true, false, context.reader().getLiveDocs());
         if (scorer == null) return null;
         iterators.add(scorer);
       }

@@ -24,7 +24,7 @@ import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 
 import java.io.IOException;
 
@@ -658,7 +658,7 @@ public class SortedIntDocSet extends DocSetBase {
 
     @Override
     public DocIdSet getDocIdSet(final AtomicReaderContext context, final Bits acceptDocs) throws IOException {
-      IndexReader reader = context.reader;
+      IndexReader reader = context.reader();
 
       final int base = context.docBase;
       final int maxDoc = reader.maxDoc();

@@ -16,7 +16,7 @@
  */
 package org.apache.solr.search;
 
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.function.BoostedQuery;
 import org.apache.lucene.queries.function.FunctionValues;

@@ -32,7 +32,7 @@ import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.docvalues.FloatDocValues;
@@ -80,7 +80,7 @@ public class FileFloatSource extends ValueSource {
     final int off = readerContext.docBase;
     IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(readerContext);
 
-    final float[] arr = getCachedFloats(topLevelContext.reader);
+    final float[] arr = getCachedFloats(topLevelContext.reader());
     return new FloatDocValues(this) {
       @Override
       public float floatVal(int doc) {

@@ -22,7 +22,7 @@ import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.BitsFilteredDocIdSet;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.util.Bits;
 import org.apache.solr.search.SolrFilter;
 
@@ -78,7 +78,7 @@ public class ValueSourceRangeFilter extends SolrFilter {
     return BitsFilteredDocIdSet.wrap(new DocIdSet() {
       @Override
       public DocIdSetIterator iterator() throws IOException {
-        return valueSource.getValues(context, readerContext).getRangeScorer(readerContext.reader, lowerVal, upperVal, includeLower, includeUpper);
+        return valueSource.getValues(context, readerContext).getRangeScorer(readerContext.reader(), lowerVal, upperVal, includeLower, includeUpper);
       }
       @Override
       public Bits bits() throws IOException {

@@ -16,7 +16,7 @@ package org.apache.solr.search.function.distance;
  * limitations under the License.
  */
 
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.spatial.geohash.GeoHashUtils;

@@ -21,7 +21,7 @@ import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.docvalues.DoubleDocValues;
 import org.apache.lucene.spatial.DistanceUtils;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.spatial.geohash.GeoHashUtils;
 

@@ -16,7 +16,7 @@ package org.apache.solr.search.function.distance;
  * limitations under the License.
  */
 
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.docvalues.DoubleDocValues;

@@ -16,7 +16,7 @@ package org.apache.solr.search.function.distance;
  * limitations under the License.
  */
 
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.docvalues.DoubleDocValues;

@@ -17,7 +17,7 @@ package org.apache.solr.search.function.distance;
  * limitations under the License.
  */
 
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.docvalues.FloatDocValues;

@@ -16,7 +16,7 @@ package org.apache.solr.search.function.distance;
  * limitations under the License.
  */
 
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.docvalues.DoubleDocValues;

@@ -1,76 +1,76 @@
 package org.apache.solr.search.grouping.collector;
 
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
  * The ASF licenses this file to You under the Apache License, Version 2.0
  * (the "License"); you may not use this file except in compliance with
  * the License.  You may obtain a copy of the License at
  *
  *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.Scorer;
 import org.apache.solr.search.DocSet;
 
 import java.io.IOException;
 
 /**
  * A collector that filters incoming doc ids that are not in the filter.
  *
  * @lucene.experimental
  */
 public class FilterCollector extends Collector {
 
   private final DocSet filter;
   private final Collector delegate;
   private int docBase;
   private int matches;
 
   public FilterCollector(DocSet filter, Collector delegate) throws IOException {
     this.filter = filter;
     this.delegate = delegate;
   }
 
   public void setScorer(Scorer scorer) throws IOException {
     delegate.setScorer(scorer);
   }
 
   public void collect(int doc) throws IOException {
     matches++;
     if (filter.exists(doc + docBase)) {
       delegate.collect(doc);
     }
   }
 
-  public void setNextReader(IndexReader.AtomicReaderContext context) throws IOException {
+  public void setNextReader(AtomicReaderContext context) throws IOException {
     this.docBase = context.docBase;
     delegate.setNextReader(context);
   }
 
   public boolean acceptsDocsOutOfOrder() {
     return delegate.acceptsDocsOutOfOrder();
   }
 
   public int getMatches() {
     return matches;
   }
 
   /**
    * Returns the delegate collector
    *
    * @return the delegate collector
    */
   public Collector getDelegate() {
     return delegate;
   }
 }

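A hypothetical usage of this collector, wrapping a standard top-docs collector so only hits present in a DocSet reach it; the searcher, query, and docSet values are assumed:

    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TopScoreDocCollector;
    import org.apache.solr.search.DocSet;
    import org.apache.solr.search.grouping.collector.FilterCollector;

    class FilterCollectorUsage {
      static void example(IndexSearcher searcher, Query query, DocSet docSet) throws Exception {
        TopScoreDocCollector top = TopScoreDocCollector.create(10, true);
        FilterCollector fc = new FilterCollector(docSet, top);  // drops hits outside docSet
        searcher.search(query, fc);
        // fc.getMatches() counts docs offered to the filter; top holds the surviving hits
      }
    }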
@@ -17,8 +17,7 @@
 
 package org.apache.solr.update;
 
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.store.Directory;
+import org.apache.lucene.index.DirectoryReader;
 import org.apache.solr.request.SolrQueryRequest;
 
 /**
@@ -28,9 +27,9 @@ import org.apache.solr.request.SolrQueryRequest;
  *
  */
 public class MergeIndexesCommand extends UpdateCommand {
-  public IndexReader[] readers;
+  public DirectoryReader[] readers;
 
-  public MergeIndexesCommand(IndexReader[] readers, SolrQueryRequest req) {
+  public MergeIndexesCommand(DirectoryReader[] readers, SolrQueryRequest req) {
     super(req);
     this.readers = readers;
   }

@@ -19,7 +19,7 @@ package org.apache.solr.core;
 import java.io.File;
 import java.io.IOException;
 
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.store.Directory;
 import org.apache.solr.SolrTestCaseJ4;
 import org.junit.BeforeClass;
@@ -61,9 +61,9 @@ public class AlternateDirectoryTest extends SolrTestCaseJ4 {
     static volatile boolean newReaderCalled = false;
 
     @Override
-    public IndexReader newReader(Directory indexDir) throws IOException {
+    public DirectoryReader newReader(Directory indexDir) throws IOException {
       TestIndexReaderFactory.newReaderCalled = true;
-      return IndexReader.open(indexDir);
+      return DirectoryReader.open(indexDir);
     }
   }
 

@@ -21,7 +21,6 @@ import java.util.Locale;
 import java.util.Random;
 
 import org.apache.lucene.index.DocTermOrds;
-import org.apache.lucene.index.SlowMultiReaderWrapper;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.util.BytesRef;
@@ -81,7 +80,7 @@ public class TestFaceting extends SolrTestCaseJ4 {
 
       assertEquals(size, uif.getNumTerms());
 
-      TermsEnum te = uif.getOrdTermsEnum(new SlowMultiReaderWrapper(req.getSearcher().getIndexReader()));
+      TermsEnum te = uif.getOrdTermsEnum(req.getSearcher().getAtomicReader());
       assertEquals(size == 0, te == null);
 
       Random r = new Random(size);

@@ -25,7 +25,7 @@ import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.FilterIndexReader;
 import org.apache.lucene.util.ReaderUtil;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.search.DocIdSet;
@@ -355,11 +355,6 @@ public class TestDocSet extends LuceneTestCase {
       return false;
     }
 
-    @Override
-    public IndexReader[] getSequentialSubReaders() {
-      return null;
-    }
-
     @Override
     public FieldInfos getFieldInfos() {
       return new FieldInfos();

@@ -16,7 +16,7 @@
  */
 package org.apache.solr.search;
 
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
@@ -78,7 +78,7 @@ public class TestIndexSearcher extends SolrTestCaseJ4 {
 
     // make sure the readers share the first segment
     // Didn't work w/ older versions of lucene2.9 going from segment -> multi
-    assertEquals(ReaderUtil.leaves(rCtx1)[0].reader, ReaderUtil.leaves(rCtx2)[0].reader);
+    assertEquals(ReaderUtil.leaves(rCtx1)[0].reader(), ReaderUtil.leaves(rCtx2)[0].reader());
 
     assertU(adoc("id","5", "v_f","3.14159"));
     assertU(adoc("id","6", "v_f","8983", "v_s1","string6"));
@@ -88,14 +88,14 @@ public class TestIndexSearcher extends SolrTestCaseJ4 {
     IndexReaderContext rCtx3 = sr3.getSearcher().getTopReaderContext();
     // make sure the readers share segments
     // assertEquals(r1.getLeafReaders()[0], r3.getLeafReaders()[0]);
-    assertEquals(ReaderUtil.leaves(rCtx2)[0].reader, ReaderUtil.leaves(rCtx3)[0].reader);
-    assertEquals(ReaderUtil.leaves(rCtx2)[1].reader, ReaderUtil.leaves(rCtx3)[1].reader);
+    assertEquals(ReaderUtil.leaves(rCtx2)[0].reader(), ReaderUtil.leaves(rCtx3)[0].reader());
+    assertEquals(ReaderUtil.leaves(rCtx2)[1].reader(), ReaderUtil.leaves(rCtx3)[1].reader());
 
     sr1.close();
     sr2.close();
 
     // should currently be 1, but this could change depending on future index management
-    int baseRefCount = rCtx3.reader.getRefCount();
+    int baseRefCount = rCtx3.reader().getRefCount();
     assertEquals(1, baseRefCount);
 
     assertU(commit());
@@ -108,12 +108,12 @@ public class TestIndexSearcher extends SolrTestCaseJ4 {
     assertU(commit());
 
     // test that reader didn't change (according to equals at least... which uses the wrapped reader)
-    assertEquals(rCtx3.reader, rCtx4.reader);
-    assertEquals(baseRefCount+1, rCtx4.reader.getRefCount());
+    assertEquals(rCtx3.reader(), rCtx4.reader());
+    assertEquals(baseRefCount+1, rCtx4.reader().getRefCount());
     sr3.close();
-    assertEquals(baseRefCount, rCtx4.reader.getRefCount());
+    assertEquals(baseRefCount, rCtx4.reader().getRefCount());
     sr4.close();
-    assertEquals(baseRefCount-1, rCtx4.reader.getRefCount());
+    assertEquals(baseRefCount-1, rCtx4.reader().getRefCount());
 
 
     SolrQueryRequest sr5 = req("q","foo");
@@ -123,8 +123,8 @@ public class TestIndexSearcher extends SolrTestCaseJ4 {
     assertU(commit());
     SolrQueryRequest sr6 = req("q","foo");
     IndexReaderContext rCtx6 = sr6.getSearcher().getTopReaderContext();
-    assertEquals(1, ReaderUtil.leaves(rCtx6)[0].reader.numDocs()); // only a single doc left in the first segment
-    assertTrue( !ReaderUtil.leaves(rCtx5)[0].reader.equals(ReaderUtil.leaves(rCtx6)[0].reader) );  // readers now different
+    assertEquals(1, ReaderUtil.leaves(rCtx6)[0].reader().numDocs()); // only a single doc left in the first segment
+    assertTrue( !ReaderUtil.leaves(rCtx5)[0].reader().equals(ReaderUtil.leaves(rCtx6)[0].reader()) );  // readers now different
 
     sr5.close();
     sr6.close();

@@ -1342,7 +1342,7 @@ public class TestRealTimeGet extends SolrTestCaseJ4 {
 
 
   // The purpose of this test is to roughly model how solr uses lucene
-  IndexReader reader;
+  DirectoryReader reader;
   @Test
   public void testStressLuceneNRT() throws Exception {
     final int commitPercent = 5 + random.nextInt(20);
@@ -1408,7 +1408,7 @@ public class TestRealTimeGet extends SolrTestCaseJ4 {
     // reader = IndexReader.open(dir);
     // make this reader an NRT reader from the start to avoid the first non-writer openIfChanged
     // to only opening at the last commit point.
-    reader = IndexReader.open(writer.w, true);
+    reader = DirectoryReader.open(writer.w, true);
 
     for (int i=0; i<nWriteThreads; i++) {
       Thread thread = new Thread("WRITER"+i) {
@@ -1424,7 +1424,7 @@ public class TestRealTimeGet extends SolrTestCaseJ4 {
             if (numCommitting.incrementAndGet() <= maxConcurrentCommits) {
               Map<Integer,DocInfo> newCommittedModel;
               long version;
-              IndexReader oldReader;
+              DirectoryReader oldReader;
 
               boolean softCommit = rand.nextInt(100) < softCommitPercent;
 
@@ -1452,12 +1452,12 @@ public class TestRealTimeGet extends SolrTestCaseJ4 {
 
               verbose("reopen start using", oldReader);
 
-              IndexReader newReader;
+              DirectoryReader newReader;
               if (softCommit) {
-                newReader = IndexReader.openIfChanged(oldReader, writer.w, true);
+                newReader = DirectoryReader.openIfChanged(oldReader, writer.w, true);
               } else {
                 // will only open to last commit
-                newReader = IndexReader.openIfChanged(oldReader);
+                newReader = DirectoryReader.openIfChanged(oldReader);
               }
 
 

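For reference, a condensed sketch of the reopen idiom this test exercises; the writer and oldReader parameters stand in for the test's fields, and the close handling is a simplification:

    import java.io.IOException;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;

    class ReopenSketch {
      static DirectoryReader maybeReopen(DirectoryReader oldReader, IndexWriter writer,
                                         boolean softCommit) throws IOException {
        DirectoryReader newReader = softCommit
            ? DirectoryReader.openIfChanged(oldReader, writer, true) // NRT: sees uncommitted docs
            : DirectoryReader.openIfChanged(oldReader);              // only the last commit point
        if (newReader == null) {
          return oldReader;    // nothing changed; keep using the old reader
        }
        oldReader.close();     // the caller owns closing the superseded reader
        return newReader;
      }
    }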
@@ -24,8 +24,8 @@ import org.apache.lucene.analysis.core.SimpleAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.search.*;
@@ -194,7 +194,7 @@ public class TestSort extends SolrTestCaseJ4 {
     iw.close();
 
 
-    IndexReader reader = IndexReader.open(dir);
+    DirectoryReader reader = DirectoryReader.open(dir);
     IndexSearcher searcher = new IndexSearcher(reader);
     // System.out.println("segments="+searcher.getIndexReader().getSequentialSubReaders().length);
     assertTrue(reader.getSequentialSubReaders().length > 1);
@@ -203,7 +203,7 @@ public class TestSort extends SolrTestCaseJ4 {
     Filter filt = new Filter() {
       @Override
       public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
-        return BitsFilteredDocIdSet.wrap(randSet(context.reader.maxDoc()), acceptDocs);
+        return BitsFilteredDocIdSet.wrap(randSet(context.reader().maxDoc()), acceptDocs);
       }
     };
 

@@ -22,7 +22,7 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.store.Directory;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.common.params.CommonParams;
@@ -256,18 +256,16 @@ public class DirectUpdateHandlerTest extends SolrTestCaseJ4 {
     assertU(commit());
 
     SolrQueryRequest sr = req("q","foo");
-    IndexReader r = sr.getSearcher().getTopReaderContext().reader;
+    DirectoryReader r = sr.getSearcher().getIndexReader();
     assertTrue(r.maxDoc() > r.numDocs());   // should have deletions
-    assertFalse(r.getTopReaderContext().isAtomic);  // more than 1 segment
     sr.close();
 
     assertU(commit("expungeDeletes","true"));
 
     sr = req("q","foo");
-    r = sr.getSearcher().getTopReaderContext().reader;
+    r = sr.getSearcher().getIndexReader();
     assertEquals(r.maxDoc(), r.numDocs());  // no deletions
     assertEquals(4,r.maxDoc());             // no dups
-    assertFalse(r.getTopReaderContext().isAtomic); //still more than 1 segment
     sr.close();
   }
 
@@ -278,7 +276,7 @@ public class DirectUpdateHandlerTest extends SolrTestCaseJ4 {
     assertU(commit()); // commit a second time to make sure index files aren't still referenced by the old searcher
 
     SolrQueryRequest sr = req();
-    IndexReader r = sr.getSearcher().getTopReaderContext().reader;
+    DirectoryReader r = sr.getSearcher().getIndexReader();
     Directory d = r.directory();
 
     log.info("FILES before addDoc="+ Arrays.asList(d.listAll()));