SOLR-12374 Add SolrCore.withSearcher(lambda accepting SolrIndexSearcher)

This commit is contained in:
David Smiley 2018-05-29 16:27:11 -04:00
parent 64e4dda64e
commit 6e0da7e2f8
25 changed files with 404 additions and 445 deletions

View File

@ -36,8 +36,6 @@ import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.search.DocList;
import org.apache.solr.search.QueryCommand;
import org.apache.solr.search.QueryResult;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.RefCounted;
import org.junit.Before;
import org.junit.Test;
@ -109,9 +107,7 @@ public class ClusteringComponentTest extends AbstractClusteringTestCase {
"val_dynamic", "quick brown fox"));
assertU("", commit());
RefCounted<SolrIndexSearcher> holder = h.getCore().getSearcher();
try {
SolrIndexSearcher srchr = holder.get();
h.getCore().withSearcher(srchr -> {
QueryResult qr = new QueryResult();
QueryCommand cmd = new QueryCommand();
cmd.setQuery(new MatchAllDocsQuery());
@ -140,9 +136,8 @@ public class ClusteringComponentTest extends AbstractClusteringTestCase {
assertNotNull("dyn copy field null", document.get("dynamic_val"));
assertNotNull("copy field null", document.get("range_facet_l"));
}
} finally {
if (null != holder) holder.decref();
}
return null;
});
}
}

View File

@ -40,8 +40,6 @@ import org.apache.solr.handler.clustering.ClusteringEngine;
import org.apache.solr.handler.clustering.SearchClusteringEngine;
import org.apache.solr.request.LocalSolrQueryRequest;
import org.apache.solr.search.DocList;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.RefCounted;
import org.carrot2.clustering.lingo.LingoClusteringAlgorithm;
import org.carrot2.core.LanguageCode;
import org.carrot2.util.attribute.AttributeUtils;
@ -449,13 +447,9 @@ public class CarrotClusteringEngineTest extends AbstractClusteringTestCase {
private List<NamedList<Object>> checkEngine(CarrotClusteringEngine engine, int expectedNumDocs,
int expectedNumClusters, Query query, SolrParams clusteringParams) throws IOException {
// Get all documents to cluster
RefCounted<SolrIndexSearcher> ref = h.getCore().getSearcher();
DocList docList;
try {
SolrIndexSearcher searcher = ref.get();
docList = searcher.getDocList(query, (Query) null, new Sort(), 0,
numberOfDocs);
return h.getCore().withSearcher(searcher -> {
DocList docList = searcher.getDocList(query, (Query) null, new Sort(), 0,
numberOfDocs);
assertEquals("docList size", expectedNumDocs, docList.matches());
ModifiableSolrParams solrParams = new ModifiableSolrParams();
@ -472,9 +466,7 @@ public class CarrotClusteringEngineTest extends AbstractClusteringTestCase {
assertEquals("number of clusters: " + results, expectedNumClusters, results.size());
checkClusters(results, false);
return results;
} finally {
ref.decref();
}
});
}
private void checkClusters(List<NamedList<Object>> results, int expectedDocCount,

View File

@ -157,6 +157,7 @@ import org.apache.solr.update.processor.UpdateRequestProcessorChain;
import org.apache.solr.update.processor.UpdateRequestProcessorChain.ProcessorInfo;
import org.apache.solr.update.processor.UpdateRequestProcessorFactory;
import org.apache.solr.util.DefaultSolrThreadFactory;
import org.apache.solr.util.IOFunction;
import org.apache.solr.util.NumberUtils;
import org.apache.solr.util.PropertiesInputStream;
import org.apache.solr.util.PropertiesOutputStream;
@ -1841,17 +1842,41 @@ public final class SolrCore implements SolrInfoBean, SolrMetricProducer, Closeab
}
/**
* Return a registered {@link RefCounted}&lt;{@link SolrIndexSearcher}&gt; with
* the reference count incremented. It <b>must</b> be decremented when no longer needed.
* This method should not be called from SolrCoreAware.inform() since it can result
* in a deadlock if useColdSearcher==false.
* If handling a normal request, the searcher should be obtained from
* Return a registered {@link RefCounted}&lt;{@link SolrIndexSearcher}&gt; with
* the reference count incremented. It <b>must</b> be decremented when no longer needed.
* This method should not be called from SolrCoreAware.inform() since it can result
* in a deadlock if useColdSearcher==false.
* If handling a normal request, the searcher should be obtained from
* {@link org.apache.solr.request.SolrQueryRequest#getSearcher()} instead.
*/
* If you still think you need to call this, consider {@link #withSearcher(IOFunction)} instead which is easier to
* use.
* @see SolrQueryRequest#getSearcher()
* @see #withSearcher(IOFunction)
*/
public RefCounted<SolrIndexSearcher> getSearcher() {
// Delegate to the main overload: forceNew=false, returnSearcher=true, no future holder.
return getSearcher(false,true,null);
}
/**
 * Executes the lambda with the {@link SolrIndexSearcher}.  This is more convenient than using
 * {@link #getSearcher()} since there is no ref-counting business to worry about.
 * Example:
 * <pre class="prettyprint">
 *   IndexReader reader = h.getCore().withSearcher(SolrIndexSearcher::getIndexReader);
 * </pre>
 * Warning: although a lambda is concise, it may be inappropriate to simply return the IndexReader because it might
 * be closed soon after this method returns; it really depends.
 *
 * @param <R> the result type produced by the lambda
 * @param lambda the function applied to the currently registered searcher
 * @return whatever the lambda returns
 * @throws IOException if the lambda throws it
 */
public <R> R withSearcher(IOFunction<SolrIndexSearcher,R> lambda) throws IOException {
  // Acquire the searcher (increments its ref count) and guarantee the matching
  // decref() runs even if the lambda throws, so the searcher can eventually close.
  final RefCounted<SolrIndexSearcher> refCounted = getSearcher();
  try {
    return lambda.apply(refCounted.get());
  } finally {
    refCounted.decref();
  }
}
/**
* Computes fingerprint of a segment and caches it only if all the version in segment are included in the fingerprint.
* We can't use computeIfAbsent as caching is conditional (as described above)

View File

@ -825,16 +825,12 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
* returns the CommitVersionInfo for the current searcher, or null on error.
*/
private CommitVersionInfo getIndexVersion() {
CommitVersionInfo v = null;
RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
try {
v = CommitVersionInfo.build(searcher.get().getIndexReader().getIndexCommit());
return core.withSearcher(searcher -> CommitVersionInfo.build(searcher.getIndexReader().getIndexCommit()));
} catch (IOException e) {
LOG.warn("Unable to get index commit: ", e);
} finally {
searcher.decref();
return null;
}
return v;
}
@Override
@ -1543,14 +1539,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
try {
initWrite();
RefCounted<SolrIndexSearcher> sref = core.getSearcher();
Directory dir;
try {
SolrIndexSearcher searcher = sref.get();
dir = searcher.getIndexReader().directory();
} finally {
sref.decref();
}
Directory dir = core.withSearcher(searcher -> searcher.getIndexReader().directory());
in = dir.openInput(fileName, IOContext.READONCE);
// if offset is mentioned move the pointer to that point
if (offset != -1) in.seek(offset);

View File

@ -168,11 +168,10 @@ public class SnapShooter {
private IndexCommit getIndexCommit() throws IOException {
IndexDeletionPolicyWrapper delPolicy = solrCore.getDeletionPolicy();
IndexCommit indexCommit = delPolicy.getLatestCommit();
if (indexCommit == null) {
indexCommit = solrCore.getSearcher().get().getIndexReader().getIndexCommit();
if (indexCommit != null) {
return indexCommit;
}
return indexCommit;
return solrCore.withSearcher(searcher -> searcher.getIndexReader().getIndexCommit());
}
private IndexCommit getIndexCommitFromName() throws IOException {

View File

@ -25,8 +25,6 @@ import org.apache.solr.core.CoreContainer;
import org.apache.solr.core.SolrCore;
import org.apache.solr.core.snapshots.SolrSnapshotManager;
import org.apache.solr.core.snapshots.SolrSnapshotMetaDataManager;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.RefCounted;
class CreateSnapshotOp implements CoreAdminHandler.CoreAdminOp {
@Override
@ -45,12 +43,7 @@ class CreateSnapshotOp implements CoreAdminHandler.CoreAdminOp {
String indexDirPath = core.getIndexDir();
IndexCommit ic = core.getDeletionPolicy().getLatestCommit();
if (ic == null) {
RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
try {
ic = searcher.get().getIndexReader().getIndexCommit();
} finally {
searcher.decref();
}
ic = core.withSearcher(searcher -> searcher.getIndexReader().getIndexCommit());
}
SolrSnapshotMetaDataManager mgr = core.getSnapshotMetaDataManager();
mgr.snapshot(commitName, indexDirPath, ic.getGeneration());

View File

@ -0,0 +1,29 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.util;
import java.io.IOException;
/**
 * A {@link java.util.function.Function} variant whose {@link #apply} method may throw an
 * {@link IOException}, allowing lambdas that wrap I/O operations.
 *
 * @param <T> the type of the input to the function
 * @param <R> the type of the result of the function
 * @see java.util.function.Function
 */
@FunctionalInterface
public interface IOFunction<T, R> {

  /**
   * Applies this function to the given argument.
   *
   * @param t the function argument
   * @return the function result
   * @throws IOException if the underlying I/O operation fails
   */
  R apply(T t) throws IOException;
}

View File

@ -43,7 +43,6 @@ import org.apache.solr.common.util.SuppressForbidden;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.core.SolrCore;
import org.apache.solr.handler.ReplicationHandler;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.update.SolrIndexWriter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -425,19 +424,16 @@ public class TestInjection {
NamedList<Object> response = leaderClient.request(new QueryRequest(params));
long leaderVersion = (long) ((NamedList)response.get("details")).get("indexVersion");
RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
try {
String localVersion = searcher.get().getIndexReader().getIndexCommit().getUserData().get(SolrIndexWriter.COMMIT_TIME_MSEC_KEY);
if (localVersion == null && leaderVersion == 0 && !core.getUpdateHandler().getUpdateLog().hasUncommittedChanges()) return true;
if (localVersion != null && Long.parseLong(localVersion) == leaderVersion && (leaderVersion >= t || i >= 6)) {
log.info("Waiting time for tlog replica to be in sync with leader: {}", System.currentTimeMillis()-currentTime);
return true;
} else {
log.debug("Tlog replica not in sync with leader yet. Attempt: {}. Local Version={}, leader Version={}", i, localVersion, leaderVersion);
Thread.sleep(500);
}
} finally {
searcher.decref();
String localVersion = core.withSearcher(searcher ->
searcher.getIndexReader().getIndexCommit().getUserData().get(SolrIndexWriter.COMMIT_TIME_MSEC_KEY));
if (localVersion == null && leaderVersion == 0 && !core.getUpdateHandler().getUpdateLog().hasUncommittedChanges())
return true;
if (localVersion != null && Long.parseLong(localVersion) == leaderVersion && (leaderVersion >= t || i >= 6)) {
log.info("Waiting time for tlog replica to be in sync with leader: {}", System.currentTimeMillis()-currentTime);
return true;
} else {
log.debug("Tlog replica not in sync with leader yet. Attempt: {}. Local Version={}, leader Version={}", i, localVersion, leaderVersion);
Thread.sleep(500);
}
}

View File

@ -33,8 +33,6 @@ import org.apache.solr.common.util.NamedList;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.IndexSchemaFactory;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.RefCounted;
import org.apache.solr.util.TestHarness;
import org.junit.BeforeClass;
@ -111,22 +109,17 @@ public class TestCodecSupport extends SolrTestCaseJ4 {
}
protected void assertCompressionMode(String expectedModeString, SolrCore core) throws IOException {
RefCounted<SolrIndexSearcher> ref = null;
SolrIndexSearcher searcher = null;
try {
ref = core.getSearcher();
searcher = ref.get();
h.getCore().withSearcher(searcher -> {
SegmentInfos infos = SegmentInfos.readLatestCommit(searcher.getIndexReader().directory());
SegmentInfo info = infos.info(infos.size() - 1).info;
assertEquals("Expecting compression mode string to be " + expectedModeString +
" but got: " + info.getAttribute(Lucene50StoredFieldsFormat.MODE_KEY) +
"\n SegmentInfo: " + info +
"\n SegmentInfos: " + infos +
"\n Codec: " + core.getCodec(),
assertEquals("Expecting compression mode string to be " + expectedModeString +
" but got: " + info.getAttribute(Lucene50StoredFieldsFormat.MODE_KEY) +
"\n SegmentInfo: " + info +
"\n SegmentInfos: " + infos +
"\n Codec: " + core.getCodec(),
expectedModeString, info.getAttribute(Lucene50StoredFieldsFormat.MODE_KEY));
} finally {
if (ref != null) ref.decref();
}
return null;
});
}
public void testCompressionMode() throws Exception {

View File

@ -17,21 +17,20 @@
package org.apache.solr.core;
import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.IdentityHashMap;
import java.util.Set;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.index.LogDocMergePolicyFactory;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.RefCounted;
import org.junit.AfterClass;
import org.junit.BeforeClass;
public class TestNRTOpen extends SolrTestCaseJ4 {
@BeforeClass
public static void beforeClass() throws Exception {
// use a filesystem, because we need to create an index, then "start up solr"
@ -53,7 +52,7 @@ public class TestNRTOpen extends SolrTestCaseJ4 {
// startup
assertNRT(1);
}
@AfterClass
public static void afterClass() throws Exception {
// ensure we clean up after ourselves, this will fire before superclass...
@ -62,7 +61,7 @@ public class TestNRTOpen extends SolrTestCaseJ4 {
System.clearProperty("solr.tests.maxBufferedDocs");
systemClearPropertySolrTestsMergePolicyFactory();
}
public void setUp() throws Exception {
super.setUp();
// delete all, then add initial doc
@ -70,83 +69,82 @@ public class TestNRTOpen extends SolrTestCaseJ4 {
assertU(adoc("foo", "bar"));
assertU(commit());
}
public void testReaderIsNRT() {
public void testReaderIsNRT() throws IOException {
// core reload
String core = h.getCore().getName();
h.getCoreContainer().reload(core);
assertNRT(1);
// add a doc and soft commit
assertU(adoc("baz", "doc"));
assertU(commit("softCommit", "true"));
assertNRT(2);
// add a doc and hard commit
assertU(adoc("bazz", "doc"));
assertU(commit());
assertNRT(3);
// add a doc and core reload
assertU(adoc("bazzz", "doc2"));
h.getCoreContainer().reload(core);
assertNRT(4);
}
public void testSharedCores() {
// clear out any junk
assertU(optimize());
Set<Object> s1 = getCoreCacheKeys();
assertEquals(1, s1.size());
// add a doc, will go in a new segment
assertU(adoc("baz", "doc"));
assertU(commit("softCommit", "true"));
Set<Object> s2 = getCoreCacheKeys();
assertEquals(2, s2.size());
assertTrue(s2.containsAll(s1));
// add two docs, will go in a new segment
assertU(adoc("foo", "doc"));
assertU(adoc("foo2", "doc"));
assertU(commit());
Set<Object> s3 = getCoreCacheKeys();
assertEquals(3, s3.size());
assertTrue(s3.containsAll(s2));
// delete a doc
assertU(delQ("foo2:doc"));
assertU(commit());
// same cores
assertEquals(s3, getCoreCacheKeys());
}
static void assertNRT(int maxDoc) {
RefCounted<SolrIndexSearcher> searcher = h.getCore().getSearcher();
try {
DirectoryReader ir = searcher.get().getRawReader();
static void assertNRT(int maxDoc) throws IOException {
h.getCore().withSearcher(searcher -> {
DirectoryReader ir = searcher.getRawReader();
assertEquals(maxDoc, ir.maxDoc());
assertTrue("expected NRT reader, got: " + ir, ir.toString().contains(":nrt"));
} finally {
searcher.decref();
}
return null;
});
}
private Set<Object> getCoreCacheKeys() {
RefCounted<SolrIndexSearcher> searcher = h.getCore().getSearcher();
Set<Object> set = Collections.newSetFromMap(new IdentityHashMap<Object,Boolean>());
try {
DirectoryReader ir = searcher.get().getRawReader();
for (LeafReaderContext context : ir.leaves()) {
set.add(context.reader().getCoreCacheHelper().getKey());
}
} finally {
searcher.decref();
return h.getCore().withSearcher(searcher -> {
Set<Object> set = Collections.newSetFromMap(new IdentityHashMap<>());
DirectoryReader ir = searcher.getRawReader();
for (LeafReaderContext context : ir.leaves()) {
set.add(context.reader().getCoreCacheHelper().getKey());
}
return set;
});
} catch (IOException e) {
throw new RuntimeException(e);
}
return set;
}
}

View File

@ -20,8 +20,6 @@ import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.params.EventParams;
import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.request.SolrRequestInfo;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.RefCounted;
import org.junit.BeforeClass;
import org.junit.Test;
@ -72,24 +70,31 @@ public class TestQuerySenderListener extends SolrTestCaseJ4 {
assertTrue("Not an instance of QuerySenderListener", newSearcherListener instanceof QuerySenderListener);
QuerySenderListener qsl = (QuerySenderListener) newSearcherListener;
RefCounted<SolrIndexSearcher> currentSearcherRef = core.getSearcher();
SolrIndexSearcher currentSearcher = currentSearcherRef.get();
qsl.newSearcher(currentSearcher, null);//test new Searcher
MockQuerySenderListenerReqHandler mock = (MockQuerySenderListenerReqHandler) core.getRequestHandler("/mock");
assertNotNull("Mock is null", mock);
String evt = mock.req.getParams().get(EventParams.EVENT);
assertNotNull("Event is null", evt);
assertTrue(evt + " is not equal to " + EventParams.FIRST_SEARCHER, evt.equals(EventParams.FIRST_SEARCHER) == true);
assertU(adoc("id", "1"));
assertU(commit());
h.getCore().withSearcher(currentSearcher -> {
qsl.newSearcher(currentSearcher, null);//test new Searcher
MockQuerySenderListenerReqHandler mock = (MockQuerySenderListenerReqHandler) core.getRequestHandler("/mock");
assertNotNull("Mock is null", mock);
{
String evt = mock.req.getParams().get(EventParams.EVENT);
assertNotNull("Event is null", evt);
assertTrue(evt + " is not equal to " + EventParams.FIRST_SEARCHER, evt.equals(EventParams.FIRST_SEARCHER) == true);
assertU(adoc("id", "1"));
assertU(commit());
}
h.getCore().withSearcher(newSearcher -> {
String evt = mock.req.getParams().get(EventParams.EVENT);
assertNotNull("Event is null", evt);
assertTrue(evt + " is not equal to " + EventParams.NEW_SEARCHER, evt.equals(EventParams.NEW_SEARCHER) == true);
return null;
});
return null;
});
RefCounted<SolrIndexSearcher> newSearcherRef = core.getSearcher();
evt = mock.req.getParams().get(EventParams.EVENT);
assertNotNull("Event is null", evt);
assertTrue(evt + " is not equal to " + EventParams.NEW_SEARCHER, evt.equals(EventParams.NEW_SEARCHER) == true);
newSearcherRef.decref();
currentSearcherRef.decref();
}
}

View File

@ -18,7 +18,6 @@ package org.apache.solr.core;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.RefCounted;
import org.junit.BeforeClass;
import org.junit.Test;
@ -66,20 +65,20 @@ public class TestQuerySenderNoQuery extends SolrTestCaseJ4 {
assertTrue("Not an instance of QuerySenderListener", newSearcherListener instanceof QuerySenderListener);
QuerySenderListener qsl = (QuerySenderListener) newSearcherListener;
RefCounted<SolrIndexSearcher> currentSearcherRef = core.getSearcher();
SolrIndexSearcher currentSearcher = currentSearcherRef.get();
SolrIndexSearcher dummy = null;
qsl.newSearcher(currentSearcher, dummy);//test first Searcher (since param is null)
MockQuerySenderListenerReqHandler mock = (MockQuerySenderListenerReqHandler) core.getRequestHandler("/mock");
assertNotNull("Mock is null", mock);
assertNull("Req (firstsearcher) is not null", mock.req);
h.getCore().withSearcher(currentSearcher -> {
SolrIndexSearcher dummy = null;
qsl.newSearcher(currentSearcher, dummy);//test first Searcher (since param is null)
MockQuerySenderListenerReqHandler mock = (MockQuerySenderListenerReqHandler) core.getRequestHandler("/mock");
assertNotNull("Mock is null", mock);
assertNull("Req (firstsearcher) is not null", mock.req);
SolrIndexSearcher newSearcher = new SolrIndexSearcher(core, core.getNewIndexDir(), core.getLatestSchema(), core.getSolrConfig().indexConfig, "testQuerySenderNoQuery", false, core.getDirectoryFactory());
SolrIndexSearcher newSearcher = new SolrIndexSearcher(core, core.getNewIndexDir(), core.getLatestSchema(), core.getSolrConfig().indexConfig, "testQuerySenderNoQuery", false, core.getDirectoryFactory());
qsl.newSearcher(newSearcher, currentSearcher); // get newSearcher.
assertNull("Req (newsearcher) is not null", mock.req);
newSearcher.close();
currentSearcherRef.decref();
qsl.newSearcher(newSearcher, currentSearcher); // get newSearcher.
assertNull("Req (newsearcher) is not null", mock.req);
newSearcher.close();
return null;
});
}
}

View File

@ -21,7 +21,6 @@ import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.SegmentInfo;
import org.apache.lucene.index.SegmentInfos;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.RefCounted;
import org.junit.BeforeClass;
@ -51,15 +50,12 @@ public class TestSimpleTextCodec extends SolrTestCaseJ4 {
assertU(add(doc("id","1", "text","textual content goes here")));
assertU(commit());
RefCounted<SolrIndexSearcher> searcherRef = h.getCore().getSearcher();
try {
SolrIndexSearcher searcher = searcherRef.get();
h.getCore().withSearcher(searcher -> {
SegmentInfos infos = SegmentInfos.readLatestCommit(searcher.getIndexReader().directory());
SegmentInfo info = infos.info(infos.size() - 1).info;
assertEquals("Unexpected segment codec", "SimpleText", info.getCodec().getName());
} finally {
searcherRef.decref();
}
return null;
});
assertQ(req("q", "id:1"),
"*[count(//doc)=1]");

View File

@ -28,9 +28,7 @@ import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.params.FacetParams;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.uninverting.DocTermOrds;
import org.apache.solr.util.RefCounted;
import org.junit.After;
import org.junit.BeforeClass;
import org.junit.Test;
@ -738,9 +736,8 @@ public class TestFaceting extends SolrTestCaseJ4 {
);
RefCounted<SolrIndexSearcher> currentSearcherRef = h.getCore().getSearcher();
try {
SolrIndexSearcher currentSearcher = currentSearcherRef.get();
h.getCore().withSearcher(currentSearcher -> {
SortedSetDocValues ui0 = DocValues.getSortedSet(currentSearcher.getSlowAtomicReader(), "f0_ws");
SortedSetDocValues ui1 = DocValues.getSortedSet(currentSearcher.getSlowAtomicReader(), "f1_ws");
SortedSetDocValues ui2 = DocValues.getSortedSet(currentSearcher.getSlowAtomicReader(), "f2_ws");
@ -900,9 +897,8 @@ public class TestFaceting extends SolrTestCaseJ4 {
, "*[count(//lst[@name='facet_fields']/lst)=10]"
, "*[count(//lst[@name='facet_fields']/lst/int)=20]"
);
} finally {
currentSearcherRef.decref();
}
return null;
});
}
}

View File

@ -16,6 +16,7 @@
*/
package org.apache.solr.request;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.text.SimpleDateFormat;
import java.util.Arrays;
@ -23,6 +24,7 @@ import java.util.Date;
import java.util.HashSet;
import java.util.Locale;
import java.util.Set;
import org.apache.lucene.util.BytesRef;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.SolrClient;
@ -39,9 +41,7 @@ import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.NumberType;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.schema.StrField;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.search.SyntaxError;
import org.apache.solr.util.RefCounted;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
@ -155,13 +155,12 @@ public class TestIntervalFaceting extends SolrTestCaseJ4 {
}
private int getNumberOfReaders() {
RefCounted<SolrIndexSearcher> searcherRef = h.getCore().getSearcher();
try {
SolrIndexSearcher searcher = searcherRef.get();
return searcher.getTopReaderContext().leaves().size();
} finally {
searcherRef.decref();
return h.getCore().withSearcher(searcher -> searcher.getTopReaderContext().leaves().size());
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Test

View File

@ -42,6 +42,7 @@ import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import com.google.common.collect.ImmutableMap;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DoublePoint;
import org.apache.lucene.document.FloatPoint;
@ -50,6 +51,7 @@ import org.apache.lucene.document.LongPoint;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexableField;
@ -65,16 +67,12 @@ import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.index.SlowCompositeReaderWrapper;
import org.apache.solr.schema.IndexSchema.DynamicField;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.search.SolrQueryParser;
import org.apache.solr.util.DateMathParser;
import org.apache.solr.util.RefCounted;
import org.junit.After;
import org.junit.BeforeClass;
import org.junit.Test;
import com.google.common.collect.ImmutableMap;
/** Tests for PointField functionality */
public class TestPointFields extends SolrTestCaseJ4 {
@ -2276,15 +2274,11 @@ public class TestPointFields extends SolrTestCaseJ4 {
assertU(commit());
assertQ(req("q", "*:*"), "//*[@numFound='10']");
assertQ("Can't search on index=false docValues=false field", req("q", field + ":[* TO *]"), "//*[@numFound='0']");
IndexReader ir;
RefCounted<SolrIndexSearcher> ref = null;
try {
ref = h.getCore().getSearcher();
ir = ref.get().getIndexReader();
h.getCore().withSearcher(searcher -> {
IndexReader ir = searcher.getIndexReader();
assertEquals("Field " + field + " should have no point values", 0, PointValues.size(ir, field));
} finally {
ref.decref();
}
return null;
});
}
@ -3743,14 +3737,11 @@ public class TestPointFields extends SolrTestCaseJ4 {
assertU(adoc("id", String.valueOf(i), field, values[i]));
}
assertU(commit());
IndexReader ir;
RefCounted<SolrIndexSearcher> ref = null;
SchemaField sf = h.getCore().getLatestSchema().getField(field);
boolean ignoredField = !(sf.indexed() || sf.stored() || sf.hasDocValues());
try {
ref = h.getCore().getSearcher();
SolrIndexSearcher searcher = ref.get();
ir = searcher.getIndexReader();
h.getCore().withSearcher(searcher -> {
DirectoryReader ir = searcher.getIndexReader();
// our own SlowCompositeReader to check DocValues on disk w/o the UninvertingReader added by SolrIndexSearcher
final LeafReader leafReaderForCheckingDVs = SlowCompositeReaderWrapper.wrap(searcher.getRawReader());
@ -3795,9 +3786,8 @@ public class TestPointFields extends SolrTestCaseJ4 {
}
}
}
} finally {
ref.decref();
}
return null;
});
clearIndex();
assertU(commit());
}

View File

@ -25,7 +25,6 @@ import org.apache.lucene.document.LazyDocument;
import org.apache.lucene.index.IndexableField;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.util.RefCounted;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@ -80,13 +79,7 @@ public class LargeFieldTest extends SolrTestCaseJ4 {
assertQ(req("q", "101", "df", ID_FLD, "fl", ID_FLD)); // eager load ID_FLD; rest are lazy
// fetch the document; we know it will be from the documentCache, docId 0
final Document d;
RefCounted<SolrIndexSearcher> searcherRef = h.getCore().getSearcher();
try {
d = searcherRef.get().doc(0);
} finally {
searcherRef.decref();
}
final Document d = h.getCore().withSearcher(searcher -> searcher.doc(0));
assertEager(d, ID_FLD);
assertLazyNotLoaded(d, LAZY_FIELD);

View File

@ -16,18 +16,6 @@
*/
package org.apache.solr.search;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.metrics.SolrMetricManager;
import org.apache.solr.util.ConcurrentLFUCache;
import org.apache.solr.util.DefaultSolrThreadFactory;
import org.apache.solr.util.RefCounted;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.HashMap;
@ -38,6 +26,17 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.metrics.SolrMetricManager;
import org.apache.solr.util.ConcurrentLFUCache;
import org.apache.solr.util.DefaultSolrThreadFactory;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Test for LFUCache
@ -56,9 +55,7 @@ public class TestLFUCache extends SolrTestCaseJ4 {
@Test
public void testTimeDecayParams() throws IOException {
RefCounted<SolrIndexSearcher> holder = h.getCore().getSearcher();
try {
SolrIndexSearcher searcher = holder.get();
h.getCore().withSearcher(searcher -> {
LFUCache cacheDecayTrue = (LFUCache) searcher.getCache("lfuCacheDecayTrue");
assertNotNull(cacheDecayTrue);
Map<String,Object> stats = cacheDecayTrue.getMetricsMap().getValue();
@ -101,10 +98,8 @@ public class TestLFUCache extends SolrTestCaseJ4 {
addCache(cacheDecayFalse, idx);
}
assertCache(cacheDecayFalse, 1, 2, 3, 4, 5);
} finally {
holder.decref();
}
return null;
});
}
private void addCache(LFUCache cache, int... inserts) {

View File

@ -17,7 +17,6 @@
package org.apache.solr.search;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.util.RefCounted;
import org.junit.BeforeClass;
/** Tests that NoOpRegenerator does what it should */
@ -35,32 +34,27 @@ public class TestNoOpRegenerator extends SolrTestCaseJ4 {
assertU(commit());
// add some items
RefCounted<SolrIndexSearcher> ref = h.getCore().getSearcher();
try {
SolrIndexSearcher searcher = ref.get();
h.getCore().withSearcher(searcher -> {
assertEquals(2, searcher.maxDoc());
SolrCache<Object,Object> cache = searcher.getCache("myPerSegmentCache");
assertEquals(0, cache.size());
cache.put("key1", "value1");
cache.put("key2", "value2");
assertEquals(2, cache.size());
} finally {
ref.decref();
}
return null;
});
// add a doc and commit: we should see our cached items still there
assertU(adoc("id", "3"));
assertU(commit());
ref = h.getCore().getSearcher();
try {
SolrIndexSearcher searcher = ref.get();
h.getCore().withSearcher(searcher -> {
assertEquals(3, searcher.maxDoc());
SolrCache<Object,Object> cache = searcher.getCache("myPerSegmentCache");
assertEquals(2, cache.size());
assertEquals("value1", cache.get("key1"));
assertEquals("value2", cache.get("key2"));
} finally {
ref.decref();
}
return null;
});
}
}

View File

@ -16,21 +16,23 @@
*/
package org.apache.solr.search.similarities;
import java.io.IOException;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.core.SolrCore;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.RefCounted;
public abstract class BaseSimilarityTestCase extends SolrTestCaseJ4 {
/** returns the similarity in use for the field */
protected Similarity getSimilarity(String field) {
SolrCore core = h.getCore();
RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
Similarity sim = searcher.get().getSimilarity();
searcher.decref();
Similarity sim = null;
try {
sim = h.getCore().withSearcher(IndexSearcher::getSimilarity);
} catch (IOException e) {
throw new RuntimeException(e);
}
while (sim instanceof PerFieldSimilarityWrapper) {
sim = ((PerFieldSimilarityWrapper)sim).get(field);
}

View File

@@ -25,8 +25,6 @@ import org.apache.solr.common.params.SpellingParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.SolrCore;
import org.apache.solr.handler.component.SpellCheckComponent;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.RefCounted;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -63,22 +61,23 @@ public class DirectSolrSpellCheckerTest extends SolrTestCaseJ4 {
SolrCore core = h.getCore();
checker.init(spellchecker, core);
RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
Collection<Token> tokens = queryConverter.convert("fob");
SpellingOptions spellOpts = new SpellingOptions(tokens, searcher.get().getIndexReader());
SpellingResult result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
Map<String, Integer> suggestions = result.get(tokens.iterator().next());
Map.Entry<String, Integer> entry = suggestions.entrySet().iterator().next();
assertTrue(entry.getKey() + " is not equal to " + "foo", entry.getKey().equals("foo") == true);
assertFalse(entry.getValue() + " equals: " + SpellingResult.NO_FREQUENCY_INFO, entry.getValue() == SpellingResult.NO_FREQUENCY_INFO);
h.getCore().withSearcher(searcher -> {
Collection<Token> tokens = queryConverter.convert("fob");
SpellingOptions spellOpts = new SpellingOptions(tokens, searcher.getIndexReader());
SpellingResult result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
Map<String, Integer> suggestions = result.get(tokens.iterator().next());
Map.Entry<String, Integer> entry = suggestions.entrySet().iterator().next();
assertTrue(entry.getKey() + " is not equal to " + "foo", entry.getKey().equals("foo") == true);
assertFalse(entry.getValue() + " equals: " + SpellingResult.NO_FREQUENCY_INFO, entry.getValue() == SpellingResult.NO_FREQUENCY_INFO);
spellOpts.tokens = queryConverter.convert("super");
result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
suggestions = result.get(tokens.iterator().next());
assertTrue("suggestions is not null and it should be", suggestions == null);
searcher.decref();
spellOpts.tokens = queryConverter.convert("super");
result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
suggestions = result.get(tokens.iterator().next());
assertTrue("suggestions is not null and it should be", suggestions == null);
return null;
});
}
@Test

View File

@@ -25,8 +25,6 @@ import org.apache.lucene.util.LuceneTestCase.SuppressTempFileChecks;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.SolrCore;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.RefCounted;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -75,22 +73,23 @@ public class FileBasedSpellCheckerTest extends SolrTestCaseJ4 {
assertTrue(dictName + " is not equal to " + "external", dictName.equals("external") == true);
checker.build(core, null);
RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
Collection<Token> tokens = queryConverter.convert("fob");
SpellingOptions spellOpts = new SpellingOptions(tokens, searcher.get().getIndexReader());
SpellingResult result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
Map<String, Integer> suggestions = result.get(tokens.iterator().next());
Map.Entry<String, Integer> entry = suggestions.entrySet().iterator().next();
assertTrue(entry.getKey() + " is not equal to " + "foo", entry.getKey().equals("foo") == true);
assertTrue(entry.getValue() + " does not equal: " + SpellingResult.NO_FREQUENCY_INFO, entry.getValue() == SpellingResult.NO_FREQUENCY_INFO);
h.getCore().withSearcher(searcher -> {
Collection<Token> tokens = queryConverter.convert("fob");
SpellingOptions spellOpts = new SpellingOptions(tokens, searcher.getIndexReader());
SpellingResult result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
Map<String, Integer> suggestions = result.get(tokens.iterator().next());
Map.Entry<String, Integer> entry = suggestions.entrySet().iterator().next();
assertTrue(entry.getKey() + " is not equal to " + "foo", entry.getKey().equals("foo") == true);
assertTrue(entry.getValue() + " does not equal: " + SpellingResult.NO_FREQUENCY_INFO, entry.getValue() == SpellingResult.NO_FREQUENCY_INFO);
spellOpts.tokens = queryConverter.convert("super");
result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
suggestions = result.get(tokens.iterator().next());
assertTrue("suggestions is not null and it should be", suggestions == null);
searcher.decref();
spellOpts.tokens = queryConverter.convert("super");
result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
suggestions = result.get(tokens.iterator().next());
assertTrue("suggestions is not null and it should be", suggestions == null);
return null;
});
}
@@ -113,26 +112,26 @@ public class FileBasedSpellCheckerTest extends SolrTestCaseJ4 {
assertTrue(dictName + " is not equal to " + "external", dictName.equals("external") == true);
checker.build(core, null);
RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
Collection<Token> tokens = queryConverter.convert("Solar");
h.getCore().withSearcher(searcher -> {
SpellingOptions spellOpts = new SpellingOptions(tokens, searcher.getIndexReader());
SpellingResult result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
//should be lowercased, b/c we are using a lowercasing analyzer
Map<String, Integer> suggestions = result.get(tokens.iterator().next());
assertTrue("suggestions Size: " + suggestions.size() + " is not: " + 1, suggestions.size() == 1);
Map.Entry<String, Integer> entry = suggestions.entrySet().iterator().next();
assertTrue(entry.getKey() + " is not equal to " + "solr", entry.getKey().equals("solr") == true);
assertTrue(entry.getValue() + " does not equal: " + SpellingResult.NO_FREQUENCY_INFO, entry.getValue() == SpellingResult.NO_FREQUENCY_INFO);
SpellingOptions spellOpts = new SpellingOptions(tokens, searcher.get().getIndexReader());
SpellingResult result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
//should be lowercased, b/c we are using a lowercasing analyzer
Map<String, Integer> suggestions = result.get(tokens.iterator().next());
assertTrue("suggestions Size: " + suggestions.size() + " is not: " + 1, suggestions.size() == 1);
Map.Entry<String, Integer> entry = suggestions.entrySet().iterator().next();
assertTrue(entry.getKey() + " is not equal to " + "solr", entry.getKey().equals("solr") == true);
assertTrue(entry.getValue() + " does not equal: " + SpellingResult.NO_FREQUENCY_INFO, entry.getValue() == SpellingResult.NO_FREQUENCY_INFO);
//test something not in the spell checker
spellOpts.tokens = queryConverter.convert("super");
result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
suggestions = result.get(tokens.iterator().next());
assertTrue("suggestions is not null and it should be", suggestions == null);
searcher.decref();
//test something not in the spell checker
spellOpts.tokens = queryConverter.convert("super");
result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
suggestions = result.get(tokens.iterator().next());
assertTrue("suggestions is not null and it should be", suggestions == null);
return null;
});
}
/**
@@ -156,24 +155,25 @@ public class FileBasedSpellCheckerTest extends SolrTestCaseJ4 {
assertTrue(dictName + " is not equal to " + "external", dictName.equals("external") == true);
checker.build(core, null);
RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
Collection<Token> tokens = queryConverter.convert("solar");
SpellingOptions spellOpts = new SpellingOptions(tokens, searcher.get().getIndexReader());
SpellingResult result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
//should be lowercased, b/c we are using a lowercasing analyzer
Map<String, Integer> suggestions = result.get(tokens.iterator().next());
assertTrue("suggestions Size: " + suggestions.size() + " is not: " + 1, suggestions.size() == 1);
Map.Entry<String, Integer> entry = suggestions.entrySet().iterator().next();
assertTrue(entry.getKey() + " is not equal to " + "solr", entry.getKey().equals("solr") == true);
assertTrue(entry.getValue() + " does not equal: " + SpellingResult.NO_FREQUENCY_INFO, entry.getValue() == SpellingResult.NO_FREQUENCY_INFO);
h.getCore().withSearcher(searcher -> {
Collection<Token> tokens = queryConverter.convert("solar");
SpellingOptions spellOpts = new SpellingOptions(tokens, searcher.getIndexReader());
SpellingResult result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
//should be lowercased, b/c we are using a lowercasing analyzer
Map<String, Integer> suggestions = result.get(tokens.iterator().next());
assertTrue("suggestions Size: " + suggestions.size() + " is not: " + 1, suggestions.size() == 1);
Map.Entry<String, Integer> entry = suggestions.entrySet().iterator().next();
assertTrue(entry.getKey() + " is not equal to " + "solr", entry.getKey().equals("solr") == true);
assertTrue(entry.getValue() + " does not equal: " + SpellingResult.NO_FREQUENCY_INFO, entry.getValue() == SpellingResult.NO_FREQUENCY_INFO);
spellOpts.tokens = queryConverter.convert("super");
result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
suggestions = result.get(spellOpts.tokens.iterator().next());
assertTrue("suggestions size should be 0", suggestions.size()==0);
searcher.decref();
spellOpts.tokens = queryConverter.convert("super");
result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
suggestions = result.get(spellOpts.tokens.iterator().next());
assertTrue("suggestions size should be 0", suggestions.size()==0);
return null;
});
}
}

View File

@@ -41,8 +41,6 @@ import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.SolrCore;
import org.apache.solr.handler.component.SpellCheckComponent;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.RefCounted;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -119,64 +117,61 @@ public class IndexBasedSpellCheckerTest extends SolrTestCaseJ4 {
String dictName = checker.init(spellchecker, core);
assertTrue(dictName + " is not equal to " + SolrSpellChecker.DEFAULT_DICTIONARY_NAME,
dictName.equals(SolrSpellChecker.DEFAULT_DICTIONARY_NAME) == true);
RefCounted<SolrIndexSearcher> holder = core.getSearcher();
SolrIndexSearcher searcher = holder.get();
try {
checker.build(core, searcher);
h.getCore().withSearcher(searcher -> {
checker.build(core, searcher);
IndexReader reader = searcher.getIndexReader();
Collection<Token> tokens = queryConverter.convert("documemt");
SpellingOptions spellOpts = new SpellingOptions(tokens, reader);
SpellingResult result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
//should be lowercased, b/c we are using a lowercasing analyzer
Map<String, Integer> suggestions = result.get(spellOpts.tokens.iterator().next());
assertTrue("documemt is null and it shouldn't be", suggestions != null);
assertTrue("documemt Size: " + suggestions.size() + " is not: " + 1, suggestions.size() == 1);
Map.Entry<String, Integer> entry = suggestions.entrySet().iterator().next();
assertTrue(entry.getKey() + " is not equal to " + "document", entry.getKey().equals("document") == true);
assertTrue(entry.getValue() + " does not equal: " + SpellingResult.NO_FREQUENCY_INFO, entry.getValue() == SpellingResult.NO_FREQUENCY_INFO);
IndexReader reader = searcher.getIndexReader();
Collection<Token> tokens = queryConverter.convert("documemt");
SpellingOptions spellOpts = new SpellingOptions(tokens, reader);
SpellingResult result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
//should be lowercased, b/c we are using a lowercasing analyzer
Map<String, Integer> suggestions = result.get(spellOpts.tokens.iterator().next());
assertTrue("documemt is null and it shouldn't be", suggestions != null);
assertTrue("documemt Size: " + suggestions.size() + " is not: " + 1, suggestions.size() == 1);
Map.Entry<String, Integer> entry = suggestions.entrySet().iterator().next();
assertTrue(entry.getKey() + " is not equal to " + "document", entry.getKey().equals("document") == true);
assertTrue(entry.getValue() + " does not equal: " + SpellingResult.NO_FREQUENCY_INFO, entry.getValue() == SpellingResult.NO_FREQUENCY_INFO);
//test something not in the spell checker
spellOpts.tokens = queryConverter.convert("super");
result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
suggestions = result.get(spellOpts.tokens.iterator().next());
assertTrue("suggestions size should be 0", suggestions.size()==0);
//test something not in the spell checker
spellOpts.tokens = queryConverter.convert("super");
result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
suggestions = result.get(spellOpts.tokens.iterator().next());
assertTrue("suggestions size should be 0", suggestions.size()==0);
//test something that is spelled correctly
spellOpts.tokens = queryConverter.convert("document");
result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
suggestions = result.get(spellOpts.tokens.iterator().next());
assertTrue("suggestions is null and it shouldn't be", suggestions == null);
//test something that is spelled correctly
spellOpts.tokens = queryConverter.convert("document");
result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
suggestions = result.get(spellOpts.tokens.iterator().next());
assertTrue("suggestions is null and it shouldn't be", suggestions == null);
//Has multiple possibilities, but the exact exists, so that should be returned
spellOpts.tokens = queryConverter.convert("red");
spellOpts.count = 2;
result = checker.getSuggestions(spellOpts);
assertNotNull(result);
suggestions = result.get(spellOpts.tokens.iterator().next());
assertTrue("suggestions is not null and it should be", suggestions == null);
//Has multiple possibilities, but the exact exists, so that should be returned
spellOpts.tokens = queryConverter.convert("red");
spellOpts.count = 2;
result = checker.getSuggestions(spellOpts);
assertNotNull(result);
suggestions = result.get(spellOpts.tokens.iterator().next());
assertTrue("suggestions is not null and it should be", suggestions == null);
//Try out something which should have multiple suggestions
spellOpts.tokens = queryConverter.convert("bug");
result = checker.getSuggestions(spellOpts);
assertNotNull(result);
suggestions = result.get(spellOpts.tokens.iterator().next());
assertNotNull(suggestions);
assertTrue("suggestions Size: " + suggestions.size() + " is not: " + 2, suggestions.size() == 2);
//Try out something which should have multiple suggestions
spellOpts.tokens = queryConverter.convert("bug");
result = checker.getSuggestions(spellOpts);
assertNotNull(result);
suggestions = result.get(spellOpts.tokens.iterator().next());
assertNotNull(suggestions);
assertTrue("suggestions Size: " + suggestions.size() + " is not: " + 2, suggestions.size() == 2);
entry = suggestions.entrySet().iterator().next();
assertTrue(entry.getKey() + " is equal to " + "bug and it shouldn't be", entry.getKey().equals("bug") == false);
assertTrue(entry.getValue() + " does not equal: " + SpellingResult.NO_FREQUENCY_INFO, entry.getValue() == SpellingResult.NO_FREQUENCY_INFO);
entry = suggestions.entrySet().iterator().next();
assertTrue(entry.getKey() + " is equal to " + "bug and it shouldn't be", entry.getKey().equals("bug") == false);
assertTrue(entry.getValue() + " does not equal: " + SpellingResult.NO_FREQUENCY_INFO, entry.getValue() == SpellingResult.NO_FREQUENCY_INFO);
entry = suggestions.entrySet().iterator().next();
assertTrue(entry.getKey() + " is equal to " + "bug and it shouldn't be", entry.getKey().equals("bug") == false);
assertTrue(entry.getValue() + " does not equal: " + SpellingResult.NO_FREQUENCY_INFO, entry.getValue() == SpellingResult.NO_FREQUENCY_INFO);
} finally {
holder.decref();
}
entry = suggestions.entrySet().iterator().next();
assertTrue(entry.getKey() + " is equal to " + "bug and it shouldn't be", entry.getKey().equals("bug") == false);
assertTrue(entry.getValue() + " does not equal: " + SpellingResult.NO_FREQUENCY_INFO, entry.getValue() == SpellingResult.NO_FREQUENCY_INFO);
return null;
});
}
@Test
@@ -194,39 +189,36 @@ public class IndexBasedSpellCheckerTest extends SolrTestCaseJ4 {
String dictName = checker.init(spellchecker, core);
assertTrue(dictName + " is not equal to " + SolrSpellChecker.DEFAULT_DICTIONARY_NAME,
dictName.equals(SolrSpellChecker.DEFAULT_DICTIONARY_NAME) == true);
RefCounted<SolrIndexSearcher> holder = core.getSearcher();
SolrIndexSearcher searcher = holder.get();
try {
checker.build(core, searcher);
h.getCore().withSearcher(searcher -> {
checker.build(core, searcher);
IndexReader reader = searcher.getIndexReader();
Collection<Token> tokens = queryConverter.convert("documemt");
SpellingOptions spellOpts = new SpellingOptions(tokens, reader, 1, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, true, 0.5f, null);
SpellingResult result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
//should be lowercased, b/c we are using a lowercasing analyzer
Map<String, Integer> suggestions = result.get(spellOpts.tokens.iterator().next());
assertTrue("documemt is null and it shouldn't be", suggestions != null);
assertTrue("documemt Size: " + suggestions.size() + " is not: " + 1, suggestions.size() == 1);
Map.Entry<String, Integer> entry = suggestions.entrySet().iterator().next();
assertTrue(entry.getKey() + " is not equal to " + "document", entry.getKey().equals("document") == true);
assertTrue(entry.getValue() + " does not equal: " + 2, entry.getValue() == 2);
IndexReader reader = searcher.getIndexReader();
Collection<Token> tokens = queryConverter.convert("documemt");
SpellingOptions spellOpts = new SpellingOptions(tokens, reader, 1, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, true, 0.5f, null);
SpellingResult result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
//should be lowercased, b/c we are using a lowercasing analyzer
Map<String, Integer> suggestions = result.get(spellOpts.tokens.iterator().next());
assertTrue("documemt is null and it shouldn't be", suggestions != null);
assertTrue("documemt Size: " + suggestions.size() + " is not: " + 1, suggestions.size() == 1);
Map.Entry<String, Integer> entry = suggestions.entrySet().iterator().next();
assertTrue(entry.getKey() + " is not equal to " + "document", entry.getKey().equals("document") == true);
assertTrue(entry.getValue() + " does not equal: " + 2, entry.getValue() == 2);
//test something not in the spell checker
spellOpts.tokens = queryConverter.convert("super");
result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
suggestions = result.get(spellOpts.tokens.iterator().next());
assertTrue("suggestions size should be 0", suggestions.size()==0);
//test something not in the spell checker
spellOpts.tokens = queryConverter.convert("super");
result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
suggestions = result.get(spellOpts.tokens.iterator().next());
assertTrue("suggestions size should be 0", suggestions.size()==0);
spellOpts.tokens = queryConverter.convert("document");
result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
suggestions = result.get(spellOpts.tokens.iterator().next());
assertTrue("suggestions is not null and it should be", suggestions == null);
} finally {
holder.decref();
}
spellOpts.tokens = queryConverter.convert("document");
result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
suggestions = result.get(spellOpts.tokens.iterator().next());
assertTrue("suggestions is not null and it should be", suggestions == null);
return null;
});
}
private static class TestSpellChecker extends IndexBasedSpellChecker{
@@ -251,18 +243,16 @@ public class IndexBasedSpellCheckerTest extends SolrTestCaseJ4 {
String dictName = checker.init(spellchecker, core);
assertTrue(dictName + " is not equal to " + SolrSpellChecker.DEFAULT_DICTIONARY_NAME,
dictName.equals(SolrSpellChecker.DEFAULT_DICTIONARY_NAME) == true);
RefCounted<SolrIndexSearcher> holder = core.getSearcher();
SolrIndexSearcher searcher = holder.get();
try {
checker.build(core, searcher);
SpellChecker sc = checker.getSpellChecker();
assertTrue("sc is null and it shouldn't be", sc != null);
StringDistance sd = sc.getStringDistance();
assertTrue("sd is null and it shouldn't be", sd != null);
assertTrue("sd is not an instance of " + JaroWinklerDistance.class.getName(), sd instanceof JaroWinklerDistance);
} finally {
holder.decref();
}
h.getCore().withSearcher(searcher -> {
checker.build(core, searcher);
SpellChecker sc = checker.getSpellChecker();
assertTrue("sc is null and it shouldn't be", sc != null);
StringDistance sd = sc.getStringDistance();
assertTrue("sd is null and it shouldn't be", sd != null);
assertTrue("sd is not an instance of " + JaroWinklerDistance.class.getName(), sd instanceof JaroWinklerDistance);
return null;
});
}
@Test
@@ -307,39 +297,36 @@ public class IndexBasedSpellCheckerTest extends SolrTestCaseJ4 {
String dictName = checker.init(spellchecker, core);
assertTrue(dictName + " is not equal to " + SolrSpellChecker.DEFAULT_DICTIONARY_NAME,
dictName.equals(SolrSpellChecker.DEFAULT_DICTIONARY_NAME) == true);
RefCounted<SolrIndexSearcher> holder = core.getSearcher();
SolrIndexSearcher searcher = holder.get();
try {
checker.build(core, searcher);
h.getCore().withSearcher(searcher -> {
checker.build(core, searcher);
IndexReader reader = searcher.getIndexReader();
Collection<Token> tokens = queryConverter.convert("flesh");
SpellingOptions spellOpts = new SpellingOptions(tokens, reader, 1, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, true, 0.5f, null);
SpellingResult result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
//should be lowercased, b/c we are using a lowercasing analyzer
Map<String, Integer> suggestions = result.get(spellOpts.tokens.iterator().next());
assertTrue("flesh is null and it shouldn't be", suggestions != null);
assertTrue("flesh Size: " + suggestions.size() + " is not: " + 1, suggestions.size() == 1);
Map.Entry<String, Integer> entry = suggestions.entrySet().iterator().next();
assertTrue(entry.getKey() + " is not equal to " + "flash", entry.getKey().equals("flash") == true);
assertTrue(entry.getValue() + " does not equal: " + 1, entry.getValue() == 1);
IndexReader reader = searcher.getIndexReader();
Collection<Token> tokens = queryConverter.convert("flesh");
SpellingOptions spellOpts = new SpellingOptions(tokens, reader, 1, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX, true, 0.5f, null);
SpellingResult result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
//should be lowercased, b/c we are using a lowercasing analyzer
Map<String, Integer> suggestions = result.get(spellOpts.tokens.iterator().next());
assertTrue("flesh is null and it shouldn't be", suggestions != null);
assertTrue("flesh Size: " + suggestions.size() + " is not: " + 1, suggestions.size() == 1);
Map.Entry<String, Integer> entry = suggestions.entrySet().iterator().next();
assertTrue(entry.getKey() + " is not equal to " + "flash", entry.getKey().equals("flash") == true);
assertTrue(entry.getValue() + " does not equal: " + 1, entry.getValue() == 1);
//test something not in the spell checker
spellOpts.tokens = queryConverter.convert("super");
result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
suggestions = result.get(spellOpts.tokens.iterator().next());
assertTrue("suggestions size should be 0", suggestions.size()==0);
//test something not in the spell checker
spellOpts.tokens = queryConverter.convert("super");
result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
suggestions = result.get(spellOpts.tokens.iterator().next());
assertTrue("suggestions size should be 0", suggestions.size()==0);
spellOpts.tokens = queryConverter.convert("Caroline");
result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
suggestions = result.get(spellOpts.tokens.iterator().next());
assertTrue("suggestions is not null and it should be", suggestions == null);
} finally {
holder.decref();
}
spellOpts.tokens = queryConverter.convert("Caroline");
result = checker.getSuggestions(spellOpts);
assertTrue("result is null and it shouldn't be", result != null);
suggestions = result.get(spellOpts.tokens.iterator().next());
assertTrue("suggestions is not null and it should be", suggestions == null);
return null;
});
}
}

View File

@@ -65,6 +65,7 @@ public class WordBreakSolrSpellCheckerTest extends SolrTestCaseJ4 {
params.add(WordBreakSolrSpellChecker.PARAM_MAX_CHANGES, "10");
checker.init(params, core);
//TODO can we use core.withSearcher ? refcounting here is confusing; not sure if intentional
RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
QueryConverter qc = new SpellingQueryConverter();
qc.setAnalyzer(new MockAnalyzer(random()));

View File

@@ -18,9 +18,6 @@
package org.apache.solr.update;
import static org.junit.internal.matchers.StringContains.containsString;
import static org.apache.solr.update.UpdateLogTest.buildAddUpdateCommand;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
@@ -48,7 +45,6 @@ import org.apache.solr.index.NoMergePolicyFactory;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.update.processor.AtomicUpdateDocumentMerger;
import org.apache.solr.util.RefCounted;
import org.junit.AfterClass;
@@ -56,6 +52,9 @@ import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.apache.solr.update.UpdateLogTest.buildAddUpdateCommand;
import static org.junit.internal.matchers.StringContains.containsString;
/**
* Tests the in-place updates (docValues updates) for a standalone Solr instance.
@@ -414,13 +413,8 @@ public class TestInPlaceUpdatesStandalone extends SolrTestCaseJ4 {
@Test
public void testUpdateOfNonExistentDVsShouldNotFail() throws Exception {
// schema sanity check: assert that the nonexistent_field_i_dvo doesn't exist already
FieldInfo fi;
RefCounted<SolrIndexSearcher> holder = h.getCore().getSearcher();
try {
fi = holder.get().getSlowAtomicReader().getFieldInfos().fieldInfo("nonexistent_field_i_dvo");
} finally {
holder.decref();
}
FieldInfo fi = h.getCore().withSearcher(searcher ->
searcher.getSlowAtomicReader().getFieldInfos().fieldInfo("nonexistent_field_i_dvo"));
assertNull(fi);
// Partial update