SOLR-14759 fix tests that depend on lucene test-src (#2462)

Rewrite one, ignore the other two.
Mike Drob 2021-03-08 07:32:40 -06:00 committed by GitHub
parent d53b3da0ea
commit 408b3775dd
5 changed files with 64 additions and 19 deletions

View File

@@ -31,4 +31,7 @@ dependencies {
implementation project(':lucene:analysis:stempel')
testImplementation project(':solr:test-framework')
testImplementation('org.mockito:mockito-core', {
exclude group: "net.bytebuddy", module: "byte-buddy-agent"
})
}
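
The new test-scoped Mockito dependency above (with byte-buddy-agent excluded) is what lets the rewritten TestICUCollationField further down mock a ResourceLoader instead of using a class from Lucene's test sources. A minimal sketch of the guard that rewritten test pairs with this dependency, with a hypothetical class name; SolrTestCaseJ4.assumeWorkingMockito() skips rather than fails when Mockito cannot work on the running JVM:

import org.apache.solr.SolrTestCaseJ4;
import org.junit.BeforeClass;

public class MockitoGuardedTestSketch extends SolrTestCaseJ4 {
  @BeforeClass
  public static void ensureMockitoWorks() {
    // Skip (rather than fail) every test in the class when Mockito cannot
    // instrument classes on the running JVM.
    assumeWorkingMockito();
  }
}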

View File

@@ -18,6 +18,7 @@ package org.apache.solr.schema;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
@@ -201,7 +202,7 @@ public class ICUCollationField extends FieldType {
InputStream input = null;
try {
input = loader.openResource(fileName);
String rules = IOUtils.toString(input, "UTF-8");
String rules = IOUtils.toString(input, StandardCharsets.UTF_8);
return new RuleBasedCollator(rules);
} catch (Exception e) {
// io error or invalid rules
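
The hunk above swaps the charset-name String for java.nio's StandardCharsets.UTF_8 constant when reading the tailored collation rules. A small, self-contained sketch of the commons-io overload the new line uses; the class name and rule string are illustrative only:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import org.apache.commons.io.IOUtils;

class CharsetOverloadSketch {
  // Same call shape as the new line: IOUtils.toString(InputStream, Charset).
  static String readRules(InputStream input) throws IOException {
    return IOUtils.toString(input, StandardCharsets.UTF_8);
  }

  public static void main(String[] args) throws IOException {
    InputStream in = new ByteArrayInputStream("& a < b".getBytes(StandardCharsets.UTF_8));
    System.out.println(readRules(in)); // prints the rule string unchanged
  }
}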

View File

@@ -16,20 +16,22 @@
*/
package org.apache.solr.schema;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.nio.charset.StandardCharsets;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.lucene.analysis.util.FilesystemResourceLoader;
import org.apache.lucene.util.ResourceLoader;
import org.apache.lucene.analysis.util.StringMockResourceLoader;
import org.apache.solr.SolrTestCaseJ4;
import org.junit.BeforeClass;
import com.ibm.icu.text.Collator;
import com.ibm.icu.text.RuleBasedCollator;
import com.ibm.icu.util.ULocale;
import org.mockito.Mockito;
/**
* Tests {@link ICUCollationField} with TermQueries, RangeQueries, and sort order.
@@ -88,9 +90,13 @@ public class TestICUCollationField extends SolrTestCaseJ4 {
IOUtils.write(tailoredRules, os, "UTF-8");
os.close();
assumeWorkingMockito();
final ResourceLoader loader;
if (random().nextBoolean()) {
loader = new StringMockResourceLoader(tailoredRules);
loader = Mockito.mock(ResourceLoader.class);
Mockito.when(loader.openResource(Mockito.anyString()))
.thenReturn(new ByteArrayInputStream(tailoredRules.getBytes(StandardCharsets.UTF_8)));
} else {
loader = new FilesystemResourceLoader(confDir.toPath());
}
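
This is the "rewrite one" part of the commit message: instead of Lucene's StringMockResourceLoader, which lives in Lucene's test sources, the test stubs the ResourceLoader interface with Mockito so that openResource() serves the tailored rules from memory. A hedged sketch of the same idea pulled out of the test, assuming the org.apache.lucene.util.ResourceLoader interface imported above; class and method names here are illustrative:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.lucene.util.ResourceLoader;
import org.mockito.Mockito;

class InMemoryRulesLoaderSketch {
  static ResourceLoader loaderFor(String tailoredRules) throws IOException {
    ResourceLoader loader = Mockito.mock(ResourceLoader.class);
    // Any resource name resolves to an in-memory stream over the rules. The stream
    // can only be consumed once, which is all the schema-loading path needs.
    Mockito.when(loader.openResource(Mockito.anyString()))
        .thenReturn(new ByteArrayInputStream(tailoredRules.getBytes(StandardCharsets.UTF_8)));
    return loader;
  }
}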

View File

@@ -23,9 +23,6 @@ import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import org.apache.commons.io.FileUtils;
@@ -34,19 +31,54 @@ import org.apache.lucene.util.TestUtil;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.util.TestHarness;
import org.junit.Ignore;
import org.junit.Test;
/** Verify we can read/write previous versions' Lucene indexes. */
@Ignore("Missing Lucene back-compat index files")
public class TestLuceneIndexBackCompat extends SolrTestCaseJ4 {
private static final String[] oldNames = TestBackwardsCompatibility.getOldNames();
private static final String[] oldSingleSegmentNames = TestBackwardsCompatibility.getOldSingleSegmentNames();
private static final String[] oldNames = {
"8.0.0-cfs",
"8.0.0-nocfs",
"8.1.0-cfs",
"8.1.0-nocfs",
"8.1.1-cfs",
"8.1.1-nocfs",
"8.2.0-cfs",
"8.2.0-nocfs",
"8.3.0-cfs",
"8.3.0-nocfs",
"8.3.1-cfs",
"8.3.1-nocfs",
"8.4.0-cfs",
"8.4.0-nocfs",
"8.4.1-cfs",
"8.4.1-nocfs",
"8.5.0-cfs",
"8.5.0-nocfs",
"8.5.1-cfs",
"8.5.1-nocfs",
"8.5.2-cfs",
"8.5.2-nocfs",
"8.6.0-cfs",
"8.6.0-nocfs",
"8.6.1-cfs",
"8.6.1-nocfs",
"8.6.2-cfs",
"8.6.2-nocfs",
"8.6.3-cfs",
"8.6.3-nocfs",
"8.7.0-cfs",
"8.7.0-nocfs",
"8.8.0-cfs",
"8.8.0-nocfs",
"8.8.1-cfs",
"8.8.1-nocfs"
};
@Test
public void testOldIndexes() throws Exception {
List<String> names = new ArrayList<>(oldNames.length + oldSingleSegmentNames.length);
names.addAll(Arrays.asList(oldNames));
names.addAll(Arrays.asList(oldSingleSegmentNames));
for (String name : names) {
for (String name : oldNames) {
setupCore(name);
assertQ(req("q", "*:*", "rows", "0"), "//result[@numFound='35']");
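
With Lucene's TestBackwardsCompatibility no longer reachable (it lives in Lucene's test sources, not in any published artifact), the index names are inlined above and the whole test is @Ignore'd until back-compat index files are available again. The long list is simply each 8.x release crossed with the two index flavors, "-cfs" for the compound-file variant and "-nocfs" for the non-compound one, as this hypothetical helper illustrates:

import java.util.ArrayList;
import java.util.List;

class OldIndexNamesSketch {
  static List<String> expand(String... versions) {
    List<String> names = new ArrayList<>();
    for (String v : versions) {
      names.add(v + "-cfs");    // compound file format index
      names.add(v + "-nocfs");  // non-compound variant
    }
    return names;
  }

  public static void main(String[] args) {
    // [8.0.0-cfs, 8.0.0-nocfs, 8.8.1-cfs, 8.8.1-nocfs]
    System.out.println(expand("8.0.0", "8.8.1"));
  }
}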

View File

@@ -18,20 +18,23 @@ package org.apache.solr.search;
import java.lang.invoke.MethodHandles;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenFilter;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.queryparser.xml.CoreParser;
import org.apache.lucene.queryparser.xml.TestCoreParser;
import org.apache.solr.SolrTestCase;
import org.apache.solr.util.StartupLoggingUtils;
import org.apache.solr.util.TestHarness;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class TestXmlQParser extends TestCoreParser {
@Ignore("Was relying on Lucene test sources. Should copy?")
public class TestXmlQParser extends SolrTestCase /* extends TestCoreParser */ {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private CoreParser solrCoreParser;
@@ -50,12 +53,12 @@ public class TestXmlQParser extends TestCoreParser {
StartupLoggingUtils.shutdown();
}
@Override
// @Override
protected CoreParser coreParser() {
if (solrCoreParser == null) {
solrCoreParser = new SolrCoreParser(
super.defaultField(),
super.analyzer(),
"contents",
new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET),
harness.getRequestFactory("/select", 0, 0).makeRequest());
}
return solrCoreParser;
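
Rather than inheriting defaultField() and analyzer() from Lucene's TestCoreParser (again, Lucene test sources), the parser is now built with a literal "contents" field and a MockAnalyzer from the published lucene test-framework artifact. A hedged sketch of that analyzer construction; the class and method names are illustrative:

import java.util.Random;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenFilter;
import org.apache.lucene.analysis.MockTokenizer;

class MockAnalyzerSketch {
  static Analyzer englishStopAnalyzer(Random random) {
    // Whitespace tokenization, lowercasing enabled, classic English stop words removed;
    // the same arguments the rewritten test passes when it builds its SolrCoreParser.
    return new MockAnalyzer(random, MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET);
  }
}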