Merge branch 'branch_6x' of https://git-wip-us.apache.org/repos/asf/lucene-solr into branch_6x

Noble Paul 2016-10-28 08:31:46 +05:30
commit 6e563d0f4f
27 changed files with 741 additions and 113 deletions

View File

@@ -56,6 +56,9 @@ Bug Fixes
allTermsRequired is false and context filters are specified (Mike
McCandless)
* LUCENE-7429: AnalyzerWrapper can now modify the normalization chain too and
DelegatingAnalyzerWrapper does the right thing automatically. (Adrien Grand)
Improvements
* LUCENE-7439: FuzzyQuery now matches all terms within the specified

View File

@@ -131,7 +131,7 @@ public final class CustomAnalyzer extends Analyzer {
@Override
protected TokenStreamComponents createComponents(String fieldName) {
final Tokenizer tk = tokenizer.create(attributeFactory());
final Tokenizer tk = tokenizer.create(attributeFactory(fieldName));
TokenStream ts = tk;
for (final TokenFilterFactory filter : tokenFilters) {
ts = filter.create(ts);

View File

@@ -85,7 +85,7 @@ public final class CollationKeyAnalyzer extends Analyzer {
}
@Override
protected AttributeFactory attributeFactory() {
protected AttributeFactory attributeFactory(String fieldName) {
return factory;
}

View File

@@ -238,7 +238,7 @@ public abstract class Analyzer implements Closeable {
throw new IllegalStateException("Normalization threw an unexpected exception", e);
}
final AttributeFactory attributeFactory = attributeFactory();
final AttributeFactory attributeFactory = attributeFactory(fieldName);
try (TokenStream ts = normalize(fieldName,
new StringTokenStream(attributeFactory, filteredText, text.length()))) {
final TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class);
@@ -286,9 +286,10 @@ public abstract class Analyzer implements Closeable {
/** Return the {@link AttributeFactory} to be used for
* {@link #tokenStream analysis} and
* {@link #normalize(String, String) normalization}. The default
* implementation returns {@link TokenStream#DEFAULT_TOKEN_ATTRIBUTE_FACTORY}. */
protected AttributeFactory attributeFactory() {
* {@link #normalize(String, String) normalization} on the given
* {@code fieldName}. The default implementation returns
* {@link TokenStream#DEFAULT_TOKEN_ATTRIBUTE_FACTORY}. */
protected AttributeFactory attributeFactory(String fieldName) {
return TokenStream.DEFAULT_TOKEN_ATTRIBUTE_FACTORY;
}
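With the factory keyed by field name, an Analyzer can now hand out a different AttributeFactory per field and have both analysis and normalization honor it. A minimal sketch, assuming Lucene 6.x APIs; the PerFieldFactoryAnalyzer class and the "sort_fr" field name are illustrative, not part of this commit:

import java.text.Collator;
import java.util.Locale;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.collation.CollationAttributeFactory;
import org.apache.lucene.util.AttributeFactory;

public final class PerFieldFactoryAnalyzer extends Analyzer {
  // Collation-aware term attributes for a single, hypothetical sort field.
  private final AttributeFactory collationFactory =
      new CollationAttributeFactory(Collator.getInstance(Locale.FRENCH));

  @Override
  protected AttributeFactory attributeFactory(String fieldName) {
    return "sort_fr".equals(fieldName)
        ? collationFactory
        : TokenStream.DEFAULT_TOKEN_ATTRIBUTE_FACTORY;
  }

  @Override
  protected TokenStreamComponents createComponents(String fieldName) {
    // Thread the per-field factory into the tokenizer, as the patched
    // CustomAnalyzer and TokenizerChain now do.
    return new TokenStreamComponents(new KeywordTokenizer(attributeFactory(fieldName), 256));
  }
}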

View File

@@ -19,6 +19,8 @@ package org.apache.lucene.analysis;
import java.io.Reader;
import org.apache.lucene.util.AttributeFactory;
/**
* Extension to {@link Analyzer} suitable for Analyzers which wrap
* other Analyzers.
@@ -81,6 +83,22 @@ public abstract class AnalyzerWrapper extends Analyzer {
return components;
}
/**
* Wraps / alters the given TokenStream for normalization purposes, taken
* from the wrapped Analyzer, to form new components. It is through this
* method that new TokenFilters can be added by AnalyzerWrappers. By default,
* the given token stream is returned.
*
* @param fieldName
* Name of the field which is to be analyzed
* @param in
* TokenStream taken from the wrapped Analyzer
* @return Wrapped / altered TokenStream.
*/
protected TokenStream wrapTokenStreamForNormalization(String fieldName, TokenStream in) {
return in;
}
/**
* Wraps / alters the given Reader. Through this method AnalyzerWrappers can
* implement {@link #initReader(String, Reader)}. By default, the given reader
@@ -96,11 +114,31 @@ public abstract class AnalyzerWrapper extends Analyzer {
return reader;
}
/**
* Wraps / alters the given Reader. Through this method AnalyzerWrappers can
* implement {@link #initReaderForNormalization(String, Reader)}. By default,
* the given reader is returned.
*
* @param fieldName
* name of the field which is to be analyzed
* @param reader
* the reader to wrap
* @return the wrapped reader
*/
protected Reader wrapReaderForNormalization(String fieldName, Reader reader) {
return reader;
}
@Override
protected final TokenStreamComponents createComponents(String fieldName) {
return wrapComponents(fieldName, getWrappedAnalyzer(fieldName).createComponents(fieldName));
}
@Override
protected final TokenStream normalize(String fieldName, TokenStream in) {
return wrapTokenStreamForNormalization(fieldName, getWrappedAnalyzer(fieldName).normalize(fieldName, in));
}
@Override
public int getPositionIncrementGap(String fieldName) {
return getWrappedAnalyzer(fieldName).getPositionIncrementGap(fieldName);
@@ -115,4 +153,14 @@ public abstract class AnalyzerWrapper extends Analyzer {
public final Reader initReader(String fieldName, Reader reader) {
return getWrappedAnalyzer(fieldName).initReader(fieldName, wrapReader(fieldName, reader));
}
@Override
protected final Reader initReaderForNormalization(String fieldName, Reader reader) {
return getWrappedAnalyzer(fieldName).initReaderForNormalization(fieldName, wrapReaderForNormalization(fieldName, reader));
}
@Override
protected final AttributeFactory attributeFactory(String fieldName) {
return getWrappedAnalyzer(fieldName).attributeFactory(fieldName);
}
}
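The two new wrap hooks mirror wrapComponents and wrapReader for the normalization chain, so a wrapper that appends a TokenFilter can fold query-time terms the same way it folds indexed ones. A hedged sketch, assuming Lucene 6.x APIs; the LowercasingWrapper class is illustrative, not part of this commit:

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.AnalyzerWrapper;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.LowerCaseFilter;

public final class LowercasingWrapper extends AnalyzerWrapper {
  private final Analyzer delegate;

  public LowercasingWrapper(Analyzer delegate) {
    super(delegate.getReuseStrategy());
    this.delegate = delegate;
  }

  @Override
  protected Analyzer getWrappedAnalyzer(String fieldName) {
    return delegate;
  }

  @Override
  protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) {
    // Append a filter to the analysis chain.
    return new TokenStreamComponents(components.getTokenizer(),
        new LowerCaseFilter(components.getTokenStream()));
  }

  @Override
  protected TokenStream wrapTokenStreamForNormalization(String fieldName, TokenStream in) {
    // Before this commit there was no hook here, so normalize() would
    // have missed the extra filter that wrapComponents adds.
    return new LowerCaseFilter(in);
  }
}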

View File

@@ -55,11 +55,21 @@ public abstract class DelegatingAnalyzerWrapper extends AnalyzerWrapper {
return super.wrapComponents(fieldName, components);
}
@Override
protected final TokenStream wrapTokenStreamForNormalization(String fieldName, TokenStream in) {
return super.wrapTokenStreamForNormalization(fieldName, in);
}
@Override
protected final Reader wrapReader(String fieldName, Reader reader) {
return super.wrapReader(fieldName, reader);
}
@Override
protected final Reader wrapReaderForNormalization(String fieldName, Reader reader) {
return super.wrapReaderForNormalization(fieldName, reader);
}
private static final class DelegatingReuseStrategy extends ReuseStrategy {
DelegatingAnalyzerWrapper wrapper;
private final ReuseStrategy fallbackStrategy;

View File

@@ -0,0 +1,107 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.analysis;
import java.io.IOException;
import java.io.Reader;
import java.nio.charset.StandardCharsets;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
public class TestDelegatingAnalyzerWrapper extends LuceneTestCase {
public void testDelegatesNormalization() {
Analyzer analyzer1 = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
DelegatingAnalyzerWrapper w1 = new DelegatingAnalyzerWrapper(Analyzer.GLOBAL_REUSE_STRATEGY) {
@Override
protected Analyzer getWrappedAnalyzer(String fieldName) {
return analyzer1;
}
};
assertEquals(new BytesRef("Ab C"), w1.normalize("foo", "Ab C"));
Analyzer analyzer2 = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true);
DelegatingAnalyzerWrapper w2 = new DelegatingAnalyzerWrapper(Analyzer.GLOBAL_REUSE_STRATEGY) {
@Override
protected Analyzer getWrappedAnalyzer(String fieldName) {
return analyzer2;
}
};
assertEquals(new BytesRef("ab c"), w2.normalize("foo", "Ab C"));
}
public void testDelegatesAttributeFactory() throws Exception {
Analyzer analyzer1 = new MockBytesAnalyzer();
DelegatingAnalyzerWrapper w1 = new DelegatingAnalyzerWrapper(Analyzer.GLOBAL_REUSE_STRATEGY) {
@Override
protected Analyzer getWrappedAnalyzer(String fieldName) {
return analyzer1;
}
};
assertEquals(new BytesRef("Ab C".getBytes(StandardCharsets.UTF_16LE)), w1.normalize("foo", "Ab C"));
}
public void testDelegatesCharFilter() throws Exception {
Analyzer analyzer1 = new Analyzer() {
@Override
protected Reader initReaderForNormalization(String fieldName, Reader reader) {
return new DummyCharFilter(reader, 'b', 'z');
}
@Override
protected TokenStreamComponents createComponents(String fieldName) {
Tokenizer tokenizer = new MockTokenizer(attributeFactory(fieldName));
return new TokenStreamComponents(tokenizer);
}
};
DelegatingAnalyzerWrapper w1 = new DelegatingAnalyzerWrapper(Analyzer.GLOBAL_REUSE_STRATEGY) {
@Override
protected Analyzer getWrappedAnalyzer(String fieldName) {
return analyzer1;
}
};
assertEquals(new BytesRef("az c"), w1.normalize("foo", "ab c"));
}
private static class DummyCharFilter extends CharFilter {
private final char match, repl;
public DummyCharFilter(Reader input, char match, char repl) {
super(input);
this.match = match;
this.repl = repl;
}
@Override
protected int correct(int currentOff) {
return currentOff;
}
@Override
public int read(char[] cbuf, int off, int len) throws IOException {
final int read = input.read(cbuf, off, len);
for (int i = 0; i < read; ++i) {
if (cbuf[off+i] == match) {
cbuf[off+i] = repl;
}
}
return read;
}
}
}

View File

@@ -30,7 +30,7 @@ public final class MockBytesAnalyzer extends Analyzer {
}
@Override
protected AttributeFactory attributeFactory() {
protected AttributeFactory attributeFactory(String fieldName) {
return MockUTF16TermAttributeImpl.UTF16_TERM_ATTRIBUTE_FACTORY;
}
}

View File

@@ -174,6 +174,9 @@ New Features
* SOLR-9559: Add ExecutorStream to execute stored Streaming Expressions (Joel Bernstein)
* SOLR-1085: Add support for MoreLikeThis queries and responses in SolrJ client.
(Maurice Jumelet, Bill Mitchell, Cao Manh Dat via shalin)
Bug Fixes
----------------------
@@ -243,6 +246,9 @@ Bug Fixes
* SOLR-9692: blockUnknown property makes inter-node communication impossible (noble)
* SOLR-2094: XPathEntityProcessor should reinitialize the XPathRecordReader instance if
the 'forEach' or 'xpath' attributes are templates & it is not a root entity (Cao Manh Dat, noble)
Optimizations
----------------------
@@ -384,6 +390,10 @@ Other Changes
* SOLR-9533: Reload core config when a core is reloaded (Gethin James, Joel Bernstein)
* SOLR-9371: Fix bin/solr calculations for start/stop wait time and RMI_PORT.
(Shawn Heisey via Erick Erickson)
================== 6.2.1 ==================
Bug Fixes

View File

@@ -119,6 +119,9 @@ else
JAVA=java
fi
if [ -z "$SOLR_STOP_WAIT" ]; then
SOLR_STOP_WAIT=180
fi
# test that Java exists, is executable and correct version
JAVA_VER=$("$JAVA" -version 2>&1)
if [[ $? -ne 0 ]] ; then
@@ -227,7 +230,7 @@ function print_usage() {
echo ""
echo " -p <port> Specify the port to start the Solr HTTP listener on; default is 8983"
echo " The specified port (SOLR_PORT) will also be used to determine the stop port"
echo " STOP_PORT=(\$SOLR_PORT-1000) and JMX RMI listen port RMI_PORT=(1\$SOLR_PORT). "
echo " STOP_PORT=(\$SOLR_PORT-1000) and JMX RMI listen port RMI_PORT=(\$SOLR_PORT+10000). "
echo " For instance, if you set -p 8985, then the STOP_PORT=7985 and RMI_PORT=18985"
echo ""
echo " -d <dir> Specify the Solr server directory; defaults to server"
@@ -571,9 +574,24 @@ function stop_solr() {
SOLR_PID="$4"
if [ "$SOLR_PID" != "" ]; then
echo -e "Sending stop command to Solr running on port $SOLR_PORT ... waiting 5 seconds to allow Jetty process $SOLR_PID to stop gracefully."
echo -e "Sending stop command to Solr running on port $SOLR_PORT ... waiting up to $SOLR_STOP_WAIT seconds to allow Jetty process $SOLR_PID to stop gracefully."
"$JAVA" $SOLR_SSL_OPTS $AUTHC_OPTS -jar "$DIR/start.jar" "STOP.PORT=$STOP_PORT" "STOP.KEY=$STOP_KEY" --stop || true
(sleep 5) &
(loops=0
while true
do
CHECK_PID=`ps auxww | awk '{print $2}' | grep -w $SOLR_PID | sort -r | tr -d ' '`
if [ "$CHECK_PID" != "" ]; then
slept=$((loops * 2))
if [ $slept -lt $SOLR_STOP_WAIT ]; then
sleep 2
loops=$[$loops+1]
else
exit # subshell!
fi
else
exit # subshell!
fi
done) &
spinner $!
rm -f "$SOLR_PID_DIR/solr-$SOLR_PORT.pid"
else
@@ -1455,7 +1473,11 @@ fi
if [ "$ENABLE_REMOTE_JMX_OPTS" == "true" ]; then
if [ -z "$RMI_PORT" ]; then
RMI_PORT="1$SOLR_PORT"
RMI_PORT=`expr $SOLR_PORT + 10000`
if [ $RMI_PORT -gt 65535 ]; then
echo -e "\nRMI_PORT is $RMI_PORT, which is invalid!\n"
exit 1
fi
fi
REMOTE_JMX_OPTS=('-Dcom.sun.management.jmxremote' \
@@ -1616,18 +1638,19 @@ function launch_solr() {
# no lsof on cygwin though
if hash lsof 2>/dev/null ; then # hash returns true if lsof is on the path
echo -n "Waiting up to 30 seconds to see Solr running on port $SOLR_PORT"
echo -n "Waiting up to $SOLR_STOP_WAIT seconds to see Solr running on port $SOLR_PORT"
# Launch in a subshell to show the spinner
(loops=0
while true
do
running=`lsof -PniTCP:$SOLR_PORT -sTCP:LISTEN`
if [ -z "$running" ]; then
if [ $loops -lt 6 ]; then
sleep 5
slept=$((loops * 2))
if [ $slept -lt $SOLR_STOP_WAIT ]; then
sleep 2
loops=$[$loops+1]
else
echo -e "Still not seeing Solr listening on $SOLR_PORT after 30 seconds!"
echo -e "Still not seeing Solr listening on $SOLR_PORT after $SOLR_STOP_WAIT seconds!"
tail -30 "$SOLR_LOGS_DIR/solr.log"
exit # subshell!
fi

View File

@@ -21,6 +21,12 @@
# affecting other Java applications on your server/workstation.
#SOLR_JAVA_HOME=""
# This controls the number of seconds that the solr script will wait for
# Solr to stop gracefully or Solr to start. If the graceful stop fails,
# the script will forcibly stop Solr. If the start fails, the script will
# give up waiting and display the last few lines of the logfile.
#SOLR_STOP_WAIT="180"
# Increase Java Heap as needed to support your indexing / query needs
#SOLR_HEAP="512m"

View File

@@ -35,7 +35,7 @@ public interface DIHCache extends Iterable<Map<String,Object>> {
* includes any parameters needed by the cache impl. This must be called
* before any read/write operations are permitted.
*/
public void open(Context context);
void open(Context context);
/**
* <p>
@@ -43,14 +43,14 @@ public interface DIHCache extends Iterable<Map<String,Object>> {
* but not destroyed.
* </p>
*/
public void close();
void close();
/**
* <p>
* Persists any pending data to the cache
* </p>
*/
public void flush();
void flush();
/**
* <p>
@@ -67,7 +67,7 @@ public interface DIHCache extends Iterable<Map<String,Object>> {
* update a key's documents, first call delete(Object key).
* </p>
*/
public void add(Map<String,Object> rec);
void add(Map<String, Object> rec);
/**
* <p>
@@ -76,7 +76,7 @@ public interface DIHCache extends Iterable<Map<String,Object>> {
* </p>
*/
@Override
public Iterator<Map<String,Object>> iterator();
Iterator<Map<String,Object>> iterator();
/**
* <p>
@@ -84,20 +84,20 @@ public interface DIHCache extends Iterable<Map<String,Object>> {
* match the given key in insertion order.
* </p>
*/
public Iterator<Map<String,Object>> iterator(Object key);
Iterator<Map<String,Object>> iterator(Object key);
/**
* <p>
* Delete all documents associated with the given key
* </p>
*/
public void delete(Object key);
void delete(Object key);
/**
* <p>
* Delete all data from the cache, leaving the empty cache intact.
* </p>
*/
public void deleteAll();
void deleteAll();
}

View File

@@ -30,6 +30,6 @@ public interface EventListener {
*
* @param ctx the Context in which this event was called
*/
public void onEvent(Context ctx);
void onEvent(Context ctx);
}
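Dropping the redundant modifier leaves a plain single-method interface, so on Java 8 an implementation can be supplied inline as a lambda. A small illustrative sketch; the handler body is an assumption, not from this commit:

import org.apache.solr.handler.dataimport.Context;
import org.apache.solr.handler.dataimport.EventListener;

public class ImportListeners {
  // EventListener has a single abstract method, so a lambda satisfies it.
  static final EventListener ON_EVENT =
      ctx -> System.out.println("event fired for entity: " + ctx.getEntityAttribute("name"));
}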

View File

@@ -86,11 +86,13 @@ public class XPathEntityProcessor extends EntityProcessorBase {
protected Thread publisherThread;
protected boolean reinitXPathReader = true;
@Override
@SuppressWarnings("unchecked")
public void init(Context context) {
super.init(context);
if (xpathReader == null)
if (reinitXPathReader)
initXpathReader(context.getVariableResolver());
pk = context.getEntityAttribute("pk");
dataSource = context.getDataSource();
@@ -99,6 +101,7 @@ }
}
private void initXpathReader(VariableResolver resolver) {
reinitXPathReader = false;
useSolrAddXml = Boolean.parseBoolean(context
.getEntityAttribute(USE_SOLR_ADD_SCHEMA));
streamRows = Boolean.parseBoolean(context
@@ -147,11 +150,12 @@ public class XPathEntityProcessor extends EntityProcessorBase {
xpathReader.addField("name", "/add/doc/field/@name", true);
xpathReader.addField("value", "/add/doc/field", true);
} else {
String forEachXpath = context.getEntityAttribute(FOR_EACH);
String forEachXpath = context.getResolvedEntityAttribute(FOR_EACH);
if (forEachXpath == null)
throw new DataImportHandlerException(SEVERE,
"Entity : " + context.getEntityAttribute("name")
+ " must have a 'forEach' attribute");
if (forEachXpath.equals(context.getEntityAttribute(FOR_EACH))) reinitXPathReader = true;
try {
xpathReader = new XPathRecordReader(forEachXpath);
@@ -164,6 +168,10 @@ }
}
String xpath = field.get(XPATH);
xpath = context.replaceTokens(xpath);
//!xpath.equals(field.get(XPATH)) means the field xpath has a template;
//in that case, ensure that the XPathRecordReader is reinitialized
//for each xml document
if (!xpath.equals(field.get(XPATH)) && !context.isRootEntity()) reinitXPathReader = true;
xpathReader.addField(field.get(DataImporter.COLUMN),
xpath,
Boolean.parseBoolean(field.get(DataImporter.MULTI_VALUED)),
@@ -315,13 +323,7 @@ public class XPathEntityProcessor extends EntityProcessorBase {
rowIterator = getRowIterator(data, s);
} else {
try {
xpathReader.streamRecords(data, new XPathRecordReader.Handler() {
@Override
@SuppressWarnings("unchecked")
public void handle(Map<String, Object> record, String xpath) {
rows.add(readRow(record, xpath));
}
});
xpathReader.streamRecords(data, (record, xpath) -> rows.add(readRow(record, xpath)));
} catch (Exception e) {
String msg = "Parsing failed for xml, url:" + s + " rows processed:" + rows.size();
if (rows.size() > 0) msg += " last row: " + rows.get(rows.size() - 1);
@@ -425,25 +427,21 @@ public class XPathEntityProcessor extends EntityProcessorBase {
@Override
public void run() {
try {
xpathReader.streamRecords(data, new XPathRecordReader.Handler() {
@Override
@SuppressWarnings("unchecked")
public void handle(Map<String, Object> record, String xpath) {
if (isEnd.get()) {
throwExp.set(false);
//To end the streaming; otherwise the parsing will go on forever
//even though the consumer has gone away
throw new RuntimeException("BREAK");
}
Map<String, Object> row;
try {
row = readRow(record, xpath);
} catch (Exception e) {
isEnd.set(true);
return;
}
offer(row);
xpathReader.streamRecords(data, (record, xpath) -> {
if (isEnd.get()) {
throwExp.set(false);
//To end the streaming; otherwise the parsing will go on forever
//even though the consumer has gone away
throw new RuntimeException("BREAK");
}
Map<String, Object> row;
try {
row = readRow(record, xpath);
} catch (Exception e) {
isEnd.set(true);
return;
}
offer(row);
});
} catch (Exception e) {
if(throwExp.get()) exp.set(e);

View File

@@ -162,12 +162,7 @@ public class XPathRecordReader {
*/
public List<Map<String, Object>> getAllRecords(Reader r) {
final List<Map<String, Object>> results = new ArrayList<>();
streamRecords(r, new Handler() {
@Override
public void handle(Map<String, Object> record, String s) {
results.add(record);
}
});
streamRecords(r, (record, s) -> results.add(record));
return results;
}
@@ -182,8 +177,8 @@ public class XPathRecordReader {
public void streamRecords(Reader r, Handler handler) {
try {
XMLStreamReader parser = factory.createXMLStreamReader(r);
rootNode.parse(parser, handler, new HashMap<String, Object>(),
new Stack<Set<String>>(), false);
rootNode.parse(parser, handler, new HashMap<>(),
new Stack<>(), false);
} catch (Exception e) {
throw new RuntimeException(e);
}
@@ -657,7 +652,7 @@ public class XPathRecordReader {
/**Implement this interface to stream records as and when one is found.
*
*/
public static interface Handler {
public interface Handler {
/**
* @param record The record map. The key is the field name as provided in
* the addField() methods. The value can be a single String (for single
@@ -666,7 +661,7 @@ public class XPathRecordReader {
* If the handler throws an exception, all parsing will be aborted and the
* exception is propagated up
*/
public void handle(Map<String, Object> record, String xpath);
void handle(Map<String, Object> record, String xpath);
}
private static final Pattern ATTRIB_PRESENT_WITHVAL = Pattern

View File

@@ -20,7 +20,6 @@ import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
@@ -28,6 +27,7 @@ import org.apache.commons.io.FileUtils;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SuppressForbidden;
import org.apache.solr.common.util.Utils;
import org.apache.solr.core.SolrCore;
import org.apache.solr.request.LocalSolrQueryRequest;
import org.apache.solr.request.SolrQueryRequest;
@@ -145,7 +145,7 @@ public abstract class AbstractDataImportHandlerTestCase extends
if (resolver == null) resolver = new VariableResolver();
final Context delegate = new ContextImpl(parent, resolver,
parentDataSource, currProcess,
new HashMap<String, Object>(), null, null);
new HashMap<>(), null, null);
return new TestContext(entityAttrs, delegate, entityFields, parent == null);
}
@@ -155,15 +155,7 @@ public abstract class AbstractDataImportHandlerTestCase extends
*/
@SuppressWarnings("unchecked")
public static Map createMap(Object... args) {
Map result = new LinkedHashMap();
if (args == null || args.length == 0)
return result;
for (int i = 0; i < args.length - 1; i += 2)
result.put(args[i], args[i + 1]);
return result;
return Utils.makeMap(args);
}
@SuppressForbidden(reason = "Needs currentTimeMillis to set modified time for a file")

View File

@@ -0,0 +1,54 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler.dataimport;
import java.io.Reader;
import java.io.StringReader;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
public class MockStringDataSource extends DataSource<Reader> {
private static Map<String, String> cache = new HashMap<>();
public static void setData(String query,
String data) {
cache.put(query, data);
}
public static void clearCache() {
cache.clear();
}
@Override
public void init(Context context, Properties initProps) {
}
@Override
public Reader getData(String query) {
return new StringReader(cache.get(query));
}
@Override
public void close() {
cache.clear();
}
}

View File

@@ -39,6 +39,7 @@ public class TestDocBuilder extends AbstractDataImportHandlerTestCase {
@After
public void tearDown() throws Exception {
MockDataSource.clearCache();
MockStringDataSource.clearCache();
super.tearDown();
}
@@ -180,6 +181,52 @@ public class TestDocBuilder extends AbstractDataImportHandlerTestCase {
assertEquals(3, di.getDocBuilder().importStatistics.rowsCount.get());
}
@Test
public void templateXPath() {
DataImporter di = new DataImporter();
di.loadAndInit(dc_variableXpath);
DIHConfiguration cfg = di.getConfig();
RequestInfo rp = new RequestInfo(null, createMap("command", "full-import"), null);
List<Map<String, Object>> l = new ArrayList<>();
l.add(createMap("id", 1, "name", "iphone", "manufacturer", "Apple"));
l.add(createMap("id", 2, "name", "ipad", "manufacturer", "Apple"));
l.add(createMap("id", 3, "name", "pixel", "manufacturer", "Google"));
MockDataSource.setIterator("select * from x", l.iterator());
List<Map<String,Object>> nestedData = new ArrayList<>();
nestedData.add(createMap("founded", "Cupertino, California, U.S", "year", "1976", "year2", "1976"));
nestedData.add(createMap("founded", "Cupertino, California, U.S", "year", "1976", "year2", "1976"));
nestedData.add(createMap("founded", "Menlo Park, California, U.S", "year", "1998", "year2", "1998"));
MockStringDataSource.setData("companies.xml", xml_attrVariableXpath);
MockStringDataSource.setData("companies2.xml", xml_variableXpath);
MockStringDataSource.setData("companies3.xml", xml_variableForEach);
SolrWriterImpl swi = new SolrWriterImpl();
di.runCmd(rp, swi);
assertEquals(Boolean.TRUE, swi.deleteAllCalled);
assertEquals(Boolean.TRUE, swi.commitCalled);
assertEquals(Boolean.TRUE, swi.finishCalled);
assertEquals(3, swi.docs.size());
for (int i = 0; i < l.size(); i++) {
SolrInputDocument doc = swi.docs.get(i);
Map<String, Object> map = l.get(i);
for (Map.Entry<String, Object> entry : map.entrySet()) {
assertEquals(entry.getValue(), doc.getFieldValue(entry.getKey()));
}
map = nestedData.get(i);
for (Map.Entry<String, Object> entry : map.entrySet()) {
assertEquals(entry.getValue(), doc.getFieldValue(entry.getKey()));
}
}
assertEquals(1, di.getDocBuilder().importStatistics.queryCount.get());
assertEquals(3, di.getDocBuilder().importStatistics.docCount.get());
}
static class SolrWriterImpl extends SolrWriter {
List<SolrInputDocument> docs = new ArrayList<>();
@@ -215,21 +262,73 @@ }
}
public static final String dc_singleEntity = "<dataConfig>\n"
+ "<dataSource type=\"MockDataSource\"/>\n"
+ " <document name=\"X\" >\n"
+ " <entity name=\"x\" query=\"select * from x\">\n"
+ " <field column=\"id\"/>\n"
+ " <field column=\"desc\"/>\n"
+ " <field column=\"desc\" name=\"desc_s\" />" + " </entity>\n"
+ " </document>\n" + "</dataConfig>";
+ "<dataSource type=\"MockDataSource\"/>\n"
+ " <document name=\"X\" >\n"
+ " <entity name=\"x\" query=\"select * from x\">\n"
+ " <field column=\"id\"/>\n"
+ " <field column=\"desc\"/>\n"
+ " <field column=\"desc\" name=\"desc_s\" />" + " </entity>\n"
+ " </document>\n" + "</dataConfig>";
public static final String dc_deltaConfig = "<dataConfig>\n"
+ "<dataSource type=\"MockDataSource\"/>\n"
+ " <document name=\"X\" >\n"
+ " <entity name=\"x\" query=\"select * from x\" deltaQuery=\"select id from x\">\n"
+ " <field column=\"id\"/>\n"
+ " <field column=\"desc\"/>\n"
+ " <field column=\"desc\" name=\"desc_s\" />" + " </entity>\n"
+ " </document>\n" + "</dataConfig>";
+ "<dataSource type=\"MockDataSource\"/>\n"
+ " <document name=\"X\" >\n"
+ " <entity name=\"x\" query=\"select * from x\" deltaQuery=\"select id from x\">\n"
+ " <field column=\"id\"/>\n"
+ " <field column=\"desc\"/>\n"
+ " <field column=\"desc\" name=\"desc_s\" />" + " </entity>\n"
+ " </document>\n" + "</dataConfig>";
public static final String dc_variableXpath = "<dataConfig>\n"
+ "<dataSource type=\"MockDataSource\"/>\n"
+ "<dataSource name=\"xml\" type=\"MockStringDataSource\"/>\n"
+ " <document name=\"X\" >\n"
+ " <entity name=\"x\" query=\"select * from x\">\n"
+ " <field column=\"id\"/>\n"
+ " <field column=\"name\"/>\n"
+ " <field column=\"manufacturer\"/>"
+ " <entity name=\"c1\" url=\"companies.xml\" dataSource=\"xml\" forEach=\"/companies/company\" processor=\"XPathEntityProcessor\">"
+ " <field column=\"year\" xpath=\"/companies/company/year[@name='p_${x.manufacturer}_s']\" />"
+ " </entity>"
+ " <entity name=\"c2\" url=\"companies2.xml\" dataSource=\"xml\" forEach=\"/companies/company\" processor=\"XPathEntityProcessor\">"
+ " <field column=\"founded\" xpath=\"/companies/company/p_${x.manufacturer}_s/founded\" />"
+ " </entity>"
+ " <entity name=\"c3\" url=\"companies3.xml\" dataSource=\"xml\" forEach=\"/companies/${x.manufacturer}\" processor=\"XPathEntityProcessor\">"
+ " <field column=\"year2\" xpath=\"/companies/${x.manufacturer}/year\" />"
+ " </entity>"
+ " </entity>\n"
+ " </document>\n" + "</dataConfig>";
public static final String xml_variableForEach = "<companies>\n" +
"\t<Apple>\n" +
"\t\t<year>1976</year>\n" +
"\t</Apple>\n" +
"\t<Google>\n" +
"\t\t<year>1998</year>\n" +
"\t</Google>\n" +
"</companies>";
public static final String xml_variableXpath = "<companies>\n" +
"\t<company>\n" +
"\t\t<p_Apple_s>\n" +
"\t\t\t<founded>Cupertino, California, U.S</founded>\n" +
"\t\t</p_Apple_s>\t\t\n" +
"\t</company>\n" +
"\t<company>\n" +
"\t\t<p_Google_s>\n" +
"\t\t\t<founded>Menlo Park, California, U.S</founded>\n" +
"\t\t</p_Google_s>\n" +
"\t</company>\n" +
"</companies>";
public static final String xml_attrVariableXpath = "<companies>\n" +
"\t<company>\n" +
"\t\t<year name='p_Apple_s'>1976</year>\n" +
"\t</company>\n" +
"\t<company>\n" +
"\t\t<year name='p_Google_s'>1998</year>\t\t\n" +
"\t</company>\n" +
"</companies>";
}

View File

@@ -16,13 +16,13 @@
*/
package org.apache.solr.handler.dataimport;
import org.junit.Test;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.junit.Test;
/**
* <p> Test for XPathRecordReader </p>
*
@@ -138,13 +138,10 @@ public class TestXPathRecordReader extends AbstractDataImportHandlerTestCase {
final List<Map<String, Object>> a = new ArrayList<>();
final List<Map<String, Object>> x = new ArrayList<>();
rr.streamRecords(new StringReader(xml), new XPathRecordReader.Handler() {
@Override
public void handle(Map<String, Object> record, String xpath) {
if (record == null) return;
if (xpath.equals("/root/a")) a.add(record);
if (xpath.equals("/root/x")) x.add(record);
}
rr.streamRecords(new StringReader(xml), (record, xpath) -> {
if (record == null) return;
if (xpath.equals("/root/a")) a.add(record);
if (xpath.equals("/root/x")) x.add(record);
});
assertEquals(1, a.size());

View File

@@ -99,7 +99,7 @@ public final class TokenizerChain extends SolrAnalyzer {
@Override
protected TokenStreamComponents createComponents(String fieldName) {
Tokenizer tk = tokenizer.create(attributeFactory());
Tokenizer tk = tokenizer.create(attributeFactory(fieldName));
TokenStream ts = tk;
for (TokenFilterFactory filter : filters) {
ts = filter.create(ts);

View File

@@ -170,7 +170,7 @@ public class MoreLikeThisComponent extends SearchComponent {
&& rb.req.getParams().getBool(COMPONENT_NAME, false)) {
Map<Object,SolrDocumentList> tempResults = new LinkedHashMap<>();
int mltcount = rb.req.getParams().getInt(MoreLikeThisParams.DOC_COUNT, 5);
int mltcount = rb.req.getParams().getInt(MoreLikeThisParams.DOC_COUNT, MoreLikeThisParams.DEFAULT_DOC_COUNT);
String keyName = rb.req.getSchema().getUniqueKeyField().getName();
for (ShardRequest sreq : rb.finished) {

View File

@@ -3263,7 +3263,7 @@ public class SolrCLI {
.create("m"),
OptionBuilder
.withDescription("Timeout in ms for commands supporting a timeout")
.withLongOpt("ms")
.withLongOpt("timeout")
.hasArg(true)
.withType(Long.class)
.withArgName("ms")

View File

@@ -27,6 +27,7 @@ import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.FacetParams;
import org.apache.solr.common.params.HighlightParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.MoreLikeThisParams;
import org.apache.solr.common.params.StatsParams;
import org.apache.solr.common.params.TermsParams;
@@ -801,6 +802,253 @@ public class SolrQuery extends ModifiableSolrParams
return this;
}
/**
* Add field for MoreLikeThis. Automatically
* enables MoreLikeThis.
*
* @param field the name of the field to be added
* @return this
*/
public SolrQuery addMoreLikeThisField(String field) {
this.setMoreLikeThis(true);
return addValueToParam(MoreLikeThisParams.SIMILARITY_FIELDS, field);
}
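/**
* Sets the fields for MoreLikeThis, replacing any previous values, and
* enables MoreLikeThis. Passing null or no fields removes the field list
* and disables MoreLikeThis.
*
* @param fields the names of the fields to be used
* @return this
*/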
public SolrQuery setMoreLikeThisFields(String... fields) {
if( fields == null || fields.length == 0 ) {
this.remove( MoreLikeThisParams.SIMILARITY_FIELDS );
this.setMoreLikeThis(false);
return this;
}
StringBuilder sb = new StringBuilder();
sb.append(fields[0]);
for (int i = 1; i < fields.length; i++) {
sb.append(',');
sb.append(fields[i]);
}
this.set(MoreLikeThisParams.SIMILARITY_FIELDS, sb.toString());
this.setMoreLikeThis(true);
return this;
}
/**
* @return an array with the fields used to compute similarity.
*/
public String[] getMoreLikeThisFields() {
String fl = this.get(MoreLikeThisParams.SIMILARITY_FIELDS);
if(fl==null || fl.length()==0) {
return null;
}
return fl.split(",");
}
/**
* Sets the frequency below which terms will be ignored in the source doc
*
* @param mintf the minimum term frequency
* @return this
*/
public SolrQuery setMoreLikeThisMinTermFreq(int mintf) {
this.set(MoreLikeThisParams.MIN_TERM_FREQ, mintf);
return this;
}
/**
* Gets the frequency below which terms will be ignored in the source doc
*/
public int getMoreLikeThisMinTermFreq() {
return this.getInt(MoreLikeThisParams.MIN_TERM_FREQ, 2);
}
/**
* Sets the minimum document frequency: words that do not occur in
* at least this many docs will be ignored.
*
* @param mindf the minimum document frequency
* @return this
*/
public SolrQuery setMoreLikeThisMinDocFreq(int mindf) {
this.set(MoreLikeThisParams.MIN_DOC_FREQ, mindf);
return this;
}
/**
* Gets the minimum document frequency: words that do not occur in
* at least this many docs are ignored.
*/
public int getMoreLikeThisMinDocFreq() {
return this.getInt(MoreLikeThisParams.MIN_DOC_FREQ, 5);
}
/**
* Sets the minimum word length below which words will be ignored.
*
* @param minwl the minimum word length
* @return this
*/
public SolrQuery setMoreLikeThisMinWordLen(int minwl) {
this.set(MoreLikeThisParams.MIN_WORD_LEN, minwl);
return this;
}
/**
* Gets the minimum word length below which words will be ignored.
*/
public int getMoreLikeThisMinWordLen() {
return this.getInt(MoreLikeThisParams.MIN_WORD_LEN, 0);
}
/**
* Sets the maximum word length above which words will be ignored.
*
* @param maxwl the maximum word length
* @return this
*/
public SolrQuery setMoreLikeThisMaxWordLen(int maxwl) {
this.set(MoreLikeThisParams.MAX_WORD_LEN, maxwl);
return this;
}
/**
* Gets the maximum word length above which words will be ignored.
*/
public int getMoreLikeThisMaxWordLen() {
return this.getInt(MoreLikeThisParams.MAX_WORD_LEN, 0);
}
/**
* Sets the maximum number of query terms that will be included in any
* generated query.
*
* @param maxqt the maximum number of query terms
* @return this
*/
public SolrQuery setMoreLikeThisMaxQueryTerms(int maxqt) {
this.set(MoreLikeThisParams.MAX_QUERY_TERMS, maxqt);
return this;
}
/**
* Gets the maximum number of query terms that will be included in any
* generated query.
*/
public int getMoreLikeThisMaxQueryTerms() {
return this.getInt(MoreLikeThisParams.MAX_QUERY_TERMS, 25);
}
/**
* Sets the maximum number of tokens to parse in each example doc field
* that is not stored with TermVector support.
*
* @param maxntp the maximum number of tokens to parse
* @return this
*/
public SolrQuery setMoreLikeThisMaxTokensParsed(int maxntp) {
this.set(MoreLikeThisParams.MAX_NUM_TOKENS_PARSED, maxntp);
return this;
}
/**
* Gets the maximum number of tokens to parse in each example doc field
* that is not stored with TermVector support.
*/
public int getMoreLikeThisMaxTokensParsed() {
return this.getInt(MoreLikeThisParams.MAX_NUM_TOKENS_PARSED, 5000);
}
/**
* Sets whether the query will be boosted by the interesting term relevance.
*
* @param b set to true to boost the query with the interesting term relevance
* @return this
*/
public SolrQuery setMoreLikeThisBoost(boolean b) {
this.set(MoreLikeThisParams.BOOST, b);
return this;
}
/**
* Gets whether the query will be boosted by the interesting term relevance.
*/
public boolean getMoreLikeThisBoost() {
return this.getBool(MoreLikeThisParams.BOOST, false);
}
/**
* Sets the query fields and their boosts using the same format as that
* used in DisMaxQParserPlugin. These fields must also be added
* using {@link #addMoreLikeThisField(String)}.
*
* @param qf the query fields
* @return this
*/
public SolrQuery setMoreLikeThisQF(String qf) {
this.set(MoreLikeThisParams.QF, qf);
return this;
}
/**
* Gets the query fields and their boosts.
*/
public String getMoreLikeThisQF() {
return this.get(MoreLikeThisParams.QF);
}
/**
* Sets the number of similar documents to return for each result.
*
* @param count the number of similar documents to return for each result
* @return this
*/
public SolrQuery setMoreLikeThisCount(int count) {
this.set(MoreLikeThisParams.DOC_COUNT, count);
return this;
}
/**
* Gets the number of similar documents to return for each result.
*/
public int getMoreLikeThisCount() {
return this.getInt(MoreLikeThisParams.DOC_COUNT, MoreLikeThisParams.DEFAULT_DOC_COUNT);
}
/**
* Enable/Disable MoreLikeThis. After enabling MoreLikeThis, the fields
* used for computing similarity must be specified by calling
* {@link #addMoreLikeThisField(String)}.
*
* @param b flag to indicate if MoreLikeThis should be enabled. If b==false,
* all mlt.* parameters are removed
* @return this
*/
public SolrQuery setMoreLikeThis(boolean b) {
if(b) {
this.set(MoreLikeThisParams.MLT, true);
} else {
this.remove(MoreLikeThisParams.MLT);
this.remove(MoreLikeThisParams.SIMILARITY_FIELDS);
this.remove(MoreLikeThisParams.MIN_TERM_FREQ);
this.remove(MoreLikeThisParams.MIN_DOC_FREQ);
this.remove(MoreLikeThisParams.MIN_WORD_LEN);
this.remove(MoreLikeThisParams.MAX_WORD_LEN);
this.remove(MoreLikeThisParams.MAX_QUERY_TERMS);
this.remove(MoreLikeThisParams.MAX_NUM_TOKENS_PARSED);
this.remove(MoreLikeThisParams.BOOST);
this.remove(MoreLikeThisParams.QF);
this.remove(MoreLikeThisParams.DOC_COUNT);
}
return this;
}
/**
* @return true if MoreLikeThis is enabled, false otherwise
*/
public boolean getMoreLikeThis() {
return this.getBool(MoreLikeThisParams.MLT, false);
}
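Taken together, the new setters replace hand-assembled mlt.* parameters, and the getMoreLikeThis() accessor added to QueryResponse below reads the result back. A hedged usage sketch; the client, field names, and document key are hypothetical:

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.util.NamedList;

public class MltQuerySketch {
  // Build an MLT-enabled query with the typed setters instead of raw "mlt.*" params.
  public static SolrDocumentList similarTo(SolrClient client, String docKey) throws Exception {
    SolrQuery q = new SolrQuery("*:*");
    q.setMoreLikeThisFields("title_s", "body_t"); // also sets mlt=true
    q.setMoreLikeThisMinTermFreq(0);              // count every term in the source doc
    q.setMoreLikeThisCount(3);                    // similar docs per result

    QueryResponse rsp = client.query(q);
    NamedList<SolrDocumentList> mlt = rsp.getMoreLikeThis();
    return mlt.get(docKey);                       // keyed by the unique key field value
  }
}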
public SolrQuery setFields(String ... fields) {
if( fields == null || fields.length == 0 ) {
this.remove( CommonParams.FL );

View File

@@ -51,6 +51,7 @@ public class QueryResponse extends SolrResponseBase
private Map<String,NamedList<Object>> _suggestInfo = null;
private NamedList<Object> _statsInfo = null;
private NamedList<NamedList<Number>> _termsInfo = null;
private NamedList<SolrDocumentList> _moreLikeThisInfo = null;
private String _cursorMarkNext = null;
// Grouping response
@@ -168,6 +169,9 @@ public class QueryResponse extends SolrResponseBase
_termsInfo = (NamedList<NamedList<Number>>) res.getVal( i );
extractTermsInfo( _termsInfo );
}
else if ( "moreLikeThis".equals( n ) ) {
_moreLikeThisInfo = (NamedList<SolrDocumentList>) res.getVal( i );
}
else if ( CursorMarkParams.CURSOR_MARK_NEXT.equals( n ) ) {
_cursorMarkNext = (String) res.getVal( i );
}
@@ -548,6 +552,10 @@ public class QueryResponse extends SolrResponseBase
return _termsResponse;
}
public NamedList<SolrDocumentList> getMoreLikeThis() {
return _moreLikeThisInfo;
}
/**
* See also: {@link #getLimitingFacets()}
*/

View File

@@ -51,6 +51,9 @@ public interface MoreLikeThisParams
// Do you want to include the original document in the results or not
public final static String INTERESTING_TERMS = PREFIX + "interestingTerms"; // false,details,(list or true)
// the default doc count
public final static int DEFAULT_DOC_COUNT = 5;
public enum TermStyle {
NONE,
LIST,

View File

@@ -1996,37 +1996,38 @@ abstract public class SolrExampleTests extends SolrExampleTestsBase
// test with mlt.fl having comma separated values
SolrQuery q = new SolrQuery("*:*");
q.setRows(20);
q.setParam("mlt", "true");
q.setParam("mlt.mintf", "0");
q.setParam("mlt.count", "2");
q.setParam("mlt.fl", "x_s,y_s,z_s");
q.setMoreLikeThisFields("x_s", "y_s", "z_s");
q.setMoreLikeThisMinTermFreq(0);
q.setMoreLikeThisCount(2);
QueryResponse response = client.query(q);
assertEquals(20, response.getResults().getNumFound());
NamedList<Object> moreLikeThis = (NamedList<Object>) response.getResponse().get("moreLikeThis");
NamedList<SolrDocumentList> moreLikeThis = response.getMoreLikeThis();
assertNotNull("MoreLikeThis response should not have been null", moreLikeThis);
for (int i=0; i<20; i++) {
String id = "testMoreLikeThis" + i;
SolrDocumentList mltResp = (SolrDocumentList) moreLikeThis.get(id);
SolrDocumentList mltResp = moreLikeThis.get(id);
assertNotNull("MoreLikeThis response for id=" + id + " should not be null", mltResp);
assertTrue("MoreLikeThis response for id=" + id + " had numFound=0", mltResp.getNumFound() > 0);
assertTrue("MoreLikeThis response for id=" + id + " had not returned exactly 2 documents", mltResp.size() == 2);
}
// now test with multiple mlt.fl parameters
q = new SolrQuery("*:*");
q.setRows(20);
q.setParam("mlt", "true");
q.setParam("mlt.mintf", "0");
q.setParam("mlt.count", "2");
q.setParam("mlt.fl", "x_s", "y_s", "z_s");
q.setMoreLikeThisMinTermFreq(0);
q.setMoreLikeThisCount(2);
response = client.query(q);
assertEquals(20, response.getResults().getNumFound());
moreLikeThis = (NamedList<Object>) response.getResponse().get("moreLikeThis");
moreLikeThis = response.getMoreLikeThis();
assertNotNull("MoreLikeThis response should not have been null", moreLikeThis);
for (int i=0; i<20; i++) {
String id = "testMoreLikeThis" + i;
SolrDocumentList mltResp = (SolrDocumentList) moreLikeThis.get(id);
SolrDocumentList mltResp = moreLikeThis.get(id);
assertNotNull("MoreLikeThis response for id=" + id + " should not be null", mltResp);
assertTrue("MoreLikeThis response for id=" + id + " had numFound=0", mltResp.getNumFound() > 0);
assertTrue("MoreLikeThis response for id=" + id + " had not returned exactly 2 documents", mltResp.size() == 2);
}
}

View File

@@ -431,4 +431,29 @@ public class SolrQueryTest extends LuceneTestCase {
assertNull(solrQuery.getParams("f.field3.facet.interval.set"));
}
public void testMoreLikeThis() {
SolrQuery solrQuery = new SolrQuery();
solrQuery.addMoreLikeThisField("mlt1");
assertTrue(solrQuery.getMoreLikeThis());
solrQuery.addMoreLikeThisField("mlt2");
solrQuery.addMoreLikeThisField("mlt3");
solrQuery.addMoreLikeThisField("mlt4");
assertEquals(4, solrQuery.getMoreLikeThisFields().length);
solrQuery.setMoreLikeThisFields(null);
assertTrue(null == solrQuery.getMoreLikeThisFields());
assertFalse(solrQuery.getMoreLikeThis());
assertEquals(true, solrQuery.setMoreLikeThisBoost(true).getMoreLikeThisBoost());
assertEquals("qf", solrQuery.setMoreLikeThisQF("qf").getMoreLikeThisQF());
assertEquals(10, solrQuery.setMoreLikeThisMaxTokensParsed(10).getMoreLikeThisMaxTokensParsed());
assertEquals(11, solrQuery.setMoreLikeThisMinTermFreq(11).getMoreLikeThisMinTermFreq());
assertEquals(12, solrQuery.setMoreLikeThisMinDocFreq(12).getMoreLikeThisMinDocFreq());
assertEquals(13, solrQuery.setMoreLikeThisMaxWordLen(13).getMoreLikeThisMaxWordLen());
assertEquals(14, solrQuery.setMoreLikeThisMinWordLen(14).getMoreLikeThisMinWordLen());
assertEquals(15, solrQuery.setMoreLikeThisMaxQueryTerms(15).getMoreLikeThisMaxQueryTerms());
assertEquals(16, solrQuery.setMoreLikeThisCount(16).getMoreLikeThisCount());
}
}