commit dd7fec9795

    merged with trunk

    git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/docvalues@1134287 13f79535-47bb-0310-9956-ffa450edef68
@@ -491,6 +491,11 @@ Bug fixes
 * LUCENE-3102: CachingCollector.replay was failing to call setScorer
   per-segment (Martijn van Groningen via Mike McCandless)
 
+* LUCENE-3183: Fix rare corner case where seeking to empty term
+  (field="", term="") with terms index interval 1 could hit
+  ArrayIndexOutOfBoundsException (selckin, Robert Muir, Mike
+  McCandless)
+
 New Features
 
 * LUCENE-3140: Added experimental FST implementation to Lucene.
@@ -1859,7 +1859,7 @@ document.write("Last Published: " + document.lastModified);
 (if compression is enabled, the algorithm used is ZLIB),
 only available for indexes until Lucene version 2.9.x</li>
 
-<li>4th to 6th bits (mask: 0x7<<3) define the type of a
+<li>4th to 6th bit (mask: 0x7<<3) define the type of a
 numeric field: <ul>
 
 <li>all bits in mask are cleared if no numeric field at all</li>
@@ -1868,7 +1868,7 @@ document.write("Last Published: " + document.lastModified);
 
 <li>2<<3: Value is Long</li>
 
-<li>3<<3: Value is Int as Float (as of Integer.intBitsToFloat)</li>
+<li>3<<3: Value is Int as Float (as of Float.intBitsToFloat)</li>
 
 <li>4<<3: Value is Long as Double (as of Double.longBitsToDouble)</li>
 
Binary file not shown.
@@ -153,8 +153,12 @@ public final class SegmentTermEnum implements Cloneable {
     return true;
   }
 
-  /** Optimized scan, without allocating new terms.
-   *  Return number of invocations to next(). */
+  /* Optimized scan, without allocating new terms.
+   *  Return number of invocations to next().
+   *
+   *  NOTE: LUCENE-3183: if you pass Term("", "") here then this
+   *  will incorrectly return before positioning the enum,
+   *  and position will be -1; caller must detect this. */
   final int scanTo(Term term) throws IOException {
     scanBuffer.set(term);
     int count = 0;
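
A note on the comment above: it pushes detection of the un-positioned enum onto callers. A minimal caller-side sketch of that guard (illustrative only; the concrete guard this commit actually adds is the TermInfosReader change below):

    int count = enumerator.scanTo(term);   // may return early for Term("", "")
    if (enumerator.position < 0) {
      // LUCENE-3183: the enum was never positioned; do not trust
      // enumerator.term()/termInfo here (and do not cache the result).
    }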
@@ -57,6 +57,7 @@ public final class TermInfosReader {
     final long termOrd;
     public TermInfoAndOrd(TermInfo ti, long termOrd) {
       super(ti);
+      assert termOrd >= 0;
       this.termOrd = termOrd;
     }
   }
@@ -306,7 +307,13 @@ public final class TermInfosReader {
           ti = enumerator.termInfo;
           if (tiOrd == null) {
             if (useCache) {
-              termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
+              // LUCENE-3183: it's possible, if term is Term("",
+              // ""), for the STE to be incorrectly un-positioned
+              // after scan-to; work around this by not caching in
+              // this case:
+              if (enumerator.position >= 0) {
+                termsCache.put(new CloneableTerm(term), new TermInfoAndOrd(ti, enumerator.position));
+              }
             }
           } else {
             assert sameTermInfo(ti, tiOrd, enumerator);
@@ -1305,12 +1305,12 @@
 <li>third bit is one for fields with compression option enabled
 (if compression is enabled, the algorithm used is ZLIB),
 only available for indexes until Lucene version 2.9.x</li>
-<li>4th to 6th bits (mask: 0x7<<3) define the type of a
+<li>4th to 6th bit (mask: 0x7<<3) define the type of a
 numeric field: <ul>
 <li>all bits in mask are cleared if no numeric field at all</li>
 <li>1<<3: Value is Int</li>
 <li>2<<3: Value is Long</li>
-<li>3<<3: Value is Int as Float (as of Integer.intBitsToFloat)</li>
+<li>3<<3: Value is Int as Float (as of Float.intBitsToFloat)</li>
 <li>4<<3: Value is Long as Double (as of Double.longBitsToDouble)</li>
 </ul></li>
 </ul>
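
The bit layout documented above is compact enough to misread, which is how Integer.intBitsToFloat slipped in. A minimal decoding sketch, assuming a hypothetical helper that is handed the stored-field flags byte plus the raw numeric bits (the name and signature are illustrative, not the FieldsReader API):

    // Decode the numeric type from bits 3-5 (mask 0x7<<3) of the flags byte.
    static Number decodeNumeric(byte flags, long raw) {
      switch ((flags >>> 3) & 0x7) {
        case 0:  return null;                              // all mask bits cleared: not a numeric field
        case 1:  return (int) raw;                         // 1<<3: Int
        case 2:  return raw;                               // 2<<3: Long
        case 3:  return Float.intBitsToFloat((int) raw);   // 3<<3: Int as Float
        case 4:  return Double.longBitsToDouble(raw);      // 4<<3: Long as Double
        default: throw new IllegalStateException("unexpected numeric type bits");
      }
    }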
@@ -73,6 +73,7 @@ import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.ThreadInterruptedException;
 import org.apache.lucene.util.UnicodeUtil;
 import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.index.codecs.preflexrw.PreFlexRWCodec;
 
 public class TestIndexWriter extends LuceneTestCase {
 
@@ -1763,4 +1764,18 @@ public class TestIndexWriter extends LuceneTestCase {
     reader.close();
     dir.close();
   }
+
+  // LUCENE-3183
+  public void testEmptyFieldNameTIIOne() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    iwc.setTermIndexInterval(1);
+    iwc.setReaderTermsIndexDivisor(1);
+    IndexWriter writer = new IndexWriter(dir, iwc);
+    Document doc = new Document();
+    doc.add(newField("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    writer.close();
+    dir.close();
+  }
 }
@@ -24,8 +24,12 @@ $Id$
 (No Changes)
 
 ==================  3.3.0-dev ==============
 
-(No Changes)
+Bug Fixes
+----------------------
+
+* SOLR-2579: UIMAUpdateRequestProcessor ignore error fails if text.length() < 100.
+  (Elmer Garduno via koji)
 
 ==================  3.2.0 ==================
 
@@ -89,15 +89,16 @@ public class UIMAUpdateRequestProcessor extends UpdateRequestProcessor {
               new StringBuilder(". ").append(logField).append("=")
                   .append((String)cmd.getSolrInputDocument().getField(logField).getValue())
                   .append(", ").toString();
-      if (solrUIMAConfiguration.isIgnoreErrors())
+      int len = Math.min(text.length(), 100);
+      if (solrUIMAConfiguration.isIgnoreErrors()) {
         log.warn(new StringBuilder("skip the text processing due to ")
           .append(e.getLocalizedMessage()).append(optionalFieldInfo)
-          .append(" text=\"").append(text.substring(0, 100)).append("...\"").toString());
-      else{
+          .append(" text=\"").append(text.substring(0, len)).append("...\"").toString());
+      } else {
         throw new SolrException(ErrorCode.SERVER_ERROR,
             new StringBuilder("processing error: ")
               .append(e.getLocalizedMessage()).append(optionalFieldInfo)
-              .append(" text=\"").append(text.substring(0, 100)).append("...\"").toString(), e);
+              .append(" text=\"").append(text.substring(0, len)).append("...\"").toString(), e);
       }
     }
     super.processAdd(cmd);
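
The SOLR-2579 failure mode is easy to see in isolation; a standalone sketch (plain Java, not Solr code) of why the old substring call broke the ignoreErrors path for short field values:

    String text = "short";                    // fewer than 100 chars, as in SOLR-2579
    // Before: text.substring(0, 100) throws StringIndexOutOfBoundsException,
    // so the "ignore errors" logging path itself failed.
    int len = Math.min(text.length(), 100);   // clamp the end index to the text length
    String snippet = text.substring(0, len);  // "short", safe for any length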
@@ -24,6 +24,7 @@ import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.MultiMapSolrParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.params.UpdateParams;
@@ -158,6 +159,30 @@ public class UIMAUpdateRequestProcessorTest extends SolrTestCaseJ4 {
         + " Last Lucene European Conference has been held in Prague."));
     assertU(commit());
     assertQ(req("*:*"), "//*[@numFound='1']");
+
+    try{
+      addDoc("uima-not-ignoreErrors", adoc(
+          "id",
+          "2312312321312",
+          "text",
+          "SpellCheckComponent got improvement related to recent Lucene changes."));
+      fail("exception shouldn't be ignored");
+    }
+    catch(StringIndexOutOfBoundsException e){  // SOLR-2579
+      fail("exception shouldn't be raised");
+    }
+    catch(SolrException expected){}
+
+    try{
+      addDoc("uima-ignoreErrors", adoc(
+          "id",
+          "2312312321312",
+          "text",
+          "SpellCheckComponent got improvement related to recent Lucene changes."));
+    }
+    catch(StringIndexOutOfBoundsException e){  // SOLR-2579
+      fail("exception shouldn't be raised");
+    }
+  }
 
   private void addDoc(String chain, String doc) throws Exception {
@@ -465,7 +465,7 @@ public class QueryParsing {
       BooleanQuery q = (BooleanQuery) query;
       boolean needParens = false;
 
-      if (q.getBoost() != 1.0 || q.getMinimumNumberShouldMatch() != 0) {
+      if (q.getBoost() != 1.0 || q.getMinimumNumberShouldMatch() != 0 || q.isCoordDisabled()) {
         needParens = true;
       }
       if (needParens) {
@@ -511,6 +511,9 @@ public class QueryParsing {
         out.append('~');
         out.append(Integer.toString(q.getMinimumNumberShouldMatch()));
       }
+      if (q.isCoordDisabled()) {
+        out.append("/no_coord");
+      }
 
     } else if (query instanceof PrefixQuery) {
       PrefixQuery q = (PrefixQuery) query;
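
With both QueryParsing changes, a BooleanQuery that disables the coord factor is now parenthesized and tagged in Solr's query-toString output. A rough usage sketch (illustrative; the exact rendering depends on the rest of QueryParsing.toString):

    BooleanQuery bq = new BooleanQuery(true);   // true = disable coord
    bq.add(new TermQuery(new Term("text", "lucene")), BooleanClause.Occur.SHOULD);
    bq.add(new TermQuery(new Term("text", "solr")), BooleanClause.Occur.SHOULD);
    // QueryParsing.toString(bq, schema) is now expected to produce something like:
    //   (text:lucene text:solr)/no_coord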