mirror of https://github.com/apache/lucene.git
LUCENE-3796: throw an exception if you set an index-time boost on a field that omits norms
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1245947 13f79535-47bb-0310-9956-ffa450edef68
parent b430734e31
commit 28c3923ceb
@@ -770,6 +770,13 @@ Changes in backwards compatibility policy
   method getCommitUserData (use getIndexCommit().getUserData()
   instead).  (Ryan McKinley, Robert Muir, Mike McCandless)
 
+Changes in Runtime Behavior
+
+* LUCENE-3796: Throw an exception if you try to set an index-time
+  boost on a field that omits norms. Because the index-time boost
+  is multiplied into the norm, previously your boost would be
+  silently discarded.  (Robert Muir)
+
 Security fixes
 
 * LUCENE-3588: Try harder to prevent SIGSEGV on cloned MMapIndexInputs:
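For reference, the behavior change can be exercised with a few lines of user code. The sketch below is written in the same test style as the testBoost case added further down (MockAnalyzer, newDirectory and TEST_VERSION_CURRENT come from Lucene's test framework); the class and field names are illustrative only and not part of this commit.

    // Minimal sketch of the new runtime behavior.
    import org.apache.lucene.analysis.MockAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.StringField;
    import org.apache.lucene.document.TextField;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.util.LuceneTestCase;

    public class BoostOnOmitNormsExample extends LuceneTestCase {
      public void testBoostBehavior() throws Exception {
        Directory dir = newDirectory();
        IndexWriter iw = new IndexWriter(dir,
            new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));

        // TextField indexes norms, so an index-time boost is still accepted.
        Document ok = new Document();
        TextField body = new TextField("body", "some text");
        body.setBoost(2.0f);
        ok.add(body);
        iw.addDocument(ok);

        // StringField omits norms; the boost used to be silently discarded,
        // now addDocument rejects the document instead.
        Document bad = new Document();
        StringField id = new StringField("id", "42");
        id.setBoost(5.0f);
        bad.add(id);
        try {
          iw.addDocument(bad);
          fail("expected UnsupportedOperationException");
        } catch (UnsupportedOperationException expected) {
          // the boost can no longer be dropped on the floor
        }

        iw.close();
        dir.close();
      }
    }

Applications that relied on the old silent behavior either need to stop boosting omit-norms fields or enable norms on those fields.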
@@ -67,11 +67,17 @@ final class DocInverterPerField extends DocFieldConsumerPerField {
     for(int i=0;i<count;i++) {
 
       final IndexableField field = fields[i];
+      final IndexableFieldType fieldType = field.fieldType();
 
       // TODO FI: this should be "genericized" to querying
       // consumer if it wants to see this particular field
       // tokenized.
-      if (field.fieldType().indexed() && doInvert) {
+      if (fieldType.indexed() && doInvert) {
+
+        // if the field omits norms, the boost cannot be indexed.
+        if (fieldType.omitNorms() && field.boost() != 1.0f) {
+          throw new UnsupportedOperationException("You cannot set an index-time boost: norms are omitted for field '" + field.name() + "'");
+        }
 
         if (i > 0) {
           fieldState.position += docState.analyzer == null ? 0 : docState.analyzer.getPositionIncrementGap(fieldInfo.name);
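Code that applies a blanket boost to every field of a document will now trip this check. A small guard, sketched below with an illustrative helper name that is not part of this commit, avoids it by skipping omit-norms fields; the TestSort changes further down adopt the same pattern inline.

    // Hedged sketch: boost only the fields that actually index norms,
    // so IndexWriter does not reject the document.
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexableField;

    final class BoostUtil {
      private BoostUtil() {}

      static void boostNormedFields(Document doc, float boost) {
        for (IndexableField f : doc.getFields()) {
          if (!f.fieldType().omitNorms()) {
            ((Field) f).setBoost(boost);
          }
        }
      }
    }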
@@ -20,10 +20,14 @@ package org.apache.lucene.document;
 import java.io.StringReader;
 
 import org.apache.lucene.analysis.EmptyTokenizer;
+import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -357,4 +361,33 @@ public class TestDocument extends LuceneTestCase {
     r.close();
     dir.close();
   }
+
+  public void testBoost() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    iwc.setMergePolicy(newLogMergePolicy());
+    IndexWriter iw = new IndexWriter(dir, iwc);
+    Document doc = new Document();
+    doc.add(new Field("field1", "sometext", StringField.TYPE_STORED));
+    doc.add(new TextField("field2", "sometext"));
+    doc.add(new StringField("foo", "bar"));
+    iw.addDocument(doc); // add an 'ok' document
+    try {
+      doc = new Document();
+      // try to boost with norms omitted
+      StringField field = new StringField("foo", "baz");
+      field.setBoost(5.0f);
+      doc.add(field);
+      iw.addDocument(doc);
+      fail("didn't get any exception, boost silently discarded");
+    } catch (UnsupportedOperationException expected) {
+      // expected
+    }
+    DirectoryReader ir = DirectoryReader.open(iw, false);
+    assertEquals(1, ir.numDocs());
+    assertEquals("sometext", ir.document(0).get("field1"));
+    ir.close();
+    iw.close();
+    dir.close();
+  }
 }
@@ -175,7 +175,9 @@ public class TestSort extends LuceneTestCase {
       if (data[i][11] != null) doc.add (new StringField ("parser", data[i][11]));
 
       for(IndexableField f : doc.getFields()) {
-        ((Field) f).setBoost(2.0f);
+        if (!f.fieldType().omitNorms()) {
+          ((Field) f).setBoost(2.0f);
+        }
       }
 
       writer.addDocument (doc);
@@ -221,7 +223,9 @@
     }
     doc.add (new Field ("tracer2", num2, onlyStored));
     for(IndexableField f2 : doc.getFields()) {
-      ((Field) f2).setBoost(2.0f);
+      if (!f2.fieldType().omitNorms()) {
+        ((Field) f2).setBoost(2.0f);
+      }
     }
 
     String numFixed = getRandomCharString(fixedLen, 48, 52);
@@ -239,7 +243,9 @@
     doc.add (new Field ("tracer2_fixed", num2Fixed, onlyStored));
 
     for(IndexableField f2 : doc.getFields()) {
-      ((Field) f2).setBoost(2.0f);
+      if (!f2.fieldType().omitNorms()) {
+        ((Field) f2).setBoost(2.0f);
+      }
     }
 
     writer.addDocument (doc);
@@ -235,6 +235,7 @@ public class DocumentBuilder {
       SchemaField sfield = schema.getFieldOrNull(name);
       boolean used = false;
       float boost = field.getBoost();
+      boolean omitNorms = sfield != null && sfield.omitNorms();
 
       // Make sure it has the correct number
       if( sfield!=null && !sfield.multiValued() && field.getValueCount() > 1 ) {
@@ -243,6 +244,11 @@
             sfield.getName() + ": " +field.getValue() );
       }
 
+      if (omitNorms && boost != 1.0F) {
+        throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,
+            "ERROR: "+getID(doc, schema)+"cannot set an index-time boost, norms are omitted for field " +
+                sfield.getName() + ": " +field.getValue() );
+      }
 
       // load each field value
       boolean hasField = false;
@@ -254,7 +260,7 @@
           hasField = true;
           if (sfield != null) {
             used = true;
-            addField(out, sfield, v, docBoost*boost);
+            addField(out, sfield, v, omitNorms ? 1F : docBoost*boost);
           }
 
           // Check if we should copy this field to any other fields.
@@ -277,7 +283,7 @@
                 val = cf.getLimitedValue((String)val);
               }
 
-              IndexableField [] fields = destinationField.createFields(val, docBoost*boost);
+              IndexableField [] fields = destinationField.createFields(val, omitNorms ? 1F : docBoost*boost);
               if (fields != null) { // null fields are not added
                 for (IndexableField f : fields) {
                   if(f != null) out.add(f);
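On the Solr side the same rule is enforced per request: a non-unit boost on an omit-norms field is rejected as a bad request, and omit-norms fields (including copyField destinations fed from them) are indexed with a neutral boost. The helper below is a condensed, hedged paraphrase of that guard; the class, method name and signature are illustrative only, not Solr API.

    // Hedged paraphrase of the DocumentBuilder guard above.
    import org.apache.solr.common.SolrException;

    final class BoostPolicy {
      private BoostPolicy() {}

      static float resolveBoost(boolean omitNorms, float docBoost, float fieldBoost,
                                String fieldName) {
        if (omitNorms && fieldBoost != 1.0f) {
          // mirrors the new SolrException(BAD_REQUEST) thrown by DocumentBuilder
          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
              "cannot set an index-time boost, norms are omitted for field " + fieldName);
        }
        // omit-norms fields (and copyField targets fed from them) get a neutral boost
        return omitNorms ? 1.0f : docBoost * fieldBoost;
      }
    }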
@@ -57,7 +57,7 @@ public class XsltUpdateRequestHandlerTest extends SolrTestCaseJ4 {
     String xml =
       "<random>" +
       " <document>" +
-      "  <node name=\"id\" enhance=\"2.2\" value=\"12345\"/>" +
+      "  <node name=\"id\" value=\"12345\"/>" +
       "  <node name=\"name\" value=\"kitten\"/>" +
      "  <node name=\"text\" enhance=\"3\" value=\"some other day\"/>" +
      "  <node name=\"title\" enhance=\"4\" value=\"A story\"/>" +