diff --git a/CHANGES.txt b/CHANGES.txt
index 536dd21f102..c01a61baf1f 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -50,6 +50,13 @@ Changes in runtime behavior
...
+ 3. LUCENE-1604: IndexReader.norms(String field) is now allowed to
+ return null if the field has no norms, as long as you've
+ previously called IndexReader.setDisableFakeNorms(true). This
+ setting now defaults to false (to preserve the fake norms
+ back-compatible behavior) but in 3.0 will be hardwired to true. (Shon
+ Vella via Mike McCandless).
+
API Changes
1. LUCENE-1419: Add expert API to set custom indexing chain. This API is
diff --git a/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestEmptyIndex.java b/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestEmptyIndex.java
index 9e3786dc8b9..69fcb460bcd 100644
--- a/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestEmptyIndex.java
+++ b/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestEmptyIndex.java
@@ -72,13 +72,15 @@ public class TestEmptyIndex extends TestCase {
private void testNorms(IndexReader r) throws IOException {
byte[] norms;
norms = r.norms("foo");
- assertNotNull(norms);
- assertEquals(0, norms.length);
- norms = new byte[10];
- Arrays.fill(norms, (byte)10);
- r.norms("foo", norms, 10);
- for (byte b : norms) {
- assertEquals((byte)10, b);
+ if (!r.getDisableFakeNorms()) {
+ assertNotNull(norms);
+ assertEquals(0, norms.length);
+ norms = new byte[10];
+ Arrays.fill(norms, (byte)10);
+ r.norms("foo", norms, 10);
+ for (byte b : norms) {
+ assertEquals((byte)10, b);
+ }
}
}
diff --git a/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java b/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java
index 8c332b5782c..2cca53b5524 100644
--- a/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java
+++ b/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java
@@ -247,39 +247,41 @@ public class TestIndicesEquals extends TestCase {
byte[] aprioriNorms = aprioriReader.norms((String) field);
byte[] testNorms = testReader.norms((String) field);
- assertEquals(aprioriNorms.length, testNorms.length);
+ if (!aprioriReader.getDisableFakeNorms()) {
+ assertEquals(aprioriNorms.length, testNorms.length);
- for (int i = 0; i < aprioriNorms.length; i++) {
- assertEquals("norms does not equals for field " + field + " in document " + i, aprioriNorms[i], testNorms[i]);
- }
+ for (int i = 0; i < aprioriNorms.length; i++) {
+ assertEquals("norms does not equals for field " + field + " in document " + i, aprioriNorms[i], testNorms[i]);
+ }
- // test norms as used by multireader
+ // test norms as used by multireader
- aprioriNorms = new byte[aprioriReader.maxDoc()];
- aprioriReader.norms((String) field, aprioriNorms, 0);
+ aprioriNorms = new byte[aprioriReader.maxDoc()];
+ aprioriReader.norms((String) field, aprioriNorms, 0);
- testNorms = new byte[testReader.maxDoc()];
- testReader.norms((String) field, testNorms, 0);
+ testNorms = new byte[testReader.maxDoc()];
+ testReader.norms((String) field, testNorms, 0);
- assertEquals(aprioriNorms.length, testNorms.length);
+ assertEquals(aprioriNorms.length, testNorms.length);
- for (int i = 0; i < aprioriNorms.length; i++) {
- assertEquals("norms does not equals for field " + field + " in document " + i, aprioriNorms[i], testNorms[i]);
- }
+ for (int i = 0; i < aprioriNorms.length; i++) {
+ assertEquals("norms does not equals for field " + field + " in document " + i, aprioriNorms[i], testNorms[i]);
+ }
- // test norms as used by multireader
+ // test norms as used by multireader
- aprioriNorms = new byte[aprioriReader.maxDoc() + 10];
- aprioriReader.norms((String) field, aprioriNorms, 10);
+ aprioriNorms = new byte[aprioriReader.maxDoc() + 10];
+ aprioriReader.norms((String) field, aprioriNorms, 10);
- testNorms = new byte[testReader.maxDoc() + 10];
- testReader.norms((String) field, testNorms, 10);
+ testNorms = new byte[testReader.maxDoc() + 10];
+ testReader.norms((String) field, testNorms, 10);
- assertEquals(aprioriNorms.length, testNorms.length);
-
- for (int i = 0; i < aprioriNorms.length; i++) {
- assertEquals("norms does not equals for field " + field + " in document " + i, aprioriNorms[i], testNorms[i]);
+ assertEquals(aprioriNorms.length, testNorms.length);
+
+ for (int i = 0; i < aprioriNorms.length; i++) {
+ assertEquals("norms does not equals for field " + field + " in document " + i, aprioriNorms[i], testNorms[i]);
+ }
}
}
diff --git a/contrib/miscellaneous/src/test/org/apache/lucene/index/TestFieldNormModifier.java b/contrib/miscellaneous/src/test/org/apache/lucene/index/TestFieldNormModifier.java
index 7bc5da71e62..a4f248c5da3 100644
--- a/contrib/miscellaneous/src/test/org/apache/lucene/index/TestFieldNormModifier.java
+++ b/contrib/miscellaneous/src/test/org/apache/lucene/index/TestFieldNormModifier.java
@@ -92,8 +92,12 @@ public class TestFieldNormModifier extends TestCase {
// sanity check, norms should all be 1
assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));
- for (int i = 0; i< norms.length; i++) {
- assertEquals(""+i, DEFAULT_NORM, norms[i]);
+ if (!r.getDisableFakeNorms()) {
+ for (int i = 0; i< norms.length; i++) {
+ assertEquals(""+i, DEFAULT_NORM, norms[i]);
+ }
+ } else {
+ assertNull(norms);
}
r.close();
@@ -110,8 +114,12 @@ public class TestFieldNormModifier extends TestCase {
norms = r.norms("nonorm");
assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));
- for (int i = 0; i< norms.length; i++) {
- assertEquals(""+i, DEFAULT_NORM, norms[i]);
+ if (!r.getDisableFakeNorms()) {
+ for (int i = 0; i< norms.length; i++) {
+ assertEquals(""+i, DEFAULT_NORM, norms[i]);
+ }
+ } else {
+ assertNull(norms);
}
r.close();
diff --git a/contrib/miscellaneous/src/test/org/apache/lucene/misc/TestLengthNormModifier.java b/contrib/miscellaneous/src/test/org/apache/lucene/misc/TestLengthNormModifier.java
index 95610db7ae8..c2f74f11e22 100644
--- a/contrib/miscellaneous/src/test/org/apache/lucene/misc/TestLengthNormModifier.java
+++ b/contrib/miscellaneous/src/test/org/apache/lucene/misc/TestLengthNormModifier.java
@@ -98,9 +98,13 @@ public class TestLengthNormModifier extends TestCase {
// sanity check, norms should all be 1
assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));
- for (int i = 0; i< norms.length; i++) {
+ if (!r.getDisableFakeNorms()) {
+ for (int i = 0; i< norms.length; i++) {
assertEquals(""+i, DEFAULT_NORM, norms[i]);
- }
+ }
+ } else {
+ assertNull(norms);
+ }
r.close();
@@ -116,9 +120,13 @@ public class TestLengthNormModifier extends TestCase {
norms = r.norms("nonorm");
assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));
- for (int i = 0; i< norms.length; i++) {
+ if (!r.getDisableFakeNorms()) {
+ for (int i = 0; i< norms.length; i++) {
assertEquals(""+i, DEFAULT_NORM, norms[i]);
- }
+ }
+ } else {
+ assertNull(norms);
+ }
r.close();
diff --git a/src/java/org/apache/lucene/index/DirectoryIndexReader.java b/src/java/org/apache/lucene/index/DirectoryIndexReader.java
index 03792e376fd..52237f2f620 100644
--- a/src/java/org/apache/lucene/index/DirectoryIndexReader.java
+++ b/src/java/org/apache/lucene/index/DirectoryIndexReader.java
@@ -227,7 +227,9 @@ abstract class DirectoryIndexReader extends IndexReader implements Cloneable {
// TODO: right now we *always* make a new reader; in
// the future we could have write make some effort to
// detect that no changes have occurred
- return writer.getReader();
+ IndexReader reader = writer.getReader();
+ reader.setDisableFakeNorms(getDisableFakeNorms());
+ return reader;
}
if (commit == null) {
@@ -298,6 +300,7 @@ abstract class DirectoryIndexReader extends IndexReader implements Cloneable {
} else {
reader = (DirectoryIndexReader) finder.doBody(commit.getSegmentsFileName());
}
+ reader.setDisableFakeNorms(getDisableFakeNorms());
} finally {
if (myCloseDirectory) {
assert directory instanceof FSDirectory;
diff --git a/src/java/org/apache/lucene/index/IndexReader.java b/src/java/org/apache/lucene/index/IndexReader.java
index 9b3bb7c94fb..cafad750b04 100644
--- a/src/java/org/apache/lucene/index/IndexReader.java
+++ b/src/java/org/apache/lucene/index/IndexReader.java
@@ -116,6 +116,8 @@ public abstract class IndexReader implements Cloneable {
private volatile int refCount;
+ private boolean disableFakeNorms = false;
+
/** Expert: returns the current refCount for this reader */
public synchronized int getRefCount() {
return refCount;
@@ -792,6 +794,9 @@ public abstract class IndexReader implements Cloneable {
* int) length normalization}. Thus, to preserve the length normalization
* values when resetting this, one should base the new value upon the old.
*
+ * NOTE: If this field does not store norms, then
+ * this method call will silently do nothing.
+ *
* @see #norms(String)
* @see Similarity#decodeNorm(byte)
* @throws StaleReaderException if the index has changed
@@ -1275,4 +1280,26 @@ public abstract class IndexReader implements Cloneable {
public long getUniqueTermCount() throws IOException {
throw new UnsupportedOperationException("this reader does not implement getUniqueTermCount()");
}
+
+ /** Expert: Return the state of the flag that disables fake norms in favor of representing the absence of field norms with null.
+ * @return true if fake norms are disabled
+ * @deprecated This currently defaults to false (to remain
+ * back-compatible), but in 3.0 it will be hardwired to
+ * true, meaning the norms() methods will return null for
+ * fields that had disabled norms.
+ */
+ public boolean getDisableFakeNorms() {
+ return disableFakeNorms;
+ }
+
+ /** Expert: Set the state of the flag that disables fake norms in favor of representing the absence of field norms with null.
+ * @param disableFakeNorms true to disable fake norms, false to preserve the legacy behavior
+ * @deprecated This currently defaults to false (to remain
+ * back-compatible), but in 3.0 it will be hardwired to
+ * true, meaning the norms() methods will return null for
+ * fields that had disabled norms.
+ */
+ public void setDisableFakeNorms(boolean disableFakeNorms) {
+ this.disableFakeNorms = disableFakeNorms;
+ }
}
diff --git a/src/java/org/apache/lucene/index/MultiReader.java b/src/java/org/apache/lucene/index/MultiReader.java
index 2849a92ea1d..13aac0bb10b 100644
--- a/src/java/org/apache/lucene/index/MultiReader.java
+++ b/src/java/org/apache/lucene/index/MultiReader.java
@@ -18,6 +18,7 @@ package org.apache.lucene.index;
*/
import java.io.IOException;
+import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
@@ -27,6 +28,7 @@ import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.index.MultiSegmentReader.MultiTermDocs;
import org.apache.lucene.index.MultiSegmentReader.MultiTermEnum;
import org.apache.lucene.index.MultiSegmentReader.MultiTermPositions;
+import org.apache.lucene.search.DefaultSimilarity;
/** An IndexReader which reads multiple indexes, appending their content.
*
@@ -179,6 +181,7 @@ public class MultiReader extends IndexReader implements Cloneable {
}
MultiReader mr = new MultiReader(newSubReaders);
mr.decrefOnClose = newDecrefOnClose;
+ mr.setDisableFakeNorms(getDisableFakeNorms());
return mr;
} else {
return this;
@@ -288,7 +291,7 @@ public class MultiReader extends IndexReader implements Cloneable {
if (bytes != null)
return bytes; // cache hit
if (!hasNorms(field))
- return fakeNorms();
+ return getDisableFakeNorms() ? null : fakeNorms();
bytes = new byte[maxDoc()];
for (int i = 0; i < subReaders.length; i++)
@@ -301,12 +304,18 @@ public class MultiReader extends IndexReader implements Cloneable {
throws IOException {
ensureOpen();
byte[] bytes = (byte[])normsCache.get(field);
- if (bytes==null && !hasNorms(field)) bytes=fakeNorms();
- if (bytes != null) // cache hit
- System.arraycopy(bytes, 0, result, offset, maxDoc());
-
for (int i = 0; i < subReaders.length; i++) // read from segments
subReaders[i].norms(field, result, offset + starts[i]);
+
+ if (bytes==null && !hasNorms(field)) {
+ Arrays.fill(result, offset, result.length, DefaultSimilarity.encodeNorm(1.0f));
+ } else if (bytes != null) { // cache hit
+ System.arraycopy(bytes, 0, result, offset, maxDoc());
+ } else {
+ for (int i = 0; i < subReaders.length; i++) { // read from segments
+ subReaders[i].norms(field, result, offset + starts[i]);
+ }
+ }
}
protected void doSetNorm(int n, String field, byte value)
diff --git a/src/java/org/apache/lucene/index/MultiSegmentReader.java b/src/java/org/apache/lucene/index/MultiSegmentReader.java
index 0d61359bad8..c14555b4080 100644
--- a/src/java/org/apache/lucene/index/MultiSegmentReader.java
+++ b/src/java/org/apache/lucene/index/MultiSegmentReader.java
@@ -18,6 +18,7 @@ package org.apache.lucene.index;
*/
import java.io.IOException;
+import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
@@ -27,6 +28,7 @@ import java.util.Set;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.search.DefaultSimilarity;
import org.apache.lucene.store.Directory;
/**
@@ -262,15 +264,18 @@ class MultiSegmentReader extends DirectoryIndexReader implements Cloneable {
}
protected synchronized DirectoryIndexReader doReopen(SegmentInfos infos, boolean doClone, boolean openReadOnly) throws CorruptIndexException, IOException {
- if (infos.size() == 1) {
+ DirectoryIndexReader reader;
+ if (infos.size() == 1) {
// The index has only one segment now, so we can't refresh the MultiSegmentReader.
// Return a new [ReadOnly]SegmentReader instead
- return SegmentReader.get(openReadOnly, infos, infos.info(0), false);
+ reader = SegmentReader.get(openReadOnly, infos, infos.info(0), false);
} else if (openReadOnly) {
- return new ReadOnlyMultiSegmentReader(directory, infos, closeDirectory, subReaders, starts, normsCache, doClone);
+ reader = new ReadOnlyMultiSegmentReader(directory, infos, closeDirectory, subReaders, starts, normsCache, doClone);
} else {
- return new MultiSegmentReader(directory, infos, closeDirectory, subReaders, starts, normsCache, false, doClone);
- }
+ reader = new MultiSegmentReader(directory, infos, closeDirectory, subReaders, starts, normsCache, false, doClone);
+ }
+ reader.setDisableFakeNorms(getDisableFakeNorms());
+ return reader;
}
public TermFreqVector[] getTermFreqVectors(int n) throws IOException {
@@ -397,7 +402,7 @@ class MultiSegmentReader extends DirectoryIndexReader implements Cloneable {
if (bytes != null)
return bytes; // cache hit
if (!hasNorms(field))
- return fakeNorms();
+ return getDisableFakeNorms() ? null : fakeNorms();
bytes = new byte[maxDoc()];
for (int i = 0; i < subReaders.length; i++)
@@ -410,12 +415,15 @@ class MultiSegmentReader extends DirectoryIndexReader implements Cloneable {
throws IOException {
ensureOpen();
byte[] bytes = (byte[])normsCache.get(field);
- if (bytes==null && !hasNorms(field)) bytes=fakeNorms();
- if (bytes != null) // cache hit
+ if (bytes==null && !hasNorms(field)) {
+ Arrays.fill(result, offset, result.length, DefaultSimilarity.encodeNorm(1.0f));
+ } else if (bytes != null) { // cache hit
System.arraycopy(bytes, 0, result, offset, maxDoc());
-
- for (int i = 0; i < subReaders.length; i++) // read from segments
- subReaders[i].norms(field, result, offset + starts[i]);
+ } else {
+ for (int i = 0; i < subReaders.length; i++) { // read from segments
+ subReaders[i].norms(field, result, offset + starts[i]);
+ }
+ }
}
protected void doSetNorm(int n, String field, byte value)
@@ -514,6 +522,12 @@ class MultiSegmentReader extends DirectoryIndexReader implements Cloneable {
throw new IllegalStateException("no readers");
}
+ public void setDisableFakeNorms(boolean disableFakeNorms) {
+ super.setDisableFakeNorms(disableFakeNorms);
+ for (int i = 0; i < subReaders.length; i++)
+ subReaders[i].setDisableFakeNorms(disableFakeNorms);
+ }
+
static class MultiTermEnum extends TermEnum {
private SegmentMergeQueue queue;
diff --git a/src/java/org/apache/lucene/index/SegmentReader.java b/src/java/org/apache/lucene/index/SegmentReader.java
index c33baa9e52c..2ee1ae41b53 100644
--- a/src/java/org/apache/lucene/index/SegmentReader.java
+++ b/src/java/org/apache/lucene/index/SegmentReader.java
@@ -629,11 +629,11 @@ class SegmentReader extends DirectoryIndexReader {
}
} else {
if (openReadOnly)
- return new ReadOnlyMultiSegmentReader(directory, infos, closeDirectory, new SegmentReader[] {this}, null, null, doClone);
+ newReader = new ReadOnlyMultiSegmentReader(directory, infos, closeDirectory, new SegmentReader[] {this}, null, null, doClone);
else
- return new MultiSegmentReader(directory, infos, closeDirectory, new SegmentReader[] {this}, null, null, false, doClone);
+ newReader = new MultiSegmentReader(directory, infos, closeDirectory, new SegmentReader[] {this}, null, null, false, doClone);
}
-
+ newReader.setDisableFakeNorms(getDisableFakeNorms());
return newReader;
}
@@ -708,6 +708,7 @@ class SegmentReader extends DirectoryIndexReader {
}
}
+ clone.setDisableFakeNorms(getDisableFakeNorms());
clone.norms = new HashMap();
// Clone norms
@@ -1017,6 +1018,7 @@ class SegmentReader extends DirectoryIndexReader {
private byte[] ones;
private byte[] fakeNorms() {
+ assert !getDisableFakeNorms();
if (ones==null) ones=createFakeNorms(maxDoc());
return ones;
}
@@ -1032,7 +1034,7 @@ class SegmentReader extends DirectoryIndexReader {
public synchronized byte[] norms(String field) throws IOException {
ensureOpen();
byte[] bytes = getNorms(field);
- if (bytes==null) bytes=fakeNorms();
+ if (bytes==null && !getDisableFakeNorms()) bytes=fakeNorms();
return bytes;
}
@@ -1053,7 +1055,7 @@ class SegmentReader extends DirectoryIndexReader {
ensureOpen();
Norm norm = (Norm) norms.get(field);
if (norm == null) {
- System.arraycopy(fakeNorms(), 0, bytes, offset, maxDoc());
+ Arrays.fill(bytes, offset, bytes.length, DefaultSimilarity.encodeNorm(1.0f));
return;
}
diff --git a/src/java/org/apache/lucene/search/MultiPhraseQuery.java b/src/java/org/apache/lucene/search/MultiPhraseQuery.java
index 2947ba3dfbc..04be80c4c90 100644
--- a/src/java/org/apache/lucene/search/MultiPhraseQuery.java
+++ b/src/java/org/apache/lucene/search/MultiPhraseQuery.java
@@ -24,7 +24,6 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultipleTermPositions;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermPositions;
-import org.apache.lucene.search.Query;
import org.apache.lucene.util.ToStringUtils;
/**
@@ -225,7 +224,7 @@ public class MultiPhraseQuery extends Query {
Explanation fieldNormExpl = new Explanation();
byte[] fieldNorms = reader.norms(field);
float fieldNorm =
- fieldNorms!=null ? Similarity.decodeNorm(fieldNorms[doc]) : 0.0f;
+ fieldNorms!=null ? Similarity.decodeNorm(fieldNorms[doc]) : 1.0f;
fieldNormExpl.setValue(fieldNorm);
fieldNormExpl.setDescription("fieldNorm(field="+field+", doc="+doc+")");
fieldExpl.addDetail(fieldNormExpl);
diff --git a/src/java/org/apache/lucene/search/PhraseQuery.java b/src/java/org/apache/lucene/search/PhraseQuery.java
index 806243a4a95..db850a8f71b 100644
--- a/src/java/org/apache/lucene/search/PhraseQuery.java
+++ b/src/java/org/apache/lucene/search/PhraseQuery.java
@@ -216,7 +216,7 @@ public class PhraseQuery extends Query {
Explanation fieldNormExpl = new Explanation();
byte[] fieldNorms = reader.norms(field);
float fieldNorm =
- fieldNorms!=null ? Similarity.decodeNorm(fieldNorms[doc]) : 0.0f;
+ fieldNorms!=null ? Similarity.decodeNorm(fieldNorms[doc]) : 1.0f;
fieldNormExpl.setValue(fieldNorm);
fieldNormExpl.setDescription("fieldNorm(field="+field+", doc="+doc+")");
fieldExpl.addDetail(fieldNormExpl);
diff --git a/src/java/org/apache/lucene/search/PhraseScorer.java b/src/java/org/apache/lucene/search/PhraseScorer.java
index f27c8649f37..7e5e720ad28 100644
--- a/src/java/org/apache/lucene/search/PhraseScorer.java
+++ b/src/java/org/apache/lucene/search/PhraseScorer.java
@@ -104,7 +104,7 @@ abstract class PhraseScorer extends Scorer {
public float score() throws IOException {
//System.out.println("scoring " + first.doc);
float raw = getSimilarity().tf(freq) * value; // raw score
- return raw * Similarity.decodeNorm(norms[first.doc]); // normalize
+ return norms == null ? raw : raw * Similarity.decodeNorm(norms[first.doc]); // normalize
}
public boolean skipTo(int target) throws IOException {
diff --git a/src/java/org/apache/lucene/search/TermQuery.java b/src/java/org/apache/lucene/search/TermQuery.java
index cb63eb896ae..6f7c0e3750e 100644
--- a/src/java/org/apache/lucene/search/TermQuery.java
+++ b/src/java/org/apache/lucene/search/TermQuery.java
@@ -111,7 +111,7 @@ public class TermQuery extends Query {
Explanation fieldNormExpl = new Explanation();
byte[] fieldNorms = reader.norms(field);
float fieldNorm =
- fieldNorms!=null ? Similarity.decodeNorm(fieldNorms[doc]) : 0.0f;
+ fieldNorms!=null ? Similarity.decodeNorm(fieldNorms[doc]) : 1.0f;
fieldNormExpl.setValue(fieldNorm);
fieldNormExpl.setDescription("fieldNorm(field="+field+", doc="+doc+")");
fieldExpl.addDetail(fieldNormExpl);
diff --git a/src/java/org/apache/lucene/search/TermScorer.java b/src/java/org/apache/lucene/search/TermScorer.java
index 1acc13bb4c8..35928a1fcc3 100644
--- a/src/java/org/apache/lucene/search/TermScorer.java
+++ b/src/java/org/apache/lucene/search/TermScorer.java
@@ -127,7 +127,7 @@ final class TermScorer extends Scorer {
? scoreCache[f] // cache hit
: getSimilarity().tf(f)*weightValue; // cache miss
- return raw * SIM_NORM_DECODER[norms[doc] & 0xFF]; // normalize for field
+ return norms == null ? raw : raw * SIM_NORM_DECODER[norms[doc] & 0xFF]; // normalize for field
}
/** Skips to the first match beyond the current whose document number is
diff --git a/src/java/org/apache/lucene/search/spans/SpanScorer.java b/src/java/org/apache/lucene/search/spans/SpanScorer.java
index 5399e2ba9e9..2432be83ee8 100644
--- a/src/java/org/apache/lucene/search/spans/SpanScorer.java
+++ b/src/java/org/apache/lucene/search/spans/SpanScorer.java
@@ -89,7 +89,7 @@ public class SpanScorer extends Scorer {
public float score() throws IOException {
float raw = getSimilarity().tf(freq) * value; // raw score
- return raw * Similarity.decodeNorm(norms[doc]); // normalize
+ return norms == null? raw : raw * Similarity.decodeNorm(norms[doc]); // normalize
}
public Explanation explain(final int doc) throws IOException {
diff --git a/src/java/org/apache/lucene/search/spans/SpanWeight.java b/src/java/org/apache/lucene/search/spans/SpanWeight.java
index aa8bfc18343..b0037fa9ebf 100644
--- a/src/java/org/apache/lucene/search/spans/SpanWeight.java
+++ b/src/java/org/apache/lucene/search/spans/SpanWeight.java
@@ -122,7 +122,7 @@ public class SpanWeight implements Weight {
Explanation fieldNormExpl = new Explanation();
byte[] fieldNorms = reader.norms(field);
float fieldNorm =
- fieldNorms!=null ? Similarity.decodeNorm(fieldNorms[doc]) : 0.0f;
+ fieldNorms!=null ? Similarity.decodeNorm(fieldNorms[doc]) : 1.0f;
fieldNormExpl.setValue(fieldNorm);
fieldNormExpl.setDescription("fieldNorm(field="+field+", doc="+doc+")");
fieldExpl.addDetail(fieldNormExpl);
diff --git a/src/test/org/apache/lucene/index/TestIndexReader.java b/src/test/org/apache/lucene/index/TestIndexReader.java
index e45c740bf1f..24adaa39aa2 100644
--- a/src/test/org/apache/lucene/index/TestIndexReader.java
+++ b/src/test/org/apache/lucene/index/TestIndexReader.java
@@ -1377,10 +1377,17 @@ public class TestIndexReader extends LuceneTestCase
String curField = (String) it1.next();
byte[] norms1 = index1.norms(curField);
byte[] norms2 = index2.norms(curField);
- assertEquals(norms1.length, norms2.length);
- for (int i = 0; i < norms1.length; i++) {
- assertEquals("Norm different for doc " + i + " and field '" + curField + "'.", norms1[i], norms2[i]);
- }
+ if (norms1 != null && norms2 != null)
+ {
+ assertEquals(norms1.length, norms2.length);
+ for (int i = 0; i < norms1.length; i++) {
+ assertEquals("Norm different for doc " + i + " and field '" + curField + "'.", norms1[i], norms2[i]);
+ }
+ }
+ else
+ {
+ assertSame(norms1, norms2);
+ }
}
// check deletions
diff --git a/src/test/org/apache/lucene/index/TestSegmentReader.java b/src/test/org/apache/lucene/index/TestSegmentReader.java
index 5fb7372f259..1bbe37879da 100644
--- a/src/test/org/apache/lucene/index/TestSegmentReader.java
+++ b/src/test/org/apache/lucene/index/TestSegmentReader.java
@@ -29,8 +29,6 @@ import org.apache.lucene.document.Fieldable;
import org.apache.lucene.search.DefaultSimilarity;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.MockRAMDirectory;
-import org.apache.lucene.analysis.WhitespaceAnalyzer;
-import org.apache.lucene.search.Similarity;
public class TestSegmentReader extends LuceneTestCase {
private RAMDirectory dir = new RAMDirectory();
@@ -173,16 +171,21 @@ public class TestSegmentReader extends LuceneTestCase {
assertEquals(reader.hasNorms(f.name()), !f.getOmitNorms());
assertEquals(reader.hasNorms(f.name()), !DocHelper.noNorms.containsKey(f.name()));
if (!reader.hasNorms(f.name())) {
- // test for fake norms of 1.0
+ // test for fake norms of 1.0 or null depending on the flag
byte [] norms = reader.norms(f.name());
- assertEquals(norms.length,reader.maxDoc());
- for (int j=0; j