mirror of https://github.com/apache/lucene.git
LUCENE-1979: Remove remaining deprecations from index package.
git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@825022 13f79535-47bb-0310-9956-ffa450edef68
parent f89460b246
commit 21ea806aa0
@@ -5,6 +5,11 @@ $Id$
 Changes in backwards compatibility policy
 
+* LUCENE-1979: Change return type of SnapshotDeletionPolicy#snapshot()
+  from IndexCommitPoint to IndexCommit. Code that uses this method
+  needs to be recompiled against Lucene 3.0 in order to work. The previously
+  deprecated IndexCommitPoint is also removed. (Michael Busch)
+
 Changes in runtime behavior
 
 API Changes
 
@@ -67,6 +72,9 @@ API Changes
 
 * LUCENE-944: Remove deprecated methods in BooleanQuery. (Michael Busch)
 
+* LUCENE-1979: Remove remaining deprecations from indexer package.
+  (Michael Busch)
+
 Bug fixes
 
 * LUCENE-1951: When the text provided to WildcardQuery has no wildcard
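
Note: the first CHANGES entry above is the source-incompatible part of this commit. A minimal sketch of what calling code looks like once snapshot() returns IndexCommit instead of the removed IndexCommitPoint, assuming the 2.9/3.0-era API; the directory, analyzer, and wiring below are illustrative, not part of the commit:

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.index.IndexCommit;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
    import org.apache.lucene.index.SnapshotDeletionPolicy;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class SnapshotExample {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        SnapshotDeletionPolicy dp =
            new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), dp,
            IndexWriter.MaxFieldLength.LIMITED);
        writer.commit();                      // make sure there is a commit to snapshot
        IndexCommit commit = dp.snapshot();   // was IndexCommitPoint before LUCENE-1979
        try {
          // copy the files named by commit.getFileNames() while the snapshot is held
          System.out.println("segments file: " + commit.getSegmentsFileName());
        } finally {
          dp.release();                       // allow the snapshotted files to be deleted again
        }
        writer.close();
      }
    }
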
@@ -144,7 +144,7 @@ public class InstantiatedIndexReader extends IndexReader {
     deletedDocuments.clear();
   }
 
-  protected void doCommit() throws IOException {
+  protected void doCommit(Map commitUserData) throws IOException {
     // todo: read/write lock
 
     boolean updated = false;
@@ -71,8 +71,7 @@ public class TestEmptyIndex extends TestCase {
   private void testNorms(IndexReader r) throws IOException {
     byte[] norms;
     norms = r.norms("foo");
-    if (!r.getDisableFakeNorms()) {
-      assertNotNull(norms);
+    if (norms != null) {
       assertEquals(0, norms.length);
       norms = new byte[10];
       Arrays.fill(norms, (byte)10);
@@ -253,7 +253,7 @@ public class TestIndicesEquals extends TestCase {
         byte[] aprioriNorms = aprioriReader.norms((String) field);
         byte[] testNorms = testReader.norms((String) field);
 
-        if (!aprioriReader.getDisableFakeNorms()) {
+        if (aprioriNorms != null) {
           assertEquals(aprioriNorms.length, testNorms.length);
 
           for (int i = 0; i < aprioriNorms.length; i++) {
@@ -1152,7 +1152,7 @@ public class MemoryIndex implements Serializable {
       throw new UnsupportedOperationException();
     }
 
-    protected void doCommit() {
+    protected void doCommit(Map commitUserData) {
       if (DEBUG) System.err.println("MemoryIndexReader.doCommit");
     }
 
@@ -92,13 +92,7 @@ public class TestFieldNormModifier extends TestCase {
 
     // sanity check, norms should all be 1
     assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));
-    if (!r.getDisableFakeNorms()) {
-      for (int i = 0; i< norms.length; i++) {
-        assertEquals(""+i, DEFAULT_NORM, norms[i]);
-      }
-    } else {
-      assertNull(norms);
-    }
+    assertNull(norms);
 
     r.close();
 
@@ -114,13 +108,7 @@ public class TestFieldNormModifier extends TestCase {
 
     norms = r.norms("nonorm");
     assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));
-    if (!r.getDisableFakeNorms()) {
-      for (int i = 0; i< norms.length; i++) {
-        assertEquals(""+i, DEFAULT_NORM, norms[i]);
-      }
-    } else {
-      assertNull(norms);
-    }
+    assertNull(norms);
 
     r.close();
   }
@@ -98,13 +98,7 @@ public class TestLengthNormModifier extends TestCase {
 
     // sanity check, norms should all be 1
     assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));
-    if (!r.getDisableFakeNorms()) {
-      for (int i = 0; i< norms.length; i++) {
-        assertEquals(""+i, DEFAULT_NORM, norms[i]);
-      }
-    } else {
-      assertNull(norms);
-    }
+    assertNull(norms);
 
     r.close();
 
@@ -120,13 +114,7 @@ public class TestLengthNormModifier extends TestCase {
 
     norms = r.norms("nonorm");
    assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));
-    if (!r.getDisableFakeNorms()) {
-      for (int i = 0; i< norms.length; i++) {
-        assertEquals(""+i, DEFAULT_NORM, norms[i]);
-      }
-    } else {
-      assertNull(norms);
-    }
+    assertNull(norms);
 
     r.close();
 
@@ -94,7 +94,7 @@ public class TestDistance extends TestCase{
     addPoint(writer,"Iota Club and Cafe",38.8890000,-77.0923000);
     addPoint(writer,"Hilton Washington Embassy Row",38.9103000,-77.0451000);
     addPoint(writer,"HorseFeathers, Bar & Grill", 39.01220000000001, -77.3942);
-    writer.flush();
+    writer.commit();
   }
 
   public void testLatLongFilterOnDeletedDocs() throws Exception {
@@ -289,23 +289,6 @@ public class CheckIndex {
       }
     }
 
-  /** Returns true if index is clean, else false.
-   *  @deprecated Please instantiate a CheckIndex and then use {@link #checkIndex()} instead */
-  public static boolean check(Directory dir, boolean doFix) throws IOException {
-    return check(dir, doFix, null);
-  }
-
-  /** Returns true if index is clean, else false.
-   *  @deprecated Please instantiate a CheckIndex and then use {@link #checkIndex(List)} instead */
-  public static boolean check(Directory dir, boolean doFix, List onlySegments) throws IOException {
-    CheckIndex checker = new CheckIndex(dir);
-    Status status = checker.checkIndex(onlySegments);
-    if (doFix && !status.clean)
-      checker.fixIndex(status);
-
-    return status.clean;
-  }
-
   /** Returns a {@link Status} instance detailing
    *  the state of the index.
    *
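
Note: the removed static helpers point callers at the instance API, as their @deprecated tags say. A minimal sketch of the replacement usage, assuming the 2.9/3.0-era CheckIndex API; the index path handling below is illustrative:

    import java.io.File;
    import org.apache.lucene.index.CheckIndex;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class CheckIndexExample {
      public static void main(String[] args) throws Exception {
        Directory dir = FSDirectory.open(new File(args[0]));  // path to an existing index
        CheckIndex checker = new CheckIndex(dir);
        checker.setInfoStream(System.out);                     // optional progress output
        CheckIndex.Status status = checker.checkIndex();       // pass a List to limit segments
        if (!status.clean) {
          // fixIndex rewrites the index, dropping unrecoverable segments -- use with care
          checker.fixIndex(status);
        }
        dir.close();
      }
    }
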
@@ -488,7 +471,7 @@ public class CheckIndex {
       }
       if (infoStream != null)
         infoStream.print(" test: open reader.........");
-      reader = SegmentReader.get(info);
+      reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
 
       segInfoStat.openReaderPassed = true;
 
@@ -17,25 +17,25 @@ package org.apache.lucene.index;
  * limitations under the License.
  */
 
-import java.io.IOException;
 import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
-import java.util.Collections;
-import java.util.ArrayList;
 
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.FieldSelector;
 import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.Lock;
 import org.apache.lucene.store.LockObtainFailedException;
-import org.apache.lucene.store.AlreadyClosedException;
 
 /**
  * An IndexReader which reads indexes with multiple segments.
@@ -380,7 +380,6 @@ class DirectoryReader extends IndexReader implements Cloneable {
     // the future we could have write make some effort to
     // detect that no changes have occurred
     IndexReader reader = writer.getReader();
-    reader.setDisableFakeNorms(getDisableFakeNorms());
     return reader;
   }
 
@@ -436,7 +435,6 @@ class DirectoryReader extends IndexReader implements Cloneable {
     } else {
       reader = new DirectoryReader(directory, infos, subReaders, starts, normsCache, false, doClone, termInfosIndexDivisor);
     }
-    reader.setDisableFakeNorms(getDisableFakeNorms());
     return reader;
   }
 
@@ -564,18 +562,13 @@ class DirectoryReader extends IndexReader implements Cloneable {
   }
 
   private byte[] ones;
-  private byte[] fakeNorms() {
-    if (ones==null) ones=SegmentReader.createFakeNorms(maxDoc());
-    return ones;
-  }
-
   public synchronized byte[] norms(String field) throws IOException {
     ensureOpen();
     byte[] bytes = (byte[])normsCache.get(field);
     if (bytes != null)
       return bytes; // cache hit
     if (!hasNorms(field))
-      return getDisableFakeNorms() ? null : fakeNorms();
+      return null;
 
     bytes = new byte[maxDoc()];
     for (int i = 0; i < subReaders.length; i++)
@@ -679,11 +672,6 @@ class DirectoryReader extends IndexReader implements Cloneable {
     }
   }
 
-  /** @deprecated */
-  protected void doCommit() throws IOException {
-    doCommit(null);
-  }
-
   /**
    * Commit changes resulting from delete, undeleteAll, or setNorm operations
    * <p/>
@@ -832,12 +820,6 @@ class DirectoryReader extends IndexReader implements Cloneable {
     return subReaders;
   }
 
-  public void setDisableFakeNorms(boolean disableFakeNorms) {
-    super.setDisableFakeNorms(disableFakeNorms);
-    for (int i = 0; i < subReaders.length; i++)
-      subReaders[i].setDisableFakeNorms(disableFakeNorms);
-  }
-
   /** Returns the directory this index resides in. */
   public Directory directory() {
     // Don't ensureOpen here -- in certain cases, when a
@@ -41,7 +41,7 @@ import org.apache.lucene.store.Directory;
  * may suddenly change. </p>
  */
 
-public abstract class IndexCommit implements IndexCommitPoint {
+public abstract class IndexCommit {
 
   /**
    * Get the segments file (<code>segments_N</code>) associated
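
Note: with IndexCommitPoint gone, custom deletion policies deal in IndexCommit directly. A minimal, illustrative re-implementation of keep-only-the-last-commit behavior, written against the raw (pre-generics) IndexDeletionPolicy signatures that the rest of this commit also uses; the class name is hypothetical:

    import java.util.Iterator;
    import java.util.List;
    import org.apache.lucene.index.IndexCommit;
    import org.apache.lucene.index.IndexDeletionPolicy;

    // Keeps only the most recent commit; older commits are deleted on each commit.
    public class KeepLastCommitPolicy implements IndexDeletionPolicy {
      public void onInit(List commits) {
        deleteAllButLast(commits);
      }
      public void onCommit(List commits) {
        deleteAllButLast(commits);
      }
      private void deleteAllButLast(List commits) {
        // The list elements are IndexCommit now, not the removed IndexCommitPoint.
        for (Iterator it = commits.iterator(); it.hasNext();) {
          IndexCommit commit = (IndexCommit) it.next();
          if (it.hasNext()) {        // delete every commit except the last one
            commit.delete();
          }
        }
      }
    }
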
@@ -980,18 +980,9 @@ public abstract class IndexReader implements Cloneable {
     hasChanges = false;
   }
 
-  /** Implements commit.
-   *  @deprecated Please implement {@link #doCommit(Map)
-   *  instead}. */
-  protected abstract void doCommit() throws IOException;
-
   /** Implements commit.  NOTE: subclasses should override
    *  this.  In 3.0 this will become an abstract method. */
-  void doCommit(Map commitUserData) throws IOException {
-    // Default impl discards commitUserData; all Lucene
-    // subclasses override this (do not discard it).
-    doCommit();
-  }
+  protected abstract void doCommit(Map commitUserData) throws IOException;
 
   /**
   * Closes files associated with this index.
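
Note: doCommit(Map) is now the only commit hook for IndexReader subclasses, and the Map is the commitUserData that a commit records into the segments file. A minimal sketch of the user-data round trip through the public API, assuming the 2.9/3.0-era methods; field names and values below are illustrative:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class CommitUserDataExample {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true,
            IndexWriter.MaxFieldLength.LIMITED);
        Document doc = new Document();
        doc.add(new Field("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
        writer.addDocument(doc);

        Map userData = new HashMap();
        userData.put("lastUpdate", "2009-10-14");
        writer.commit(userData);      // this map ends up in the segments file
        writer.close();

        // Readers (and doCommit(Map) implementations) see the same map.
        Map stored = IndexReader.getCommitUserData(dir);
        System.out.println(stored);   // {lastUpdate=2009-10-14}
        dir.close();
      }
    }
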
@@ -1145,8 +1136,7 @@ public abstract class IndexReader implements Cloneable {
     return null;
   }
 
-  /** Expert
-   *  @deprecated */
+  /** Expert */
   public Object getFieldCacheKey() {
     return this;
   }
@@ -1166,26 +1156,4 @@ public abstract class IndexReader implements Cloneable {
   public long getUniqueTermCount() throws IOException {
     throw new UnsupportedOperationException("this reader does not implement getUniqueTermCount()");
   }
-
-  /** Expert: Return the state of the flag that disables fakes norms in favor of representing the absence of field norms with null.
-   * @return true if fake norms are disabled
-   * @deprecated This currently defaults to false (to remain
-   *  back-compatible), but in 3.0 it will be hardwired to
-   *  true, meaning the norms() methods will return null for
-   *  fields that had disabled norms.
-   */
-  public boolean getDisableFakeNorms() {
-    return disableFakeNorms;
-  }
-
-  /** Expert: Set the state of the flag that disables fakes norms in favor of representing the absence of field norms with null.
-   * @param disableFakeNorms true to disable fake norms, false to preserve the legacy behavior
-   * @deprecated This currently defaults to false (to remain
-   *  back-compatible), but in 3.0 it will be hardwired to
-   *  true, meaning the norms() methods will return null for
-   *  fields that had disabled norms.
-   */
-  public void setDisableFakeNorms(boolean disableFakeNorms) {
-    this.disableFakeNorms = disableFakeNorms;
-  }
 }
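
Note: with the disableFakeNorms flag removed, norms(field) returns null whenever a field has no norms, instead of a fake all-1.0 array. A minimal sketch of the null check callers now need; the helper name is illustrative:

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.Similarity;

    public class NormsNullCheck {
      // Returns the decoded norm for one document, treating a missing norms array
      // (field with norms omitted) as 1.0f -- the value the old fake norms encoded.
      public static float norm(IndexReader reader, String field, int docId) throws IOException {
        byte[] norms = reader.norms(field);
        if (norms == null) {
          return 1.0f;
        }
        return Similarity.decodeNorm(norms[docId]);
      }
    }
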
@@ -611,7 +611,7 @@ public class IndexWriter {
         // TODO: we may want to avoid doing this while
         // synchronized
         // Returns a ref, which we xfer to readerMap:
-        sr = SegmentReader.get(info, readBufferSize, doOpenStores, termsIndexDivisor);
+        sr = SegmentReader.get(false, info.dir, info, readBufferSize, doOpenStores, termsIndexDivisor);
         readerMap.put(info, sr);
       } else {
         if (doOpenStores) {
@@ -3410,30 +3410,6 @@ public class IndexWriter {
     throws IOException {
   }
 
-  /**
-   * Flush all in-memory buffered updates (adds and deletes)
-   * to the Directory.
-   * <p>Note: while this will force buffered docs to be
-   * pushed into the index, it will not make these docs
-   * visible to a reader.  Use {@link #commit()} instead
-   *
-   * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
-   * you should immediately close the writer.  See <a
-   * href="#OOME">above</a> for details.</p>
-   *
-   * @deprecated please call {@link #commit()}) instead
-   *
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws IOException if there is a low-level IO error
-   */
-  public final void flush() throws CorruptIndexException, IOException {
-    if (hitOOM) {
-      throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot flush");
-    }
-
-    flush(true, false, true);
-  }
-
   /** Expert: prepare for commit.
    *
    * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
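
Note: the removed flush() told callers to use commit() instead, which is exactly what the test hunks later in this commit do. A minimal sketch of the replacement pattern, assuming the 2.9/3.0-era API; the directory and document setup are illustrative:

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class CommitInsteadOfFlush {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true,
            IndexWriter.MaxFieldLength.LIMITED);
        Document doc = new Document();
        doc.add(new Field("body", "hello world", Field.Store.YES, Field.Index.ANALYZED));
        writer.addDocument(doc);

        // flush() only pushed buffered docs into a segment; commit() also makes
        // them visible to newly opened readers, which is what these callers want.
        writer.commit();

        IndexReader reader = IndexReader.open(dir, true);
        System.out.println("numDocs=" + reader.numDocs());   // 1
        reader.close();
        writer.close();
        dir.close();
      }
    }
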
@@ -179,7 +179,6 @@ public class MultiReader extends IndexReader implements Cloneable {
       }
       MultiReader mr = new MultiReader(newSubReaders);
       mr.decrefOnClose = newDecrefOnClose;
-      mr.setDisableFakeNorms(getDisableFakeNorms());
       return mr;
     } else {
       return this;
@@ -289,7 +288,7 @@ public class MultiReader extends IndexReader implements Cloneable {
     if (bytes != null)
       return bytes; // cache hit
     if (!hasNorms(field))
-      return getDisableFakeNorms() ? null : fakeNorms();
+      return null;
 
     bytes = new byte[maxDoc()];
     for (int i = 0; i < subReaders.length; i++)
@@ -582,15 +582,6 @@ public class SegmentReader extends IndexReader implements Cloneable {
     }
   }
 
-  /**
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws IOException if there is a low-level IO error
-   * @deprecated
-   */
-  public static SegmentReader get(SegmentInfo si) throws CorruptIndexException, IOException {
-    return get(false, si.dir, si, BufferedIndexInput.BUFFER_SIZE, true, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
-  }
-
   /**
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
@@ -599,15 +590,6 @@ public class SegmentReader extends IndexReader implements Cloneable {
     return get(readOnly, si.dir, si, BufferedIndexInput.BUFFER_SIZE, true, termInfosIndexDivisor);
   }
 
-  /**
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws IOException if there is a low-level IO error
-   * @deprecated
-   */
-  static SegmentReader get(SegmentInfo si, int readBufferSize, boolean doOpenStores, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
-    return get(false, si.dir, si, readBufferSize, doOpenStores, termInfosIndexDivisor);
-  }
-
   /**
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
@@ -780,7 +762,6 @@ public class SegmentReader extends IndexReader implements Cloneable {
       }
     }
 
-    clone.setDisableFakeNorms(getDisableFakeNorms());
     clone.norms = new HashMap();
 
     // Clone norms
@@ -1055,11 +1036,6 @@ public class SegmentReader extends IndexReader implements Cloneable {
   }
 
   private byte[] ones;
-  private byte[] fakeNorms() {
-    assert !getDisableFakeNorms();
-    if (ones==null) ones=createFakeNorms(maxDoc());
-    return ones;
-  }
 
   // can return null if norms aren't stored
   protected synchronized byte[] getNorms(String field) throws IOException {
@@ -1072,7 +1048,6 @@ public class SegmentReader extends IndexReader implements Cloneable {
   public synchronized byte[] norms(String field) throws IOException {
     ensureOpen();
     byte[] bytes = getNorms(field);
-    if (bytes==null && !getDisableFakeNorms()) bytes=fakeNorms();
     return bytes;
   }
 
@@ -71,8 +71,7 @@ public class SnapshotDeletionPolicy implements IndexDeletionPolicy {
    *  you call optimize()) then in the worst case this could
    *  consume an extra 1X of your total index size, until
    *  you release the snapshot. */
-  // TODO 3.0: change this to return IndexCommit instead
-  public synchronized IndexCommitPoint snapshot() {
+  public synchronized IndexCommit snapshot() {
     if (snapshot == null)
       snapshot = lastCommit.getSegmentsFileName();
     else
@@ -238,7 +238,7 @@ class DocHelper {
     writer.setSimilarity(similarity);
     //writer.setUseCompoundFile(false);
     writer.addDocument(doc);
-    writer.flush();
+    writer.commit();
     SegmentInfo info = writer.newestSegment();
     writer.close();
     return info;
@@ -88,7 +88,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
       writer.addDocument(doc);
       failure.setDoFail();
       try {
-        writer.flush();
+        writer.flush(true, false, true);
         if (failure.hitExc) {
           fail("failed to hit IOException");
         }
@@ -140,7 +140,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
         delID += 10;
       }
 
-      writer.flush();
+      writer.commit();
     }
 
     writer.close();
@@ -210,7 +210,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
       // stress out aborting them on close:
      writer.setMergeFactor(3);
       writer.addDocument(doc);
-      writer.flush();
+      writer.commit();
 
       writer.close(false);
 
@@ -169,15 +169,15 @@ public class TestDoc extends LuceneTestCase {
      Document doc = new Document();
      doc.add(new Field("contents", new FileReader(file)));
      writer.addDocument(doc);
-     writer.flush();
+     writer.commit();
      return writer.newestSegment();
   }
 
 
   private SegmentInfo merge(SegmentInfo si1, SegmentInfo si2, String merged, boolean useCompoundFile)
   throws Exception {
-     SegmentReader r1 = SegmentReader.get(si1);
-     SegmentReader r2 = SegmentReader.get(si2);
+     SegmentReader r1 = SegmentReader.get(true, si1, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+     SegmentReader r2 = SegmentReader.get(true, si2, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
 
      SegmentMerger merger = new SegmentMerger(si1.dir, merged);
 
@@ -198,7 +198,7 @@ public class TestDoc extends LuceneTestCase {
 
   private void printSegment(PrintWriter out, SegmentInfo si)
   throws Exception {
-     SegmentReader reader = SegmentReader.get(si);
+     SegmentReader reader = SegmentReader.get(true, si, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
 
      for (int i = 0; i < reader.numDocs(); i++)
        out.println(reader.document(i));
@@ -63,11 +63,11 @@ public class TestDocumentWriter extends BaseTokenStreamTestCase {
     Analyzer analyzer = new WhitespaceAnalyzer();
     IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
     writer.addDocument(testDoc);
-    writer.flush();
+    writer.commit();
     SegmentInfo info = writer.newestSegment();
     writer.close();
     //After adding the document, we should be able to read it back in
-    SegmentReader reader = SegmentReader.get(info);
+    SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
     assertTrue(reader != null);
     Document doc = reader.document(0);
     assertTrue(doc != null);
@@ -123,10 +123,10 @@ public class TestDocumentWriter extends BaseTokenStreamTestCase {
     doc.add(new Field("repeated", "repeated two", Field.Store.YES, Field.Index.ANALYZED));
 
     writer.addDocument(doc);
-    writer.flush();
+    writer.commit();
     SegmentInfo info = writer.newestSegment();
     writer.close();
-    SegmentReader reader = SegmentReader.get(info);
+    SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
 
     TermPositions termPositions = reader.termPositions(new Term("repeated", "repeated"));
     assertTrue(termPositions.next());
@@ -183,10 +183,10 @@ public class TestDocumentWriter extends BaseTokenStreamTestCase {
     doc.add(new Field("f1", "a 5 a a", Field.Store.YES, Field.Index.ANALYZED));
 
     writer.addDocument(doc);
-    writer.flush();
+    writer.commit();
     SegmentInfo info = writer.newestSegment();
     writer.close();
-    SegmentReader reader = SegmentReader.get(info);
+    SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
 
     TermPositions termPositions = reader.termPositions(new Term("f1", "a"));
     assertTrue(termPositions.next());
@@ -223,10 +223,10 @@ public class TestDocumentWriter extends BaseTokenStreamTestCase {
     }, TermVector.NO));
 
     writer.addDocument(doc);
-    writer.flush();
+    writer.commit();
     SegmentInfo info = writer.newestSegment();
     writer.close();
-    SegmentReader reader = SegmentReader.get(info);
+    SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
 
     TermPositions termPositions = reader.termPositions(new Term("preanalyzed", "term1"));
     assertTrue(termPositions.next());
@@ -935,7 +935,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
     for (int i = 0; i < 100; i++) {
       w.addDocument(createDocument(i, 4));
       if (multiSegment && (i % 10) == 0) {
-        w.flush();
+        w.commit();
       }
     }
 
@@ -1133,7 +1133,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
   public void testIndexNoDocuments() throws IOException {
     RAMDirectory dir = new RAMDirectory();
     IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
-    writer.flush();
+    writer.commit();
     writer.close();
 
     IndexReader reader = IndexReader.open(dir, true);
@@ -1142,7 +1142,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
     reader.close();
 
     writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
-    writer.flush();
+    writer.commit();
     writer.close();
 
     reader = IndexReader.open(dir, true);
@@ -1503,7 +1503,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
     Document doc = new Document();
     doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     writer.addDocument(doc);
-    writer.flush();
+    writer.commit();
     writer.addDocument(new Document());
     writer.close();
     _TestUtil.checkIndex(dir);
@@ -1595,13 +1595,13 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
         Field.TermVector.NO));
     iw.addDocument(document);
     // Make first segment
-    iw.flush();
+    iw.commit();
 
     document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
         Field.TermVector.YES));
     iw.addDocument(document);
     // Make 2nd segment
-    iw.flush();
+    iw.commit();
 
     iw.optimize();
     iw.close();
@@ -1616,14 +1616,14 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
     document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
         Field.TermVector.YES));
     iw.addDocument(document);
-    iw.flush();
+    iw.commit();
 
     document = new Document();
     document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
         Field.TermVector.NO));
     iw.addDocument(document);
     // Make first segment
-    iw.flush();
+    iw.commit();
 
     iw.optimize();
 
@@ -1631,7 +1631,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
         Field.TermVector.YES));
     iw.addDocument(document);
     // Make 2nd segment
-    iw.flush();
+    iw.commit();
     iw.optimize();
 
     iw.close();
@@ -152,7 +152,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
 
       addDoc(modifier, ++id, value);
       assertEquals(0, modifier.getSegmentCount());
-      modifier.flush();
+      modifier.commit();
 
       modifier.commit();
 
@@ -52,7 +52,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase {
       d1.add(new Field(term.field(), term.text(), Store.NO, Index.ANALYZED));
       writer.addDocument(d1);
     }
-    writer.flush();
+    writer.commit();
    writer.optimize();
    writer.close();
 
@@ -28,8 +28,8 @@ public class TestMultiReader extends TestDirectoryReader {
     IndexReader reader;
 
     sis.read(dir);
-    SegmentReader reader1 = SegmentReader.get(sis.info(0));
-    SegmentReader reader2 = SegmentReader.get(sis.info(1));
+    SegmentReader reader1 = SegmentReader.get(false, sis.info(0), IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+    SegmentReader reader2 = SegmentReader.get(false, sis.info(1), IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
     readers[0] = reader1;
     readers[1] = reader2;
     assertTrue(reader1 != null);
@@ -202,7 +202,7 @@ public class TestPayloads extends LuceneTestCase {
     }
 
     // make sure we create more than one segment to test merging
-    writer.flush();
+    writer.commit();
 
     // now we make sure to have different payload lengths next at the next skip point
     for (int i = 0; i < numDocs; i++) {
@@ -49,8 +49,8 @@ public class TestSegmentMerger extends LuceneTestCase {
     SegmentInfo info1 = DocHelper.writeDoc(merge1Dir, doc1);
     DocHelper.setupDoc(doc2);
     SegmentInfo info2 = DocHelper.writeDoc(merge2Dir, doc2);
-    reader1 = SegmentReader.get(info1);
-    reader2 = SegmentReader.get(info2);
+    reader1 = SegmentReader.get(true, info1, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+    reader2 = SegmentReader.get(true, info2, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
   }
 
   public void test() {
@@ -69,7 +69,7 @@ public class TestSegmentMerger extends LuceneTestCase {
     merger.closeReaders();
     assertTrue(docsMerged == 2);
     //Should be able to open a new SegmentReader against the new directory
-    SegmentReader mergedReader = SegmentReader.get(new SegmentInfo(mergedSegment, docsMerged, mergedDir, false, true));
+    SegmentReader mergedReader = SegmentReader.get(true, new SegmentInfo(mergedSegment, docsMerged, mergedDir, false, true), IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
     assertTrue(mergedReader != null);
     assertTrue(mergedReader.numDocs() == 2);
     Document newDoc1 = mergedReader.document(0);
@@ -43,7 +43,7 @@ public class TestSegmentReader extends LuceneTestCase {
     super.setUp();
     DocHelper.setupDoc(testDoc);
     SegmentInfo info = DocHelper.writeDoc(dir, testDoc);
-    reader = SegmentReader.get(info);
+    reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
   }
 
   public void test() {
@@ -73,7 +73,7 @@ public class TestSegmentReader extends LuceneTestCase {
     Document docToDelete = new Document();
     DocHelper.setupDoc(docToDelete);
     SegmentInfo info = DocHelper.writeDoc(dir, docToDelete);
-    SegmentReader deleteReader = SegmentReader.get(info);
+    SegmentReader deleteReader = SegmentReader.get(false, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
     assertTrue(deleteReader != null);
     assertTrue(deleteReader.numDocs() == 1);
     deleteReader.deleteDocument(0);
@@ -167,14 +167,7 @@ public class TestSegmentReader extends LuceneTestCase {
         // test for fake norms of 1.0 or null depending on the flag
         byte [] norms = reader.norms(f.name());
         byte norm1 = DefaultSimilarity.encodeNorm(1.0f);
-        if (reader.getDisableFakeNorms())
-          assertNull(norms);
-        else {
-          assertEquals(norms.length,reader.maxDoc());
-          for (int j=0; j<reader.maxDoc(); j++) {
-            assertEquals(norms[j], norm1);
-          }
-        }
+        assertNull(norms);
         norms = new byte[reader.maxDoc()];
         reader.norms(f.name(),norms, 0);
         for (int j=0; j<reader.maxDoc(); j++) {
@@ -111,7 +111,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
     //terms
     for(int j=0;j<5;j++)
       writer.addDocument(doc);
-    writer.flush();
+    writer.commit();
     seg = writer.newestSegment().name;
     writer.close();
 