LUCENE-1979: Remove remaining deprecations from index package.

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@825022 13f79535-47bb-0310-9956-ffa450edef68
Michael Busch 2009-10-14 06:38:57 +00:00
parent f89460b246
commit 21ea806aa0
29 changed files with 66 additions and 208 deletions

View File

@@ -5,6 +5,11 @@ $Id$
Changes in backwards compatibility policy
* LUCENE-1979: Change return type of SnapshotDeletionPolicy#snapshot()
from IndexCommitPoint to IndexCommit. Code that uses this method
needs to be recompiled against Lucene 3.0 in order to work. The previously
deprecated IndexCommitPoint is also removed. (Michael Busch)
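[Editor's note: for reference, a usage sketch of the new snapshot() signature. This is a minimal illustration, not code from this commit; it assumes an already-open Directory dir and an Analyzer analyzer.]

    // Sketch: snapshot() now returns IndexCommit (was IndexCommitPoint).
    SnapshotDeletionPolicy dp =
        new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    IndexWriter writer = new IndexWriter(dir, analyzer, dp, IndexWriter.MaxFieldLength.LIMITED);
    try {
      IndexCommit commit = dp.snapshot();  // point-in-time view of the index
      // copy the files in commit.getFileNames() to back up the index
    } finally {
      dp.release();  // allow the snapshotted commit to be deleted again
    }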
Changes in runtime behavior
API Changes
@@ -67,6 +72,9 @@ API Changes
* LUCENE-944: Remove deprecated methods in BooleanQuery. (Michael Busch)
* LUCENE-1979: Remove remaining deprecations from indexer package.
(Michael Busch)
Bug fixes
* LUCENE-1951: When the text provided to WildcardQuery has no wildcard

View File

@@ -144,7 +144,7 @@ public class InstantiatedIndexReader extends IndexReader {
deletedDocuments.clear();
}
protected void doCommit() throws IOException {
protected void doCommit(Map commitUserData) throws IOException {
// todo: read/write lock
boolean updated = false;

View File

@@ -71,8 +71,7 @@ public class TestEmptyIndex extends TestCase {
private void testNorms(IndexReader r) throws IOException {
byte[] norms;
norms = r.norms("foo");
if (!r.getDisableFakeNorms()) {
assertNotNull(norms);
if (norms != null) {
assertEquals(0, norms.length);
norms = new byte[10];
Arrays.fill(norms, (byte)10);

View File

@@ -253,7 +253,7 @@ public class TestIndicesEquals extends TestCase {
byte[] aprioriNorms = aprioriReader.norms((String) field);
byte[] testNorms = testReader.norms((String) field);
if (!aprioriReader.getDisableFakeNorms()) {
if (aprioriNorms != null) {
assertEquals(aprioriNorms.length, testNorms.length);
for (int i = 0; i < aprioriNorms.length; i++) {

View File

@@ -1152,7 +1152,7 @@ public class MemoryIndex implements Serializable {
throw new UnsupportedOperationException();
}
protected void doCommit() {
protected void doCommit(Map commitUserData) {
if (DEBUG) System.err.println("MemoryIndexReader.doCommit");
}

View File

@@ -92,13 +92,7 @@ public class TestFieldNormModifier extends TestCase {
// sanity check, norms should all be 1
assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));
if (!r.getDisableFakeNorms()) {
for (int i = 0; i< norms.length; i++) {
assertEquals(""+i, DEFAULT_NORM, norms[i]);
}
} else {
assertNull(norms);
}
assertNull(norms);
r.close();
@@ -114,13 +108,7 @@
norms = r.norms("nonorm");
assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));
if (!r.getDisableFakeNorms()) {
for (int i = 0; i< norms.length; i++) {
assertEquals(""+i, DEFAULT_NORM, norms[i]);
}
} else {
assertNull(norms);
}
assertNull(norms);
r.close();
}

View File

@@ -98,13 +98,7 @@ public class TestLengthNormModifier extends TestCase {
// sanity check, norms should all be 1
assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));
if (!r.getDisableFakeNorms()) {
for (int i = 0; i< norms.length; i++) {
assertEquals(""+i, DEFAULT_NORM, norms[i]);
}
} else {
assertNull(norms);
}
assertNull(norms);
r.close();
@@ -120,13 +114,7 @@
norms = r.norms("nonorm");
assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));
if (!r.getDisableFakeNorms()) {
for (int i = 0; i< norms.length; i++) {
assertEquals(""+i, DEFAULT_NORM, norms[i]);
}
} else {
assertNull(norms);
}
assertNull(norms);
r.close();

View File

@@ -94,7 +94,7 @@ public class TestDistance extends TestCase{
addPoint(writer,"Iota Club and Cafe",38.8890000,-77.0923000);
addPoint(writer,"Hilton Washington Embassy Row",38.9103000,-77.0451000);
addPoint(writer,"HorseFeathers, Bar & Grill", 39.01220000000001, -77.3942);
writer.flush();
writer.commit();
}
public void testLatLongFilterOnDeletedDocs() throws Exception {

View File

@@ -289,23 +289,6 @@ public class CheckIndex {
}
}
/** Returns true if index is clean, else false.
* @deprecated Please instantiate a CheckIndex and then use {@link #checkIndex()} instead */
public static boolean check(Directory dir, boolean doFix) throws IOException {
return check(dir, doFix, null);
}
/** Returns true if index is clean, else false.
* @deprecated Please instantiate a CheckIndex and then use {@link #checkIndex(List)} instead */
public static boolean check(Directory dir, boolean doFix, List onlySegments) throws IOException {
CheckIndex checker = new CheckIndex(dir);
Status status = checker.checkIndex(onlySegments);
if (doFix && !status.clean)
checker.fixIndex(status);
return status.clean;
}
/** Returns a {@link Status} instance detailing
* the state of the index.
*
@@ -488,7 +471,7 @@ public class CheckIndex {
}
if (infoStream != null)
infoStream.print(" test: open reader.........");
reader = SegmentReader.get(info);
reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
segInfoStat.openReaderPassed = true;
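[Editor's note: the replacement for the removed static check() helpers mirrors their old bodies shown above. A minimal migration sketch, assuming an open Directory dir and a caller-supplied doFix flag:]

    // Sketch replacing the removed CheckIndex.check(Directory, boolean):
    CheckIndex checker = new CheckIndex(dir);
    CheckIndex.Status status = checker.checkIndex();  // or checkIndex(onlySegments)
    if (doFix && !status.clean) {
      checker.fixIndex(status);  // rewrites segments_N, dropping unrecoverable segments
    }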

View File

@@ -17,25 +17,25 @@ package org.apache.lucene.index;
* limitations under the License.
*/
import java.io.IOException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.Collections;
import java.util.ArrayList;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.search.DefaultSimilarity;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.AlreadyClosedException;
/**
* An IndexReader which reads indexes with multiple segments.
@@ -380,7 +380,6 @@ class DirectoryReader extends IndexReader implements Cloneable {
// the future we could have write make some effort to
// detect that no changes have occurred
IndexReader reader = writer.getReader();
reader.setDisableFakeNorms(getDisableFakeNorms());
return reader;
}
@@ -436,7 +435,6 @@ class DirectoryReader extends IndexReader implements Cloneable {
} else {
reader = new DirectoryReader(directory, infos, subReaders, starts, normsCache, false, doClone, termInfosIndexDivisor);
}
reader.setDisableFakeNorms(getDisableFakeNorms());
return reader;
}
@@ -564,18 +562,13 @@ class DirectoryReader extends IndexReader implements Cloneable {
}
private byte[] ones;
private byte[] fakeNorms() {
if (ones==null) ones=SegmentReader.createFakeNorms(maxDoc());
return ones;
}
public synchronized byte[] norms(String field) throws IOException {
ensureOpen();
byte[] bytes = (byte[])normsCache.get(field);
if (bytes != null)
return bytes; // cache hit
if (!hasNorms(field))
return getDisableFakeNorms() ? null : fakeNorms();
return null;
bytes = new byte[maxDoc()];
for (int i = 0; i < subReaders.length; i++)
@@ -679,11 +672,6 @@ class DirectoryReader extends IndexReader implements Cloneable {
}
}
/** @deprecated */
protected void doCommit() throws IOException {
doCommit(null);
}
/**
* Commit changes resulting from delete, undeleteAll, or setNorm operations
* <p/>
@@ -832,12 +820,6 @@ class DirectoryReader extends IndexReader implements Cloneable {
return subReaders;
}
public void setDisableFakeNorms(boolean disableFakeNorms) {
super.setDisableFakeNorms(disableFakeNorms);
for (int i = 0; i < subReaders.length; i++)
subReaders[i].setDisableFakeNorms(disableFakeNorms);
}
/** Returns the directory this index resides in. */
public Directory directory() {
// Don't ensureOpen here -- in certain cases, when a

View File

@@ -41,7 +41,7 @@ import org.apache.lucene.store.Directory;
* may suddenly change. </p>
*/
public abstract class IndexCommit implements IndexCommitPoint {
public abstract class IndexCommit {
/**
* Get the segments file (<code>segments_N</code>) associated

View File

@@ -980,18 +980,9 @@ public abstract class IndexReader implements Cloneable {
hasChanges = false;
}
/** Implements commit.
* @deprecated Please implement {@link #doCommit(Map)
* instead}. */
protected abstract void doCommit() throws IOException;
/** Implements commit. NOTE: subclasses should override
* this. In 3.0 this will become an abstract method. */
void doCommit(Map commitUserData) throws IOException {
// Default impl discards commitUserData; all Lucene
// subclasses override this (do not discard it).
doCommit();
}
protected abstract void doCommit(Map commitUserData) throws IOException;
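[Editor's note: for IndexReader subclasses this is the one signature change to act on — the no-arg hook is gone and the Map-taking variant is now the abstract method to implement. A minimal sketch of the updated override; the body is elided, the comments only restate the contract:]

    // Before (removed): protected void doCommit() throws IOException { ... }
    protected void doCommit(Map commitUserData) throws IOException {
      // apply buffered deletes/norm updates and write the new segments_N;
      // commitUserData may be null and should be stored with the commit, not discarded
    }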
/**
* Closes files associated with this index.
@@ -1145,8 +1136,7 @@
return null;
}
/** Expert
* @deprecated */
/** Expert */
public Object getFieldCacheKey() {
return this;
}
@@ -1166,26 +1156,4 @@
public long getUniqueTermCount() throws IOException {
throw new UnsupportedOperationException("this reader does not implement getUniqueTermCount()");
}
/** Expert: Return the state of the flag that disables fakes norms in favor of representing the absence of field norms with null.
* @return true if fake norms are disabled
* @deprecated This currently defaults to false (to remain
* back-compatible), but in 3.0 it will be hardwired to
* true, meaning the norms() methods will return null for
* fields that had disabled norms.
*/
public boolean getDisableFakeNorms() {
return disableFakeNorms;
}
/** Expert: Set the state of the flag that disables fakes norms in favor of representing the absence of field norms with null.
* @param disableFakeNorms true to disable fake norms, false to preserve the legacy behavior
* @deprecated This currently defaults to false (to remain
* back-compatible), but in 3.0 it will be hardwired to
* true, meaning the norms() methods will return null for
* fields that had disabled norms.
*/
public void setDisableFakeNorms(boolean disableFakeNorms) {
this.disableFakeNorms = disableFakeNorms;
}
}
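[Editor's note: removing the two accessors above is the behavioral half of this change — norms(String) now returns null for a field without norms instead of a synthesized all-1.0 array. A calling-pattern sketch, assuming an open IndexReader reader; the field name "foo" is illustrative, echoing the updated tests:]

    // Sketch: callers must now expect null instead of fake norms.
    byte[] norms = reader.norms("foo");
    if (norms == null) {
      // field omits norms or does not exist; pre-3.0 a fake array was returned
    } else {
      // one byte per document: norms.length == reader.maxDoc()
    }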

View File

@@ -611,7 +611,7 @@ public class IndexWriter {
// TODO: we may want to avoid doing this while
// synchronized
// Returns a ref, which we xfer to readerMap:
sr = SegmentReader.get(info, readBufferSize, doOpenStores, termsIndexDivisor);
sr = SegmentReader.get(false, info.dir, info, readBufferSize, doOpenStores, termsIndexDivisor);
readerMap.put(info, sr);
} else {
if (doOpenStores) {
@@ -3410,30 +3410,6 @@ public class IndexWriter {
throws IOException {
}
/**
* Flush all in-memory buffered updates (adds and deletes)
* to the Directory.
* <p>Note: while this will force buffered docs to be
* pushed into the index, it will not make these docs
* visible to a reader. Use {@link #commit()} instead
*
* <p><b>NOTE</b>: if this method hits an OutOfMemoryError
* you should immediately close the writer. See <a
* href="#OOME">above</a> for details.</p>
*
* @deprecated please call {@link #commit()}) instead
*
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public final void flush() throws CorruptIndexException, IOException {
if (hitOOM) {
throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot flush");
}
flush(true, false, true);
}
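[Editor's note: callers of the removed public flush() migrate to commit(), as the deprecation note advised and as the test updates below show. A minimal sketch, assuming an open IndexWriter writer:]

    // Before (removed in this commit): writer.flush();
    // After: commit() flushes buffered adds/deletes and additionally makes
    // them visible to newly opened readers, which flush() alone never did.
    writer.commit();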
/** Expert: prepare for commit.
*
* <p><b>NOTE</b>: if this method hits an OutOfMemoryError

View File

@@ -179,7 +179,6 @@ public class MultiReader extends IndexReader implements Cloneable {
}
MultiReader mr = new MultiReader(newSubReaders);
mr.decrefOnClose = newDecrefOnClose;
mr.setDisableFakeNorms(getDisableFakeNorms());
return mr;
} else {
return this;
@@ -289,7 +288,7 @@
if (bytes != null)
return bytes; // cache hit
if (!hasNorms(field))
return getDisableFakeNorms() ? null : fakeNorms();
return null;
bytes = new byte[maxDoc()];
for (int i = 0; i < subReaders.length; i++)

View File

@@ -582,15 +582,6 @@ public class SegmentReader extends IndexReader implements Cloneable {
}
}
/**
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
* @deprecated
*/
public static SegmentReader get(SegmentInfo si) throws CorruptIndexException, IOException {
return get(false, si.dir, si, BufferedIndexInput.BUFFER_SIZE, true, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
}
/**
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
@@ -599,15 +590,6 @@
return get(readOnly, si.dir, si, BufferedIndexInput.BUFFER_SIZE, true, termInfosIndexDivisor);
}
/**
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
* @deprecated
*/
static SegmentReader get(SegmentInfo si, int readBufferSize, boolean doOpenStores, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
return get(false, si.dir, si, readBufferSize, doOpenStores, termInfosIndexDivisor);
}
/**
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
@@ -780,7 +762,6 @@
}
}
clone.setDisableFakeNorms(getDisableFakeNorms());
clone.norms = new HashMap();
// Clone norms
@@ -1055,11 +1036,6 @@
}
private byte[] ones;
private byte[] fakeNorms() {
assert !getDisableFakeNorms();
if (ones==null) ones=createFakeNorms(maxDoc());
return ones;
}
// can return null if norms aren't stored
protected synchronized byte[] getNorms(String field) throws IOException {
@@ -1072,7 +1048,6 @@
public synchronized byte[] norms(String field) throws IOException {
ensureOpen();
byte[] bytes = getNorms(field);
if (bytes==null && !getDisableFakeNorms()) bytes=fakeNorms();
return bytes;
}
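[Editor's note: call sites of the removed SegmentReader.get(SegmentInfo) convenience overloads now pass the read-only flag and the terms-index divisor explicitly, exactly as the updated tests below do. A sketch, assuming an existing SegmentInfo info:]

    // Before (removed): SegmentReader reader = SegmentReader.get(info);
    // After: readOnly and the terms-index divisor are explicit.
    SegmentReader reader =
        SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);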

View File

@@ -71,8 +71,7 @@ public class SnapshotDeletionPolicy implements IndexDeletionPolicy {
* you call optimize()) then in the worst case this could
* consume an extra 1X of your total index size, until
* you release the snapshot. */
// TODO 3.0: change this to return IndexCommit instead
public synchronized IndexCommitPoint snapshot() {
public synchronized IndexCommit snapshot() {
if (snapshot == null)
snapshot = lastCommit.getSegmentsFileName();
else

View File

@@ -238,7 +238,7 @@ class DocHelper {
writer.setSimilarity(similarity);
//writer.setUseCompoundFile(false);
writer.addDocument(doc);
writer.flush();
writer.commit();
SegmentInfo info = writer.newestSegment();
writer.close();
return info;

View File

@@ -88,7 +88,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
writer.addDocument(doc);
failure.setDoFail();
try {
writer.flush();
writer.flush(true, false, true);
if (failure.hitExc) {
fail("failed to hit IOException");
}
@@ -140,7 +140,7 @@
delID += 10;
}
writer.flush();
writer.commit();
}
writer.close();
@@ -210,7 +210,7 @@
// stress out aborting them on close:
writer.setMergeFactor(3);
writer.addDocument(doc);
writer.flush();
writer.commit();
writer.close(false);
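[Editor's note: unlike the other test updates in this commit, the first hunk above keeps a flush rather than switching to commit() — the test must flush without committing to provoke the simulated failure, so it calls the non-public three-argument flush directly, which same-package tests can reach. Assuming the 2.9-era signature:]

    // flush(triggerMerge, flushDocStores, flushDeletes) -- not part of the
    // public API; applications should call commit() instead.
    writer.flush(true, false, true);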

View File

@@ -169,15 +169,15 @@ public class TestDoc extends LuceneTestCase {
Document doc = new Document();
doc.add(new Field("contents", new FileReader(file)));
writer.addDocument(doc);
writer.flush();
writer.commit();
return writer.newestSegment();
}
private SegmentInfo merge(SegmentInfo si1, SegmentInfo si2, String merged, boolean useCompoundFile)
throws Exception {
SegmentReader r1 = SegmentReader.get(si1);
SegmentReader r2 = SegmentReader.get(si2);
SegmentReader r1 = SegmentReader.get(true, si1, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
SegmentReader r2 = SegmentReader.get(true, si2, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
SegmentMerger merger = new SegmentMerger(si1.dir, merged);
@@ -198,7 +198,7 @@
private void printSegment(PrintWriter out, SegmentInfo si)
throws Exception {
SegmentReader reader = SegmentReader.get(si);
SegmentReader reader = SegmentReader.get(true, si, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
for (int i = 0; i < reader.numDocs(); i++)
out.println(reader.document(i));

View File

@@ -63,11 +63,11 @@ public class TestDocumentWriter extends BaseTokenStreamTestCase {
Analyzer analyzer = new WhitespaceAnalyzer();
IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
writer.addDocument(testDoc);
writer.flush();
writer.commit();
SegmentInfo info = writer.newestSegment();
writer.close();
//After adding the document, we should be able to read it back in
SegmentReader reader = SegmentReader.get(info);
SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
assertTrue(reader != null);
Document doc = reader.document(0);
assertTrue(doc != null);
@@ -123,10 +123,10 @@
doc.add(new Field("repeated", "repeated two", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.flush();
writer.commit();
SegmentInfo info = writer.newestSegment();
writer.close();
SegmentReader reader = SegmentReader.get(info);
SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
TermPositions termPositions = reader.termPositions(new Term("repeated", "repeated"));
assertTrue(termPositions.next());
@@ -183,10 +183,10 @@
doc.add(new Field("f1", "a 5 a a", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.flush();
writer.commit();
SegmentInfo info = writer.newestSegment();
writer.close();
SegmentReader reader = SegmentReader.get(info);
SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
TermPositions termPositions = reader.termPositions(new Term("f1", "a"));
assertTrue(termPositions.next());
@@ -223,10 +223,10 @@
}, TermVector.NO));
writer.addDocument(doc);
writer.flush();
writer.commit();
SegmentInfo info = writer.newestSegment();
writer.close();
SegmentReader reader = SegmentReader.get(info);
SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
TermPositions termPositions = reader.termPositions(new Term("preanalyzed", "term1"));
assertTrue(termPositions.next());

View File

@@ -935,7 +935,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
for (int i = 0; i < 100; i++) {
w.addDocument(createDocument(i, 4));
if (multiSegment && (i % 10) == 0) {
w.flush();
w.commit();
}
}

View File

@@ -1133,7 +1133,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
public void testIndexNoDocuments() throws IOException {
RAMDirectory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
writer.flush();
writer.commit();
writer.close();
IndexReader reader = IndexReader.open(dir, true);
@@ -1142,7 +1142,7 @@
reader.close();
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
writer.flush();
writer.commit();
writer.close();
reader = IndexReader.open(dir, true);
@@ -1503,7 +1503,7 @@
Document doc = new Document();
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.flush();
writer.commit();
writer.addDocument(new Document());
writer.close();
_TestUtil.checkIndex(dir);
@@ -1595,13 +1595,13 @@
Field.TermVector.NO));
iw.addDocument(document);
// Make first segment
iw.flush();
iw.commit();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
// Make 2nd segment
iw.flush();
iw.commit();
iw.optimize();
iw.close();
@@ -1616,14 +1616,14 @@
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
iw.flush();
iw.commit();
document = new Document();
document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.NO));
iw.addDocument(document);
// Make first segment
iw.flush();
iw.commit();
iw.optimize();
@@ -1631,7 +1631,7 @@
Field.TermVector.YES));
iw.addDocument(document);
// Make 2nd segment
iw.flush();
iw.commit();
iw.optimize();
iw.close();

View File

@@ -152,7 +152,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
addDoc(modifier, ++id, value);
assertEquals(0, modifier.getSegmentCount());
modifier.flush();
modifier.commit();
modifier.commit();

View File

@@ -52,7 +52,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase {
d1.add(new Field(term.field(), term.text(), Store.NO, Index.ANALYZED));
writer.addDocument(d1);
}
writer.flush();
writer.commit();
writer.optimize();
writer.close();

View File

@@ -28,8 +28,8 @@ public class TestMultiReader extends TestDirectoryReader {
IndexReader reader;
sis.read(dir);
SegmentReader reader1 = SegmentReader.get(sis.info(0));
SegmentReader reader2 = SegmentReader.get(sis.info(1));
SegmentReader reader1 = SegmentReader.get(false, sis.info(0), IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
SegmentReader reader2 = SegmentReader.get(false, sis.info(1), IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
readers[0] = reader1;
readers[1] = reader2;
assertTrue(reader1 != null);

View File

@@ -202,7 +202,7 @@ public class TestPayloads extends LuceneTestCase {
}
// make sure we create more than one segment to test merging
writer.flush();
writer.commit();
// now we make sure to have different payload lengths next at the next skip point
for (int i = 0; i < numDocs; i++) {

View File

@@ -49,8 +49,8 @@ public class TestSegmentMerger extends LuceneTestCase {
SegmentInfo info1 = DocHelper.writeDoc(merge1Dir, doc1);
DocHelper.setupDoc(doc2);
SegmentInfo info2 = DocHelper.writeDoc(merge2Dir, doc2);
reader1 = SegmentReader.get(info1);
reader2 = SegmentReader.get(info2);
reader1 = SegmentReader.get(true, info1, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
reader2 = SegmentReader.get(true, info2, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
}
public void test() {
@@ -69,7 +69,7 @@
merger.closeReaders();
assertTrue(docsMerged == 2);
//Should be able to open a new SegmentReader against the new directory
SegmentReader mergedReader = SegmentReader.get(new SegmentInfo(mergedSegment, docsMerged, mergedDir, false, true));
SegmentReader mergedReader = SegmentReader.get(true, new SegmentInfo(mergedSegment, docsMerged, mergedDir, false, true), IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
assertTrue(mergedReader != null);
assertTrue(mergedReader.numDocs() == 2);
Document newDoc1 = mergedReader.document(0);

View File

@@ -43,7 +43,7 @@ public class TestSegmentReader extends LuceneTestCase {
super.setUp();
DocHelper.setupDoc(testDoc);
SegmentInfo info = DocHelper.writeDoc(dir, testDoc);
reader = SegmentReader.get(info);
reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
}
public void test() {
@@ -73,7 +73,7 @@
Document docToDelete = new Document();
DocHelper.setupDoc(docToDelete);
SegmentInfo info = DocHelper.writeDoc(dir, docToDelete);
SegmentReader deleteReader = SegmentReader.get(info);
SegmentReader deleteReader = SegmentReader.get(false, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
assertTrue(deleteReader != null);
assertTrue(deleteReader.numDocs() == 1);
deleteReader.deleteDocument(0);
@@ -167,14 +167,7 @@
// test for fake norms of 1.0 or null depending on the flag
byte [] norms = reader.norms(f.name());
byte norm1 = DefaultSimilarity.encodeNorm(1.0f);
if (reader.getDisableFakeNorms())
assertNull(norms);
else {
assertEquals(norms.length,reader.maxDoc());
for (int j=0; j<reader.maxDoc(); j++) {
assertEquals(norms[j], norm1);
}
}
assertNull(norms);
norms = new byte[reader.maxDoc()];
reader.norms(f.name(),norms, 0);
for (int j=0; j<reader.maxDoc(); j++) {

View File

@@ -111,7 +111,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
//terms
for(int j=0;j<5;j++)
writer.addDocument(doc);
writer.flush();
writer.commit();
seg = writer.newestSegment().name;
writer.close();