From 21ea806aa0cf8631a2895e713c86a69ac3244e39 Mon Sep 17 00:00:00 2001
From: Michael Busch
diff --git a/src/java/org/apache/lucene/index/IndexReader.java b/src/java/org/apache/lucene/index/IndexReader.java
index f70f1423f60..4e0fbd1ceb6 100644
--- a/src/java/org/apache/lucene/index/IndexReader.java
+++ b/src/java/org/apache/lucene/index/IndexReader.java
@@ -980,18 +980,9 @@ public abstract class IndexReader implements Cloneable {
hasChanges = false;
}
- /** Implements commit.
- * @deprecated Please implement {@link #doCommit(Map)
- * instead}. */
- protected abstract void doCommit() throws IOException;
-
/** Implements commit. NOTE: subclasses should override
* this. In 3.0 this will become an abstract method. */
- void doCommit(Map commitUserData) throws IOException {
- // Default impl discards commitUserData; all Lucene
- // subclasses override this (do not discard it).
- doCommit();
- }
+ protected abstract void doCommit(Map commitUserData) throws IOException;
/**
* Closes files associated with this index.
@@ -1145,8 +1136,7 @@ public abstract class IndexReader implements Cloneable {
return null;
}
- /** Expert
- * @deprecated */
+ /** Expert */
public Object getFieldCacheKey() {
return this;
}
@@ -1166,26 +1156,4 @@ public abstract class IndexReader implements Cloneable {
public long getUniqueTermCount() throws IOException {
throw new UnsupportedOperationException("this reader does not implement getUniqueTermCount()");
}
-
- /** Expert: Return the state of the flag that disables fakes norms in favor of representing the absence of field norms with null.
- * @return true if fake norms are disabled
- * @deprecated This currently defaults to false (to remain
- * back-compatible), but in 3.0 it will be hardwired to
- * true, meaning the norms() methods will return null for
- * fields that had disabled norms.
- */
- public boolean getDisableFakeNorms() {
- return disableFakeNorms;
- }
-
- /** Expert: Set the state of the flag that disables fakes norms in favor of representing the absence of field norms with null.
- * @param disableFakeNorms true to disable fake norms, false to preserve the legacy behavior
- * @deprecated This currently defaults to false (to remain
- * back-compatible), but in 3.0 it will be hardwired to
- * true, meaning the norms() methods will return null for
- * fields that had disabled norms.
- */
- public void setDisableFakeNorms(boolean disableFakeNorms) {
- this.disableFakeNorms = disableFakeNorms;
- }
}
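
With doCommit(Map) now abstract, custom IndexReader subclasses outside Lucene
must implement the Map-taking variant themselves; overriding the removed
no-argument doCommit() no longer compiles. A minimal sketch, assuming a
hypothetical FilterIndexReader subclass (class name and delegation are
illustrative, not part of this patch):

    import java.io.IOException;
    import java.util.Map;

    import org.apache.lucene.index.FilterIndexReader;
    import org.apache.lucene.index.IndexReader;

    // Hypothetical subclass: the Map overload must now be implemented directly.
    class CommittingReader extends FilterIndexReader {
      CommittingReader(IndexReader in) {
        super(in);
      }

      protected void doCommit(Map commitUserData) throws IOException {
        // Forward the caller-supplied user data instead of discarding it,
        // as the removed default implementation used to do.
        in.flush(commitUserData);
      }
    }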
diff --git a/src/java/org/apache/lucene/index/IndexWriter.java b/src/java/org/apache/lucene/index/IndexWriter.java
index 5be27e3a841..836bee80438 100644
--- a/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/src/java/org/apache/lucene/index/IndexWriter.java
@@ -611,7 +611,7 @@ public class IndexWriter {
// TODO: we may want to avoid doing this while
// synchronized
// Returns a ref, which we xfer to readerMap:
- sr = SegmentReader.get(info, readBufferSize, doOpenStores, termsIndexDivisor);
+ sr = SegmentReader.get(false, info.dir, info, readBufferSize, doOpenStores, termsIndexDivisor);
readerMap.put(info, sr);
} else {
if (doOpenStores) {
@@ -3410,30 +3410,6 @@ public class IndexWriter {
throws IOException {
}
- /**
- * Flush all in-memory buffered updates (adds and deletes)
- * to the Directory.
- * Note: while this will force buffered docs to be
- * pushed into the index, it will not make these docs
- * visible to a reader. Use {@link #commit()} instead
- *
- * NOTE: if this method hits an OutOfMemoryError
- * you should immediately close the writer. See above for details.
- *
- * @deprecated please call {@link #commit()}) instead
- *
- * @throws CorruptIndexException if the index is corrupt
- * @throws IOException if there is a low-level IO error
- */
- public final void flush() throws CorruptIndexException, IOException {
- if (hitOOM) {
- throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot flush");
- }
-
- flush(true, false, true);
- }
-
/** Expert: prepare for commit.
*
* NOTE: if this method hits an OutOfMemoryError
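
For application code the migration is one line: replace flush() with commit(),
which also makes the buffered docs visible to readers. A hedged before/after
sketch (directory and analyzer choices are illustrative):

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.RAMDirectory;

    public class FlushMigration {
      public static void main(String[] args) throws Exception {
        IndexWriter writer = new IndexWriter(new RAMDirectory(),
            new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
        Document doc = new Document();
        doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED));
        writer.addDocument(doc);
        // Before this patch: writer.flush();  -- pushed buffered docs without
        // making them visible to readers. The replacement is:
        writer.commit();
        writer.close();
      }
    }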
diff --git a/src/java/org/apache/lucene/index/MultiReader.java b/src/java/org/apache/lucene/index/MultiReader.java
index cb2f412d20a..82f9ff6aacc 100644
--- a/src/java/org/apache/lucene/index/MultiReader.java
+++ b/src/java/org/apache/lucene/index/MultiReader.java
@@ -179,7 +179,6 @@ public class MultiReader extends IndexReader implements Cloneable {
}
MultiReader mr = new MultiReader(newSubReaders);
mr.decrefOnClose = newDecrefOnClose;
- mr.setDisableFakeNorms(getDisableFakeNorms());
return mr;
} else {
return this;
@@ -289,7 +288,7 @@ public class MultiReader extends IndexReader implements Cloneable {
if (bytes != null)
return bytes; // cache hit
if (!hasNorms(field))
- return getDisableFakeNorms() ? null : fakeNorms();
+ return null;
bytes = new byte[maxDoc()];
for (int i = 0; i < subReaders.length; i++)
diff --git a/src/java/org/apache/lucene/index/SegmentReader.java b/src/java/org/apache/lucene/index/SegmentReader.java
index ebec2644f6e..54bd076221c 100644
--- a/src/java/org/apache/lucene/index/SegmentReader.java
+++ b/src/java/org/apache/lucene/index/SegmentReader.java
@@ -582,15 +582,6 @@ public class SegmentReader extends IndexReader implements Cloneable {
}
}
- /**
- * @throws CorruptIndexException if the index is corrupt
- * @throws IOException if there is a low-level IO error
- * @deprecated
- */
- public static SegmentReader get(SegmentInfo si) throws CorruptIndexException, IOException {
- return get(false, si.dir, si, BufferedIndexInput.BUFFER_SIZE, true, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
- }
-
/**
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
@@ -599,15 +590,6 @@ public class SegmentReader extends IndexReader implements Cloneable {
return get(readOnly, si.dir, si, BufferedIndexInput.BUFFER_SIZE, true, termInfosIndexDivisor);
}
- /**
- * @throws CorruptIndexException if the index is corrupt
- * @throws IOException if there is a low-level IO error
- * @deprecated
- */
- static SegmentReader get(SegmentInfo si, int readBufferSize, boolean doOpenStores, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
- return get(false, si.dir, si, readBufferSize, doOpenStores, termInfosIndexDivisor);
- }
-
/**
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
@@ -780,7 +762,6 @@ public class SegmentReader extends IndexReader implements Cloneable {
}
}
- clone.setDisableFakeNorms(getDisableFakeNorms());
clone.norms = new HashMap();
// Clone norms
@@ -1055,11 +1036,6 @@ public class SegmentReader extends IndexReader implements Cloneable {
}
private byte[] ones;
- private byte[] fakeNorms() {
- assert !getDisableFakeNorms();
- if (ones==null) ones=createFakeNorms(maxDoc());
- return ones;
- }
// can return null if norms aren't stored
protected synchronized byte[] getNorms(String field) throws IOException {
@@ -1072,7 +1048,6 @@ public class SegmentReader extends IndexReader implements Cloneable {
public synchronized byte[] norms(String field) throws IOException {
ensureOpen();
byte[] bytes = getNorms(field);
- if (bytes==null && !getDisableFakeNorms()) bytes=fakeNorms();
return bytes;
}
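
Because fake norms are gone, norms(field) now returns null whenever the field
was indexed with norms omitted, so callers must null-check instead of relying
on an implicit all-1.0 array; likewise, with the SegmentReader.get(SegmentInfo)
convenience overloads removed, callers pass readOnly and the terms-index
divisor explicitly, as the test updates below show. A sketch of the null-check
(helper class and fallback handling are hypothetical):

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.DefaultSimilarity;
    import org.apache.lucene.store.Directory;

    class NormsNullCheck {
      // After this patch a null from norms() is the normal signal that the
      // field had norms disabled, not an error condition.
      static float firstNorm(Directory dir, String field) throws Exception {
        IndexReader reader = IndexReader.open(dir, true);
        try {
          byte[] norms = reader.norms(field);
          if (norms == null) {
            return 1.0f; // absent norms: score as if every doc had norm 1.0
          }
          return DefaultSimilarity.decodeNorm(norms[0]);
        } finally {
          reader.close();
        }
      }
    }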
diff --git a/src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java b/src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java
index 20fb5e0f883..1a24fd3f233 100644
--- a/src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java
+++ b/src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java
@@ -71,8 +71,7 @@ public class SnapshotDeletionPolicy implements IndexDeletionPolicy {
* you call optimize()) then in the worst case this could
* consume an extra 1X of your total index size, until
* you release the snapshot. */
- // TODO 3.0: change this to return IndexCommit instead
- public synchronized IndexCommitPoint snapshot() {
+ public synchronized IndexCommit snapshot() {
if (snapshot == null)
snapshot = lastCommit.getSegmentsFileName();
else
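
Since snapshot() now returns IndexCommit directly, backup code no longer casts
from IndexCommitPoint before reading the commit's files (including its
segments_N file). A hedged sketch of the usual snapshot-then-release pattern
(the actual copy step is elided):

    import java.util.Collection;
    import java.util.Iterator;

    import org.apache.lucene.index.IndexCommit;
    import org.apache.lucene.index.SnapshotDeletionPolicy;

    class SnapshotBackup {
      static void backup(SnapshotDeletionPolicy policy) throws Exception {
        IndexCommit commit = policy.snapshot(); // no cast needed anymore
        try {
          Collection files = commit.getFileNames();
          for (Iterator it = files.iterator(); it.hasNext();) {
            String fileName = (String) it.next();
            // copy fileName from the index directory to backup storage here
          }
        } finally {
          policy.release();
        }
      }
    }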
diff --git a/src/test/org/apache/lucene/index/DocHelper.java b/src/test/org/apache/lucene/index/DocHelper.java
index e9a899e58ad..5f72b9e1fba 100644
--- a/src/test/org/apache/lucene/index/DocHelper.java
+++ b/src/test/org/apache/lucene/index/DocHelper.java
@@ -238,7 +238,7 @@ class DocHelper {
writer.setSimilarity(similarity);
//writer.setUseCompoundFile(false);
writer.addDocument(doc);
- writer.flush();
+ writer.commit();
SegmentInfo info = writer.newestSegment();
writer.close();
return info;
diff --git a/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java b/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
index f8c2d11a12f..26827462433 100644
--- a/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
+++ b/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
@@ -88,7 +88,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
writer.addDocument(doc);
failure.setDoFail();
try {
- writer.flush();
+ writer.flush(true, false, true);
if (failure.hitExc) {
fail("failed to hit IOException");
}
@@ -140,7 +140,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
delID += 10;
}
- writer.flush();
+ writer.commit();
}
writer.close();
@@ -210,7 +210,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
// stress out aborting them on close:
writer.setMergeFactor(3);
writer.addDocument(doc);
- writer.flush();
+ writer.commit();
writer.close(false);
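
The tests above now call the internal three-flag flush directly. Judging from
the removed public flush() body, flush(true, false, true) corresponds to
triggerMerge, flushDocStores, and flushDeletes (flag names hedged from the 2.9
sources); the overload is not public API, so only same-package code such as
these tests can reach it, and everyone else should call commit(). A sketch:

    package org.apache.lucene.index;

    // Hedged sketch: flush(boolean,boolean,boolean) is not public API, so a
    // caller that needs flush-without-commit must live in this package.
    class FlushWithoutCommit {
      static void flushOnly(IndexWriter writer) throws java.io.IOException {
        // Same flags the removed public flush() passed internally:
        // triggerMerge=true, flushDocStores=false, flushDeletes=true.
        writer.flush(true, false, true);
      }
    }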
diff --git a/src/test/org/apache/lucene/index/TestDoc.java b/src/test/org/apache/lucene/index/TestDoc.java
index 01b8d56abf5..048fdaa88c1 100644
--- a/src/test/org/apache/lucene/index/TestDoc.java
+++ b/src/test/org/apache/lucene/index/TestDoc.java
@@ -169,15 +169,15 @@ public class TestDoc extends LuceneTestCase {
Document doc = new Document();
doc.add(new Field("contents", new FileReader(file)));
writer.addDocument(doc);
- writer.flush();
+ writer.commit();
return writer.newestSegment();
}
private SegmentInfo merge(SegmentInfo si1, SegmentInfo si2, String merged, boolean useCompoundFile)
throws Exception {
- SegmentReader r1 = SegmentReader.get(si1);
- SegmentReader r2 = SegmentReader.get(si2);
+ SegmentReader r1 = SegmentReader.get(true, si1, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+ SegmentReader r2 = SegmentReader.get(true, si2, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
SegmentMerger merger = new SegmentMerger(si1.dir, merged);
@@ -198,7 +198,7 @@ public class TestDoc extends LuceneTestCase {
private void printSegment(PrintWriter out, SegmentInfo si)
throws Exception {
- SegmentReader reader = SegmentReader.get(si);
+ SegmentReader reader = SegmentReader.get(true, si, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
for (int i = 0; i < reader.numDocs(); i++)
out.println(reader.document(i));
diff --git a/src/test/org/apache/lucene/index/TestDocumentWriter.java b/src/test/org/apache/lucene/index/TestDocumentWriter.java
index a41e385f050..43fd5bcfb33 100644
--- a/src/test/org/apache/lucene/index/TestDocumentWriter.java
+++ b/src/test/org/apache/lucene/index/TestDocumentWriter.java
@@ -63,11 +63,11 @@ public class TestDocumentWriter extends BaseTokenStreamTestCase {
Analyzer analyzer = new WhitespaceAnalyzer();
IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
writer.addDocument(testDoc);
- writer.flush();
+ writer.commit();
SegmentInfo info = writer.newestSegment();
writer.close();
//After adding the document, we should be able to read it back in
- SegmentReader reader = SegmentReader.get(info);
+ SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
assertTrue(reader != null);
Document doc = reader.document(0);
assertTrue(doc != null);
@@ -123,10 +123,10 @@ public class TestDocumentWriter extends BaseTokenStreamTestCase {
doc.add(new Field("repeated", "repeated two", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
- writer.flush();
+ writer.commit();
SegmentInfo info = writer.newestSegment();
writer.close();
- SegmentReader reader = SegmentReader.get(info);
+ SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
TermPositions termPositions = reader.termPositions(new Term("repeated", "repeated"));
assertTrue(termPositions.next());
@@ -183,10 +183,10 @@ public class TestDocumentWriter extends BaseTokenStreamTestCase {
doc.add(new Field("f1", "a 5 a a", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
- writer.flush();
+ writer.commit();
SegmentInfo info = writer.newestSegment();
writer.close();
- SegmentReader reader = SegmentReader.get(info);
+ SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
TermPositions termPositions = reader.termPositions(new Term("f1", "a"));
assertTrue(termPositions.next());
@@ -223,10 +223,10 @@ public class TestDocumentWriter extends BaseTokenStreamTestCase {
}, TermVector.NO));
writer.addDocument(doc);
- writer.flush();
+ writer.commit();
SegmentInfo info = writer.newestSegment();
writer.close();
- SegmentReader reader = SegmentReader.get(info);
+ SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
TermPositions termPositions = reader.termPositions(new Term("preanalyzed", "term1"));
assertTrue(termPositions.next());
diff --git a/src/test/org/apache/lucene/index/TestIndexReaderReopen.java b/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
index 17f77e10279..d332fa28748 100644
--- a/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
+++ b/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
@@ -935,7 +935,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
for (int i = 0; i < 100; i++) {
w.addDocument(createDocument(i, 4));
if (multiSegment && (i % 10) == 0) {
- w.flush();
+ w.commit();
}
}
diff --git a/src/test/org/apache/lucene/index/TestIndexWriter.java b/src/test/org/apache/lucene/index/TestIndexWriter.java
index ad7167d1fdc..450d8c95f9c 100644
--- a/src/test/org/apache/lucene/index/TestIndexWriter.java
+++ b/src/test/org/apache/lucene/index/TestIndexWriter.java
@@ -1133,7 +1133,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
public void testIndexNoDocuments() throws IOException {
RAMDirectory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
- writer.flush();
+ writer.commit();
writer.close();
IndexReader reader = IndexReader.open(dir, true);
@@ -1142,7 +1142,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
reader.close();
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
- writer.flush();
+ writer.commit();
writer.close();
reader = IndexReader.open(dir, true);
@@ -1503,7 +1503,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
Document doc = new Document();
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
- writer.flush();
+ writer.commit();
writer.addDocument(new Document());
writer.close();
_TestUtil.checkIndex(dir);
@@ -1595,13 +1595,13 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
Field.TermVector.NO));
iw.addDocument(document);
// Make first segment
- iw.flush();
+ iw.commit();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
// Make 2nd segment
- iw.flush();
+ iw.commit();
iw.optimize();
iw.close();
@@ -1616,14 +1616,14 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
- iw.flush();
+ iw.commit();
document = new Document();
document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.NO));
iw.addDocument(document);
// Make first segment
- iw.flush();
+ iw.commit();
iw.optimize();
@@ -1631,7 +1631,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
Field.TermVector.YES));
iw.addDocument(document);
// Make 2nd segment
- iw.flush();
+ iw.commit();
iw.optimize();
iw.close();
diff --git a/src/test/org/apache/lucene/index/TestIndexWriterDelete.java b/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
index 9fb5795b404..9ba8e7b4c7a 100644
--- a/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
+++ b/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
@@ -152,7 +152,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
addDoc(modifier, ++id, value);
assertEquals(0, modifier.getSegmentCount());
- modifier.flush();
+ modifier.commit();
modifier.commit();
diff --git a/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java b/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
index d341c518484..9d7ae650e76 100644
--- a/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
+++ b/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
@@ -52,7 +52,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase {
d1.add(new Field(term.field(), term.text(), Store.NO, Index.ANALYZED));
writer.addDocument(d1);
}
- writer.flush();
+ writer.commit();
writer.optimize();
writer.close();
diff --git a/src/test/org/apache/lucene/index/TestMultiReader.java b/src/test/org/apache/lucene/index/TestMultiReader.java
index 335aa2ec620..1180a452032 100644
--- a/src/test/org/apache/lucene/index/TestMultiReader.java
+++ b/src/test/org/apache/lucene/index/TestMultiReader.java
@@ -28,8 +28,8 @@ public class TestMultiReader extends TestDirectoryReader {
IndexReader reader;
sis.read(dir);
- SegmentReader reader1 = SegmentReader.get(sis.info(0));
- SegmentReader reader2 = SegmentReader.get(sis.info(1));
+ SegmentReader reader1 = SegmentReader.get(false, sis.info(0), IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+ SegmentReader reader2 = SegmentReader.get(false, sis.info(1), IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
readers[0] = reader1;
readers[1] = reader2;
assertTrue(reader1 != null);
diff --git a/src/test/org/apache/lucene/index/TestPayloads.java b/src/test/org/apache/lucene/index/TestPayloads.java
index 913748580b7..4844d95ff50 100644
--- a/src/test/org/apache/lucene/index/TestPayloads.java
+++ b/src/test/org/apache/lucene/index/TestPayloads.java
@@ -202,7 +202,7 @@ public class TestPayloads extends LuceneTestCase {
}
// make sure we create more than one segment to test merging
- writer.flush();
+ writer.commit();
// now we make sure to have different payload lengths next at the next skip point
for (int i = 0; i < numDocs; i++) {
diff --git a/src/test/org/apache/lucene/index/TestSegmentMerger.java b/src/test/org/apache/lucene/index/TestSegmentMerger.java
index 9f19d1d60fa..4cefa1d4b24 100644
--- a/src/test/org/apache/lucene/index/TestSegmentMerger.java
+++ b/src/test/org/apache/lucene/index/TestSegmentMerger.java
@@ -49,8 +49,8 @@ public class TestSegmentMerger extends LuceneTestCase {
SegmentInfo info1 = DocHelper.writeDoc(merge1Dir, doc1);
DocHelper.setupDoc(doc2);
SegmentInfo info2 = DocHelper.writeDoc(merge2Dir, doc2);
- reader1 = SegmentReader.get(info1);
- reader2 = SegmentReader.get(info2);
+ reader1 = SegmentReader.get(true, info1, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+ reader2 = SegmentReader.get(true, info2, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
}
public void test() {
@@ -69,7 +69,7 @@ public class TestSegmentMerger extends LuceneTestCase {
merger.closeReaders();
assertTrue(docsMerged == 2);
//Should be able to open a new SegmentReader against the new directory
- SegmentReader mergedReader = SegmentReader.get(new SegmentInfo(mergedSegment, docsMerged, mergedDir, false, true));
+ SegmentReader mergedReader = SegmentReader.get(true, new SegmentInfo(mergedSegment, docsMerged, mergedDir, false, true), IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
assertTrue(mergedReader != null);
assertTrue(mergedReader.numDocs() == 2);
Document newDoc1 = mergedReader.document(0);
diff --git a/src/test/org/apache/lucene/index/TestSegmentReader.java b/src/test/org/apache/lucene/index/TestSegmentReader.java
index 358114cdcd7..ae788563820 100644
--- a/src/test/org/apache/lucene/index/TestSegmentReader.java
+++ b/src/test/org/apache/lucene/index/TestSegmentReader.java
@@ -43,7 +43,7 @@ public class TestSegmentReader extends LuceneTestCase {
super.setUp();
DocHelper.setupDoc(testDoc);
SegmentInfo info = DocHelper.writeDoc(dir, testDoc);
- reader = SegmentReader.get(info);
+ reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
}
public void test() {
@@ -73,7 +73,7 @@ public class TestSegmentReader extends LuceneTestCase {
Document docToDelete = new Document();
DocHelper.setupDoc(docToDelete);
SegmentInfo info = DocHelper.writeDoc(dir, docToDelete);
- SegmentReader deleteReader = SegmentReader.get(info);
+ SegmentReader deleteReader = SegmentReader.get(false, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
assertTrue(deleteReader != null);
assertTrue(deleteReader.numDocs() == 1);
deleteReader.deleteDocument(0);
@@ -167,14 +167,7 @@ public class TestSegmentReader extends LuceneTestCase {
// test for fake norms of 1.0 or null depending on the flag
byte [] norms = reader.norms(f.name());
byte norm1 = DefaultSimilarity.encodeNorm(1.0f);
- if (reader.getDisableFakeNorms())
- assertNull(norms);
- else {
- assertEquals(norms.length,reader.maxDoc());
- for (int j=0; j<norms.length; j++)
- assertEquals(norms[j], norm1);
- }
+ assertNull(norms);