mirror of https://github.com/apache/lucene.git
LUCENE-4055: thread FIS/SI to stored fields writer
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene4055@1341665 13f79535-47bb-0310-9956-ffa450edef68
parent c2047945cb
commit 4b00880436
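The hunks below apply one mechanical change across the codecs and the indexing chain: writer-side state no longer carries a bare segment name String; the SegmentInfo (and, for stored fields, the FieldInfos) is threaded through instead. As an illustrative sketch of the pattern only (EXTENSION stands in for whichever per-format constant a codec uses; this is not an excerpt of any single file):

    // Before: file names were derived from a bare segment name on the write state.
    String fileName = IndexFileNames.segmentFileName(state.segmentName, state.segmentSuffix, EXTENSION);

    // After: the SegmentInfo itself rides on SegmentWriteState/PerDocWriteState.
    String fileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, EXTENSION);

    // Stored fields writers additionally receive the new objects directly:
    //   StoredFieldsFormat.fieldsWriter(Directory dir, SegmentInfo si, IOContext context)
    //   StoredFieldsWriter.finish(FieldInfos fis, int numDocs)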
@@ -71,7 +71,7 @@ public class BlockTermsWriter extends FieldsConsumer {
   public BlockTermsWriter(TermsIndexWriterBase termsIndexWriter,
       SegmentWriteState state, PostingsWriterBase postingsWriter)
       throws IOException {
-    final String termsFileName = IndexFileNames.segmentFileName(state.segmentName, state.segmentSuffix, TERMS_EXTENSION);
+    final String termsFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_EXTENSION);
     this.termsIndexWriter = termsIndexWriter;
     out = state.directory.createOutput(termsFileName, state.context);
     boolean success = false;
@@ -144,7 +144,7 @@ public class BlockTreeTermsWriter extends FieldsConsumer {
       throw new IllegalArgumentException("maxItemsInBlock must be at least 2*(minItemsInBlock-1); got maxItemsInBlock=" + maxItemsInBlock + " minItemsInBlock=" + minItemsInBlock);
     }

-    final String termsFileName = IndexFileNames.segmentFileName(state.segmentName, state.segmentSuffix, TERMS_EXTENSION);
+    final String termsFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_EXTENSION);
     out = state.directory.createOutput(termsFileName, state.context);
     boolean success = false;
     IndexOutput indexOut = null;
@@ -156,7 +156,7 @@ public class BlockTreeTermsWriter extends FieldsConsumer {

       //DEBUG = state.segmentName.equals("_4a");

-      final String termsIndexFileName = IndexFileNames.segmentFileName(state.segmentName, state.segmentSuffix, TERMS_INDEX_EXTENSION);
+      final String termsIndexFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_INDEX_EXTENSION);
       indexOut = state.directory.createOutput(termsIndexFileName, state.context);
       writeIndexHeader(indexOut);

@@ -57,7 +57,7 @@ public class FixedGapTermsIndexWriter extends TermsIndexWriterBase {
   @SuppressWarnings("unused") private final FieldInfos fieldInfos; // unread

   public FixedGapTermsIndexWriter(SegmentWriteState state) throws IOException {
-    final String indexFileName = IndexFileNames.segmentFileName(state.segmentName, state.segmentSuffix, TERMS_INDEX_EXTENSION);
+    final String indexFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_INDEX_EXTENSION);
     termIndexInterval = state.termIndexInterval;
     out = state.directory.createOutput(indexFileName, state.context);
     boolean success = false;
@@ -25,6 +25,8 @@ import org.apache.lucene.store.IOContext;
  * limitations under the License.
  */

+// nocommit fix other formats to get SI/FIS too...
+
 /**
  * Controls the format of stored fields
  */
@@ -35,5 +37,5 @@ public abstract class StoredFieldsFormat {

   /** Returns a {@link StoredFieldsWriter} to write stored
    *  fields. */
-  public abstract StoredFieldsWriter fieldsWriter(Directory directory, String segment, IOContext context) throws IOException;
+  public abstract StoredFieldsWriter fieldsWriter(Directory directory, SegmentInfo si, IOContext context) throws IOException;
 }
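A third-party StoredFieldsFormat would adapt to the widened fieldsWriter signature above roughly as follows; MyStoredFieldsWriter is a hypothetical class and the reader side is elided, so treat this as a sketch rather than an excerpt:

    @Override
    public StoredFieldsWriter fieldsWriter(Directory directory, SegmentInfo si, IOContext context) throws IOException {
      // The segment name now comes from the SegmentInfo rather than a String parameter.
      return new MyStoredFieldsWriter(directory, si.name, context);
    }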
@@ -1,15 +1,5 @@
 package org.apache.lucene.codecs;

-import java.io.Closeable;
-import java.io.IOException;
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.index.MergeState;
-import org.apache.lucene.util.Bits;
-
 /**
  * Copyright 2004 The Apache Software Foundation
  *
@@ -26,6 +16,16 @@ import org.apache.lucene.util.Bits;
  * the License.
  */

+import java.io.Closeable;
+import java.io.IOException;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.MergeState;
+import org.apache.lucene.util.Bits;
+
 /**
  * Codec API for writing stored fields:
  * <p>
@@ -63,7 +63,7 @@ public abstract class StoredFieldsWriter implements Closeable {
    * calls to {@link #startDocument(int)}, but a Codec should
    * check that this is the case to detect the JRE bug described
    * in LUCENE-1282. */
-  public abstract void finish(int numDocs) throws IOException;
+  public abstract void finish(FieldInfos fis, int numDocs) throws IOException;

   /** Merges in the stored fields from the readers in
    *  <code>mergeState</code>. The default implementation skips
@@ -94,7 +94,7 @@ public abstract class StoredFieldsWriter implements Closeable {
         mergeState.checkAbort.work(300);
       }
     }
-    finish(docCount);
+    finish(mergeState.fieldInfos, docCount);
     return docCount;
   }

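Concrete writers override the widened finish signature in the same spirit; in this commit the existing implementations ignore the new FieldInfos argument and keep their document-count check, roughly (sketch only; field names such as numDocsWritten vary per writer):

    @Override
    public void finish(FieldInfos fis, int numDocs) throws IOException {
      // fis is available for formats that need per-field metadata at close time;
      // the writers touched here only validate the document count.
      if (numDocsWritten != numDocs) {
        throw new RuntimeException("expected " + numDocs + " docs but wrote " + numDocsWritten);
      }
    }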
@@ -173,7 +173,7 @@ public class VariableGapTermsIndexWriter extends TermsIndexWriterBase {
   // in the extremes.

   public VariableGapTermsIndexWriter(SegmentWriteState state, IndexTermSelector policy) throws IOException {
-    final String indexFileName = IndexFileNames.segmentFileName(state.segmentName, state.segmentSuffix, TERMS_INDEX_EXTENSION);
+    final String indexFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_INDEX_EXTENSION);
     out = state.directory.createOutput(indexFileName, state.context);
     boolean success = false;
     try {
@@ -39,7 +39,7 @@ class Lucene3xStoredFieldsFormat extends StoredFieldsFormat {
   }

   @Override
-  public StoredFieldsWriter fieldsWriter(Directory directory, String segment,
+  public StoredFieldsWriter fieldsWriter(Directory directory, SegmentInfo si,
       IOContext context) throws IOException {
     throw new UnsupportedOperationException("this codec can only be used for reading");
   }
@@ -99,7 +99,7 @@ public final class Lucene40PostingsWriter extends PostingsWriterBase {
     this.skipInterval = skipInterval;
     this.skipMinimum = skipInterval; /* set to the same for now */
     // this.segment = state.segmentName;
-    String fileName = IndexFileNames.segmentFileName(state.segmentName, state.segmentSuffix, Lucene40PostingsFormat.FREQ_EXTENSION);
+    String fileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, Lucene40PostingsFormat.FREQ_EXTENSION);
     freqOut = state.directory.createOutput(fileName, state.context);
     boolean success = false;
     try {
@@ -110,7 +110,7 @@ public final class Lucene40PostingsWriter extends PostingsWriterBase {
       if (state.fieldInfos.hasProx()) {
         // At least one field does not omit TF, so create the
         // prox file
-        fileName = IndexFileNames.segmentFileName(state.segmentName, state.segmentSuffix, Lucene40PostingsFormat.PROX_EXTENSION);
+        fileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, Lucene40PostingsFormat.PROX_EXTENSION);
         proxOut = state.directory.createOutput(fileName, state.context);
       } else {
         // Every field omits TF so we will write no prox file
@@ -86,8 +86,8 @@ public class Lucene40StoredFieldsFormat extends StoredFieldsFormat {
   }

   @Override
-  public StoredFieldsWriter fieldsWriter(Directory directory, String segment,
+  public StoredFieldsWriter fieldsWriter(Directory directory, SegmentInfo si,
       IOContext context) throws IOException {
-    return new Lucene40StoredFieldsWriter(directory, segment, context);
+    return new Lucene40StoredFieldsWriter(directory, si.name, context);
   }
 }
@@ -23,6 +23,7 @@ import org.apache.lucene.codecs.StoredFieldsWriter;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.MergePolicy.MergeAbortedException;
@@ -208,7 +209,7 @@ public final class Lucene40StoredFieldsWriter extends StoredFieldsWriter {
   }

   @Override
-  public void finish(int numDocs) throws IOException {
+  public void finish(FieldInfos fis, int numDocs) throws IOException {
     if (4+((long) numDocs)*8 != indexStream.getFilePointer())
       // This is most likely a bug in Sun JRE 1.6.0_04/_05;
       // we detect that the bug has struck, here, and
@@ -244,7 +245,7 @@ public final class Lucene40StoredFieldsWriter extends StoredFieldsWriter {
                       reader, matchingFieldsReader, rawDocLengths);
       }
     }
-    finish(docCount);
+    finish(mergeState.fieldInfos, docCount);
     return docCount;
   }

@@ -67,7 +67,8 @@ public abstract class DocValuesWriterBase extends PerDocConsumer {
    * docvalues of type {@link Type#BYTES_FIXED_SORTED} and {@link Type#BYTES_VAR_SORTED}.
    */
   protected DocValuesWriterBase(PerDocWriteState state, boolean fasterButMoreRam) {
-    this.segmentName = state.segmentName;
+    // nocommit save away SegmentInfo instead?
+    this.segmentName = state.segmentInfo.name;
     this.bytesUsed = state.bytesUsed;
     this.context = state.context;
     this.fasterButMoreRam = fasterButMoreRam;
@@ -285,7 +285,7 @@ public class MemoryPostingsFormat extends PostingsFormat {
   @Override
   public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {

-    final String fileName = IndexFileNames.segmentFileName(state.segmentName, state.segmentSuffix, EXTENSION);
+    final String fileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, EXTENSION);
     final IndexOutput out = state.directory.createOutput(fileName, state.context);

     return new FieldsConsumer() {
@@ -115,27 +115,27 @@ public final class SepPostingsWriter extends PostingsWriterBase {
     try {
       this.skipInterval = skipInterval;
       this.skipMinimum = skipInterval; /* set to the same for now */
-      final String docFileName = IndexFileNames.segmentFileName(state.segmentName, state.segmentSuffix, DOC_EXTENSION);
+      final String docFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, DOC_EXTENSION);
       docOut = factory.createOutput(state.directory, docFileName, state.context);
       docIndex = docOut.index();

       if (state.fieldInfos.hasFreq()) {
-        final String frqFileName = IndexFileNames.segmentFileName(state.segmentName, state.segmentSuffix, FREQ_EXTENSION);
+        final String frqFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, FREQ_EXTENSION);
         freqOut = factory.createOutput(state.directory, frqFileName, state.context);
         freqIndex = freqOut.index();
       }

       if (state.fieldInfos.hasProx()) {
-        final String posFileName = IndexFileNames.segmentFileName(state.segmentName, state.segmentSuffix, POS_EXTENSION);
+        final String posFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, POS_EXTENSION);
         posOut = factory.createOutput(state.directory, posFileName, state.context);
         posIndex = posOut.index();

         // TODO: -- only if at least one field stores payloads?
-        final String payloadFileName = IndexFileNames.segmentFileName(state.segmentName, state.segmentSuffix, PAYLOAD_EXTENSION);
+        final String payloadFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, PAYLOAD_EXTENSION);
         payloadOut = state.directory.createOutput(payloadFileName, state.context);
       }

-      final String skipFileName = IndexFileNames.segmentFileName(state.segmentName, state.segmentSuffix, SKIP_EXTENSION);
+      final String skipFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, SKIP_EXTENSION);
       skipOut = state.directory.createOutput(skipFileName, state.context);

       totalNumDocs = state.numDocs;
@@ -46,7 +46,7 @@ class SimpleTextFieldsWriter extends FieldsConsumer {
   final static BytesRef PAYLOAD = new BytesRef(" payload ");

   public SimpleTextFieldsWriter(SegmentWriteState state) throws IOException {
-    final String fileName = SimpleTextPostingsFormat.getPostingsFileName(state.segmentName, state.segmentSuffix);
+    final String fileName = SimpleTextPostingsFormat.getPostingsFileName(state.segmentInfo.name, state.segmentSuffix);
     out = state.directory.createOutput(fileName, state.context);
   }

@@ -51,7 +51,7 @@ class SimpleTextPerDocConsumer extends PerDocConsumer {
   @Override
   public DocValuesConsumer addValuesField(Type type, FieldInfo field)
       throws IOException {
-    return new SimpleTextDocValuesConsumer(SimpleTextDocValuesFormat.docValuesId(state.segmentName,
+    return new SimpleTextDocValuesConsumer(SimpleTextDocValuesFormat.docValuesId(state.segmentInfo.name,
         field.number), state.directory, state.context, type, segmentSuffix);
   }

@@ -42,7 +42,7 @@ public class SimpleTextStoredFieldsFormat extends StoredFieldsFormat {
   }

   @Override
-  public StoredFieldsWriter fieldsWriter(Directory directory, String segment, IOContext context) throws IOException {
-    return new SimpleTextStoredFieldsWriter(directory, segment, context);
+  public StoredFieldsWriter fieldsWriter(Directory directory, SegmentInfo si, IOContext context) throws IOException {
+    return new SimpleTextStoredFieldsWriter(directory, si.name, context);
   }
 }
@@ -21,6 +21,7 @@ import java.io.IOException;

 import org.apache.lucene.codecs.StoredFieldsWriter;
 import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.store.Directory;
@@ -163,7 +164,7 @@ public class SimpleTextStoredFieldsWriter extends StoredFieldsWriter {
   }

   @Override
-  public void finish(int numDocs) throws IOException {
+  public void finish(FieldInfos fis, int numDocs) throws IOException {
     if (numDocsWritten != numDocs) {
       throw new RuntimeException("mergeFields produced an invalid result: docCount is " + numDocs
           + " but only saw " + numDocsWritten + " file=" + out.toString() + "; now aborting this merge to prevent index corruption");
@@ -92,7 +92,7 @@ final class DocFieldProcessor extends DocConsumer {
       // FreqProxTermsWriter does this with
       // FieldInfo.storePayload.
       FieldInfosWriter infosWriter = codec.fieldInfosFormat().getFieldInfosWriter();
-      infosWriter.write(state.directory, state.segmentName, state.fieldInfos, IOContext.DEFAULT);
+      infosWriter.write(state.directory, state.segmentInfo.name, state.fieldInfos, IOContext.DEFAULT);
     }

     @Override
@@ -494,6 +494,7 @@ final class DocumentsWriter {
   private void publishFlushedSegment(FlushedSegment newSegment, FrozenBufferedDeletes globalPacket)
       throws IOException {
     assert newSegment != null;
+    assert newSegment.segmentInfo != null;
     final SegmentInfoPerCommit segInfo = indexWriter.prepareFlushedSegment(newSegment);
     final BufferedDeletes deletes = newSegment.segmentDeletes;
     if (infoStream.isEnabled("DW")) {
@@ -172,7 +172,7 @@ class DocumentsWriterPerThread {
   SegmentWriteState flushState;
   //Deletes for our still-in-RAM (to be flushed next) segment
   BufferedDeletes pendingDeletes;
-  String segment;          // Current segment we are working on
+  SegmentInfo segmentInfo; // Current segment we are working on
   boolean aborting = false;   // True if an abort is pending
   boolean hasAborted = false; // True if the last exception throws by #updateDocument was aborting

@@ -231,17 +231,11 @@ class DocumentsWriterPerThread {
     docState.doc = doc;
     docState.analyzer = analyzer;
     docState.docID = numDocsInRAM;
-    if (segment == null) {
-      // this call is synchronized on IndexWriter.segmentInfos
-      segment = writer.newSegmentName();
-      assert numDocsInRAM == 0;
-      if (INFO_VERBOSE && infoStream.isEnabled("DWPT")) {
-        infoStream.message("DWPT", Thread.currentThread().getName() + " init seg=" + segment + " delQueue=" + deleteQueue);
-      }
-
+    if (segmentInfo == null) {
+      initSegmentInfo();
     }
     if (INFO_VERBOSE && infoStream.isEnabled("DWPT")) {
-      infoStream.message("DWPT", Thread.currentThread().getName() + " update delTerm=" + delTerm + " docID=" + docState.docID + " seg=" + segment);
+      infoStream.message("DWPT", Thread.currentThread().getName() + " update delTerm=" + delTerm + " docID=" + docState.docID + " seg=" + segmentInfo.name);
     }
     boolean success = false;
     try {
@@ -274,20 +268,27 @@ class DocumentsWriterPerThread {
       finishDocument(delTerm);
     }

+  private void initSegmentInfo() {
+    String segment = writer.newSegmentName();
+    segmentInfo = new SegmentInfo(directoryOrig, Constants.LUCENE_MAIN_VERSION, segment, 0,
+                                  -1, segment, false, null, false,
+                                  codec,
+                                  null, null);
+    assert numDocsInRAM == 0;
+    if (INFO_VERBOSE && infoStream.isEnabled("DWPT")) {
+      infoStream.message("DWPT", Thread.currentThread().getName() + " init seg=" + segment + " delQueue=" + deleteQueue);
+    }
+  }
+
   public int updateDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs, Analyzer analyzer, Term delTerm) throws IOException {
     assert writer.testPoint("DocumentsWriterPerThread addDocuments start");
     assert deleteQueue != null;
     docState.analyzer = analyzer;
-    if (segment == null) {
-      // this call is synchronized on IndexWriter.segmentInfos
-      segment = writer.newSegmentName();
-      assert numDocsInRAM == 0;
-      if (INFO_VERBOSE && infoStream.isEnabled("DWPT")) {
-        infoStream.message("DWPT", Thread.currentThread().getName() + " init seg=" + segment + " delQueue=" + deleteQueue);
-      }
+    if (segmentInfo == null) {
+      initSegmentInfo();
     }
     if (INFO_VERBOSE && infoStream.isEnabled("DWPT")) {
-      infoStream.message("DWPT", Thread.currentThread().getName() + " update delTerm=" + delTerm + " docID=" + docState.docID + " seg=" + segment);
+      infoStream.message("DWPT", Thread.currentThread().getName() + " update delTerm=" + delTerm + " docID=" + docState.docID + " seg=" + segmentInfo.name);
     }
     int docCount = 0;
     try {
@@ -419,7 +420,7 @@ class DocumentsWriterPerThread {

   /** Reset after a flush */
   private void doAfterFlush() throws IOException {
-    segment = null;
+    segmentInfo = null;
     consumer.doAfterFlush();
     directory.getCreatedFiles().clear();
     fieldInfos = new FieldInfos.Builder(fieldInfos.globalFieldNumbers);
@@ -450,7 +451,7 @@ class DocumentsWriterPerThread {
   FlushedSegment flush() throws IOException {
     assert numDocsInRAM > 0;
     assert deleteSlice == null : "all deletes must be applied in prepareFlush";
-    flushState = new SegmentWriteState(infoStream, directory, segment, fieldInfos.finish(),
+    flushState = new SegmentWriteState(infoStream, directory, segmentInfo, fieldInfos.finish(),
         numDocsInRAM, writer.getConfig().getTermIndexInterval(),
         codec, pendingDeletes, new IOContext(new FlushInfo(numDocsInRAM, bytesUsed())));
     final double startMBUsed = parent.flushControl.netBytes() / 1024. / 1024.;
@@ -469,7 +470,7 @@ class DocumentsWriterPerThread {
     }

     if (infoStream.isEnabled("DWPT")) {
-      infoStream.message("DWPT", "flush postings as segment " + flushState.segmentName + " numDocs=" + numDocsInRAM);
+      infoStream.message("DWPT", "flush postings as segment " + flushState.segmentInfo.name + " numDocs=" + numDocsInRAM);
     }

     if (aborting) {
@@ -484,11 +485,8 @@ class DocumentsWriterPerThread {
     try {
       consumer.flush(flushState);
       pendingDeletes.terms.clear();
-      final SegmentInfo newSegment = new SegmentInfo(directoryOrig, Constants.LUCENE_MAIN_VERSION, segment, flushState.numDocs,
-                                                     -1, segment, false, null, false,
-                                                     flushState.codec,
-                                                     null, null);
-      newSegment.setFiles(new HashSet<String>(directory.getCreatedFiles()));
+      segmentInfo.docCount = flushState.numDocs;
+      segmentInfo.setFiles(new HashSet<String>(directory.getCreatedFiles()));

       if (infoStream.isEnabled("DWPT")) {
         infoStream.message("DWPT", "new segment has " + (flushState.liveDocs == null ? 0 : (flushState.numDocs - flushState.delCountOnFlush)) + " deleted docs");
@@ -498,8 +496,8 @@ class DocumentsWriterPerThread {
                                    (flushState.fieldInfos.hasDocValues() ? "docValues" : "no docValues") + "; " +
                                    (flushState.fieldInfos.hasProx() ? "prox" : "no prox") + "; " +
                                    (flushState.fieldInfos.hasFreq() ? "freqs" : "no freqs"));
-        infoStream.message("DWPT", "flushedFiles=" + newSegment.files());
-        infoStream.message("DWPT", "flushed codec=" + newSegment.getCodec());
+        infoStream.message("DWPT", "flushedFiles=" + segmentInfo.files());
+        infoStream.message("DWPT", "flushed codec=" + codec);
       }

       flushedDocCount += flushState.numDocs;
@@ -514,22 +512,26 @@ class DocumentsWriterPerThread {
       }

       if (infoStream.isEnabled("DWPT")) {
-        final double newSegmentSize = newSegment.sizeInBytes()/1024./1024.;
-        infoStream.message("DWPT", "flushed: segment=" + newSegment +
+        final double newSegmentSize = segmentInfo.sizeInBytes()/1024./1024.;
+        infoStream.message("DWPT", "flushed: segment=" + segmentInfo.name +
                 " ramUsed=" + nf.format(startMBUsed) + " MB" +
                 " newFlushedSize(includes docstores)=" + nf.format(newSegmentSize) + " MB" +
                 " docs/MB=" + nf.format(flushedDocCount / newSegmentSize));
       }
+
+      assert segmentInfo != null;
+
+      FlushedSegment fs = new FlushedSegment(new SegmentInfoPerCommit(segmentInfo, 0, -1L), flushState.fieldInfos,
+                                             segmentDeletes, flushState.liveDocs, flushState.delCountOnFlush);
       doAfterFlush();
       success = true;

-      return new FlushedSegment(new SegmentInfoPerCommit(newSegment, 0, -1L), flushState.fieldInfos,
-                                segmentDeletes, flushState.liveDocs, flushState.delCountOnFlush);
+      return fs;
     } finally {
       if (!success) {
-        if (segment != null) {
+        if (segmentInfo != null) {
           synchronized(parent.indexWriter) {
-            parent.indexWriter.deleter.refresh(segment);
+            parent.indexWriter.deleter.refresh(segmentInfo.name);
           }
         }
         abort();
@@ -537,9 +539,9 @@ class DocumentsWriterPerThread {
       }
     }

-  /** Get current segment name we are writing. */
-  String getSegment() {
-    return segment;
+  /** Get current segment info we are writing. */
+  SegmentInfo getSegmentInfo() {
+    return segmentInfo;
   }

   long bytesUsed() {
@@ -572,14 +574,14 @@ class DocumentsWriterPerThread {
   }

   PerDocWriteState newPerDocWriteState(String segmentSuffix) {
-    assert segment != null;
-    return new PerDocWriteState(infoStream, directory, segment, bytesUsed, segmentSuffix, IOContext.DEFAULT);
+    assert segmentInfo != null;
+    return new PerDocWriteState(infoStream, directory, segmentInfo, bytesUsed, segmentSuffix, IOContext.DEFAULT);
   }

   @Override
   public String toString() {
     return "DocumentsWriterPerThread [pendingDeletes=" + pendingDeletes
-        + ", segment=" + segment + ", aborting=" + aborting + ", numDocsInRAM="
+        + ", segment=" + (segmentInfo != null ? segmentInfo.name : "null") + ", aborting=" + aborting + ", numDocsInRAM="
         + numDocsInRAM + ", deleteQueue=" + deleteQueue + "]";
   }
 }
@@ -2288,7 +2288,12 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
       // TODO: somehow we should fix this merge so it's
       // abortable so that IW.close(false) is able to stop it
       TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(directory);
-      SegmentMerger merger = new SegmentMerger(infoStream, trackingDir, config.getTermIndexInterval(),
+
+      SegmentInfo info = new SegmentInfo(directory, Constants.LUCENE_MAIN_VERSION, mergedName, 0,
+                                         -1, mergedName, false, null, false,
+                                         codec, null, null);
+
+      SegmentMerger merger = new SegmentMerger(info, infoStream, trackingDir, config.getTermIndexInterval(),
                                                mergedName, MergeState.CheckAbort.NONE, payloadProcessorProvider,
                                                new FieldInfos.Builder(globalFieldNumberMap), codec, context);

@@ -2297,9 +2302,8 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
       }

       MergeState mergeState = merger.merge(); // merge 'em
-      SegmentInfo info = new SegmentInfo(directory, Constants.LUCENE_MAIN_VERSION, mergedName, mergeState.mergedDocCount,
-                                         -1, mergedName, false, null, false,
-                                         codec, null, null);
+      info.docCount = mergeState.mergedDocCount;
+
       SegmentInfoPerCommit infoPerCommit = new SegmentInfoPerCommit(info, 0, -1L);

       info.setFiles(new HashSet<String>(trackingDir.getCreatedFiles()));
@@ -3433,7 +3437,8 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {

     final MergeState.CheckAbort checkAbort = new MergeState.CheckAbort(merge, directory);
     final TrackingDirectoryWrapper dirWrapper = new TrackingDirectoryWrapper(directory);
-    SegmentMerger merger = new SegmentMerger(infoStream, dirWrapper, config.getTermIndexInterval(), mergedName, checkAbort,
+
+    SegmentMerger merger = new SegmentMerger(merge.info.info, infoStream, dirWrapper, config.getTermIndexInterval(), mergedName, checkAbort,
                                              payloadProcessorProvider, new FieldInfos.Builder(globalFieldNumberMap), codec, context);

     if (infoStream.isEnabled("IW")) {
@@ -40,6 +40,7 @@ public class MergeState {
     }
   }

+  public SegmentInfo segmentInfo;
   public FieldInfos fieldInfos;
   public List<IndexReaderAndLiveDocs> readers; // Readers & liveDocs being merged
   public int[][] docMaps; // Maps docIDs around deletions
@@ -32,17 +32,17 @@ import org.apache.lucene.util.InfoStream;
 public class PerDocWriteState {
   public final InfoStream infoStream;
   public final Directory directory;
-  public final String segmentName;
+  public final SegmentInfo segmentInfo;
   public final Counter bytesUsed;
   public final String segmentSuffix;
   public final IOContext context;

   public PerDocWriteState(InfoStream infoStream, Directory directory,
-      String segmentName, Counter bytesUsed,
+      SegmentInfo segmentInfo, Counter bytesUsed,
       String segmentSuffix, IOContext context) {
     this.infoStream = infoStream;
     this.directory = directory;
-    this.segmentName = segmentName;
+    this.segmentInfo = segmentInfo;
     this.segmentSuffix = segmentSuffix;
     this.bytesUsed = bytesUsed;
     this.context = context;
@@ -51,7 +51,7 @@ public class PerDocWriteState {
   public PerDocWriteState(SegmentWriteState state) {
     infoStream = state.infoStream;
     directory = state.directory;
-    segmentName = state.segmentName;
+    segmentInfo = state.segmentInfo;
     segmentSuffix = state.segmentSuffix;
     bytesUsed = Counter.newCounter();
     context = state.context;
@@ -60,7 +60,7 @@ public class PerDocWriteState {
   public PerDocWriteState(PerDocWriteState state, String segmentSuffix) {
     this.infoStream = state.infoStream;
     this.directory = state.directory;
-    this.segmentName = state.segmentName;
+    this.segmentInfo = state.segmentInfo;
     this.segmentSuffix = segmentSuffix;
     this.bytesUsed = state.bytesUsed;
     this.context = state.context;
@@ -56,7 +56,12 @@ final class SegmentMerger {
   private final MergeState mergeState = new MergeState();
   private final FieldInfos.Builder fieldInfosBuilder;

-  SegmentMerger(InfoStream infoStream, Directory dir, int termIndexInterval, String name, MergeState.CheckAbort checkAbort, PayloadProcessorProvider payloadProcessorProvider, FieldInfos.Builder fieldInfosBuilder, Codec codec, IOContext context) {
+  // nocommit nuke name since SI has it.... but Directory is
+  // NOT the same!!
+  SegmentMerger(SegmentInfo segmentInfo, InfoStream infoStream, Directory dir, int termIndexInterval, String name,
+                MergeState.CheckAbort checkAbort, PayloadProcessorProvider payloadProcessorProvider,
+                FieldInfos.Builder fieldInfosBuilder, Codec codec, IOContext context) {
+    mergeState.segmentInfo = segmentInfo;
     mergeState.infoStream = infoStream;
     mergeState.readers = new ArrayList<MergeState.IndexReaderAndLiveDocs>();
     mergeState.checkAbort = checkAbort;
@@ -107,12 +112,14 @@ final class SegmentMerger {

     mergeState.mergedDocCount = setDocMaps();

-    mergeFieldInfos();
+    mergeDocValuesAndNormsFieldInfos();
     setMatchingSegmentReaders();
     int numMerged = mergeFields();
     assert numMerged == mergeState.mergedDocCount;

-    final SegmentWriteState segmentWriteState = new SegmentWriteState(mergeState.infoStream, directory, segment, mergeState.fieldInfos, mergeState.mergedDocCount, termIndexInterval, codec, null, context);
+    final SegmentWriteState segmentWriteState = new SegmentWriteState(mergeState.infoStream, directory, mergeState.segmentInfo,
+                                                                      mergeState.fieldInfos, mergeState.mergedDocCount,
+                                                                      termIndexInterval, codec, null, context);
     mergeTerms(segmentWriteState);
     mergePerDoc(segmentWriteState);

@@ -192,10 +199,6 @@ final class SegmentMerger {
       }
     }

-  private void mergeFieldInfos() throws IOException {
-    mergeDocValuesAndNormsFieldInfos();
-  }
-
   // NOTE: this is actually merging all the fieldinfos
   public void mergeDocValuesAndNormsFieldInfos() throws IOException {
     // mapping from all docvalues fields found to their promoted types
@@ -261,7 +264,7 @@ final class SegmentMerger {
    * @throws IOException if there is a low-level IO error
    */
   private int mergeFields() throws CorruptIndexException, IOException {
-    final StoredFieldsWriter fieldsWriter = codec.storedFieldsFormat().fieldsWriter(directory, segment, context);
+    final StoredFieldsWriter fieldsWriter = codec.storedFieldsFormat().fieldsWriter(directory, mergeState.segmentInfo, context);

     try {
       return fieldsWriter.merge(mergeState);
@@ -30,7 +30,7 @@ import org.apache.lucene.util.MutableBits;
 public class SegmentWriteState {
   public final InfoStream infoStream;
   public final Directory directory;
-  public final String segmentName;
+  public final SegmentInfo segmentInfo;
   public final FieldInfos fieldInfos;
   public final int numDocs;
   public int delCountOnFlush;
@@ -57,12 +57,14 @@ public class SegmentWriteState {

   public final IOContext context;

-  public SegmentWriteState(InfoStream infoStream, Directory directory, String segmentName, FieldInfos fieldInfos,
+  public SegmentWriteState(InfoStream infoStream, Directory directory, SegmentInfo segmentInfo, FieldInfos fieldInfos,
       int numDocs, int termIndexInterval, Codec codec, BufferedDeletes segDeletes, IOContext context) {
     this.infoStream = infoStream;
     this.segDeletes = segDeletes;
     this.directory = directory;
-    this.segmentName = segmentName;
+    // nocommit a lot of this is redundant w/ SI! BUT not
+    // the Directory!!!! one is tracking one is not!!!
+    this.segmentInfo = segmentInfo;
     this.fieldInfos = fieldInfos;
     this.numDocs = numDocs;
     this.termIndexInterval = termIndexInterval;
@@ -77,7 +79,7 @@ public class SegmentWriteState {
   public SegmentWriteState(SegmentWriteState state, String segmentSuffix) {
     infoStream = state.infoStream;
     directory = state.directory;
-    segmentName = state.segmentName;
+    segmentInfo = state.segmentInfo;
     fieldInfos = state.fieldInfos;
     numDocs = state.numDocs;
     termIndexInterval = state.termIndexInterval;
@@ -69,7 +69,7 @@ final class StoredFieldsConsumer {

     if (fieldsWriter != null) {
       try {
-        fieldsWriter.finish(state.numDocs);
+        fieldsWriter.finish(state.fieldInfos, state.numDocs);
       } finally {
         fieldsWriter.close();
         fieldsWriter = null;
@@ -80,7 +80,7 @@ final class StoredFieldsConsumer {

   private synchronized void initFieldsWriter(IOContext context) throws IOException {
     if (fieldsWriter == null) {
-      fieldsWriter = codec.storedFieldsFormat().fieldsWriter(docWriter.directory, docWriter.getSegment(), context);
+      fieldsWriter = codec.storedFieldsFormat().fieldsWriter(docWriter.directory, docWriter.getSegmentInfo(), context);
       lastDocID = 0;
     }
   }
@@ -54,7 +54,7 @@ final class TermVectorsConsumer extends TermsHashConsumer {
       // At least one doc in this run had term vectors enabled
       try {
         fill(state.numDocs);
-        assert state.segmentName != null;
+        assert state.segmentInfo != null;
         writer.finish(state.numDocs);
       } finally {
         IOUtils.close(writer);
@@ -84,7 +84,7 @@ final class TermVectorsConsumer extends TermsHashConsumer {
   private final void initTermVectorsWriter() throws IOException {
     if (writer == null) {
       IOContext context = new IOContext(new FlushInfo(docWriter.getNumDocsInRAM(), docWriter.bytesUsed()));
-      writer = docWriter.codec.termVectorsFormat().vectorsWriter(docWriter.directory, docWriter.getSegment(), context);
+      writer = docWriter.codec.termVectorsFormat().vectorsWriter(docWriter.directory, docWriter.getSegmentInfo().name, context);
       lastDocID = 0;
     }
   }
@@ -619,7 +619,8 @@ public class TestCodecs extends LuceneTestCase {

     final int termIndexInterval = _TestUtil.nextInt(random(), 13, 27);
     final Codec codec = Codec.getDefault();
-    final SegmentWriteState state = new SegmentWriteState(InfoStream.getDefault(), dir, SEGMENT, fieldInfos, 10000, termIndexInterval, codec, null, newIOContext(random()));
+    final SegmentInfo si = new SegmentInfo(dir, Constants.LUCENE_MAIN_VERSION, SEGMENT, 10000, -1, SEGMENT, false, null, false, codec, null, null);
+    final SegmentWriteState state = new SegmentWriteState(InfoStream.getDefault(), dir, si, fieldInfos, 10000, termIndexInterval, codec, null, newIOContext(random()));

     final FieldsConsumer consumer = codec.postingsFormat().fieldsConsumer(state);
     Arrays.sort(fields);
@@ -197,7 +197,10 @@ public class TestDoc extends LuceneTestCase {

       final Codec codec = Codec.getDefault();
       TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(si1.info.dir);
-      SegmentMerger merger = new SegmentMerger(InfoStream.getDefault(), trackingDir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL, merged, MergeState.CheckAbort.NONE, null, new FieldInfos.Builder(), codec, context);
+      final SegmentInfo si = new SegmentInfo(si1.info.dir, Constants.LUCENE_MAIN_VERSION, merged, 10000, -1, merged, false, null, false, codec, null, null);
+
+      SegmentMerger merger = new SegmentMerger(si, InfoStream.getDefault(), trackingDir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL,
+                                               merged, MergeState.CheckAbort.NONE, null, new FieldInfos.Builder(), codec, context);

       merger.add(r1);
       merger.add(r2);
@@ -77,7 +77,10 @@ public class TestSegmentMerger extends LuceneTestCase {

   public void testMerge() throws IOException {
     final Codec codec = Codec.getDefault();
-    SegmentMerger merger = new SegmentMerger(InfoStream.getDefault(), mergedDir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL, mergedSegment, MergeState.CheckAbort.NONE, null, new FieldInfos.Builder(), codec, newIOContext(random()));
+    final SegmentInfo si = new SegmentInfo(mergedDir, Constants.LUCENE_MAIN_VERSION, mergedSegment, 10000, -1, mergedSegment, false, null, false, codec, null, null);
+
+    SegmentMerger merger = new SegmentMerger(si, InfoStream.getDefault(), mergedDir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL,
+                                             mergedSegment, MergeState.CheckAbort.NONE, null, new FieldInfos.Builder(), codec, newIOContext(random()));
     merger.add(reader1);
     merger.add(reader2);
     MergeState mergeState = merger.merge();
@@ -43,13 +43,13 @@ class PreFlexRWFieldsWriter extends FieldsConsumer {

   public PreFlexRWFieldsWriter(SegmentWriteState state) throws IOException {
     termsOut = new TermInfosWriter(state.directory,
-                                   state.segmentName,
+                                   state.segmentInfo.name,
                                    state.fieldInfos,
                                    state.termIndexInterval);

     boolean success = false;
     try {
-      final String freqFile = IndexFileNames.segmentFileName(state.segmentName, "", Lucene3xPostingsFormat.FREQ_EXTENSION);
+      final String freqFile = IndexFileNames.segmentFileName(state.segmentInfo.name, "", Lucene3xPostingsFormat.FREQ_EXTENSION);
       freqOut = state.directory.createOutput(freqFile, state.context);
       totalNumDocs = state.numDocs;
       success = true;
@@ -62,7 +62,7 @@ class PreFlexRWFieldsWriter extends FieldsConsumer {
     success = false;
     try {
       if (state.fieldInfos.hasProx()) {
-        final String proxFile = IndexFileNames.segmentFileName(state.segmentName, "", Lucene3xPostingsFormat.PROX_EXTENSION);
+        final String proxFile = IndexFileNames.segmentFileName(state.segmentInfo.name, "", Lucene3xPostingsFormat.PROX_EXTENSION);
         proxOut = state.directory.createOutput(proxFile, state.context);
       } else {
         proxOut = null;
@@ -28,7 +28,6 @@ class PreFlexRWNormsFormat extends Lucene3xNormsFormat {

   @Override
   public PerDocConsumer docsConsumer(PerDocWriteState state) throws IOException {
-    return new PreFlexRWNormsConsumer(state.directory, state.segmentName, state.context);
+    return new PreFlexRWNormsConsumer(state.directory, state.segmentInfo.name, state.context);
   }
-
 }
@@ -20,14 +20,15 @@ package org.apache.lucene.codecs.lucene3x;
 import java.io.IOException;

 import org.apache.lucene.codecs.StoredFieldsWriter;
+import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.SegmentInfo;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;

 class PreFlexRWStoredFieldsFormat extends Lucene3xStoredFieldsFormat {

   @Override
-  public StoredFieldsWriter fieldsWriter(Directory directory, String segment, IOContext context) throws IOException {
-    return new PreFlexRWStoredFieldsWriter(directory, segment, context);
+  public StoredFieldsWriter fieldsWriter(Directory directory, SegmentInfo segmentInfo, IOContext context) throws IOException {
+    return new PreFlexRWStoredFieldsWriter(directory, segmentInfo.name, context);
   }
-
 }
@@ -20,6 +20,7 @@ import java.io.IOException;

 import org.apache.lucene.codecs.StoredFieldsWriter;
 import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.store.Directory;
@@ -143,7 +144,7 @@ final class PreFlexRWStoredFieldsWriter extends StoredFieldsWriter {
   }

   @Override
-  public void finish(int numDocs) throws IOException {
+  public void finish(FieldInfos fis, int numDocs) throws IOException {
     if (4+((long) numDocs)*8 != indexStream.getFilePointer())
       // This is most likely a bug in Sun JRE 1.6.0_04/_05;
      // we detect that the bug has struck, here, and
@@ -138,10 +138,10 @@ public class MockRandomPostingsFormat extends PostingsFormat {
     final long seed = seedRandom.nextLong();

     if (LuceneTestCase.VERBOSE) {
-      System.out.println("MockRandomCodec: writing to seg=" + state.segmentName + " formatID=" + state.segmentSuffix + " seed=" + seed);
+      System.out.println("MockRandomCodec: writing to seg=" + state.segmentInfo.name + " formatID=" + state.segmentSuffix + " seed=" + seed);
     }

-    final String seedFileName = IndexFileNames.segmentFileName(state.segmentName, state.segmentSuffix, SEED_EXT);
+    final String seedFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, SEED_EXT);
     final IndexOutput out = state.directory.createOutput(seedFileName, state.context);
     try {
       out.writeLong(seed);
@@ -540,7 +540,7 @@ public class RAMOnlyPostingsFormat extends PostingsFormat {
     // TODO -- ok to do this up front instead of
     // on close....? should be ok?
     // Write our ID:
-    final String idFileName = IndexFileNames.segmentFileName(writeState.segmentName, writeState.segmentSuffix, ID_EXTENSION);
+    final String idFileName = IndexFileNames.segmentFileName(writeState.segmentInfo.name, writeState.segmentSuffix, ID_EXTENSION);
     IndexOutput out = writeState.directory.createOutput(idFileName, writeState.context);
     boolean success = false;
     try {