mirror of https://github.com/apache/lucene.git
LUCENE-5611: always abort if we hit exc in StoredFieldsWriter.start/FinishDocument
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1591657 13f79535-47bb-0310-9956-ffa450edef68
commit 46ae34459c
parent 9968bd91df
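In short: every call into StoredFieldsWriter.startDocument()/finishDocument() is now wrapped so that an exception marks the in-progress segment as aborting before it propagates, rather than leaving a partially written document behind. Below is a minimal, self-contained sketch of that try/finally guard; DocWriterStub, StoredFieldsStub, and AbortOnExceptionSketch are hypothetical stand-ins invented for this illustration only, while the real helpers (startStoredFields/finishStoredFields on DefaultIndexingChain) appear in the diff that follows.

    import java.io.IOException;

    // Hypothetical stand-ins for illustration; not Lucene's real types.
    interface DocWriterStub { void setAborting(); }
    interface StoredFieldsStub {
      void startDocument() throws IOException;
      void finishDocument() throws IOException;
    }

    class AbortOnExceptionSketch {
      private final DocWriterStub docWriter;
      private final StoredFieldsStub storedFieldsWriter;

      AbortOnExceptionSketch(DocWriterStub docWriter, StoredFieldsStub storedFieldsWriter) {
        this.docWriter = docWriter;
        this.storedFieldsWriter = storedFieldsWriter;
      }

      /** Calls startDocument(); if it throws, flag the segment as aborting before the exception propagates. */
      void startStoredFields() throws IOException {
        boolean success = false;
        try {
          storedFieldsWriter.startDocument();
          success = true;
        } finally {
          if (!success) {
            docWriter.setAborting();   // exception still reaches the caller; segment is flagged first
          }
        }
      }

      /** Same guard around finishDocument(). */
      void finishStoredFields() throws IOException {
        boolean success = false;
        try {
          storedFieldsWriter.finishDocument();
          success = true;
        } finally {
          if (!success) {
            docWriter.setAborting();
          }
        }
      }
    }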
@@ -230,13 +230,14 @@ public final class Lucene40StoredFieldsWriter extends StoredFieldsWriter {
 
   @Override
   public void finish(FieldInfos fis, int numDocs) {
-    if (HEADER_LENGTH_IDX+((long) numDocs)*8 != indexStream.getFilePointer())
+    long indexFP = indexStream.getFilePointer();
+    if (HEADER_LENGTH_IDX+((long) numDocs)*8 != indexFP)
       // This is most likely a bug in Sun JRE 1.6.0_04/_05;
       // we detect that the bug has struck, here, and
       // throw an exception to prevent the corruption from
       // entering the index. See LUCENE-1282 for
       // details.
-      throw new RuntimeException("fdx size mismatch: docCount is " + numDocs + " but fdx file size is " + indexStream.getFilePointer() + " file=" + indexStream.toString() + "; now aborting this merge to prevent index corruption");
+      throw new RuntimeException("fdx size mismatch: docCount is " + numDocs + " but fdx file size is " + indexFP + " (wrote numDocs=" + ((indexFP-HEADER_LENGTH_IDX)/8.0) + " file=" + indexStream.toString() + "; now aborting this merge to prevent index corruption");
   }
 
   @Override
@@ -425,13 +425,14 @@ public final class Lucene40TermVectorsWriter extends TermVectorsWriter {
 
   @Override
   public void finish(FieldInfos fis, int numDocs) {
-    if (HEADER_LENGTH_INDEX+((long) numDocs)*16 != tvx.getFilePointer())
+    long indexFP = tvx.getFilePointer();
+    if (HEADER_LENGTH_INDEX+((long) numDocs)*16 != indexFP)
       // This is most likely a bug in Sun JRE 1.6.0_04/_05;
       // we detect that the bug has struck, here, and
       // throw an exception to prevent the corruption from
       // entering the index. See LUCENE-1282 for
       // details.
-      throw new RuntimeException("tvx size mismatch: mergedDocs is " + numDocs + " but tvx size is " + tvx.getFilePointer() + " file=" + tvx.toString() + "; now aborting this merge to prevent index corruption");
+      throw new RuntimeException("tvx size mismatch: mergedDocs is " + numDocs + " but tvx size is " + indexFP + " (wrote numDocs=" + ((indexFP - HEADER_LENGTH_INDEX)/16.0) + " file=" + tvx.toString() + "; now aborting this merge to prevent index corruption");
   }
 
   /** Close all streams. */
@@ -160,9 +160,8 @@ final class DefaultIndexingChain extends DocConsumer {
    *  stored fields. */
   private void fillStoredFields(int docID) throws IOException {
     while (lastStoredDocID < docID) {
-      storedFieldsWriter.startDocument();
-      lastStoredDocID++;
-      storedFieldsWriter.finishDocument();
+      startStoredFields();
+      finishStoredFields();
     }
   }
 
@@ -242,6 +241,35 @@ final class DefaultIndexingChain extends DocConsumer {
     hashMask = newHashMask;
   }
 
+  /** Calls StoredFieldsWriter.startDocument, aborting the
+   *  segment if it hits any exception. */
+  private void startStoredFields() throws IOException {
+    boolean success = false;
+    try {
+      storedFieldsWriter.startDocument();
+      success = true;
+    } finally {
+      if (success == false) {
+        docWriter.setAborting();
+      }
+    }
+    lastStoredDocID++;
+  }
+
+  /** Calls StoredFieldsWriter.finishDocument, aborting the
+   *  segment if it hits any exception. */
+  private void finishStoredFields() throws IOException {
+    boolean success = false;
+    try {
+      storedFieldsWriter.finishDocument();
+      success = true;
+    } finally {
+      if (success == false) {
+        docWriter.setAborting();
+      }
+    }
+  }
+
   @Override
   public void processDocument() throws IOException {
 
@@ -295,10 +323,9 @@ final class DefaultIndexingChain extends DocConsumer {
     // Add stored fields:
     // TODO: if these hit exc today ->>> corrumption!
     fillStoredFields(docState.docID);
-    storedFieldsWriter.startDocument();
-    lastStoredDocID++;
+    startStoredFields();
 
-    // TODO: clean up this looop, its complicated because dv exceptions are non-aborting,
+    // TODO: clean up this loop, it's complicated because dv exceptions are non-aborting,
     // but storedfields are. Its also bogus that docvalues are treated as stored fields...
     for (StorableField field : docState.doc.storableFields()) {
       final String fieldName = field.name();
@@ -331,28 +358,12 @@ final class DefaultIndexingChain extends DocConsumer {
         } finally {
           if (!success) {
             // dv failed: so just try to bail on the current doc by calling finishDocument()...
-            success = false;
-            try {
-              storedFieldsWriter.finishDocument();
-              success = true;
-            } finally {
-              if (!success) {
-                docWriter.setAborting();
-              }
-            }
+            finishStoredFields();
           }
         }
       }
 
-    success = false;
-    try {
-      storedFieldsWriter.finishDocument();
-      success = true;
-    } finally {
-      if (!success) {
-        docWriter.setAborting();
-      }
-    }
+    finishStoredFields();
   }
 
   private static void verifyFieldType(String name, IndexableFieldType ft) {