LUCENE-1200: prevent rare deadlock in IndexWriter.addIndexes

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@634232 13f79535-47bb-0310-9956-ffa450edef68
Michael McCandless 2008-03-06 11:52:46 +00:00
parent 5b113c8af6
commit 69f35943bd
1 changed file with 98 additions and 39 deletions
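The pattern behind the fix: rather than holding the IndexWriter monitor for the whole of addIndexes (the old synchronized methods could deadlock against merge threads that need the same lock), the writer now pauses DocumentsWriter's threads, runs the slow work with its monitor released, and synchronizes only the short sections that touch shared state. A minimal, self-contained sketch of that shape; Writer, addDocument, and mergeSlowly are hypothetical stand-ins, not Lucene's API:

import java.util.ArrayList;
import java.util.List;

class Writer {
  private final List<String> segments = new ArrayList<String>();
  private boolean paused;

  // Producer threads block here while a bulk operation runs,
  // mirroring DocumentsWriter.pauseAllThreads/resumeAllThreads.
  synchronized void addDocument(String doc) throws InterruptedException {
    while (paused)
      wait();
    segments.add(doc);
  }

  void addIndexes(List<String> external) throws InterruptedException {
    synchronized (this) { paused = true; }   // pause adds/deletes
    try {
      // Long-running work happens with the monitor released, so other
      // threads (e.g. a merge scheduler) can still acquire it.
      List<String> merged = mergeSlowly(external);
      synchronized (this) {                  // short critical section
        segments.addAll(merged);
      }
    } finally {
      synchronized (this) {                  // always release producers
        paused = false;
        notifyAll();
      }
    }
  }

  private List<String> mergeSlowly(List<String> src) throws InterruptedException {
    Thread.sleep(10);                        // stand-in for flush/merge I/O
    return new ArrayList<String>(src);
  }
}

The try/finally bracket mirrors the commit's docWriter.pauseAllThreads()/resumeAllThreads() pairing: producers are always released, even when the bulk operation throws.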


@@ -2445,7 +2445,7 @@ public class IndexWriter {
    * within the transactions, so they must be flushed before the
    * transaction is started.
    */
-  private void startTransaction() throws IOException {
+  private synchronized void startTransaction() throws IOException {
     if (infoStream != null)
       message("now start transaction");
@@ -2478,7 +2478,7 @@
    * Rolls back the transaction and restores state to where
    * we were at the start.
    */
-  private void rollbackTransaction() throws IOException {
+  private synchronized void rollbackTransaction() throws IOException {
     if (infoStream != null)
       message("now rollback transaction");
@@ -2513,7 +2513,7 @@
    * segments file and remove any pending deletions we have
    * accumulated during the transaction
    */
-  private void commitTransaction() throws IOException {
+  private synchronized void commitTransaction() throws IOException {
     if (infoStream != null)
       message("now commit transaction");
@@ -2682,6 +2682,10 @@
    * each input Directory, so it is up to the caller to
    * enforce this.
    *
+   * <p><b>NOTE:</b> while this is running, any attempts to
+   * add or delete documents (with another thread) will be
+   * paused until this method completes.
+   *
    * <p>After this completes, the index is optimized.
    *
    * <p>This method is transactional in how Exceptions are
@@ -2720,11 +2724,16 @@
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    */
-  public synchronized void addIndexes(Directory[] dirs)
+  public void addIndexes(Directory[] dirs)
     throws CorruptIndexException, IOException {
     ensureOpen();
+    // Do not allow add docs or deletes while we are running:
+    docWriter.pauseAllThreads();
+    try {
     if (infoStream != null)
       message("flush at addIndexes");
     flush(true, false, true);
@@ -2734,14 +2743,17 @@
     startTransaction();
     try {
       int docCount = 0;
-      for (int i = 0; i < dirs.length; i++) {
-        SegmentInfos sis = new SegmentInfos(); // read infos from dir
-        sis.read(dirs[i]);
-        for (int j = 0; j < sis.size(); j++) {
-          final SegmentInfo info = sis.info(j);
-          docCount += info.docCount;
-          segmentInfos.addElement(info); // add each info
+      synchronized(this) {
+        for (int i = 0; i < dirs.length; i++) {
+          SegmentInfos sis = new SegmentInfos(); // read infos from dir
+          sis.read(dirs[i]);
+          for (int j = 0; j < sis.size(); j++) {
+            final SegmentInfo info = sis.info(j);
+            docCount += info.docCount;
+            segmentInfos.addElement(info); // add each info
+          }
+        }
       }
@@ -2761,6 +2773,8 @@
     } catch (OutOfMemoryError oom) {
       hitOOM = true;
       throw oom;
+    } finally {
+      docWriter.resumeAllThreads();
     }
   }
@@ -2782,6 +2796,10 @@
    * each input Directory, so it is up to the caller to
    * enforce this.
    *
+   * <p><b>NOTE:</b> while this is running, any attempts to
+   * add or delete documents (with another thread) will be
+   * paused until this method completes.
+   *
    * <p>
    * This requires this index not be among those to be added, and the
    * upper bound* of those segment doc counts not exceed maxMergeDocs.
@@ -2793,11 +2811,14 @@
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    */
-  public synchronized void addIndexesNoOptimize(Directory[] dirs)
+  public void addIndexesNoOptimize(Directory[] dirs)
     throws CorruptIndexException, IOException {
     ensureOpen();
+    // Do not allow add docs or deletes while we are running:
+    docWriter.pauseAllThreads();
+    try {
     if (infoStream != null)
       message("flush at addIndexesNoOptimize");
@ -2810,18 +2831,20 @@ public class IndexWriter {
try {
int docCount = 0;
for (int i = 0; i < dirs.length; i++) {
if (directory == dirs[i]) {
// cannot add this index: segments may be deleted in merge before added
throw new IllegalArgumentException("Cannot add this index to itself");
}
synchronized(this) {
for (int i = 0; i < dirs.length; i++) {
if (directory == dirs[i]) {
// cannot add this index: segments may be deleted in merge before added
throw new IllegalArgumentException("Cannot add this index to itself");
}
SegmentInfos sis = new SegmentInfos(); // read infos from dir
sis.read(dirs[i]);
for (int j = 0; j < sis.size(); j++) {
SegmentInfo info = sis.info(j);
docCount += info.docCount;
segmentInfos.addElement(info); // add each info
SegmentInfos sis = new SegmentInfos(); // read infos from dir
sis.read(dirs[i]);
for (int j = 0; j < sis.size(); j++) {
SegmentInfo info = sis.info(j);
docCount += info.docCount;
segmentInfos.addElement(info); // add each info
}
}
}
@@ -2849,18 +2872,30 @@
     } catch (OutOfMemoryError oom) {
       hitOOM = true;
       throw oom;
+    } finally {
+      docWriter.resumeAllThreads();
     }
   }
 
   /* If any of our segments are using a directory != ours
    * then copy them over. Currently this is only used by
    * addIndexesNoOptimize(). */
-  private synchronized void copyExternalSegments() throws CorruptIndexException, IOException {
-    final int numSegments = segmentInfos.size();
-    for(int i=0;i<numSegments;i++) {
-      SegmentInfo info = segmentInfos.info(i);
-      if (info.dir != directory) {
-        MergePolicy.OneMerge merge = new MergePolicy.OneMerge(segmentInfos.range(i, 1+i), info.getUseCompoundFile());
+  private void copyExternalSegments() throws CorruptIndexException, IOException {
+    while(true) {
+      SegmentInfo info = null;
+      MergePolicy.OneMerge merge = null;
+      synchronized(this) {
+        final int numSegments = segmentInfos.size();
+        for(int i=0;i<numSegments;i++) {
+          info = segmentInfos.info(i);
+          if (info.dir != directory) {
+            merge = new MergePolicy.OneMerge(segmentInfos.range(i, 1+i), info.getUseCompoundFile());
+            break;
+          }
+        }
+      }
+      if (merge != null) {
         if (registerMerge(merge)) {
           pendingMerges.remove(merge);
           runningMerges.add(merge);
@@ -2876,7 +2911,9 @@
           // that an IndexReader would fail to load).
           throw new MergePolicy.MergeException("segment \"" + info.name + "\" exists in external directory yet the MergeScheduler executed the merge in a separate thread",
                                                directory);
-        }
+      } else
+        // No more external segments
+        break;
     }
   }
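The rewritten copyExternalSegments above replaces a fully synchronized scan with a pick-then-work loop: select the next external segment while holding the monitor, release it to run the merge, and repeat until nothing external remains. A self-contained sketch of that loop shape; ExternalCopier and copy are hypothetical stand-ins, not the real merge machinery:

import java.util.ArrayDeque;
import java.util.Deque;

class ExternalCopier {
  private final Deque<String> external = new ArrayDeque<String>();

  void copyExternalSegments() {
    while (true) {
      String segment;
      synchronized (this) {   // short: only select the next candidate
        segment = external.poll();
      }
      if (segment == null)
        break;                // no more external segments
      copy(segment);          // long-running I/O with the monitor released
    }
  }

  private void copy(String segment) {
    // stand-in for merging one external segment into the local directory
  }
}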
@@ -2884,6 +2921,16 @@
    * <p>After this completes, the index is optimized. </p>
    * <p>The provided IndexReaders are not closed.</p>
+   * <p><b>NOTE:</b> the index in each Directory must not be
+   * changed (opened by a writer) while this method is
+   * running. This method does not acquire a write lock in
+   * each input Directory, so it is up to the caller to
+   * enforce this.
+   *
+   * <p><b>NOTE:</b> while this is running, any attempts to
+   * add or delete documents (with another thread) will be
+   * paused until this method completes.
+   *
    * <p>See {@link #addIndexes(Directory[])} for
    * details on transactional semantics, temporary free
    * space required in the Directory, and non-CFS segments
@@ -2891,10 +2938,14 @@
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    */
-  public synchronized void addIndexes(IndexReader[] readers)
+  public void addIndexes(IndexReader[] readers)
     throws CorruptIndexException, IOException {
     ensureOpen();
+    // Do not allow add docs or deletes while we are running:
+    docWriter.pauseAllThreads();
+    try {
     optimize(); // start with zero or 1 seg
@ -2905,9 +2956,11 @@ public class IndexWriter {
IndexReader sReader = null;
try {
if (segmentInfos.size() == 1){ // add existing index, if any
sReader = SegmentReader.get(segmentInfos.info(0));
merger.add(sReader);
synchronized(this) {
if (segmentInfos.size() == 1){ // add existing index, if any
sReader = SegmentReader.get(segmentInfos.info(0));
merger.add(sReader);
}
}
for (int i = 0; i < readers.length; i++) // add new indexes
@ -2925,10 +2978,12 @@ public class IndexWriter {
sReader = null;
}
segmentInfos.setSize(0); // pop old infos & add new
info = new SegmentInfo(mergedName, docCount, directory, false, true,
-1, null, false);
segmentInfos.addElement(info);
synchronized(this) {
segmentInfos.setSize(0); // pop old infos & add new
info = new SegmentInfo(mergedName, docCount, directory, false, true,
-1, null, false);
segmentInfos.addElement(info);
}
// Notify DocumentsWriter that the flushed count just increased
docWriter.updateFlushedDocCount(docCount);
@ -2959,7 +3014,9 @@ public class IndexWriter {
try {
merger.createCompoundFile(mergedName + ".cfs");
info.setUseCompoundFile(true);
synchronized(this) {
info.setUseCompoundFile(true);
}
} finally {
if (!success) {
if (infoStream != null)
@@ -2974,6 +3031,8 @@
     } catch (OutOfMemoryError oom) {
       hitOOM = true;
       throw oom;
+    } finally {
+      docWriter.resumeAllThreads();
     }
   }
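For reference, a hedged caller-side sketch against the 2.x-era API this diff touches (the constructor and directory paths are assumptions based on Lucene 2.3-vintage signatures, not taken from this commit): after this change, another thread may keep calling addDocument while addIndexes runs; it is paused rather than deadlocked.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class MergeIndexes {
  public static void main(String[] args) throws Exception {
    Directory target = FSDirectory.getDirectory("/tmp/target");
    Directory a = FSDirectory.getDirectory("/tmp/a");
    Directory b = FSDirectory.getDirectory("/tmp/b");

    IndexWriter writer = new IndexWriter(target, new StandardAnalyzer(), true);
    try {
      // Pauses concurrent add/delete threads until it completes,
      // then leaves the target index optimized.
      writer.addIndexes(new Directory[] { a, b });
    } finally {
      writer.close();
    }
  }
}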