LUCENE-4061: fixed another potential concurrency issue; deleted TestIndexClose - the test-framework guarantees we close all our resources after us

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1339491 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Shai Erera 2012-05-17 05:31:39 +00:00
parent bc33ed0ef2
commit 3e7339af68
2 changed files with 19 additions and 226 deletions

View File

@ -36,7 +36,6 @@ import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.LogByteSizeMergePolicy;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
@ -46,7 +45,6 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.NativeFSLockFactory;
import org.apache.lucene.store.SimpleFSLockFactory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Version;
@ -232,7 +230,7 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
// Make sure that the taxonomy always contain the root category
// with category id 0.
addCategory(new CategoryPath());
refreshReader();
refreshInternalReader();
} else {
// There are some categories on the disk, which we have not yet
// read into the cache, and therefore the cache is incomplete.
@ -289,14 +287,14 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
new LogByteSizeMergePolicy());
}
// Currently overridden by a unit test that verifies that every index we open is close()ed.
/**
* Open an {@link IndexReader} from the internal {@link IndexWriter}, by
* calling {@link IndexReader#open(IndexWriter, boolean)}. Extending classes can override
* this method to return their own {@link IndexReader}.
*/
protected DirectoryReader openReader() throws IOException {
return DirectoryReader.open(indexWriter, true);
/**
 * Opens a {@link DirectoryReader} from the internal {@link IndexWriter} and
 * stores it in {@code reader}. The reader is opened NRT-style on the writer;
 * the {@code false} argument presumably means deletes are not applied on
 * open -- TODO confirm against the DirectoryReader.open(IndexWriter, boolean)
 * javadocs.
 */
private synchronized void openInternalReader() throws IOException {
// verify that the taxo-writer hasn't been closed on us. the method is
// synchronized since it may be called from a non sync'ed block, and it
// needs to protect against close() happening concurrently.
ensureOpen();
// at most one internal reader may be open at a time; callers invoke this
// only after observing reader == null.
assert reader == null : "a reader is already open !";
reader = DirectoryReader.open(indexWriter, false);
}
/**
@ -398,7 +396,7 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
// We need to get an answer from the on-disk index. If a reader
// is not yet open, do it now:
if (reader == null) {
reader = openReader();
openInternalReader();
}
int base = 0;
@ -442,7 +440,7 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
return cache.get(categoryPath, prefixLen);
}
if (reader == null) {
reader = openReader();
openInternalReader();
}
int base = 0;
@ -615,7 +613,7 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
// Because this is a slow operation, cache implementations are
// expected not to delete entries one-by-one but rather in bulk
// (LruTaxonomyWriterCache removes the 2/3rd oldest entries).
refreshReader();
refreshInternalReader();
cacheIsComplete = false;
}
}
@ -623,12 +621,12 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
private void addToCache(CategoryPath categoryPath, int prefixLen, int id)
throws IOException {
if (cache.put(categoryPath, prefixLen, id)) {
refreshReader();
refreshInternalReader();
cacheIsComplete = false;
}
}
protected synchronized void refreshReader() throws IOException {
private synchronized void refreshInternalReader() throws IOException {
if (reader != null) {
DirectoryReader r2 = DirectoryReader.openIfChanged(reader);
if (r2 != null) {
@ -648,7 +646,7 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
public synchronized void commit() throws CorruptIndexException, IOException {
ensureOpen();
indexWriter.commit(combinedCommitData(null));
refreshReader();
refreshInternalReader();
}
/**
@ -674,7 +672,7 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
public synchronized void commit(Map<String,String> commitUserData) throws CorruptIndexException, IOException {
ensureOpen();
indexWriter.commit(combinedCommitData(commitUserData));
refreshReader();
refreshInternalReader();
}
/**
@ -759,7 +757,7 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
// TODO (Facet): we should probably completely clear the cache before starting
// to read it?
if (reader == null) {
reader = openReader();
openInternalReader();
}
if (!cache.hasRoom(reader.numDocs())) {
@ -826,7 +824,7 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
private synchronized ParentArray getParentArray() throws IOException {
if (parentArray==null) {
if (reader == null) {
reader = openReader();
openInternalReader();
}
parentArray = new ParentArray();
parentArray.refresh(reader);
@ -888,18 +886,6 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
}
}
/**
 * Expert: returns the {@link DirectoryReader} that this taxonomy writer keeps
 * open internally (may be {@code null} if none has been opened yet -- the
 * reader is opened lazily).
 * <p>
 * Note that any call to refresh() invalidates the returned reader, so the
 * caller needs to take care of appropriate locking.
 *
 * @return the internal Lucene index reader, possibly {@code null}
 */
DirectoryReader getInternalIndexReader() {
ensureOpen();
return reader;
}
/**
* Mapping from old ordinal to new ordinals, used when merging indexes
* with separate taxonomies.

View File

@ -1,193 +0,0 @@
package org.apache.lucene.facet.taxonomy.directory;
import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.IdentityHashMap;
import java.util.Set;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.LockObtainFailedException;
import org.junit.Test;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.facet.taxonomy.CategoryPath;
import org.apache.lucene.facet.taxonomy.InconsistentTaxonomyException;
import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyReader;
import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * This test case attempts to catch index "leaks" in LuceneTaxonomyReader/Writer,
 * i.e., cases where an index has been opened, but never closed; In that case,
 * Java would eventually collect this object and close the index, but leaving
 * the index open might nevertheless cause problems - e.g., on Windows it prevents
 * deleting it.
 */
public class TestIndexClose extends LuceneTestCase {
// Exercises open/close cycles of taxonomy writers and readers and asserts,
// after every close(), that no instrumented reader or writer is left open.
@Test
public void testLeaks() throws Exception {
LeakChecker checker = new LeakChecker();
Directory dir = newDirectory();
DirectoryTaxonomyWriter tw = checker.openWriter(dir);
tw.close();
assertEquals(0, checker.nopen());
tw = checker.openWriter(dir);
tw.addCategory(new CategoryPath("animal", "dog"));
tw.close();
assertEquals(0, checker.nopen());
DirectoryTaxonomyReader tr = checker.openReader(dir);
tr.getPath(1);
tr.refresh();
tr.close();
assertEquals(0, checker.nopen());
// Interleaved usage: refresh the reader while a writer is open, and again
// after the writer commits and closes.
tr = checker.openReader(dir);
tw = checker.openWriter(dir);
tw.addCategory(new CategoryPath("animal", "cat"));
tr.refresh();
tw.commit();
tw.close();
tr.refresh();
tr.close();
assertEquals(0, checker.nopen());
// Bulk additions trigger internal reader refreshes inside the writer,
// which is where leaked readers would show up.
tw = checker.openWriter(dir);
for (int i=0; i<10000; i++) {
tw.addCategory(new CategoryPath("number", Integer.toString(i)));
}
tw.close();
assertEquals(0, checker.nopen());
tw = checker.openWriter(dir);
for (int i=0; i<10000; i++) {
tw.addCategory(new CategoryPath("number", Integer.toString(i*2)));
}
tw.close();
assertEquals(0, checker.nopen());
dir.close();
}
// Factory + registry: wraps taxonomy readers/writers in instrumented
// subclasses so that every internally opened IndexReader/IndexWriter is
// recorded, and counts the ones still open via nopen().
private static class LeakChecker {
// identity-based set: distinct reader instances must be tracked separately
// even if they compare equal()
Set<DirectoryReader> readers = Collections.newSetFromMap(new IdentityHashMap<DirectoryReader,Boolean>());
// running id handed out to each instrumented IndexWriter
int iwriter=0;
// ids of writers opened but not yet closed
Set<Integer> openWriters = new HashSet<Integer>();
LeakChecker() { }
public DirectoryTaxonomyWriter openWriter(Directory dir) throws CorruptIndexException, LockObtainFailedException, IOException {
return new InstrumentedTaxonomyWriter(dir);
}
public DirectoryTaxonomyReader openReader(Directory dir) throws CorruptIndexException, LockObtainFailedException, IOException {
return new InstrumentedTaxonomyReader(dir);
}
// Returns the number of leaks: tracked readers still referenced
// (refCount > 0) plus writers not yet closed; logs each offender.
public int nopen() {
int ret=0;
for (DirectoryReader r: readers) {
if (r.getRefCount() > 0) {
System.err.println("reader "+r+" still open");
ret++;
}
}
for (int i: openWriters) {
System.err.println("writer "+i+" still open");
ret++;
}
return ret;
}
// Taxonomy writer that registers every index reader it opens (including
// readers swapped in by refreshReader()) with the enclosing LeakChecker.
private class InstrumentedTaxonomyWriter extends DirectoryTaxonomyWriter {
public InstrumentedTaxonomyWriter(Directory dir) throws CorruptIndexException, LockObtainFailedException, IOException {
super(dir);
}
@Override
protected DirectoryReader openReader() throws IOException {
DirectoryReader r = super.openReader();
readers.add(r);
return r;
}
@Override
protected synchronized void refreshReader() throws IOException {
super.refreshReader();
// refresh may have replaced the internal reader instance; track the
// current one too (identity set makes duplicates harmless)
final DirectoryReader r = getInternalIndexReader();
if (r != null) readers.add(r);
}
@Override
protected IndexWriter openIndexWriter (Directory directory, IndexWriterConfig config) throws IOException {
return new InstrumentedIndexWriter(directory, config);
}
@Override
protected IndexWriterConfig createIndexWriterConfig(OpenMode openMode) {
return newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.KEYWORD, false))
.setOpenMode(openMode).setMergePolicy(newLogMergePolicy());
}
}
// Taxonomy reader that registers every underlying index reader it opens
// (including the one swapped in by refresh()).
private class InstrumentedTaxonomyReader extends DirectoryTaxonomyReader {
public InstrumentedTaxonomyReader(Directory dir) throws CorruptIndexException, LockObtainFailedException, IOException {
super(dir);
}
@Override
protected DirectoryReader openIndexReader(Directory dir) throws CorruptIndexException, IOException {
DirectoryReader r = super.openIndexReader(dir);
readers.add(r);
return r;
}
@Override
public synchronized boolean refresh() throws IOException, InconsistentTaxonomyException {
final boolean ret = super.refresh();
readers.add(getInternalIndexReader());
return ret;
}
}
// IndexWriter that records its open/closed state (by numeric id) in the
// enclosing LeakChecker and fails the test on a double close().
private class InstrumentedIndexWriter extends IndexWriter {
int mynum;
public InstrumentedIndexWriter(Directory d, IndexWriterConfig conf) throws CorruptIndexException, LockObtainFailedException, IOException {
super(d, conf);
mynum = iwriter++;
openWriters.add(mynum);
// System.err.println("openedw "+mynum);
}
@Override
public void close() throws IOException {
super.close();
if (!openWriters.contains(mynum)) { // probably can't happen...
fail("Writer #"+mynum+" was closed twice!");
}
openWriters.remove(mynum);
// System.err.println("closedw "+mynum);
}
}
}
}