LUCENE-818: throw AlreadyClosedException when accessing IndexWriter, *Reader, RAMDirectory and FieldsReader after close()

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@518262 13f79535-47bb-0310-9956-ffa450edef68
Michael McCandless 2007-03-14 18:46:03 +00:00
parent 4648912089
commit 9da8211775
13 changed files with 389 additions and 82 deletions

CHANGES.txt

@ -29,6 +29,11 @@ API Changes
share an index over NFS by customizing when prior commits are
deleted. (Mike McCandless)
4. LUCENE-818: changed most public methods of IndexWriter,
IndexReader (and its subclasses), FieldsReader and RAMDirectory to
throw AlreadyClosedException if they are accessed after being
closed. (Mike McCandless)
Bug fixes
1. LUCENE-804: Fixed build.xml to pack a fully compilable src dist. (Doron Cohen)
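To make the new contract concrete, here is a minimal sketch of what callers see after this change (not taken from the commit; the directory, analyzer and field name are illustrative): once close() returns, further calls on the same IndexWriter fail fast with the unchecked AlreadyClosedException.

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.RAMDirectory;

public class AfterCloseSketch {
  public static void main(String[] args) throws Exception {
    RAMDirectory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
    Document doc = new Document();
    doc.add(new Field("body", "hello world", Field.Store.YES, Field.Index.TOKENIZED));
    writer.addDocument(doc);
    writer.close();
    try {
      writer.addDocument(doc);   // the writer is closed
    } catch (AlreadyClosedException e) {
      // expected: public methods now fail fast after close()
    }
  }
}

The same pattern applies to IndexReader, FieldsReader and RAMDirectory, as the diffs below show.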

FieldsReader.java

@ -20,6 +20,7 @@ package org.apache.lucene.index;
import org.apache.lucene.document.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.AlreadyClosedException;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
@ -46,6 +47,7 @@ final class FieldsReader {
private final IndexInput indexStream;
private int size;
private boolean closed;
private ThreadLocal fieldsStreamTL = new ThreadLocal();
@ -58,6 +60,15 @@ final class FieldsReader {
size = (int) (indexStream.length() / 8);
}
/**
* @throws AlreadyClosedException if this FieldsReader is closed
*/
protected final void ensureOpen() throws AlreadyClosedException {
if (closed) {
throw new AlreadyClosedException("this FieldsReader is closed");
}
}
/**
* Closes the underlying {@link org.apache.lucene.store.IndexInput} streams, including any ones associated with a
* lazy implementation of a Field. This means that the Fields values will not be accessible.
@ -65,6 +76,7 @@ final class FieldsReader {
* @throws IOException
*/
final void close() throws IOException {
if (!closed) {
fieldsStream.close();
cloneableFieldsStream.close();
indexStream.close();
@ -73,6 +85,8 @@ final class FieldsReader {
localFieldsStream.close();
fieldsStreamTL.set(null);
}
closed = true;
}
}
final int size() {
@ -323,6 +337,7 @@ final class FieldsReader {
* binaryValue() must be set.
*/
public byte[] binaryValue() {
ensureOpen();
if (fieldsData == null) {
final byte[] b = new byte[toRead];
IndexInput localFieldsStream = getFieldStream();
@ -349,6 +364,7 @@ final class FieldsReader {
* and binaryValue() must be set.
*/
public Reader readerValue() {
ensureOpen();
return fieldsData instanceof Reader ? (Reader) fieldsData : null;
}
@ -358,6 +374,7 @@ final class FieldsReader {
* binaryValue() must be set.
*/
public String stringValue() {
ensureOpen();
if (fieldsData == null) {
IndexInput localFieldsStream = getFieldStream();
try {
@ -380,18 +397,22 @@ final class FieldsReader {
}
public long getPointer() {
ensureOpen();
return pointer;
}
public void setPointer(long pointer) {
ensureOpen();
this.pointer = pointer;
}
public int getToRead() {
ensureOpen();
return toRead;
}
public void setToRead(int toRead) {
ensureOpen();
this.toRead = toRead;
}
}

FilterIndexReader.java

@ -92,43 +92,84 @@ public class FilterIndexReader extends IndexReader {
public TermFreqVector[] getTermFreqVectors(int docNumber)
throws IOException {
ensureOpen();
return in.getTermFreqVectors(docNumber);
}
public TermFreqVector getTermFreqVector(int docNumber, String field)
throws IOException {
ensureOpen();
return in.getTermFreqVector(docNumber, field);
}
public int numDocs() { return in.numDocs(); }
public int numDocs() {
// Don't call ensureOpen() here (it could affect performance)
return in.numDocs();
}
public int maxDoc() { return in.maxDoc(); }
public int maxDoc() {
// Don't call ensureOpen() here (it could affect performance)
return in.maxDoc();
}
public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException { return in.document(n, fieldSelector); }
public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
ensureOpen();
return in.document(n, fieldSelector);
}
public boolean isDeleted(int n) { return in.isDeleted(n); }
public boolean isDeleted(int n) {
// Don't call ensureOpen() here (it could affect performance)
return in.isDeleted(n);
}
public boolean hasDeletions() { return in.hasDeletions(); }
public boolean hasDeletions() {
// Don't call ensureOpen() here (it could affect performance)
return in.hasDeletions();
}
protected void doUndeleteAll() throws CorruptIndexException, IOException {in.undeleteAll();}
public boolean hasNorms(String field) throws IOException {
ensureOpen();
return in.hasNorms(field);
}
public byte[] norms(String f) throws IOException { return in.norms(f); }
public byte[] norms(String f) throws IOException {
ensureOpen();
return in.norms(f);
}
public void norms(String f, byte[] bytes, int offset) throws IOException {
ensureOpen();
in.norms(f, bytes, offset);
}
protected void doSetNorm(int d, String f, byte b) throws CorruptIndexException, IOException {
in.setNorm(d, f, b);
}
public TermEnum terms() throws IOException { return in.terms(); }
public TermEnum terms() throws IOException {
ensureOpen();
return in.terms();
}
public TermEnum terms(Term t) throws IOException { return in.terms(t); }
public TermEnum terms(Term t) throws IOException {
ensureOpen();
return in.terms(t);
}
public int docFreq(Term t) throws IOException { return in.docFreq(t); }
public int docFreq(Term t) throws IOException {
ensureOpen();
return in.docFreq(t);
}
public TermDocs termDocs() throws IOException { return in.termDocs(); }
public TermDocs termDocs() throws IOException {
ensureOpen();
return in.termDocs();
}
public TermPositions termPositions() throws IOException {
ensureOpen();
return in.termPositions();
}
@ -138,9 +179,17 @@ public class FilterIndexReader extends IndexReader {
public Collection getFieldNames(IndexReader.FieldOption fieldNames) {
ensureOpen();
return in.getFieldNames(fieldNames);
}
public long getVersion() { return in.getVersion(); }
public boolean isCurrent() throws CorruptIndexException, IOException { return in.isCurrent(); }
public long getVersion() {
ensureOpen();
return in.getVersion();
}
public boolean isCurrent() throws CorruptIndexException, IOException {
ensureOpen();
return in.isCurrent();
}
}

IndexReader.java

@ -25,6 +25,7 @@ import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.AlreadyClosedException;
import java.io.File;
import java.io.FileOutputStream;
@ -115,7 +116,16 @@ public abstract class IndexReader {
private boolean directoryOwner;
private boolean closeDirectory;
private IndexDeletionPolicy deletionPolicy;
private boolean isClosed;
private boolean closed;
/**
* @throws AlreadyClosedException if this IndexReader is closed
*/
protected final void ensureOpen() throws AlreadyClosedException {
if (closed) {
throw new AlreadyClosedException("this IndexReader is closed");
}
}
private SegmentInfos segmentInfos;
private Lock writeLock;
@ -208,8 +218,12 @@ public abstract class IndexReader {
}.run();
}
/** Returns the directory this index resides in. */
public Directory directory() { return directory; }
/** Returns the directory this index resides in.
*/
public Directory directory() {
ensureOpen();
return directory;
}
/**
* Returns the time the index in the named directory was last modified.
@ -301,6 +315,7 @@ public abstract class IndexReader {
* Version number when this IndexReader was opened.
*/
public long getVersion() {
ensureOpen();
return segmentInfos.getVersion();
}
@ -313,6 +328,7 @@ public abstract class IndexReader {
* @throws IOException if there is a low-level IO error
*/
public boolean isCurrent() throws CorruptIndexException, IOException {
ensureOpen();
return SegmentInfos.readCurrentVersion(directory) == segmentInfos.getVersion();
}
@ -321,6 +337,7 @@ public abstract class IndexReader {
* @return <code>true</code> if the index is optimized; <code>false</code> otherwise
*/
public boolean isOptimized() {
ensureOpen();
return segmentInfos.size() == 1 && hasDeletions() == false;
}
@ -407,6 +424,7 @@ public abstract class IndexReader {
* @throws IOException if there is a low-level IO error
*/
public Document document(int n) throws CorruptIndexException, IOException {
ensureOpen();
return document(n, null);
}
@ -445,6 +463,7 @@ public abstract class IndexReader {
public boolean hasNorms(String field) throws IOException {
// backward compatible implementation.
// SegmentReader has an efficient implementation.
ensureOpen();
return norms(field) != null;
}
@ -477,11 +496,11 @@ public abstract class IndexReader {
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if this reader was closed already
* or there is a low-level IO error
* @throws IOException if there is a low-level IO error
*/
public final synchronized void setNorm(int doc, String field, byte value)
throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
ensureOpen();
if(directoryOwner)
acquireWriteLock();
hasChanges = true;
@ -504,27 +523,31 @@ public abstract class IndexReader {
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if this reader was closed already
* or there is a low-level IO error
* @throws IOException if there is a low-level IO error
*/
public void setNorm(int doc, String field, float value)
throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
ensureOpen();
setNorm(doc, field, Similarity.encodeNorm(value));
}
/** Returns an enumeration of all the terms in the index.
* The enumeration is ordered by Term.compareTo(). Each term
* is greater than all that precede it in the enumeration.
* @throws IOException if there is a low-level IO error
*/
public abstract TermEnum terms() throws IOException;
/** Returns an enumeration of all terms after a given term.
* The enumeration is ordered by Term.compareTo(). Each term
* is greater than all that precede it in the enumeration.
* @throws IOException if there is a low-level IO error
*/
public abstract TermEnum terms(Term t) throws IOException;
/** Returns the number of documents containing the term <code>t</code>. */
/** Returns the number of documents containing the term <code>t</code>.
* @throws IOException if there is a low-level IO error
*/
public abstract int docFreq(Term t) throws IOException;
/** Returns an enumeration of all the documents which contain
@ -536,14 +559,18 @@ public abstract class IndexReader {
* </ul>
* <p>The enumeration is ordered by document number. Each document number
* is greater than all that precede it in the enumeration.
* @throws IOException if there is a low-level IO error
*/
public TermDocs termDocs(Term term) throws IOException {
ensureOpen();
TermDocs termDocs = termDocs();
termDocs.seek(term);
return termDocs;
}
/** Returns an unpositioned {@link TermDocs} enumerator. */
/** Returns an unpositioned {@link TermDocs} enumerator.
* @throws IOException if there is a low-level IO error
*/
public abstract TermDocs termDocs() throws IOException;
/** Returns an enumeration of all the documents which contain
@ -561,14 +588,18 @@ public abstract class IndexReader {
* <p> This positional information facilitates phrase and proximity searching.
* <p>The enumeration is ordered by document number. Each document number is
* greater than all that precede it in the enumeration.
* @throws IOException if there is a low-level IO error
*/
public TermPositions termPositions(Term term) throws IOException {
ensureOpen();
TermPositions termPositions = termPositions();
termPositions.seek(term);
return termPositions;
}
/** Returns an unpositioned {@link TermPositions} enumerator. */
/** Returns an unpositioned {@link TermPositions} enumerator.
* @throws IOException if there is a low-level IO error
*/
public abstract TermPositions termPositions() throws IOException;
/**
@ -584,10 +615,9 @@ public abstract class IndexReader {
* @throws IOException if there is a low-level IO error
*/
private void acquireWriteLock() throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
ensureOpen();
if (stale)
throw new StaleReaderException("IndexReader out of date and no longer valid for delete, undelete, or setNorm operations");
if (isClosed)
throw new IOException("this reader is closed");
if (writeLock == null) {
Lock writeLock = directory.makeLock(IndexWriter.WRITE_LOCK_NAME);
@ -620,10 +650,10 @@ public abstract class IndexReader {
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if this reader was closed already
* or there is a low-level IO error
* @throws IOException if there is a low-level IO error
*/
public final synchronized void deleteDocument(int docNum) throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
ensureOpen();
if(directoryOwner)
acquireWriteLock();
hasChanges = true;
@ -652,10 +682,10 @@ public abstract class IndexReader {
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if this reader was closed already
* or there is a low-level IO error
* @throws IOException if there is a low-level IO error
*/
public final int deleteDocuments(Term term) throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
ensureOpen();
TermDocs docs = termDocs(term);
if (docs == null) return 0;
int n = 0;
@ -678,10 +708,10 @@ public abstract class IndexReader {
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if this reader was closed already
* or there is a low-level IO error
* @throws IOException if there is a low-level IO error
*/
public final synchronized void undeleteAll() throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
ensureOpen();
if(directoryOwner)
acquireWriteLock();
hasChanges = true;
@ -790,19 +820,16 @@ public abstract class IndexReader {
* Closes files associated with this index.
* Also saves any new deletions to disk.
* No other methods should be called after this has been called.
* @throws IOException if this reader was closed already
* or there is a low-level IO error
* @throws IOException if there is a low-level IO error
*/
public final synchronized void close() throws IOException {
if (directoryOwner && isClosed) {
throw new IOException("this reader is already closed");
}
if (!closed) {
commit();
doClose();
if (directoryOwner)
closed = true;
if(closeDirectory)
directory.close();
if (directoryOwner) {
isClosed = true;
}
}
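As the new if (!closed) guard implies, a second close() on a reader that owns its directory (such as one obtained from IndexReader.open) is now a silent no-op instead of an IOException, while every other guarded method throws the unchecked AlreadyClosedException. A small sketch, assuming dir already holds an index (the helper method is illustrative):

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;

public class ReaderCloseSketch {
  // assumes dir already contains an index
  static void demo(Directory dir) throws Exception {
    IndexReader reader = IndexReader.open(dir);
    reader.close();
    reader.close();              // second close is now a no-op rather than an IOException
    try {
      reader.deleteDocument(0);  // rejected by ensureOpen() before touching the index
    } catch (AlreadyClosedException e) {
      // expected
    }
  }
}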

IndexWriter.java

@ -24,6 +24,7 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.RAMDirectory;
import java.io.File;
@ -208,6 +209,16 @@ public class IndexWriter {
private boolean useCompoundFile = true;
private boolean closeDir;
private boolean closed;
/**
* @throws AlreadyClosedException if this IndexWriter is closed
*/
protected final void ensureOpen() throws AlreadyClosedException {
if (closed) {
throw new AlreadyClosedException("this IndexWriter is closed");
}
}
/** Get the current setting of whether to use the compound file format.
* Note that this just returns the value you set with setUseCompoundFile(boolean)
@ -215,6 +226,7 @@ public class IndexWriter {
* @see #setUseCompoundFile(boolean)
*/
public boolean getUseCompoundFile() {
ensureOpen();
return useCompoundFile;
}
@ -223,6 +235,7 @@ public class IndexWriter {
* is finished. This is done regardless of what directory is in use.
*/
public void setUseCompoundFile(boolean value) {
ensureOpen();
useCompoundFile = value;
}
@ -231,6 +244,7 @@ public class IndexWriter {
* @see Similarity#setDefault(Similarity)
*/
public void setSimilarity(Similarity similarity) {
ensureOpen();
this.similarity = similarity;
}
@ -239,6 +253,7 @@ public class IndexWriter {
* <p>This defaults to the current value of {@link Similarity#getDefault()}.
*/
public Similarity getSimilarity() {
ensureOpen();
return this.similarity;
}
@ -264,6 +279,7 @@ public class IndexWriter {
* @see #DEFAULT_TERM_INDEX_INTERVAL
*/
public void setTermIndexInterval(int interval) {
ensureOpen();
this.termIndexInterval = interval;
}
@ -271,7 +287,10 @@ public class IndexWriter {
*
* @see #setTermIndexInterval(int)
*/
public int getTermIndexInterval() { return termIndexInterval; }
public int getTermIndexInterval() {
ensureOpen();
return termIndexInterval;
}
/**
* Constructs an IndexWriter for the index in <code>path</code>.
@ -580,6 +599,7 @@ public class IndexWriter {
* <p>The default value is {@link Integer#MAX_VALUE}.
*/
public void setMaxMergeDocs(int maxMergeDocs) {
ensureOpen();
this.maxMergeDocs = maxMergeDocs;
}
@ -587,6 +607,7 @@ public class IndexWriter {
* @see #setMaxMergeDocs
*/
public int getMaxMergeDocs() {
ensureOpen();
return maxMergeDocs;
}
@ -603,6 +624,7 @@ public class IndexWriter {
* By default, no more than 10,000 terms will be indexed for a field.
*/
public void setMaxFieldLength(int maxFieldLength) {
ensureOpen();
this.maxFieldLength = maxFieldLength;
}
@ -610,6 +632,7 @@ public class IndexWriter {
* @see #setMaxFieldLength
*/
public int getMaxFieldLength() {
ensureOpen();
return maxFieldLength;
}
@ -624,6 +647,7 @@ public class IndexWriter {
* @throws IllegalArgumentException if maxBufferedDocs is smaller than 2
*/
public void setMaxBufferedDocs(int maxBufferedDocs) {
ensureOpen();
if (maxBufferedDocs < 2)
throw new IllegalArgumentException("maxBufferedDocs must at least be 2");
this.minMergeDocs = maxBufferedDocs;
@ -633,6 +657,7 @@ public class IndexWriter {
* @see #setMaxBufferedDocs
*/
public int getMaxBufferedDocs() {
ensureOpen();
return minMergeDocs;
}
@ -646,6 +671,7 @@ public class IndexWriter {
* @throws IllegalArgumentException if maxBufferedDeleteTerms is smaller than 1</p>
*/
public void setMaxBufferedDeleteTerms(int maxBufferedDeleteTerms) {
ensureOpen();
if (maxBufferedDeleteTerms < 1)
throw new IllegalArgumentException("maxBufferedDeleteTerms must at least be 1");
this.maxBufferedDeleteTerms = maxBufferedDeleteTerms;
@ -655,6 +681,7 @@ public class IndexWriter {
* @see #setMaxBufferedDeleteTerms
*/
public int getMaxBufferedDeleteTerms() {
ensureOpen();
return maxBufferedDeleteTerms;
}
@ -669,6 +696,7 @@ public class IndexWriter {
* <p>This must never be less than 2. The default value is 10.
*/
public void setMergeFactor(int mergeFactor) {
ensureOpen();
if (mergeFactor < 2)
throw new IllegalArgumentException("mergeFactor cannot be less than 2");
this.mergeFactor = mergeFactor;
@ -678,6 +706,7 @@ public class IndexWriter {
* @see #setMergeFactor
*/
public int getMergeFactor() {
ensureOpen();
return mergeFactor;
}
@ -701,6 +730,7 @@ public class IndexWriter {
* to this.
*/
public void setInfoStream(PrintStream infoStream) {
ensureOpen();
this.infoStream = infoStream;
deleter.setInfoStream(infoStream);
}
@ -709,6 +739,7 @@ public class IndexWriter {
* @see #setInfoStream
*/
public PrintStream getInfoStream() {
ensureOpen();
return infoStream;
}
@ -717,6 +748,7 @@ public class IndexWriter {
* @see #setDefaultWriteLockTimeout to change the default value for all instances of IndexWriter.
*/
public void setWriteLockTimeout(long writeLockTimeout) {
ensureOpen();
this.writeLockTimeout = writeLockTimeout;
}
@ -724,6 +756,7 @@ public class IndexWriter {
* @see #setWriteLockTimeout
*/
public long getWriteLockTimeout() {
ensureOpen();
return writeLockTimeout;
}
@ -777,6 +810,7 @@ public class IndexWriter {
* @throws IOException if there is a low-level IO error
*/
public synchronized void close() throws CorruptIndexException, IOException {
if (!closed) {
flushRamSegments();
if (commitPending) {
@ -791,9 +825,12 @@ public class IndexWriter {
writeLock.release(); // release write lock
writeLock = null;
}
closed = true;
if(closeDir)
directory.close();
}
}
/** Release the write lock, if needed. */
protected void finalize() throws Throwable {
@ -809,17 +846,20 @@ public class IndexWriter {
/** Returns the Directory used by this index. */
public Directory getDirectory() {
ensureOpen();
return directory;
}
/** Returns the analyzer used by this index. */
public Analyzer getAnalyzer() {
ensureOpen();
return analyzer;
}
/** Returns the number of documents currently in this index. */
public synchronized int docCount() {
ensureOpen();
int count = ramSegmentInfos.size();
for (int i = 0; i < segmentInfos.size(); i++) {
SegmentInfo si = segmentInfos.info(i);
@ -897,6 +937,7 @@ public class IndexWriter {
* @throws IOException if there is a low-level IO error
*/
public void addDocument(Document doc, Analyzer analyzer) throws CorruptIndexException, IOException {
ensureOpen();
SegmentInfo newSegmentInfo = buildSingleDocSegment(doc, analyzer);
synchronized (this) {
ramSegmentInfos.addElement(newSegmentInfo);
@ -922,6 +963,7 @@ public class IndexWriter {
* @throws IOException if there is a low-level IO error
*/
public synchronized void deleteDocuments(Term term) throws CorruptIndexException, IOException {
ensureOpen();
bufferDeleteTerm(term);
maybeFlushRamSegments();
}
@ -935,6 +977,7 @@ public class IndexWriter {
* @throws IOException if there is a low-level IO error
*/
public synchronized void deleteDocuments(Term[] terms) throws CorruptIndexException, IOException {
ensureOpen();
for (int i = 0; i < terms.length; i++) {
bufferDeleteTerm(terms[i]);
}
@ -954,6 +997,7 @@ public class IndexWriter {
* @throws IOException if there is a low-level IO error
*/
public void updateDocument(Term term, Document doc) throws CorruptIndexException, IOException {
ensureOpen();
updateDocument(term, doc, getAnalyzer());
}
@ -972,6 +1016,7 @@ public class IndexWriter {
*/
public void updateDocument(Term term, Document doc, Analyzer analyzer)
throws CorruptIndexException, IOException {
ensureOpen();
SegmentInfo newSegmentInfo = buildSingleDocSegment(doc, analyzer);
synchronized (this) {
bufferDeleteTerm(term);
@ -1107,6 +1152,7 @@ public class IndexWriter {
* @throws IOException if there is a low-level IO error
*/
public synchronized void optimize() throws CorruptIndexException, IOException {
ensureOpen();
flushRamSegments();
while (segmentInfos.size() > 1 ||
(segmentInfos.size() == 1 &&
@ -1200,6 +1246,7 @@ public class IndexWriter {
* @throws IOException if there is a low-level IO error
*/
public void abort() throws IOException {
ensureOpen();
if (!autoCommit) {
// Keep the same segmentInfos instance but replace all
@ -1290,6 +1337,7 @@ public class IndexWriter {
public synchronized void addIndexes(Directory[] dirs)
throws CorruptIndexException, IOException {
ensureOpen();
optimize(); // start with zero or 1 seg
int start = segmentInfos.size();
@ -1375,6 +1423,7 @@ public class IndexWriter {
// 1 flush ram segments
ensureOpen();
flushRamSegments();
// 2 copy segment infos and find the highest level from dirs
@ -1479,6 +1528,7 @@ public class IndexWriter {
public synchronized void addIndexes(IndexReader[] readers)
throws CorruptIndexException, IOException {
ensureOpen();
optimize(); // start with zero or 1 seg
final String mergedName = newSegmentName();
@ -1610,6 +1660,7 @@ public class IndexWriter {
* @throws IOException if there is a low-level IO error
*/
public final synchronized void flush() throws CorruptIndexException, IOException {
ensureOpen();
flushRamSegments();
}
@ -1617,6 +1668,7 @@ public class IndexWriter {
* Useful for size management with flushRamDocs()
*/
public final long ramSizeInBytes() {
ensureOpen();
return ramDirectory.sizeInBytes();
}
@ -1624,6 +1676,7 @@ public class IndexWriter {
* Useful when calling flushRamSegments()
*/
public final synchronized int numRamDocs() {
ensureOpen();
return ramSegmentInfos.size();
}
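Note that even simple accessors such as getMergeFactor() now call ensureOpen(), so code that merely inspects a closed writer also fails fast. A brief sketch (directory and analyzer are illustrative):

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.RAMDirectory;

public class WriterAccessorSketch {
  public static void main(String[] args) throws Exception {
    IndexWriter writer = new IndexWriter(new RAMDirectory(), new WhitespaceAnalyzer(), true);
    writer.close();
    try {
      writer.getMergeFactor();   // even read-only accessors call ensureOpen()
    } catch (AlreadyClosedException e) {
      // expected after close()
    }
  }
}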

MultiReader.java

@ -72,24 +72,21 @@ public class MultiReader extends IndexReader {
}
/** Return an array of term frequency vectors for the specified document.
* The array contains a vector for each vectorized field in the document.
* Each vector contains term numbers and frequencies for all terms
* in a given vectorized field.
* If no such fields existed, the method returns null.
*/
public TermFreqVector[] getTermFreqVectors(int n) throws IOException {
ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].getTermFreqVectors(n - starts[i]); // dispatch to segment
}
public TermFreqVector getTermFreqVector(int n, String field)
throws IOException {
ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].getTermFreqVector(n - starts[i], field);
}
public synchronized int numDocs() {
// Don't call ensureOpen() here (it could affect performance)
if (numDocs == -1) { // check cache
int n = 0; // cache miss--recompute
for (int i = 0; i < subReaders.length; i++)
@ -100,21 +97,27 @@ public class MultiReader extends IndexReader {
}
public int maxDoc() {
// Don't call ensureOpen() here (it could affect performance)
return maxDoc;
}
// inherit javadoc
public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].document(n - starts[i], fieldSelector); // dispatch to segment reader
}
public boolean isDeleted(int n) {
// Don't call ensureOpen() here (it could affect performance)
int i = readerIndex(n); // find segment num
return subReaders[i].isDeleted(n - starts[i]); // dispatch to segment reader
}
public boolean hasDeletions() { return hasDeletions; }
public boolean hasDeletions() {
// Don't call ensureOpen() here (it could affect performance)
return hasDeletions;
}
protected void doDelete(int n) throws CorruptIndexException, IOException {
numDocs = -1; // invalidate cache
@ -153,6 +156,7 @@ public class MultiReader extends IndexReader {
}
public boolean hasNorms(String field) throws IOException {
ensureOpen();
for (int i = 0; i < subReaders.length; i++) {
if (subReaders[i].hasNorms(field)) return true;
}
@ -166,6 +170,7 @@ public class MultiReader extends IndexReader {
}
public synchronized byte[] norms(String field) throws IOException {
ensureOpen();
byte[] bytes = (byte[])normsCache.get(field);
if (bytes != null)
return bytes; // cache hit
@ -181,6 +186,7 @@ public class MultiReader extends IndexReader {
public synchronized void norms(String field, byte[] result, int offset)
throws IOException {
ensureOpen();
byte[] bytes = (byte[])normsCache.get(field);
if (bytes==null && !hasNorms(field)) bytes=fakeNorms();
if (bytes != null) // cache hit
@ -198,14 +204,17 @@ public class MultiReader extends IndexReader {
}
public TermEnum terms() throws IOException {
ensureOpen();
return new MultiTermEnum(subReaders, starts, null);
}
public TermEnum terms(Term term) throws IOException {
ensureOpen();
return new MultiTermEnum(subReaders, starts, term);
}
public int docFreq(Term t) throws IOException {
ensureOpen();
int total = 0; // sum freqs in segments
for (int i = 0; i < subReaders.length; i++)
total += subReaders[i].docFreq(t);
@ -213,10 +222,12 @@ public class MultiReader extends IndexReader {
}
public TermDocs termDocs() throws IOException {
ensureOpen();
return new MultiTermDocs(subReaders, starts);
}
public TermPositions termPositions() throws IOException {
ensureOpen();
return new MultiTermPositions(subReaders, starts);
}
@ -244,11 +255,9 @@ public class MultiReader extends IndexReader {
subReaders[i].close();
}
/**
* @see IndexReader#getFieldNames(IndexReader.FieldOption)
*/
public Collection getFieldNames (IndexReader.FieldOption fieldNames) {
// maintain a unique set of field names
ensureOpen();
Set fieldSet = new HashSet();
for (int i = 0; i < subReaders.length; i++) {
IndexReader reader = subReaders[i];

ParallelReader.java

@ -66,8 +66,11 @@ public class ParallelReader extends IndexReader {
/** Construct a ParallelReader. */
public ParallelReader() throws IOException { super(null); }
/** Add an IndexReader. */
/** Add an IndexReader.
* @throws IOException if there is a low-level IO error
*/
public void add(IndexReader reader) throws IOException {
ensureOpen();
add(reader, false);
}
@ -79,10 +82,12 @@ public class ParallelReader extends IndexReader {
* of documents
* @throws IllegalArgumentException if not all indexes have the same value
* of {@link IndexReader#maxDoc()}
* @throws IOException if there is a low-level IO error
*/
public void add(IndexReader reader, boolean ignoreStoredFields)
throws IOException {
ensureOpen();
if (readers.size() == 0) {
this.maxDoc = reader.maxDoc();
this.numDocs = reader.numDocs();
@ -110,14 +115,24 @@ public class ParallelReader extends IndexReader {
readers.add(reader);
}
public int numDocs() { return numDocs; }
public int numDocs() {
// Don't call ensureOpen() here (it could affect performance)
return numDocs;
}
public int maxDoc() { return maxDoc; }
public int maxDoc() {
// Don't call ensureOpen() here (it could affect performance)
return maxDoc;
}
public boolean hasDeletions() { return hasDeletions; }
public boolean hasDeletions() {
// Don't call ensureOpen() here (it could affect performance)
return hasDeletions;
}
// check first reader
public boolean isDeleted(int n) {
// Don't call ensureOpen() here (it could affect performance)
if (readers.size() > 0)
return ((IndexReader)readers.get(0)).isDeleted(n);
return false;
@ -141,6 +156,7 @@ public class ParallelReader extends IndexReader {
// append fields from storedFieldReaders
public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
ensureOpen();
Document result = new Document();
for (int i = 0; i < storedFieldReaders.size(); i++) {
IndexReader reader = (IndexReader)storedFieldReaders.get(i);
@ -166,6 +182,7 @@ public class ParallelReader extends IndexReader {
// get all vectors
public TermFreqVector[] getTermFreqVectors(int n) throws IOException {
ensureOpen();
ArrayList results = new ArrayList();
Iterator i = fieldToReader.entrySet().iterator();
while (i.hasNext()) {
@ -182,22 +199,26 @@ public class ParallelReader extends IndexReader {
public TermFreqVector getTermFreqVector(int n, String field)
throws IOException {
ensureOpen();
IndexReader reader = ((IndexReader)fieldToReader.get(field));
return reader==null ? null : reader.getTermFreqVector(n, field);
}
public boolean hasNorms(String field) throws IOException {
ensureOpen();
IndexReader reader = ((IndexReader)fieldToReader.get(field));
return reader==null ? false : reader.hasNorms(field);
}
public byte[] norms(String field) throws IOException {
ensureOpen();
IndexReader reader = ((IndexReader)fieldToReader.get(field));
return reader==null ? null : reader.norms(field);
}
public void norms(String field, byte[] result, int offset)
throws IOException {
ensureOpen();
IndexReader reader = ((IndexReader)fieldToReader.get(field));
if (reader!=null)
reader.norms(field, result, offset);
@ -211,31 +232,38 @@ public class ParallelReader extends IndexReader {
}
public TermEnum terms() throws IOException {
ensureOpen();
return new ParallelTermEnum();
}
public TermEnum terms(Term term) throws IOException {
ensureOpen();
return new ParallelTermEnum(term);
}
public int docFreq(Term term) throws IOException {
ensureOpen();
IndexReader reader = ((IndexReader)fieldToReader.get(term.field()));
return reader==null ? 0 : reader.docFreq(term);
}
public TermDocs termDocs(Term term) throws IOException {
ensureOpen();
return new ParallelTermDocs(term);
}
public TermDocs termDocs() throws IOException {
ensureOpen();
return new ParallelTermDocs();
}
public TermPositions termPositions(Term term) throws IOException {
ensureOpen();
return new ParallelTermPositions(term);
}
public TermPositions termPositions() throws IOException {
ensureOpen();
return new ParallelTermPositions();
}
@ -251,6 +279,7 @@ public class ParallelReader extends IndexReader {
public Collection getFieldNames (IndexReader.FieldOption fieldNames) {
ensureOpen();
Set fieldSet = new HashSet();
for (int i = 0; i < readers.size(); i++) {
IndexReader reader = ((IndexReader)readers.get(i));

SegmentReader.java

@ -266,10 +266,12 @@ class SegmentReader extends IndexReader {
}
static boolean hasDeletions(SegmentInfo si) throws IOException {
// Don't call ensureOpen() here (it could affect performance)
return si.hasDeletions();
}
public boolean hasDeletions() {
// Don't call ensureOpen() here (it could affect performance)
return deletedDocs != null;
}
@ -300,10 +302,12 @@ class SegmentReader extends IndexReader {
}
public TermEnum terms() {
ensureOpen();
return tis.terms();
}
public TermEnum terms(Term t) throws IOException {
ensureOpen();
return tis.terms(t);
}
@ -312,6 +316,7 @@ class SegmentReader extends IndexReader {
* @throws IOException if there is a low-level IO error
*/
public synchronized Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
ensureOpen();
if (isDeleted(n))
throw new IllegalArgumentException
("attempt to access a deleted document");
@ -323,14 +328,17 @@ class SegmentReader extends IndexReader {
}
public TermDocs termDocs() throws IOException {
ensureOpen();
return new SegmentTermDocs(this);
}
public TermPositions termPositions() throws IOException {
ensureOpen();
return new SegmentTermPositions(this);
}
public int docFreq(Term t) throws IOException {
ensureOpen();
TermInfo ti = tis.get(t);
if (ti != null)
return ti.docFreq;
@ -339,6 +347,7 @@ class SegmentReader extends IndexReader {
}
public int numDocs() {
// Don't call ensureOpen() here (it could affect performance)
int n = maxDoc();
if (deletedDocs != null)
n -= deletedDocs.count();
@ -346,6 +355,7 @@ class SegmentReader extends IndexReader {
}
public int maxDoc() {
// Don't call ensureOpen() here (it could affect performance)
return si.docCount;
}
@ -353,6 +363,7 @@ class SegmentReader extends IndexReader {
* @see IndexReader#getFieldNames(IndexReader.FieldOption fldOption)
*/
public Collection getFieldNames(IndexReader.FieldOption fieldOption) {
ensureOpen();
Set fieldSet = new HashSet();
for (int i = 0; i < fieldInfos.size(); i++) {
@ -394,6 +405,7 @@ class SegmentReader extends IndexReader {
public synchronized boolean hasNorms(String field) {
ensureOpen();
return norms.containsKey(field);
}
@ -426,6 +438,7 @@ class SegmentReader extends IndexReader {
// returns fake norms if norms aren't available
public synchronized byte[] norms(String field) throws IOException {
ensureOpen();
byte[] bytes = getNorms(field);
if (bytes==null) bytes=fakeNorms();
return bytes;
@ -447,6 +460,7 @@ class SegmentReader extends IndexReader {
public synchronized void norms(String field, byte[] bytes, int offset)
throws IOException {
ensureOpen();
Norm norm = (Norm) norms.get(field);
if (norm == null) {
System.arraycopy(fakeNorms(), 0, bytes, offset, maxDoc());
@ -537,6 +551,7 @@ class SegmentReader extends IndexReader {
*/
public TermFreqVector getTermFreqVector(int docNumber, String field) throws IOException {
// Check if this field is invalid or has no stored term vector
ensureOpen();
FieldInfo fi = fieldInfos.fieldInfo(field);
if (fi == null || !fi.storeTermVector || termVectorsReaderOrig == null)
return null;
@ -557,6 +572,7 @@ class SegmentReader extends IndexReader {
* @throws IOException
*/
public TermFreqVector[] getTermFreqVectors(int docNumber) throws IOException {
ensureOpen();
if (termVectorsReaderOrig == null)
return null;

AlreadyClosedException.java (new file)

@ -0,0 +1,28 @@
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This exception is thrown when there is an attempt to
* access something that has already been closed.
*/
public class AlreadyClosedException extends IllegalStateException {
public AlreadyClosedException(String message) {
super(message);
}
}
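Since AlreadyClosedException extends IllegalStateException it is unchecked, so none of the guarded methods needed a signature change. The guard idiom the commit pairs with it looks roughly like the sketch below; the class and method names are illustrative, not part of Lucene:

import org.apache.lucene.store.AlreadyClosedException;

// Illustrative only -- shows the idiom this commit adds to IndexWriter,
// IndexReader, FieldsReader and RAMDirectory.
public class CloseGuardSketch {
  private boolean closed;

  protected final void ensureOpen() throws AlreadyClosedException {
    if (closed) {
      throw new AlreadyClosedException("this CloseGuardSketch is closed");
    }
  }

  public void doWork() {
    ensureOpen();          // first statement of every guarded public method
    // ... real work ...
  }

  public void close() {
    if (!closed) {         // keeps close() itself idempotent
      // ... release underlying resources ...
      closed = true;
    }
  }
}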

RAMDirectory.java

@ -97,6 +97,7 @@ public class RAMDirectory extends Directory implements Serializable {
/** Returns an array of strings, one for each file in the directory. */
public synchronized final String[] list() {
ensureOpen();
Set fileNames = fileMap.keySet();
String[] result = new String[fileNames.size()];
int i = 0;
@ -108,6 +109,7 @@ public class RAMDirectory extends Directory implements Serializable {
/** Returns true iff the named file exists in this directory. */
public final boolean fileExists(String name) {
ensureOpen();
RAMFile file;
synchronized (this) {
file = (RAMFile)fileMap.get(name);
@ -119,6 +121,7 @@ public class RAMDirectory extends Directory implements Serializable {
* @throws IOException if the file does not exist
*/
public final long fileModified(String name) throws IOException {
ensureOpen();
RAMFile file;
synchronized (this) {
file = (RAMFile)fileMap.get(name);
@ -132,6 +135,7 @@ public class RAMDirectory extends Directory implements Serializable {
* @throws IOException if the file does not exist
*/
public void touchFile(String name) throws IOException {
ensureOpen();
RAMFile file;
synchronized (this) {
file = (RAMFile)fileMap.get(name);
@ -154,6 +158,7 @@ public class RAMDirectory extends Directory implements Serializable {
* @throws IOException if the file does not exist
*/
public final long fileLength(String name) throws IOException {
ensureOpen();
RAMFile file;
synchronized (this) {
file = (RAMFile)fileMap.get(name);
@ -167,6 +172,7 @@ public class RAMDirectory extends Directory implements Serializable {
* directory. This is currently quantized to
* BufferedIndexOutput.BUFFER_SIZE. */
public synchronized final long sizeInBytes() {
ensureOpen();
return sizeInBytes;
}
@ -174,6 +180,7 @@ public class RAMDirectory extends Directory implements Serializable {
* @throws IOException if the file does not exist
*/
public synchronized void deleteFile(String name) throws IOException {
ensureOpen();
RAMFile file = (RAMFile)fileMap.get(name);
if (file!=null) {
fileMap.remove(name);
@ -188,6 +195,7 @@ public class RAMDirectory extends Directory implements Serializable {
* @deprecated
*/
public synchronized final void renameFile(String from, String to) throws IOException {
ensureOpen();
RAMFile fromFile = (RAMFile)fileMap.get(from);
if (fromFile==null)
throw new FileNotFoundException(from);
@ -202,6 +210,7 @@ public class RAMDirectory extends Directory implements Serializable {
/** Creates a new, empty file in the directory with the given name. Returns a stream writing this file. */
public IndexOutput createOutput(String name) {
ensureOpen();
RAMFile file = new RAMFile(this);
synchronized (this) {
RAMFile existing = (RAMFile)fileMap.get(name);
@ -216,6 +225,7 @@ public class RAMDirectory extends Directory implements Serializable {
/** Returns a stream reading an existing file. */
public IndexInput openInput(String name) throws IOException {
ensureOpen();
RAMFile file;
synchronized (this) {
file = (RAMFile)fileMap.get(name);
@ -230,4 +240,12 @@ public class RAMDirectory extends Directory implements Serializable {
fileMap = null;
}
/**
* @throws AlreadyClosedException if this IndexReader is closed
*/
protected final void ensureOpen() throws AlreadyClosedException {
if (fileMap == null) {
throw new AlreadyClosedException("this RAMDirectory is closed");
}
}
}
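RAMDirectory reuses fileMap == null as its closed flag: close() nulls the map, and ensureOpen() checks it before every file operation. A minimal sketch of the resulting behavior (the file name is illustrative):

import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;

public class RAMDirectoryCloseSketch {
  public static void main(String[] args) throws Exception {
    RAMDirectory dir = new RAMDirectory();
    IndexOutput out = dir.createOutput("sketch.bin");  // file name is illustrative
    out.writeInt(42);
    out.close();
    dir.close();           // close() nulls fileMap, the flag ensureOpen() checks
    try {
      dir.list();          // guarded by ensureOpen()
    } catch (AlreadyClosedException e) {
      // expected: the directory is closed
    }
  }
}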

TestFieldsReader.java

@ -23,6 +23,7 @@ import org.apache.lucene.document.*;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.util._TestUtil;
import java.io.File;
@ -133,6 +134,36 @@ public class TestFieldsReader extends TestCase {
}
}
public void testLazyFieldsAfterClose() throws Exception {
assertTrue(dir != null);
assertTrue(fieldInfos != null);
FieldsReader reader = new FieldsReader(dir, "test", fieldInfos);
assertTrue(reader != null);
assertTrue(reader.size() == 1);
Set loadFieldNames = new HashSet();
loadFieldNames.add(DocHelper.TEXT_FIELD_1_KEY);
loadFieldNames.add(DocHelper.TEXT_FIELD_UTF1_KEY);
Set lazyFieldNames = new HashSet();
lazyFieldNames.add(DocHelper.LARGE_LAZY_FIELD_KEY);
lazyFieldNames.add(DocHelper.LAZY_FIELD_KEY);
lazyFieldNames.add(DocHelper.LAZY_FIELD_BINARY_KEY);
lazyFieldNames.add(DocHelper.TEXT_FIELD_UTF2_KEY);
lazyFieldNames.add(DocHelper.COMPRESSED_TEXT_FIELD_2_KEY);
SetBasedFieldSelector fieldSelector = new SetBasedFieldSelector(loadFieldNames, lazyFieldNames);
Document doc = reader.doc(0, fieldSelector);
assertTrue("doc is null and it shouldn't be", doc != null);
Fieldable field = doc.getFieldable(DocHelper.LAZY_FIELD_KEY);
assertTrue("field is null and it shouldn't be", field != null);
assertTrue("field is not lazy and it should be", field.isLazy());
reader.close();
try {
String value = field.stringValue();
fail("did not hit AlreadyClosedException as expected");
} catch (AlreadyClosedException e) {
// expected
}
}
public void testLoadFirst() throws Exception {
assertTrue(dir != null);
assertTrue(fieldInfos != null);

TestIndexReader.java

@ -26,6 +26,7 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
@ -279,21 +280,21 @@ public class TestIndexReader extends TestCase
try {
reader.deleteDocument(4);
fail("deleteDocument after close failed to throw IOException");
} catch (IOException e) {
} catch (AlreadyClosedException e) {
// expected
}
try {
reader.setNorm(5, "aaa", 2.0f);
fail("setNorm after close failed to throw IOException");
} catch (IOException e) {
} catch (AlreadyClosedException e) {
// expected
}
try {
reader.undeleteAll();
fail("undeleteAll after close failed to throw IOException");
} catch (IOException e) {
} catch (AlreadyClosedException e) {
// expected
}
}

TestIndexWriter.java

@ -19,6 +19,7 @@ import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.store.LockFactory;
@ -724,6 +725,25 @@ public class TestIndexWriter extends TestCase
}
}
public void testChangesAfterClose() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = null;
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
addDoc(writer);
// close
writer.close();
try {
addDoc(writer);
fail("did not hit AlreadyClosedException");
} catch (AlreadyClosedException e) {
// expected
}
}
// Simulate a corrupt index by removing one of the cfs
// files and make sure we get an IOException trying to
// open the index: