mirror of https://github.com/apache/lucene.git
only open .nrm file once: LUCENE-821
git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@514344 13f79535-47bb-0310-9956-ffa450edef68
parent d7e203c8b4
commit e6a9d9b318
CHANGES.txt
@@ -34,6 +34,10 @@ Bug fixes
    implementations to be specified via the System property
    org.apache.lucene.store.FSDirectoryLockFactoryClass. (Mike McCandless)
 
+ 4. LUCENE-821: The new single-norm-file introduced by LUCENE-756
+    failed to reduce the number of open descriptors since it was still
+    opened once per field with norms. (yonik)
+
 New features
 
  1. LUCENE-759: Added two n-gram-producing TokenFilters.
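The CHANGES entry above describes the whole fix: since LUCENE-756 all norms for a segment live in one .nrm file, but SegmentReader still called openInput once per field, so the descriptor count never dropped. The sketch below is a minimal standalone illustration of the repaired layout, not Lucene code; NormsFileDemo, registerField, and RandomAccessFile (standing in for IndexInput) are all hypothetical.

    // Hypothetical sketch of one shared handle plus per-field offsets,
    // mirroring singleNormStream/nextNormSeek in the hunks below.
    import java.io.IOException;
    import java.io.RandomAccessFile;

    class NormsFileDemo {
      private RandomAccessFile sharedNorms; // plays the role of singleNormStream
      private long nextNormSeek = 0;        // next free offset in the file

      // Each field's norms occupy maxDoc consecutive bytes. Registering a
      // field records its offset instead of opening the file again, so N
      // fields cost one descriptor rather than N.
      long registerField(String nrmFile, int maxDoc) throws IOException {
        if (sharedNorms == null) {
          sharedNorms = new RandomAccessFile(nrmFile, "r"); // opened once
        }
        long seek = nextNormSeek;
        nextNormSeek += maxDoc;
        return seek;
      }
    }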
SegmentReader.java
@@ -54,6 +54,9 @@ class SegmentReader extends IndexReader {
   IndexInput freqStream;
   IndexInput proxStream;
 
+  // optionally used for the .nrm file shared by multiple norms
+  private IndexInput singleNormStream;
+
   // Compound File Reader when based on a compound file segment
   CompoundFileReader cfsReader = null;
 
@@ -92,6 +95,17 @@ class SegmentReader extends IndexReader {
       }
       this.dirty = false;
     }
+
+    /** Closes the underlying IndexInput for this norm.
+     * It is still valid to access all other norm properties after close is called.
+     * @throws IOException
+     */
+    public void close() throws IOException {
+      if (in != null && in != singleNormStream) {
+        in.close();
+      }
+      in = null;
+    }
   }
 
   private Hashtable norms = new Hashtable();
@@ -457,6 +471,9 @@ class SegmentReader extends IndexReader {
         byte[] bytes = new byte[maxDoc()];
         norms(field, bytes, 0);
         norm.bytes = bytes; // cache it
+        // it's OK to close the underlying IndexInput as we have cached the
+        // norms and will never read them again.
+        norm.close();
       }
       return norm.bytes;
   }
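The hunk above adds a cache-then-close step: once the norms have been copied into norm.bytes on the heap, the stream serves no further reads and its descriptor can be released immediately. A minimal sketch of the same pattern, with hypothetical names and RandomAccessFile in place of IndexInput:

    import java.io.IOException;
    import java.io.RandomAccessFile;

    class NormCacheDemo {
      // Read all norms into memory, then drop the descriptor; the returned
      // array stays valid after close, just as norm.bytes does above.
      static byte[] readAndRelease(String file, int maxDoc) throws IOException {
        RandomAccessFile in = new RandomAccessFile(file, "r");
        try {
          byte[] bytes = new byte[maxDoc];
          in.readFully(bytes);
          return bytes;
        } finally {
          in.close(); // safe: the cached bytes outlive the stream
        }
      }
    }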
@ -473,6 +490,7 @@ class SegmentReader extends IndexReader {
|
|||
Norm norm = (Norm) norms.get(field);
|
||||
if (norm == null) // not an indexed field
|
||||
return;
|
||||
|
||||
norm.dirty = true; // mark it dirty
|
||||
normsDirty = true;
|
||||
|
||||
|
@@ -494,13 +512,10 @@ class SegmentReader extends IndexReader {
       return;
     }
 
-    IndexInput normStream = (IndexInput) norm.in.clone();
-    try { // read from disk
-      normStream.seek(norm.normSeek);
-      normStream.readBytes(bytes, offset, maxDoc());
-    } finally {
-      normStream.close();
-    }
+    // Read from disk. norm.in may be shared across multiple norms and
+    // should only be used in a synchronized context.
+    norm.in.seek(norm.normSeek);
+    norm.in.readBytes(bytes, offset, maxDoc());
   }
 
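This hunk trades one read strategy for another. The old code cloned norm.in for every call; an IndexInput clone keeps an independent file position, so concurrent reads could not corrupt each other, but each call paid for a clone. The new code seeks the shared stream directly, which is only safe because norms(...) runs with the reader's monitor held. A hedged sketch of that constraint, with hypothetical names:

    import java.io.IOException;
    import java.io.RandomAccessFile;

    class SharedReadDemo {
      private final RandomAccessFile shared; // single file pointer for all fields

      SharedReadDemo(String file) throws IOException {
        shared = new RandomAccessFile(file, "r");
      }

      // seek and read must not interleave with another thread's seek, so
      // every caller must hold the same lock (here, the instance monitor).
      synchronized void read(long offset, byte[] bytes) throws IOException {
        shared.seek(offset);
        shared.readFully(bytes);
      }
    }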
@@ -515,8 +530,27 @@ class SegmentReader extends IndexReader {
         if (!si.hasSeparateNorms(fi.number)) {
           d = cfsDir;
         }
-        long normSeek = (fileName.endsWith("." + IndexFileNames.NORMS_EXTENSION) ? nextNormSeek : 0);
-        norms.put(fi.name, new Norm(d.openInput(fileName), fi.number, normSeek));
+
+        // singleNormFile means multiple norms share this file
+        boolean singleNormFile = fileName.endsWith("." + IndexFileNames.NORMS_EXTENSION);
+        IndexInput normInput = null;
+        long normSeek;
+
+        if (singleNormFile) {
+          normSeek = nextNormSeek;
+          if (singleNormStream==null) {
+            singleNormStream = d.openInput(fileName);
+          }
+          // All norms in the .nrm file can share a single IndexInput since
+          // they are only used in a synchronized context.
+          // If this were to change in the future, a clone could be done here.
+          normInput = singleNormStream;
+        } else {
+          normSeek = 0;
+          normInput = d.openInput(fileName);
+        }
+
+        norms.put(fi.name, new Norm(normInput, fi.number, normSeek));
         nextNormSeek += maxDoc; // increment also if some norms are separate
       }
   }
@@ -527,7 +561,11 @@ class SegmentReader extends IndexReader {
       Enumeration enumerator = norms.elements();
       while (enumerator.hasMoreElements()) {
         Norm norm = (Norm) enumerator.nextElement();
-        norm.in.close();
+        norm.close();
       }
+      if (singleNormStream != null) {
+        singleNormStream.close();
+        singleNormStream = null;
+      }
     }
   }
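The last two hunks establish a simple ownership rule: Norm.close() (added earlier) closes only a stream the Norm opened itself, skipping the shared one, while the reader closes singleNormStream exactly once at the end. That prevents both descriptor leaks and double closes. A minimal sketch of the rule, with hypothetical names:

    import java.io.IOException;
    import java.io.RandomAccessFile;

    class CloseOnceDemo {
      private RandomAccessFile shared;      // stands in for singleNormStream
      private RandomAccessFile[] perField   // per-field streams; entries may
          = new RandomAccessFile[0];        // alias the shared stream

      void closeAll() throws IOException {
        for (RandomAccessFile in : perField) {
          if (in != null && in != shared) { // skip the shared handle
            in.close();
          }
        }
        if (shared != null) {
          shared.close();                   // closed once, not once per field
          shared = null;
        }
      }
    }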
TestMultiSearcher.java
@@ -149,7 +149,6 @@ public class TestMultiSearcher extends TestCase
         // no exception should happen at this point
         Document d = hits2.doc(i);
       }
-      mSearcher2.close();
 
       // test the subSearcher() method:
       Query subSearcherQuery = parser.parse("id:doc1");
@@ -161,6 +160,7 @@ public class TestMultiSearcher extends TestCase
       hits2 = mSearcher2.search(subSearcherQuery);
       assertEquals(1, hits2.length());
       assertEquals(1, mSearcher2.subSearcher(hits2.id(0))); // hit from searchers2[1]
+      mSearcher2.close();
 
       //--------------------------------------------------------------------
       // scenario 3