mirror of https://github.com/apache/lucene.git
LUCENE-1084: change all IW ctors to require up-front specification of max field length; deprecated existing ones
git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@615168 13f79535-47bb-0310-9956-ffa450edef68
parent 674eae39ec
commit ee835ccf21
@@ -7,6 +7,11 @@ Changes in runtime behavior

API Changes

 1. LUCENE-1084: Changed all IndexWriter constructors to take an
    explicit parameter for maximum field size.  Deprecated all the
    pre-existing constructors; these will be removed in release 3.0.
    (Steven Rowe via Mike McCandless)

Bug fixes

New features

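For readers migrating, a minimal sketch of the old and new constructor calls this entry refers to; the index path and analyzer below are placeholders, not part of the commit:

```java
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;

public class MigrationSketch {
  public static void main(String[] args) throws Exception {
    // Deprecated form (scheduled for removal in 3.0): the field-length limit is implicit.
    IndexWriter oldStyle = new IndexWriter("/tmp/testindex", new StandardAnalyzer(), true);
    oldStyle.close();

    // New form introduced by this commit: the caller states the policy up front.
    IndexWriter newStyle = new IndexWriter("/tmp/testindex", new StandardAnalyzer(), true,
        IndexWriter.MaxFieldLength.LIMITED);
    newStyle.close();
  }
}
```
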
@@ -133,7 +133,7 @@ public class StandardBenchmarker extends AbstractBenchmarker implements Benchmar
TestRunData trd = new TestRunData();
trd.startRun();
trd.setId(String.valueOf(i));
IndexWriter iw = new IndexWriter(params.getDirectory(), params.getAnalyzer(), true);
IndexWriter iw = new IndexWriter(params.getDirectory(), params.getAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
iw.setMergeFactor(params.getMergeFactor());
iw.setMaxBufferedDocs(params.getMaxBufferedDocs());

@@ -38,7 +38,7 @@ public class IndexMergeTool {
}
File mergedIndex = new File(args[0]);

IndexWriter writer = new IndexWriter(mergedIndex, new SimpleAnalyzer(), true);
IndexWriter writer = new IndexWriter(mergedIndex, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);

Directory[] indexes = new Directory[args.length - 1];
for (int i = 1; i < args.length; i++) {

@@ -36,7 +36,8 @@ public class TestRegexQuery extends TestCase {
public void setUp() {
RAMDirectory directory = new RAMDirectory();
try {
IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(), true);
IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field(FN, "the quick brown fox jumps over the lazy dog", Field.Store.NO, Field.Index.TOKENIZED));
writer.addDocument(doc);

@@ -36,7 +36,8 @@ public class SingleFieldTestDb {
docs = documents;
fieldName = fName;
Analyzer analyzer = new WhitespaceAnalyzer();
IndexWriter writer = new IndexWriter(db, analyzer, true);
IndexWriter writer = new IndexWriter(db, analyzer, true,
    IndexWriter.MaxFieldLength.LIMITED);
for (int j = 0; j < docs.length; j++) {
Document d = new Document();
d.add(new Field(fieldName, docs[j], Field.Store.NO, Field.Index.TOKENIZED));

@@ -111,7 +111,7 @@ public class ListSearcher extends AbstractListModel {
try {
// recreate the RAMDirectory
directory = new RAMDirectory();
IndexWriter writer = new IndexWriter(directory, analyzer, true);
IndexWriter writer = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);

// iterate through all rows
for (int row=0; row < listModel.getSize(); row++){

@@ -242,7 +242,7 @@ public class Syns2Index
int mod = 1;

// override the specific index if it already exists
IndexWriter writer = new IndexWriter(indexDir, ana, true);
IndexWriter writer = new IndexWriter(indexDir, ana, true, IndexWriter.MaxFieldLength.LIMITED);
writer.setUseCompoundFile(true); // why?
// blindly up these parameters for speed
writer.setMergeFactor( writer.getMergeFactor() * 2);

@@ -53,7 +53,7 @@ public class IndexFiles {

Date start = new Date();
try {
IndexWriter writer = new IndexWriter(INDEX_DIR, new StandardAnalyzer(), true);
IndexWriter writer = new IndexWriter(INDEX_DIR, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
System.out.println("Indexing to directory '" +INDEX_DIR+ "'...");
indexDocs(writer, docDir);
System.out.println("Optimizing...");

@@ -68,7 +68,8 @@ public class IndexHTML {
deleting = true;
indexDocs(root, index, create);
}
writer = new IndexWriter(index, new StandardAnalyzer(), create);
writer = new IndexWriter(index, new StandardAnalyzer(), create,
    IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxFieldLength(1000000);
indexDocs(root, index, create); // add new docs

@@ -26,6 +26,7 @@ import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.util.BitVector;
import org.apache.lucene.util.Parameter;

import java.io.File;
import java.io.IOException;

@@ -438,6 +439,7 @@ public class IndexWriter {
* @param create <code>true</code> to create the index or overwrite
* the existing one; <code>false</code> to append to the existing
* index
* @param mfl whether or not to limit field lengths
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not

@@ -447,9 +449,9 @@ public class IndexWriter {
* <code>false</code> or if there is any other low-level
* IO error
*/
public IndexWriter(String path, Analyzer a, boolean create)
public IndexWriter(String path, Analyzer a, boolean create, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(FSDirectory.getDirectory(path), a, create, true, null, true);
init(FSDirectory.getDirectory(path), a, create, true, null, true, mfl.getLimit());
}

/**

@@ -471,10 +473,91 @@ public class IndexWriter {
* if it does not exist and <code>create</code> is
* <code>false</code> or if there is any other low-level
* IO error
* @deprecated This constructor will be removed in the 3.0 release.
* Use {@link #IndexWriter(String,Analyzer,boolean,MaxFieldLength)} instead.
*/
public IndexWriter(String path, Analyzer a, boolean create)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(FSDirectory.getDirectory(path), a, create, true, null, true, DEFAULT_MAX_FIELD_LENGTH);
}

/**
* Constructs an IndexWriter for the index in <code>path</code>.
* Text will be analyzed with <code>a</code>. If <code>create</code>
* is true, then a new, empty index will be created in
* <code>path</code>, replacing the index already there, if any.
*
* @param path the path to the index directory
* @param a the analyzer to use
* @param create <code>true</code> to create the index or overwrite
* the existing one; <code>false</code> to append to the existing
* index
* @param mfl whether or not to limit field lengths
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if the directory cannot be read/written to, or
* if it does not exist and <code>create</code> is
* <code>false</code> or if there is any other low-level
* IO error
*/
public IndexWriter(File path, Analyzer a, boolean create, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(FSDirectory.getDirectory(path), a, create, true, null, true, mfl.getLimit());
}

/**
* Constructs an IndexWriter for the index in <code>path</code>.
* Text will be analyzed with <code>a</code>. If <code>create</code>
* is true, then a new, empty index will be created in
* <code>path</code>, replacing the index already there, if any.
*
* @param path the path to the index directory
* @param a the analyzer to use
* @param create <code>true</code> to create the index or overwrite
* the existing one; <code>false</code> to append to the existing
* index
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if the directory cannot be read/written to, or
* if it does not exist and <code>create</code> is
* <code>false</code> or if there is any other low-level
* IO error
* @deprecated This constructor will be removed in the 3.0 release.
* Use {@link #IndexWriter(File,Analyzer,boolean,MaxFieldLength)} instead.
*/
public IndexWriter(File path, Analyzer a, boolean create)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(FSDirectory.getDirectory(path), a, create, true, null, true);
init(FSDirectory.getDirectory(path), a, create, true, null, true, DEFAULT_MAX_FIELD_LENGTH);
}

/**
* Constructs an IndexWriter for the index in <code>d</code>.
* Text will be analyzed with <code>a</code>. If <code>create</code>
* is true, then a new, empty index will be created in
* <code>d</code>, replacing the index already there, if any.
*
* @param d the index directory
* @param a the analyzer to use
* @param create <code>true</code> to create the index or overwrite
* the existing one; <code>false</code> to append to the existing
* index
* @param mfl whether or not to limit field lengths
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if the directory cannot be read/written to, or
* if it does not exist and <code>create</code> is
* <code>false</code> or if there is any other low-level
* IO error
*/
public IndexWriter(Directory d, Analyzer a, boolean create, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, create, false, null, true, mfl.getLimit());
}

/**

@@ -496,10 +579,34 @@ public class IndexWriter {
* if it does not exist and <code>create</code> is
* <code>false</code> or if there is any other low-level
* IO error
* @deprecated This constructor will be removed in the 3.0 release.
* Use {@link #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)} instead.
*/
public IndexWriter(Directory d, Analyzer a, boolean create)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, create, false, null, true);
init(d, a, create, false, null, true, DEFAULT_MAX_FIELD_LENGTH);
}

/**
* Constructs an IndexWriter for the index in
* <code>path</code>, first creating it if it does not
* already exist. Text will be analyzed with
* <code>a</code>.
*
* @param path the path to the index directory
* @param a the analyzer to use
* @param mfl whether or not to limit field lengths
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if the directory cannot be
* read/written to or if there is any other low-level
* IO error
*/
public IndexWriter(String path, Analyzer a, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(FSDirectory.getDirectory(path), a, true, null, true, mfl.getLimit());
}

/**

@@ -517,10 +624,34 @@ public class IndexWriter {
* @throws IOException if the directory cannot be
* read/written to or if there is any other low-level
* IO error
* @deprecated This constructor will be removed in the 3.0 release.
* Use {@link #IndexWriter(String,Analyzer,MaxFieldLength)} instead.
*/
public IndexWriter(String path, Analyzer a)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(FSDirectory.getDirectory(path), a, true, null, true);
init(FSDirectory.getDirectory(path), a, true, null, true, DEFAULT_MAX_FIELD_LENGTH);
}

/**
* Constructs an IndexWriter for the index in
* <code>path</code>, first creating it if it does not
* already exist. Text will be analyzed with
* <code>a</code>.
*
* @param path the path to the index directory
* @param a the analyzer to use
* @param mfl whether or not to limit field lengths
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if the directory cannot be
* read/written to or if there is any other low-level
* IO error
*/
public IndexWriter(File path, Analyzer a, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(FSDirectory.getDirectory(path), a, true, null, true, mfl.getLimit());
}

/**

@@ -538,10 +669,34 @@ public class IndexWriter {
* @throws IOException if the directory cannot be
* read/written to or if there is any other low-level
* IO error
* @deprecated This constructor will be removed in the 3.0 release.
* Use {@link #IndexWriter(File,Analyzer,MaxFieldLength)} instead.
*/
public IndexWriter(File path, Analyzer a)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(FSDirectory.getDirectory(path), a, true, null, true);
init(FSDirectory.getDirectory(path), a, true, null, true, DEFAULT_MAX_FIELD_LENGTH);
}

/**
* Constructs an IndexWriter for the index in
* <code>d</code>, first creating it if it does not
* already exist. Text will be analyzed with
* <code>a</code>.
*
* @param d the index directory
* @param a the analyzer to use
* @param mfl whether or not to limit field lengths
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if the directory cannot be
* read/written to or if there is any other low-level
* IO error
*/
public IndexWriter(Directory d, Analyzer a, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, false, null, true, mfl.getLimit());
}

/**

@@ -559,10 +714,35 @@ public class IndexWriter {
* @throws IOException if the directory cannot be
* read/written to or if there is any other low-level
* IO error
* @deprecated This constructor will be removed in the 3.0 release.
* Use {@link #IndexWriter(Directory,Analyzer,MaxFieldLength)} instead.
*/
public IndexWriter(Directory d, Analyzer a)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, false, null, true);
init(d, a, false, null, true, DEFAULT_MAX_FIELD_LENGTH);
}

/**
* Constructs an IndexWriter for the index in
* <code>d</code>, first creating it if it does not
* already exist. Text will be analyzed with
* <code>a</code>.
*
* @param d the index directory
* @param autoCommit see <a href="#autoCommit">above</a>
* @param a the analyzer to use
* @param mfl whether or not to limit field lengths
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if the directory cannot be
* read/written to or if there is any other low-level
* IO error
*/
public IndexWriter(Directory d, boolean autoCommit, Analyzer a, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, false, null, autoCommit, mfl.getLimit());
}

/**

@@ -581,10 +761,39 @@ public class IndexWriter {
* @throws IOException if the directory cannot be
* read/written to or if there is any other low-level
* IO error
* @deprecated This constructor will be removed in the 3.0 release.
* Use {@link #IndexWriter(Directory,boolean,Analyzer,MaxFieldLength)} instead.
*/
public IndexWriter(Directory d, boolean autoCommit, Analyzer a)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, false, null, autoCommit);
init(d, a, false, null, autoCommit, DEFAULT_MAX_FIELD_LENGTH);
}

/**
* Constructs an IndexWriter for the index in <code>d</code>.
* Text will be analyzed with <code>a</code>. If <code>create</code>
* is true, then a new, empty index will be created in
* <code>d</code>, replacing the index already there, if any.
*
* @param d the index directory
* @param autoCommit see <a href="#autoCommit">above</a>
* @param a the analyzer to use
* @param create <code>true</code> to create the index or overwrite
* the existing one; <code>false</code> to append to the existing
* index
* @param mfl whether or not to limit field lengths
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if the directory cannot be read/written to, or
* if it does not exist and <code>create</code> is
* <code>false</code> or if there is any other low-level
* IO error
*/
public IndexWriter(Directory d, boolean autoCommit, Analyzer a, boolean create, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, create, false, null, autoCommit, mfl.getLimit());
}

/**

@@ -607,10 +816,36 @@ public class IndexWriter {
* if it does not exist and <code>create</code> is
* <code>false</code> or if there is any other low-level
* IO error
* @deprecated This constructor will be removed in the 3.0 release.
* Use {@link #IndexWriter(Directory,boolean,Analyzer,boolean,MaxFieldLength)} instead.
*/
public IndexWriter(Directory d, boolean autoCommit, Analyzer a, boolean create)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, create, false, null, autoCommit);
init(d, a, create, false, null, autoCommit, DEFAULT_MAX_FIELD_LENGTH);
}

/**
* Expert: constructs an IndexWriter with a custom {@link
* IndexDeletionPolicy}, for the index in <code>d</code>,
* first creating it if it does not already exist. Text
* will be analyzed with <code>a</code>.
*
* @param d the index directory
* @param autoCommit see <a href="#autoCommit">above</a>
* @param a the analyzer to use
* @param deletionPolicy see <a href="#deletionPolicy">above</a>
* @param mfl whether or not to limit field lengths
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if the directory cannot be
* read/written to or if there is any other low-level
* IO error
*/
public IndexWriter(Directory d, boolean autoCommit, Analyzer a, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, false, deletionPolicy, autoCommit, mfl.getLimit());
}

/**

@@ -630,10 +865,42 @@ public class IndexWriter {
* @throws IOException if the directory cannot be
* read/written to or if there is any other low-level
* IO error
* @deprecated This constructor will be removed in the 3.0 release.
* Use {@link #IndexWriter(Directory,boolean,Analyzer,IndexDeletionPolicy,MaxFieldLength)} instead.
*/
public IndexWriter(Directory d, boolean autoCommit, Analyzer a, IndexDeletionPolicy deletionPolicy)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, false, deletionPolicy, autoCommit);
init(d, a, false, deletionPolicy, autoCommit, DEFAULT_MAX_FIELD_LENGTH);
}

/**
* Expert: constructs an IndexWriter with a custom {@link
* IndexDeletionPolicy}, for the index in <code>d</code>.
* Text will be analyzed with <code>a</code>. If
* <code>create</code> is true, then a new, empty index
* will be created in <code>d</code>, replacing the index
* already there, if any.
*
* @param d the index directory
* @param autoCommit see <a href="#autoCommit">above</a>
* @param a the analyzer to use
* @param create <code>true</code> to create the index or overwrite
* the existing one; <code>false</code> to append to the existing
* index
* @param deletionPolicy see <a href="#deletionPolicy">above</a>
* @param mfl whether or not to limit field lengths
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if the directory cannot be read/written to, or
* if it does not exist and <code>create</code> is
* <code>false</code> or if there is any other low-level
* IO error
*/
public IndexWriter(Directory d, boolean autoCommit, Analyzer a, boolean create, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, create, false, deletionPolicy, autoCommit, mfl.getLimit());
}

/**

@@ -659,27 +926,30 @@ public class IndexWriter {
* if it does not exist and <code>create</code> is
* <code>false</code> or if there is any other low-level
* IO error
* @deprecated This constructor will be removed in the 3.0 release.
* Use {@link #IndexWriter(Directory,boolean,Analyzer,boolean,IndexDeletionPolicy,MaxFieldLength)} instead.
*/
public IndexWriter(Directory d, boolean autoCommit, Analyzer a, boolean create, IndexDeletionPolicy deletionPolicy)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, create, false, deletionPolicy, autoCommit);
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, create, false, deletionPolicy, autoCommit, DEFAULT_MAX_FIELD_LENGTH);
}

private void init(Directory d, Analyzer a, boolean closeDir, IndexDeletionPolicy deletionPolicy, boolean autoCommit)
private void init(Directory d, Analyzer a, boolean closeDir, IndexDeletionPolicy deletionPolicy, boolean autoCommit, int maxFieldLength)
throws CorruptIndexException, LockObtainFailedException, IOException {
if (IndexReader.indexExists(d)) {
init(d, a, false, closeDir, deletionPolicy, autoCommit);
init(d, a, false, closeDir, deletionPolicy, autoCommit, maxFieldLength);
} else {
init(d, a, true, closeDir, deletionPolicy, autoCommit);
init(d, a, true, closeDir, deletionPolicy, autoCommit, maxFieldLength);
}
}

private void init(Directory d, Analyzer a, final boolean create, boolean closeDir, IndexDeletionPolicy deletionPolicy, boolean autoCommit)
private void init(Directory d, Analyzer a, final boolean create, boolean closeDir, IndexDeletionPolicy deletionPolicy, boolean autoCommit, int maxFieldLength)
throws CorruptIndexException, LockObtainFailedException, IOException {
this.closeDir = closeDir;
directory = d;
analyzer = a;
this.infoStream = defaultInfoStream;
this.maxFieldLength = maxFieldLength;
setMessageID();

if (create) {

@@ -839,7 +1109,8 @@ public class IndexWriter {
* documents are large, be sure to set this value high enough to accomodate
* the expected size. If you set it to Integer.MAX_VALUE, then the only limit
* is your memory, but you should anticipate an OutOfMemoryError.<p/>
* By default, no more than 10,000 terms will be indexed for a field.
* By default, no more than {@link IndexWriter#DEFAULT_MAX_FIELD_LENGTH} terms
* will be indexed for a field.
*/
public void setMaxFieldLength(int maxFieldLength) {
ensureOpen();

@@ -1381,8 +1652,9 @@ public class IndexWriter {
* is your memory, but you should anticipate an OutOfMemoryError.<p/>
* By default, no more than 10,000 terms will be indexed for a field.
*
* @see MaxFieldLength
*/
private int maxFieldLength = DEFAULT_MAX_FIELD_LENGTH;
private int maxFieldLength;

/**
* Adds a document to this index. If the document contains more than

@@ -3359,4 +3631,35 @@ public class IndexWriter {

return buffer.toString();
}

/**
* Specifies maximum field length in {@link IndexWriter} constructors.
* {@link IndexWriter#setMaxFieldLength(int)} overrides the value set by
* the constructor.
*/
public static final class MaxFieldLength extends Parameter implements java.io.Serializable {

private int limit;

private MaxFieldLength(String name, int limit) {
// typesafe enum pattern, no public constructor
super(name);
this.limit = limit;
}

public int getLimit() {
return limit;
}

/** Sets the maximum field length to {@link Integer#MAX_VALUE}. */
public static final MaxFieldLength UNLIMITED
= new MaxFieldLength("UNLIMITED", Integer.MAX_VALUE);

/**
* Sets the maximum field length to
* {@link IndexWriter#DEFAULT_MAX_FIELD_LENGTH}
* */
public static final MaxFieldLength LIMITED
= new MaxFieldLength("LIMITED", DEFAULT_MAX_FIELD_LENGTH);
}
}

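To illustrate the parameter object defined above, a small sketch of how a caller might pick a policy and later override it; the RAMDirectory and SimpleAnalyzer here are placeholders, not part of the commit:

```java
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class MaxFieldLengthSketch {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();  // placeholder directory

    // LIMITED wraps DEFAULT_MAX_FIELD_LENGTH (10,000 terms per field);
    // UNLIMITED wraps Integer.MAX_VALUE.
    IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(),
        IndexWriter.MaxFieldLength.LIMITED);
    System.out.println(IndexWriter.MaxFieldLength.LIMITED.getLimit());   // 10000
    System.out.println(IndexWriter.MaxFieldLength.UNLIMITED.getLimit()); // 2147483647

    // Per the javadoc above, setMaxFieldLength(int) still overrides
    // whatever the constructor set.
    writer.setMaxFieldLength(50000);
    writer.close();
  }
}
```
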
@@ -29,7 +29,7 @@ class IndexTest {
try {
Date start = new Date();
IndexWriter writer = new IndexWriter(File.createTempFile("luceneTest", "idx"),
new SimpleAnalyzer(), true);
new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);

writer.setMergeFactor(20);

@@ -50,7 +50,8 @@ public class TestDemo extends LuceneTestCase {
// parameter true will overwrite the index in that directory
// if one exists):
//Directory directory = FSDirectory.getDirectory("/tmp/testindex", true);
IndexWriter iwriter = new IndexWriter(directory, analyzer, true);
IndexWriter iwriter = new IndexWriter(directory, analyzer, true,
    IndexWriter.MaxFieldLength.LIMITED);
iwriter.setMaxFieldLength(25000);
Document doc = new Document();
String text = "This is the text to be indexed.";

@@ -40,7 +40,8 @@ public class TestHitIterator extends LuceneTestCase {
public void testIterator() throws Exception {
RAMDirectory directory = new RAMDirectory();

IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("field", "iterator test doc 1", Field.Store.YES, Field.Index.TOKENIZED));
writer.addDocument(doc);

@@ -77,7 +77,8 @@ public class TestSearch extends LuceneTestCase {
{
Directory directory = new RAMDirectory();
Analyzer analyzer = new SimpleAnalyzer();
IndexWriter writer = new IndexWriter(directory, analyzer, true);
IndexWriter writer = new IndexWriter(directory, analyzer, true,
    IndexWriter.MaxFieldLength.LIMITED);

writer.setUseCompoundFile(useCompoundFile);

@@ -84,7 +84,8 @@ public class TestSearchForDuplicates extends LuceneTestCase {
private void doTest(PrintWriter out, boolean useCompoundFiles) throws Exception {
Directory directory = new RAMDirectory();
Analyzer analyzer = new SimpleAnalyzer();
IndexWriter writer = new IndexWriter(directory, analyzer, true);
IndexWriter writer = new IndexWriter(directory, analyzer, true,
    IndexWriter.MaxFieldLength.LIMITED);

writer.setUseCompoundFile(useCompoundFiles);

@@ -68,7 +68,8 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase
final long stopTime = System.currentTimeMillis() + 7000;

SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
final IndexWriter writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp);
final IndexWriter writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp,
    IndexWriter.MaxFieldLength.LIMITED);

// Force frequent commits
writer.setMaxBufferedDocs(2);

@@ -37,7 +37,7 @@ public class TestCachingTokenFilter extends LuceneTestCase {

public void testCaching() throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer());
IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
TokenStream stream = new TokenStream() {
private int index = 0;

@@ -33,7 +33,8 @@ public class TestCheckIndex extends LuceneTestCase {

public void testDeletedDocs() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
Document doc = new Document();
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));

@@ -93,8 +93,8 @@ public class TestFilterIndexReader extends LuceneTestCase {
*/
public void testFilterIndexReader() throws Exception {
RAMDirectory directory = new MockRAMDirectory();
IndexWriter writer =
new IndexWriter(directory, new WhitespaceAnalyzer(), true);
IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);

Document d1 = new Document();
d1.add(new Field("default","one two", Field.Store.YES, Field.Index.TOKENIZED));

@@ -2656,4 +2656,26 @@ public class TestIndexWriter extends LuceneTestCase
public void testIOExceptionDuringWriteSegmentWithThreadsOnlyOnce() throws IOException {
_testMultipleThreadsFailure(new FailOnlyInWriteSegment(true));
}

// LUCENE-1084: test unlimited field length
public void testUnlimitedMaxFieldLength() throws IOException {
Directory dir = new MockRAMDirectory();

IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);

Document doc = new Document();
StringBuffer b = new StringBuffer();
for(int i=0;i<10000;i++)
b.append(" a");
b.append(" x");
doc.add(new Field("field", b.toString(), Field.Store.NO, Field.Index.TOKENIZED));
writer.addDocument(doc);
writer.close();

IndexReader reader = IndexReader.open(dir);
Term t = new Term("field", "x");
assertEquals(1, reader.docFreq(t));
reader.close();
dir.close();
}
}

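For contrast with the UNLIMITED test above, a hypothetical companion check (not part of this commit, assuming the same imports and scaffolding as the test) of what the default LIMITED policy would do with the same document: the trailing "x" sits past the 10,000-term cutoff, so it should never reach the index.

```java
// Hypothetical companion to testUnlimitedMaxFieldLength: LIMITED keeps only
// the first 10,000 terms of the field, so the trailing "x" is dropped.
public void testLimitedMaxFieldLength() throws IOException {
  Directory dir = new MockRAMDirectory();

  IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);

  Document doc = new Document();
  StringBuffer b = new StringBuffer();
  for (int i = 0; i < 10000; i++)
    b.append(" a");
  b.append(" x");   // term 10,001 -- beyond the default limit
  doc.add(new Field("field", b.toString(), Field.Store.NO, Field.Index.TOKENIZED));
  writer.addDocument(doc);
  writer.close();

  IndexReader reader = IndexReader.open(dir);
  assertEquals(0, reader.docFreq(new Term("field", "x")));  // truncated away
  reader.close();
  dir.close();
}
```
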
@@ -45,7 +45,8 @@ import org.apache.lucene.store.RAMDirectory;
public class TestMultiLevelSkipList extends LuceneTestCase {
public void testSimpleSkip() throws IOException {
RAMDirectory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new PayloadAnalyzer(), true);
IndexWriter writer = new IndexWriter(dir, new PayloadAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);
Term term = new Term("test", "a");
for (int i = 0; i < 5000; i++) {
Document d1 = new Document();

@@ -104,7 +104,8 @@ public class TestSegmentTermDocs extends LuceneTestCase {

public void testSkipTo(int indexDivisor) throws IOException {
Directory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);

Term ta = new Term("content","aaa");
for(int i = 0; i < 10; i++)

@@ -75,10 +75,10 @@ public class BaseTestRangeFilter extends LuceneTestCase {
try {

/* build an index */
IndexWriter writer = new IndexWriter(index,
new SimpleAnalyzer(), T);
IndexWriter writer = new IndexWriter(index, new SimpleAnalyzer(), T,
    IndexWriter.MaxFieldLength.LIMITED);

for (int d = minId; d <= maxId; d++) {
Document doc = new Document();
doc.add(new Field("id",pad(d), Field.Store.YES, Field.Index.UN_TOKENIZED));
int r= rand.nextInt();

@@ -67,11 +67,10 @@ public class TestConstantScoreRangeQuery extends BaseTestRangeFilter {
};

small = new RAMDirectory();
IndexWriter writer = new IndexWriter(small,
new WhitespaceAnalyzer(),
true);
IndexWriter writer = new IndexWriter(small, new WhitespaceAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);

for (int i = 0; i < data.length; i++) {
Document doc = new Document();
doc.add(new Field("id", String.valueOf(i), Field.Store.YES, Field.Index.UN_TOKENIZED));//Field.Keyword("id",String.valueOf(i)));
doc.add(new Field("all", "all", Field.Store.YES, Field.Index.UN_TOKENIZED));//Field.Keyword("all","all"));

@@ -49,7 +49,8 @@ public class TestDateSort extends TestCase {
public void setUp() throws Exception {
// Create an index writer.
directory = new RAMDirectory();
IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);

// oldest doc:
// Add the first document.  text = "Document 1"  dateTime = Oct 10 03:25:22 EDT 2007

@@ -64,7 +64,8 @@ public class TestExplanations extends LuceneTestCase {
public void setUp() throws Exception {
super.setUp();
RAMDirectory directory = new RAMDirectory();
IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(), true);
IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < docFields.length; i++) {
Document doc = new Document();
doc.add(new Field(FIELD, docFields[i], Field.Store.NO, Field.Index.TOKENIZED));

@@ -114,11 +114,13 @@ public class TestMultiSearcherRanking extends LuceneTestCase {
super.setUp();
// create MultiSearcher from two seperate searchers
Directory d1 = new RAMDirectory();
IndexWriter iw1 = new IndexWriter(d1, new StandardAnalyzer(), true);
IndexWriter iw1 = new IndexWriter(d1, new StandardAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);
addCollection1(iw1);
iw1.close();
Directory d2 = new RAMDirectory();
IndexWriter iw2 = new IndexWriter(d2, new StandardAnalyzer(), true);
IndexWriter iw2 = new IndexWriter(d2, new StandardAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);
addCollection2(iw2);
iw2.close();

@@ -129,7 +131,8 @@ public class TestMultiSearcherRanking extends LuceneTestCase {

// create IndexSearcher which contains all documents
Directory d = new RAMDirectory();
IndexWriter iw = new IndexWriter(d, new StandardAnalyzer(), true);
IndexWriter iw = new IndexWriter(d, new StandardAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);
addCollection1(iw);
addCollection2(iw);
iw.close();

@@ -55,7 +55,8 @@ public class TestPhraseQuery extends LuceneTestCase {
return 100;
}
};
IndexWriter writer = new IndexWriter(directory, analyzer, true);
IndexWriter writer = new IndexWriter(directory, analyzer, true,
    IndexWriter.MaxFieldLength.LIMITED);

Document doc = new Document();
doc.add(new Field("field", "one two three four five", Field.Store.YES, Field.Index.TOKENIZED));

@@ -201,7 +202,8 @@ public class TestPhraseQuery extends LuceneTestCase {
public void testPhraseQueryWithStopAnalyzer() throws Exception {
RAMDirectory directory = new RAMDirectory();
StopAnalyzer stopAnalyzer = new StopAnalyzer();
IndexWriter writer = new IndexWriter(directory, stopAnalyzer, true);
IndexWriter writer = new IndexWriter(directory, stopAnalyzer, true,
    IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("field", "the stop words are here", Field.Store.YES, Field.Index.TOKENIZED));
writer.addDocument(doc);

@@ -232,7 +234,8 @@ public class TestPhraseQuery extends LuceneTestCase {

public void testPhraseQueryInConjunctionScorer() throws Exception {
RAMDirectory directory = new RAMDirectory();
IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);

Document doc = new Document();
doc.add(new Field("source", "marketing info", Field.Store.YES, Field.Index.TOKENIZED));

@@ -267,7 +270,8 @@ public class TestPhraseQuery extends LuceneTestCase {

searcher.close();

writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);
doc = new Document();
doc.add(new Field("contents", "map entry woo", Field.Store.YES, Field.Index.TOKENIZED));
writer.addDocument(doc);

@@ -316,7 +320,8 @@ public class TestPhraseQuery extends LuceneTestCase {

public void testSlopScoring() throws IOException {
Directory directory = new RAMDirectory();
IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);

Document doc = new Document();
doc.add(new Field("field", "foo firstname lastname foo", Field.Store.YES, Field.Index.TOKENIZED));

@@ -66,7 +66,8 @@ public class TestPositionIncrement extends LuceneTestCase {
}
};
RAMDirectory store = new RAMDirectory();
IndexWriter writer = new IndexWriter(store, analyzer, true);
IndexWriter writer = new IndexWriter(store, analyzer, true,
    IndexWriter.MaxFieldLength.LIMITED);
Document d = new Document();
d.add(new Field("field", "bogus", Field.Store.YES, Field.Index.TOKENIZED));
writer.addDocument(d);

@@ -55,7 +55,8 @@ public class TestRemoteCachingWrapperFilter extends LuceneTestCase {
private static void startServer() throws Exception {
// construct an index
RAMDirectory indexStore = new RAMDirectory();
IndexWriter writer = new IndexWriter(indexStore,new SimpleAnalyzer(),true);
IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("test", "test text", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("type", "A", Field.Store.YES, Field.Index.TOKENIZED));

@@ -51,7 +51,8 @@ public class TestSimilarity extends LuceneTestCase {

public void testSimilarity() throws Exception {
RAMDirectory store = new RAMDirectory();
IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(), true);
IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);
writer.setSimilarity(new SimpleSimilarity());

Document d1 = new Document();

@@ -41,7 +41,8 @@ public class TestSpanQueryFilter extends LuceneTestCase {

public void testFilterWorks() throws Exception {
Directory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), true);
IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < 500; i++) {
Document document = new Document();
document.add(new Field("field", English.intToEnglish(i) + " equals " + English.intToEnglish(i),

@@ -41,8 +41,8 @@ public class TestTermVectors extends LuceneTestCase {

public void setUp() throws Exception {
super.setUp();
IndexWriter writer
= new IndexWriter(directory, new SimpleAnalyzer(), true);
IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);
//writer.setUseCompoundFile(true);
//writer.infoStream = System.out;
for (int i = 0; i < 1000; i++) {

@@ -201,7 +201,8 @@ public class TestTermVectors extends LuceneTestCase {
Directory dir = new RAMDirectory();

try {
IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), true);
IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);
assertTrue(writer != null);
writer.addDocument(testDoc1);
writer.addDocument(testDoc2);

@@ -318,7 +319,8 @@ public class TestTermVectors extends LuceneTestCase {

// Test only a few docs having vectors
public void testRareVectors() throws IOException {
IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(), true);
IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);
for(int i=0;i<100;i++) {
Document doc = new Document();
doc.add(new Field("field", English.intToEnglish(i),

@@ -349,7 +351,8 @@ public class TestTermVectors extends LuceneTestCase {
// In a single doc, for the same field, mix the term
// vectors up
public void testMixedVectrosVectors() throws IOException {
IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(), true);
IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("field", "one",
Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.NO));

@@ -88,7 +88,8 @@ public abstract class FunctionTestSetup extends LuceneTestCase {
super.setUp();
dir = new RAMDirectory();
anlzr = new StandardAnalyzer();
IndexWriter iw = new IndexWriter(dir,anlzr);
IndexWriter iw = new IndexWriter(dir, anlzr,
    IndexWriter.MaxFieldLength.LIMITED);
// add docs not exactly in natural ID order, to verify we do check the order of docs by scores
int remaining = N_DOCS;
boolean done[] = new boolean[N_DOCS];

@@ -50,8 +50,8 @@ public class TestBasics extends LuceneTestCase {
public void setUp() throws Exception {
super.setUp();
RAMDirectory directory = new RAMDirectory();
IndexWriter writer
= new IndexWriter(directory, new SimpleAnalyzer(), true);
IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);
//writer.infoStream = System.out;
for (int i = 0; i < 1000; i++) {
Document doc = new Document();

@@ -49,7 +49,8 @@ public class TestLockFactory extends LuceneTestCase {
// Lock prefix should have been set:
assertTrue("lock prefix was not set by the RAMDirectory", lf.lockPrefixSet);

IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);

// add 100 documents (so that commit lock is used)
for (int i = 0; i < 100; i++) {

@@ -82,13 +83,15 @@ public class TestLockFactory extends LuceneTestCase {
assertTrue("RAMDirectory.setLockFactory did not take",
NoLockFactory.class.isInstance(dir.getLockFactory()));

IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);

// Create a 2nd IndexWriter.  This is normally not allowed but it should run through since we're not
// using any locks:
IndexWriter writer2 = null;
try {
writer2 = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
writer2 = new IndexWriter(dir, new WhitespaceAnalyzer(), false,
    IndexWriter.MaxFieldLength.LIMITED);
} catch (Exception e) {
e.printStackTrace(System.out);
fail("Should not have hit an IOException with no locking");

@@ -108,12 +111,14 @@ public class TestLockFactory extends LuceneTestCase {
assertTrue("RAMDirectory did not use correct LockFactory: got " + dir.getLockFactory(),
SingleInstanceLockFactory.class.isInstance(dir.getLockFactory()));

IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);

// Create a 2nd IndexWriter.  This should fail:
IndexWriter writer2 = null;
try {
writer2 = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
writer2 = new IndexWriter(dir, new WhitespaceAnalyzer(), false,
    IndexWriter.MaxFieldLength.LIMITED);
fail("Should have hit an IOException with two IndexWriters on default SingleInstanceLockFactory");
} catch (IOException e) {
}

@@ -129,7 +134,8 @@ public class TestLockFactory extends LuceneTestCase {
public void testDefaultFSDirectory() throws IOException {
String indexDirName = "index.TestLockFactory1";

IndexWriter writer = new IndexWriter(indexDirName, new WhitespaceAnalyzer(), true);
IndexWriter writer = new IndexWriter(indexDirName, new WhitespaceAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);

assertTrue("FSDirectory did not use correct LockFactory: got " + writer.getDirectory().getLockFactory(),
SimpleFSLockFactory.class.isInstance(writer.getDirectory().getLockFactory()) ||

@@ -139,7 +145,8 @@ public class TestLockFactory extends LuceneTestCase {

// Create a 2nd IndexWriter.  This should fail:
try {
writer2 = new IndexWriter(indexDirName, new WhitespaceAnalyzer(), false);
writer2 = new IndexWriter(indexDirName, new WhitespaceAnalyzer(), false,
    IndexWriter.MaxFieldLength.LIMITED);
fail("Should have hit an IOException with two IndexWriters on default SimpleFSLockFactory");
} catch (IOException e) {
}

@@ -157,7 +164,8 @@ public class TestLockFactory extends LuceneTestCase {
public void testFSDirectoryTwoCreates() throws IOException {
String indexDirName = "index.TestLockFactory2";

IndexWriter writer = new IndexWriter(indexDirName, new WhitespaceAnalyzer(), true);
IndexWriter writer = new IndexWriter(indexDirName, new WhitespaceAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);

assertTrue("FSDirectory did not use correct LockFactory: got " + writer.getDirectory().getLockFactory(),
SimpleFSLockFactory.class.isInstance(writer.getDirectory().getLockFactory()) ||

@@ -180,7 +188,8 @@ public class TestLockFactory extends LuceneTestCase {
// Create a 2nd IndexWriter.  This should not fail:
IndexWriter writer2 = null;
try {
writer2 = new IndexWriter(indexDirName, new WhitespaceAnalyzer(), true);
writer2 = new IndexWriter(indexDirName, new WhitespaceAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);
} catch (IOException e) {
e.printStackTrace(System.out);
fail("Should not have hit an IOException with two IndexWriters with create=true, on default SimpleFSLockFactory");

@@ -213,28 +222,32 @@ public class TestLockFactory extends LuceneTestCase {

// NoLockFactory:
System.setProperty(prpName, "org.apache.lucene.store.NoLockFactory");
IndexWriter writer = new IndexWriter(indexDirName, new WhitespaceAnalyzer(), true);
IndexWriter writer = new IndexWriter(indexDirName, new WhitespaceAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);
assertTrue("FSDirectory did not use correct LockFactory: got " + writer.getDirectory().getLockFactory(),
NoLockFactory.class.isInstance(writer.getDirectory().getLockFactory()));
writer.close();

// SingleInstanceLockFactory:
System.setProperty(prpName, "org.apache.lucene.store.SingleInstanceLockFactory");
writer = new IndexWriter(indexDirName, new WhitespaceAnalyzer(), true);
writer = new IndexWriter(indexDirName, new WhitespaceAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);
assertTrue("FSDirectory did not use correct LockFactory: got " + writer.getDirectory().getLockFactory(),
SingleInstanceLockFactory.class.isInstance(writer.getDirectory().getLockFactory()));
writer.close();

// NativeFSLockFactory:
System.setProperty(prpName, "org.apache.lucene.store.NativeFSLockFactory");
writer = new IndexWriter(indexDirName, new WhitespaceAnalyzer(), true);
writer = new IndexWriter(indexDirName, new WhitespaceAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);
assertTrue("FSDirectory did not use correct LockFactory: got " + writer.getDirectory().getLockFactory(),
NativeFSLockFactory.class.isInstance(writer.getDirectory().getLockFactory()));
writer.close();

// SimpleFSLockFactory:
System.setProperty(prpName, "org.apache.lucene.store.SimpleFSLockFactory");
writer = new IndexWriter(indexDirName, new WhitespaceAnalyzer(), true);
writer = new IndexWriter(indexDirName, new WhitespaceAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);
assertTrue("FSDirectory did not use correct LockFactory: got " + writer.getDirectory().getLockFactory(),
SimpleFSLockFactory.class.isInstance(writer.getDirectory().getLockFactory()));
writer.close();

@@ -254,7 +267,8 @@ public class TestLockFactory extends LuceneTestCase {
assertTrue("Locks are already disabled", !FSDirectory.getDisableLocks());
FSDirectory.setDisableLocks(true);

IndexWriter writer = new IndexWriter(indexDirName, new WhitespaceAnalyzer(), true);
IndexWriter writer = new IndexWriter(indexDirName, new WhitespaceAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);

assertTrue("FSDirectory did not use correct default LockFactory: got " + writer.getDirectory().getLockFactory(),
NoLockFactory.class.isInstance(writer.getDirectory().getLockFactory()));

@@ -262,7 +276,8 @@ public class TestLockFactory extends LuceneTestCase {
// Should be no error since locking is disabled:
IndexWriter writer2 = null;
try {
writer2 = new IndexWriter(indexDirName, new WhitespaceAnalyzer(), false);
writer2 = new IndexWriter(indexDirName, new WhitespaceAnalyzer(), false,
    IndexWriter.MaxFieldLength.LIMITED);
} catch (IOException e) {
e.printStackTrace(System.out);
fail("Should not have hit an IOException with locking disabled");

@@ -328,7 +343,8 @@ public class TestLockFactory extends LuceneTestCase {
FSDirectory fs1 = FSDirectory.getDirectory(indexDirName, lockFactory);

// First create a 1 doc index:
IndexWriter w = new IndexWriter(fs1, new WhitespaceAnalyzer(), true);
IndexWriter w = new IndexWriter(fs1, new WhitespaceAnalyzer(), true,
    IndexWriter.MaxFieldLength.LIMITED);
addDoc(w);
w.close();

@@ -418,7 +434,8 @@ public class TestLockFactory extends LuceneTestCase {
IndexWriter writer = null;
for(int i=0;i<this.numIteration;i++) {
try {
writer = new IndexWriter(dir, analyzer, false);
writer = new IndexWriter(dir, analyzer, false,
    IndexWriter.MaxFieldLength.LIMITED);
} catch (IOException e) {
if (e.toString().indexOf(" timed out:") == -1) {
hitException = true;
