mirror of https://github.com/apache/lucene.git
LUCENE-1401: remove new deprecated IndexWriter ctors; default autoCommit=false for new ctors
git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@698932 13f79535-47bb-0310-9956-ffa450edef68
parent d8a60238e3
commit a40530e4a4
@@ -67,7 +67,8 @@ API Changes
1. LUCENE-1084: Changed all IndexWriter constructors to take an
explicit parameter for maximum field size. Deprecated all the
pre-existing constructors; these will be removed in release 3.0.
(Steven Rowe via Mike McCandless)
NOTE: these new constructors set autoCommit to false. (Steven
Rowe via Mike McCandless)

2. LUCENE-584: Changed Filter API to return a DocIdSet instead of a
java.util.BitSet. This allows using more efficient data structures
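For illustration only (not part of this patch): a minimal sketch of indexing against the new constructors described above, assuming a RAMDirectory and StandardAnalyzer. Because autoCommit now defaults to false, changes become visible to readers only after an explicit commit() or close().

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.RAMDirectory;

public class NewConstructorSketch {
  public static void main(String[] args) throws Exception {
    RAMDirectory dir = new RAMDirectory();
    // New-style constructor: explicit MaxFieldLength, autoCommit is implicitly false.
    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true,
                                         IndexWriter.MaxFieldLength.LIMITED);
    Document doc = new Document();
    doc.add(new Field("body", "hello world", Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);
    writer.commit();  // required for readers to see the change, since autoCommit is false
    writer.close();
  }
}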
@@ -97,8 +97,7 @@ public class CreateIndexTask extends PerfTask {
Config config = runData.getConfig();
IndexWriter writer = new IndexWriter(runData.getDirectory(),
runData.getConfig().get("autocommit", OpenIndexTask.DEFAULT_AUTO_COMMIT),
runData.getAnalyzer(),
true, IndexWriter.MaxFieldLength.LIMITED);
runData.getAnalyzer());
CreateIndexTask.setIndexWriterConfig(writer, config);
runData.setIndexWriter(writer);
return 1;
@@ -50,7 +50,7 @@ public class OpenIndexTask extends PerfTask {
IndexWriter writer = new IndexWriter(runData.getDirectory(),
config.get("autocommit", DEFAULT_AUTO_COMMIT),
runData.getAnalyzer(),
false, IndexWriter.MaxFieldLength.LIMITED);
false);
CreateIndexTask.setIndexWriterConfig(writer, config);
runData.setIndexWriter(writer);
return 1;
@@ -121,7 +121,7 @@ import java.util.Iterator;
<p>When <code>autoCommit</code> is <code>true</code> then
the writer will periodically commit on its own. [<b>Deprecated</b>: Note that in 3.0, IndexWriter will
no longer accept autoCommit=true (it will be hardwired to
false). You can always call {@link IndexWriter#commit()} yourself
false). You can always call {@link #commit()} yourself
when needed]. There is
no guarantee when exactly an auto commit will occur (it
used to be after every flush, but it is now after every
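A hedged sketch of what code relying on autoCommit=true migrates to (illustrative only, not part of this patch): commit explicitly at whatever interval suits the application, here every 100 documents; the index path is a hypothetical placeholder.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.FSDirectory;

public class ManualCommitSketch {
  public static void main(String[] args) throws Exception {
    // "/tmp/demo-index" is a made-up path used only for this sketch.
    IndexWriter writer = new IndexWriter(FSDirectory.getDirectory("/tmp/demo-index"),
                                         new StandardAnalyzer(), true,
                                         IndexWriter.MaxFieldLength.LIMITED);
    for (int i = 0; i < 1000; i++) {
      Document doc = new Document();
      doc.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
      writer.addDocument(doc);
      if (i % 100 == 99) {
        writer.commit();  // replaces the periodic commits autoCommit=true used to perform
      }
    }
    writer.close();  // close() commits any remaining buffered changes
  }
}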
@@ -523,8 +523,11 @@ public class IndexWriter {
* Text will be analyzed with <code>a</code>. If <code>create</code>
* is true, then a new, empty index will be created in
* <code>path</code>, replacing the index already there,
* if any. Note that autoCommit defaults to true, but
* starting in 3.0 it will be hardwired to false.
* if any.
*
* <p><b>NOTE</b>: autoCommit (see <a
* href="#autoCommit">above</a>) is set to false with this
* constructor.
*
* @param path the path to the index directory
* @param a the analyzer to use

@@ -544,7 +547,7 @@ public class IndexWriter {
*/
public IndexWriter(String path, Analyzer a, boolean create, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(FSDirectory.getDirectory(path), a, create, true, null, true, mfl.getLimit());
init(FSDirectory.getDirectory(path), a, create, true, null, false, mfl.getLimit());
}

/**
@@ -567,7 +570,9 @@ public class IndexWriter {
* <code>false</code> or if there is any other low-level
* IO error
* @deprecated This constructor will be removed in the 3.0 release.
* Use {@link #IndexWriter(String,Analyzer,boolean,MaxFieldLength)} instead.
* Use {@link
* #IndexWriter(String,Analyzer,boolean,MaxFieldLength)}
* instead, and call {@link #commit()} when needed.
*/
public IndexWriter(String path, Analyzer a, boolean create)
throws CorruptIndexException, LockObtainFailedException, IOException {

@@ -579,8 +584,10 @@ public class IndexWriter {
* Text will be analyzed with <code>a</code>. If <code>create</code>
* is true, then a new, empty index will be created in
* <code>path</code>, replacing the index already there, if any.
* Note that autoCommit defaults to true, but starting in 3.0
* it will be hardwired to false.
*
* <p><b>NOTE</b>: autoCommit (see <a
* href="#autoCommit">above</a>) is set to false with this
* constructor.
*
* @param path the path to the index directory
* @param a the analyzer to use
@@ -600,7 +607,7 @@ public class IndexWriter {
*/
public IndexWriter(File path, Analyzer a, boolean create, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(FSDirectory.getDirectory(path), a, create, true, null, true, mfl.getLimit());
init(FSDirectory.getDirectory(path), a, create, true, null, false, mfl.getLimit());
}

/**

@@ -623,7 +630,9 @@ public class IndexWriter {
* <code>false</code> or if there is any other low-level
* IO error
* @deprecated This constructor will be removed in the 3.0 release.
* Use {@link #IndexWriter(File,Analyzer,boolean,MaxFieldLength)} instead.
* Use {@link
* #IndexWriter(File,Analyzer,boolean,MaxFieldLength)}
* instead, and call {@link #commit()} when needed.
*/
public IndexWriter(File path, Analyzer a, boolean create)
throws CorruptIndexException, LockObtainFailedException, IOException {
@@ -635,8 +644,10 @@ public class IndexWriter {
* Text will be analyzed with <code>a</code>. If <code>create</code>
* is true, then a new, empty index will be created in
* <code>d</code>, replacing the index already there, if any.
* Note that autoCommit defaults to true, but starting in 3.0
* it will be hardwired to false.
*
* <p><b>NOTE</b>: autoCommit (see <a
* href="#autoCommit">above</a>) is set to false with this
* constructor.
*
* @param d the index directory
* @param a the analyzer to use

@@ -656,7 +667,7 @@ public class IndexWriter {
*/
public IndexWriter(Directory d, Analyzer a, boolean create, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, create, false, null, true, mfl.getLimit());
init(d, a, create, false, null, false, mfl.getLimit());
}

/**
@@ -678,7 +689,8 @@ public class IndexWriter {
* if it does not exist and <code>create</code> is
* <code>false</code> or if there is any other low-level
* IO error
* @deprecated This constructor will be removed in the 3.0 release.
* @deprecated This constructor will be removed in the 3.0
* release, and call {@link #commit()} when needed.
* Use {@link #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)} instead.
*/
public IndexWriter(Directory d, Analyzer a, boolean create)

@@ -691,8 +703,10 @@ public class IndexWriter {
* <code>path</code>, first creating it if it does not
* already exist. Text will be analyzed with
* <code>a</code>.
* Note that autoCommit defaults to true, but starting in 3.0
* it will be hardwired to false.
*
* <p><b>NOTE</b>: autoCommit (see <a
* href="#autoCommit">above</a>) is set to false with this
* constructor.
*
* @param path the path to the index directory
* @param a the analyzer to use
@@ -708,7 +722,7 @@ public class IndexWriter {
*/
public IndexWriter(String path, Analyzer a, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(FSDirectory.getDirectory(path), a, true, null, true, mfl.getLimit());
init(FSDirectory.getDirectory(path), a, true, null, false, mfl.getLimit());
}

/**

@@ -726,7 +740,8 @@ public class IndexWriter {
* @throws IOException if the directory cannot be
* read/written to or if there is any other low-level
* IO error
* @deprecated This constructor will be removed in the 3.0 release.
* @deprecated This constructor will be removed in the 3.0
* release, and call {@link #commit()} when needed.
* Use {@link #IndexWriter(String,Analyzer,MaxFieldLength)} instead.
*/
public IndexWriter(String path, Analyzer a)
@@ -739,8 +754,10 @@ public class IndexWriter {
* <code>path</code>, first creating it if it does not
* already exist. Text will be analyzed with
* <code>a</code>.
* Note that autoCommit defaults to true, but starting in 3.0
* it will be hardwired to false.
*
* <p><b>NOTE</b>: autoCommit (see <a
* href="#autoCommit">above</a>) is set to false with this
* constructor.
*
* @param path the path to the index directory
* @param a the analyzer to use

@@ -756,7 +773,7 @@ public class IndexWriter {
*/
public IndexWriter(File path, Analyzer a, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(FSDirectory.getDirectory(path), a, true, null, true, mfl.getLimit());
init(FSDirectory.getDirectory(path), a, true, null, false, mfl.getLimit());
}

/**
@@ -775,7 +792,8 @@ public class IndexWriter {
* read/written to or if there is any other low-level
* IO error
* @deprecated This constructor will be removed in the 3.0 release.
* Use {@link #IndexWriter(File,Analyzer,MaxFieldLength)} instead.
* Use {@link #IndexWriter(File,Analyzer,MaxFieldLength)}
* instead, and call {@link #commit()} when needed.
*/
public IndexWriter(File path, Analyzer a)
throws CorruptIndexException, LockObtainFailedException, IOException {

@@ -787,8 +805,10 @@ public class IndexWriter {
* <code>d</code>, first creating it if it does not
* already exist. Text will be analyzed with
* <code>a</code>.
* Note that autoCommit defaults to true, but starting in 3.0
* it will be hardwired to false.
*
* <p><b>NOTE</b>: autoCommit (see <a
* href="#autoCommit">above</a>) is set to false with this
* constructor.
*
* @param d the index directory
* @param a the analyzer to use
@@ -804,7 +824,7 @@ public class IndexWriter {
*/
public IndexWriter(Directory d, Analyzer a, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, false, null, true, mfl.getLimit());
init(d, a, false, null, false, mfl.getLimit());
}

/**
@@ -823,41 +843,15 @@ public class IndexWriter {
* read/written to or if there is any other low-level
* IO error
* @deprecated This constructor will be removed in the 3.0 release.
* Use {@link #IndexWriter(Directory,Analyzer,MaxFieldLength)} instead.
* Use {@link
* #IndexWriter(Directory,Analyzer,MaxFieldLength)}
* instead, and call {@link #commit()} when needed.
*/
public IndexWriter(Directory d, Analyzer a)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, false, null, true, DEFAULT_MAX_FIELD_LENGTH);
}

/**
* Constructs an IndexWriter for the index in
* <code>d</code>, first creating it if it does not
* already exist. Text will be analyzed with
* <code>a</code>.
*
* @param d the index directory
* @param autoCommit see <a href="#autoCommit">above</a>
* @param a the analyzer to use
* @param mfl Maximum field length: LIMITED, UNLIMITED, or user-specified
* via the MaxFieldLength constructor.
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if the directory cannot be
* read/written to or if there is any other low-level
* IO error
* @deprecated This will be removed in 3.0, when
* autoCommit will be hardwired to false. Use {@link
* #IndexWriter(Directory,Analyzer,MaxFieldLength)}
* instead, and call {@link #commit()} when needed.
*/
public IndexWriter(Directory d, boolean autoCommit, Analyzer a, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, false, null, autoCommit, mfl.getLimit());
}

/**
* Constructs an IndexWriter for the index in
* <code>d</code>, first creating it if it does not
@@ -875,45 +869,15 @@ public class IndexWriter {
* read/written to or if there is any other low-level
* IO error
* @deprecated This constructor will be removed in the 3.0 release.
* Use {@link #IndexWriter(Directory,Analyzer,MaxFieldLength)} instead.
* Use {@link
* #IndexWriter(Directory,Analyzer,MaxFieldLength)}
* instead, and call {@link #commit()} when needed.
*/
public IndexWriter(Directory d, boolean autoCommit, Analyzer a)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, false, null, autoCommit, DEFAULT_MAX_FIELD_LENGTH);
}

/**
* Constructs an IndexWriter for the index in <code>d</code>.
* Text will be analyzed with <code>a</code>. If <code>create</code>
* is true, then a new, empty index will be created in
* <code>d</code>, replacing the index already there, if any.
*
* @param d the index directory
* @param autoCommit see <a href="#autoCommit">above</a>
* @param a the analyzer to use
* @param create <code>true</code> to create the index or overwrite
* the existing one; <code>false</code> to append to the existing
* index
* @param mfl Maximum field length: LIMITED, UNLIMITED, or user-specified
* via the MaxFieldLength constructor.
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if the directory cannot be read/written to, or
* if it does not exist and <code>create</code> is
* <code>false</code> or if there is any other low-level
* IO error
* @deprecated This will be removed in 3.0, when
* autoCommit will be hardwired to false. Use {@link
* #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)}
* instead, and call {@link #commit()} when needed.
*/
public IndexWriter(Directory d, boolean autoCommit, Analyzer a, boolean create, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, create, false, null, autoCommit, mfl.getLimit());
}

/**
* Constructs an IndexWriter for the index in <code>d</code>.
* Text will be analyzed with <code>a</code>. If <code>create</code>
@@ -935,7 +899,9 @@ public class IndexWriter {
* <code>false</code> or if there is any other low-level
* IO error
* @deprecated This constructor will be removed in the 3.0 release.
* Use {@link #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)} instead.
* Use {@link
* #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)}
* instead, and call {@link #commit()} when needed.
*/
public IndexWriter(Directory d, boolean autoCommit, Analyzer a, boolean create)
throws CorruptIndexException, LockObtainFailedException, IOException {

@@ -947,8 +913,10 @@ public class IndexWriter {
* IndexDeletionPolicy}, for the index in <code>d</code>,
* first creating it if it does not already exist. Text
* will be analyzed with <code>a</code>.
* Note that autoCommit defaults to true, but starting in 3.0
* it will be hardwired to false.
*
* <p><b>NOTE</b>: autoCommit (see <a
* href="#autoCommit">above</a>) is set to false with this
* constructor.
*
* @param d the index directory
* @param a the analyzer to use
@@ -964,36 +932,7 @@ public class IndexWriter {
*/
public IndexWriter(Directory d, Analyzer a, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, false, deletionPolicy, true, mfl.getLimit());
}

/**
* Expert: constructs an IndexWriter with a custom {@link
* IndexDeletionPolicy}, for the index in <code>d</code>,
* first creating it if it does not already exist. Text
* will be analyzed with <code>a</code>.
*
* @param d the index directory
* @param autoCommit see <a href="#autoCommit">above</a>
* @param a the analyzer to use
* @param deletionPolicy see <a href="#deletionPolicy">above</a>
* @param mfl Maximum field length: LIMITED, UNLIMITED, or user-specified
* via the MaxFieldLength constructor.
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if the directory cannot be
* read/written to or if there is any other low-level
* IO error
* @deprecated This will be removed in 3.0, when
* autoCommit will be hardwired to false. Use {@link
* #IndexWriter(Directory,Analyzer,IndexDeletionPolicy,MaxFieldLength)}
* instead, and call {@link #commit()} when needed.
*/
public IndexWriter(Directory d, boolean autoCommit, Analyzer a, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, false, deletionPolicy, autoCommit, mfl.getLimit());
init(d, a, false, deletionPolicy, false, mfl.getLimit());
}

/**
@@ -1014,7 +953,9 @@ public class IndexWriter {
* read/written to or if there is any other low-level
* IO error
* @deprecated This constructor will be removed in the 3.0 release.
* Use {@link #IndexWriter(Directory,Analyzer,IndexDeletionPolicy,MaxFieldLength)} instead.
* Use {@link
* #IndexWriter(Directory,Analyzer,IndexDeletionPolicy,MaxFieldLength)}
* instead, and call {@link #commit()} when needed.
*/
public IndexWriter(Directory d, boolean autoCommit, Analyzer a, IndexDeletionPolicy deletionPolicy)
throws CorruptIndexException, LockObtainFailedException, IOException {

@@ -1028,8 +969,10 @@ public class IndexWriter {
* <code>create</code> is true, then a new, empty index
* will be created in <code>d</code>, replacing the index
* already there, if any.
* Note that autoCommit defaults to true, but starting in 3.0
* it will be hardwired to false.
*
* <p><b>NOTE</b>: autoCommit (see <a
* href="#autoCommit">above</a>) is set to false with this
* constructor.
*
* @param d the index directory
* @param a the analyzer to use
@@ -1049,42 +992,7 @@ public class IndexWriter {
*/
public IndexWriter(Directory d, Analyzer a, boolean create, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, create, false, deletionPolicy, true, mfl.getLimit());
}

/**
* Expert: constructs an IndexWriter with a custom {@link
* IndexDeletionPolicy}, for the index in <code>d</code>.
* Text will be analyzed with <code>a</code>. If
* <code>create</code> is true, then a new, empty index
* will be created in <code>d</code>, replacing the index
* already there, if any.
*
* @param d the index directory
* @param autoCommit see <a href="#autoCommit">above</a>
* @param a the analyzer to use
* @param create <code>true</code> to create the index or overwrite
* the existing one; <code>false</code> to append to the existing
* index
* @param deletionPolicy see <a href="#deletionPolicy">above</a>
* @param mfl Maximum field length: LIMITED, UNLIMITED, or user-specified
* via the MaxFieldLength constructor.
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if the directory cannot be read/written to, or
* if it does not exist and <code>create</code> is
* <code>false</code> or if there is any other low-level
* IO error
* @deprecated This will be removed in 3.0, when
* autoCommit will be hardwired to false. Use {@link
* #IndexWriter(Directory,Analyzer,boolean,IndexDeletionPolicy,MaxFieldLength)}
* instead, and call {@link #commit()} when needed.
*/
public IndexWriter(Directory d, boolean autoCommit, Analyzer a, boolean create, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, create, false, deletionPolicy, autoCommit, mfl.getLimit());
init(d, a, create, false, deletionPolicy, false, mfl.getLimit());
}

/**
@@ -1111,7 +1019,9 @@ public class IndexWriter {
* <code>false</code> or if there is any other low-level
* IO error
* @deprecated This constructor will be removed in the 3.0 release.
* Use {@link #IndexWriter(Directory,Analyzer,boolean,IndexDeletionPolicy,MaxFieldLength)} instead.
* Use {@link
* #IndexWriter(Directory,Analyzer,boolean,IndexDeletionPolicy,MaxFieldLength)}
* instead, and call {@link #commit()} when needed.
*/
public IndexWriter(Directory d, boolean autoCommit, Analyzer a, boolean create, IndexDeletionPolicy deletionPolicy)
throws CorruptIndexException, LockObtainFailedException, IOException {
@@ -1309,7 +1219,7 @@ public class IndexWriter {
* documents are large, be sure to set this value high enough to accomodate
* the expected size. If you set it to Integer.MAX_VALUE, then the only limit
* is your memory, but you should anticipate an OutOfMemoryError.<p/>
* By default, no more than {@link IndexWriter#DEFAULT_MAX_FIELD_LENGTH} terms
* By default, no more than {@link #DEFAULT_MAX_FIELD_LENGTH} terms
* will be indexed for a field.
*/
public void setMaxFieldLength(int maxFieldLength) {
@@ -2594,7 +2504,7 @@ public class IndexWriter {
}
success = true;
} finally {
// Releaes the write lock if our caller held it, on
// Release the write lock if our caller held it, on
// hitting an exception
if (!success && haveWriteLock)
releaseWrite();
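For context on the write lock being released above, a small sketch of the public behavior (illustrative only, not part of this patch): only one IndexWriter may hold write.lock on a Directory at a time, and a second writer fails with LockObtainFailedException until the first is closed.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.RAMDirectory;

public class WriteLockSketch {
  public static void main(String[] args) throws Exception {
    RAMDirectory dir = new RAMDirectory();
    IndexWriter first = new IndexWriter(dir, new StandardAnalyzer(), true,
                                        IndexWriter.MaxFieldLength.LIMITED);
    try {
      // A second writer on the same Directory cannot obtain write.lock.
      new IndexWriter(dir, new StandardAnalyzer(), false,
                      IndexWriter.MaxFieldLength.LIMITED);
    } catch (LockObtainFailedException e) {
      System.out.println("write.lock still held: " + e.getMessage());
    } finally {
      first.close();  // releases write.lock
    }
  }
}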
@@ -4795,7 +4705,7 @@ public class IndexWriter {

/**
* Specifies maximum field length in {@link IndexWriter} constructors.
* {@link IndexWriter#setMaxFieldLength(int)} overrides the value set by
* {@link #setMaxFieldLength(int)} overrides the value set by
* the constructor.
*/
public static final class MaxFieldLength {
@@ -4838,7 +4748,7 @@ public class IndexWriter {

/**
* Sets the maximum field length to
* {@link IndexWriter#DEFAULT_MAX_FIELD_LENGTH}
* {@link #DEFAULT_MAX_FIELD_LENGTH}
* */
public static final MaxFieldLength LIMITED
= new MaxFieldLength("LIMITED", DEFAULT_MAX_FIELD_LENGTH);
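A brief usage sketch of the MaxFieldLength options documented above (illustrative only, not part of this patch): LIMITED truncates each field at DEFAULT_MAX_FIELD_LENGTH terms, UNLIMITED does not, and setMaxFieldLength(int) overrides whichever value the constructor set.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.RAMDirectory;

public class MaxFieldLengthSketch {
  public static void main(String[] args) throws Exception {
    RAMDirectory dir = new RAMDirectory();
    // UNLIMITED indexes every term of every field; bounded only by memory.
    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true,
                                         IndexWriter.MaxFieldLength.UNLIMITED);
    writer.setMaxFieldLength(50000);  // overrides the limit chosen in the constructor
    writer.close();
  }
}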
@@ -109,7 +109,7 @@ final class SegmentInfo {
this.docStoreIsCompoundFile = docStoreIsCompoundFile;
this.hasProx = hasProx;
delCount = 0;
assert docStoreOffset == -1 || docStoreSegment != null;
assert docStoreOffset == -1 || docStoreSegment != null: "dso=" + docStoreOffset + " dss=" + docStoreSegment + " docCount=" + docCount;
}

/**
@@ -91,7 +91,7 @@ public class TestMergeSchedulerExternal extends LuceneTestCase {
Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
doc.add(idField);

IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
MyMergeScheduler ms = new MyMergeScheduler();
writer.setMergeScheduler(ms);
writer.setMaxBufferedDocs(2);
@@ -67,8 +67,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase
Directory dir = new MockRAMDirectory();

SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
IndexWriter writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp,
IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new IndexWriter(dir, true,new StandardAnalyzer(), dp);
// Force frequent commits
writer.setMaxBufferedDocs(2);
Document doc = new Document();

@@ -80,8 +79,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase
writer.close();
copyFiles(dir, cp);

writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp,
IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp);
copyFiles(dir, cp);
for(int i=0;i<7;i++)
writer.addDocument(doc);

@@ -89,8 +87,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase
writer.close();
copyFiles(dir, cp);
dp.release();
writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp,
IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp);
writer.close();
try {
copyFiles(dir, cp);

@@ -106,8 +103,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase
final long stopTime = System.currentTimeMillis() + 7000;

SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
final IndexWriter writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp,
IndexWriter.MaxFieldLength.LIMITED);
final IndexWriter writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp);

// Force frequent commits
writer.setMaxBufferedDocs(2);
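For context on what these tests exercise, a hedged sketch of SnapshotDeletionPolicy used with the non-deprecated constructor (illustrative only; the actual file copy is elided):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
import org.apache.lucene.index.SnapshotDeletionPolicy;
import org.apache.lucene.store.RAMDirectory;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    RAMDirectory dir = new RAMDirectory();
    SnapshotDeletionPolicy dp =
        new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), dp,
                                         IndexWriter.MaxFieldLength.LIMITED);
    Document doc = new Document();
    doc.add(new Field("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
    writer.addDocument(doc);
    writer.commit();   // ensure there is a commit point to snapshot
    try {
      dp.snapshot();   // pin the current commit's files, e.g. while copying a backup
      // ... copy the index files somewhere safe ...
    } finally {
      dp.release();    // allow the snapshotted commit to be deleted again
    }
    writer.close();
  }
}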
@@ -425,7 +425,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {

private IndexWriter newWriter(Directory dir, boolean create)
throws IOException {
final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), create, IndexWriter.MaxFieldLength.LIMITED);
final IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), create);
writer.setMergePolicy(new LogDocMergePolicy());
return writer;
}

@@ -499,7 +499,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
public void testHangOnClose() throws IOException {

Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMergePolicy(new LogByteSizeMergePolicy());
writer.setMaxBufferedDocs(5);
writer.setUseCompoundFile(false);

@@ -525,7 +525,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
writer.close();

Directory dir2 = new MockRAMDirectory();
writer = new IndexWriter(dir2, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(dir2, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy();
lmp.setMinMergeMB(0.0001);
writer.setMergePolicy(lmp);
@@ -33,8 +33,8 @@ public class TestAtomicUpdate extends LuceneTestCase {

public class MockIndexWriter extends IndexWriter {

public MockIndexWriter(Directory dir, boolean autoCommit, Analyzer a, boolean create, MaxFieldLength mfl) throws IOException {
super(dir, autoCommit, a, create, mfl);
public MockIndexWriter(Directory dir, boolean autoCommit, Analyzer a, boolean create) throws IOException {
super(dir, autoCommit, a, create);
}

boolean testPoint(String name) {

@@ -125,7 +125,7 @@ public class TestAtomicUpdate extends LuceneTestCase {

TimedThread[] threads = new TimedThread[4];

IndexWriter writer = new MockIndexWriter(directory, true, ANALYZER, true, IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new MockIndexWriter(directory, true, ANALYZER, true);
writer.setMaxBufferedDocs(7);
writer.setMergeFactor(3);
@@ -257,7 +257,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase
Directory dir = FSDirectory.getDirectory(dirName);

// open writer
IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false);

// add 10 docs
for(int i=0;i<10;i++) {

@@ -295,7 +295,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase
searcher.close();

// optimize
writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false);
writer.optimize();
writer.close();

@@ -345,7 +345,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase
searcher.close();

// optimize
IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false);
writer.optimize();
writer.close();

@@ -402,7 +402,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase

boolean autoCommit = 0 == pass;

IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);
writer.setRAMBufferSizeMB(16.0);
for(int i=0;i<35;i++) {
addDoc(writer, i);
@@ -63,7 +63,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
FailOnlyOnFlush failure = new FailOnlyOnFlush();
directory.failOn(failure);

IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new IndexWriter(directory, true, ANALYZER, true);
ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
writer.setMergeScheduler(cms);
writer.setMaxBufferedDocs(2);

@@ -100,7 +100,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {

RAMDirectory directory = new MockRAMDirectory();

IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new IndexWriter(directory, true, ANALYZER, true);
ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
writer.setMergeScheduler(cms);

@@ -145,7 +145,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
for(int pass=0;pass<2;pass++) {

boolean autoCommit = pass==0;
IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true, IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true);

for(int iter=0;iter<7;iter++) {
ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();

@@ -162,7 +162,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
TestIndexWriter.assertNoUnreferencedFiles(directory, "testNoExtraFiles autoCommit=" + autoCommit);

// Reopen
writer = new IndexWriter(directory, autoCommit, ANALYZER, false, IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(directory, autoCommit, ANALYZER, false);
}

writer.close();

@@ -180,7 +180,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {

for(int pass=0;pass<2;pass++) {
boolean autoCommit = pass==0;
IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true, IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true);

for(int iter=0;iter<10;iter++) {
ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();

@@ -212,7 +212,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
reader.close();

// Reopen
writer = new IndexWriter(directory, autoCommit, ANALYZER, false, IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(directory, autoCommit, ANALYZER, false);
}
writer.close();
}
@@ -206,7 +206,7 @@ public class TestDeletionPolicy extends LuceneTestCase

Directory dir = new RAMDirectory();
ExpirationTimeDeletionPolicy policy = new ExpirationTimeDeletionPolicy(dir, SECONDS);
IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
writer.setUseCompoundFile(useCompoundFile);
writer.close();

@@ -215,7 +215,7 @@ public class TestDeletionPolicy extends LuceneTestCase
// Record last time when writer performed deletes of
// past commits
lastDeleteTime = System.currentTimeMillis();
writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
writer.setUseCompoundFile(useCompoundFile);
for(int j=0;j<17;j++) {
addDoc(writer);

@@ -277,7 +277,7 @@ public class TestDeletionPolicy extends LuceneTestCase
Directory dir = new RAMDirectory();
policy.dir = dir;

IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
writer.setMaxBufferedDocs(10);
writer.setUseCompoundFile(useCompoundFile);
writer.setMergeScheduler(new SerialMergeScheduler());

@@ -288,7 +288,7 @@ public class TestDeletionPolicy extends LuceneTestCase
}
writer.close();

writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
writer.setUseCompoundFile(useCompoundFile);
writer.optimize();
writer.close();

@@ -333,7 +333,7 @@ public class TestDeletionPolicy extends LuceneTestCase
// Open & close a writer and assert that it
// actually removed something:
int preCount = dir.list().length;
writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.LIMITED);
writer.close();
int postCount = dir.list().length;
assertTrue(postCount < preCount);

@@ -359,7 +359,7 @@ public class TestDeletionPolicy extends LuceneTestCase

Directory dir = new RAMDirectory();

IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
writer.setMaxBufferedDocs(10);
writer.setUseCompoundFile(useCompoundFile);
for(int i=0;i<107;i++) {

@@ -367,7 +367,7 @@ public class TestDeletionPolicy extends LuceneTestCase
}
writer.close();

writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
writer.setUseCompoundFile(useCompoundFile);
writer.optimize();
writer.close();
@@ -404,7 +404,7 @@ public class TestDeletionPolicy extends LuceneTestCase
KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);

for(int j=0;j<N+1;j++) {
IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
writer.setMaxBufferedDocs(10);
writer.setUseCompoundFile(useCompoundFile);
for(int i=0;i<17;i++) {

@@ -464,14 +464,14 @@ public class TestDeletionPolicy extends LuceneTestCase
KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);

Directory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
writer.setUseCompoundFile(useCompoundFile);
writer.close();
Term searchTerm = new Term("content", "aaa");
Query query = new TermQuery(searchTerm);

for(int i=0;i<N+1;i++) {
writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
writer.setUseCompoundFile(useCompoundFile);
for(int j=0;j<17;j++) {
addDoc(writer);

@@ -488,7 +488,7 @@ public class TestDeletionPolicy extends LuceneTestCase
reader.close();
searcher.close();
}
writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
writer.setUseCompoundFile(useCompoundFile);
writer.optimize();
// this is a commit when autoCommit=false:

@@ -564,7 +564,7 @@ public class TestDeletionPolicy extends LuceneTestCase
KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);

Directory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
writer.setMaxBufferedDocs(10);
writer.setUseCompoundFile(useCompoundFile);
writer.close();

@@ -573,7 +573,7 @@ public class TestDeletionPolicy extends LuceneTestCase

for(int i=0;i<N+1;i++) {

writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
writer.setMaxBufferedDocs(10);
writer.setUseCompoundFile(useCompoundFile);
for(int j=0;j<17;j++) {

@@ -591,7 +591,7 @@ public class TestDeletionPolicy extends LuceneTestCase
reader.close();
searcher.close();

writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
// This will not commit: there are no changes
// pending because we opened for "create":
writer.close();
@@ -104,7 +104,7 @@ public class TestIndexWriter extends LuceneTestCase
reader.close();

// optimize the index and check that the new doc count is correct
writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(dir, true, new WhitespaceAnalyzer());
assertEquals(100, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.optimize();

@@ -241,7 +241,7 @@ public class TestIndexWriter extends LuceneTestCase

// Make a new dir that will enforce disk usage:
MockRAMDirectory dir = new MockRAMDirectory(startDir);
writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false);
IOException err = null;

MergeScheduler ms = writer.getMergeScheduler();

@@ -478,7 +478,7 @@ public class TestIndexWriter extends LuceneTestCase
System.out.println("TEST: cycle: diskFree=" + diskFree);
MockRAMDirectory dir = new MockRAMDirectory();
dir.setMaxSizeInBytes(diskFree);
IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);

MergeScheduler ms = writer.getMergeScheduler();
if (ms instanceof ConcurrentMergeScheduler)

@@ -1027,7 +1027,7 @@ public class TestIndexWriter extends LuceneTestCase

IndexReader reader = IndexReader.open(dir);

writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
for(int i=0;i<3;i++) {
for(int j=0;j<11;j++) {
addDoc(writer);

@@ -1073,7 +1073,7 @@ public class TestIndexWriter extends LuceneTestCase
assertEquals("first number of hits", 14, hits.length);
searcher.close();

writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(10);
for(int j=0;j<17;j++) {
addDoc(writer);

@@ -1098,7 +1098,7 @@ public class TestIndexWriter extends LuceneTestCase

// Now make sure we can re-open the index, add docs,
// and all is good:
writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(10);

// On abort, writer in fact may write to the same

@@ -1141,7 +1141,7 @@ public class TestIndexWriter extends LuceneTestCase
dir.resetMaxUsedSizeInBytes();

long startDiskUsage = dir.getMaxUsedSizeInBytes();
writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(10);
writer.setMergeScheduler(new SerialMergeScheduler());
for(int j=0;j<1470;j++) {

@@ -1183,7 +1183,7 @@ public class TestIndexWriter extends LuceneTestCase
}
writer.close();

writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
writer.optimize();

// Open a reader before closing (commiting) the writer:

@@ -1205,7 +1205,7 @@ public class TestIndexWriter extends LuceneTestCase
assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
reader.close();

writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
writer.optimize();
writer.close();
assertNoUnreferencedFiles(dir, "aborted writer after optimize");
@@ -2130,7 +2130,7 @@ public class TestIndexWriter extends LuceneTestCase
MockRAMDirectory dir = new MockRAMDirectory();
int delID = 0;
for(int i=0;i<20;i++) {
IndexWriter writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
writer.setMergeFactor(2);
writer.setUseCompoundFile(false);

@@ -2166,7 +2166,7 @@ public class TestIndexWriter extends LuceneTestCase
reader.close();

if (0 == i % 4) {
writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
writer.setUseCompoundFile(false);
writer.optimize();
writer.close();

@@ -2183,7 +2183,7 @@ public class TestIndexWriter extends LuceneTestCase

for(int pass=0;pass<3;pass++) {
boolean autoCommit = pass%2 == 0;
IndexWriter writer = new IndexWriter(directory, autoCommit, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new IndexWriter(directory, autoCommit, new WhitespaceAnalyzer(), true);

//System.out.println("TEST: pass=" + pass + " ac=" + autoCommit + " cms=" + (pass >= 2));
for(int iter=0;iter<10;iter++) {

@@ -2261,7 +2261,7 @@ public class TestIndexWriter extends LuceneTestCase
reader.close();

// Reopen
writer = new IndexWriter(directory, autoCommit, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
writer = new IndexWriter(directory, autoCommit, new WhitespaceAnalyzer(), false);
}
writer.close();
}

@@ -2433,7 +2433,7 @@ public class TestIndexWriter extends LuceneTestCase
}
}

// LUCENE-1130: make sure immeidate disk full on creating
// LUCENE-1130: make sure immediate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()), with
// multiple threads, is OK:
public void testImmediateDiskFullWithThreads() throws IOException {

@@ -2442,7 +2442,7 @@ public class TestIndexWriter extends LuceneTestCase

for(int iter=0;iter<10;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer());
ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
// We expect disk full exceptions in the merge threads
cms.setSuppressExceptions();

@@ -2513,7 +2513,7 @@ public class TestIndexWriter extends LuceneTestCase
public void _testSingleThreadFailure(MockRAMDirectory.Failure failure) throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();

IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer());
writer.setMaxBufferedDocs(2);
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));

@@ -2774,7 +2774,7 @@ public class TestIndexWriter extends LuceneTestCase
public void testForceCommit() throws IOException {
Directory dir = new MockRAMDirectory();

IndexWriter writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
writer.setMergeFactor(5);

@@ -2827,7 +2827,7 @@ public class TestIndexWriter extends LuceneTestCase
FailOnlyInSync failure = new FailOnlyInSync();
dir.failOn(failure);

IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer());
failure.setDoFail();

ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
@@ -2858,8 +2858,7 @@ public class TestIndexWriter extends LuceneTestCase
for(int iter=0;iter<4;iter++) {
final boolean autoCommit = 1==iter/2;
IndexWriter writer = new IndexWriter(dir,
autoCommit, new StandardAnalyzer(),
IndexWriter.MaxFieldLength.LIMITED);
autoCommit, new StandardAnalyzer());
writer.setMaxBufferedDocs(2);
writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
writer.setMergeScheduler(new SerialMergeScheduler());

@@ -2892,8 +2891,7 @@ public class TestIndexWriter extends LuceneTestCase
reader.close();

writer = new IndexWriter(dir,
autoCommit, new StandardAnalyzer(),
IndexWriter.MaxFieldLength.LIMITED);
autoCommit, new StandardAnalyzer());
writer.setMaxBufferedDocs(2);
writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
writer.setMergeScheduler(new SerialMergeScheduler());

@@ -2912,8 +2910,7 @@ public class TestIndexWriter extends LuceneTestCase
for(int iter=0;iter<4;iter++) {
final boolean autoCommit = 1==iter/2;
IndexWriter writer = new IndexWriter(dir,
autoCommit, new StandardAnalyzer(),
IndexWriter.MaxFieldLength.LIMITED);
autoCommit, new StandardAnalyzer());
writer.setMaxBufferedDocs(2);
writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
writer.setMergeScheduler(new SerialMergeScheduler());

@@ -2950,7 +2947,7 @@ public class TestIndexWriter extends LuceneTestCase
public void testTermVectorCorruption3() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir,
false, new StandardAnalyzer(),
new StandardAnalyzer(),
IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);

@@ -2972,7 +2969,7 @@ public class TestIndexWriter extends LuceneTestCase
writer.close();

writer = new IndexWriter(dir,
false, new StandardAnalyzer(),
new StandardAnalyzer(),
IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);

@@ -3020,7 +3017,7 @@ public class TestIndexWriter extends LuceneTestCase
public void testExpungeDeletes() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir,
false, new StandardAnalyzer(),
new StandardAnalyzer(),
IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);

@@ -3048,7 +3045,7 @@ public class TestIndexWriter extends LuceneTestCase
ir.close();

writer = new IndexWriter(dir,
false, new StandardAnalyzer(),
new StandardAnalyzer(),
IndexWriter.MaxFieldLength.LIMITED);
assertEquals(8, writer.numDocs());
assertEquals(10, writer.maxDoc());

@@ -3066,7 +3063,7 @@ public class TestIndexWriter extends LuceneTestCase
public void testExpungeDeletes2() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir,
false, new StandardAnalyzer(),
new StandardAnalyzer(),
IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
writer.setMergeFactor(50);

@@ -3095,7 +3092,7 @@ public class TestIndexWriter extends LuceneTestCase
ir.close();

writer = new IndexWriter(dir,
false, new StandardAnalyzer(),
new StandardAnalyzer(),
IndexWriter.MaxFieldLength.LIMITED);
writer.setMergeFactor(3);
assertEquals(49, writer.numDocs());

@@ -3113,7 +3110,7 @@ public class TestIndexWriter extends LuceneTestCase
public void testExpungeDeletes3() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir,
false, new StandardAnalyzer(),
new StandardAnalyzer(),
IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
writer.setMergeFactor(50);

@@ -3142,7 +3139,7 @@ public class TestIndexWriter extends LuceneTestCase
ir.close();

writer = new IndexWriter(dir,
false, new StandardAnalyzer(),
new StandardAnalyzer(),
IndexWriter.MaxFieldLength.LIMITED);
// Force many merges to happen
writer.setMergeFactor(3);
@@ -3168,8 +3165,8 @@ public class TestIndexWriter extends LuceneTestCase
// LUCENE-1198
public class MockIndexWriter extends IndexWriter {

public MockIndexWriter(Directory dir, boolean autoCommit, Analyzer a, boolean create, MaxFieldLength mfl) throws IOException {
super(dir, autoCommit, a, create, mfl);
public MockIndexWriter(Directory dir, Analyzer a, boolean create, MaxFieldLength mfl) throws IOException {
super(dir, a, create, mfl);
}

boolean doFail;

@@ -3183,7 +3180,7 @@ public class TestIndexWriter extends LuceneTestCase

public void testExceptionDocumentsWriterInit() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
MockIndexWriter w = new MockIndexWriter(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
MockIndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));

@@ -3203,7 +3200,7 @@ public class TestIndexWriter extends LuceneTestCase
// LUCENE-1208
public void testExceptionJustBeforeFlush() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
MockIndexWriter w = new MockIndexWriter(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
MockIndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
w.setMaxBufferedDocs(2);
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,

@@ -3232,8 +3229,8 @@ public class TestIndexWriter extends LuceneTestCase

public class MockIndexWriter2 extends IndexWriter {

public MockIndexWriter2(Directory dir, boolean autoCommit, Analyzer a, boolean create, MaxFieldLength mfl) throws IOException {
super(dir, autoCommit, a, create, mfl);
public MockIndexWriter2(Directory dir, Analyzer a, boolean create, MaxFieldLength mfl) throws IOException {
super(dir, a, create, mfl);
}

boolean doFail;

@@ -3251,7 +3248,7 @@ public class TestIndexWriter extends LuceneTestCase
// LUCENE-1210
public void testExceptionOnMergeInit() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
MockIndexWriter2 w = new MockIndexWriter2(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
MockIndexWriter2 w = new MockIndexWriter2(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
w.setMaxBufferedDocs(2);
w.setMergeFactor(2);
w.doFail = true;

@@ -3274,8 +3271,8 @@ public class TestIndexWriter extends LuceneTestCase

public class MockIndexWriter3 extends IndexWriter {

public MockIndexWriter3(Directory dir, boolean autoCommit, Analyzer a, boolean create, IndexWriter.MaxFieldLength mfl) throws IOException {
super(dir, autoCommit, a, create, mfl);
public MockIndexWriter3(Directory dir, Analyzer a, boolean create, IndexWriter.MaxFieldLength mfl) throws IOException {
super(dir, a, create, mfl);
}

boolean wasCalled;

@@ -3288,7 +3285,7 @@ public class TestIndexWriter extends LuceneTestCase
// LUCENE-1222
public void testDoAfterFlush() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
MockIndexWriter3 w = new MockIndexWriter3(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
MockIndexWriter3 w = new MockIndexWriter3(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));

@@ -3340,7 +3337,7 @@ public class TestIndexWriter extends LuceneTestCase
public void testExceptionsDuringCommit() throws Throwable {
MockRAMDirectory dir = new MockRAMDirectory();
FailOnlyInCommit failure = new FailOnlyInCommit();
IndexWriter w = new IndexWriter(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));

@@ -3388,7 +3385,7 @@ public class TestIndexWriter extends LuceneTestCase
// LUCENE-510
public void testInvalidUTF16() throws Throwable {
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();

final int count = utf8Data.length/2;

@@ -3592,7 +3589,7 @@ public class TestIndexWriter extends LuceneTestCase
tokens.add(t);

MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
|
||||
IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
|
||||
Document doc = new Document();
|
||||
doc.add(new Field("field", tokens));
|
||||
w.addDocument(doc);
|
||||
|
@ -3624,7 +3621,7 @@ public class TestIndexWriter extends LuceneTestCase
|
|||
public void testPrepareCommit() throws IOException {
|
||||
Directory dir = new MockRAMDirectory();
|
||||
|
||||
IndexWriter writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
|
||||
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer.setMaxBufferedDocs(2);
|
||||
writer.setMergeFactor(5);
|
||||
|
||||
|
@ -3676,7 +3673,7 @@ public class TestIndexWriter extends LuceneTestCase
|
|||
MockRAMDirectory dir = new MockRAMDirectory();
|
||||
dir.setPreventDoubleWrite(false);
|
||||
|
||||
IndexWriter writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
|
||||
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
|
||||
|
||||
writer.setMaxBufferedDocs(2);
|
||||
writer.setMergeFactor(5);
|
||||
|
@ -3701,7 +3698,7 @@ public class TestIndexWriter extends LuceneTestCase
|
|||
reader.close();
|
||||
reader2.close();
|
||||
|
||||
writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
|
||||
for (int i = 0; i < 17; i++)
|
||||
addDoc(writer);
|
||||
|
||||
|
@ -3729,7 +3726,7 @@ public class TestIndexWriter extends LuceneTestCase
|
|||
public void testPrepareCommitNoChanges() throws IOException {
|
||||
MockRAMDirectory dir = new MockRAMDirectory();
|
||||
|
||||
IndexWriter writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
|
||||
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer.prepareCommit();
|
||||
writer.commit();
|
||||
writer.close();
|
||||
|
@ -3756,14 +3753,14 @@ public class TestIndexWriter extends LuceneTestCase
|
|||
public RunAddIndexesThreads(int numCopy) throws Throwable {
|
||||
NUM_COPY = numCopy;
|
||||
dir = new MockRAMDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
|
||||
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer.setMaxBufferedDocs(2);
|
||||
for (int i = 0; i < NUM_INIT_DOCS; i++)
|
||||
addDoc(writer);
|
||||
writer.close();
|
||||
|
||||
dir2 = new MockRAMDirectory();
|
||||
writer2 = new IndexWriter(dir2, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
|
||||
cms = (ConcurrentMergeScheduler) writer2.getMergeScheduler();
|
||||
|
||||
readers = new IndexReader[NUM_COPY];
|
||||
|
@ -4015,8 +4012,8 @@ public class TestIndexWriter extends LuceneTestCase
|
|||
// LUCENE-1347
|
||||
public class MockIndexWriter4 extends IndexWriter {
|
||||
|
||||
public MockIndexWriter4(Directory dir, boolean autoCommit, Analyzer a, boolean create, MaxFieldLength mfl) throws IOException {
|
||||
super(dir, autoCommit, a, create, mfl);
|
||||
public MockIndexWriter4(Directory dir, Analyzer a, boolean create, MaxFieldLength mfl) throws IOException {
|
||||
super(dir, a, create, mfl);
|
||||
}
|
||||
|
||||
boolean doFail;
|
||||
|
@ -4031,7 +4028,7 @@ public class TestIndexWriter extends LuceneTestCase
|
|||
// LUCENE-1347
|
||||
public void testRollbackExceptionHang() throws Throwable {
|
||||
MockRAMDirectory dir = new MockRAMDirectory();
|
||||
MockIndexWriter4 w = new MockIndexWriter4(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
MockIndexWriter4 w = new MockIndexWriter4(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
|
||||
addDoc(w);
|
||||
w.doFail = true;
|
||||
|
@ -4050,7 +4047,7 @@ public class TestIndexWriter extends LuceneTestCase
|
|||
// LUCENE-1219
|
||||
public void testBinaryFieldOffsetLength() throws IOException {
|
||||
MockRAMDirectory dir = new MockRAMDirectory();
|
||||
IndexWriter w = new IndexWriter(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
|
||||
IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
|
||||
byte[] b = new byte[50];
|
||||
for(int i=0;i<50;i++)
|
||||
b[i] = (byte) (i+77);
|
||||
|
@ -4121,7 +4118,7 @@ public class TestIndexWriter extends LuceneTestCase
|
|||
|
||||
public void testOptimizeExceptions() throws IOException {
|
||||
RAMDirectory startDir = new MockRAMDirectory();
|
||||
IndexWriter w = new IndexWriter(startDir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
|
||||
IndexWriter w = new IndexWriter(startDir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
|
||||
w.setMaxBufferedDocs(2);
|
||||
w.setMergeFactor(100);
|
||||
for(int i=0;i<27;i++)
|
||||
|
@ -4130,7 +4127,7 @@ public class TestIndexWriter extends LuceneTestCase
|
|||
|
||||
for(int i=0;i<200;i++) {
|
||||
MockRAMDirectory dir = new MockRAMDirectory(startDir);
|
||||
w = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
|
||||
w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
|
||||
((ConcurrentMergeScheduler) w.getMergeScheduler()).setSuppressExceptions();
|
||||
dir.setRandomIOExceptionRate(0.5, 100);
|
||||
try {
|
||||
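The TestIndexWriter hunks above are all the same mechanical migration: calls that passed an explicit autoCommit=false flag alongside a MaxFieldLength now use the constructors without the flag. A minimal before/after sketch of that pattern (illustrative only; the class name and the RAMDirectory/WhitespaceAnalyzer fixtures are stand-ins, not code from this commit):

import java.io.IOException;

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class DropAutoCommitFlag {
  public static void main(String[] args) throws IOException {
    Directory dir = new RAMDirectory();

    // Before: new IndexWriter(dir, false, new WhitespaceAnalyzer(),
    //                         true, IndexWriter.MaxFieldLength.LIMITED);
    // After: the explicit false is dropped; per these hunks the flag-less
    // constructor is the non-auto-committing one, so the call is equivalent.
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
                                         true, IndexWriter.MaxFieldLength.LIMITED);

    Document doc = new Document();
    doc.add(new Field("field", "a field", Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);

    writer.commit();  // changes become visible only on commit()/close()
    writer.close();
  }
}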
@@ -45,7 +45,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {

     Directory dir = new MockRAMDirectory();
     IndexWriter modifier = new IndexWriter(dir, autoCommit,
-        new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+        new WhitespaceAnalyzer(), true);
     modifier.setUseCompoundFile(true);
     modifier.setMaxBufferedDeleteTerms(1);

@@ -85,7 +85,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {

     Directory dir = new MockRAMDirectory();
     IndexWriter modifier = new IndexWriter(dir, autoCommit,
-        new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+        new WhitespaceAnalyzer(), true);
     modifier.setMaxBufferedDocs(2);
     modifier.setMaxBufferedDeleteTerms(2);

@@ -123,7 +123,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
     boolean autoCommit = (0==pass);
     Directory dir = new MockRAMDirectory();
     IndexWriter writer = new IndexWriter(dir, autoCommit,
-        new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+        new WhitespaceAnalyzer(), true);
     writer.setMaxBufferedDeleteTerms(1);
     writer.deleteDocuments(new Term("foobar", "1"));
     writer.deleteDocuments(new Term("foobar", "1"));

@@ -141,7 +141,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
     boolean autoCommit = (0==pass);
     Directory dir = new MockRAMDirectory();
     IndexWriter modifier = new IndexWriter(dir, autoCommit,
-        new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+        new WhitespaceAnalyzer(), true);
     modifier.setMaxBufferedDocs(4);
     modifier.setMaxBufferedDeleteTerms(4);

@@ -187,7 +187,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {

     Directory dir = new MockRAMDirectory();
     IndexWriter modifier = new IndexWriter(dir, autoCommit,
-        new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+        new WhitespaceAnalyzer(), true);
     modifier.setMaxBufferedDocs(100);
     modifier.setMaxBufferedDeleteTerms(100);

@@ -223,7 +223,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
     boolean autoCommit = (0==pass);
     Directory dir = new MockRAMDirectory();
     IndexWriter modifier = new IndexWriter(dir, autoCommit,
-        new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+        new WhitespaceAnalyzer(), true);
     modifier.setMaxBufferedDocs(2);
     modifier.setMaxBufferedDeleteTerms(2);

@@ -307,7 +307,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
     // First build up a starting index:
     MockRAMDirectory startDir = new MockRAMDirectory();
     IndexWriter writer = new IndexWriter(startDir, autoCommit,
-        new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+        new WhitespaceAnalyzer(), true);
     for (int i = 0; i < 157; i++) {
       Document d = new Document();
       d.add(new Field("id", Integer.toString(i), Field.Store.YES,

@@ -330,7 +330,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
     MockRAMDirectory dir = new MockRAMDirectory(startDir);
     dir.setPreventDoubleWrite(false);
     IndexWriter modifier = new IndexWriter(dir, autoCommit,
-        new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+        new WhitespaceAnalyzer());

     modifier.setMaxBufferedDocs(1000); // use flush or close
     modifier.setMaxBufferedDeleteTerms(1000); // use flush or close

@@ -537,7 +537,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
     boolean autoCommit = (0==pass);
     MockRAMDirectory dir = new MockRAMDirectory();
     IndexWriter modifier = new IndexWriter(dir, autoCommit,
-        new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+        new WhitespaceAnalyzer(), true);
     modifier.setUseCompoundFile(true);
     modifier.setMaxBufferedDeleteTerms(2);

@@ -647,7 +647,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
     boolean autoCommit = (0==pass);
     MockRAMDirectory dir = new MockRAMDirectory();
     IndexWriter modifier = new IndexWriter(dir, autoCommit,
-        new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+        new WhitespaceAnalyzer(), true);

     dir.failOn(failure.reset());
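Where a test still loops over both autoCommit settings, as the boolean autoCommit = (0==pass) hunks above do, the call keeps the explicit flag and drops the MaxFieldLength argument instead. A hedged sketch of that shape (the class name and the trivial document are placeholders, not code from this commit):

import java.io.IOException;

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class KeepAutoCommitFlag {
  public static void main(String[] args) throws IOException {
    for (int pass = 0; pass < 2; pass++) {
      boolean autoCommit = (0 == pass);  // exercise both modes, as the tests do
      Directory dir = new RAMDirectory();

      // Before: new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(),
      //                         true, IndexWriter.MaxFieldLength.LIMITED);
      // After: the flag stays, MaxFieldLength goes.
      IndexWriter modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);
      modifier.setMaxBufferedDeleteTerms(1);

      Document d = new Document();
      d.add(new Field("id", "1", Field.Store.YES, Field.Index.ANALYZED));
      modifier.addDocument(d);
      modifier.close();
    }
  }
}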
@@ -114,7 +114,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
     Random r = new java.util.Random(17);

     public MockIndexWriter(Directory dir, Analyzer a, boolean create, MaxFieldLength mfl) throws IOException {
-      super(dir, false, a, create, mfl);
+      super(dir, a, create, mfl);
     }

     boolean testPoint(String name) {
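The same change shows up in test subclasses of IndexWriter: where a wrapper used to forward an explicit false to super, it now calls the four-argument super constructor. An illustrative wrapper in that style (hypothetical class name; the failure-injection hooks the real tests add are omitted):

import java.io.IOException;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;

public class WrappedIndexWriter extends IndexWriter {

  boolean doFail;  // toggled by a test to make an overridden hook misbehave

  public WrappedIndexWriter(Directory dir, Analyzer a, boolean create,
                            IndexWriter.MaxFieldLength mfl) throws IOException {
    // Before: super(dir, false, a, create, mfl);
    // After: the autoCommit argument is gone from the super call.
    super(dir, a, create, mfl);
  }
}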
@@ -125,7 +125,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
   public void testMaxBufferedDocsChange() throws IOException {
     Directory dir = new RAMDirectory();

-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), true);
     writer.setMaxBufferedDocs(101);
     writer.setMergeFactor(101);
     writer.setMergePolicy(new LogDocMergePolicy());

@@ -139,7 +139,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
     }
     writer.close();

-    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
+    writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), false);
     writer.setMaxBufferedDocs(101);
     writer.setMergeFactor(101);
     writer.setMergePolicy(new LogDocMergePolicy());

@@ -167,7 +167,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
   public void testMergeDocCount0() throws IOException {
     Directory dir = new RAMDirectory();

-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), true);
     writer.setMergePolicy(new LogDocMergePolicy());
     writer.setMaxBufferedDocs(10);
     writer.setMergeFactor(100);

@@ -182,7 +182,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
     reader.deleteDocuments(new Term("content", "aaa"));
     reader.close();

-    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
+    writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), false);
     writer.setMergePolicy(new LogDocMergePolicy());
     writer.setMaxBufferedDocs(10);
     writer.setMergeFactor(5);
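TestIndexWriterMergePolicy moves in the opposite direction: it switches from the MaxFieldLength constructor to the one that takes autoCommit explicitly and passes true, presumably to keep the auto-committing behaviour the test relied on. A sketch of the resulting calls (class name and the single document are placeholders):

import java.io.IOException;

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class PinAutoCommitTrue {
  public static void main(String[] args) throws IOException {
    Directory dir = new RAMDirectory();

    // Create the index with autoCommit pinned to true via the boolean constructor.
    IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), true);
    writer.setMaxBufferedDocs(10);
    writer.setMergeFactor(10);

    Document doc = new Document();
    doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);
    writer.close();

    // Reopen the existing index the same way, with create=false (as in the later hunks).
    writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), false);
    writer.close();
  }
}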
@@ -116,7 +116,7 @@ public class TestStressIndexing extends LuceneTestCase {
     stress test.
   */
   public void runStressTest(Directory directory, boolean autoCommit, MergeScheduler mergeScheduler) throws Exception {
-    IndexWriter modifier = new IndexWriter(directory, autoCommit, ANALYZER, true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter modifier = new IndexWriter(directory, autoCommit, ANALYZER, true);

     modifier.setMaxBufferedDocs(10);
@@ -40,8 +40,8 @@ public class TestStressIndexing2 extends LuceneTestCase {

   public class MockIndexWriter extends IndexWriter {

-    public MockIndexWriter(Directory dir, boolean autoCommit, Analyzer a, boolean create, MaxFieldLength mfl) throws IOException {
-      super(dir, autoCommit, a, create, mfl);
+    public MockIndexWriter(Directory dir, boolean autoCommit, Analyzer a, boolean create) throws IOException {
+      super(dir, autoCommit, a, create);
     }

     boolean testPoint(String name) {

@@ -103,7 +103,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
   public Map indexRandom(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
     Map docs = new HashMap();
     for(int iter=0;iter<3;iter++) {
-      IndexWriter w = new MockIndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+      IndexWriter w = new MockIndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);
       w.setUseCompoundFile(false);

       /***
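TestStressIndexing2's MockIndexWriter keeps its autoCommit parameter, since the surrounding test runs under both settings, and only drops MaxFieldLength from the super call. A sketch of that constructor shape (hypothetical class name; the testPoint() override is omitted):

import java.io.IOException;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;

public class StressWrappedIndexWriter extends IndexWriter {

  public StressWrappedIndexWriter(Directory dir, boolean autoCommit, Analyzer a, boolean create)
      throws IOException {
    // Before: super(dir, autoCommit, a, create, mfl);
    // After: the MaxFieldLength argument is dropped from this constructor call.
    super(dir, autoCommit, a, create);
  }
}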
@@ -53,7 +53,7 @@ public class TestThreadedOptimize extends LuceneTestCase {

   public void runTest(Directory directory, boolean autoCommit, MergeScheduler merger) throws Exception {

-    IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true);
     writer.setMaxBufferedDocs(2);
     if (merger != null)
       writer.setMergeScheduler(merger);

@@ -120,7 +120,7 @@ public class TestThreadedOptimize extends LuceneTestCase

     if (!autoCommit) {
       writer.close();
-      writer = new IndexWriter(directory, autoCommit, ANALYZER, false, IndexWriter.MaxFieldLength.LIMITED);
+      writer = new IndexWriter(directory, autoCommit, ANALYZER, false);
       writer.setMaxBufferedDocs(2);
     }
@@ -83,12 +83,12 @@ public class TestTransactions extends LuceneTestCase

     public void doWork() throws Throwable {

-      IndexWriter writer1 = new IndexWriter(dir1, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+      IndexWriter writer1 = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
       writer1.setMaxBufferedDocs(3);
       writer1.setMergeFactor(2);
       ((ConcurrentMergeScheduler) writer1.getMergeScheduler()).setSuppressExceptions();

-      IndexWriter writer2 = new IndexWriter(dir2, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+      IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
       // Intentionally use different params so flush/merge
       // happen @ different times
       writer2.setMaxBufferedDocs(2);

@@ -172,7 +172,7 @@ public class TestTransactions extends LuceneTestCase
     }

     public void initIndex(Directory dir) throws Throwable {
-      IndexWriter writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
       for(int j=0; j<7; j++) {
         Document d = new Document();
         int n = RANDOM.nextInt();