mirror of https://github.com/apache/lucene.git
LUCENE-5708: remove IWC.clone
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1598489 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent add1e44fd7
commit 43be0a77b5

@@ -174,6 +174,10 @@ API Changes
 * LUCENE-5700: Added oal.util.Accountable that is now implemented by all
   classes whose memory usage can be estimated. (Robert Muir, Adrien Grand)
 
+* LUCENE-5708: Remove IndexWriterConfig.clone, so now IndexWriter
+  simply uses the IndexWriterConfig you pass it, and you must create a
+  new IndexWriterConfig for each IndexWriter. (Mike McCandless)
+
 Optimizations
 
 * LUCENE-5603: hunspell stemmer more efficiently strips prefixes

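The entry above amounts to a one-config-per-writer rule. Below is a minimal sketch of the resulting usage; the analyzer, directory, and Version constant are illustrative, not part of this commit:

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Version;

class OneConfigPerWriter {
  // Opens the same directory twice; each IndexWriter gets its own config,
  // because IndexWriterConfig.clone() no longer exists after this commit.
  static void reopenTwice(Directory dir, Analyzer analyzer) throws Exception {
    IndexWriter first = new IndexWriter(dir, new IndexWriterConfig(Version.LUCENE_CURRENT, analyzer));
    first.close();

    // A fresh config, not a clone of the first one:
    IndexWriter second = new IndexWriter(dir, new IndexWriterConfig(Version.LUCENE_CURRENT, analyzer));
    second.close();
  }
}
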
@@ -47,7 +47,7 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
   private int mergeThreadPriority = -1;
 
   /** List of currently active {@link MergeThread}s. */
-  protected List<MergeThread> mergeThreads = new ArrayList<>();
+  protected final List<MergeThread> mergeThreads = new ArrayList<>();
 
   /**
    * Default {@code maxThreadCount}.

@@ -565,13 +565,4 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
     sb.append("mergeThreadPriority=").append(mergeThreadPriority);
     return sb.toString();
   }
-
-  @Override
-  public MergeScheduler clone() {
-    ConcurrentMergeScheduler clone = (ConcurrentMergeScheduler) super.clone();
-    clone.writer = null;
-    clone.dir = null;
-    clone.mergeThreads = new ArrayList<>();
-    return clone;
-  }
 }

@@ -35,7 +35,7 @@ import org.apache.lucene.util.ThreadInterruptedException;
  * new {@link DocumentsWriterPerThread} instance.
  * </p>
  */
-final class DocumentsWriterPerThreadPool implements Cloneable {
+final class DocumentsWriterPerThreadPool {
 
   /**
    * {@link ThreadState} references and guards a

@@ -148,16 +148,6 @@ final class DocumentsWriterPerThreadPool implements Cloneable {
     freeList = new ThreadState[maxNumThreadStates];
   }
 
-  @Override
-  public DocumentsWriterPerThreadPool clone() {
-    // We should only be cloned before being used:
-    if (numThreadStatesActive != 0) {
-      throw new IllegalStateException("clone this object before it is used!");
-    }
-
-    return new DocumentsWriterPerThreadPool(threadStates.length);
-  }
-
   /**
    * Returns the max number of {@link ThreadState} instances available in this
    * {@link DocumentsWriterPerThreadPool}

@@ -21,7 +21,6 @@ import java.util.Iterator;
 import org.apache.lucene.index.DocumentsWriterPerThreadPool.ThreadState;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.InfoStream;
-import org.apache.lucene.util.SetOnce;
 
 /**
  * {@link FlushPolicy} controls when segments are flushed from a RAM resident

@@ -52,7 +51,7 @@ import org.apache.lucene.util.SetOnce;
  * @see DocumentsWriterPerThread
  * @see IndexWriterConfig#setFlushPolicy(FlushPolicy)
  */
-abstract class FlushPolicy implements Cloneable {
+abstract class FlushPolicy {
   protected LiveIndexWriterConfig indexWriterConfig;
   protected InfoStream infoStream;
 

@@ -143,18 +142,4 @@ abstract class FlushPolicy implements Cloneable {
     }
     return true;
   }
-
-  @Override
-  public FlushPolicy clone() {
-    FlushPolicy clone;
-    try {
-      clone = (FlushPolicy) super.clone();
-    } catch (CloneNotSupportedException e) {
-      // should not happen
-      throw new RuntimeException(e);
-    }
-    clone.indexWriterConfig = null;
-    clone.infoStream = null;
-    return clone;
-  }
 }

@@ -54,7 +54,7 @@ import org.apache.lucene.store.Directory;
  * or {@link Directory} instance.</p>
  */
 
-public abstract class IndexDeletionPolicy implements Cloneable {
+public abstract class IndexDeletionPolicy {
 
   /** Sole constructor, typically called by sub-classes constructors. */
   protected IndexDeletionPolicy() {}

@@ -106,14 +106,4 @@ public abstract class IndexDeletionPolicy implements Cloneable {
    * sorted by age (the 0th one is the oldest commit).
    */
   public abstract void onCommit(List<? extends IndexCommit> commits) throws IOException;
-
-  @Override
-  public IndexDeletionPolicy clone() {
-    try {
-      return (IndexDeletionPolicy) super.clone();
-    } catch (CloneNotSupportedException e) {
-      throw new Error(e);
-    }
-  }
-
 }

@@ -27,7 +27,6 @@ import org.apache.lucene.util.Version;
-
 import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.util.Collection;
 
 /**

@@ -155,13 +154,12 @@ public final class IndexUpgrader {
     }
   }
 
-    final IndexWriterConfig c = iwc.clone();
-    c.setMergePolicy(new UpgradeIndexMergePolicy(c.getMergePolicy()));
-    c.setIndexDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
+    iwc.setMergePolicy(new UpgradeIndexMergePolicy(iwc.getMergePolicy()));
+    iwc.setIndexDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
 
-    final IndexWriter w = new IndexWriter(dir, c);
+    final IndexWriter w = new IndexWriter(dir, iwc);
     try {
-      InfoStream infoStream = c.getInfoStream();
+      InfoStream infoStream = iwc.getInfoStream();
       if (infoStream.isEnabled("IndexUpgrader")) {
         infoStream.message("IndexUpgrader", "Upgrading all pre-" + Constants.LUCENE_MAIN_VERSION + " segments of index directory '" + dir + "' to version " + Constants.LUCENE_MAIN_VERSION + "...");
       }

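The hunk above means IndexUpgrader now installs its merge and deletion policies on the IndexWriterConfig it was handed instead of on a private clone. A caller-side sketch, assuming the pre-existing IndexUpgrader(Directory, IndexWriterConfig, boolean) constructor from the 4.x API; the Version constant is illustrative:

import org.apache.lucene.index.IndexUpgrader;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Version;

class UpgradeSketch {
  static void upgradeInPlace(Directory dir) throws Exception {
    IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_CURRENT, null);
    // The config is now used (and mutated) directly by the upgrader:
    new IndexUpgrader(dir, iwc, false).upgrade();
    // iwc now carries the UpgradeIndexMergePolicy and
    // KeepOnlyLastCommitDeletionPolicy set above; don't reuse it elsewhere.
  }
}
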
@@ -700,7 +700,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{
    */
   public IndexWriter(Directory d, IndexWriterConfig conf) throws IOException {
     conf.setIndexWriter(this); // prevent reuse by other instances
-    config = new LiveIndexWriterConfig(conf);
+    config = conf;
     directory = d;
     analyzer = config.getAnalyzer();
     infoStream = config.getInfoStream();

@@ -49,7 +49,7 @@ import org.apache.lucene.util.Version;
  *
  * @since 3.1
  */
-public final class IndexWriterConfig extends LiveIndexWriterConfig implements Cloneable {
+public final class IndexWriterConfig extends LiveIndexWriterConfig {
 
   /**
    * Specifies the open mode for {@link IndexWriter}.

@@ -164,31 +164,6 @@ public final class IndexWriterConfig extends LiveIndexWriterConfig implements Cloneable {
     super(analyzer, matchVersion);
   }
 
-  @Override
-  public IndexWriterConfig clone() {
-    try {
-      IndexWriterConfig clone = (IndexWriterConfig) super.clone();
-
-      clone.writer = writer.clone();
-
-      // Mostly shallow clone, but do a deepish clone of
-      // certain objects that have state that cannot be shared
-      // across IW instances:
-      clone.delPolicy = delPolicy.clone();
-      clone.flushPolicy = flushPolicy.clone();
-      clone.indexerThreadPool = indexerThreadPool.clone();
-      // we clone the infoStream because some impls might have state variables
-      // such as line numbers, message throughput, ...
-      clone.infoStream = infoStream.clone();
-      clone.mergePolicy = mergePolicy.clone();
-      clone.mergeScheduler = mergeScheduler.clone();
-
-      return clone;
-    } catch (CloneNotSupportedException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
   /** Specifies {@link OpenMode} of the index.
    *
    * <p>Only takes effect when IndexWriter is first created. */

@@ -129,35 +129,6 @@ public class LiveIndexWriterConfig {
     perThreadHardLimitMB = IndexWriterConfig.DEFAULT_RAM_PER_THREAD_HARD_LIMIT_MB;
   }
 
-  /**
-   * Creates a new config that that handles the live {@link IndexWriter}
-   * settings.
-   */
-  LiveIndexWriterConfig(IndexWriterConfig config) {
-    maxBufferedDeleteTerms = config.getMaxBufferedDeleteTerms();
-    maxBufferedDocs = config.getMaxBufferedDocs();
-    mergedSegmentWarmer = config.getMergedSegmentWarmer();
-    ramBufferSizeMB = config.getRAMBufferSizeMB();
-    matchVersion = config.matchVersion;
-    analyzer = config.getAnalyzer();
-    delPolicy = config.getIndexDeletionPolicy();
-    commit = config.getIndexCommit();
-    openMode = config.getOpenMode();
-    similarity = config.getSimilarity();
-    mergeScheduler = config.getMergeScheduler();
-    writeLockTimeout = config.getWriteLockTimeout();
-    indexingChain = config.getIndexingChain();
-    codec = config.getCodec();
-    infoStream = config.getInfoStream();
-    mergePolicy = config.getMergePolicy();
-    indexerThreadPool = config.getIndexerThreadPool();
-    readerPooling = config.getReaderPooling();
-    flushPolicy = config.getFlushPolicy();
-    perThreadHardLimitMB = config.getRAMPerThreadHardLimitMB();
-    useCompoundFile = config.getUseCompoundFile();
-    checkIntegrityAtMerge = config.getCheckIntegrityAtMerge();
-  }
-
   /** Returns the default analyzer to use for indexing documents. */
   public Analyzer getAnalyzer() {
     return analyzer;

@@ -57,7 +57,7 @@ import java.util.Map;
  *
  * @lucene.experimental
  */
-public abstract class MergePolicy implements java.io.Closeable, Cloneable {
+public abstract class MergePolicy implements java.io.Closeable {
 
   /** A map of doc IDs. */
   public static abstract class DocMap {

@@ -384,19 +384,6 @@ public abstract class MergePolicy implements java.io.Closeable, Cloneable {
    * this value then it will not use compound file format. */
   protected long maxCFSSegmentSize = DEFAULT_MAX_CFS_SEGMENT_SIZE;
 
-  @Override
-  public MergePolicy clone() {
-    MergePolicy clone;
-    try {
-      clone = (MergePolicy) super.clone();
-    } catch (CloneNotSupportedException e) {
-      // should not happen
-      throw new RuntimeException(e);
-    }
-    clone.writer = new SetOnce<>();
-    return clone;
-  }
-
   /**
    * Creates a new merge policy instance. Note that if you intend to use it
    * without passing it to {@link IndexWriter}, you should call

@@ -29,7 +29,7 @@ import java.io.IOException;
  * instance.</p>
  * @lucene.experimental
  */
-public abstract class MergeScheduler implements Closeable, Cloneable {
+public abstract class MergeScheduler implements Closeable {
 
   /** Sole constructor. (For invocation by subclass
    * constructors, typically implicit.) */

@@ -46,13 +46,4 @@ public abstract class MergeScheduler implements Closeable, Cloneable {
   /** Close this MergeScheduler. */
   @Override
   public abstract void close() throws IOException;
-
-  @Override
-  public MergeScheduler clone() {
-    try {
-      return (MergeScheduler) super.clone();
-    } catch (CloneNotSupportedException e) {
-      throw new Error(e);
-    }
-  }
 }

@@ -47,13 +47,13 @@ public class SnapshotDeletionPolicy extends IndexDeletionPolicy {
 
   /** Records how many snapshots are held against each
    * commit generation */
-  protected Map<Long,Integer> refCounts = new HashMap<>();
+  protected final Map<Long,Integer> refCounts = new HashMap<>();
 
   /** Used to map gen to IndexCommit. */
-  protected Map<Long,IndexCommit> indexCommits = new HashMap<>();
+  protected final Map<Long,IndexCommit> indexCommits = new HashMap<>();
 
   /** Wrapped {@link IndexDeletionPolicy} */
-  private IndexDeletionPolicy primary;
+  private final IndexDeletionPolicy primary;
 
   /** Most recently committed {@link IndexCommit}. */
   protected IndexCommit lastCommit;

@@ -187,16 +187,6 @@ public class SnapshotDeletionPolicy extends IndexDeletionPolicy {
     return indexCommits.get(gen);
   }
 
-  @Override
-  public synchronized IndexDeletionPolicy clone() {
-    SnapshotDeletionPolicy other = (SnapshotDeletionPolicy) super.clone();
-    other.primary = this.primary.clone();
-    other.lastCommit = null;
-    other.refCounts = new HashMap<>(refCounts);
-    other.indexCommits = new HashMap<>(indexCommits);
-    return other;
-  }
-
   /** Wraps each {@link IndexCommit} as a {@link
    * SnapshotCommitPoint}. */
   private List<IndexCommit> wrapCommits(List<? extends IndexCommit> commits) {

@@ -30,7 +30,7 @@ import java.io.Closeable;
  *
  * @lucene.internal
  */
-public abstract class InfoStream implements Closeable, Cloneable {
+public abstract class InfoStream implements Closeable {
 
   /** Instance of InfoStream that does no logging at all. */
   public static final InfoStream NO_OUTPUT = new NoOutput();

@@ -74,14 +74,4 @@ public abstract class InfoStream implements Closeable, Cloneable {
     }
     defaultInfoStream = infoStream;
   }
-
-  @Override
-  public InfoStream clone() {
-    try {
-      return (InfoStream) super.clone();
-    } catch (CloneNotSupportedException e) {
-      throw new Error(e);
-    }
-  }
-
 }

@@ -74,10 +74,4 @@ public final class SetOnce<T> implements Cloneable {
   public final T get() {
     return obj;
   }
-
-  @Override
-  public SetOnce<T> clone() {
-    return obj == null ? new SetOnce<T>() : new SetOnce<>(obj);
-  }
-
 }

@@ -38,15 +38,14 @@ import org.apache.lucene.util.TestUtil;
 public class TestBlockPostingsFormat2 extends LuceneTestCase {
   Directory dir;
   RandomIndexWriter iw;
-  IndexWriterConfig iwc;
 
   @Override
   public void setUp() throws Exception {
     super.setUp();
     dir = newFSDirectory(createTempDir("testDFBlockSize"));
-    iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+    IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat()));
-    iw = new RandomIndexWriter(random(), dir, iwc.clone());
+    iw = new RandomIndexWriter(random(), dir, iwc);
     iw.setDoRandomForceMerge(false); // we will ourselves
   }
 

@@ -54,8 +53,9 @@ public class TestBlockPostingsFormat2 extends LuceneTestCase {
   public void tearDown() throws Exception {
     iw.shutdown();
     TestUtil.checkIndex(dir); // for some extra coverage, checkIndex before we forceMerge
+    IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
     iwc.setOpenMode(OpenMode.APPEND);
-    IndexWriter iw = new IndexWriter(dir, iwc.clone());
+    IndexWriter iw = new IndexWriter(dir, iwc);
     iw.forceMerge(1);
     iw.shutdown();
     dir.close(); // just force a checkindex for now

@@ -86,7 +86,7 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase {
     iwc.setCodec(TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat()));
     // TODO we could actually add more fields implemented with different PFs
     // or, just put this test into the usual rotation?
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc.clone());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
     Document doc = new Document();
     FieldType docsOnlyType = new FieldType(TextField.TYPE_NOT_STORED);
     // turn this on for a cross-check

@@ -137,8 +137,9 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase {
     iw.shutdown();
     verify(dir);
     TestUtil.checkIndex(dir); // for some extra coverage, checkIndex before we forceMerge
+    iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
     iwc.setOpenMode(OpenMode.APPEND);
-    IndexWriter iw2 = new IndexWriter(dir, iwc.clone());
+    IndexWriter iw2 = new IndexWriter(dir, iwc);
     iw2.forceMerge(1);
     iw2.shutdown();
     verify(dir);

@@ -566,7 +566,7 @@ public class TestBinaryDocValuesUpdates extends LuceneTestCase {
     Directory dir = newDirectory();
     Random random = random();
     IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
-    IndexWriter writer = new IndexWriter(dir, conf.clone());
+    IndexWriter writer = new IndexWriter(dir, conf);
 
     int docid = 0;
     int numRounds = atLeast(10);

@@ -593,7 +593,8 @@ public class TestBinaryDocValuesUpdates extends LuceneTestCase {
         writer.commit();
       } else if (random.nextDouble() < 0.1) {
         writer.shutdown();
-        writer = new IndexWriter(dir, conf.clone());
+        conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+        writer = new IndexWriter(dir, conf);
       }
 
       // add another document with the current value, to be sure forceMerge has

@@ -1082,7 +1083,7 @@ public class TestBinaryDocValuesUpdates extends LuceneTestCase {
         return new Lucene45DocValuesFormat();
       }
     });
-    IndexWriter writer = new IndexWriter(dir, conf.clone());
+    IndexWriter writer = new IndexWriter(dir, conf);
     Document doc = new Document();
     doc.add(new StringField("id", "d0", Store.NO));
     doc.add(new BinaryDocValuesField("f1", toBytes(5L)));

@@ -1091,13 +1092,14 @@ public class TestBinaryDocValuesUpdates extends LuceneTestCase {
     writer.shutdown();
 
     // change format
+    conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
     conf.setCodec(new Lucene46Codec() {
       @Override
       public DocValuesFormat getDocValuesFormatForField(String field) {
         return new AssertingDocValuesFormat();
       }
     });
-    writer = new IndexWriter(dir, conf.clone());
+    writer = new IndexWriter(dir, conf);
     doc = new Document();
     doc.add(new StringField("id", "d1", Store.NO));
     doc.add(new BinaryDocValuesField("f1", toBytes(17L)));

@@ -1358,7 +1360,7 @@ public class TestBinaryDocValuesUpdates extends LuceneTestCase {
     conf.setMergePolicy(NoMergePolicy.INSTANCE);
     conf.setMaxBufferedDocs(Integer.MAX_VALUE); // manually flush
     conf.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
-    IndexWriter writer = new IndexWriter(dir, conf.clone());
+    IndexWriter writer = new IndexWriter(dir, conf);
     for (int i = 0; i < 100; i++) {
       writer.addDocument(doc(i));
     }

@@ -1366,7 +1368,8 @@ public class TestBinaryDocValuesUpdates extends LuceneTestCase {
     writer.close();
 
     NRTCachingDirectory cachingDir = new NRTCachingDirectory(dir, 100, 1/(1024.*1024.));
-    writer = new IndexWriter(cachingDir, conf.clone());
+    conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+    writer = new IndexWriter(cachingDir, conf);
     writer.updateBinaryDocValue(new Term("id", "doc-0"), "val", toBytes(100L));
     DirectoryReader reader = DirectoryReader.open(writer, true); // flush
     assertEquals(0, cachingDir.listCachedFiles().length);

@@ -572,13 +572,14 @@ public class TestDocValuesIndexing extends LuceneTestCase {
   public void testIllegalTypeChangeAcrossSegments() throws Exception {
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
-    IndexWriter writer = new IndexWriter(dir, conf.clone());
+    IndexWriter writer = new IndexWriter(dir, conf);
     Document doc = new Document();
     doc.add(new NumericDocValuesField("dv", 0L));
     writer.addDocument(doc);
     writer.shutdown();
 
-    writer = new IndexWriter(dir, conf.clone());
+    conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+    writer = new IndexWriter(dir, conf);
     doc = new Document();
     doc.add(new SortedDocValuesField("dv", new BytesRef("foo")));
     try {

@@ -594,13 +595,14 @@ public class TestDocValuesIndexing extends LuceneTestCase {
   public void testTypeChangeAfterCloseAndDeleteAll() throws Exception {
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
-    IndexWriter writer = new IndexWriter(dir, conf.clone());
+    IndexWriter writer = new IndexWriter(dir, conf);
     Document doc = new Document();
     doc.add(new NumericDocValuesField("dv", 0L));
     writer.addDocument(doc);
     writer.shutdown();
 
-    writer = new IndexWriter(dir, conf.clone());
+    conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+    writer = new IndexWriter(dir, conf);
     writer.deleteAll();
     doc = new Document();
     doc.add(new SortedDocValuesField("dv", new BytesRef("foo")));

@@ -643,13 +645,14 @@ public class TestDocValuesIndexing extends LuceneTestCase {
   public void testTypeChangeAfterOpenCreate() throws Exception {
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
-    IndexWriter writer = new IndexWriter(dir, conf.clone());
+    IndexWriter writer = new IndexWriter(dir, conf);
     Document doc = new Document();
     doc.add(new NumericDocValuesField("dv", 0L));
     writer.addDocument(doc);
     writer.shutdown();
+    conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
     conf.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
-    writer = new IndexWriter(dir, conf.clone());
+    writer = new IndexWriter(dir, conf);
     doc = new Document();
     doc.add(new SortedDocValuesField("dv", new BytesRef("foo")));
     writer.addDocument(doc);

@@ -660,14 +663,15 @@ public class TestDocValuesIndexing extends LuceneTestCase {
   public void testTypeChangeViaAddIndexes() throws Exception {
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
-    IndexWriter writer = new IndexWriter(dir, conf.clone());
+    IndexWriter writer = new IndexWriter(dir, conf);
     Document doc = new Document();
     doc.add(new NumericDocValuesField("dv", 0L));
     writer.addDocument(doc);
     writer.shutdown();
 
     Directory dir2 = newDirectory();
-    writer = new IndexWriter(dir2, conf.clone());
+    conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+    writer = new IndexWriter(dir2, conf);
     doc = new Document();
     doc.add(new SortedDocValuesField("dv", new BytesRef("foo")));
     writer.addDocument(doc);

@@ -686,14 +690,15 @@ public class TestDocValuesIndexing extends LuceneTestCase {
   public void testTypeChangeViaAddIndexesIR() throws Exception {
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
-    IndexWriter writer = new IndexWriter(dir, conf.clone());
+    IndexWriter writer = new IndexWriter(dir, conf);
     Document doc = new Document();
     doc.add(new NumericDocValuesField("dv", 0L));
     writer.addDocument(doc);
     writer.shutdown();
 
     Directory dir2 = newDirectory();
-    writer = new IndexWriter(dir2, conf.clone());
+    conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+    writer = new IndexWriter(dir2, conf);
     doc = new Document();
     doc.add(new SortedDocValuesField("dv", new BytesRef("foo")));
     writer.addDocument(doc);

@@ -714,14 +719,15 @@ public class TestDocValuesIndexing extends LuceneTestCase {
   public void testTypeChangeViaAddIndexes2() throws Exception {
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
-    IndexWriter writer = new IndexWriter(dir, conf.clone());
+    IndexWriter writer = new IndexWriter(dir, conf);
     Document doc = new Document();
     doc.add(new NumericDocValuesField("dv", 0L));
     writer.addDocument(doc);
     writer.shutdown();
 
     Directory dir2 = newDirectory();
-    writer = new IndexWriter(dir2, conf.clone());
+    conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+    writer = new IndexWriter(dir2, conf);
     writer.addIndexes(dir);
     doc = new Document();
     doc.add(new SortedDocValuesField("dv", new BytesRef("foo")));

@@ -739,14 +745,15 @@ public class TestDocValuesIndexing extends LuceneTestCase {
   public void testTypeChangeViaAddIndexesIR2() throws Exception {
     Directory dir = newDirectory();
     IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
-    IndexWriter writer = new IndexWriter(dir, conf.clone());
+    IndexWriter writer = new IndexWriter(dir, conf);
     Document doc = new Document();
     doc.add(new NumericDocValuesField("dv", 0L));
     writer.addDocument(doc);
     writer.shutdown();
 
     Directory dir2 = newDirectory();
-    writer = new IndexWriter(dir2, conf.clone());
+    conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+    writer = new IndexWriter(dir2, conf);
     IndexReader[] readers = new IndexReader[] {DirectoryReader.open(dir)};
     writer.addIndexes(readers);
     readers[0].close();

@@ -154,19 +154,6 @@ public class TestIndexWriterConfig extends LuceneTestCase {
       // expected
     }
 
-    // also cloning it won't help, after it has been used already
-    try {
-      assertNotNull(new RandomIndexWriter(random(), dir, conf.clone()));
-      fail("should have hit AlreadySetException");
-    } catch (AlreadySetException e) {
-      // expected
-    }
-
-    // if it's cloned in advance, it should be ok
-    conf = newIndexWriterConfig(TEST_VERSION_CURRENT, null);
-    new RandomIndexWriter(random(), dir, conf.clone()).shutdown();
-    new RandomIndexWriter(random(), dir, conf.clone()).shutdown();
-
     dir.close();
   }
 

@@ -224,36 +211,6 @@ public class TestIndexWriterConfig extends LuceneTestCase {
     }
   }
 
-  @Test
-  public void testClone() throws Exception {
-    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
-    IndexWriterConfig clone = conf.clone();
-
-    // Make sure parameters that can't be reused are cloned
-    IndexDeletionPolicy delPolicy = conf.delPolicy;
-    IndexDeletionPolicy delPolicyClone = clone.delPolicy;
-    assertTrue(delPolicy.getClass() == delPolicyClone.getClass() && (delPolicy != delPolicyClone || delPolicy.clone() == delPolicyClone.clone()));
-
-    FlushPolicy flushPolicy = conf.flushPolicy;
-    FlushPolicy flushPolicyClone = clone.flushPolicy;
-    assertTrue(flushPolicy.getClass() == flushPolicyClone.getClass() && (flushPolicy != flushPolicyClone || flushPolicy.clone() == flushPolicyClone.clone()));
-
-    DocumentsWriterPerThreadPool pool = conf.indexerThreadPool;
-    DocumentsWriterPerThreadPool poolClone = clone.indexerThreadPool;
-    assertTrue(pool.getClass() == poolClone.getClass() && (pool != poolClone || pool.clone() == poolClone.clone()));
-
-    MergePolicy mergePolicy = conf.mergePolicy;
-    MergePolicy mergePolicyClone = clone.mergePolicy;
-    assertTrue(mergePolicy.getClass() == mergePolicyClone.getClass() && (mergePolicy != mergePolicyClone || mergePolicy.clone() == mergePolicyClone.clone()));
-
-    MergeScheduler mergeSched = conf.mergeScheduler;
-    MergeScheduler mergeSchedClone = clone.mergeScheduler;
-    assertTrue(mergeSched.getClass() == mergeSchedClone.getClass() && (mergeSched != mergeSchedClone || mergeSched.clone() == mergeSchedClone.clone()));
-
-    conf.setMergeScheduler(new SerialMergeScheduler());
-    assertEquals(ConcurrentMergeScheduler.class, clone.getMergeScheduler().getClass());
-  }
-
   @Test
   public void testInvalidValues() throws Exception {
     IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));

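The removed block above exercised the old clone-based reuse. With clone gone, handing an already-used config to a second writer should simply fail; a sketch of that expected failure mode, assuming the AlreadySetException behavior the removed test already relied on (the Version constant and analyzer are illustrative):

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.SetOnce.AlreadySetException;
import org.apache.lucene.util.Version;

class ReuseFails {
  static void demo(Directory dir, Analyzer analyzer) throws Exception {
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_CURRENT, analyzer);
    IndexWriter first = new IndexWriter(dir, conf); // conf is now bound to 'first'
    first.close();
    try {
      new IndexWriter(dir, conf); // reusing the same config should fail
    } catch (AlreadySetException e) {
      // expected: every IndexWriter needs its own IndexWriterConfig
    }
  }
}
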
@@ -1153,7 +1153,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
     Directory dir = newDirectory();
     IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
     iwc.setMaxBufferedDocs(2);
-    IndexWriter w = new IndexWriter(dir, iwc.clone());
+    IndexWriter w = new IndexWriter(dir, iwc);
     Document doc = new Document();
     doc.add(newField("field", "0", StringField.TYPE_NOT_STORED));
     w.addDocument(doc);

@@ -1178,7 +1178,8 @@ public class TestIndexWriterDelete extends LuceneTestCase {
 
     // Segment should have deletions:
     assertTrue(s.contains("has deletions"));
-    w = new IndexWriter(dir, iwc.clone());
+    iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+    w = new IndexWriter(dir, iwc);
     w.forceMerge(1);
     w.shutdown();
 

@@ -547,7 +547,7 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase {
     Directory dir = newDirectory();
     Random random = random();
     IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
-    IndexWriter writer = new IndexWriter(dir, conf.clone());
+    IndexWriter writer = new IndexWriter(dir, conf);
 
     int docid = 0;
     int numRounds = atLeast(10);

@@ -574,7 +574,8 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase {
         writer.commit();
       } else if (random.nextDouble() < 0.1) {
         writer.shutdown();
-        writer = new IndexWriter(dir, conf.clone());
+        conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+        writer = new IndexWriter(dir, conf);
       }
 
       // add another document with the current value, to be sure forceMerge has

@@ -1064,7 +1065,7 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase {
         return new Lucene45DocValuesFormat();
       }
     });
-    IndexWriter writer = new IndexWriter(dir, conf.clone());
+    IndexWriter writer = new IndexWriter(dir, conf);
     Document doc = new Document();
     doc.add(new StringField("id", "d0", Store.NO));
     doc.add(new NumericDocValuesField("f1", 5L));

@@ -1073,13 +1074,14 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase {
     writer.shutdown();
 
     // change format
+    conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
     conf.setCodec(new Lucene46Codec() {
       @Override
       public DocValuesFormat getDocValuesFormatForField(String field) {
         return new AssertingDocValuesFormat();
       }
     });
-    writer = new IndexWriter(dir, conf.clone());
+    writer = new IndexWriter(dir, conf);
     doc = new Document();
     doc.add(new StringField("id", "d1", Store.NO));
     doc.add(new NumericDocValuesField("f1", 17L));

@@ -1341,7 +1343,7 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase {
     conf.setMergePolicy(NoMergePolicy.INSTANCE);
     conf.setMaxBufferedDocs(Integer.MAX_VALUE); // manually flush
     conf.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
-    IndexWriter writer = new IndexWriter(dir, conf.clone());
+    IndexWriter writer = new IndexWriter(dir, conf);
     for (int i = 0; i < 100; i++) {
       writer.addDocument(doc(i));
     }

@@ -1349,7 +1351,8 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase {
     writer.close();
 
     NRTCachingDirectory cachingDir = new NRTCachingDirectory(dir, 100, 1/(1024.*1024.));
-    writer = new IndexWriter(cachingDir, conf.clone());
+    conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+    writer = new IndexWriter(cachingDir, conf);
     writer.updateNumericDocValue(new Term("id", "doc-0"), "val", 100L);
     DirectoryReader reader = DirectoryReader.open(writer, true); // flush
     assertEquals(0, cachingDir.listCachedFiles().length);

@@ -203,11 +203,6 @@ public final class SortingMergePolicy extends MergePolicy {
     return sortedMergeSpecification(in.findForcedDeletesMerges(segmentInfos));
   }
 
-  @Override
-  public MergePolicy clone() {
-    return new SortingMergePolicy(in.clone(), sort);
-  }
-
   @Override
   public void close() {
     in.close();

@@ -498,7 +498,7 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormatTestCase {
     Directory dir = newDirectory();
     IndexWriterConfig iwConf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
     iwConf.setMaxBufferedDocs(RandomInts.randomIntBetween(random(), 2, 30));
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConf.clone());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConf);
 
     final int docCount = atLeast(200);
     final byte[][][] data = new byte [docCount][][];

@@ -531,13 +531,15 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormatTestCase {
       iw.w.addDocument(doc);
       if (random().nextBoolean() && (i % (data.length / 10) == 0)) {
        iw.w.shutdown();
+        IndexWriterConfig iwConfNew = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
         // test merging against a non-compressing codec
         if (iwConf.getCodec() == otherCodec) {
-          iwConf.setCodec(Codec.getDefault());
+          iwConfNew.setCodec(Codec.getDefault());
         } else {
-          iwConf.setCodec(otherCodec);
+          iwConfNew.setCodec(otherCodec);
         }
-        iw = new RandomIndexWriter(random(), dir, iwConf.clone());
+        iwConf = iwConfNew;
+        iw = new RandomIndexWriter(random(), dir, iwConf);
       }
     }
 

@@ -41,11 +41,12 @@ public final class RandomMergePolicy extends MergePolicy {
    * Not private so tests can inspect it,
    * Not final so it can be set on clone
    */
-  MergePolicy inner;
+  final MergePolicy inner;
 
   public RandomMergePolicy() {
     this(LuceneTestCase.newMergePolicy());
   }
 
   private RandomMergePolicy(MergePolicy inner) {
     super(inner.getNoCFSRatio(),
           (long) (inner.getMaxCFSSegmentSizeMB() * 1024 * 1024));

@@ -54,12 +55,6 @@ public final class RandomMergePolicy extends MergePolicy {
           inner.getClass(), inner);
   }
 
-  public RandomMergePolicy clone() {
-    RandomMergePolicy clone = (RandomMergePolicy) super.clone();
-    clone.inner = this.inner.clone();
-    return clone;
-  }
-
   public void close() {
     inner.close();
   }