diff --git a/lucene/src/java/org/apache/lucene/search/NumericRangeQuery.java b/lucene/src/java/org/apache/lucene/search/NumericRangeQuery.java
index 2cba90ad0bd..1daa453383c 100644
--- a/lucene/src/java/org/apache/lucene/search/NumericRangeQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/NumericRangeQuery.java
@@ -76,8 +76,6 @@ import org.apache.lucene.index.TermsEnum;
* BooleanQuery rewrite methods without changing
* BooleanQuery's default max clause count.
*
- * @lucene.experimental
- *
* How it works
*
 * See the publication about panFMP,
diff --git a/lucene/src/java/org/apache/lucene/util/NumericUtils.java b/lucene/src/java/org/apache/lucene/util/NumericUtils.java
index 0a08f95e60d..232461ddcf5 100644
--- a/lucene/src/java/org/apache/lucene/util/NumericUtils.java
+++ b/lucene/src/java/org/apache/lucene/util/NumericUtils.java
@@ -22,8 +22,6 @@ import org.apache.lucene.document.NumericField;
import org.apache.lucene.search.NumericRangeFilter;
import org.apache.lucene.search.NumericRangeQuery; // for javadocs
-// TODO: Remove the commented out methods before release!
-
/**
* This is a helper class to generate prefix-encoded representations for numerical values
* and supplies converters to represent float/double values as sortable integers/longs.
From c40a12dd46b4e87751cdc09e6a8b25cdbea0104e Mon Sep 17 00:00:00 2001
From: Michael McCandless
Date: Wed, 26 Jan 2011 19:18:48 +0000
Subject: [PATCH 017/185] jdocs: note that calling close(false) can hit
MergeAbortedExc's in optimize, addIndexes(IndexReader[]), expungeDeletes
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063837 13f79535-47bb-0310-9956-ffa450edef68
---
.../org/apache/lucene/index/IndexWriter.java | 20 +++++++++++++++++--
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/lucene/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/src/java/org/apache/lucene/index/IndexWriter.java
index b7573b5b4e4..eb3dce99f00 100644
--- a/lucene/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/src/java/org/apache/lucene/index/IndexWriter.java
@@ -1520,6 +1520,11 @@ public class IndexWriter implements Closeable {
* you should immediately close the writer. See above for details.
*
+   * NOTE: if you call {@link #close(boolean)}
+   * with false, which aborts all running merges,
+ * then any thread still running this method might hit a
+ * {@link MergePolicy.MergeAbortedException}.
+ *
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
* @see MergePolicy#findMergesForOptimize
@@ -1669,6 +1674,11 @@ public class IndexWriter implements Closeable {
*
   * NOTE: if this method hits an OutOfMemoryError
* you should immediately close the writer. See above for details.
+ *
+   * NOTE: if you call {@link #close(boolean)}
+   * with false, which aborts all running merges,
+ * then any thread still running this method might hit a
+ * {@link MergePolicy.MergeAbortedException}.
*/
public void expungeDeletes(boolean doWait)
throws CorruptIndexException, IOException {
@@ -1939,8 +1949,9 @@ public class IndexWriter implements Closeable {
*
*
   * NOTE: this method will forcefully abort all merges
* in progress. If other threads are running {@link
- * #optimize()} or any of the addIndexes methods, they
- * will receive {@link MergePolicy.MergeAbortedException}s.
+ * #optimize()}, {@link #addIndexes(IndexReader[])} or
+ * {@link #expungeDeletes} methods, they may receive
+ * {@link MergePolicy.MergeAbortedException}s.
*/
public synchronized void deleteAll() throws IOException {
try {
@@ -2220,6 +2231,11 @@ public class IndexWriter implements Closeable {
* you should immediately close the writer. See above for details.
*
+   * NOTE: if you call {@link #close(boolean)}
+   * with false, which aborts all running merges,
+ * then any thread still running this method might hit a
+ * {@link MergePolicy.MergeAbortedException}.
+ *
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
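The caller-side consequence of the new notes, as a minimal sketch (only the Lucene names in this patch are assumed; the wrapper method is illustrative): one thread runs optimize() while another calls close(false), and the optimizing thread tolerates the documented abort.

import java.io.IOException;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.MergePolicy;

public class CloseFalseDemo {
  // Runs optimize() and tolerates the abort documented above, which happens
  // when another thread calls writer.close(false) while merges are running.
  static void optimizeTolerantly(IndexWriter writer) throws IOException {
    try {
      writer.optimize();
    } catch (MergePolicy.MergeAbortedException e) {
      // Expected if close(false) aborted our merges; the index stays
      // consistent, we simply did not finish optimizing.
    }
  }
}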
From 3da5bd87288c88b5cae6d1984863d92c59eea864 Mon Sep 17 00:00:00 2001
From: Michael McCandless
Date: Wed, 26 Jan 2011 19:25:50 +0000
Subject: [PATCH 018/185] remove dead code
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063842 13f79535-47bb-0310-9956-ffa450edef68
---
lucene/src/java/org/apache/lucene/index/IndexWriter.java | 5 -----
1 file changed, 5 deletions(-)
diff --git a/lucene/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/src/java/org/apache/lucene/index/IndexWriter.java
index eb3dce99f00..d4fd1a0f630 100644
--- a/lucene/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/src/java/org/apache/lucene/index/IndexWriter.java
@@ -215,7 +215,6 @@ public class IndexWriter implements Closeable {
private long lastCommitChangeCount; // last changeCount that was committed
private SegmentInfos rollbackSegmentInfos; // segmentInfos we will fallback to if the commit fails
- private HashMap<SegmentInfo,Integer> rollbackSegments;
volatile SegmentInfos pendingCommit; // set when a commit is pending (after prepareCommit() & before commit())
volatile long pendingCommitChangeCount;
@@ -860,10 +859,6 @@ public class IndexWriter implements Closeable {
private synchronized void setRollbackSegmentInfos(SegmentInfos infos) {
rollbackSegmentInfos = (SegmentInfos) infos.clone();
- rollbackSegments = new HashMap<SegmentInfo,Integer>();
- final int size = rollbackSegmentInfos.size();
- for(int i=0;i<size;i++)
-   rollbackSegments.put(rollbackSegmentInfos.info(i), Integer.valueOf(i));
Date: Wed, 26 Jan 2011 20:55:49 +0000
Subject: [PATCH 019/185] LUCENE-2609: distribute the core tests, as they can
be useful downstream to people
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063868 13f79535-47bb-0310-9956-ffa450edef68
---
lucene/build.xml | 6 ++++--
lucene/common-build.xml | 4 ++++
2 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/lucene/build.xml b/lucene/build.xml
index 31b3b2e3011..7827695aa9d 100644
--- a/lucene/build.xml
+++ b/lucene/build.xml
@@ -48,7 +48,7 @@
excludes="contrib/db/*/lib/,contrib/*/ext-libs/,src/site/build/"
/>
-
+
@@ -401,6 +401,8 @@
classifier="sources"/>
+
diff --git a/lucene/common-build.xml b/lucene/common-build.xml
index be42d5bd648..fe93b40b62b 100644
--- a/lucene/common-build.xml
+++ b/lucene/common-build.xml
@@ -308,6 +308,10 @@
+
+
+
+
##################################################################
From 326ab7d5774b57a001db2f64f4df9d0ae6a84a5a Mon Sep 17 00:00:00 2001
From: Yonik Seeley
Date: Wed, 26 Jan 2011 20:57:05 +0000
Subject: [PATCH 020/185] SOLR-1711: fix hang when queue is full but there are
no runners
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063869 13f79535-47bb-0310-9956-ffa450edef68
---
solr/CHANGES.txt | 3 +-
.../solrj/impl/StreamingUpdateSolrServer.java | 30 ++++++++++++-------
2 files changed, 21 insertions(+), 12 deletions(-)
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 683a722329b..a4308254de6 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -516,7 +516,8 @@ Bug Fixes
* SOLR-1711: SolrJ - StreamingUpdateSolrServer had a race condition that
could halt the streaming of documents. The original patch to fix this
(never officially released) introduced another hanging bug due to
- connections not being released. (Attila Babo, Erik Hetzner via yonik)
+ connections not being released.
+ (Attila Babo, Erik Hetzner, Johannes Tuchscherer via yonik)
* SOLR-1748, SOLR-1747, SOLR-1746, SOLR-1745, SOLR-1744: Streams and Readers
retrieved from ContentStreams are not closed in various places, resulting
diff --git a/solr/src/solrj/org/apache/solr/client/solrj/impl/StreamingUpdateSolrServer.java b/solr/src/solrj/org/apache/solr/client/solrj/impl/StreamingUpdateSolrServer.java
index 4460dfb2ce0..c47f4a09957 100644
--- a/solr/src/solrj/org/apache/solr/client/solrj/impl/StreamingUpdateSolrServer.java
+++ b/solr/src/solrj/org/apache/solr/client/solrj/impl/StreamingUpdateSolrServer.java
@@ -173,12 +173,20 @@ public class StreamingUpdateSolrServer extends CommonsHttpSolrServer
}
catch (Throwable e) {
handleError( e );
- }
+ }
finally {
- // remove it from the list of running things...
+
+ // remove it from the list of running things unless we are the last runner and the queue is full...
+ // in which case, the next queue.put() would block and there would be no runners to handle it.
synchronized (runners) {
- runners.remove( this );
+ if (runners.size() == 1 && queue.remainingCapacity() == 0) {
+ // keep this runner alive
+ scheduler.execute(this);
+ } else {
+ runners.remove( this );
+ }
}
+
log.info( "finished: {}" , this );
runnerLock.unlock();
}
@@ -208,7 +216,7 @@ public class StreamingUpdateSolrServer extends CommonsHttpSolrServer
return super.request( request );
}
}
-
+
try {
CountDownLatch tmpLock = lock;
if( tmpLock != null ) {
@@ -216,18 +224,18 @@ public class StreamingUpdateSolrServer extends CommonsHttpSolrServer
}
queue.put( req );
-
- synchronized( runners ) {
- if( runners.isEmpty()
- || (queue.remainingCapacity() < queue.size()
- && runners.size() < threadCount) )
- {
+
+ synchronized( runners ) {
+ if( runners.isEmpty()
+ || (queue.remainingCapacity() < queue.size()
+ && runners.size() < threadCount) )
+ {
Runner r = new Runner();
scheduler.execute( r );
runners.add( r );
}
}
- }
+ }
catch (InterruptedException e) {
log.error( "interrupted", e );
throw new IOException( e.getLocalizedMessage() );
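The rule behind this fix generalizes: the last consumer must not exit while the queue is still full, or the next blocked queue.put() has nobody left to drain it. A self-contained JDK sketch of that keep-alive decision (class and method names are illustrative, not the SolrJ API):

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;

public class LastRunnerKeepAlive {
  static final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>(4);
  static final Set<Worker> runners = new HashSet<Worker>();
  static final ExecutorService scheduler = Executors.newCachedThreadPool();

  static class Worker implements Runnable {
    public void run() {
      try {
        Runnable task;
        while ((task = queue.poll()) != null) {
          task.run();
        }
      } finally {
        synchronized (runners) {
          if (runners.size() == 1 && queue.remainingCapacity() == 0) {
            scheduler.execute(this);   // last runner + full queue: stay alive
          } else {
            runners.remove(this);      // otherwise retire normally
          }
        }
      }
    }
  }

  static void submit(Runnable task) throws InterruptedException {
    queue.put(task);                   // may block when the queue is full
    synchronized (runners) {
      if (runners.isEmpty()) {         // ensure someone is draining
        Worker w = new Worker();
        runners.add(w);
        scheduler.execute(w);
      }
    }
  }
}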
From 471c0ced8288326cf7b4d5cd0468d309b25bccf3 Mon Sep 17 00:00:00 2001
From: Yonik Seeley
Date: Wed, 26 Jan 2011 21:39:42 +0000
Subject: [PATCH 021/185] SOLR-2327: error handling - force
queryResultWindowSize to a min of 1
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063877 13f79535-47bb-0310-9956-ffa450edef68
---
solr/src/java/org/apache/solr/core/SolrConfig.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/solr/src/java/org/apache/solr/core/SolrConfig.java b/solr/src/java/org/apache/solr/core/SolrConfig.java
index f8421945762..e0cee0ccb08 100644
--- a/solr/src/java/org/apache/solr/core/SolrConfig.java
+++ b/solr/src/java/org/apache/solr/core/SolrConfig.java
@@ -141,7 +141,7 @@ public class SolrConfig extends Config {
filtOptThreshold = getFloat("query/boolTofilterOptimizer/@threshold",.05f);
useFilterForSortedQuery = getBool("query/useFilterForSortedQuery", false);
- queryResultWindowSize = getInt("query/queryResultWindowSize", 1);
+ queryResultWindowSize = Math.max(1, getInt("query/queryResultWindowSize", 1));
queryResultMaxDocsCached = getInt("query/queryResultMaxDocsCached", Integer.MAX_VALUE);
enableLazyFieldLoading = getBool("query/enableLazyFieldLoading", false);
From e54599568d77c1f66e09ea8a416c7ecedd56d074 Mon Sep 17 00:00:00 2001
From: Michael McCandless
Date: Wed, 26 Jan 2011 21:55:37 +0000
Subject: [PATCH 022/185] LUCENE-2010: don't assert no unref'd files in
TIR.testDiskFull; fix rollback on exc during commit to put back any pruned
segs
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063882 13f79535-47bb-0310-9956-ffa450edef68
---
.../java/org/apache/lucene/index/DirectoryReader.java | 10 +++++++++-
.../test/org/apache/lucene/index/TestIndexReader.java | 9 ---------
2 files changed, 9 insertions(+), 10 deletions(-)
diff --git a/lucene/src/java/org/apache/lucene/index/DirectoryReader.java b/lucene/src/java/org/apache/lucene/index/DirectoryReader.java
index aa372be4b66..0009a5f9322 100644
--- a/lucene/src/java/org/apache/lucene/index/DirectoryReader.java
+++ b/lucene/src/java/org/apache/lucene/index/DirectoryReader.java
@@ -715,12 +715,16 @@ class DirectoryReader extends IndexReader implements Cloneable {
// case we have to roll back:
startCommit();
+ final SegmentInfos rollbackSegmentInfos = new SegmentInfos();
+ rollbackSegmentInfos.addAll(segmentInfos);
+
boolean success = false;
try {
for (int i = 0; i < subReaders.length; i++)
subReaders[i].commit();
- // Remove segments that contain only 100% deleted docs:
+ // Remove segments that contain only 100% deleted
+ // docs:
segmentInfos.pruneDeletedSegments();
// Sync all files we just wrote
@@ -742,6 +746,10 @@ class DirectoryReader extends IndexReader implements Cloneable {
// partially written .del files, etc, are
// removed):
deleter.refresh();
+
+ // Restore all SegmentInfos (in case we pruned some)
+ segmentInfos.clear();
+ segmentInfos.addAll(rollbackSegmentInfos);
}
}
diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexReader.java b/lucene/src/test/org/apache/lucene/index/TestIndexReader.java
index 01b73877385..1256cbaff98 100644
--- a/lucene/src/test/org/apache/lucene/index/TestIndexReader.java
+++ b/lucene/src/test/org/apache/lucene/index/TestIndexReader.java
@@ -996,15 +996,6 @@ public class TestIndexReader extends LuceneTestCase
}
}
- // Whether we succeeded or failed, check that all
- // un-referenced files were in fact deleted (ie,
- // we did not create garbage). Just create a
- // new IndexFileDeleter, have it delete
- // unreferenced files, then verify that in fact
- // no files were deleted:
- IndexWriter.unlock(dir);
- TestIndexWriter.assertNoUnreferencedFiles(dir, "reader.close() failed to delete unreferenced files");
-
// Finally, verify index is not corrupt, and, if
// we succeeded, we see all docs changed, and if
// we failed, we see either all docs or no docs
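The shape of this fix is a reusable snapshot-and-restore idiom: copy the mutable list before a multi-step commit, and on failure restore the copy wholesale so pruned entries come back. A generic JDK sketch (illustrative names, not the DirectoryReader internals):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SnapshotRestoreDemo {
  // Prunes 'segments' as part of a commit; if any later step throws,
  // restores the pre-commit contents, mirroring rollbackSegmentInfos above.
  static void commit(List<String> segments) {
    List<String> rollback = new ArrayList<String>(segments);
    boolean success = false;
    try {
      segments.removeAll(Collections.singleton("segment-with-all-docs-deleted"));
      // ... write + sync files here; any step may throw ...
      success = true;
    } finally {
      if (!success) {
        segments.clear();
        segments.addAll(rollback);   // put back anything we pruned
      }
    }
  }
}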
From 63097d1bd8a7051f9b5b2f450eaab6e162e2b337 Mon Sep 17 00:00:00 2001
From: Michael McCandless
Date: Wed, 26 Jan 2011 22:17:57 +0000
Subject: [PATCH 023/185] LUCENE-2474: cutover to
MapBackedSet(ConcurrentHashMap) instead of Collections.syncSet(HashSet)
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063897 13f79535-47bb-0310-9956-ffa450edef68
---
.../apache/lucene/index/DirectoryReader.java | 4 +-
.../lucene/index/FilterIndexReader.java | 6 +-
.../org/apache/lucene/index/IndexReader.java | 6 +-
.../org/apache/lucene/index/IndexWriter.java | 5 +-
.../org/apache/lucene/index/MultiReader.java | 6 +-
.../apache/lucene/index/ParallelReader.java | 4 +-
.../org/apache/lucene/util/MapBackedSet.java | 73 +++++++++++++++++++
7 files changed, 89 insertions(+), 15 deletions(-)
create mode 100644 lucene/src/java/org/apache/lucene/util/MapBackedSet.java
diff --git a/lucene/src/java/org/apache/lucene/index/DirectoryReader.java b/lucene/src/java/org/apache/lucene/index/DirectoryReader.java
index 0009a5f9322..06c4d4009be 100644
--- a/lucene/src/java/org/apache/lucene/index/DirectoryReader.java
+++ b/lucene/src/java/org/apache/lucene/index/DirectoryReader.java
@@ -27,6 +27,7 @@ import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
@@ -36,6 +37,7 @@ import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.index.codecs.CodecProvider;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.MapBackedSet;
/**
* An IndexReader which reads indexes with multiple segments.
@@ -104,7 +106,7 @@ class DirectoryReader extends IndexReader implements Cloneable {
} else {
this.codecs = codecs;
}
- readerFinishedListeners = Collections.synchronizedSet(new HashSet<ReaderFinishedListener>());
+ readerFinishedListeners = new MapBackedSet<ReaderFinishedListener>(new ConcurrentHashMap<ReaderFinishedListener,Boolean>());
// To reduce the chance of hitting FileNotFound
// (and having to retry), we open segments in
diff --git a/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java b/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java
index 6dc2f48227e..d922a48da7e 100644
--- a/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java
+++ b/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java
@@ -23,13 +23,13 @@ import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.MapBackedSet;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
import java.util.Comparator;
-import java.util.HashSet;
-import java.util.Collections;
+import java.util.concurrent.ConcurrentHashMap;
/** A FilterIndexReader contains another IndexReader, which it
* uses as its basic source of data, possibly transforming the data along the
@@ -287,7 +287,7 @@ public class FilterIndexReader extends IndexReader {
public FilterIndexReader(IndexReader in) {
super();
this.in = in;
- readerFinishedListeners = Collections.synchronizedSet(new HashSet<ReaderFinishedListener>());
+ readerFinishedListeners = new MapBackedSet<ReaderFinishedListener>(new ConcurrentHashMap<ReaderFinishedListener,Boolean>());
}
@Override
diff --git a/lucene/src/java/org/apache/lucene/index/IndexReader.java b/lucene/src/java/org/apache/lucene/index/IndexReader.java
index 7688884bb19..684c14e628b 100644
--- a/lucene/src/java/org/apache/lucene/index/IndexReader.java
+++ b/lucene/src/java/org/apache/lucene/index/IndexReader.java
@@ -34,7 +34,6 @@ import java.io.IOException;
import java.io.Closeable;
import java.util.Collection;
import java.util.List;
-import java.util.HashSet;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
@@ -128,10 +127,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
// Defensive (should never be null -- all impls must set
// this):
if (readerFinishedListeners != null) {
-
- // Clone the set so that we don't have to sync on
- // readerFinishedListeners while invoking them:
- for(ReaderFinishedListener listener : new HashSet<ReaderFinishedListener>(readerFinishedListeners)) {
+ for(ReaderFinishedListener listener : readerFinishedListeners) {
listener.finished(this);
}
}
diff --git a/lucene/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/src/java/org/apache/lucene/index/IndexWriter.java
index d4fd1a0f630..321daa0aa2b 100644
--- a/lucene/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/src/java/org/apache/lucene/index/IndexWriter.java
@@ -30,8 +30,8 @@ import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
-import java.util.Collections;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.ConcurrentHashMap;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
@@ -48,6 +48,7 @@ import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.ThreadInterruptedException;
+import org.apache.lucene.util.MapBackedSet;
/**
An IndexWriter creates and maintains an index.
@@ -366,7 +367,7 @@ public class IndexWriter implements Closeable {
}
// Used for all SegmentReaders we open
- private final Collection<IndexReader.ReaderFinishedListener> readerFinishedListeners = Collections.synchronizedSet(new HashSet<IndexReader.ReaderFinishedListener>());
+ private final Collection<IndexReader.ReaderFinishedListener> readerFinishedListeners = new MapBackedSet<IndexReader.ReaderFinishedListener>(new ConcurrentHashMap<IndexReader.ReaderFinishedListener,Boolean>());
Collection<IndexReader.ReaderFinishedListener> getReaderFinishedListeners() throws IOException {
return readerFinishedListeners;
diff --git a/lucene/src/java/org/apache/lucene/index/MultiReader.java b/lucene/src/java/org/apache/lucene/index/MultiReader.java
index 1e95cb272d9..0d3a082567b 100644
--- a/lucene/src/java/org/apache/lucene/index/MultiReader.java
+++ b/lucene/src/java/org/apache/lucene/index/MultiReader.java
@@ -20,14 +20,14 @@ package org.apache.lucene.index;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
-import java.util.HashSet;
-import java.util.Collections;
+import java.util.concurrent.ConcurrentHashMap;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.ReaderUtil;
+import org.apache.lucene.util.MapBackedSet;
/** An IndexReader which reads multiple indexes, appending
* their content. */
@@ -83,7 +83,7 @@ public class MultiReader extends IndexReader implements Cloneable {
}
}
starts[subReaders.length] = maxDoc;
- readerFinishedListeners = Collections.synchronizedSet(new HashSet<ReaderFinishedListener>());
+ readerFinishedListeners = new MapBackedSet<ReaderFinishedListener>(new ConcurrentHashMap<ReaderFinishedListener,Boolean>());
return ReaderUtil.buildReaderContext(this);
}
diff --git a/lucene/src/java/org/apache/lucene/index/ParallelReader.java b/lucene/src/java/org/apache/lucene/index/ParallelReader.java
index 8b789e02058..004066c4daa 100644
--- a/lucene/src/java/org/apache/lucene/index/ParallelReader.java
+++ b/lucene/src/java/org/apache/lucene/index/ParallelReader.java
@@ -23,9 +23,11 @@ import org.apache.lucene.document.FieldSelectorResult;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.MapBackedSet;
import java.io.IOException;
import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
/** An IndexReader which reads multiple, parallel indexes. Each index added
@@ -72,7 +74,7 @@ public class ParallelReader extends IndexReader {
public ParallelReader(boolean closeSubReaders) throws IOException {
super();
this.incRefReaders = !closeSubReaders;
- readerFinishedListeners = Collections.synchronizedSet(new HashSet<ReaderFinishedListener>());
+ readerFinishedListeners = new MapBackedSet<ReaderFinishedListener>(new ConcurrentHashMap<ReaderFinishedListener,Boolean>());
}
/** {@inheritDoc} */
diff --git a/lucene/src/java/org/apache/lucene/util/MapBackedSet.java b/lucene/src/java/org/apache/lucene/util/MapBackedSet.java
new file mode 100644
index 00000000000..7b0c42c8ae3
--- /dev/null
+++ b/lucene/src/java/org/apache/lucene/util/MapBackedSet.java
@@ -0,0 +1,73 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Serializable;
+import java.util.AbstractSet;
+import java.util.Iterator;
+import java.util.Map;
+
+/**
+ * A Set implementation that wraps an actual Map based
+ * implementation.
+ *
+ * @lucene.internal
+ */
+public class MapBackedSet<E> extends AbstractSet<E> implements Serializable {
+
+ private static final long serialVersionUID = -6761513279741915432L;
+
+ private final Map<E, Boolean> map;
+
+ /**
+ * Creates a new instance which wraps the specified {@code map}.
+ */
+ public MapBackedSet(Map<E, Boolean> map) {
+ this.map = map;
+ }
+
+ @Override
+ public int size() {
+ return map.size();
+ }
+
+ @Override
+ public boolean contains(Object o) {
+ return map.containsKey(o);
+ }
+
+ @Override
+ public boolean add(E o) {
+ return map.put(o, Boolean.TRUE) == null;
+ }
+
+ @Override
+ public boolean remove(Object o) {
+ return map.remove(o) != null;
+ }
+
+ @Override
+ public void clear() {
+ map.clear();
+ }
+
+ @Override
+ public Iterator<E> iterator() {
+ return map.keySet().iterator();
+ }
+}
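Since MapBackedSet is introduced here, a short usage sketch: wrapping a ConcurrentHashMap gives a thread-safe Set with a weakly consistent iterator, which is what lets IndexReader invoke readerFinishedListeners without first cloning the set.

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.lucene.util.MapBackedSet;

public class MapBackedSetDemo {
  public static void main(String[] args) {
    Set<String> listeners =
        new MapBackedSet<String>(new ConcurrentHashMap<String,Boolean>());
    listeners.add("a");
    listeners.add("b");
    listeners.add("a");                   // duplicate, ignored
    for (String s : listeners) {          // safe under concurrent mutation
      System.out.println(s);
    }
    System.out.println(listeners.size()); // 2
  }
}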
From add8aecd99b03e88ceabd4bb5579cce652c4bc45 Mon Sep 17 00:00:00 2001
From: Michael McCandless
Date: Wed, 26 Jan 2011 22:42:08 +0000
Subject: [PATCH 024/185] LUCENE-2474: make MBS final
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063908 13f79535-47bb-0310-9956-ffa450edef68
---
lucene/src/java/org/apache/lucene/util/MapBackedSet.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lucene/src/java/org/apache/lucene/util/MapBackedSet.java b/lucene/src/java/org/apache/lucene/util/MapBackedSet.java
index 7b0c42c8ae3..9db05ec86ba 100644
--- a/lucene/src/java/org/apache/lucene/util/MapBackedSet.java
+++ b/lucene/src/java/org/apache/lucene/util/MapBackedSet.java
@@ -28,7 +28,7 @@ import java.util.Map;
*
* @lucene.internal
*/
-public class MapBackedSet<E> extends AbstractSet<E> implements Serializable {
+public final class MapBackedSet<E> extends AbstractSet<E> implements Serializable {
private static final long serialVersionUID = -6761513279741915432L;
From 51dc4159e6c4ed708cbcf8d18a543b57beb0037f Mon Sep 17 00:00:00 2001
From: Yonik Seeley
Date: Wed, 26 Jan 2011 23:40:08 +0000
Subject: [PATCH 025/185] SOLR-1283: fix numRead counter that caused mark
invalid exceptions
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063920 13f79535-47bb-0310-9956-ffa450edef68
---
.../apache/lucene/analysis/charfilter/HTMLStripCharFilter.java | 1 +
.../lucene/analysis/charfilter/HTMLStripCharFilterTest.java | 2 +-
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java
index 4ab01ab0d32..87591992e1f 100644
--- a/modules/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java
+++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java
@@ -101,6 +101,7 @@ public class HTMLStripCharFilter extends BaseCharFilter {
if (len>0) {
return pushed.charAt(len-1);
}
+ numRead++;
int ch = input.read();
push(ch);
return ch;
diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java
index 604f9668d53..f1af45ab350 100644
--- a/modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java
+++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java
@@ -169,7 +169,7 @@ public class HTMLStripCharFilterTest extends LuceneTestCase {
public void testBufferOverflow() throws Exception {
StringBuilder testBuilder = new StringBuilder(HTMLStripCharFilter.DEFAULT_READ_AHEAD + 50);
- testBuilder.append("ah> ");
+ testBuilder.append("ah> ??????");
appendChars(testBuilder, HTMLStripCharFilter.DEFAULT_READ_AHEAD + 500);
processBuffer(testBuilder.toString(), "Failed on pseudo proc. instr.");//processing instructions
From 5a9c5aae0b54235ecac85818d2e6dfa06c088df4 Mon Sep 17 00:00:00 2001
From: Michael McCandless
Date: Thu, 27 Jan 2011 00:42:58 +0000
Subject: [PATCH 026/185] LUCENE-2680: deletes were being double-applied
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063936 13f79535-47bb-0310-9956-ffa450edef68
---
lucene/src/java/org/apache/lucene/index/IndexWriter.java | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/lucene/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/src/java/org/apache/lucene/index/IndexWriter.java
index 321daa0aa2b..613d47058f5 100644
--- a/lucene/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/src/java/org/apache/lucene/index/IndexWriter.java
@@ -2895,10 +2895,6 @@ public class IndexWriter implements Closeable {
final synchronized void mergeInit(MergePolicy.OneMerge merge) throws IOException {
boolean success = false;
try {
- // Lock order: IW -> BD
- if (bufferedDeletes.applyDeletes(readerPool, segmentInfos, merge.segments)) {
- checkpoint();
- }
_mergeInit(merge);
success = true;
} finally {
@@ -2929,6 +2925,11 @@ public class IndexWriter implements Closeable {
if (merge.isAborted())
return;
+ // Lock order: IW -> BD
+ if (bufferedDeletes.applyDeletes(readerPool, segmentInfos, merge.segments)) {
+ checkpoint();
+ }
+
// Bind a new segment name here so even with
// ConcurrentMergePolicy we keep deterministic segment
// names.
From ecea5e669a150a3d0171f75f53e0e8ad1a8dbb84 Mon Sep 17 00:00:00 2001
From: Doron Cohen
Date: Thu, 27 Jan 2011 09:26:04 +0000
Subject: [PATCH 027/185] LUCENE-914: Scorer.skipTo(current) remains on current
for some scorers - javadoc fix.
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064051 13f79535-47bb-0310-9956-ffa450edef68
---
.../src/java/org/apache/lucene/search/DocIdSetIterator.java | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/lucene/src/java/org/apache/lucene/search/DocIdSetIterator.java b/lucene/src/java/org/apache/lucene/search/DocIdSetIterator.java
index f10d04c0d48..39a73345f9b 100644
--- a/lucene/src/java/org/apache/lucene/search/DocIdSetIterator.java
+++ b/lucene/src/java/org/apache/lucene/search/DocIdSetIterator.java
@@ -78,10 +78,10 @@ public abstract class DocIdSetIterator {
*
* Some implementations are considerably more efficient than that.
*
- * NOTE: certain implementations may return a different value (each
- * time) if called several times in a row with the same target.
+ * NOTE: when target ≤ current implementations may opt
+ * not to advance beyond their current {@link #docID()}.
*
- * NOTE: this method may be called with {@value #NO_MORE_DOCS} for
+ * NOTE: this method may be called with {@link #NO_MORE_DOCS} for
* efficiency by some Scorers. If your implementation cannot efficiently
* determine that it should exhaust, it is recommended that you check for that
* value in each call to this method.
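A consumer written against the clarified contract (a sketch; only the DocIdSetIterator API above is assumed): since advance(target) with target ≤ docID() may legally stay put, derive each step from the iterator itself instead of re-advancing to the same target.

import java.io.IOException;
import org.apache.lucene.search.DocIdSetIterator;

public class AdvanceContractDemo {
  // Prints every docID >= minDoc: advance once to enter the range, then
  // step with nextDoc(); never call advance(t) twice with the same t
  // expecting the iterator to move.
  static void collectFrom(DocIdSetIterator it, int minDoc) throws IOException {
    int doc = it.advance(minDoc);
    while (doc != DocIdSetIterator.NO_MORE_DOCS) {
      System.out.println(doc);
      doc = it.nextDoc();
    }
  }
}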
From b24a26b251f0498d56312f7e0d62edc5678ae929 Mon Sep 17 00:00:00 2001
From: Shai Erera
Date: Thu, 27 Jan 2011 10:10:36 +0000
Subject: [PATCH 028/185] LUCENE-2609: Generate jar containing test classes
(trunk)
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064068 13f79535-47bb-0310-9956-ffa450edef68
---
dev-tools/testjar/testfiles | 24 ++++++++++++++++++++++++
lucene/build.xml | 12 ++++++++++++
lucene/common-build.xml | 4 ----
3 files changed, 36 insertions(+), 4 deletions(-)
create mode 100755 dev-tools/testjar/testfiles
diff --git a/dev-tools/testjar/testfiles b/dev-tools/testjar/testfiles
new file mode 100755
index 00000000000..84d8bfb2eab
--- /dev/null
+++ b/dev-tools/testjar/testfiles
@@ -0,0 +1,24 @@
+core.test.files=\
+ org/apache/lucene/util/_TestUtil.java,\
+ org/apache/lucene/util/LineFileDocs.java,\
+ org/apache/lucene/util/LuceneJUnitDividingSelector.java,\
+ org/apache/lucene/util/LuceneJUnitResultFormatter.java,\
+ org/apache/lucene/util/LuceneTestCase.java,\
+ org/apache/lucene/util/automaton/AutomatonTestUtil.java,\
+ org/apache/lucene/search/QueryUtils.java,\
+ org/apache/lucene/analysis/BaseTokenStreamTestCase.java,\
+ org/apache/lucene/analysis/MockAnalyzer.java,\
+ org/apache/lucene/analysis/MockPayloadAnalyzer.java,\
+ org/apache/lucene/analysis/MockTokenFilter.java,\
+ org/apache/lucene/analysis/MockTokenizer.java,\
+ org/apache/lucene/index/MockIndexInput.java,\
+ org/apache/lucene/index/RandomIndexWriter.java,\
+ org/apache/lucene/index/DocHelper.java,\
+ org/apache/lucene/codecs/preflexrw/PreFlexFieldsWriter.java,\
+ org/apache/lucene/codecs/preflexrw/PreFlexRWCodec.java,\
+ org/apache/lucene/codecs/preflexrw/TermInfosWriter.java,\
+ org/apache/lucene/codecs/mockrandom/MockRandomCodec.java,\
+ org/apache/lucene/store/_TestHelper.java,\
+ org/apache/lucene/store/MockDirectoryWrapper.java,\
+ org/apache/lucene/store/MockIndexInputWrapper.java,\
+ org/apache/lucene/store/MockIndexOutputWrapper.java,\
diff --git a/lucene/build.xml b/lucene/build.xml
index 7827695aa9d..4205d3c756d 100644
--- a/lucene/build.xml
+++ b/lucene/build.xml
@@ -618,4 +618,16 @@
+
+
+
+
+
+
+
+
+
+
+
diff --git a/lucene/common-build.xml b/lucene/common-build.xml
index fe93b40b62b..be42d5bd648 100644
--- a/lucene/common-build.xml
+++ b/lucene/common-build.xml
@@ -308,10 +308,6 @@
-
-
-
-
##################################################################
From ad24f6a01fef7178dca09c7862acd134ea147a05 Mon Sep 17 00:00:00 2001
From: Shai Erera
Date: Thu, 27 Jan 2011 10:28:23 +0000
Subject: [PATCH 029/185] LUCENE-1469: make isValid protected and not static
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064072 13f79535-47bb-0310-9956-ffa450edef68
---
.../src/java/org/apache/lucene/wordnet/SynonymMap.java | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynonymMap.java b/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynonymMap.java
index 455c8118c5a..ee7eabd9cae 100644
--- a/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynonymMap.java
+++ b/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynonymMap.java
@@ -161,7 +161,7 @@ public class SynonymMap {
return word.toLowerCase();
}
- private static boolean isValid(String str) {
+ protected boolean isValid(String str) {
for (int i=str.length(); --i >= 0; ) {
if (!Character.isLetter(str.charAt(i))) return false;
}
@@ -395,4 +395,4 @@ public class SynonymMap {
}
}
-}
\ No newline at end of file
+}
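With isValid now a protected instance method, subclasses can override the term-acceptance policy. A hypothetical subclass sketch (it assumes SynonymMap's existing InputStream constructor; the relaxed digits-allowed policy is purely illustrative):

import java.io.IOException;
import java.io.InputStream;
import org.apache.lucene.wordnet.SynonymMap;

public class LetterOrDigitSynonymMap extends SynonymMap {
  public LetterOrDigitSynonymMap(InputStream prologInput) throws IOException {
    super(prologInput);
  }

  @Override
  protected boolean isValid(String str) {
    // Accept digits too, not just letters, in synonym terms.
    for (int i = str.length(); --i >= 0; ) {
      if (!Character.isLetterOrDigit(str.charAt(i))) return false;
    }
    return true;
  }
}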
From 4aa8a1f179d347b040d4b3fb541672254da88cf5 Mon Sep 17 00:00:00 2001
From: Shai Erera
Date: Thu, 27 Jan 2011 11:10:48 +0000
Subject: [PATCH 030/185] remove FilterManager
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064078 13f79535-47bb-0310-9956-ffa450edef68
---
.../apache/lucene/search/FilterManager.java | 203 ------------------
1 file changed, 203 deletions(-)
delete mode 100644 lucene/src/java/org/apache/lucene/search/FilterManager.java
diff --git a/lucene/src/java/org/apache/lucene/search/FilterManager.java b/lucene/src/java/org/apache/lucene/search/FilterManager.java
deleted file mode 100644
index 608f243890b..00000000000
--- a/lucene/src/java/org/apache/lucene/search/FilterManager.java
+++ /dev/null
@@ -1,203 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.util.Comparator;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.TreeSet;
-
-import org.apache.lucene.util.ThreadInterruptedException;
-
-/**
- * Filter caching singleton. It can be used
- * to save filters locally for reuse.
- * This class makes it possible to cache Filters even when using RMI, as it
- * keeps the cache on the searcher side of the RMI connection.
- *
- * Also could be used as a persistent storage for any filter as long as the
- * filter provides a proper hashCode(), as that is used as the key in the cache.
- *
- * The cache is periodically cleaned up from a separate thread to ensure the
- * cache doesn't exceed the maximum size.
- */
-public class FilterManager {
-
- protected static FilterManager manager;
-
- /** The default maximum number of Filters in the cache */
- protected static final int DEFAULT_CACHE_CLEAN_SIZE = 100;
- /** The default frequency of cache cleanup */
- protected static final long DEFAULT_CACHE_SLEEP_TIME = 1000 * 60 * 10;
-
- /** The cache itself */
- protected Map<Integer,FilterItem> cache;
- /** Maximum allowed cache size */
- protected int cacheCleanSize;
- /** Cache cleaning frequency */
- protected long cleanSleepTime;
- /** Cache cleaner that runs in a separate thread */
- protected FilterCleaner filterCleaner;
-
- public synchronized static FilterManager getInstance() {
- if (manager == null) {
- manager = new FilterManager();
- }
- return manager;
- }
-
- /**
- * Sets up the FilterManager singleton.
- */
- protected FilterManager() {
- cache = new HashMap<Integer,FilterItem>();
- cacheCleanSize = DEFAULT_CACHE_CLEAN_SIZE; // Let the cache get to 100 items
- cleanSleepTime = DEFAULT_CACHE_SLEEP_TIME; // 10 minutes between cleanings
-
- filterCleaner = new FilterCleaner();
- Thread fcThread = new Thread(filterCleaner);
- // set to be a Daemon so it doesn't have to be stopped
- fcThread.setDaemon(true);
- fcThread.start();
- }
-
- /**
- * Sets the max size that cache should reach before it is cleaned up
- * @param cacheCleanSize maximum allowed cache size
- */
- public void setCacheSize(int cacheCleanSize) {
- this.cacheCleanSize = cacheCleanSize;
- }
-
- /**
- * Sets the cache cleaning frequency in milliseconds.
- * @param cleanSleepTime cleaning frequency in milliseconds
- */
- public void setCleanThreadSleepTime(long cleanSleepTime) {
- this.cleanSleepTime = cleanSleepTime;
- }
-
- /**
- * Returns the cached version of the filter. Allows the caller to pass up
- * a small filter but this will keep a persistent version around and allow
- * the caching filter to do its job.
- *
- * @param filter The input filter
- * @return The cached version of the filter
- */
- public Filter getFilter(Filter filter) {
- synchronized(cache) {
- FilterItem fi = null;
- fi = cache.get(Integer.valueOf(filter.hashCode()));
- if (fi != null) {
- fi.timestamp = new Date().getTime();
- return fi.filter;
- }
- cache.put(Integer.valueOf(filter.hashCode()), new FilterItem(filter));
- return filter;
- }
- }
-
- /**
- * Holds the filter and the last time the filter was used, to make LRU-based
- * cache cleaning possible.
- * TODO: Clean this up when we switch to Java 1.5
- */
- protected class FilterItem {
- public Filter filter;
- public long timestamp;
-
- public FilterItem (Filter filter) {
- this.filter = filter;
- this.timestamp = new Date().getTime();
- }
- }
-
-
- /**
- * Keeps the cache from getting too big.
- * If we were using Java 1.5, we could use LinkedHashMap and we would not need this thread
- * to clean out the cache.
- *
- * The SortedSet sortedFilterItems is used only to sort the items from the cache,
- * so when it's time to clean up we have the TreeSet sort the FilterItems by
- * timestamp.
- *
- * Removes 1.5 * the numbers of items to make the cache smaller.
- * For example:
- * If cache clean size is 10, and the cache is at 15, we would remove (15 - 10) * 1.5 = 7.5 round up to 8.
- * This way we clean the cache a bit more, and avoid having the cache cleaner having to do it frequently.
- */
- protected class FilterCleaner implements Runnable {
-
- private boolean running = true;
- private TreeSet<Map.Entry<Integer,FilterItem>> sortedFilterItems;
-
- public FilterCleaner() {
- sortedFilterItems = new TreeSet<Map.Entry<Integer,FilterItem>>(new Comparator<Map.Entry<Integer,FilterItem>>() {
- public int compare(Map.Entry<Integer,FilterItem> a, Map.Entry<Integer,FilterItem> b) {
- FilterItem fia = a.getValue();
- FilterItem fib = b.getValue();
- if ( fia.timestamp == fib.timestamp ) {
- return 0;
- }
- // smaller timestamp first
- if ( fia.timestamp < fib.timestamp ) {
- return -1;
- }
- // larger timestamp last
- return 1;
-
- }
- });
- }
-
- public void run () {
- while (running) {
-
- // sort items from oldest to newest
- // we delete the oldest filters
- if (cache.size() > cacheCleanSize) {
- // empty the temporary set
- sortedFilterItems.clear();
- synchronized (cache) {
- sortedFilterItems.addAll(cache.entrySet());
- Iterator<Map.Entry<Integer,FilterItem>> it = sortedFilterItems.iterator();
- int numToDelete = (int) ((cache.size() - cacheCleanSize) * 1.5);
- int counter = 0;
- // loop over the set and delete all of the cache entries not used in a while
- while (it.hasNext() && counter++ < numToDelete) {
- Map.Entry<Integer,FilterItem> entry = it.next();
- cache.remove(entry.getKey());
- }
- }
- // empty the set so we don't tie up the memory
- sortedFilterItems.clear();
- }
- // take a nap
- try {
- Thread.sleep(cleanSleepTime);
- } catch (InterruptedException ie) {
- throw new ThreadInterruptedException(ie);
- }
- }
- }
- }
-}
From 4c62240087b896ec5dde2383300c4a3d396d7693 Mon Sep 17 00:00:00 2001
From: Shai Erera
Date: Thu, 27 Jan 2011 20:07:43 +0000
Subject: [PATCH 031/185] LUCENE-2891: merge to trunk
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064285 13f79535-47bb-0310-9956-ffa450edef68
---
lucene/CHANGES.txt | 3 ++
.../org/apache/lucene/index/IndexReader.java | 5 ++-
.../lucene/index/IndexWriterConfig.java | 9 +++--
.../lucene/index/TestIndexWriterConfig.java | 17 +++++++++
.../lucene/index/TestIndexWriterReader.java | 35 +++++++++++++++++++
5 files changed, 65 insertions(+), 4 deletions(-)
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index d131a02b8c6..e859ecd042c 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -701,6 +701,9 @@ Bug fixes
* LUCENE-1846: DateTools now uses the US locale everywhere, so DateTools.round()
is safe also in strange locales. (Uwe Schindler)
+* LUCENE-2891: IndexWriterConfig did not accept -1 in setReaderTermIndexDivisor,
+ which can be used to prevent loading the terms index into memory. (Shai Erera)
+
New features
* LUCENE-2128: Parallelized fetching document frequencies during weight
diff --git a/lucene/src/java/org/apache/lucene/index/IndexReader.java b/lucene/src/java/org/apache/lucene/index/IndexReader.java
index 684c14e628b..c73c514edf4 100644
--- a/lucene/src/java/org/apache/lucene/index/IndexReader.java
+++ b/lucene/src/java/org/apache/lucene/index/IndexReader.java
@@ -415,7 +415,10 @@ public abstract class IndexReader implements Cloneable,Closeable {
* memory. By setting this to a value > 1 you can reduce
* memory usage, at the expense of higher latency when
* loading a TermInfo. The default value is 1. Set this
- * to -1 to skip loading the terms index entirely.
+ * to -1 to skip loading the terms index entirely. This is only useful in
+ * advanced situations when you will only .next() through all terms;
+ * attempts to seek will hit an exception.
+ *
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
diff --git a/lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java b/lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java
index 18daa12e06c..812306cf4e8 100644
--- a/lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java
+++ b/lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java
@@ -552,10 +552,13 @@ public final class IndexWriterConfig implements Cloneable {
/** Sets the termsIndexDivisor passed to any readers that
* IndexWriter opens, for example when applying deletes
* or creating a near-real-time reader in {@link
- * IndexWriter#getReader}. */
+ * IndexWriter#getReader}. If you pass -1, the terms index
+ * won't be loaded by the readers. This is only useful in
+ * advanced situations when you will only .next() through
+ * all terms; attempts to seek will hit an exception. */
public IndexWriterConfig setReaderTermsIndexDivisor(int divisor) {
- if (divisor <= 0) {
- throw new IllegalArgumentException("divisor must be >= 1 (got " + divisor + ")");
+ if (divisor <= 0 && divisor != -1) {
+ throw new IllegalArgumentException("divisor must be >= 1, or -1 (got " + divisor + ")");
}
readerTermsIndexDivisor = divisor;
return this;
diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterConfig.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterConfig.java
index 1e12d8531fd..c8c203bef01 100644
--- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterConfig.java
+++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterConfig.java
@@ -222,6 +222,23 @@ public class TestIndexWriterConfig extends LuceneTestCase {
// this is expected
}
+ // Test setReaderTermsIndexDivisor
+ try {
+ conf.setReaderTermsIndexDivisor(0);
+ fail("should not have succeeded to set termsIndexDivisor to 0");
+ } catch (IllegalArgumentException e) {
+ // this is expected
+ }
+
+ // Setting to -1 is ok
+ conf.setReaderTermsIndexDivisor(-1);
+ try {
+ conf.setReaderTermsIndexDivisor(-2);
+ fail("should not have succeeded to set termsIndexDivisor to < -1");
+ } catch (IllegalArgumentException e) {
+ // this is expected
+ }
+
assertEquals(IndexWriterConfig.DEFAULT_MAX_THREAD_STATES, conf.getMaxThreadStates());
conf.setMaxThreadStates(5);
assertEquals(5, conf.getMaxThreadStates());
diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java
index d0883d32825..09c7e1972b6 100644
--- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java
+++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java
@@ -20,6 +20,7 @@ import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Collections;
+import java.util.HashSet;
import java.util.List;
import java.util.Random;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -30,6 +31,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.index.codecs.CodecProvider;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
@@ -990,4 +992,37 @@ public class TestIndexWriterReader extends LuceneTestCase {
dir.close();
assertTrue(didWarm.get());
}
+
+ public void testNoTermsIndex() throws Exception {
+ // Some Codecs don't honor the ReaderTermsIndexDiviso, so skip the test if
+ // they're picked.
+ HashSet<String> illegalCodecs = new HashSet<String>();
+ illegalCodecs.add("PreFlex");
+ illegalCodecs.add("MockRandom");
+ illegalCodecs.add("SimpleText");
+
+ IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
+ new MockAnalyzer()).setReaderTermsIndexDivisor(-1);
+
+ // Don't proceed if picked Codec is in the list of illegal ones.
+ if (illegalCodecs.contains(conf.getCodecProvider().getFieldCodec("f"))) return;
+
+ Directory dir = newDirectory();
+ IndexWriter w = new IndexWriter(dir, conf);
+ Document doc = new Document();
+ doc.add(new Field("f", "val", Store.NO, Index.ANALYZED));
+ w.addDocument(doc);
+ IndexReader r = IndexReader.open(w).getSequentialSubReaders()[0];
+ try {
+ r.termDocsEnum(null, "f", new BytesRef("val"));
+ fail("should have failed to seek since terms index was not loaded. Codec used " + conf.getCodecProvider().getFieldCodec("f"));
+ } catch (IllegalStateException e) {
+ // expected - we didn't load the term index
+ } finally {
+ r.close();
+ w.close();
+ dir.close();
+ }
+ }
+
}
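A short usage sketch of the newly-allowed value (a minimal sketch assuming trunk-era API; Version.LUCENE_40 and StandardAnalyzer are stand-ins for whatever the caller already uses):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.util.Version;

public class NoTermsIndexConfigDemo {
  public static void main(String[] args) {
    // -1 now passes validation: readers opened by the writer will not load
    // the terms index, so terms can only be stepped through sequentially,
    // and any seek hits an exception (as the new test shows).
    IndexWriterConfig conf =
        new IndexWriterConfig(Version.LUCENE_40,
                              new StandardAnalyzer(Version.LUCENE_40))
            .setReaderTermsIndexDivisor(-1);
    System.out.println(conf.getReaderTermsIndexDivisor()); // -1
  }
}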
From 946dc5c68adb747ee067daebdfd28a51e07a1e82 Mon Sep 17 00:00:00 2001
From: Yonik Seeley
Date: Thu, 27 Jan 2011 22:00:14 +0000
Subject: [PATCH 032/185] SOLR-2263: Add ability for RawResponseWriter to
stream binary files
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064330 13f79535-47bb-0310-9956-ffa450edef68
---
solr/CHANGES.txt | 3 +++
.../solr/response/RawResponseWriter.java | 23 ++++++++++++++++++-
2 files changed, 25 insertions(+), 1 deletion(-)
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index a4308254de6..d25f1ab1342 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -419,6 +419,9 @@ New Features
* SOLR-2129: Added a Solr module for dynamic metadata extraction/indexing with Apache UIMA.
See contrib/uima/README.txt for more information. (Tommaso Teofili via rmuir)
+* SOLR-1283: Add ability for RawResponseWriter to stream binary files as well as
+ text files. (Eric Pugh via yonik)
+
Optimizations
----------------------
diff --git a/solr/src/java/org/apache/solr/response/RawResponseWriter.java b/solr/src/java/org/apache/solr/response/RawResponseWriter.java
index 45e40b9bfd6..e34691c192d 100644
--- a/solr/src/java/org/apache/solr/response/RawResponseWriter.java
+++ b/solr/src/java/org/apache/solr/response/RawResponseWriter.java
@@ -18,6 +18,7 @@
package org.apache.solr.response;
import java.io.IOException;
+import java.io.OutputStream;
import java.io.Reader;
import java.io.Writer;
@@ -44,7 +45,7 @@ import org.apache.solr.request.SolrQueryRequest;
* @version $Id$
* @since solr 1.3
*/
-public class RawResponseWriter implements QueryResponseWriter
+public class RawResponseWriter implements BinaryQueryResponseWriter
{
/**
* The key that should be used to add a ContentStream to the
@@ -93,4 +94,24 @@ public class RawResponseWriter implements QueryResponseWriter
getBaseWriter( request ).write( writer, request, response );
}
}
+
+  public void write(OutputStream out, SolrQueryRequest request,
+      SolrQueryResponse response) throws IOException {
+    Object obj = response.getValues().get( CONTENT );
+    if( obj != null && (obj instanceof ContentStream ) ) {
+      // copy the contents to the writer...
+      ContentStream content = (ContentStream)obj;
+      java.io.InputStream in = content.getStream();
+      try {
+        IOUtils.copy( in, out );
+      } finally {
+        in.close();
+      }
+    }
+    else {
+      //getBaseWriter( request ).write( writer, request, response );
+      throw new IOException("did not find a CONTENT object");
+    }
+  }
}
From 261a161c293a6c48d695b304dff325655db68efe Mon Sep 17 00:00:00 2001
From: Koji Sekiguchi
Date: Fri, 28 Jan 2011 00:19:47 +0000
Subject: [PATCH 033/185] SOLR-2263: correct the ticket number. SOLR-1283 ->
SOLR-2263
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064379 13f79535-47bb-0310-9956-ffa450edef68
---
solr/CHANGES.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index d25f1ab1342..7285ed6b69e 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -419,7 +419,7 @@ New Features
* SOLR-2129: Added a Solr module for dynamic metadata extraction/indexing with Apache UIMA.
See contrib/uima/README.txt for more information. (Tommaso Teofili via rmuir)
-* SOLR-1283: Add ability for RawResponseWriter to stream binary files as well as
+* SOLR-2263: Add ability for RawResponseWriter to stream binary files as well as
text files. (Eric Pugh via yonik)
Optimizations
From e70311f3860158dc6d12c7a5e03714cc830d1e6a Mon Sep 17 00:00:00 2001
From: "Chris M. Hostetter"
Date: Fri, 28 Jan 2011 00:34:40 +0000
Subject: [PATCH 034/185] SOLR-2085: Improve SolrJ behavior when FacetComponent
comes before QueryComponent
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064386 13f79535-47bb-0310-9956-ffa450edef68
---
solr/CHANGES.txt | 4 ++++
.../org/apache/solr/client/solrj/response/QueryResponse.java | 4 +++-
2 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 7285ed6b69e..f30713d253a 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -166,6 +166,10 @@ Bug Fixes
* SOLR-2320: Fixed ReplicationHandler detail reporting for masters
(hossman)
+* SOLR-2085: Improve SolrJ behavior when FacetComponent comes before
+ QueryComponent (Tomas Salfischberger via hossman)
+
+
Other Changes
----------------------
diff --git a/solr/src/solrj/org/apache/solr/client/solrj/response/QueryResponse.java b/solr/src/solrj/org/apache/solr/client/solrj/response/QueryResponse.java
index c80334070af..f1259d1ccbc 100644
--- a/solr/src/solrj/org/apache/solr/client/solrj/response/QueryResponse.java
+++ b/solr/src/solrj/org/apache/solr/client/solrj/response/QueryResponse.java
@@ -105,7 +105,8 @@ public class QueryResponse extends SolrResponseBase
}
else if( "facet_counts".equals( n ) ) {
_facetInfo = (NamedList<Object>) res.getVal( i );
- extractFacetInfo( _facetInfo );
+ // extractFacetInfo inspects _results, so defer calling it
+ // in case it hasn't been populated yet.
}
else if( "debug".equals( n ) ) {
_debugInfo = (NamedList) res.getVal( i );
@@ -128,6 +129,7 @@ public class QueryResponse extends SolrResponseBase
extractTermsInfo( _termsInfo );
}
}
+ if(_facetInfo != null) extractFacetInfo( _facetInfo );
}
private void extractSpellCheckInfo(NamedList<NamedList<Object>> spellInfo) {
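The fix above is a general two-pass pattern for order-dependent response sections: bind each section on the first pass, and resolve the ones with cross-references only after everything is bound. A self-contained sketch (illustrative names, not the SolrJ API):

import java.util.Map;

public class TwoPassParseDemo {
  private Object results;
  private Object facetInfo;

  void parse(Map<String, Object> sections) {
    // Pass 1: bind every section without interpreting cross-references.
    for (Map.Entry<String, Object> e : sections.entrySet()) {
      if ("response".equals(e.getKey())) results = e.getValue();
      else if ("facet_counts".equals(e.getKey())) facetInfo = e.getValue();
    }
    // Pass 2: facet extraction inspects 'results', so run it only after
    // every section is bound, whatever order the components emitted them.
    if (facetInfo != null) extractFacetInfo(facetInfo);
  }

  void extractFacetInfo(Object info) {
    if (results == null) throw new IllegalStateException("results not bound");
  }
}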
From f13449ce484ea3af99aa9de09633d8e9cf1e852f Mon Sep 17 00:00:00 2001
From: "Chris M. Hostetter"
Date: Fri, 28 Jan 2011 01:02:28 +0000
Subject: [PATCH 035/185] SOLR-1940: Fix SolrDispatchFilter behavior when
Content-Type is unknown
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064395 13f79535-47bb-0310-9956-ffa450edef68
---
solr/CHANGES.txt | 3 +++
.../src/org/apache/solr/servlet/SolrDispatchFilter.java | 4 +++-
solr/src/webapp/web/admin/index.jsp | 4 ++--
3 files changed, 8 insertions(+), 3 deletions(-)
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index f30713d253a..d1275cf0f39 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -169,6 +169,9 @@ Bug Fixes
* SOLR-2085: Improve SolrJ behavior when FacetComponent comes before
QueryComponent (Tomas Salfischberger via hossman)
+* SOLR-1940: Fix SolrDispatchFilter behavior when Content-Type is
+ unknown (Lance Norskog and hossman)
+
Other Changes
----------------------
diff --git a/solr/src/webapp/src/org/apache/solr/servlet/SolrDispatchFilter.java b/solr/src/webapp/src/org/apache/solr/servlet/SolrDispatchFilter.java
index 93bdddc1720..0dcc8373d95 100644
--- a/solr/src/webapp/src/org/apache/solr/servlet/SolrDispatchFilter.java
+++ b/solr/src/webapp/src/org/apache/solr/servlet/SolrDispatchFilter.java
@@ -315,7 +315,9 @@ public class SolrDispatchFilter implements Filter
sendError((HttpServletResponse) response, solrRsp.getException());
} else {
// Now write it out
- response.setContentType(responseWriter.getContentType(solrReq, solrRsp));
+ final String ct = responseWriter.getContentType(solrReq, solrRsp);
+ // don't call setContentType on null
+ if (null != ct) response.setContentType(ct);
if (Method.HEAD != reqMethod) {
if (responseWriter instanceof BinaryQueryResponseWriter) {
BinaryQueryResponseWriter binWriter = (BinaryQueryResponseWriter) responseWriter;
diff --git a/solr/src/webapp/web/admin/index.jsp b/solr/src/webapp/web/admin/index.jsp
index b38c6884b82..a34a2b0c876 100644
--- a/solr/src/webapp/web/admin/index.jsp
+++ b/solr/src/webapp/web/admin/index.jsp
@@ -39,10 +39,10 @@
<% if (null != core.getSchemaResource()) { %>
- [Schema ]
+ [Schema ]
<% }
if (null != core.getConfigResource()) { %>
- [Config ]
+ [Config ]
<% } %>
[Analysis ]
[Schema Browser ] <%if(replicationhandler){%>[Replication ]<%}%>
From 4f1fe2b66db4a284152ba5e4e7740cff266a8f25 Mon Sep 17 00:00:00 2001
From: Shai Erera
Date: Fri, 28 Jan 2011 05:25:35 +0000
Subject: [PATCH 036/185] LUCENE-2895: MockRandomCodec loads termsIndex even if
termsIndexDivisor is set to -1
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064463 13f79535-47bb-0310-9956-ffa450edef68
---
.../test/org/apache/lucene/index/TestIndexWriterReader.java | 5 +----
.../lucene/index/codecs/mockrandom/MockRandomCodec.java | 6 +++++-
2 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java
index 09c7e1972b6..6758e89b5d2 100644
--- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java
+++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java
@@ -31,7 +31,6 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;
-import org.apache.lucene.index.codecs.CodecProvider;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
@@ -994,16 +993,14 @@ public class TestIndexWriterReader extends LuceneTestCase {
}
public void testNoTermsIndex() throws Exception {
- // Some Codecs don't honor the ReaderTermsIndexDiviso, so skip the test if
+ // Some Codecs don't honor the ReaderTermsIndexDivisor, so skip the test if
// they're picked.
HashSet<String> illegalCodecs = new HashSet<String>();
illegalCodecs.add("PreFlex");
- illegalCodecs.add("MockRandom");
illegalCodecs.add("SimpleText");
IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setReaderTermsIndexDivisor(-1);
-
// Don't proceed if picked Codec is in the list of illegal ones.
if (illegalCodecs.contains(conf.getCodecProvider().getFieldCodec("f"))) return;
diff --git a/lucene/src/test/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java b/lucene/src/test/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java
index d5554512be7..d00854ec9f1 100644
--- a/lucene/src/test/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java
+++ b/lucene/src/test/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java
@@ -236,7 +236,11 @@ public class MockRandomCodec extends Codec {
try {
if (random.nextBoolean()) {
- state.termsIndexDivisor = _TestUtil.nextInt(random, 1, 10);
+ // if termsIndexDivisor is set to -1, we should not touch it. It means a
+ // test explicitly instructed not to load the terms index.
+ if (state.termsIndexDivisor != -1) {
+ state.termsIndexDivisor = _TestUtil.nextInt(random, 1, 10);
+ }
if (LuceneTestCase.VERBOSE) {
System.out.println("MockRandomCodec: fixed-gap terms index (divisor=" + state.termsIndexDivisor + ")");
}
From a7a9be923ec56c2918294da427f7f467c9a82d7e Mon Sep 17 00:00:00 2001
From: Simon Willnauer
Date: Fri, 28 Jan 2011 15:25:33 +0000
Subject: [PATCH 037/185] Only create a Filter list if there is a non-empty fq
parameter
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064730 13f79535-47bb-0310-9956-ffa450edef68
---
.../org/apache/solr/handler/component/QueryComponent.java | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/solr/src/java/org/apache/solr/handler/component/QueryComponent.java b/solr/src/java/org/apache/solr/handler/component/QueryComponent.java
index f98d65493ca..10049937917 100644
--- a/solr/src/java/org/apache/solr/handler/component/QueryComponent.java
+++ b/solr/src/java/org/apache/solr/handler/component/QueryComponent.java
@@ -107,7 +107,6 @@ public class QueryComponent extends SearchComponent
List<Query> filters = rb.getFilters();
if (filters==null) {
filters = new ArrayList<Query>(fqs.length);
- rb.setFilters( filters );
}
for (String fq : fqs) {
if (fq != null && fq.trim().length()!=0) {
@@ -115,6 +114,12 @@ public class QueryComponent extends SearchComponent
filters.add(fqp.getQuery());
}
}
+ // only set the filters if they are non-empty, otherwise
+ // fq=&someotherParam= would trigger the all-docs filter for every request
+ // if the filter cache is disabled
+ if (!filters.isEmpty()) {
+ rb.setFilters( filters );
+ }
}
} catch (ParseException e) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
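Read together, the two hunks leave the fq handling roughly as below (a sketch; the request plumbing and fqs array come from the surrounding method): the list is built lazily and only published to the ResponseBuilder once it actually holds a query.

    List<Query> filters = rb.getFilters();
    if (filters == null) {
      filters = new ArrayList<Query>(fqs.length);
    }
    for (String fq : fqs) {
      if (fq != null && fq.trim().length() != 0) {
        QParser fqp = QParser.getParser(fq, null, req);
        filters.add(fqp.getQuery());
      }
    }
    // an empty fq= no longer installs a match-all filter on every request
    if (!filters.isEmpty()) {
      rb.setFilters(filters);
    }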
From 92874ddaa629e3fdbb4cae4b4f414246676df393 Mon Sep 17 00:00:00 2001
From: Koji Sekiguchi
Date: Fri, 28 Jan 2011 15:37:43 +0000
Subject: [PATCH 038/185] SOLR-860: Add debug output for MoreLikeThis
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064735 13f79535-47bb-0310-9956-ffa450edef68
---
solr/CHANGES.txt | 2 +
.../solr/handler/MoreLikeThisHandler.java | 57 ++++++++++++-------
.../component/MoreLikeThisComponent.java | 57 +++++++++++++++++--
.../solr/handler/MoreLikeThisHandlerTest.java | 14 ++++-
4 files changed, 102 insertions(+), 28 deletions(-)
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index d1275cf0f39..8a3ad22e8e1 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -429,6 +429,8 @@ New Features
* SOLR-2263: Add ability for RawResponseWriter to stream binary files as well as
text files. (Eric Pugh via yonik)
+* SOLR-860: Add debug output for MoreLikeThis. (koji)
+
Optimizations
----------------------
diff --git a/solr/src/java/org/apache/solr/handler/MoreLikeThisHandler.java b/solr/src/java/org/apache/solr/handler/MoreLikeThisHandler.java
index e367d8922f4..fcd41e24dd9 100644
--- a/solr/src/java/org/apache/solr/handler/MoreLikeThisHandler.java
+++ b/solr/src/java/org/apache/solr/handler/MoreLikeThisHandler.java
@@ -232,7 +232,7 @@ public class MoreLikeThisHandler extends RequestHandlerBase
// Copied from StandardRequestHandler... perhaps it should be added to doStandardDebug?
if (dbg == true) {
try {
- NamedList<Object> dbgInfo = SolrPluginUtils.doStandardDebug(req, q, mlt.mltquery, mltDocs.docList, dbgQuery, dbgResults);
+ NamedList<Object> dbgInfo = SolrPluginUtils.doStandardDebug(req, q, mlt.getRawMLTQuery(), mltDocs.docList, dbgQuery, dbgResults);
if (null != dbgInfo) {
if (null != filters) {
dbgInfo.add("filter_queries",req.getParams().getParams(CommonParams.FQ));
@@ -279,8 +279,6 @@ public class MoreLikeThisHandler extends RequestHandlerBase
final boolean needDocSet;
Map<String,Float> boostFields;
- Query mltquery; // expose this for debugging
-
public MoreLikeThisHelper( SolrParams params, SolrIndexSearcher searcher )
{
this.searcher = searcher;
@@ -310,9 +308,26 @@ public class MoreLikeThisHandler extends RequestHandlerBase
boostFields = SolrPluginUtils.parseFieldBoosts(params.getParams(MoreLikeThisParams.QF));
}
- private void setBoosts(Query mltquery) {
+ private Query rawMLTQuery;
+ private Query boostedMLTQuery;
+ private BooleanQuery realMLTQuery;
+
+ public Query getRawMLTQuery(){
+ return rawMLTQuery;
+ }
+
+ public Query getBoostedMLTQuery(){
+ return boostedMLTQuery;
+ }
+
+ public Query getRealMLTQuery(){
+ return realMLTQuery;
+ }
+
+ private Query getBoostedQuery(Query mltquery) {
+ BooleanQuery boostedQuery = (BooleanQuery)mltquery.clone();
if (boostFields.size() > 0) {
- List clauses = ((BooleanQuery)mltquery).clauses();
+ List clauses = boostedQuery.clauses();
for( Object o : clauses ) {
TermQuery q = (TermQuery)((BooleanClause)o).getQuery();
Float b = this.boostFields.get(q.getTerm().field());
@@ -321,49 +336,51 @@ public class MoreLikeThisHandler extends RequestHandlerBase
}
}
}
+ return boostedQuery;
}
public DocListAndSet getMoreLikeThis( int id, int start, int rows, List<Query> filters, List<InterestingTerm> terms, int flags ) throws IOException
{
Document doc = reader.document(id);
- mltquery = mlt.like(id);
- setBoosts(mltquery);
+ rawMLTQuery = mlt.like(id);
+ boostedMLTQuery = getBoostedQuery( rawMLTQuery );
if( terms != null ) {
- fillInterestingTermsFromMLTQuery( mltquery, terms );
+ fillInterestingTermsFromMLTQuery( rawMLTQuery, terms );
}
// exclude current document from results
- BooleanQuery mltQuery = new BooleanQuery();
- mltQuery.add(mltquery, BooleanClause.Occur.MUST);
- mltQuery.add(
+ realMLTQuery = new BooleanQuery();
+ realMLTQuery.add(boostedMLTQuery, BooleanClause.Occur.MUST);
+ realMLTQuery.add(
new TermQuery(new Term(uniqueKeyField.getName(), uniqueKeyField.getType().storedToIndexed(doc.getFieldable(uniqueKeyField.getName())))),
BooleanClause.Occur.MUST_NOT);
DocListAndSet results = new DocListAndSet();
if (this.needDocSet) {
- results = searcher.getDocListAndSet(mltQuery, filters, null, start, rows, flags);
+ results = searcher.getDocListAndSet(realMLTQuery, filters, null, start, rows, flags);
} else {
- results.docList = searcher.getDocList(mltQuery, filters, null, start, rows, flags);
+ results.docList = searcher.getDocList(realMLTQuery, filters, null, start, rows, flags);
}
return results;
}
public DocListAndSet getMoreLikeThis( Reader reader, int start, int rows, List<Query> filters, List<InterestingTerm> terms, int flags ) throws IOException
{
- mltquery = mlt.like(reader);
- setBoosts(mltquery);
+ rawMLTQuery = mlt.like(reader);
+ boostedMLTQuery = getBoostedQuery( rawMLTQuery );
if( terms != null ) {
- fillInterestingTermsFromMLTQuery( mltquery, terms );
+ fillInterestingTermsFromMLTQuery( boostedMLTQuery, terms );
}
DocListAndSet results = new DocListAndSet();
if (this.needDocSet) {
- results = searcher.getDocListAndSet(mltquery, filters, null, start, rows, flags);
+ results = searcher.getDocListAndSet( boostedMLTQuery, filters, null, start, rows, flags);
} else {
- results.docList = searcher.getDocList(mltquery, filters, null, start, rows, flags);
+ results.docList = searcher.getDocList( boostedMLTQuery, filters, null, start, rows, flags);
}
return results;
}
-
+
+ @Deprecated
public NamedList getMoreLikeThese( DocList docs, int rows, int flags ) throws IOException
{
IndexSchema schema = searcher.getSchema();
@@ -382,7 +399,7 @@ public class MoreLikeThisHandler extends RequestHandlerBase
private void fillInterestingTermsFromMLTQuery( Query query, List<InterestingTerm> terms )
{
- List clauses = ((BooleanQuery)mltquery).clauses();
+ List clauses = ((BooleanQuery)query).clauses();
for( Object o : clauses ) {
TermQuery q = (TermQuery)((BooleanClause)o).getQuery();
InterestingTerm it = new InterestingTerm();
diff --git a/solr/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java b/solr/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java
index 61c97d1faf4..8851ff7761d 100644
--- a/solr/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java
+++ b/solr/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java
@@ -23,8 +23,12 @@ import java.net.URL;
import org.apache.solr.common.params.MoreLikeThisParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.handler.MoreLikeThisHandler;
+import org.apache.solr.schema.IndexSchema;
+import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocList;
+import org.apache.solr.search.DocListAndSet;
import org.apache.solr.search.SolrIndexSearcher;
/**
@@ -50,18 +54,59 @@ public class MoreLikeThisComponent extends SearchComponent
if( p.getBool( MoreLikeThisParams.MLT, false ) ) {
SolrIndexSearcher searcher = rb.req.getSearcher();
- MoreLikeThisHandler.MoreLikeThisHelper mlt
- = new MoreLikeThisHandler.MoreLikeThisHelper( p, searcher );
-
- int mltcount = p.getInt( MoreLikeThisParams.DOC_COUNT, 5 );
- NamedList<DocList> sim = mlt.getMoreLikeThese(
- rb.getResults().docList, mltcount, rb.getFieldFlags() );
+ NamedList<DocList> sim = getMoreLikeThese( rb, searcher,
+ rb.getResults().docList, rb.getFieldFlags() );
// TODO ???? add this directly to the response?
rb.rsp.add( "moreLikeThis", sim );
}
}
+ NamedList<DocList> getMoreLikeThese( ResponseBuilder rb, SolrIndexSearcher searcher,
+ DocList docs, int flags ) throws IOException {
+ SolrParams p = rb.req.getParams();
+ IndexSchema schema = searcher.getSchema();
+ MoreLikeThisHandler.MoreLikeThisHelper mltHelper
+ = new MoreLikeThisHandler.MoreLikeThisHelper( p, searcher );
+ NamedList<DocList> mlt = new SimpleOrderedMap<DocList>();
+ DocIterator iterator = docs.iterator();
+
+ SimpleOrderedMap<Object> dbg = null;
+ if( rb.isDebug() ){
+ dbg = new SimpleOrderedMap<Object>();
+ }
+
+ while( iterator.hasNext() ) {
+ int id = iterator.nextDoc();
+ int rows = p.getInt( MoreLikeThisParams.DOC_COUNT, 5 );
+ DocListAndSet sim = mltHelper.getMoreLikeThis( id, 0, rows, null, null, flags );
+ String name = schema.printableUniqueKey( searcher.doc( id ) );
+ mlt.add(name, sim.docList);
+
+ if( dbg != null ){
+ SimpleOrderedMap<Object> docDbg = new SimpleOrderedMap<Object>();
+ docDbg.add( "rawMLTQuery", mltHelper.getRawMLTQuery().toString() );
+ docDbg.add( "boostedMLTQuery", mltHelper.getBoostedMLTQuery().toString() );
+ docDbg.add( "realMLTQuery", mltHelper.getRealMLTQuery().toString() );
+ SimpleOrderedMap<Object> explains = new SimpleOrderedMap<Object>();
+ DocIterator mltIte = sim.docList.iterator();
+ while( mltIte.hasNext() ){
+ int mltid = mltIte.nextDoc();
+ String key = schema.printableUniqueKey( searcher.doc( mltid ) );
+ explains.add( key, searcher.explain( mltHelper.getRealMLTQuery(), mltid ) );
+ }
+ docDbg.add( "explain", explains );
+ dbg.add( name, docDbg );
+ }
+ }
+
+ // add debug information
+ if( dbg != null ){
+ rb.addDebugInfo( "moreLikeThis", dbg );
+ }
+ return mlt;
+ }
+
/////////////////////////////////////////////
/// SolrInfoMBean
////////////////////////////////////////////
diff --git a/solr/src/test/org/apache/solr/handler/MoreLikeThisHandlerTest.java b/solr/src/test/org/apache/solr/handler/MoreLikeThisHandlerTest.java
index 63b1edde582..6dbae21f244 100644
--- a/solr/src/test/org/apache/solr/handler/MoreLikeThisHandlerTest.java
+++ b/solr/src/test/org/apache/solr/handler/MoreLikeThisHandlerTest.java
@@ -94,7 +94,17 @@ public class MoreLikeThisHandlerTest extends SolrTestCaseJ4 {
assertQ("morelike this - harrison ford",mltreq
,"//result/doc[1]/int[@name='id'][.='45']");
+ // test MoreLikeThis debug
+ params.set(CommonParams.DEBUG_QUERY, "true");
+ assertQ("morelike this - harrison ford",mltreq
+ ,"//lst[@name='debug']/lst[@name='moreLikeThis']/lst[@name='44']/str[@name='rawMLTQuery']"
+ ,"//lst[@name='debug']/lst[@name='moreLikeThis']/lst[@name='44']/str[@name='boostedMLTQuery']"
+ ,"//lst[@name='debug']/lst[@name='moreLikeThis']/lst[@name='44']/str[@name='realMLTQuery']"
+ ,"//lst[@name='debug']/lst[@name='moreLikeThis']/lst[@name='44']/lst[@name='explain']/str[@name='45']"
+ );
+
// test that qparser plugins work
+ params.remove(CommonParams.DEBUG_QUERY);
params.set(CommonParams.Q, "{!field f=id}44");
assertQ(mltreq
,"//result/doc[1]/int[@name='id'][.='45']");
@@ -112,9 +122,9 @@ public class MoreLikeThisHandlerTest extends SolrTestCaseJ4 {
assertQ(mltreq
,"//result/doc[1]/int[@name='id'][.='45']");
- // test that debugging works
+ // test that debugging works (test for MoreLikeThis*Handler*)
params.set(CommonParams.QT, "/mlt");
- params.set("debugQuery", "true");
+ params.set(CommonParams.DEBUG_QUERY, "true");
assertQ(mltreq
,"//result/doc[1]/int[@name='id'][.='45']"
,"//lst[@name='debug']/lst[@name='explain']"
From da24882340857b782a4d44939730bfbe53f4a3a3 Mon Sep 17 00:00:00 2001
From: Yonik Seeley
Date: Fri, 28 Jan 2011 15:45:55 +0000
Subject: [PATCH 039/185] docs: move changes entry to 3.1
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064738 13f79535-47bb-0310-9956-ffa450edef68
---
solr/CHANGES.txt | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 8a3ad22e8e1..815b140524b 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -106,8 +106,6 @@ New Features
Adding a parameter NOW= to the request will override the
current time. (Peter Sturge, yonik)
-* SOLR-2325: Allow tagging and exlcusion of main query for faceting. (yonik)
-
Optimizations
----------------------
@@ -426,6 +424,8 @@ New Features
* SOLR-2129: Added a Solr module for dynamic metadata extraction/indexing with Apache UIMA.
See contrib/uima/README.txt for more information. (Tommaso Teofili via rmuir)
+* SOLR-2325: Allow tagging and exclusion of main query for faceting. (yonik)
+
* SOLR-2263: Add ability for RawResponseWriter to stream binary files as well as
text files. (Eric Pugh via yonik)
From 99a60c33c5ef7e90fe9580bc7f555536f4025713 Mon Sep 17 00:00:00 2001
From: Yonik Seeley
Date: Fri, 28 Jan 2011 17:07:50 +0000
Subject: [PATCH 040/185] SOLR-2265: update jetty to 6.1.26 (missed start.jar)
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064781 13f79535-47bb-0310-9956-ffa450edef68
---
solr/example/start.jar | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/solr/example/start.jar b/solr/example/start.jar
index 2bd8f2d6eb5..b2fca2178f2 100755
--- a/solr/example/start.jar
+++ b/solr/example/start.jar
@@ -1,2 +1,2 @@
-AnyObjectId[2a4a9a163d79f9214d9b1d9c0dbb611f741d8f16] was removed in git history.
+AnyObjectId[d3a94bcfae630a90d4103437bd3c2da0d37d98c9] was removed in git history.
Apache SVN contains full history.
\ No newline at end of file
From 0d9559e1b4f72c7a2c2d91350f0797bf8e1a0955 Mon Sep 17 00:00:00 2001
From: Robert Muir
Date: Fri, 28 Jan 2011 19:55:24 +0000
Subject: [PATCH 041/185] LUCENE-1866: enable rat-sources for all
lucene/contrib/modules/solr src and tests
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064844 13f79535-47bb-0310-9956-ffa450edef68
---
lucene/build.xml | 16 ----------------
lucene/common-build.xml | 16 ++++++++++++++++
solr/build.xml | 2 ++
3 files changed, 18 insertions(+), 16 deletions(-)
diff --git a/lucene/build.xml b/lucene/build.xml
index 4205d3c756d..3fe5b815403 100644
--- a/lucene/build.xml
+++ b/lucene/build.xml
@@ -602,22 +602,6 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/lucene/common-build.xml b/lucene/common-build.xml
index be42d5bd648..86e31dd6382 100644
--- a/lucene/common-build.xml
+++ b/lucene/common-build.xml
@@ -638,6 +638,22 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/solr/build.xml b/solr/build.xml
index a29cb50ab62..c3ef9682c18 100644
--- a/solr/build.xml
+++ b/solr/build.xml
@@ -963,6 +963,8 @@
+
+
From 06c4c204c80e3932aca22f4e7bba794d73d73cab Mon Sep 17 00:00:00 2001
From: Michael McCandless
Date: Fri, 28 Jan 2011 23:12:22 +0000
Subject: [PATCH 042/185] LUCENE-2895: MockRandomCodec loads termsIndex even if
termsIndexDivisor is set to -1
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064926 13f79535-47bb-0310-9956-ffa450edef68
---
lucene/src/test/org/apache/lucene/index/TestSegmentInfo.java | 1 +
.../lucene/index/codecs/mockrandom/MockRandomCodec.java | 4 +++-
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/lucene/src/test/org/apache/lucene/index/TestSegmentInfo.java b/lucene/src/test/org/apache/lucene/index/TestSegmentInfo.java
index 58b7cb8d9a6..dbd9ddbf1d0 100644
--- a/lucene/src/test/org/apache/lucene/index/TestSegmentInfo.java
+++ b/lucene/src/test/org/apache/lucene/index/TestSegmentInfo.java
@@ -31,6 +31,7 @@ public class TestSegmentInfo extends LuceneTestCase {
Directory dir = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
IndexWriter writer = new IndexWriter(dir, conf);
+ writer.setInfoStream(VERBOSE ? System.out : null);
Document doc = new Document();
doc.add(new Field("a", "value", Store.YES, Index.ANALYZED));
writer.addDocument(doc);
diff --git a/lucene/src/test/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java b/lucene/src/test/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java
index d00854ec9f1..745c619cb87 100644
--- a/lucene/src/test/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java
+++ b/lucene/src/test/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java
@@ -260,7 +260,9 @@ public class MockRandomCodec extends Codec {
if (LuceneTestCase.VERBOSE) {
System.out.println("MockRandomCodec: variable-gap terms index (divisor=" + state.termsIndexDivisor + ")");
}
- state.termsIndexDivisor = _TestUtil.nextInt(random, 1, 10);
+ if (state.termsIndexDivisor != -1) {
+ state.termsIndexDivisor = _TestUtil.nextInt(random, 1, 10);
+ }
indexReader = new VariableGapTermsIndexReader(state.dir,
state.fieldInfos,
state.segmentInfo.name,
From f12b4ab2ff281070cece87158cf04a467cedf599 Mon Sep 17 00:00:00 2001
From: Michael McCandless
Date: Fri, 28 Jan 2011 23:12:48 +0000
Subject: [PATCH 043/185] fix typo
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064927 13f79535-47bb-0310-9956-ffa450edef68
---
.../src/test/org/apache/lucene/index/TestDocsAndPositions.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java b/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java
index 654e33dfb52..a63e63db589 100644
--- a/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java
+++ b/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java
@@ -111,7 +111,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
* random. All positions for that number are saved up front and compared to
* the enums positions.
*/
- public void testRandomPositons() throws IOException {
+ public void testRandomPositions() throws IOException {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(
From 5d8790eb85220b082ea5b288be51392c2613ecdc Mon Sep 17 00:00:00 2001
From: Yonik Seeley
Date: Sat, 29 Jan 2011 02:24:53 +0000
Subject: [PATCH 044/185] SOLR-1983 SOLR-2156: set replication flags and
cleanup
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064942 13f79535-47bb-0310-9956-ffa450edef68
---
solr/CHANGES.txt | 5 +++++
solr/src/java/org/apache/solr/handler/SnapPuller.java | 9 ++++++---
2 files changed, 11 insertions(+), 3 deletions(-)
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 815b140524b..5022a1ed447 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -653,6 +653,11 @@ Bug Fixes
* SOLR-2261: fix velocity template layout.vm that referred to an older
version of jquery. (Eric Pugh via rmuir)
+* SOLR-1983: snappuller fails when modifiedConfFiles is not empty and
+ full copy of index is needed. (Alexander Kanarsky via yonik)
+
+* SOLR-2156: SnapPuller fails to clean Old Index Directories on Full Copy
+ (Jayendra Patil via yonik)
Other Changes
----------------------
diff --git a/solr/src/java/org/apache/solr/handler/SnapPuller.java b/solr/src/java/org/apache/solr/handler/SnapPuller.java
index 88ac16671cf..b93d34f389b 100644
--- a/solr/src/java/org/apache/solr/handler/SnapPuller.java
+++ b/solr/src/java/org/apache/solr/handler/SnapPuller.java
@@ -300,15 +300,17 @@ public class SnapPuller {
isFullCopyNeeded = true;
successfulInstall = false;
boolean deleteTmpIdxDir = true;
+ File indexDir = null;
try {
- File indexDir = new File(core.getIndexDir());
+ indexDir = new File(core.getIndexDir());
downloadIndexFiles(isFullCopyNeeded, tmpIndexDir, latestVersion);
LOG.info("Total time taken for download : " + ((System.currentTimeMillis() - replicationStartTime) / 1000) + " secs");
Collection<Map<String, Object>> modifiedConfFiles = getModifiedConfFiles(confFilesToDownload);
if (!modifiedConfFiles.isEmpty()) {
downloadConfFiles(confFilesToDownload, latestVersion);
if (isFullCopyNeeded) {
- modifyIndexProps(tmpIndexDir.getName());
+ successfulInstall = modifyIndexProps(tmpIndexDir.getName());
+ deleteTmpIdxDir = false;
} else {
successfulInstall = copyIndexFiles(tmpIndexDir, indexDir);
}
@@ -339,7 +341,8 @@ public class SnapPuller {
} catch (Exception e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Index fetch failed : ", e);
} finally {
- if(deleteTmpIdxDir) delTree(tmpIndexDir);
+ if (deleteTmpIdxDir) delTree(tmpIndexDir);
+ else delTree(indexDir);
}
return successfulInstall;
} finally {
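Condensed, the corrected control flow (a sketch; download and property-rewrite helpers as in the patch): the tmp directory is preserved only once modifyIndexProps has switched the core over to it, and the finally block then deletes whichever directory became obsolete.

    boolean deleteTmpIdxDir = true;
    File indexDir = null;
    try {
      indexDir = new File(core.getIndexDir());
      // ... download index and conf files ...
      if (isFullCopyNeeded) {
        successfulInstall = modifyIndexProps(tmpIndexDir.getName());
        deleteTmpIdxDir = false;                 // tmp dir is now the live index
      } else {
        successfulInstall = copyIndexFiles(tmpIndexDir, indexDir);
      }
    } finally {
      if (deleteTmpIdxDir) delTree(tmpIndexDir); // install failed or never switched
      else delTree(indexDir);                    // old index dir superseded by tmp dir
    }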
From cbf8d878f7efa76a063a11a00db9f6fea9ec874c Mon Sep 17 00:00:00 2001
From: Koji Sekiguchi
Date: Sat, 29 Jan 2011 14:30:45 +0000
Subject: [PATCH 045/185] SOLR-792: fix comparison for mincount
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065020 13f79535-47bb-0310-9956-ffa450edef68
---
.../org/apache/solr/handler/component/PivotFacetHelper.java | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/solr/src/java/org/apache/solr/handler/component/PivotFacetHelper.java b/solr/src/java/org/apache/solr/handler/component/PivotFacetHelper.java
index b47be4fe6a4..c00add58464 100644
--- a/solr/src/java/org/apache/solr/handler/component/PivotFacetHelper.java
+++ b/solr/src/java/org/apache/solr/handler/component/PivotFacetHelper.java
@@ -102,7 +102,7 @@ public class PivotFacetHelper
List<NamedList<Object>> values = new ArrayList<NamedList<Object>>( superFacets.size() );
for (Map.Entry<String,Integer> kv : superFacets) {
// Only sub-facet if parent facet has positive count - still may not be any values for the sub-field though
- if (kv.getValue() > minMatch ) {
+ if (kv.getValue() >= minMatch ) {
// don't reuse the same BytesRef each time since we will be constructing Term
// objects that will most likely be cached.
BytesRef termval = new BytesRef();
@@ -122,7 +122,7 @@ public class PivotFacetHelper
SimpleFacets sf = getFacetImplementation(rb.req, subset, rb.req.getParams());
NamedList<Integer> nl = sf.getTermCounts(subField);
- if (nl.size() > minMatch ) {
+ if (nl.size() >= minMatch ) {
pivot.add( "pivot", doPivots( nl, subField, nextField, fnames, rb, subset, minMatch ) );
values.add( pivot ); // only add response if there are some counts
}
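The off-by-one this fixes: with the default facet.pivot.mincount of 1, a parent value with exactly one hit was skipped, since count > minMatch effectively demanded minMatch + 1. A minimal sketch of the corrected boundary (parameter lookup assumed):

    int minMatch = params.getInt( FacetParams.FACET_PIVOT_MINCOUNT, 1 ); // assumed lookup
    if (kv.getValue() >= minMatch) {
      // a value whose count equals mincount is now expanded into sub-facets
    }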
From 5342d8676da8f794efbcf93b5001b107514f1363 Mon Sep 17 00:00:00 2001
From: Michael McCandless
Date: Sat, 29 Jan 2011 16:22:59 +0000
Subject: [PATCH 046/185] LUCENE-2898: fix CMS throttling to be independent of
number of incoming producer threads; some defensive concurrency fixes for
SegmentInfo
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065059 13f79535-47bb-0310-9956-ffa450edef68
---
.../index/ConcurrentMergeScheduler.java | 48 +++++++++----------
.../org/apache/lucene/index/SegmentInfo.java | 27 +++++++----
2 files changed, 41 insertions(+), 34 deletions(-)
diff --git a/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java b/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
index 55d682d593c..1927235cdce 100644
--- a/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
+++ b/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
@@ -308,10 +308,31 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
// pending merges, until it's empty:
while (true) {
+ synchronized(this) {
+ long startStallTime = 0;
+ while (mergeThreadCount() >= 1+maxMergeCount) {
+ startStallTime = System.currentTimeMillis();
+ if (verbose()) {
+ message(" too many merges; stalling...");
+ }
+ try {
+ wait();
+ } catch (InterruptedException ie) {
+ throw new ThreadInterruptedException(ie);
+ }
+ }
+
+ if (verbose()) {
+ if (startStallTime != 0) {
+ message(" stalled for " + (System.currentTimeMillis()-startStallTime) + " msec");
+ }
+ }
+ }
+
+
// TODO: we could be careful about which merges to do in
// the BG (eg maybe the "biggest" ones) vs FG, which
// merges to do first (the easiest ones?), etc.
-
MergePolicy.OneMerge merge = writer.getNextMerge();
if (merge == null) {
if (verbose())
@@ -326,32 +347,11 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
boolean success = false;
try {
synchronized(this) {
- final MergeThread merger;
- long startStallTime = 0;
- while (mergeThreadCount() >= maxMergeCount) {
- startStallTime = System.currentTimeMillis();
- if (verbose()) {
- message(" too many merges; stalling...");
- }
- try {
- wait();
- } catch (InterruptedException ie) {
- throw new ThreadInterruptedException(ie);
- }
- }
-
- if (verbose()) {
- if (startStallTime != 0) {
- message(" stalled for " + (System.currentTimeMillis()-startStallTime) + " msec");
- }
- message(" consider merge " + merge.segString(dir));
- }
-
- assert mergeThreadCount() < maxMergeCount;
+ message(" consider merge " + merge.segString(dir));
// OK to spawn a new merge thread to handle this
// merge:
- merger = getMergeThread(writer, merge);
+ final MergeThread merger = getMergeThread(writer, merge);
mergeThreads.add(merger);
if (verbose()) {
message(" launch new thread [" + merger.getName() + "]");
diff --git a/lucene/src/java/org/apache/lucene/index/SegmentInfo.java b/lucene/src/java/org/apache/lucene/index/SegmentInfo.java
index a3dfaac25a7..e668fb9a279 100644
--- a/lucene/src/java/org/apache/lucene/index/SegmentInfo.java
+++ b/lucene/src/java/org/apache/lucene/index/SegmentInfo.java
@@ -66,11 +66,11 @@ public final class SegmentInfo {
private boolean isCompoundFile;
- private List<String> files; // cached list of files that this segment uses
+ private volatile List<String> files; // cached list of files that this segment uses
// in the Directory
- private long sizeInBytesNoStore = -1; // total byte size of all but the store files (computed on demand)
- private long sizeInBytesWithStore = -1; // total byte size of all of our files (computed on demand)
+ private volatile long sizeInBytesNoStore = -1; // total byte size of all but the store files (computed on demand)
+ private volatile long sizeInBytesWithStore = -1; // total byte size of all of our files (computed on demand)
private int docStoreOffset; // if this segment shares stored fields & vectors, this
// offset is where in that file this segment's docs begin
@@ -241,24 +241,31 @@ public final class SegmentInfo {
*/
public long sizeInBytes(boolean includeDocStores) throws IOException {
if (includeDocStores) {
- if (sizeInBytesWithStore != -1) return sizeInBytesWithStore;
- sizeInBytesWithStore = 0;
+ if (sizeInBytesWithStore != -1) {
+ return sizeInBytesWithStore;
+ }
+ long sum = 0;
for (final String fileName : files()) {
- // We don't count bytes used by a shared doc store against this segment
+ // We don't count bytes used by a shared doc store
+ // against this segment
if (docStoreOffset == -1 || !IndexFileNames.isDocStoreFile(fileName)) {
- sizeInBytesWithStore += dir.fileLength(fileName);
+ sum += dir.fileLength(fileName);
}
}
+ sizeInBytesWithStore = sum;
return sizeInBytesWithStore;
} else {
- if (sizeInBytesNoStore != -1) return sizeInBytesNoStore;
- sizeInBytesNoStore = 0;
+ if (sizeInBytesNoStore != -1) {
+ return sizeInBytesNoStore;
+ }
+ long sum = 0;
for (final String fileName : files()) {
if (IndexFileNames.isDocStoreFile(fileName)) {
continue;
}
- sizeInBytesNoStore += dir.fileLength(fileName);
+ sum += dir.fileLength(fileName);
}
+ sizeInBytesNoStore = sum;
return sizeInBytesNoStore;
}
}
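The SegmentInfo change is the usual safe-publication idiom for a lazily computed cache: accumulate into a local, then write the volatile field exactly once, so a concurrent reader can never observe a half-summed total. Distilled (doc-store special case omitted):

    private volatile long sizeInBytesWithStore = -1;  // -1 = not yet computed

    public long sizeInBytes() throws IOException {
      long cached = sizeInBytesWithStore;
      if (cached != -1) {
        return cached;                 // already published
      }
      long sum = 0;
      for (final String fileName : files()) {
        sum += dir.fileLength(fileName);
      }
      sizeInBytesWithStore = sum;      // single volatile write publishes the total
      return sum;
    }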
From d4c9a814527ec5c58d62c42a65b23b39e508a505 Mon Sep 17 00:00:00 2001
From: Yonik Seeley
Date: Sat, 29 Jan 2011 16:50:51 +0000
Subject: [PATCH 047/185] SOLR-792: fix test to match fix to mincount
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065067 13f79535-47bb-0310-9956-ffa450edef68
---
.../test/org/apache/solr/client/solrj/SolrExampleTests.java | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/solr/src/test/org/apache/solr/client/solrj/SolrExampleTests.java b/solr/src/test/org/apache/solr/client/solrj/SolrExampleTests.java
index f79a622f06e..216470e44cc 100644
--- a/solr/src/test/org/apache/solr/client/solrj/SolrExampleTests.java
+++ b/solr/src/test/org/apache/solr/client/solrj/SolrExampleTests.java
@@ -642,12 +642,12 @@ abstract public class SolrExampleTests extends SolrJettyTestBase
pivot = pivots.getVal( 2 );
assertEquals( "features,cat,inStock", pivots.getName( 2 ) );
assertEquals( 2, pivot.size() );
- PivotField p = pivot.get( 1 ).getPivot().get(0);
+ PivotField p = pivot.get( 1 ).getPivot().get(0); // get(1) should be features=AAAA, then get(0) should be cat=a
assertEquals( "cat", p.getField() );
assertEquals( "a", p.getValue() );
counts = p.getPivot();
// p.write(System.out, 5 );
- assertEquals( 1, counts.size() );
+ assertEquals( 2, counts.size() ); // 2 trues and 1 false under features=AAAA,cat=a
assertEquals( "inStock", counts.get(0).getField() );
assertEquals( Boolean.TRUE, counts.get(0).getValue() );
assertEquals( 2, counts.get(0).getCount() );
From 5dcacafcb4be2672e2c3240a107080007ce3c6dc Mon Sep 17 00:00:00 2001
From: Ryan McKinley
Date: Sat, 29 Jan 2011 17:10:09 +0000
Subject: [PATCH 048/185] - add comments to pivot test - change capitalization
so that testing looks the same as the input
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065074 13f79535-47bb-0310-9956-ffa450edef68
---
.../solr/client/solrj/SolrExampleTests.java | 58 ++++++++++++++-----
1 file changed, 45 insertions(+), 13 deletions(-)
diff --git a/solr/src/test/org/apache/solr/client/solrj/SolrExampleTests.java b/solr/src/test/org/apache/solr/client/solrj/SolrExampleTests.java
index 216470e44cc..071f74e0255 100644
--- a/solr/src/test/org/apache/solr/client/solrj/SolrExampleTests.java
+++ b/solr/src/test/org/apache/solr/client/solrj/SolrExampleTests.java
@@ -576,17 +576,17 @@ abstract public class SolrExampleTests extends SolrJettyTestBase
int id = 1;
ArrayList<SolrInputDocument> docs = new ArrayList<SolrInputDocument>();
- docs.add( makeTestDoc( "id", id++, "features", "AAA", "cat", "a", "inStock", true ) );
- docs.add( makeTestDoc( "id", id++, "features", "AAA", "cat", "a", "inStock", false ) );
- docs.add( makeTestDoc( "id", id++, "features", "AAA", "cat", "a", "inStock", true ) );
- docs.add( makeTestDoc( "id", id++, "features", "AAA", "cat", "b", "inStock", false ) );
- docs.add( makeTestDoc( "id", id++, "features", "AAA", "cat", "b", "inStock", true ) );
- docs.add( makeTestDoc( "id", id++, "features", "BBB", "cat", "a", "inStock", false ) );
- docs.add( makeTestDoc( "id", id++, "features", "BBB", "cat", "a", "inStock", true ) );
- docs.add( makeTestDoc( "id", id++, "features", "BBB", "cat", "b", "inStock", false ) );
- docs.add( makeTestDoc( "id", id++, "features", "BBB", "cat", "b", "inStock", true ) );
- docs.add( makeTestDoc( "id", id++, "features", "BBB", "cat", "b", "inStock", false ) );
- docs.add( makeTestDoc( "id", id++, "features", "BBB", "cat", "b", "inStock", true ) );
+ docs.add( makeTestDoc( "id", id++, "features", "aaa", "cat", "a", "inStock", true ) );
+ docs.add( makeTestDoc( "id", id++, "features", "aaa", "cat", "a", "inStock", false ) );
+ docs.add( makeTestDoc( "id", id++, "features", "aaa", "cat", "a", "inStock", true ) );
+ docs.add( makeTestDoc( "id", id++, "features", "aaa", "cat", "b", "inStock", false ) );
+ docs.add( makeTestDoc( "id", id++, "features", "aaa", "cat", "b", "inStock", true ) );
+ docs.add( makeTestDoc( "id", id++, "features", "bbb", "cat", "a", "inStock", false ) );
+ docs.add( makeTestDoc( "id", id++, "features", "bbb", "cat", "a", "inStock", true ) );
+ docs.add( makeTestDoc( "id", id++, "features", "bbb", "cat", "b", "inStock", false ) );
+ docs.add( makeTestDoc( "id", id++, "features", "bbb", "cat", "b", "inStock", true ) );
+ docs.add( makeTestDoc( "id", id++, "features", "bbb", "cat", "b", "inStock", false ) );
+ docs.add( makeTestDoc( "id", id++, "features", "bbb", "cat", "b", "inStock", true ) );
docs.add( makeTestDoc( "id", id++ ) ); // something not matching
server.add( docs );
server.commit();
@@ -610,7 +610,14 @@ abstract public class SolrExampleTests extends SolrJettyTestBase
// System.out.println();
// }
- // Now make sure they have reasonable stuff
+ // PIVOT: features,cat
+ // features=bbb (6)
+ // cat=b (4)
+ // cat=a (2)
+ // features=aaa (5)
+ // cat=a (3)
+ // cat=b (2)
+
List<PivotField> pivot = pivots.getVal( 0 );
assertEquals( "features,cat", pivots.getName( 0 ) );
assertEquals( 2, pivot.size() );
@@ -627,6 +634,15 @@ abstract public class SolrExampleTests extends SolrJettyTestBase
assertEquals( "a", counts.get(1).getValue() );
assertEquals( 2, counts.get(1).getCount() );
+
+ // PIVOT: cat,features
+ // cat=b (6)
+ // features=bbb (4)
+ // features=aaa (2)
+ // cat=a (5)
+ // features=aaa (3)
+ // features=bbb (2)
+
ff = pivot.get( 1 );
assertEquals( "features", ff.getField() );
assertEquals( "aaa", ff.getValue() );
@@ -638,7 +654,23 @@ abstract public class SolrExampleTests extends SolrJettyTestBase
assertEquals( "b", counts.get(1).getValue() );
assertEquals( 2, counts.get(1).getCount() );
- // 3 deep
+ // Three deep:
+ // PIVOT: features,cat,inStock
+ // features=bbb (6)
+ // cat=b (4)
+ // inStock=false (2)
+ // inStock=true (2)
+ // cat=a (2)
+ // inStock=false (1)
+ // inStock=true (1)
+ // features=aaa (5)
+ // cat=a (3)
+ // inStock=true (2)
+ // inStock=false (1)
+ // cat=b (2)
+ // inStock=false (1)
+ // inStock=true (1)
+
pivot = pivots.getVal( 2 );
assertEquals( "features,cat,inStock", pivots.getName( 2 ) );
assertEquals( 2, pivot.size() );
From 5f7f97021c7da069ab90dcfe95c06e2f89893e66 Mon Sep 17 00:00:00 2001
From: Ryan McKinley
Date: Sat, 29 Jan 2011 19:43:20 +0000
Subject: [PATCH 049/185] LUCENE-2671 -- deprecate FieldTypes that will be
removed in 5.x
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065093 13f79535-47bb-0310-9956-ffa450edef68
---
solr/src/java/org/apache/solr/schema/SortableDoubleField.java | 2 ++
solr/src/java/org/apache/solr/schema/SortableFloatField.java | 2 ++
solr/src/java/org/apache/solr/schema/SortableIntField.java | 2 ++
solr/src/java/org/apache/solr/schema/SortableLongField.java | 2 ++
4 files changed, 8 insertions(+)
diff --git a/solr/src/java/org/apache/solr/schema/SortableDoubleField.java b/solr/src/java/org/apache/solr/schema/SortableDoubleField.java
index 411e9b5f6fc..b12858b45c2 100644
--- a/solr/src/java/org/apache/solr/schema/SortableDoubleField.java
+++ b/solr/src/java/org/apache/solr/schema/SortableDoubleField.java
@@ -37,6 +37,8 @@ import java.util.Map;
import java.io.IOException;
/**
* @version $Id$
+ *
+ * @deprecated use {@link DoubleField} or {@link TrieDoubleField} - will be removed in 5.x
*/
public class SortableDoubleField extends FieldType {
protected void init(IndexSchema schema, Map<String,String> args) {
diff --git a/solr/src/java/org/apache/solr/schema/SortableFloatField.java b/solr/src/java/org/apache/solr/schema/SortableFloatField.java
index e56ffd70c2a..b495227b1f6 100644
--- a/solr/src/java/org/apache/solr/schema/SortableFloatField.java
+++ b/solr/src/java/org/apache/solr/schema/SortableFloatField.java
@@ -37,6 +37,8 @@ import java.util.Map;
import java.io.IOException;
/**
* @version $Id$
+ *
+ * @deprecated use {@link FloatField} or {@link TrieFloatField} - will be removed in 5.x
*/
public class SortableFloatField extends FieldType {
protected void init(IndexSchema schema, Map<String,String> args) {
diff --git a/solr/src/java/org/apache/solr/schema/SortableIntField.java b/solr/src/java/org/apache/solr/schema/SortableIntField.java
index b6db1cff194..421e4bc45fc 100644
--- a/solr/src/java/org/apache/solr/schema/SortableIntField.java
+++ b/solr/src/java/org/apache/solr/schema/SortableIntField.java
@@ -37,6 +37,8 @@ import java.util.Map;
import java.io.IOException;
/**
* @version $Id$
+ *
+ * @deprecated use {@link IntField} or {@link TrieIntField} - will be removed in 5.x
*/
public class SortableIntField extends FieldType {
protected void init(IndexSchema schema, Map<String,String> args) {
diff --git a/solr/src/java/org/apache/solr/schema/SortableLongField.java b/solr/src/java/org/apache/solr/schema/SortableLongField.java
index 3be76b9b1c1..d23fff2bb26 100644
--- a/solr/src/java/org/apache/solr/schema/SortableLongField.java
+++ b/solr/src/java/org/apache/solr/schema/SortableLongField.java
@@ -37,6 +37,8 @@ import java.util.Map;
import java.io.IOException;
/**
* @version $Id$
+ *
+ * @deprecated use {@link LongField} or {@link TrieLongField} - will be removed in 5.x
*/
public class SortableLongField extends FieldType {
protected void init(IndexSchema schema, Map<String,String> args) {
From 295c8f84c73327cf93fe705224c13e11aa852480 Mon Sep 17 00:00:00 2001
From: Michael McCandless
Date: Sat, 29 Jan 2011 19:48:56 +0000
Subject: [PATCH 050/185] LUCENE-1076: allow non-contiguous merges; improve
handling of buffered deletes
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065095 13f79535-47bb-0310-9956-ffa450edef68
---
.../store/instantiated/TestIndicesEquals.java | 4 +-
.../lucene/index/TestFieldNormModifier.java | 2 +-
.../index/TestMultiPassIndexSplitter.java | 2 +-
.../lucene/misc/TestLengthNormModifier.java | 2 +-
.../lucene/search/DuplicateFilterTest.java | 9 +-
.../lucene/search/FuzzyLikeThisQueryTest.java | 2 +-
.../apache/lucene/index/BufferedDeletes.java | 550 +++++-------------
.../lucene/index/BufferedDeletesStream.java | 440 ++++++++++++++
.../apache/lucene/index/DocumentsWriter.java | 30 +-
.../org/apache/lucene/index/IndexWriter.java | 128 ++--
.../apache/lucene/index/LogMergePolicy.java | 84 ++-
.../apache/lucene/index/SegmentDeletes.java | 191 ------
.../org/apache/lucene/index/SegmentInfo.java | 13 +-
.../lucene/TestMergeSchedulerExternal.java | 4 +-
.../test/org/apache/lucene/TestSearch.java | 14 +-
.../lucene/TestSearchForDuplicates.java | 19 +-
.../lucene/index/MockRandomMergePolicy.java | 93 +++
.../apache/lucene/index/TestAddIndexes.java | 1 +
.../apache/lucene/index/TestAtomicUpdate.java | 1 +
.../index/TestConcurrentMergeScheduler.java | 7 +
.../lucene/index/TestDeletionPolicy.java | 128 ++--
.../lucene/index/TestDocsAndPositions.java | 8 +-
.../apache/lucene/index/TestFieldsReader.java | 4 +-
.../apache/lucene/index/TestIndexReader.java | 10 +-
.../index/TestIndexReaderCloneNorms.java | 4 +-
.../lucene/index/TestIndexReaderReopen.java | 2 +-
.../apache/lucene/index/TestIndexWriter.java | 48 +-
.../lucene/index/TestIndexWriterDelete.java | 2 +-
.../index/TestIndexWriterExceptions.java | 7 +-
.../index/TestIndexWriterMergePolicy.java | 2 +-
.../lucene/index/TestIndexWriterMerging.java | 4 +-
.../index/TestIndexWriterOnDiskFull.java | 10 +-
.../lucene/index/TestIndexWriterReader.java | 4 +-
.../org/apache/lucene/index/TestLazyBug.java | 2 +-
.../lucene/index/TestMaxTermFrequency.java | 2 +-
.../lucene/index/TestMultiLevelSkipList.java | 2 +-
.../apache/lucene/index/TestNRTThreads.java | 34 +-
.../org/apache/lucene/index/TestNorms.java | 4 +-
.../apache/lucene/index/TestOmitNorms.java | 5 +-
.../org/apache/lucene/index/TestOmitTf.java | 7 +-
.../org/apache/lucene/index/TestPayloads.java | 3 +-
.../index/TestPerFieldCodecSupport.java | 6 +-
.../lucene/index/TestPerSegmentDeletes.java | 5 +-
.../apache/lucene/index/TestSegmentInfo.java | 2 +-
.../lucene/index/TestSegmentTermDocs.java | 2 +-
.../lucene/index/TestStressIndexing2.java | 6 +-
.../lucene/search/BaseTestRangeFilter.java | 6 +-
.../apache/lucene/search/TestBoolean2.java | 2 +-
.../search/TestDisjunctionMaxQuery.java | 2 +-
.../apache/lucene/search/TestDocBoost.java | 7 +-
.../lucene/search/TestExplanations.java | 2 +-
.../apache/lucene/search/TestFieldCache.java | 2 +-
.../lucene/search/TestFilteredQuery.java | 10 +-
.../lucene/search/TestFilteredSearch.java | 5 +-
.../apache/lucene/search/TestFuzzyQuery2.java | 2 +-
.../lucene/search/TestMatchAllDocsQuery.java | 2 +-
.../search/TestMultiThreadTermVectors.java | 2 +-
.../search/TestNumericRangeQuery32.java | 3 +-
.../search/TestNumericRangeQuery64.java | 3 +-
.../apache/lucene/search/TestPhraseQuery.java | 2 +-
.../org/apache/lucene/search/TestSort.java | 2 +-
.../lucene/search/TestSpanQueryFilter.java | 5 +-
.../lucene/search/TestSubScorerFreqs.java | 14 +-
.../apache/lucene/search/TestTermScorer.java | 9 +-
.../apache/lucene/search/TestTermVectors.java | 4 +-
.../search/cache/TestEntryCreators.java | 5 +-
.../search/function/FunctionTestSetup.java | 2 +-
.../search/payloads/TestPayloadTermQuery.java | 2 +-
.../lucene/search/spans/TestBasics.java | 2 +-
.../spans/TestFieldMaskingSpanQuery.java | 3 +-
.../search/spans/TestNearSpansOrdered.java | 2 +-
.../apache/lucene/search/spans/TestSpans.java | 2 +-
.../search/spans/TestSpansAdvanced.java | 5 +-
.../search/spans/TestSpansAdvanced2.java | 2 +-
.../apache/lucene/store/TestMultiMMap.java | 3 +-
.../apache/lucene/util/LuceneTestCase.java | 19 +-
76 files changed, 1170 insertions(+), 870 deletions(-)
create mode 100644 lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java
delete mode 100644 lucene/src/java/org/apache/lucene/index/SegmentDeletes.java
create mode 100644 lucene/src/test/org/apache/lucene/index/MockRandomMergePolicy.java
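Before the per-file hunks, the core of the reworked delete buffering, condensed from the BufferedDeletes diff below: each buffered term maps to the highest docIDUpto seen so far, and RAM is charged only the first time a term is buffered.

    public void addTerm(Term term, int docIDUpto) {
      Integer current = terms.get(term);
      if (current != null && docIDUpto < current) {
        return;  // keep the higher bound so concurrent updateDocument calls stay correct
      }
      terms.put(term, Integer.valueOf(docIDUpto));
      numTermDeletes.incrementAndGet();
      if (current == null) {
        bytesUsed.addAndGet(BYTES_PER_DEL_TERM + term.bytes.length); // charge once per unique term
      }
    }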
diff --git a/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java b/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java
index ae091b5ec6e..7a5398c4ed0 100644
--- a/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java
+++ b/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java
@@ -65,7 +65,7 @@ public class TestIndicesEquals extends LuceneTestCase {
// create dir data
IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
+ TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
for (int i = 0; i < 20; i++) {
Document document = new Document();
@@ -91,7 +91,7 @@ public class TestIndicesEquals extends LuceneTestCase {
// create dir data
IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
+ TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
indexWriter.setInfoStream(VERBOSE ? System.out : null);
if (VERBOSE) {
System.out.println("TEST: make test index");
diff --git a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java
index 8c83d449341..48bb42dfcf5 100644
--- a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java
+++ b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java
@@ -54,7 +54,7 @@ public class TestFieldNormModifier extends LuceneTestCase {
super.setUp();
store = newDirectory();
IndexWriter writer = new IndexWriter(store, newIndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
+ TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
for (int i = 0; i < NUM_DOCS; i++) {
Document d = new Document();
diff --git a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java
index f861063942d..158b24ff58b 100644
--- a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java
+++ b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java
@@ -32,7 +32,7 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase {
public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
- IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
Document doc;
for (int i = 0; i < NUM_DOCS; i++) {
doc = new Document();
diff --git a/lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java b/lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java
index a856dd9fa58..7dfa6a311d5 100644
--- a/lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java
+++ b/lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java
@@ -59,7 +59,7 @@ public class TestLengthNormModifier extends LuceneTestCase {
super.setUp();
store = newDirectory();
IndexWriter writer = new IndexWriter(store, newIndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
+ TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
for (int i = 0; i < NUM_DOCS; i++) {
Document d = new Document();
diff --git a/lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java b/lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java
index a040d303fb8..2a3df020714 100644
--- a/lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java
+++ b/lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java
@@ -20,16 +20,17 @@ package org.apache.lucene.search;
import java.io.IOException;
import java.util.HashSet;
+import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.MultiFields;
import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
public class DuplicateFilterTest extends LuceneTestCase {
private static final String KEY_FIELD = "url";
@@ -42,7 +43,7 @@ public class DuplicateFilterTest extends LuceneTestCase {
public void setUp() throws Exception {
super.setUp();
directory = newDirectory();
- RandomIndexWriter writer = new RandomIndexWriter(random, directory);
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
//Add series of docs with filterable fields : url, text and dates flags
addDoc(writer, "http://lucene.apache.org", "lucene 1.4.3 available", "20040101");
diff --git a/lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java b/lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java
index 5f2bec5b04c..587a5710b9a 100644
--- a/lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java
+++ b/lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java
@@ -40,7 +40,7 @@ public class FuzzyLikeThisQueryTest extends LuceneTestCase {
public void setUp() throws Exception {
super.setUp();
directory = newDirectory();
- RandomIndexWriter writer = new RandomIndexWriter(random, directory);
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
//Add series of docs with misspelt names
addDoc(writer, "jonathon smythe","1");
diff --git a/lucene/src/java/org/apache/lucene/index/BufferedDeletes.java b/lucene/src/java/org/apache/lucene/index/BufferedDeletes.java
index 0be1dd2ba30..ed955b90d2d 100644
--- a/lucene/src/java/org/apache/lucene/index/BufferedDeletes.java
+++ b/lucene/src/java/org/apache/lucene/index/BufferedDeletes.java
@@ -17,440 +17,166 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import java.io.IOException;
-import java.io.PrintStream;
+import java.util.ArrayList;
import java.util.HashMap;
-import java.util.Date;
-import java.util.Map.Entry;
+import java.util.List;
import java.util.Map;
-import java.util.concurrent.atomic.AtomicInteger;
+import java.util.SortedMap;
+import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Weight;
+import org.apache.lucene.util.RamUsageEstimator;
-/** Holds a {@link SegmentDeletes} for each segment in the
- * index. */
+/** Holds buffered deletes, by docID, term or query for a
+ * single segment. This is used to hold buffered pending
+ * deletes against the to-be-flushed segment as well as
+ * per-segment deletes for each segment in the index. */
+
+// NOTE: we are sync'd by BufferedDeletes, ie, all access to
+// instances of this class is via sync'd methods on
+// BufferedDeletes
class BufferedDeletes {
- // Deletes for all flushed/merged segments:
- private final Map<SegmentInfo,SegmentDeletes> deletesMap = new HashMap<SegmentInfo,SegmentDeletes>();
+ /* Rough logic: HashMap has an array[Entry] w/ varying
+ load factor (say 2 * POINTER). Entry is object w/ Term
+ key, Integer val, int hash, Entry next
+ (OBJ_HEADER + 3*POINTER + INT). Term is object w/
+ String field and String text (OBJ_HEADER + 2*POINTER).
+ We don't count Term's field since it's interned.
+ Term's text is String (OBJ_HEADER + 4*INT + POINTER +
+ OBJ_HEADER + string.length*CHAR). Integer is
+ OBJ_HEADER + INT. */
+ final static int BYTES_PER_DEL_TERM = 8*RamUsageEstimator.NUM_BYTES_OBJECT_REF + 5*RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 6*RamUsageEstimator.NUM_BYTES_INT;
- // used only by assert
- private Term lastDeleteTerm;
-
- private PrintStream infoStream;
- private final AtomicLong bytesUsed = new AtomicLong();
- private final AtomicInteger numTerms = new AtomicInteger();
- private final int messageID;
+ /* Rough logic: del docIDs are List<Integer>. Say list
+ allocates ~2X size (2*POINTER). Integer is OBJ_HEADER
+ + int */
+ final static int BYTES_PER_DEL_DOCID = 2*RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + RamUsageEstimator.NUM_BYTES_INT;
- public BufferedDeletes(int messageID) {
- this.messageID = messageID;
- }
+ /* Rough logic: HashMap has an array[Entry] w/ varying
+ load factor (say 2 * POINTER). Entry is object w/
+ Query key, Integer val, int hash, Entry next
+ (OBJ_HEADER + 3*POINTER + INT). Query we often
+ undercount (say 24 bytes). Integer is OBJ_HEADER + INT. */
+ final static int BYTES_PER_DEL_QUERY = 5*RamUsageEstimator.NUM_BYTES_OBJECT_REF + 2*RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 2*RamUsageEstimator.NUM_BYTES_INT + 24;
- private synchronized void message(String message) {
- if (infoStream != null) {
- infoStream.println("BD " + messageID + " [" + new Date() + "; " + Thread.currentThread().getName() + "]: BD " + message);
- }
- }
-
- public synchronized void setInfoStream(PrintStream infoStream) {
- this.infoStream = infoStream;
- }
+ // TODO: many of the deletes stored here will map to
+ // Integer.MAX_VALUE; we could be more efficient for this
+ // case ie use a SortedSet not a SortedMap. But: Java's
+ // SortedSet impls are simply backed by a Map so we won't
+ // save anything unless we do something custom...
+ final AtomicInteger numTermDeletes = new AtomicInteger();
+ final SortedMap<Term,Integer> terms = new TreeMap<Term,Integer>();
+ final Map<Query,Integer> queries = new HashMap<Query,Integer>();
+ final List<Integer> docIDs = new ArrayList<Integer>();
- public synchronized void pushDeletes(SegmentDeletes newDeletes, SegmentInfo info) {
- pushDeletes(newDeletes, info, false);
- }
+ public static final Integer MAX_INT = Integer.valueOf(Integer.MAX_VALUE);
- // Moves all pending deletes onto the provided segment,
- // then clears the pending deletes
- public synchronized void pushDeletes(SegmentDeletes newDeletes, SegmentInfo info, boolean noLimit) {
- assert newDeletes.any();
- numTerms.addAndGet(newDeletes.numTermDeletes.get());
+ final AtomicLong bytesUsed = new AtomicLong();
- if (!noLimit) {
- assert !deletesMap.containsKey(info);
- assert info != null;
- deletesMap.put(info, newDeletes);
- bytesUsed.addAndGet(newDeletes.bytesUsed.get());
+ private final static boolean VERBOSE_DELETES = false;
+
+ long gen;
+
+ @Override
+ public String toString() {
+ if (VERBOSE_DELETES) {
+ return "gen=" + gen + " numTerms=" + numTermDeletes + ", terms=" + terms
+ + ", queries=" + queries + ", docIDs=" + docIDs + ", bytesUsed="
+ + bytesUsed;
} else {
- final SegmentDeletes deletes = getDeletes(info);
- bytesUsed.addAndGet(-deletes.bytesUsed.get());
- deletes.update(newDeletes, noLimit);
- bytesUsed.addAndGet(deletes.bytesUsed.get());
- }
- if (infoStream != null) {
- message("push deletes seg=" + info + " dels=" + getDeletes(info));
+ String s = "gen=" + gen;
+ if (numTermDeletes.get() != 0) {
+ s += " " + numTermDeletes.get() + " deleted terms (unique count=" + terms.size() + ")";
+ }
+ if (queries.size() != 0) {
+ s += " " + queries.size() + " deleted queries";
+ }
+ if (docIDs.size() != 0) {
+ s += " " + docIDs.size() + " deleted docIDs";
+ }
+ if (bytesUsed.get() != 0) {
+ s += " bytesUsed=" + bytesUsed.get();
+ }
+
+ return s;
}
- assert checkDeleteStats();
}
- public synchronized void clear() {
- deletesMap.clear();
- numTerms.set(0);
+ void update(BufferedDeletes in) {
+ numTermDeletes.addAndGet(in.numTermDeletes.get());
+ for (Map.Entry<Term,Integer> ent : in.terms.entrySet()) {
+ final Term term = ent.getKey();
+ if (!terms.containsKey(term)) {
+ // only incr bytesUsed if this term wasn't already buffered:
+ bytesUsed.addAndGet(BYTES_PER_DEL_TERM);
+ }
+ terms.put(term, MAX_INT);
+ }
+
+ for (Map.Entry<Query,Integer> ent : in.queries.entrySet()) {
+ final Query query = ent.getKey();
+ if (!queries.containsKey(query)) {
+ // only incr bytesUsed if this query wasn't already buffered:
+ bytesUsed.addAndGet(BYTES_PER_DEL_QUERY);
+ }
+ queries.put(query, MAX_INT);
+ }
+
+ // docIDs never move across segments and the docIDs
+ // should already be cleared
+ }
+
+ public void addQuery(Query query, int docIDUpto) {
+ Integer current = queries.put(query, docIDUpto);
+ // increment bytes used only if the query wasn't added so far.
+ if (current == null) {
+ bytesUsed.addAndGet(BYTES_PER_DEL_QUERY);
+ }
+ }
+
+ public void addDocID(int docID) {
+ docIDs.add(Integer.valueOf(docID));
+ bytesUsed.addAndGet(BYTES_PER_DEL_DOCID);
+ }
+
+ public void addTerm(Term term, int docIDUpto) {
+ Integer current = terms.get(term);
+ if (current != null && docIDUpto < current) {
+ // Only record the new number if it's greater than the
+ // current one. This is important because if multiple
+ // threads are replacing the same doc at nearly the
+ // same time, it's possible that one thread that got a
+ // higher docID is scheduled before the other
+ // threads. If we blindly replace than we can
+ // incorrectly get both docs indexed.
+ return;
+ }
+
+ terms.put(term, Integer.valueOf(docIDUpto));
+ numTermDeletes.incrementAndGet();
+ if (current == null) {
+ bytesUsed.addAndGet(BYTES_PER_DEL_TERM + term.bytes.length);
+ }
+ }
+
+ void clear() {
+ terms.clear();
+ queries.clear();
+ docIDs.clear();
+ numTermDeletes.set(0);
bytesUsed.set(0);
}
-
- synchronized boolean any() {
- return bytesUsed.get() != 0;
- }
-
- public int numTerms() {
- return numTerms.get();
- }
-
- public long bytesUsed() {
- return bytesUsed.get();
- }
-
- // IW calls this on finishing a merge. While the merge
- // was running, it's possible new deletes were pushed onto
- // our last (and only our last) segment. In this case we
- // must carry forward those deletes onto the merged
- // segment.
- synchronized void commitMerge(MergePolicy.OneMerge merge) {
- assert checkDeleteStats();
- if (infoStream != null) {
- message("commitMerge merge.info=" + merge.info + " merge.segments=" + merge.segments);
- }
- final SegmentInfo lastInfo = merge.segments.lastElement();
- final SegmentDeletes lastDeletes = deletesMap.get(lastInfo);
- if (lastDeletes != null) {
- deletesMap.remove(lastInfo);
- assert !deletesMap.containsKey(merge.info);
- deletesMap.put(merge.info, lastDeletes);
- // don't need to update numTerms/bytesUsed since we
- // are just moving the deletes from one info to
- // another
- if (infoStream != null) {
- message("commitMerge done: new deletions=" + lastDeletes);
- }
- } else if (infoStream != null) {
- message("commitMerge done: no new deletions");
- }
- assert !anyDeletes(merge.segments.range(0, merge.segments.size()-1));
- assert checkDeleteStats();
- }
-
- synchronized void clear(SegmentDeletes deletes) {
- deletes.clear();
+
+ void clearDocIDs() {
+ bytesUsed.addAndGet(-docIDs.size()*BYTES_PER_DEL_DOCID);
+ docIDs.clear();
}
- public synchronized boolean applyDeletes(IndexWriter.ReaderPool readerPool, SegmentInfos segmentInfos, SegmentInfos applyInfos) throws IOException {
- if (!any()) {
- return false;
- }
- final long t0 = System.currentTimeMillis();
-
- if (infoStream != null) {
- message("applyDeletes: applyInfos=" + applyInfos + "; index=" + segmentInfos);
- }
-
- assert checkDeleteStats();
-
- assert applyInfos.size() > 0;
-
- boolean any = false;
-
- final SegmentInfo lastApplyInfo = applyInfos.lastElement();
- final int lastIdx = segmentInfos.indexOf(lastApplyInfo);
-
- final SegmentInfo firstInfo = applyInfos.firstElement();
- final int firstIdx = segmentInfos.indexOf(firstInfo);
-
- // applyInfos must be a slice of segmentInfos
- assert lastIdx - firstIdx + 1 == applyInfos.size();
-
- // iterate over all segment infos backwards
- // coalesceing deletes along the way
- // when we're at or below the last of the
- // segments to apply to, start applying the deletes
- // we traverse up to the first apply infos
- SegmentDeletes coalescedDeletes = null;
- boolean hasDeletes = false;
- for (int segIdx=segmentInfos.size()-1; segIdx >= firstIdx; segIdx--) {
- final SegmentInfo info = segmentInfos.info(segIdx);
- final SegmentDeletes deletes = deletesMap.get(info);
- assert deletes == null || deletes.any();
-
- if (deletes == null && coalescedDeletes == null) {
- continue;
- }
-
- if (infoStream != null) {
- message("applyDeletes: seg=" + info + " segment's deletes=[" + (deletes == null ? "null" : deletes) + "]; coalesced deletes=[" + (coalescedDeletes == null ? "null" : coalescedDeletes) + "]");
- }
-
- hasDeletes |= deletes != null;
-
- if (segIdx <= lastIdx && hasDeletes) {
-
- final long delCountInc = applyDeletes(readerPool, info, coalescedDeletes, deletes);
-
- if (delCountInc != 0) {
- any = true;
- }
- if (infoStream != null) {
- message("deletes touched " + delCountInc + " docIDs");
- }
-
- if (deletes != null) {
- // we've applied doc ids, and they're only applied
- // on the current segment
- bytesUsed.addAndGet(-deletes.docIDs.size() * SegmentDeletes.BYTES_PER_DEL_DOCID);
- deletes.clearDocIDs();
- }
- }
-
- // now coalesce at the max limit
- if (deletes != null) {
- if (coalescedDeletes == null) {
- coalescedDeletes = new SegmentDeletes();
- }
- // TODO: we could make this single pass (coalesce as
- // we apply the deletes
- coalescedDeletes.update(deletes, true);
- }
- }
-
- // move all deletes to segment just before our merge.
- if (firstIdx > 0) {
-
- SegmentDeletes mergedDeletes = null;
- // TODO: we could also make this single pass
- for (SegmentInfo info : applyInfos) {
- final SegmentDeletes deletes = deletesMap.get(info);
- if (deletes != null) {
- assert deletes.any();
- if (mergedDeletes == null) {
- mergedDeletes = getDeletes(segmentInfos.info(firstIdx-1));
- numTerms.addAndGet(-mergedDeletes.numTermDeletes.get());
- assert numTerms.get() >= 0;
- bytesUsed.addAndGet(-mergedDeletes.bytesUsed.get());
- assert bytesUsed.get() >= 0;
- }
-
- mergedDeletes.update(deletes, true);
- }
- }
-
- if (mergedDeletes != null) {
- numTerms.addAndGet(mergedDeletes.numTermDeletes.get());
- bytesUsed.addAndGet(mergedDeletes.bytesUsed.get());
- }
-
- if (infoStream != null) {
- if (mergedDeletes != null) {
- message("applyDeletes: merge all deletes into seg=" + segmentInfos.info(firstIdx-1) + ": " + mergedDeletes);
- } else {
- message("applyDeletes: no deletes to merge");
- }
- }
- } else {
- // We drop the deletes in this case, because we've
- // applied them to segment infos starting w/ the first
- // segment. There are no prior segments so there's no
- // reason to keep them around. When the applyInfos ==
- // segmentInfos this means all deletes have been
- // removed:
- }
- remove(applyInfos);
-
- assert checkDeleteStats();
- assert applyInfos != segmentInfos || !any();
-
- if (infoStream != null) {
- message("applyDeletes took " + (System.currentTimeMillis()-t0) + " msec");
- }
- return any;
- }
-
- private synchronized long applyDeletes(IndexWriter.ReaderPool readerPool,
- SegmentInfo info,
- SegmentDeletes coalescedDeletes,
- SegmentDeletes segmentDeletes) throws IOException {
- assert readerPool.infoIsLive(info);
-
- assert coalescedDeletes == null || coalescedDeletes.docIDs.size() == 0;
-
- long delCount = 0;
-
- // Lock order: IW -> BD -> RP
- SegmentReader reader = readerPool.get(info, false);
- try {
- if (coalescedDeletes != null) {
- delCount += applyDeletes(coalescedDeletes, reader);
- }
- if (segmentDeletes != null) {
- delCount += applyDeletes(segmentDeletes, reader);
- }
- } finally {
- readerPool.release(reader);
- }
- return delCount;
- }
-
- private synchronized long applyDeletes(SegmentDeletes deletes, SegmentReader reader) throws IOException {
-
- long delCount = 0;
-
- assert checkDeleteTerm(null);
-
- if (deletes.terms.size() > 0) {
- Fields fields = reader.fields();
- if (fields == null) {
- // This reader has no postings
- return 0;
- }
-
- TermsEnum termsEnum = null;
-
- String currentField = null;
- DocsEnum docs = null;
-
- for (Entry<Term,Integer> entry: deletes.terms.entrySet()) {
- Term term = entry.getKey();
- // Since we visit terms sorted, we gain performance
- // by re-using the same TermsEnum and seeking only
- // forwards
- if (term.field() != currentField) {
- assert currentField == null || currentField.compareTo(term.field()) < 0;
- currentField = term.field();
- Terms terms = fields.terms(currentField);
- if (terms != null) {
- termsEnum = terms.iterator();
- } else {
- termsEnum = null;
- }
- }
-
- if (termsEnum == null) {
- continue;
- }
- assert checkDeleteTerm(term);
-
- if (termsEnum.seek(term.bytes(), false) == TermsEnum.SeekStatus.FOUND) {
- DocsEnum docsEnum = termsEnum.docs(reader.getDeletedDocs(), docs);
-
- if (docsEnum != null) {
- docs = docsEnum;
- final int limit = entry.getValue();
- while (true) {
- final int docID = docs.nextDoc();
- if (docID == DocsEnum.NO_MORE_DOCS || docID >= limit) {
- break;
- }
- reader.deleteDocument(docID);
- // TODO: we could/should change
- // reader.deleteDocument to return boolean
- // true if it did in fact delete, because here
- // we could be deleting an already-deleted doc
- // which makes this an upper bound:
- delCount++;
- }
- }
- }
- }
- }
-
- // Delete by docID
- for (Integer docIdInt : deletes.docIDs) {
- int docID = docIdInt.intValue();
- reader.deleteDocument(docID);
- delCount++;
- }
-
- // Delete by query
- if (deletes.queries.size() > 0) {
- IndexSearcher searcher = new IndexSearcher(reader);
- assert searcher.getTopReaderContext().isAtomic;
- final AtomicReaderContext readerContext = (AtomicReaderContext) searcher.getTopReaderContext();
- try {
- for (Entry<Query,Integer> entry : deletes.queries.entrySet()) {
- Query query = entry.getKey();
- int limit = entry.getValue().intValue();
- Weight weight = query.weight(searcher);
- Scorer scorer = weight.scorer(readerContext, Weight.ScorerContext.def());
- if (scorer != null) {
- while(true) {
- int doc = scorer.nextDoc();
- if (doc >= limit)
- break;
-
- reader.deleteDocument(doc);
- // TODO: we could/should change
- // reader.deleteDocument to return boolean
- // true if it did in fact delete, because here
- // we could be deleting an already-deleted doc
- // which makes this an upper bound:
- delCount++;
- }
- }
- }
- } finally {
- searcher.close();
- }
- }
-
- return delCount;
- }
-
- public synchronized SegmentDeletes getDeletes(SegmentInfo info) {
- SegmentDeletes deletes = deletesMap.get(info);
- if (deletes == null) {
- deletes = new SegmentDeletes();
- deletesMap.put(info, deletes);
- }
- return deletes;
- }
-
- public synchronized void remove(SegmentInfos infos) {
- assert infos.size() > 0;
- for (SegmentInfo info : infos) {
- SegmentDeletes deletes = deletesMap.get(info);
- if (deletes != null) {
- bytesUsed.addAndGet(-deletes.bytesUsed.get());
- assert bytesUsed.get() >= 0: "bytesUsed=" + bytesUsed;
- numTerms.addAndGet(-deletes.numTermDeletes.get());
- assert numTerms.get() >= 0: "numTerms=" + numTerms;
- deletesMap.remove(info);
- }
- }
- }
-
- // used only by assert
- private boolean anyDeletes(SegmentInfos infos) {
- for(SegmentInfo info : infos) {
- if (deletesMap.containsKey(info)) {
- return true;
- }
- }
- return false;
- }
-
- // used only by assert
- private boolean checkDeleteTerm(Term term) {
- if (term != null) {
- assert lastDeleteTerm == null || term.compareTo(lastDeleteTerm) > 0: "lastTerm=" + lastDeleteTerm + " vs term=" + term;
- }
- lastDeleteTerm = term;
- return true;
- }
-
- // only for assert
- private boolean checkDeleteStats() {
- int numTerms2 = 0;
- long bytesUsed2 = 0;
- for(SegmentDeletes deletes : deletesMap.values()) {
- numTerms2 += deletes.numTermDeletes.get();
- bytesUsed2 += deletes.bytesUsed.get();
- }
- assert numTerms2 == numTerms.get(): "numTerms2=" + numTerms2 + " vs " + numTerms.get();
- assert bytesUsed2 == bytesUsed.get(): "bytesUsed2=" + bytesUsed2 + " vs " + bytesUsed;
- return true;
+ boolean any() {
+ return terms.size() > 0 || docIDs.size() > 0 || queries.size() > 0;
}
}
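
The docIDUpto guard in addTerm above is easy to misread: a buffered term delete applies only to documents with docID below the recorded limit, so when two threads replace the same document concurrently the packet must keep the highest limit seen. A minimal standalone sketch of that invariant (plain Java, with a hypothetical string key standing in for Lucene's Term; not Lucene API):

    import java.util.Map;
    import java.util.TreeMap;

    public class AddTermGuardSketch {
      // term -> docIDUpto: the delete applies to docs with id < docIDUpto.
      static final Map<String, Integer> terms = new TreeMap<String, Integer>();

      static void addTerm(String term, int docIDUpto) {
        Integer current = terms.get(term);
        if (current != null && docIDUpto < current) {
          // Keep the higher limit: lowering it would spare another
          // thread's stale doc from deletion, leaving both the old
          // and the replacement doc in the index.
          return;
        }
        terms.put(term, docIDUpto);
      }

      public static void main(String[] args) {
        // Two threads replace the same doc; the packet with the higher
        // limit happens to be buffered first:
        addTerm("id:42", 7);
        addTerm("id:42", 5);  // ignored, the limit stays at 7
        System.out.println(terms);  // prints {id:42=7}
      }
    }
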
diff --git a/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java b/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java
new file mode 100644
index 00000000000..b9a0184a0c1
--- /dev/null
+++ b/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java
@@ -0,0 +1,440 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.Map.Entry;
+import java.util.Comparator;
+import java.util.Collections;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+
+/* Tracks the stream of {@link BufferedDeletes}.
+ * When DocumentsWriter flushes, its buffered
+ * deletes are appended to this stream. We later
+ * apply these deletes (resolve them to the actual
+ * docIDs, per segment) when a merge is started
+ * (only to the to-be-merged segments). We
+ * also apply to all segments when NRT reader is pulled,
+ * commit/close is called, or when too many deletes are
+ * buffered and must be flushed (by RAM usage or by count).
+ *
+ * Each packet is assigned a generation, and each flushed or
+ * merged segment is also assigned a generation, so we can
+ * track which BufferedDeletes packets to apply to any given
+ * segment. */
+
+class BufferedDeletesStream {
+
+ // TODO: maybe linked list?
+ private final List<BufferedDeletes> deletes = new ArrayList<BufferedDeletes>();
+
+ // Starts at 1 so that SegmentInfos that have never had
+ // deletes applied (whose bufferedDelGen defaults to 0)
+ // will be correct:
+ private long nextGen = 1;
+
+ // used only by assert
+ private Term lastDeleteTerm;
+
+ private PrintStream infoStream;
+ private final AtomicLong bytesUsed = new AtomicLong();
+ private final AtomicInteger numTerms = new AtomicInteger();
+ private final int messageID;
+
+ public BufferedDeletesStream(int messageID) {
+ this.messageID = messageID;
+ }
+
+ private synchronized void message(String message) {
+ if (infoStream != null) {
+ infoStream.println("BD " + messageID + " [" + new Date() + "; " + Thread.currentThread().getName() + "]: " + message);
+ }
+ }
+
+ public synchronized void setInfoStream(PrintStream infoStream) {
+ this.infoStream = infoStream;
+ }
+
+ // Appends a new packet of buffered deletes to the stream,
+ // setting its generation:
+ public synchronized void push(BufferedDeletes packet) {
+ assert packet.any();
+ assert checkDeleteStats();
+ packet.gen = nextGen++;
+ deletes.add(packet);
+ numTerms.addAndGet(packet.numTermDeletes.get());
+ bytesUsed.addAndGet(packet.bytesUsed.get());
+ if (infoStream != null) {
+ message("push deletes " + packet + " delGen=" + packet.gen + " packetCount=" + deletes.size());
+ }
+ assert checkDeleteStats();
+ }
+
+ public synchronized void clear() {
+ deletes.clear();
+ nextGen = 1;
+ numTerms.set(0);
+ bytesUsed.set(0);
+ }
+
+ public boolean any() {
+ return bytesUsed.get() != 0;
+ }
+
+ public int numTerms() {
+ return numTerms.get();
+ }
+
+ public long bytesUsed() {
+ return bytesUsed.get();
+ }
+
+ public static class ApplyDeletesResult {
+ // True if any actual deletes took place:
+ public final boolean anyDeletes;
+
+ // Current gen, for the merged segment:
+ public final long gen;
+
+ ApplyDeletesResult(boolean anyDeletes, long gen) {
+ this.anyDeletes = anyDeletes;
+ this.gen = gen;
+ }
+ }
+
+ // Sorts SegmentInfos from smallest to biggest bufferedDelGen:
+ private static final Comparator<SegmentInfo> sortByDelGen = new Comparator<SegmentInfo>() {
+ @Override
+ public int compare(SegmentInfo si1, SegmentInfo si2) {
+ final long cmp = si1.getBufferedDeletesGen() - si2.getBufferedDeletesGen();
+ if (cmp > 0) {
+ return 1;
+ } else if (cmp < 0) {
+ return -1;
+ } else {
+ return 0;
+ }
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return sortByDelGen == other;
+ }
+ };
+
+ /** Resolves the buffered deleted Term/Query/docIDs, into
+ * actual deleted docIDs in the deletedDocs BitVector for
+ * each SegmentReader. */
+ public synchronized ApplyDeletesResult applyDeletes(IndexWriter.ReaderPool readerPool, SegmentInfos infos) throws IOException {
+ final long t0 = System.currentTimeMillis();
+
+ if (infos.size() == 0) {
+ return new ApplyDeletesResult(false, nextGen++);
+ }
+
+ assert checkDeleteStats();
+
+ if (!any()) {
+ message("applyDeletes: no deletes; skipping");
+ return new ApplyDeletesResult(false, nextGen++);
+ }
+
+ if (infoStream != null) {
+ message("applyDeletes: infos=" + infos + " packetCount=" + deletes.size());
+ }
+
+ SegmentInfos infos2 = new SegmentInfos();
+ infos2.addAll(infos);
+ Collections.sort(infos2, sortByDelGen);
+
+ BufferedDeletes coalescedDeletes = null;
+ boolean anyNewDeletes = false;
+
+ int infosIDX = infos2.size()-1;
+ int delIDX = deletes.size()-1;
+
+ while (infosIDX >= 0) {
+ //System.out.println("BD: cycle delIDX=" + delIDX + " infoIDX=" + infosIDX);
+
+ final BufferedDeletes packet = delIDX >= 0 ? deletes.get(delIDX) : null;
+ final SegmentInfo info = infos2.get(infosIDX);
+ final long segGen = info.getBufferedDeletesGen();
+
+ if (packet != null && segGen < packet.gen) {
+ //System.out.println(" coalesce");
+ if (coalescedDeletes == null) {
+ coalescedDeletes = new BufferedDeletes();
+ }
+ coalescedDeletes.update(packet);
+ delIDX--;
+ } else if (packet != null && segGen == packet.gen) {
+ //System.out.println(" eq");
+
+ // Lock order: IW -> BD -> RP
+ assert readerPool.infoIsLive(info);
+ SegmentReader reader = readerPool.get(info, false);
+ int delCount = 0;
+ try {
+ if (coalescedDeletes != null) {
+ delCount += applyDeletes(coalescedDeletes, reader);
+ }
+ delCount += applyDeletes(packet, reader);
+ } finally {
+ readerPool.release(reader);
+ }
+ anyNewDeletes |= delCount > 0;
+
+ // We've applied doc ids, and they're only applied
+ // on the current segment
+ bytesUsed.addAndGet(-packet.docIDs.size() * BufferedDeletes.BYTES_PER_DEL_DOCID);
+ packet.clearDocIDs();
+
+ if (infoStream != null) {
+ message("seg=" + info + " segGen=" + segGen + " segDeletes=[" + packet + "]; coalesced deletes=[" + (coalescedDeletes == null ? "null" : coalescedDeletes) + "] delCount=" + delCount);
+ }
+
+ if (coalescedDeletes == null) {
+ coalescedDeletes = new BufferedDeletes();
+ }
+ coalescedDeletes.update(packet);
+ delIDX--;
+ infosIDX--;
+ info.setBufferedDeletesGen(nextGen);
+
+ } else {
+ //System.out.println(" gt");
+
+ if (coalescedDeletes != null) {
+ // Lock order: IW -> BD -> RP
+ assert readerPool.infoIsLive(info);
+ SegmentReader reader = readerPool.get(info, false);
+ int delCount = 0;
+ try {
+ delCount += applyDeletes(coalescedDeletes, reader);
+ } finally {
+ readerPool.release(reader);
+ }
+ anyNewDeletes |= delCount > 0;
+
+ if (infoStream != null) {
+ message("seg=" + info + " segGen=" + segGen + " coalesced deletes=[" + (coalescedDeletes == null ? "null" : coalescedDeletes) + "] delCount=" + delCount);
+ }
+ }
+ info.setBufferedDeletesGen(nextGen);
+
+ infosIDX--;
+ }
+ }
+
+ assert checkDeleteStats();
+ if (infoStream != null) {
+ message("applyDeletes took " + (System.currentTimeMillis()-t0) + " msec");
+ }
+ // assert infos != segmentInfos || !any() : "infos=" + infos + " segmentInfos=" + segmentInfos + " any=" + any;
+
+ return new ApplyDeletesResult(anyNewDeletes, nextGen++);
+ }
+
+ public synchronized long getNextGen() {
+ return nextGen++;
+ }
+
+ // Lock order IW -> BD
+ /* Removes any BufferedDeletes that we no longer need to
+ * store because all segments in the index have had the
+ * deletes applied. */
+ public synchronized void prune(SegmentInfos segmentInfos) {
+ assert checkDeleteStats();
+ long minGen = Long.MAX_VALUE;
+ for(SegmentInfo info : segmentInfos) {
+ minGen = Math.min(info.getBufferedDeletesGen(), minGen);
+ }
+
+ if (infoStream != null) {
+ message("prune sis=" + segmentInfos + " minGen=" + minGen + " packetCount=" + deletes.size());
+ }
+
+ final int limit = deletes.size();
+ for(int delIDX=0;delIDX<limit;delIDX++) {
+ if (deletes.get(delIDX).gen >= minGen) {
+ prune(delIDX);
+ assert checkDeleteStats();
+ return;
+ }
+ }
+
+ // All deletes pruned
+ prune(limit);
+ assert !any();
+ assert checkDeleteStats();
+ }
+
+ private synchronized void prune(int count) {
+ if (count > 0) {
+ if (infoStream != null) {
+ message("pruneDeletes: prune " + count + " packets; " + (deletes.size() - count) + " packets remain");
+ }
+ for(int delIDX=0;delIDX<count;delIDX++) {
+ final BufferedDeletes packet = deletes.get(delIDX);
+ numTerms.addAndGet(-packet.numTermDeletes.get());
+ assert numTerms.get() >= 0;
+ bytesUsed.addAndGet(-packet.bytesUsed.get());
+ assert bytesUsed.get() >= 0;
+ }
+ deletes.subList(0, count).clear();
+ }
+ }
+
+ private synchronized long applyDeletes(BufferedDeletes deletes, SegmentReader reader) throws IOException {
+
+ long delCount = 0;
+
+ assert checkDeleteTerm(null);
+
+ if (deletes.terms.size() > 0) {
+ Fields fields = reader.fields();
+ if (fields == null) {
+ // This reader has no postings
+ return 0;
+ }
+
+ TermsEnum termsEnum = null;
+
+ String currentField = null;
+ DocsEnum docs = null;
+
+ for (Entry<Term,Integer> entry: deletes.terms.entrySet()) {
+ Term term = entry.getKey();
+ // Since we visit terms sorted, we gain performance
+ // by re-using the same TermsEnum and seeking only
+ // forwards
+ if (term.field() != currentField) {
+ assert currentField == null || currentField.compareTo(term.field()) < 0;
+ currentField = term.field();
+ Terms terms = fields.terms(currentField);
+ if (terms != null) {
+ termsEnum = terms.iterator();
+ } else {
+ termsEnum = null;
+ }
+ }
+
+ if (termsEnum == null) {
+ continue;
+ }
+ assert checkDeleteTerm(term);
+
+ if (termsEnum.seek(term.bytes(), false) == TermsEnum.SeekStatus.FOUND) {
+ DocsEnum docsEnum = termsEnum.docs(reader.getDeletedDocs(), docs);
+
+ if (docsEnum != null) {
+ docs = docsEnum;
+ final int limit = entry.getValue();
+ while (true) {
+ final int docID = docs.nextDoc();
+ if (docID == DocsEnum.NO_MORE_DOCS || docID >= limit) {
+ break;
+ }
+ reader.deleteDocument(docID);
+ // TODO: we could/should change
+ // reader.deleteDocument to return boolean
+ // true if it did in fact delete, because here
+ // we could be deleting an already-deleted doc
+ // which makes this an upper bound:
+ delCount++;
+ }
+ }
+ }
+ }
+ }
+
+ // Delete by docID
+ for (Integer docIdInt : deletes.docIDs) {
+ int docID = docIdInt.intValue();
+ reader.deleteDocument(docID);
+ delCount++;
+ }
+
+ // Delete by query
+ if (deletes.queries.size() > 0) {
+ IndexSearcher searcher = new IndexSearcher(reader);
+ assert searcher.getTopReaderContext().isAtomic;
+ final AtomicReaderContext readerContext = (AtomicReaderContext) searcher.getTopReaderContext();
+ try {
+ for (Entry<Query,Integer> entry : deletes.queries.entrySet()) {
+ Query query = entry.getKey();
+ int limit = entry.getValue().intValue();
+ Weight weight = query.weight(searcher);
+ Scorer scorer = weight.scorer(readerContext, Weight.ScorerContext.def());
+ if (scorer != null) {
+ while(true) {
+ int doc = scorer.nextDoc();
+ if (doc >= limit)
+ break;
+
+ reader.deleteDocument(doc);
+ // TODO: we could/should change
+ // reader.deleteDocument to return boolean
+ // true if it did in fact delete, because here
+ // we could be deleting an already-deleted doc
+ // which makes this an upper bound:
+ delCount++;
+ }
+ }
+ }
+ } finally {
+ searcher.close();
+ }
+ }
+
+ return delCount;
+ }
+
+ // used only by assert
+ private boolean checkDeleteTerm(Term term) {
+ if (term != null) {
+ assert lastDeleteTerm == null || term.compareTo(lastDeleteTerm) > 0: "lastTerm=" + lastDeleteTerm + " vs term=" + term;
+ }
+ lastDeleteTerm = term;
+ return true;
+ }
+
+ // only for assert
+ private boolean checkDeleteStats() {
+ int numTerms2 = 0;
+ long bytesUsed2 = 0;
+ for(BufferedDeletes packet : deletes) {
+ numTerms2 += packet.numTermDeletes.get();
+ bytesUsed2 += packet.bytesUsed.get();
+ }
+ assert numTerms2 == numTerms.get(): "numTerms2=" + numTerms2 + " vs " + numTerms.get();
+ assert bytesUsed2 == bytesUsed.get(): "bytesUsed2=" + bytesUsed2 + " vs " + bytesUsed;
+ return true;
+ }
+}
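
To make the new class's bookkeeping concrete: every pushed packet takes the next generation, every flushed or merged segment records the generation current at that moment, and a packet's Term/Query deletes apply to a segment only when the packet's gen is at or above the segment's, while by-docID deletes apply only to the packet's own segment. A toy model of that gating, with illustrative names that are not Lucene API:

    import java.util.ArrayList;
    import java.util.List;

    public class DelGenSketch {
      static class Packet { final long gen; Packet(long gen) { this.gen = gen; } }
      static class Segment {
        final String name; final long gen;
        Segment(String name, long gen) { this.name = name; this.gen = gen; }
      }

      // Term/query deletes apply iff the packet was pushed at or after
      // the segment's gen; docID deletes only on gen equality.
      static boolean termsApply(Packet p, Segment s) { return p.gen >= s.gen; }
      static boolean docIDsApply(Packet p, Segment s) { return p.gen == s.gen; }

      public static void main(String[] args) {
        List<Segment> segs = new ArrayList<Segment>();
        segs.add(new Segment("_0", 1));  // flushed alongside packet gen 1
        segs.add(new Segment("_1", 2));  // flushed alongside packet gen 2
        Packet p1 = new Packet(1), p2 = new Packet(2);
        for (Segment s : segs) {
          System.out.println(s.name + ": p1.terms=" + termsApply(p1, s)
              + " p2.terms=" + termsApply(p2, s)
              + " p2.docIDs=" + docIDsApply(p2, s));
        }
        // _0: p1.terms=true p2.terms=true p2.docIDs=false
        // _1: p1.terms=false p2.terms=true p2.docIDs=true
      }
    }

This ordering is what lets applyDeletes walk the gen-sorted segments and the packet list as two cursors in a single pass.
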
diff --git a/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java b/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java
index 25cd8cac737..d3c6caee9fe 100644
--- a/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java
+++ b/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java
@@ -134,7 +134,7 @@ final class DocumentsWriter {
private final int maxThreadStates;
// Deletes for our still-in-RAM (to be flushed next) segment
- private SegmentDeletes pendingDeletes = new SegmentDeletes();
+ private BufferedDeletes pendingDeletes = new BufferedDeletes();
static class DocState {
DocumentsWriter docWriter;
@@ -278,16 +278,16 @@ final class DocumentsWriter {
private boolean closed;
private final FieldInfos fieldInfos;
- private final BufferedDeletes bufferedDeletes;
+ private final BufferedDeletesStream bufferedDeletesStream;
private final IndexWriter.FlushControl flushControl;
- DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain, int maxThreadStates, FieldInfos fieldInfos, BufferedDeletes bufferedDeletes) throws IOException {
+ DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain, int maxThreadStates, FieldInfos fieldInfos, BufferedDeletesStream bufferedDeletesStream) throws IOException {
this.directory = directory;
this.writer = writer;
this.similarityProvider = writer.getConfig().getSimilarityProvider();
this.maxThreadStates = maxThreadStates;
this.fieldInfos = fieldInfos;
- this.bufferedDeletes = bufferedDeletes;
+ this.bufferedDeletesStream = bufferedDeletesStream;
flushControl = writer.flushControl;
consumer = indexingChain.getChain(this);
@@ -501,23 +501,24 @@ final class DocumentsWriter {
}
// for testing
- public SegmentDeletes getPendingDeletes() {
+ public BufferedDeletes getPendingDeletes() {
return pendingDeletes;
}
private void pushDeletes(SegmentInfo newSegment, SegmentInfos segmentInfos) {
// Lock order: DW -> BD
if (pendingDeletes.any()) {
- if (newSegment != null) {
+ if (segmentInfos.size() > 0 || newSegment != null) {
if (infoStream != null) {
- message("flush: push buffered deletes to newSegment");
+ message("flush: push buffered deletes");
}
- bufferedDeletes.pushDeletes(pendingDeletes, newSegment);
- } else if (segmentInfos.size() > 0) {
+ bufferedDeletesStream.push(pendingDeletes);
if (infoStream != null) {
- message("flush: push buffered deletes to previously flushed segment " + segmentInfos.lastElement());
+ message("flush: delGen=" + pendingDeletes.gen);
+ }
+ if (newSegment != null) {
+ newSegment.setBufferedDeletesGen(pendingDeletes.gen);
}
- bufferedDeletes.pushDeletes(pendingDeletes, segmentInfos.lastElement(), true);
} else {
if (infoStream != null) {
message("flush: drop buffered deletes: no segments");
@@ -526,7 +527,9 @@ final class DocumentsWriter {
// there are no segments, the deletions cannot
// affect anything.
}
- pendingDeletes = new SegmentDeletes();
+ pendingDeletes = new BufferedDeletes();
+ } else if (newSegment != null) {
+ newSegment.setBufferedDeletesGen(bufferedDeletesStream.getNextGen());
}
}
@@ -639,7 +642,6 @@ final class DocumentsWriter {
// Lock order: IW -> DW -> BD
pushDeletes(newSegment, segmentInfos);
-
if (infoStream != null) {
message("flush time " + (System.currentTimeMillis()-startTime) + " msec");
}
@@ -964,7 +966,7 @@ final class DocumentsWriter {
final boolean doBalance;
final long deletesRAMUsed;
- deletesRAMUsed = bufferedDeletes.bytesUsed();
+ deletesRAMUsed = bufferedDeletesStream.bytesUsed();
synchronized(this) {
if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH || bufferIsFull) {
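
The rewritten pushDeletes above funnels everything through the stream: deletes are pushed whenever any segment could be affected (prior segments or the new one), dropped when the index is empty, and a segment flushed with no pending deletes still consumes a generation. A compact sketch of just that decision, under assumed names rather than the real DocumentsWriter API:

    public class PushDeletesSketch {
      // Minimal stand-in for BufferedDeletesStream: hands out gens.
      static class Stream {
        private long nextGen = 1;
        long push() { return nextGen++; }       // packet takes this gen
        long getNextGen() { return nextGen++; } // segment without deletes
      }

      // Returns the bufferedDeletesGen for the new segment, or -1 when
      // there is no new segment to stamp.
      static long onFlush(Stream stream, boolean pendingDeletesAny,
                          boolean haveNewSegment, int priorSegments) {
        if (pendingDeletesAny) {
          if (priorSegments > 0 || haveNewSegment) {
            long gen = stream.push();  // the deletes can affect something
            return haveNewSegment ? gen : -1;
          }
          return -1;  // empty index: the deletes are safely dropped
        }
        return haveNewSegment ? stream.getNextGen() : -1;
      }

      public static void main(String[] args) {
        Stream s = new Stream();
        System.out.println(onFlush(s, true, true, 0));   // 1: pushed
        System.out.println(onFlush(s, false, true, 1));  // 2: no deletes
        System.out.println(onFlush(s, true, false, 0));  // -1: dropped
      }
    }
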
diff --git a/lucene/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/src/java/org/apache/lucene/index/IndexWriter.java
index 613d47058f5..20f7b35bbf8 100644
--- a/lucene/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/src/java/org/apache/lucene/index/IndexWriter.java
@@ -251,7 +251,7 @@ public class IndexWriter implements Closeable {
private final AtomicInteger flushDeletesCount = new AtomicInteger();
final ReaderPool readerPool = new ReaderPool();
- final BufferedDeletes bufferedDeletes;
+ final BufferedDeletesStream bufferedDeletesStream;
// This is a "write once" variable (like the organic dye
// on a DVD-R that may or may not be heated by a laser and
@@ -707,8 +707,8 @@ public class IndexWriter implements Closeable {
mergedSegmentWarmer = conf.getMergedSegmentWarmer();
codecs = conf.getCodecProvider();
- bufferedDeletes = new BufferedDeletes(messageID);
- bufferedDeletes.setInfoStream(infoStream);
+ bufferedDeletesStream = new BufferedDeletesStream(messageID);
+ bufferedDeletesStream.setInfoStream(infoStream);
poolReaders = conf.getReaderPooling();
OpenMode mode = conf.getOpenMode();
@@ -773,7 +773,7 @@ public class IndexWriter implements Closeable {
setRollbackSegmentInfos(segmentInfos);
- docWriter = new DocumentsWriter(directory, this, conf.getIndexingChain(), conf.getMaxThreadStates(), getCurrentFieldInfos(), bufferedDeletes);
+ docWriter = new DocumentsWriter(directory, this, conf.getIndexingChain(), conf.getMaxThreadStates(), getCurrentFieldInfos(), bufferedDeletesStream);
docWriter.setInfoStream(infoStream);
// Default deleter (for backwards compatibility) is
@@ -921,7 +921,7 @@ public class IndexWriter implements Closeable {
this.infoStream = infoStream;
docWriter.setInfoStream(infoStream);
deleter.setInfoStream(infoStream);
- bufferedDeletes.setInfoStream(infoStream);
+ bufferedDeletesStream.setInfoStream(infoStream);
if (infoStream != null)
messageState();
}
@@ -1167,7 +1167,7 @@ public class IndexWriter implements Closeable {
public synchronized boolean hasDeletions() throws IOException {
ensureOpen();
- if (bufferedDeletes.any()) {
+ if (bufferedDeletesStream.any()) {
return true;
}
if (docWriter.anyDeletions()) {
@@ -1882,7 +1882,7 @@ public class IndexWriter implements Closeable {
mergePolicy.close();
mergeScheduler.close();
- bufferedDeletes.clear();
+ bufferedDeletesStream.clear();
synchronized(this) {
@@ -2525,10 +2525,10 @@ public class IndexWriter implements Closeable {
// tiny segments:
if (flushControl.getFlushDeletes() ||
(config.getRAMBufferSizeMB() != IndexWriterConfig.DISABLE_AUTO_FLUSH &&
- bufferedDeletes.bytesUsed() > (1024*1024*config.getRAMBufferSizeMB()/2))) {
+ bufferedDeletesStream.bytesUsed() > (1024*1024*config.getRAMBufferSizeMB()/2))) {
applyAllDeletes = true;
if (infoStream != null) {
- message("force apply deletes bytesUsed=" + bufferedDeletes.bytesUsed() + " vs ramBuffer=" + (1024*1024*config.getRAMBufferSizeMB()));
+ message("force apply deletes bytesUsed=" + bufferedDeletesStream.bytesUsed() + " vs ramBuffer=" + (1024*1024*config.getRAMBufferSizeMB()));
}
}
}
@@ -2538,12 +2538,15 @@ public class IndexWriter implements Closeable {
message("apply all deletes during flush");
}
flushDeletesCount.incrementAndGet();
- if (bufferedDeletes.applyDeletes(readerPool, segmentInfos, segmentInfos)) {
+ final BufferedDeletesStream.ApplyDeletesResult result = bufferedDeletesStream.applyDeletes(readerPool, segmentInfos);
+ if (result.anyDeletes) {
checkpoint();
}
+ bufferedDeletesStream.prune(segmentInfos);
+ assert !bufferedDeletesStream.any();
flushControl.clearDeletes();
} else if (infoStream != null) {
- message("don't apply deletes now delTermCount=" + bufferedDeletes.numTerms() + " bytesUsed=" + bufferedDeletes.bytesUsed());
+ message("don't apply deletes now delTermCount=" + bufferedDeletesStream.numTerms() + " bytesUsed=" + bufferedDeletesStream.bytesUsed());
}
doAfterFlush();
@@ -2569,7 +2572,7 @@ public class IndexWriter implements Closeable {
*/
public final long ramSizeInBytes() {
ensureOpen();
- return docWriter.bytesUsed() + bufferedDeletes.bytesUsed();
+ return docWriter.bytesUsed() + bufferedDeletesStream.bytesUsed();
}
/** Expert: Return the number of documents currently
@@ -2579,28 +2582,12 @@ public class IndexWriter implements Closeable {
return docWriter.getNumDocs();
}
- private int ensureContiguousMerge(MergePolicy.OneMerge merge) {
-
- int first = segmentInfos.indexOf(merge.segments.info(0));
- if (first == -1)
- throw new MergePolicy.MergeException("could not find segment " + merge.segments.info(0).name + " in current index " + segString(), directory);
-
- final int numSegments = segmentInfos.size();
-
- final int numSegmentsToMerge = merge.segments.size();
- for(int i=0;i<numSegmentsToMerge;i++) {
- final SegmentInfo info = merge.segments.info(i);
- if (first + i >= numSegments || !segmentInfos.info(first+i).equals(info)) {
- if (segmentInfos.indexOf(info) == -1)
- throw new MergePolicy.MergeException("MergePolicy selected a segment (" + info.name + ") that is not in the current index " + segString(), directory);
- else
- throw new MergePolicy.MergeException("MergePolicy selected non-contiguous segments to merge (" + merge.segString(directory) + " vs " + segString() + "), which IndexWriter (currently) cannot handle",
- directory);
+ private void ensureValidMerge(MergePolicy.OneMerge merge) {
+ for(SegmentInfo info : merge.segments) {
+ if (segmentInfos.indexOf(info) == -1) {
+ throw new MergePolicy.MergeException("MergePolicy selected a segment (" + info.name + ") that is not in the current index " + segString(), directory);
}
}
-
- return first;
}
/** Carefully merges deletes for the segments we just
@@ -2625,9 +2612,11 @@ public class IndexWriter implements Closeable {
// started merging:
int docUpto = 0;
int delCount = 0;
+ long minGen = Long.MAX_VALUE;
for(int i=0; i < sourceSegments.size(); i++) {
SegmentInfo info = sourceSegments.info(i);
+ minGen = Math.min(info.getBufferedDeletesGen(), minGen);
int docCount = info.docCount;
SegmentReader previousReader = merge.readersClone[i];
final Bits prevDelDocs = previousReader.getDeletedDocs();
@@ -2678,9 +2667,17 @@ public class IndexWriter implements Closeable {
assert mergedReader.numDeletedDocs() == delCount;
mergedReader.hasChanges = delCount > 0;
+
+ // If new deletes were applied while we were merging
+ // (which happens if eg commit() or getReader() is
+ // called during our merge), then it better be the case
+ // that the delGen has increased for all our merged
+ // segments:
+ assert !mergedReader.hasChanges || minGen > mergedReader.getSegmentInfo().getBufferedDeletesGen();
+
+ mergedReader.getSegmentInfo().setBufferedDeletesGen(minGen);
}
- /* FIXME if we want to support non-contiguous segment merges */
synchronized private boolean commitMerge(MergePolicy.OneMerge merge, SegmentReader mergedReader) throws IOException {
assert testPoint("startCommitMerge");
@@ -2706,7 +2703,7 @@ public class IndexWriter implements Closeable {
return false;
}
- final int start = ensureContiguousMerge(merge);
+ ensureValidMerge(merge);
commitMergedDeletes(merge, mergedReader);
@@ -2716,10 +2713,32 @@ public class IndexWriter implements Closeable {
// format as well:
setMergeDocStoreIsCompoundFile(merge);
- segmentInfos.subList(start, start + merge.segments.size()).clear();
assert !segmentInfos.contains(merge.info);
- segmentInfos.add(start, merge.info);
-
+
+ final Set<SegmentInfo> mergedAway = new HashSet<SegmentInfo>(merge.segments);
+ int segIdx = 0;
+ int newSegIdx = 0;
+ boolean inserted = false;
+ final int curSegCount = segmentInfos.size();
+ while(segIdx < curSegCount) {
+ final SegmentInfo info = segmentInfos.info(segIdx++);
+ if (mergedAway.contains(info)) {
+ if (!inserted) {
+ segmentInfos.set(segIdx-1, merge.info);
+ inserted = true;
+ newSegIdx++;
+ }
+ } else {
+ segmentInfos.set(newSegIdx++, info);
+ }
+ }
+ assert newSegIdx == curSegCount - merge.segments.size() + 1;
+ segmentInfos.subList(newSegIdx, segmentInfos.size()).clear();
+
+ if (infoStream != null) {
+ message("after commit: " + segString());
+ }
+
closeMergeReaders(merge, false);
// Must note the change to segmentInfos so any commits
@@ -2731,16 +2750,12 @@ public class IndexWriter implements Closeable {
// disk, updating SegmentInfo, etc.:
readerPool.clear(merge.segments);
- // remove pending deletes of the segments
- // that were merged, moving them onto the segment just
- // before the merged segment
- // Lock order: IW -> BD
- bufferedDeletes.commitMerge(merge);
-
if (merge.optimize) {
// cascade the optimize:
segmentsToOptimize.add(merge.info);
}
+
+
return true;
}
@@ -2868,7 +2883,7 @@ public class IndexWriter implements Closeable {
}
}
- ensureContiguousMerge(merge);
+ ensureValidMerge(merge);
pendingMerges.add(merge);
@@ -2918,6 +2933,9 @@ public class IndexWriter implements Closeable {
throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot merge");
}
+ // TODO: is there any perf benefit to sorting
+ // merged segments? eg biggest to smallest?
+
if (merge.info != null)
// mergeInit already done
return;
@@ -2925,16 +2943,22 @@ public class IndexWriter implements Closeable {
if (merge.isAborted())
return;
- // Lock order: IW -> BD
- if (bufferedDeletes.applyDeletes(readerPool, segmentInfos, merge.segments)) {
- checkpoint();
- }
-
// Bind a new segment name here so even with
// ConcurrentMergePolicy we keep deterministic segment
// names.
merge.info = new SegmentInfo(newSegmentName(), 0, directory, false, false, null, false);
+ // Lock order: IW -> BD
+ final BufferedDeletesStream.ApplyDeletesResult result = bufferedDeletesStream.applyDeletes(readerPool, merge.segments);
+ if (result.anyDeletes) {
+ checkpoint();
+ }
+
+ merge.info.setBufferedDeletesGen(result.gen);
+
+ // Lock order: IW -> BD
+ bufferedDeletesStream.prune(segmentInfos);
+
Map<String,String> details = new HashMap<String,String>();
details.put("optimize", Boolean.toString(merge.optimize));
details.put("mergeFactor", Integer.toString(merge.segments.size()));
@@ -3498,7 +3522,7 @@ public class IndexWriter implements Closeable {
}
synchronized boolean nrtIsCurrent(SegmentInfos infos) {
- return infos.version == segmentInfos.version && !docWriter.anyChanges() && !bufferedDeletes.any();
+ return infos.version == segmentInfos.version && !docWriter.anyChanges() && !bufferedDeletesStream.any();
}
synchronized boolean isClosed() {
@@ -3665,7 +3689,7 @@ public class IndexWriter implements Closeable {
final double ramBufferSizeMB = config.getRAMBufferSizeMB();
if (ramBufferSizeMB != IndexWriterConfig.DISABLE_AUTO_FLUSH) {
final long limit = (long) (ramBufferSizeMB*1024*1024);
- long used = bufferedDeletes.bytesUsed() + docWriter.bytesUsed();
+ long used = bufferedDeletesStream.bytesUsed() + docWriter.bytesUsed();
if (used >= limit) {
// DocumentsWriter may be able to free up some
@@ -3673,7 +3697,7 @@ public class IndexWriter implements Closeable {
// Lock order: FC -> DW
docWriter.balanceRAM();
- used = bufferedDeletes.bytesUsed() + docWriter.bytesUsed();
+ used = bufferedDeletesStream.bytesUsed() + docWriter.bytesUsed();
if (used >= limit) {
return setFlushPending("ram full: " + reason, false);
}
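
The commitMerge hunk above is the heart of the non-contiguous change: instead of clearing a contiguous sublist and inserting merge.info at its start, the writer rewrites segmentInfos in place, substituting the merged segment for the first merged-away entry and compacting the survivors in order. A standalone sketch of that rewrite over plain strings (hypothetical names, same algorithm):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class CommitMergeSketch {
      static List<String> commitMerge(List<String> infos,
                                      Set<String> mergedAway, String merged) {
        List<String> out = new ArrayList<String>(infos);
        int newIdx = 0;
        boolean inserted = false;
        for (String info : infos) {
          if (mergedAway.contains(info)) {
            if (!inserted) {
              out.set(newIdx++, merged);  // merged seg takes the first slot
              inserted = true;
            }
          } else {
            out.set(newIdx++, info);      // compact the untouched segments
          }
        }
        out.subList(newIdx, out.size()).clear();
        return out;
      }

      public static void main(String[] args) {
        List<String> infos = Arrays.asList("_0", "_1", "_2", "_3", "_4");
        Set<String> away = new HashSet<String>(Arrays.asList("_1", "_3"));
        System.out.println(commitMerge(infos, away, "_5"));
        // [_0, _5, _2, _4]: the scattered _1 and _3 collapse into _5
      }
    }
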
diff --git a/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java b/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java
index 1c84bb01d5d..9dd6278f78f 100644
--- a/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java
+++ b/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java
@@ -19,6 +19,8 @@ package org.apache.lucene.index;
import java.io.IOException;
import java.util.Set;
+import java.util.Arrays;
+import java.util.Comparator;
/** This class implements a {@link MergePolicy} that tries
* to merge segments into levels of exponentially
@@ -67,6 +69,7 @@ public abstract class LogMergePolicy extends MergePolicy {
// out there wrote his own LMP ...
protected long maxMergeSizeForOptimize = Long.MAX_VALUE;
protected int maxMergeDocs = DEFAULT_MAX_MERGE_DOCS;
+ protected boolean requireContiguousMerge = false;
protected double noCFSRatio = DEFAULT_NO_CFS_RATIO;
@@ -105,6 +108,21 @@ public abstract class LogMergePolicy extends MergePolicy {
writer.get().message("LMP: " + message);
}
+ /** If true, merges must be an in-order slice of the
+ * segments. If false, then the merge policy is free to
+ * pick any segments. The default is false, which is
+ * in general more efficient than true since it gives the
+ * merge policy more freedom to pick closely sized
+ * segments. */
+ public void setRequireContiguousMerge(boolean v) {
+ requireContiguousMerge = v;
+ }
+
+ /** See {@link #setRequireContiguousMerge}. */
+ public boolean getRequireContiguousMerge() {
+ return requireContiguousMerge;
+ }
+
/**
* Returns the number of segments that are merged at
* once and also controls the total number of segments
* allowed to accumulate in the index.
*/
@@ -356,6 +374,8 @@ public abstract class LogMergePolicy extends MergePolicy {
}
return null;
}
+
+ // TODO: handle non-contiguous merge case differently?
// Find the newest (rightmost) segment that needs to
// be optimized (other segments may have been flushed
@@ -454,6 +474,37 @@ public abstract class LogMergePolicy extends MergePolicy {
return spec;
}
+ private static class SegmentInfoAndLevel implements Comparable {
+ SegmentInfo info;
+ float level;
+ int index;
+
+ public SegmentInfoAndLevel(SegmentInfo info, float level, int index) {
+ this.info = info;
+ this.level = level;
+ this.index = index;
+ }
+
+ // Sorts largest to smallest
+ public int compareTo(Object o) {
+ SegmentInfoAndLevel other = (SegmentInfoAndLevel) o;
+ if (level < other.level)
+ return 1;
+ else if (level > other.level)
+ return -1;
+ else
+ return 0;
+ }
+ }
+
+ private static class SortByIndex implements Comparator<SegmentInfoAndLevel> {
+ public int compare(SegmentInfoAndLevel o1, SegmentInfoAndLevel o2) {
+ return o1.index - o2.index;
+ }
+ }
+
+ private static final SortByIndex sortByIndex = new SortByIndex();
+
/** Checks if any merges are now necessary and returns a
* {@link MergePolicy.MergeSpecification} if so. A merge
* is necessary when there are more than {@link
@@ -470,7 +521,7 @@ public abstract class LogMergePolicy extends MergePolicy {
// Compute levels, which is just log (base mergeFactor)
// of the size of each segment
- float[] levels = new float[numSegments];
+ SegmentInfoAndLevel[] levels = new SegmentInfoAndLevel[numSegments];
final float norm = (float) Math.log(mergeFactor);
for(int i=0;i<numSegments;i++) {
diff --git a/lucene/src/java/org/apache/lucene/index/SegmentDeletes.java b/lucene/src/java/org/apache/lucene/index/SegmentDeletes.java
deleted file mode 100644
--- a/lucene/src/java/org/apache/lucene/index/SegmentDeletes.java
+++ /dev/null
-  /* Rough logic: del docIDs are List<Integer>.  Say list
- allocates ~2X size (2*POINTER). Integer is OBJ_HEADER
- + int */
- final static int BYTES_PER_DEL_DOCID = 2*RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + RamUsageEstimator.NUM_BYTES_INT;
-
- /* Rough logic: HashMap has an array[Entry] w/ varying
- load factor (say 2 * POINTER). Entry is object w/
- Query key, Integer val, int hash, Entry next
- (OBJ_HEADER + 3*POINTER + INT). Query we often
- undercount (say 24 bytes). Integer is OBJ_HEADER + INT. */
- final static int BYTES_PER_DEL_QUERY = 5*RamUsageEstimator.NUM_BYTES_OBJECT_REF + 2*RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 2*RamUsageEstimator.NUM_BYTES_INT + 24;
-
- // TODO: many of the deletes stored here will map to
- // Integer.MAX_VALUE; we could be more efficient for this
- // case ie use a SortedSet not a SortedMap. But: Java's
- // SortedSet impls are simply backed by a Map so we won't
- // save anything unless we do something custom...
- final AtomicInteger numTermDeletes = new AtomicInteger();
- final SortedMap<Term,Integer> terms = new TreeMap<Term,Integer>();
- final Map<Query,Integer> queries = new HashMap<Query,Integer>();
- final List<Integer> docIDs = new ArrayList<Integer>();
-
- public static final Integer MAX_INT = Integer.valueOf(Integer.MAX_VALUE);
-
- final AtomicLong bytesUsed = new AtomicLong();
-
- private final static boolean VERBOSE_DELETES = false;
-
- @Override
- public String toString() {
- if (VERBOSE_DELETES) {
- return "SegmentDeletes [numTerms=" + numTermDeletes + ", terms=" + terms
- + ", queries=" + queries + ", docIDs=" + docIDs + ", bytesUsed="
- + bytesUsed + "]";
- } else {
- String s = "";
- if (numTermDeletes.get() != 0) {
- s += " " + numTermDeletes.get() + " deleted terms (unique count=" + terms.size() + ")";
- }
- if (queries.size() != 0) {
- s += " " + queries.size() + " deleted queries";
- }
- if (docIDs.size() != 0) {
- s += " " + docIDs.size() + " deleted docIDs";
- }
- if (bytesUsed.get() != 0) {
- s += " bytesUsed=" + bytesUsed.get();
- }
-
- return s;
- }
- }
-
- void update(SegmentDeletes in, boolean noLimit) {
- numTermDeletes.addAndGet(in.numTermDeletes.get());
- for (Map.Entry<Term,Integer> ent : in.terms.entrySet()) {
- final Term term = ent.getKey();
- if (!terms.containsKey(term)) {
- // only incr bytesUsed if this term wasn't already buffered:
- bytesUsed.addAndGet(BYTES_PER_DEL_TERM);
- }
- final Integer limit;
- if (noLimit) {
- limit = MAX_INT;
- } else {
- limit = ent.getValue();
- }
- terms.put(term, limit);
- }
-
- for (Map.Entry<Query,Integer> ent : in.queries.entrySet()) {
- final Query query = ent.getKey();
- if (!queries.containsKey(query)) {
- // only incr bytesUsed if this query wasn't already buffered:
- bytesUsed.addAndGet(BYTES_PER_DEL_QUERY);
- }
- final Integer limit;
- if (noLimit) {
- limit = MAX_INT;
- } else {
- limit = ent.getValue();
- }
- queries.put(query, limit);
- }
-
- // docIDs never move across segments and the docIDs
- // should already be cleared
- }
-
- public void addQuery(Query query, int docIDUpto) {
- Integer current = queries.put(query, docIDUpto);
- // increment bytes used only if the query wasn't added so far.
- if (current == null) {
- bytesUsed.addAndGet(BYTES_PER_DEL_QUERY);
- }
- }
-
- public void addDocID(int docID) {
- docIDs.add(Integer.valueOf(docID));
- bytesUsed.addAndGet(BYTES_PER_DEL_DOCID);
- }
-
- public void addTerm(Term term, int docIDUpto) {
- Integer current = terms.get(term);
- if (current != null && docIDUpto < current) {
- // Only record the new number if it's greater than the
- // current one. This is important because if multiple
- // threads are replacing the same doc at nearly the
- // same time, it's possible that one thread that got a
- // higher docID is scheduled before the other
- // threads. If we blindly replace than we can get
- // double-doc in the segment.
- return;
- }
-
- terms.put(term, Integer.valueOf(docIDUpto));
- numTermDeletes.incrementAndGet();
- if (current == null) {
- bytesUsed.addAndGet(BYTES_PER_DEL_TERM + term.bytes.length);
- }
- }
-
- void clear() {
- terms.clear();
- queries.clear();
- docIDs.clear();
- numTermDeletes.set(0);
- bytesUsed.set(0);
- }
-
- void clearDocIDs() {
- bytesUsed.addAndGet(-docIDs.size()*BYTES_PER_DEL_DOCID);
- docIDs.clear();
- }
-
- boolean any() {
- return terms.size() > 0 || docIDs.size() > 0 || queries.size() > 0;
- }
-}
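
The "Rough logic" comments in the file deleted above (and carried into BufferedDeletes) derive the per-delete RAM constants from object-layout guesses. The same arithmetic, spelled out under the usual 64-bit assumptions (8-byte references and object headers, 4-byte ints; RamUsageEstimator supplies the real values):

    public class DelBytesSketch {
      // Assumed 64-bit constants; RamUsageEstimator would supply these.
      static final int REF = 8, OBJ_HEADER = 8, INT = 4;

      // ArrayList<Integer>: ~2x ref per slot, plus one boxed Integer
      // (header + int) per buffered docID.
      static final int BYTES_PER_DEL_DOCID = 2 * REF + OBJ_HEADER + INT;

      // HashMap<Query,Integer>: ~2x ref for the table slot, an Entry
      // (header + 3 refs + hash int), an Integer value (header + int),
      // and ~24 bytes as a deliberate undercount for the Query key.
      static final int BYTES_PER_DEL_QUERY =
          5 * REF + 2 * OBJ_HEADER + 2 * INT + 24;

      public static void main(String[] args) {
        System.out.println("per docID delete: " + BYTES_PER_DEL_DOCID);  // 28
        System.out.println("per query delete: " + BYTES_PER_DEL_QUERY);  // 88
      }
    }
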
diff --git a/lucene/src/java/org/apache/lucene/index/SegmentInfo.java b/lucene/src/java/org/apache/lucene/index/SegmentInfo.java
index e668fb9a279..47d0b54795d 100644
--- a/lucene/src/java/org/apache/lucene/index/SegmentInfo.java
+++ b/lucene/src/java/org/apache/lucene/index/SegmentInfo.java
@@ -94,6 +94,10 @@ public final class SegmentInfo {
// specific versions afterwards ("3.0", "3.1" etc.).
// see Constants.LUCENE_MAIN_VERSION.
private String version;
+
+ // NOTE: only used in-RAM by IW to track buffered deletes;
+ // this is never written to/read from the Directory
+ private long bufferedDeletesGen;
public SegmentInfo(String name, int docCount, Directory dir, boolean isCompoundFile,
boolean hasProx, SegmentCodecs segmentCodecs, boolean hasVectors) {
@@ -679,5 +683,12 @@ public final class SegmentInfo {
public String getVersion() {
return version;
}
-
+
+ long getBufferedDeletesGen() {
+ return bufferedDeletesGen;
+ }
+
+ void setBufferedDeletesGen(long v) {
+ bufferedDeletesGen = v;
+ }
}
diff --git a/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java b/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
index 3bda6232951..cd6ebb358f0 100644
--- a/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
+++ b/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
@@ -91,8 +91,8 @@ public class TestMergeSchedulerExternal extends LuceneTestCase {
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMergeScheduler(new MyMergeScheduler())
- .setMaxBufferedDocs(2).setRAMBufferSizeMB(
- IndexWriterConfig.DISABLE_AUTO_FLUSH));
+ .setMaxBufferedDocs(2).setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
+ .setMergePolicy(newLogMergePolicy()));
LogMergePolicy logMP = (LogMergePolicy) writer.getConfig().getMergePolicy();
logMP.setMergeFactor(10);
for(int i=0;i<20;i++)
diff --git a/lucene/src/test/org/apache/lucene/TestSearch.java b/lucene/src/test/org/apache/lucene/TestSearch.java
index 7878e3a9f0e..619a60485a6 100644
--- a/lucene/src/test/org/apache/lucene/TestSearch.java
+++ b/lucene/src/test/org/apache/lucene/TestSearch.java
@@ -74,8 +74,11 @@ public class TestSearch extends LuceneTestCase {
Directory directory = newDirectory();
Analyzer analyzer = new MockAnalyzer();
IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
- LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
- lmp.setUseCompoundFile(useCompoundFile);
+ MergePolicy mp = conf.getMergePolicy();
+ if (mp instanceof LogMergePolicy) {
+ ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
+ }
+
IndexWriter writer = new IndexWriter(directory, conf);
String[] docs = {
@@ -90,6 +93,7 @@ public class TestSearch extends LuceneTestCase {
for (int j = 0; j < docs.length; j++) {
Document d = new Document();
d.add(newField("contents", docs[j], Field.Store.YES, Field.Index.ANALYZED));
+ d.add(newField("id", ""+j, Field.Index.NOT_ANALYZED_NO_NORMS));
writer.addDocument(d);
}
writer.close();
@@ -106,6 +110,10 @@ public class TestSearch extends LuceneTestCase {
};
ScoreDoc[] hits = null;
+ Sort sort = new Sort(new SortField[] {
+ SortField.FIELD_SCORE,
+ new SortField("id", SortField.INT)});
+
QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "contents", analyzer);
parser.setPhraseSlop(4);
for (int j = 0; j < queries.length; j++) {
@@ -115,7 +123,7 @@ public class TestSearch extends LuceneTestCase {
System.out.println("TEST: query=" + query);
}
- hits = searcher.search(query, null, 1000).scoreDocs;
+ hits = searcher.search(query, null, 1000, sort).scoreDocs;
out.println(hits.length + " total results");
for (int i = 0 ; i < hits.length && i < 10; i++) {
diff --git a/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java b/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java
index 08229ca220b..aec32f66285 100644
--- a/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java
+++ b/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java
@@ -80,8 +80,10 @@ public class TestSearchForDuplicates extends LuceneTestCase {
Directory directory = newDirectory();
Analyzer analyzer = new MockAnalyzer();
IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
- LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
- lmp.setUseCompoundFile(useCompoundFiles);
+ final MergePolicy mp = conf.getMergePolicy();
+ if (mp instanceof LogMergePolicy) {
+ ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFiles);
+ }
IndexWriter writer = new IndexWriter(directory, conf);
if (VERBOSE) {
System.out.println("TEST: now build index");
@@ -93,9 +95,6 @@ public class TestSearchForDuplicates extends LuceneTestCase {
for (int j = 0; j < MAX_DOCS; j++) {
Document d = new Document();
d.add(newField(PRIORITY_FIELD, HIGH_PRIORITY, Field.Store.YES, Field.Index.ANALYZED));
-
- // NOTE: this ID_FIELD produces no tokens since
- // MockAnalyzer discards numbers
d.add(newField(ID_FIELD, Integer.toString(j), Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(d);
}
@@ -112,7 +111,11 @@ public class TestSearchForDuplicates extends LuceneTestCase {
System.out.println("TEST: search query=" + query);
}
- ScoreDoc[] hits = searcher.search(query, null, MAX_DOCS).scoreDocs;
+ final Sort sort = new Sort(new SortField[] {
+ SortField.FIELD_SCORE,
+ new SortField(ID_FIELD, SortField.INT)});
+
+ ScoreDoc[] hits = searcher.search(query, null, MAX_DOCS, sort).scoreDocs;
printHits(out, hits, searcher);
checkHits(hits, MAX_DOCS, searcher);
@@ -127,7 +130,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
query = parser.parse(HIGH_PRIORITY + " OR " + MED_PRIORITY);
out.println("Query: " + query.toString(PRIORITY_FIELD));
- hits = searcher.search(query, null, MAX_DOCS).scoreDocs;
+ hits = searcher.search(query, null, MAX_DOCS, sort).scoreDocs;
printHits(out, hits, searcher);
checkHits(hits, MAX_DOCS, searcher);
@@ -149,7 +152,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
private void checkHits(ScoreDoc[] hits, int expectedCount, IndexSearcher searcher) throws IOException {
assertEquals("total results", expectedCount, hits.length);
for (int i = 0 ; i < hits.length; i++) {
- if ( i < 10 || (i > 94 && i < 105) ) {
+ if (i < 10 || (i > 94 && i < 105) ) {
Document d = searcher.doc(hits[i].doc);
assertEquals("check " + i, String.valueOf(i), d.get(ID_FIELD));
}
diff --git a/lucene/src/test/org/apache/lucene/index/MockRandomMergePolicy.java b/lucene/src/test/org/apache/lucene/index/MockRandomMergePolicy.java
new file mode 100644
index 00000000000..7630dc7d220
--- /dev/null
+++ b/lucene/src/test/org/apache/lucene/index/MockRandomMergePolicy.java
@@ -0,0 +1,93 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Random;
+import java.util.Set;
+
+import org.apache.lucene.util._TestUtil;
+
+public class MockRandomMergePolicy extends MergePolicy {
+ private final Random random;
+
+ public MockRandomMergePolicy(Random random) {
+ // fork a private random, since we are called
+ // unpredictably from threads:
+ this.random = new Random(random.nextLong());
+ }
+
+ @Override
+ public MergeSpecification findMerges(SegmentInfos segmentInfos) {
+ MergeSpecification mergeSpec = null;
+ //System.out.println("MRMP: findMerges sis=" + segmentInfos);
+
+ if (segmentInfos.size() > 1 && random.nextInt(5) == 3) {
+
+ SegmentInfos segmentInfos2 = new SegmentInfos();
+ segmentInfos2.addAll(segmentInfos);
+ Collections.shuffle(segmentInfos2, random);
+
+ // TODO: sometimes make more than 1 merge?
+ mergeSpec = new MergeSpecification();
+ final int segsToMerge = _TestUtil.nextInt(random, 1, segmentInfos.size());
+ mergeSpec.add(new OneMerge(segmentInfos2.range(0, segsToMerge)));
+ }
+
+ return mergeSpec;
+ }
+
+ @Override
+ public MergeSpecification findMergesForOptimize(
+ SegmentInfos segmentInfos, int maxSegmentCount, Set<SegmentInfo> segmentsToOptimize)
+ throws CorruptIndexException, IOException {
+
+ //System.out.println("MRMP: findMergesForOptimize sis=" + segmentInfos);
+ MergeSpecification mergeSpec = null;
+ if (segmentInfos.size() > 1 || (segmentInfos.size() == 1 && segmentInfos.info(0).hasDeletions())) {
+ mergeSpec = new MergeSpecification();
+ SegmentInfos segmentInfos2 = new SegmentInfos();
+ segmentInfos2.addAll(segmentInfos);
+ Collections.shuffle(segmentInfos2, random);
+ int upto = 0;
+ while(upto < segmentInfos.size()) {
+ int inc = _TestUtil.nextInt(random, 1, segmentInfos.size()-upto);
+ mergeSpec.add(new OneMerge(segmentInfos2.range(upto, upto+inc)));
+ upto += inc;
+ }
+ }
+ return mergeSpec;
+ }
+
+ @Override
+ public MergeSpecification findMergesToExpungeDeletes(
+ SegmentInfos segmentInfos)
+ throws CorruptIndexException, IOException {
+ return findMerges(segmentInfos);
+ }
+
+ @Override
+ public void close() {
+ }
+
+ @Override
+ public boolean useCompoundFile(SegmentInfos infos, SegmentInfo mergedInfo) throws IOException {
+ return random.nextBoolean();
+ }
+}
diff --git a/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java b/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java
index 52d5b7d7d46..69063eb5b20 100755
--- a/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java
+++ b/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java
@@ -452,6 +452,7 @@ public class TestAddIndexes extends LuceneTestCase {
setMaxBufferedDocs(100).
setMergePolicy(newLogMergePolicy(10))
);
+ writer.setInfoStream(VERBOSE ? System.out : null);
writer.addIndexes(aux);
assertEquals(30, writer.maxDoc());
assertEquals(3, writer.getSegmentCount());
diff --git a/lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java b/lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java
index b5f44752178..95da21de23e 100644
--- a/lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java
+++ b/lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java
@@ -131,6 +131,7 @@ public class TestAtomicUpdate extends LuceneTestCase {
.setMaxBufferedDocs(7);
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(3);
IndexWriter writer = new MockIndexWriter(directory, conf);
+ writer.setInfoStream(VERBOSE ? System.out : null);
// Establish a base index of 100 docs:
for(int i=0;i<100;i++) {
diff --git a/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java b/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
index 9c7c3bf0846..5e50c968b51 100644
--- a/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
+++ b/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
@@ -132,11 +132,15 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setMergePolicy(mp));
+ writer.setInfoStream(VERBOSE ? System.out : null);
Document doc = new Document();
Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
doc.add(idField);
for(int i=0;i<10;i++) {
+ if (VERBOSE) {
+ System.out.println("\nTEST: cycle");
+ }
for(int j=0;j<100;j++) {
idField.setValue(Integer.toString(i*100+j));
writer.addDocument(doc);
@@ -144,6 +148,9 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
int delID = i;
while(delID < 100*(1+i)) {
+ if (VERBOSE) {
+ System.out.println("TEST: del " + delID);
+ }
writer.deleteDocuments(new Term("id", ""+delID));
delID += 10;
}
diff --git a/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java b/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java
index 55592662b0b..6d90baa7ff5 100644
--- a/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java
+++ b/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java
@@ -119,6 +119,9 @@ public class TestDeletionPolicy extends LuceneTestCase {
}
public void onInit(List<? extends IndexCommit> commits) throws IOException {
+ if (VERBOSE) {
+ System.out.println("TEST: onInit");
+ }
verifyCommitOrder(commits);
numOnInit++;
// do no deletions on init
@@ -126,6 +129,9 @@ public class TestDeletionPolicy extends LuceneTestCase {
}
public void onCommit(List<? extends IndexCommit> commits) throws IOException {
+ if (VERBOSE) {
+ System.out.println("TEST: onCommit");
+ }
verifyCommitOrder(commits);
doDeletes(commits, true);
}
@@ -200,8 +206,10 @@ public class TestDeletionPolicy extends LuceneTestCase {
IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer())
.setIndexDeletionPolicy(policy);
- LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
- lmp.setUseCompoundFile(true);
+ MergePolicy mp = conf.getMergePolicy();
+ if (mp instanceof LogMergePolicy) {
+ ((LogMergePolicy) mp).setUseCompoundFile(true);
+ }
IndexWriter writer = new IndexWriter(dir, conf);
writer.close();
@@ -215,8 +223,10 @@ public class TestDeletionPolicy extends LuceneTestCase {
conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setOpenMode(
OpenMode.APPEND).setIndexDeletionPolicy(policy);
- lmp = (LogMergePolicy) conf.getMergePolicy();
- lmp.setUseCompoundFile(true);
+ mp = conf.getMergePolicy();
+ if (mp instanceof LogMergePolicy) {
+ ((LogMergePolicy) mp).setUseCompoundFile(true);
+ }
writer = new IndexWriter(dir, conf);
for(int j=0;j<17;j++) {
addDoc(writer);
@@ -280,6 +290,10 @@ public class TestDeletionPolicy extends LuceneTestCase {
public void testKeepAllDeletionPolicy() throws IOException {
for(int pass=0;pass<2;pass++) {
+ if (VERBOSE) {
+ System.out.println("TEST: cycle pass=" + pass);
+ }
+
boolean useCompoundFile = (pass % 2) != 0;
// Never deletes a commit
@@ -292,34 +306,48 @@ public class TestDeletionPolicy extends LuceneTestCase {
TEST_VERSION_CURRENT, new MockAnalyzer())
.setIndexDeletionPolicy(policy).setMaxBufferedDocs(10)
.setMergeScheduler(new SerialMergeScheduler());
- LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
- lmp.setUseCompoundFile(useCompoundFile);
- lmp.setMergeFactor(10);
+ MergePolicy mp = conf.getMergePolicy();
+ if (mp instanceof LogMergePolicy) {
+ ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
+ }
IndexWriter writer = new IndexWriter(dir, conf);
for(int i=0;i<107;i++) {
addDoc(writer);
}
writer.close();
- conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
- new MockAnalyzer()).setOpenMode(
- OpenMode.APPEND).setIndexDeletionPolicy(policy);
- lmp = (LogMergePolicy) conf.getMergePolicy();
- lmp.setUseCompoundFile(useCompoundFile);
- writer = new IndexWriter(dir, conf);
- writer.optimize();
- writer.close();
-
- assertEquals(1, policy.numOnInit);
+ final boolean isOptimized;
+ {
+ IndexReader r = IndexReader.open(dir);
+ isOptimized = r.isOptimized();
+ r.close();
+ }
+ if (!isOptimized) {
+ conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
+ new MockAnalyzer()).setOpenMode(
+ OpenMode.APPEND).setIndexDeletionPolicy(policy);
+ mp = conf.getMergePolicy();
+ if (mp instanceof LogMergePolicy) {
+ ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
+ }
+ if (VERBOSE) {
+ System.out.println("TEST: open writer for optimize");
+ }
+ writer = new IndexWriter(dir, conf);
+ writer.setInfoStream(VERBOSE ? System.out : null);
+ writer.optimize();
+ writer.close();
+ }
+ assertEquals(isOptimized ? 0:1, policy.numOnInit);
// If we are not auto committing then there should
// be exactly 2 commits (one per close above):
- assertEquals(2, policy.numOnCommit);
+ assertEquals(1 + (isOptimized ? 0:1), policy.numOnCommit);
// Test listCommits
      Collection<IndexCommit> commits = IndexReader.listCommits(dir);
// 2 from closing writer
- assertEquals(2, commits.size());
+ assertEquals(1 + (isOptimized ? 0:1), commits.size());
// Make sure we can open a reader on each commit:
for (final IndexCommit commit : commits) {
@@ -480,8 +508,10 @@ public class TestDeletionPolicy extends LuceneTestCase {
TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy)
.setMaxBufferedDocs(10);
- LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
- lmp.setUseCompoundFile(useCompoundFile);
+ MergePolicy mp = conf.getMergePolicy();
+ if (mp instanceof LogMergePolicy) {
+ ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
+ }
IndexWriter writer = new IndexWriter(dir, conf);
for(int i=0;i<107;i++) {
addDoc(writer);
@@ -490,8 +520,10 @@ public class TestDeletionPolicy extends LuceneTestCase {
conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy);
- lmp = (LogMergePolicy) conf.getMergePolicy();
- lmp.setUseCompoundFile(useCompoundFile);
+ mp = conf.getMergePolicy();
+ if (mp instanceof LogMergePolicy) {
+      ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
+ }
writer = new IndexWriter(dir, conf);
writer.optimize();
writer.close();
@@ -529,8 +561,10 @@ public class TestDeletionPolicy extends LuceneTestCase {
TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy)
.setMaxBufferedDocs(10);
- LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
- lmp.setUseCompoundFile(useCompoundFile);
+ MergePolicy mp = conf.getMergePolicy();
+ if (mp instanceof LogMergePolicy) {
+ ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
+ }
IndexWriter writer = new IndexWriter(dir, conf);
for(int i=0;i<17;i++) {
addDoc(writer);
@@ -586,24 +620,34 @@ public class TestDeletionPolicy extends LuceneTestCase {
IndexWriterConfig conf = newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy);
- LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
- lmp.setUseCompoundFile(useCompoundFile);
+ MergePolicy mp = conf.getMergePolicy();
+ if (mp instanceof LogMergePolicy) {
+ ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
+ }
IndexWriter writer = new IndexWriter(dir, conf);
writer.close();
Term searchTerm = new Term("content", "aaa");
Query query = new TermQuery(searchTerm);
-      assertTrue(dir.listAll().length > 5 + extraFileCount);
-
+ assertTrue("flush should have occurred and files should have been created", dir.listAll().length > 5 + extraFileCount);
+
// After rollback, IW should remove all files
writer.rollback();
assertEquals("no files should exist in the directory after rollback", 0, dir.listAll().length);
@@ -2846,7 +2844,7 @@ public class TestIndexWriter extends LuceneTestCase {
public void testNoUnwantedTVFiles() throws Exception {
Directory dir = newDirectory();
- IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.01));
+ IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.01).setMergePolicy(newLogMergePolicy()));
((LogMergePolicy) indexWriter.getConfig().getMergePolicy()).setUseCompoundFile(false);
String BIG="alskjhlaksjghlaksjfhalksvjepgjioefgjnsdfjgefgjhelkgjhqewlrkhgwlekgrhwelkgjhwelkgrhwlkejg";
diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
index c546b2a3cb3..3763e54035c 100644
--- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
+++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
@@ -684,7 +684,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
MockDirectoryWrapper dir = newDirectory();
IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)).setMaxBufferedDeleteTerms(2).setReaderPooling(false));
+ TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)).setMaxBufferedDeleteTerms(2).setReaderPooling(false).setMergePolicy(newLogMergePolicy()));
modifier.setInfoStream(VERBOSE ? System.out : null);
LogMergePolicy lmp = (LogMergePolicy) modifier.getConfig().getMergePolicy();
diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
index da56333555e..101812330e1 100644
--- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
+++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
@@ -288,6 +288,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
public void testExceptionDocumentsWriterInit() throws IOException {
Directory dir = newDirectory();
MockIndexWriter2 w = new MockIndexWriter2(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+ w.setInfoStream(VERBOSE ? System.out : null);
Document doc = new Document();
doc.add(newField("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
@@ -359,7 +360,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
public void testExceptionOnMergeInit() throws IOException {
Directory dir = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
- .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler());
+ .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()).setMergePolicy(newLogMergePolicy());
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
MockIndexWriter3 w = new MockIndexWriter3(dir, conf);
w.doFail = true;
@@ -527,7 +528,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
System.out.println("TEST: cycle i=" + i);
}
MockDirectoryWrapper dir = newDirectory();
- IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer));
+ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setMergePolicy(newLogMergePolicy()));
writer.setInfoStream(VERBOSE ? System.out : null);
// don't allow a sudden merge to clean up the deleted
@@ -844,7 +845,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
public void testOptimizeExceptions() throws IOException {
Directory startDir = newDirectory();
- IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2);
+ IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy());
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(100);
IndexWriter w = new IndexWriter(startDir, conf);
for(int i=0;i<27;i++)
diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
index ecb44b9e0ae..0e50c7815e5 100755
--- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
+++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
@@ -104,7 +104,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(10).
- setMergePolicy(newLogMergePolicy())
+ setMergePolicy(newInOrderLogMergePolicy())
);
for (int i = 0; i < 250; i++) {
diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java
index 6278b52c42c..57c5e26040d 100644
--- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java
+++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java
@@ -58,7 +58,7 @@ public class TestIndexWriterMerging extends LuceneTestCase
IndexWriter writer = new IndexWriter(
merged,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
- setMergePolicy(newLogMergePolicy(2))
+ setMergePolicy(newInOrderLogMergePolicy(2))
);
writer.setInfoStream(VERBOSE ? System.out : null);
writer.addIndexes(indexA, indexB);
@@ -102,7 +102,7 @@ public class TestIndexWriterMerging extends LuceneTestCase
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setOpenMode(OpenMode.CREATE).
setMaxBufferedDocs(2).
- setMergePolicy(newLogMergePolicy(2))
+ setMergePolicy(newInOrderLogMergePolicy(2))
);
for (int i = start; i < (start + numDocs); i++)
diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
index 5016f5245d4..27f29a49a8e 100644
--- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
+++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
@@ -232,7 +232,7 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
// Make a new dir that will enforce disk usage:
MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir));
- writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
+ writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND).setMergePolicy(newLogMergePolicy()));
IOException err = null;
writer.setInfoStream(VERBOSE ? System.out : null);
@@ -401,10 +401,10 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
// required is at most 2X total input size of
// indices so let's make sure:
assertTrue("max free Directory space required exceeded 1X the total input index sizes during " + methodName +
- ": max temp usage = " + (dir.getMaxUsedSizeInBytes()-startDiskUsage) + " bytes; " +
- "starting disk usage = " + startDiskUsage + " bytes; " +
- "input index disk usage = " + inputDiskUsage + " bytes",
- (dir.getMaxUsedSizeInBytes()-startDiskUsage) < 2*(startDiskUsage + inputDiskUsage));
+ ": max temp usage = " + (dir.getMaxUsedSizeInBytes()-startDiskUsage) + " bytes vs limit=" + (2*(startDiskUsage + inputDiskUsage)) +
+ "; starting disk usage = " + startDiskUsage + " bytes; " +
+ "input index disk usage = " + inputDiskUsage + " bytes",
+ (dir.getMaxUsedSizeInBytes()-startDiskUsage) < 2*(startDiskUsage + inputDiskUsage));
}
// Make sure we don't hit disk full during close below:
diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java
index 6758e89b5d2..60f5e49fbb0 100644
--- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java
+++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java
@@ -366,7 +366,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
int numDirs = 3;
Directory mainDir = newDirectory();
- IndexWriter mainWriter = new IndexWriter(mainDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+ IndexWriter mainWriter = new IndexWriter(mainDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newLogMergePolicy()));
_TestUtil.reduceOpenFiles(mainWriter);
mainWriter.setInfoStream(infoStream);
@@ -900,7 +900,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
public void testExpungeDeletes() throws Throwable {
Directory dir = newDirectory();
- final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+ final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newLogMergePolicy()));
Document doc = new Document();
doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
Field id = newField("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
diff --git a/lucene/src/test/org/apache/lucene/index/TestLazyBug.java b/lucene/src/test/org/apache/lucene/index/TestLazyBug.java
index 58681ab847f..13b668417c9 100755
--- a/lucene/src/test/org/apache/lucene/index/TestLazyBug.java
+++ b/lucene/src/test/org/apache/lucene/index/TestLazyBug.java
@@ -63,7 +63,7 @@ public class TestLazyBug extends LuceneTestCase {
Directory dir = newDirectory();
try {
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
+ TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newLogMergePolicy()));
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
lmp.setUseCompoundFile(false);
diff --git a/lucene/src/test/org/apache/lucene/index/TestMaxTermFrequency.java b/lucene/src/test/org/apache/lucene/index/TestMaxTermFrequency.java
index f3b7f2b3ca8..fe1f29be001 100644
--- a/lucene/src/test/org/apache/lucene/index/TestMaxTermFrequency.java
+++ b/lucene/src/test/org/apache/lucene/index/TestMaxTermFrequency.java
@@ -45,7 +45,7 @@ public class TestMaxTermFrequency extends LuceneTestCase {
super.setUp();
dir = newDirectory();
IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
- new MockAnalyzer(MockTokenizer.SIMPLE, true));
+ new MockAnalyzer(MockTokenizer.SIMPLE, true)).setMergePolicy(newInOrderLogMergePolicy());
config.setSimilarityProvider(new TestSimilarity());
RandomIndexWriter writer = new RandomIndexWriter(random, dir, config);
Document doc = new Document();
diff --git a/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java b/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
index 10dbc4f9fb0..425e790784d 100644
--- a/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
+++ b/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
@@ -69,7 +69,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase {
public void testSimpleSkip() throws IOException {
Directory dir = new CountingRAMDirectory(new RAMDirectory());
- IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new PayloadAnalyzer()).setCodecProvider(_TestUtil.alwaysCodec("Standard")));
+ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new PayloadAnalyzer()).setCodecProvider(_TestUtil.alwaysCodec("Standard")).setMergePolicy(newInOrderLogMergePolicy()));
Term term = new Term("test", "a");
for (int i = 0; i < 5000; i++) {
Document d1 = new Document();
diff --git a/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java b/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java
index e070fbdc187..f88bb18a286 100644
--- a/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java
+++ b/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
+import java.util.HashSet;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.Executors;
@@ -94,7 +95,7 @@ public class TestNRTThreads extends LuceneTestCase {
}
}
});
-
+
final IndexWriter writer = new IndexWriter(dir, conf);
if (VERBOSE) {
writer.setInfoStream(System.out);
@@ -105,10 +106,12 @@ public class TestNRTThreads extends LuceneTestCase {
((ConcurrentMergeScheduler) ms).setMaxThreadCount(1);
((ConcurrentMergeScheduler) ms).setMaxMergeCount(1);
}
+ /*
LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
if (lmp.getMergeFactor() > 5) {
lmp.setMergeFactor(5);
}
+ */
final int NUM_INDEX_THREADS = 2;
final int NUM_SEARCH_THREADS = 3;
@@ -118,7 +121,7 @@ public class TestNRTThreads extends LuceneTestCase {
final AtomicInteger addCount = new AtomicInteger();
final AtomicInteger delCount = new AtomicInteger();
-    final List<String> delIDs = Collections.synchronizedList(new ArrayList<String>());
+    final Set<String> delIDs = Collections.synchronizedSet(new HashSet<String>());
final long stopTime = System.currentTimeMillis() + RUN_TIME_SEC*1000;
Thread[] threads = new Thread[NUM_INDEX_THREADS];
@@ -142,20 +145,20 @@ public class TestNRTThreads extends LuceneTestCase {
}
if (random.nextBoolean()) {
if (VERBOSE) {
- //System.out.println(Thread.currentThread().getName() + ": add doc id:" + doc.get("id"));
+ System.out.println(Thread.currentThread().getName() + ": add doc id:" + doc.get("id"));
}
writer.addDocument(doc);
} else {
// we use update but it never replaces a
// prior doc
if (VERBOSE) {
- //System.out.println(Thread.currentThread().getName() + ": update doc id:" + doc.get("id"));
+ System.out.println(Thread.currentThread().getName() + ": update doc id:" + doc.get("id"));
}
writer.updateDocument(new Term("id", doc.get("id")), doc);
}
if (random.nextInt(5) == 3) {
if (VERBOSE) {
- //System.out.println(Thread.currentThread().getName() + ": buffer del id:" + doc.get("id"));
+ System.out.println(Thread.currentThread().getName() + ": buffer del id:" + doc.get("id"));
}
toDeleteIDs.add(doc.get("id"));
}
@@ -164,6 +167,9 @@ public class TestNRTThreads extends LuceneTestCase {
System.out.println(Thread.currentThread().getName() + ": apply " + toDeleteIDs.size() + " deletes");
}
for(String id : toDeleteIDs) {
+ if (VERBOSE) {
+ System.out.println(Thread.currentThread().getName() + ": del term=id:" + id);
+ }
writer.deleteDocuments(new Term("id", id));
}
final int count = delCount.addAndGet(toDeleteIDs.size());
@@ -347,12 +353,28 @@ public class TestNRTThreads extends LuceneTestCase {
final IndexReader r2 = writer.getReader();
final IndexSearcher s = new IndexSearcher(r2);
+ boolean doFail = false;
for(String id : delIDs) {
final TopDocs hits = s.search(new TermQuery(new Term("id", id)), 1);
if (hits.totalHits != 0) {
- fail("doc id=" + id + " is supposed to be deleted, but got docID=" + hits.scoreDocs[0].doc);
+ System.out.println("doc id=" + id + " is supposed to be deleted, but got docID=" + hits.scoreDocs[0].doc);
+ doFail = true;
}
}
+
+ final int endID = Integer.parseInt(docs.nextDoc().get("id"));
+    for(int id=0;id<endID;id++) {
     Map<String,Document> docs = new HashMap<String,Document>();
IndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE).setRAMBufferSizeMB(
- 0.1).setMaxBufferedDocs(maxBufferedDocs));
+ 0.1).setMaxBufferedDocs(maxBufferedDocs).setMergePolicy(newLogMergePolicy()));
w.setInfoStream(VERBOSE ? System.out : null);
w.commit();
LogMergePolicy lmp = (LogMergePolicy) w.getConfig().getMergePolicy();
@@ -206,7 +206,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
IndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE)
.setRAMBufferSizeMB(0.1).setMaxBufferedDocs(maxBufferedDocs).setMaxThreadStates(maxThreadStates)
- .setReaderPooling(doReaderPooling));
+ .setReaderPooling(doReaderPooling).setMergePolicy(newLogMergePolicy()));
w.setInfoStream(VERBOSE ? System.out : null);
LogMergePolicy lmp = (LogMergePolicy) w.getConfig().getMergePolicy();
lmp.setUseCompoundFile(false);
@@ -248,7 +248,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
  public static void indexSerial(Random random, Map<String,Document> docs, Directory dir) throws IOException {
- IndexWriter w = new IndexWriter(dir, LuceneTestCase.newIndexWriterConfig(random, TEST_VERSION_CURRENT, new MockAnalyzer()));
+ IndexWriter w = new IndexWriter(dir, LuceneTestCase.newIndexWriterConfig(random, TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newLogMergePolicy()));
// index all docs in a single thread
    Iterator<Document> iter = docs.values().iterator();
diff --git a/lucene/src/test/org/apache/lucene/search/BaseTestRangeFilter.java b/lucene/src/test/org/apache/lucene/search/BaseTestRangeFilter.java
index 332ba958698..6d21b13185c 100644
--- a/lucene/src/test/org/apache/lucene/search/BaseTestRangeFilter.java
+++ b/lucene/src/test/org/apache/lucene/search/BaseTestRangeFilter.java
@@ -124,14 +124,14 @@ public class BaseTestRangeFilter extends LuceneTestCase {
RandomIndexWriter writer = new RandomIndexWriter(random, index.index,
newIndexWriterConfig(random, TEST_VERSION_CURRENT, new MockAnalyzer())
- .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
+ .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)).setMergePolicy(newLogMergePolicy()));
+ _TestUtil.reduceOpenFiles(writer.w);
+
while(true) {
int minCount = 0;
int maxCount = 0;
- _TestUtil.reduceOpenFiles(writer.w);
-
for (int d = minId; d <= maxId; d++) {
idField.setValue(pad(d));
int r = index.allowNegativeRandomInts ? random.nextInt() : random
diff --git a/lucene/src/test/org/apache/lucene/search/TestBoolean2.java b/lucene/src/test/org/apache/lucene/search/TestBoolean2.java
index b4dfdbb6b6b..090eda2d18c 100644
--- a/lucene/src/test/org/apache/lucene/search/TestBoolean2.java
+++ b/lucene/src/test/org/apache/lucene/search/TestBoolean2.java
@@ -54,7 +54,7 @@ public class TestBoolean2 extends LuceneTestCase {
@BeforeClass
public static void beforeClass() throws Exception {
directory = newDirectory();
- RandomIndexWriter writer= new RandomIndexWriter(random, directory);
+ RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
for (int i = 0; i < docFields.length; i++) {
Document doc = new Document();
doc.add(newField(field, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
diff --git a/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java b/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
index e2462e9207c..1d2f8a6f2de 100644
--- a/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
@@ -85,7 +85,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
index = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, index,
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
- .setSimilarityProvider(sim));
+ .setSimilarityProvider(sim).setMergePolicy(newInOrderLogMergePolicy()));
// hed is the most important field, dek is secondary
diff --git a/lucene/src/test/org/apache/lucene/search/TestDocBoost.java b/lucene/src/test/org/apache/lucene/search/TestDocBoost.java
index c222d632bbd..f970477bda6 100644
--- a/lucene/src/test/org/apache/lucene/search/TestDocBoost.java
+++ b/lucene/src/test/org/apache/lucene/search/TestDocBoost.java
@@ -19,13 +19,14 @@ package org.apache.lucene.search;
import java.io.IOException;
-import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.*;
-import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
/** Document boost unit test.
*
@@ -36,7 +37,7 @@ public class TestDocBoost extends LuceneTestCase {
public void testDocBoost() throws Exception {
Directory store = newDirectory();
- RandomIndexWriter writer = new RandomIndexWriter(random, store);
+ RandomIndexWriter writer = new RandomIndexWriter(random, store, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
Fieldable f1 = newField("field", "word", Field.Store.YES, Field.Index.ANALYZED);
Fieldable f2 = newField("field", "word", Field.Store.YES, Field.Index.ANALYZED);
diff --git a/lucene/src/test/org/apache/lucene/search/TestExplanations.java b/lucene/src/test/org/apache/lucene/search/TestExplanations.java
index 2960a4e943b..5e712eac75f 100644
--- a/lucene/src/test/org/apache/lucene/search/TestExplanations.java
+++ b/lucene/src/test/org/apache/lucene/search/TestExplanations.java
@@ -68,7 +68,7 @@ public class TestExplanations extends LuceneTestCase {
public void setUp() throws Exception {
super.setUp();
directory = newDirectory();
- RandomIndexWriter writer= new RandomIndexWriter(random, directory);
+ RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
for (int i = 0; i < docFields.length; i++) {
Document doc = new Document();
doc.add(newField(KEY, ""+i, Field.Store.NO, Field.Index.NOT_ANALYZED));
diff --git a/lucene/src/test/org/apache/lucene/search/TestFieldCache.java b/lucene/src/test/org/apache/lucene/search/TestFieldCache.java
index b69efe6ac6a..93a440b2f7d 100644
--- a/lucene/src/test/org/apache/lucene/search/TestFieldCache.java
+++ b/lucene/src/test/org/apache/lucene/search/TestFieldCache.java
@@ -41,7 +41,7 @@ public class TestFieldCache extends LuceneTestCase {
public void setUp() throws Exception {
super.setUp();
directory = newDirectory();
- RandomIndexWriter writer= new RandomIndexWriter(random, directory);
+ RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
long theLong = Long.MAX_VALUE;
double theDouble = Double.MAX_VALUE;
byte theByte = Byte.MAX_VALUE;
diff --git a/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java b/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java
index bca34a1f594..da3be2fb26c 100644
--- a/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java
@@ -17,17 +17,19 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import java.util.BitSet;
+
+import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.DocIdBitSet;
-import java.util.BitSet;
+import org.apache.lucene.util.LuceneTestCase;
/**
* FilteredQuery JUnit tests.
@@ -49,7 +51,7 @@ public class TestFilteredQuery extends LuceneTestCase {
public void setUp() throws Exception {
super.setUp();
directory = newDirectory();
- RandomIndexWriter writer = new RandomIndexWriter (random, directory);
+ RandomIndexWriter writer = new RandomIndexWriter (random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
Document doc = new Document();
doc.add (newField("field", "one two three four five", Field.Store.YES, Field.Index.ANALYZED));
diff --git a/lucene/src/test/org/apache/lucene/search/TestFilteredSearch.java b/lucene/src/test/org/apache/lucene/search/TestFilteredSearch.java
index 6070ad8e7cb..bada9039631 100644
--- a/lucene/src/test/org/apache/lucene/search/TestFilteredSearch.java
+++ b/lucene/src/test/org/apache/lucene/search/TestFilteredSearch.java
@@ -24,7 +24,6 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
@@ -47,14 +46,14 @@ public class TestFilteredSearch extends LuceneTestCase {
Directory directory = newDirectory();
int[] filterBits = {1, 36};
SimpleDocIdSetFilter filter = new SimpleDocIdSetFilter(filterBits);
- IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+ IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
searchFiltered(writer, directory, filter, enforceSingleSegment);
// run the test on more than one segment
enforceSingleSegment = false;
// reset - it is stateful
filter.reset();
writer.close();
- writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10));
+ writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10).setMergePolicy(newInOrderLogMergePolicy()));
// we index 60 docs - this will create 6 segments
searchFiltered(writer, directory, filter, enforceSingleSegment);
writer.close();
diff --git a/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java b/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java
index 314089b6ee9..85f40abfe37 100644
--- a/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java
+++ b/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java
@@ -79,7 +79,7 @@ public class TestFuzzyQuery2 extends LuceneTestCase {
int terms = (int) Math.pow(2, bits);
Directory dir = newDirectory();
- RandomIndexWriter writer = new RandomIndexWriter(random, dir, new MockAnalyzer(MockTokenizer.KEYWORD, false));
+ RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.KEYWORD, false)).setMergePolicy(newInOrderLogMergePolicy()));
Document doc = new Document();
Field field = newField("field", "", Field.Store.NO, Field.Index.ANALYZED);
diff --git a/lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java b/lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java
index 394f387cb41..4f7356271ef 100644
--- a/lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java
@@ -40,7 +40,7 @@ public class TestMatchAllDocsQuery extends LuceneTestCase {
public void testQuery() throws Exception {
Directory dir = newDirectory();
IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(
- TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(2));
+ TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(2).setMergePolicy(newInOrderLogMergePolicy()));
addDoc("one", iw, 1f);
addDoc("two", iw, 20f);
addDoc("three four", iw, 300f);
diff --git a/lucene/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java b/lucene/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java
index 8c8cc1a76c3..7657d25dc78 100644
--- a/lucene/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java
+++ b/lucene/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java
@@ -38,7 +38,7 @@ public class TestMultiThreadTermVectors extends LuceneTestCase {
public void setUp() throws Exception {
super.setUp();
directory = newDirectory();
- IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+ IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
//writer.setUseCompoundFile(false);
//writer.infoStream = System.out;
for (int i = 0; i < numDocs; i++) {
diff --git a/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java b/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
index 18b1ded0a24..e143d730e34 100644
--- a/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
+++ b/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
@@ -55,7 +55,8 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
directory = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, directory,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
- .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
+ .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000))
+ .setMergePolicy(newInOrderLogMergePolicy()));
NumericField
field8 = new NumericField("field8", 8, Field.Store.YES, true),
diff --git a/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java b/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
index 27aebfce451..d3873fc6c12 100644
--- a/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
+++ b/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
@@ -52,7 +52,8 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
directory = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, directory,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
- .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
+ .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000))
+ .setMergePolicy(newInOrderLogMergePolicy()));
NumericField
field8 = new NumericField("field8", 8, Field.Store.YES, true),
diff --git a/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java b/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java
index a9e9c78a320..71621a3f80d 100644
--- a/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java
@@ -598,7 +598,7 @@ public class TestPhraseQuery extends LuceneTestCase {
Directory dir = newDirectory();
Analyzer analyzer = new MockAnalyzer();
- RandomIndexWriter w = new RandomIndexWriter(random, dir, analyzer);
+ RandomIndexWriter w = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).setMergePolicy(newInOrderLogMergePolicy()));
    List<List<String>> docs = new ArrayList<List<String>>();
Document d = new Document();
Field f = newField("f", "", Field.Store.NO, Field.Index.ANALYZED);
diff --git a/lucene/src/test/org/apache/lucene/search/TestSort.java b/lucene/src/test/org/apache/lucene/search/TestSort.java
index 9a5db0d1053..4428101c7d2 100644
--- a/lucene/src/test/org/apache/lucene/search/TestSort.java
+++ b/lucene/src/test/org/apache/lucene/search/TestSort.java
@@ -121,7 +121,7 @@ public class TestSort extends LuceneTestCase implements Serializable {
throws IOException {
Directory indexStore = newDirectory();
dirs.add(indexStore);
- RandomIndexWriter writer = new RandomIndexWriter(random, indexStore);
+ RandomIndexWriter writer = new RandomIndexWriter(random, indexStore, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
     for (int i=0; i
From: Michael McCandless
Date: Sat, 29 Jan 2011 19:51:30 +0000
Subject: [PATCH 051/185] LUCENE-1076: add CHANGES
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065096 13f79535-47bb-0310-9956-ffa450edef68
---
lucene/CHANGES.txt | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index e859ecd042c..1a29524de6e 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -150,6 +150,12 @@ Changes in Runtime Behavior
* LUCENE-2720: IndexWriter throws IndexFormatTooOldException on open, rather
than later when e.g. a merge starts. (Shai Erera, Mike McCandless, Uwe Schindler)
+* LUCENE-1076: The default merge policy is now able to merge
+ non-contiguous segments, which means docIDs no longer necessarily
+ say "in order". If this is a problem then you can use either of the
+ LogMergePolicy impls, and call setRequireContiguousMerge(true).
+ (Mike McCandless)
+
API Changes
* LUCENE-2302, LUCENE-1458, LUCENE-2111, LUCENE-2514: Terms are no longer
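Applying the note above outside the test framework: a minimal sketch, assuming the trunk API of this period, of configuring a writer so merges stay contiguous and docIDs keep their insertion order. Only setRequireContiguousMerge(true) comes from the CHANGES entry; the rest is ordinary IndexWriterConfig wiring.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LogDocMergePolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class ContiguousMergeSketch {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    LogDocMergePolicy mp = new LogDocMergePolicy(); // either LogMergePolicy impl works
    mp.setRequireContiguousMerge(true);             // the knob named in the CHANGES entry
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_CURRENT,
        new StandardAnalyzer(Version.LUCENE_CURRENT)).setMergePolicy(mp);
    IndexWriter writer = new IndexWriter(dir, conf);
    // ... add documents; only adjacent segments will be merged ...
    writer.close();
    dir.close();
  }
}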
From 185ad0c6310d2d5001840a177a2f8337aa2eeed1 Mon Sep 17 00:00:00 2001
From: Michael McCandless
Date: Sat, 29 Jan 2011 20:33:09 +0000
Subject: [PATCH 052/185] Java 1.5 can't @Override an interface
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065102 13f79535-47bb-0310-9956-ffa450edef68
---
.../src/java/org/apache/lucene/index/BufferedDeletesStream.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java b/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java
index b9a0184a0c1..555c78b67c2 100644
--- a/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java
+++ b/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java
@@ -130,7 +130,7 @@ class BufferedDeletesStream {
// Sorts SegmentInfos from smallest to biggest bufferedDelGen:
  private static final Comparator<SegmentInfo> sortByDelGen = new Comparator<SegmentInfo>() {
- @Override
+ // @Override -- not until Java 1.6
public int compare(SegmentInfo si1, SegmentInfo si2) {
final long cmp = si1.getBufferedDeletesGen() - si2.getBufferedDeletesGen();
if (cmp > 0) {
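Context for the one-line change above: javac under -source 1.5 accepts @Override only on methods that override a superclass method, so annotating a method that merely implements an interface is a compile error; Java 1.6 relaxed this. A self-contained illustration (plain Java, no Lucene types):

import java.util.Comparator;

public class OverrideOnInterfaceSketch {
  @Override                      // fine in 1.5 and 1.6: overrides Object.toString()
  public String toString() {
    return "sketch";
  }

  static final Comparator<String> BY_LENGTH = new Comparator<String>() {
    // @Override -- legal from Java 1.6 on; a 1.5 compiler rejects it here
    public int compare(String a, String b) {
      return a.length() - b.length();
    }
  };

  public static void main(String[] args) {
    System.out.println(BY_LENGTH.compare("ab", "abc")); // prints a negative number
  }
}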
From de55bd4de125d5d8b5d89c1b7b3ec2224f93f8ca Mon Sep 17 00:00:00 2001
From: Robert Muir
Date: Sun, 30 Jan 2011 13:03:21 +0000
Subject: [PATCH 053/185] LUCENE-2896: in advance(), don't skip when target doc
delta is very small
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065261 13f79535-47bb-0310-9956-ffa450edef68
---
.../standard/StandardPostingsReader.java | 20 ++++++-------------
1 file changed, 6 insertions(+), 14 deletions(-)
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReader.java b/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReader.java
index 707bb43dec1..0e53a99d536 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReader.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReader.java
@@ -375,13 +375,10 @@ public class StandardPostingsReader extends PostingsReaderBase {
@Override
public int advance(int target) throws IOException {
- // TODO: jump right to next() if target is < X away
- // from where we are now?
-
- if (limit >= skipInterval) {
+ if ((target - skipInterval) >= doc && limit >= skipInterval) {
// There are enough docs in the posting to have
- // skip data
+ // skip data, and it isn't too close.
if (skipper == null) {
// This is the first time this enum has ever been used for skipping -- do lazy init
@@ -528,13 +525,10 @@ public class StandardPostingsReader extends PostingsReaderBase {
//System.out.println("StandardR.D&PE advance target=" + target);
- // TODO: jump right to next() if target is < X away
- // from where we are now?
-
- if (limit >= skipInterval) {
+ if ((target - skipInterval) >= doc && limit >= skipInterval) {
// There are enough docs in the posting to have
- // skip data
+ // skip data, and it isn't too close
if (skipper == null) {
// This is the first time this enum has ever been used for skipping -- do lazy init
@@ -724,13 +718,11 @@ public class StandardPostingsReader extends PostingsReaderBase {
public int advance(int target) throws IOException {
//System.out.println("StandardR.D&PE advance seg=" + segment + " target=" + target + " this=" + this);
- // TODO: jump right to next() if target is < X away
- // from where we are now?
- if (limit >= skipInterval) {
+ if ((target - skipInterval) >= doc && limit >= skipInterval) {
// There are enough docs in the posting to have
- // skip data
+ // skip data, and it isn't too close
if (skipper == null) {
// This is the first time this enum has ever been used for skipping -- do lazy init
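The guard added to the three advance() methods above bypasses the skip-list machinery when the target is less than one skipInterval ahead of the current doc; in that window a plain next() scan is cheaper than repositioning through skip data. A self-contained toy sketch of the same heuristic, with a sorted int[] standing in for the on-disk postings and a binary search standing in for the real skip list (the SKIP_INTERVAL value here is arbitrary):

public class AdvanceSketch {
  static final int SKIP_INTERVAL = 4; // toy value; real skip intervals are larger
  final int[] docs;                   // sorted docIDs, stand-in for a posting list
  int pos = -1;
  int doc = -1;

  AdvanceSketch(int[] docs) { this.docs = docs; }

  int next() {
    pos++;
    return doc = (pos < docs.length ? docs[pos] : Integer.MAX_VALUE);
  }

  int advance(int target) {
    // Mirrors the patch: only consult skip data when the target is at least
    // one skip interval away AND the posting is long enough to have skip data.
    if ((target - SKIP_INTERVAL) >= doc && docs.length >= SKIP_INTERVAL) {
      int lo = pos + 1, hi = docs.length - 1;
      while (lo < hi) {               // binary search plays the skip list's role
        int mid = (lo + hi) >>> 1;
        if (docs[mid] < target) lo = mid + 1; else hi = mid;
      }
      pos = lo - 1;                   // land just before the first doc >= target
    }
    while (doc < target) {            // finish (or do everything) with next()
      next();
    }
    return doc;
  }

  public static void main(String[] args) {
    AdvanceSketch d = new AdvanceSketch(new int[]{1, 5, 9, 40, 80, 120});
    System.out.println(d.advance(2));   // 5: small delta, pure next() scan
    System.out.println(d.advance(100)); // 120: large delta, skip then scan
  }
}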
From 5629a2b96b37f87c90463438033df1ddf90f3518 Mon Sep 17 00:00:00 2001
From: Robert Muir
Date: Sun, 30 Jan 2011 13:28:41 +0000
Subject: [PATCH 054/185] add missing license headers where there are none, but
the JIRA box was checked
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065265 13f79535-47bb-0310-9956-ffa450edef68
---
.../org/apache/lucene/store/NativePosixUtil.cpp | 17 +++++++++++++++++
.../org/apache/lucene/search/regex/package.html | 17 +++++++++++++++++
.../core/builders/TestQueryTreeBuilder.java | 17 +++++++++++++++++
.../spatial/geometry/TestDistanceUnits.java | 17 +++++++++++++++++
.../projections/SinusoidalProjectorTest.java | 17 +++++++++++++++++
.../org/apache/lucene/util/packed/package.html | 16 ++++++++++++++++
.../index/TestSnapshotDeletionPolicy.java | 17 +++++++++++++++++
.../lucene/analysis/el/TestGreekStemmer.java | 17 +++++++++++++++++
8 files changed, 135 insertions(+)
diff --git a/lucene/contrib/misc/src/java/org/apache/lucene/store/NativePosixUtil.cpp b/lucene/contrib/misc/src/java/org/apache/lucene/store/NativePosixUtil.cpp
index 7ccf7e7b445..fa05142f877 100644
--- a/lucene/contrib/misc/src/java/org/apache/lucene/store/NativePosixUtil.cpp
+++ b/lucene/contrib/misc/src/java/org/apache/lucene/store/NativePosixUtil.cpp
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
 #include <jni.h>
 #include <fcntl.h>   // posix_fadvise, constants for open
 #include <string.h>   // strerror
diff --git a/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/package.html b/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/package.html
index c963307fecb..7b54ddb557e 100644
--- a/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/package.html
+++ b/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/package.html
@@ -1,3 +1,20 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
Regular expression Query.
diff --git a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/core/builders/TestQueryTreeBuilder.java b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/core/builders/TestQueryTreeBuilder.java
index f456d298b67..88ad9a21b16 100644
--- a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/core/builders/TestQueryTreeBuilder.java
+++ b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/core/builders/TestQueryTreeBuilder.java
@@ -1,5 +1,22 @@
package org.apache.lucene.queryParser.core.builders;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import junit.framework.Assert;
import org.apache.lucene.queryParser.core.QueryNodeException;
diff --git a/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/geometry/TestDistanceUnits.java b/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/geometry/TestDistanceUnits.java
index 509e7009799..f1758859cf9 100644
--- a/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/geometry/TestDistanceUnits.java
+++ b/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/geometry/TestDistanceUnits.java
@@ -1,5 +1,22 @@
package org.apache.lucene.spatial.geometry;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import org.apache.lucene.util.LuceneTestCase;
import org.junit.Test;
diff --git a/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/projections/SinusoidalProjectorTest.java b/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/projections/SinusoidalProjectorTest.java
index c10858cdb62..d764200b6c2 100644
--- a/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/projections/SinusoidalProjectorTest.java
+++ b/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/projections/SinusoidalProjectorTest.java
@@ -1,5 +1,22 @@
package org.apache.lucene.spatial.tier.projections;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import org.apache.lucene.util.LuceneTestCase;
import org.junit.Test;
diff --git a/lucene/src/java/org/apache/lucene/util/packed/package.html b/lucene/src/java/org/apache/lucene/util/packed/package.html
index b98aa234276..d1d0e298ea1 100644
--- a/lucene/src/java/org/apache/lucene/util/packed/package.html
+++ b/lucene/src/java/org/apache/lucene/util/packed/package.html
@@ -1,4 +1,20 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
diff --git a/lucene/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java b/lucene/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java
index e2899a66393..a4f138c40fa 100644
--- a/lucene/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java
+++ b/lucene/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java
@@ -1,5 +1,22 @@
package org.apache.lucene.index;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import java.util.Collection;
import java.util.Map;
import java.util.Random;
diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/el/TestGreekStemmer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/el/TestGreekStemmer.java
index 1b95c29b31a..8b0192e1555 100644
--- a/modules/analysis/common/src/test/org/apache/lucene/analysis/el/TestGreekStemmer.java
+++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/el/TestGreekStemmer.java
@@ -1,5 +1,22 @@
package org.apache.lucene.analysis.el;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
From 7c24712e89cbbb65375e7c5d3a7ee11eac8759f7 Mon Sep 17 00:00:00 2001
From: Robert Muir
Date: Sun, 30 Jan 2011 13:52:06 +0000
Subject: [PATCH 055/185] LUCENE-1866: add missing dirs to rat report
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065272 13f79535-47bb-0310-9956-ffa450edef68
---
solr/build.xml | 2 ++
1 file changed, 2 insertions(+)
diff --git a/solr/build.xml b/solr/build.xml
index c3ef9682c18..9214ee4d6ce 100644
--- a/solr/build.xml
+++ b/solr/build.xml
@@ -956,6 +956,8 @@
description="runs the tasks over src/java excluding the license directory">
+
+
From 24cfce7c1aad29f5caa1858c91caafa71d4846c4 Mon Sep 17 00:00:00 2001
From: Robert Muir
Date: Sun, 30 Jan 2011 14:17:46 +0000
Subject: [PATCH 056/185] add missing license headers where there are none, but
the JIRA box was checked (solr)
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065286 13f79535-47bb-0310-9956-ffa450edef68
---
.../common/params/QueryElevationParams.java | 17 +++++++++++++++++
.../handler/admin/SolrInfoMBeanHandler.java | 17 +++++++++++++++++
.../solr/search/function/TFValueSource.java | 17 +++++++++++++++++
.../distance/StringDistanceFunction.java | 17 +++++++++++++++++
.../solr/client/solrj/impl/CloudSolrServer.java | 17 +++++++++++++++++
.../solr/BaseDistributedSearchTestCase.java | 17 +++++++++++++++++
.../test/org/apache/solr/TestPluginEnable.java | 17 +++++++++++++++++
.../solr/client/solrj/SolrJettyTestBase.java | 17 +++++++++++++++++
.../TestLegacyMergeSchedulerPolicyConfig.java | 17 +++++++++++++++++
.../org/apache/solr/core/TestPropInject.java | 17 +++++++++++++++++
.../apache/solr/core/TestXIncludeConfig.java | 17 +++++++++++++++++
.../DistributedSpellCheckComponentTest.java | 17 +++++++++++++++++
.../DistributedTermsComponentTest.java | 17 +++++++++++++++++
.../org/apache/solr/search/TestLRUCache.java | 17 +++++++++++++++++
14 files changed, 238 insertions(+)
diff --git a/solr/src/common/org/apache/solr/common/params/QueryElevationParams.java b/solr/src/common/org/apache/solr/common/params/QueryElevationParams.java
index 517eec4d7b8..0c15b7b2acf 100644
--- a/solr/src/common/org/apache/solr/common/params/QueryElevationParams.java
+++ b/solr/src/common/org/apache/solr/common/params/QueryElevationParams.java
@@ -1,5 +1,22 @@
package org.apache.solr.common.params;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
/**
* Parameters used with the QueryElevationComponent
diff --git a/solr/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java b/solr/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java
index aa8d94c22d9..c5fef3c14e7 100644
--- a/solr/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java
+++ b/solr/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java
@@ -1,5 +1,22 @@
package org.apache.solr.handler.admin;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import org.apache.solr.handler.RequestHandlerBase;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.core.SolrInfoMBean;
diff --git a/solr/src/java/org/apache/solr/search/function/TFValueSource.java b/solr/src/java/org/apache/solr/search/function/TFValueSource.java
index c37a4949c18..b2a776e5a87 100755
--- a/solr/src/java/org/apache/solr/search/function/TFValueSource.java
+++ b/solr/src/java/org/apache/solr/search/function/TFValueSource.java
@@ -1,5 +1,22 @@
package org.apache.solr.search.function;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import org.apache.lucene.index.*;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.DocIdSetIterator;
diff --git a/solr/src/java/org/apache/solr/search/function/distance/StringDistanceFunction.java b/solr/src/java/org/apache/solr/search/function/distance/StringDistanceFunction.java
index f67639c9d28..222ef314b7a 100644
--- a/solr/src/java/org/apache/solr/search/function/distance/StringDistanceFunction.java
+++ b/solr/src/java/org/apache/solr/search/function/distance/StringDistanceFunction.java
@@ -1,5 +1,22 @@
package org.apache.solr.search.function.distance;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.spell.StringDistance;
import org.apache.solr.search.function.DocValues;
diff --git a/solr/src/solrj/org/apache/solr/client/solrj/impl/CloudSolrServer.java b/solr/src/solrj/org/apache/solr/client/solrj/impl/CloudSolrServer.java
index 60955330996..1268c402589 100644
--- a/solr/src/solrj/org/apache/solr/client/solrj/impl/CloudSolrServer.java
+++ b/solr/src/solrj/org/apache/solr/client/solrj/impl/CloudSolrServer.java
@@ -1,5 +1,22 @@
package org.apache.solr.client.solrj.impl;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import java.io.IOException;
import java.net.MalformedURLException;
import java.util.ArrayList;
diff --git a/solr/src/test/org/apache/solr/BaseDistributedSearchTestCase.java b/solr/src/test/org/apache/solr/BaseDistributedSearchTestCase.java
index 33839c86c3d..1dc858ad0ef 100644
--- a/solr/src/test/org/apache/solr/BaseDistributedSearchTestCase.java
+++ b/solr/src/test/org/apache/solr/BaseDistributedSearchTestCase.java
@@ -1,5 +1,22 @@
package org.apache.solr;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
diff --git a/solr/src/test/org/apache/solr/TestPluginEnable.java b/solr/src/test/org/apache/solr/TestPluginEnable.java
index 443c462869a..b390ddf37bb 100644
--- a/solr/src/test/org/apache/solr/TestPluginEnable.java
+++ b/solr/src/test/org/apache/solr/TestPluginEnable.java
@@ -1,5 +1,22 @@
package org.apache.solr;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import org.apache.solr.client.solrj.SolrServerException;
import org.junit.BeforeClass;
import org.junit.Test;
diff --git a/solr/src/test/org/apache/solr/client/solrj/SolrJettyTestBase.java b/solr/src/test/org/apache/solr/client/solrj/SolrJettyTestBase.java
index 145317efeae..ad8a70aa299 100755
--- a/solr/src/test/org/apache/solr/client/solrj/SolrJettyTestBase.java
+++ b/solr/src/test/org/apache/solr/client/solrj/SolrJettyTestBase.java
@@ -1,5 +1,22 @@
package org.apache.solr.client.solrj;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import java.io.File;
import java.io.IOException;
diff --git a/solr/src/test/org/apache/solr/core/TestLegacyMergeSchedulerPolicyConfig.java b/solr/src/test/org/apache/solr/core/TestLegacyMergeSchedulerPolicyConfig.java
index f0bd861aaa2..e89815cecce 100644
--- a/solr/src/test/org/apache/solr/core/TestLegacyMergeSchedulerPolicyConfig.java
+++ b/solr/src/test/org/apache/solr/core/TestLegacyMergeSchedulerPolicyConfig.java
@@ -1,5 +1,22 @@
package org.apache.solr.core;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import java.io.IOException;
import org.apache.lucene.index.IndexWriter;
diff --git a/solr/src/test/org/apache/solr/core/TestPropInject.java b/solr/src/test/org/apache/solr/core/TestPropInject.java
index c84e13fe877..858388a7ac0 100644
--- a/solr/src/test/org/apache/solr/core/TestPropInject.java
+++ b/solr/src/test/org/apache/solr/core/TestPropInject.java
@@ -1,5 +1,22 @@
package org.apache.solr.core;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import java.io.IOException;
import org.apache.lucene.index.ConcurrentMergeScheduler;
diff --git a/solr/src/test/org/apache/solr/core/TestXIncludeConfig.java b/solr/src/test/org/apache/solr/core/TestXIncludeConfig.java
index 95b03bfb327..905685abeb8 100644
--- a/solr/src/test/org/apache/solr/core/TestXIncludeConfig.java
+++ b/solr/src/test/org/apache/solr/core/TestXIncludeConfig.java
@@ -1,5 +1,22 @@
package org.apache.solr.core;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import java.io.File;
import org.apache.commons.io.FileUtils;
diff --git a/solr/src/test/org/apache/solr/handler/component/DistributedSpellCheckComponentTest.java b/solr/src/test/org/apache/solr/handler/component/DistributedSpellCheckComponentTest.java
index 75a968d4f7f..ed0edbb97b6 100644
--- a/solr/src/test/org/apache/solr/handler/component/DistributedSpellCheckComponentTest.java
+++ b/solr/src/test/org/apache/solr/handler/component/DistributedSpellCheckComponentTest.java
@@ -1,5 +1,22 @@
package org.apache.solr.handler.component;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import org.apache.solr.BaseDistributedSearchTestCase;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.common.params.ModifiableSolrParams;
diff --git a/solr/src/test/org/apache/solr/handler/component/DistributedTermsComponentTest.java b/solr/src/test/org/apache/solr/handler/component/DistributedTermsComponentTest.java
index ac3b7094c1e..bcf91c268d3 100644
--- a/solr/src/test/org/apache/solr/handler/component/DistributedTermsComponentTest.java
+++ b/solr/src/test/org/apache/solr/handler/component/DistributedTermsComponentTest.java
@@ -1,5 +1,22 @@
package org.apache.solr.handler.component;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import org.apache.solr.BaseDistributedSearchTestCase;
/**
diff --git a/solr/src/test/org/apache/solr/search/TestLRUCache.java b/solr/src/test/org/apache/solr/search/TestLRUCache.java
index 7439704f075..7ff5b762085 100644
--- a/solr/src/test/org/apache/solr/search/TestLRUCache.java
+++ b/solr/src/test/org/apache/solr/search/TestLRUCache.java
@@ -1,5 +1,22 @@
package org.apache.solr.search;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import java.io.IOException;
import java.io.Serializable;
import java.util.HashMap;
From 6569aa5da376c73ebff0fc93fdf7f073e152c374 Mon Sep 17 00:00:00 2001
From: Yonik Seeley
Date: Sun, 30 Jan 2011 15:03:01 +0000
Subject: [PATCH 057/185] add ASL
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065302 13f79535-47bb-0310-9956-ffa450edef68
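Background: ASF policy requires each committed source file to carry the standard Apache License 2.0 header. A minimal sketch of the usual layout in a Java source file (the file and package names here are invented for illustration, not part of this patch):

    /*
     * Licensed to the Apache Software Foundation (ASF) under one or more
     * contributor license agreements.  See the NOTICE file distributed with
     * this work for additional information regarding copyright ownership.
     * (remainder of the standard ASL 2.0 notice)
     */
    package org.example.demo;   // hypothetical package, for illustration only

    /** Normal class javadoc follows the license header. */
    public class Demo {
    }

The header is a plain block comment, so the compiler ignores it wherever it sits; the files below use a /**-style comment, and the earlier Solr patch placed it after the package statement, which compiles the same way.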
---
.../byTask/feeds/LongToEnglishQueryMaker.java | 17 +++++++++++++++++
.../apache/solr/core/RefCntRamDirectory.java | 17 +++++++++++++++++
.../request/PerSegmentSingleValuedFaceting.java | 17 +++++++++++++++++
.../solr/analysis/TestMultiWordSynonyms.java | 17 +++++++++++++++++
.../velocity/VelocityResponseWriterTest.java | 17 +++++++++++++++++
5 files changed, 85 insertions(+)
diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishQueryMaker.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishQueryMaker.java
index 6abe9fcccd9..fdee2882518 100644
--- a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishQueryMaker.java
+++ b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishQueryMaker.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package org.apache.lucene.benchmark.byTask.feeds;
import org.apache.lucene.analysis.Analyzer;
diff --git a/solr/src/java/org/apache/solr/core/RefCntRamDirectory.java b/solr/src/java/org/apache/solr/core/RefCntRamDirectory.java
index e3eaaf3d9e5..e9659814374 100644
--- a/solr/src/java/org/apache/solr/core/RefCntRamDirectory.java
+++ b/solr/src/java/org/apache/solr/core/RefCntRamDirectory.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package org.apache.solr.core;
import java.io.IOException;
diff --git a/solr/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java b/solr/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java
index 30f99a6f0a6..56015fde2cf 100755
--- a/solr/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java
+++ b/solr/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package org.apache.solr.request;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
diff --git a/solr/src/test/org/apache/solr/analysis/TestMultiWordSynonyms.java b/solr/src/test/org/apache/solr/analysis/TestMultiWordSynonyms.java
index e4f71c57249..f0dd0782567 100644
--- a/solr/src/test/org/apache/solr/analysis/TestMultiWordSynonyms.java
+++ b/solr/src/test/org/apache/solr/analysis/TestMultiWordSynonyms.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package org.apache.solr.analysis;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
diff --git a/solr/src/test/org/apache/solr/velocity/VelocityResponseWriterTest.java b/solr/src/test/org/apache/solr/velocity/VelocityResponseWriterTest.java
index 911ca19b1ad..f8c61e5a8e1 100644
--- a/solr/src/test/org/apache/solr/velocity/VelocityResponseWriterTest.java
+++ b/solr/src/test/org/apache/solr/velocity/VelocityResponseWriterTest.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package org.apache.solr.velocity;
import org.apache.solr.response.SolrQueryResponse;
From d1a5ca1460643c62a4761c5d3139679fbb5bc80d Mon Sep 17 00:00:00 2001
From: Robert Muir
Date: Sun, 30 Jan 2011 15:10:15 +0000
Subject: [PATCH 058/185] add missing @Override and @Deprecated annotations
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065304 13f79535-47bb-0310-9956-ffa450edef68
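@Override makes the compiler verify that the annotated method really overrides (or, since Java 6, implements) a supertype method, so a misspelled name or mismatched signature becomes a compile error instead of silently introducing a new, unused method; @Deprecated flags an API so callers get a compiler warning. A minimal illustrative sketch (these classes are hypothetical, not from this patch):

    class Base {
      public String label() { return "base"; }
    }

    class Sub extends Base {
      @Override
      public String label() { return "sub"; } // OK: genuinely overrides Base.label()

      // @Override
      // public String lable() { return "?"; } // with @Override, this typo would fail to compile

      /** Kept for back-compat; use label() instead. */
      @Deprecated
      public String name() { return label(); }
    }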
---
.../InstantiatedDocsAndPositionsEnum.java | 3 +
.../apache/lucene/store/WindowsDirectory.java | 5 +
.../precedence/TestPrecedenceQueryParser.java | 4 +
.../index/FreqProxTermsWriterPerField.java | 1 +
.../org/apache/lucene/index/MultiReader.java | 1 +
.../lucene/index/PerFieldCodecWrapper.java | 1 +
.../index/TermVectorsTermsWriterPerField.java | 1 +
.../lucene/index/codecs/BlockTermsReader.java | 1 +
.../codecs/preflex/SegmentTermPositions.java | 6 ++
.../index/codecs/preflex/TermInfosReader.java | 3 +
.../index/codecs/sep/IntIndexInput.java | 1 +
.../codecs/sep/SepPostingsReaderImpl.java | 1 +
.../simpletext/SimpleTextFieldsReader.java | 1 +
.../standard/StandardPostingsReader.java | 9 ++
.../apache/lucene/search/FuzzyTermsEnum.java | 1 +
.../lucene/search/TotalHitCountCollector.java | 4 +
.../lucene/search/cache/EntryCreator.java | 1 +
.../search/function/MultiValueSource.java | 1 +
.../search/payloads/PayloadNearQuery.java | 1 +
.../spans/SpanNearPayloadCheckQuery.java | 1 +
.../search/spans/SpanPayloadCheckQuery.java | 1 +
.../java/org/apache/lucene/util/BytesRef.java | 1 +
.../lucene/util/DoubleBarrelLRUCache.java | 1 +
.../java/org/apache/lucene/util/IntsRef.java | 1 +
.../org/apache/lucene/util/ReaderUtil.java | 1 +
.../util/automaton/BasicOperations.java | 3 +
.../lucene/util/automaton/SortedIntSet.java | 6 ++
.../lucene/util/automaton/UTF32ToUTF8.java | 1 +
.../util/automaton/fst/PairOutputs.java | 1 +
.../apache/lucene/util/packed/Packed32.java | 1 +
.../apache/lucene/util/packed/Packed64.java | 1 +
.../lucene/util/packed/PackedWriter.java | 1 +
.../org/apache/lucene/TestAssertions.java | 11 +++
.../org/apache/lucene/TestExternalCodecs.java | 2 +
.../TestCharTermAttributeImpl.java | 1 +
.../lucene/index/TestDocsAndPositions.java | 1 +
.../lucene/index/TestFilterIndexReader.java | 4 +
.../lucene/index/TestIndexWriterReader.java | 1 +
.../lucene/index/TestMultiLevelSkipList.java | 2 +
.../lucene/index/TestPerSegmentDeletes.java | 1 +
.../lucene/search/TestAutomatonQuery.java | 2 +
.../search/TestAutomatonQueryUnicode.java | 2 +
.../apache/lucene/search/TestRegexpQuery.java | 2 +
.../search/cache/TestEntryCreators.java | 1 +
.../util/LuceneJUnitDividingSelector.java | 3 +
.../apache/lucene/util/TestBytesRefHash.java | 1 +
.../lucene/util/TestDoubleBarrelLRUCache.java | 6 ++
.../util/TestRecyclingByteBlockAllocator.java | 1 +
.../lucene/util/automaton/fst/TestFSTs.java | 2 +
.../charfilter/HTMLStripCharFilter.java | 3 +
.../commongrams/CommonGramsFilter.java | 1 +
.../commongrams/CommonGramsQueryFilter.java | 2 +
.../lucene/analysis/fa/PersianCharFilter.java | 1 +
.../pattern/PatternReplaceCharFilter.java | 2 +
.../lucene/analysis/synonym/SynonymMap.java | 1 +
.../TestRemoveDuplicatesTokenFilter.java | 1 +
.../miscellaneous/TestTrimFilter.java | 1 +
.../TestWordDelimiterFilter.java | 3 +
.../analysis/synonym/TestSynonymFilter.java | 1 +
.../carrot2/CarrotClusteringEngine.java | 2 +
.../carrot2/LuceneLanguageModelFactory.java | 3 +-
.../MockDocumentClusteringEngine.java | 2 +
.../dataimport/MailEntityProcessor.java | 2 +
.../dataimport/TikaEntityProcessor.java | 1 +
.../dataimport/TestMailEntityProcessor.java | 4 +
.../BinContentStreamDataSource.java | 3 +
.../handler/dataimport/BinFileDataSource.java | 3 +
.../handler/dataimport/BinURLDataSource.java | 3 +
.../dataimport/CachedSqlEntityProcessor.java | 3 +
.../handler/dataimport/ClobTransformer.java | 1 +
.../dataimport/ContentStreamDataSource.java | 3 +
.../solr/handler/dataimport/ContextImpl.java | 21 ++++
.../solr/handler/dataimport/DataConfig.java | 1 +
.../solr/handler/dataimport/DataImporter.java | 1 +
.../dataimport/DateFormatTransformer.java | 1 +
.../solr/handler/dataimport/DebugLogger.java | 5 +
.../solr/handler/dataimport/DocBuilder.java | 1 +
.../dataimport/EntityProcessorBase.java | 6 ++
.../dataimport/EntityProcessorWrapper.java | 8 ++
.../solr/handler/dataimport/EvaluatorBag.java | 5 +
.../dataimport/FieldReaderDataSource.java | 3 +
.../dataimport/FieldStreamDataSource.java | 3 +
.../handler/dataimport/FileDataSource.java | 3 +
.../dataimport/FileListEntityProcessor.java | 2 +
.../handler/dataimport/JdbcDataSource.java | 4 +
.../dataimport/LineEntityProcessor.java | 2 +
.../handler/dataimport/LogTransformer.java | 1 +
.../handler/dataimport/MockDataSource.java | 3 +
.../dataimport/NumberFormatTransformer.java | 1 +
.../dataimport/PlainTextEntityProcessor.java | 2 +
.../handler/dataimport/RegexTransformer.java | 1 +
.../handler/dataimport/ScriptTransformer.java | 1 +
.../dataimport/SqlEntityProcessor.java | 5 +
.../dataimport/TemplateTransformer.java | 1 +
.../handler/dataimport/URLDataSource.java | 3 +
.../dataimport/VariableResolverImpl.java | 2 +
.../dataimport/XPathEntityProcessor.java | 3 +
.../AbstractDataImportHandlerTestCase.java | 27 ++++++
.../TestCachedSqlEntityProcessor.java | 2 +
.../TestContentStreamDataSource.java | 2 +
.../handler/dataimport/TestDocBuilder.java | 6 ++
.../handler/dataimport/TestDocBuilder2.java | 2 +
.../dataimport/TestEntityProcessorBase.java | 2 +
.../handler/dataimport/TestErrorHandling.java | 4 +
.../handler/dataimport/TestEvaluatorBag.java | 1 +
.../dataimport/TestJdbcDataSource.java | 2 +
.../dataimport/TestLineEntityProcessor.java | 3 +
.../TestPlainTextEntityProcessor.java | 3 +
.../dataimport/TestSqlEntityProcessor.java | 5 +
.../dataimport/TestSqlEntityProcessor2.java | 1 +
.../dataimport/TestVariableResolver.java | 1 +
.../dataimport/TestXPathEntityProcessor.java | 3 +
.../extraction/ExtractingDocumentLoader.java | 1 +
.../extraction/ExtractingRequestHandler.java | 1 +
.../handler/ExtractingRequestHandlerTest.java | 1 +
.../processor/UIMAUpdateRequestProcessor.java | 1 +
.../UIMAUpdateRequestProcessorFactory.java | 1 +
.../UIMAUpdateRequestProcessorTest.java | 1 +
.../org/apache/solr/common/SolrDocument.java | 2 +
.../apache/solr/common/SolrDocumentList.java | 1 +
.../apache/solr/common/cloud/CloudState.java | 1 +
.../apache/solr/common/cloud/ZkNodeProps.java | 1 +
.../solr/common/params/FacetParams.java | 3 +
.../solr/common/util/ConcurrentLRUCache.java | 7 ++
.../solr/common/util/FastOutputStream.java | 1 +
.../apache/solr/common/util/NamedList.java | 3 +
.../solr/common/util/RegexFileFilter.java | 1 +
.../ArabicLetterTokenizerFactory.java | 1 +
.../solr/analysis/ChineseFilterFactory.java | 1 +
.../analysis/ChineseTokenizerFactory.java | 1 +
...tionaryCompoundWordTokenFilterFactory.java | 1 +
...enationCompoundWordTokenFilterFactory.java | 1 +
.../analysis/IndonesianStemFilterFactory.java | 1 +
.../NumericPayloadTokenFilterFactory.java | 1 +
.../PatternReplaceCharFilterFactory.java | 1 +
.../solr/analysis/PositionFilterFactory.java | 1 +
.../solr/analysis/ShingleFilterFactory.java | 1 +
.../apache/solr/analysis/SolrAnalyzer.java | 1 +
.../apache/solr/analysis/TokenizerChain.java | 1 +
.../org/apache/solr/cloud/SolrZkServer.java | 1 +
.../solr/cloud/ZkSolrResourceLoader.java | 3 +
.../solr/core/AbstractSolrEventListener.java | 1 +
.../apache/solr/core/RAMDirectoryFactory.java | 1 +
.../apache/solr/core/RefCntRamDirectory.java | 1 +
.../solr/core/RunExecutableListener.java | 3 +
.../java/org/apache/solr/core/SolrCore.java | 2 +
.../solr/core/StandardDirectoryFactory.java | 1 +
.../solr/core/StandardIndexReaderFactory.java | 1 +
.../handler/AnalysisRequestHandlerBase.java | 1 +
.../handler/BinaryUpdateRequestHandler.java | 6 ++
.../solr/handler/CSVRequestHandler.java | 7 ++
.../handler/ContentStreamHandlerBase.java | 1 +
.../DocumentAnalysisRequestHandler.java | 1 +
.../handler/FieldAnalysisRequestHandler.java | 1 +
.../handler/JsonUpdateRequestHandler.java | 1 +
.../solr/handler/ReplicationHandler.java | 6 ++
.../org/apache/solr/handler/SnapPuller.java | 1 +
.../org/apache/solr/handler/SnapShooter.java | 1 +
.../org/apache/solr/handler/XMLLoader.java | 1 +
.../solr/handler/XmlUpdateRequestHandler.java | 1 +
.../handler/admin/SolrInfoMBeanHandler.java | 5 +
.../handler/component/DebugComponent.java | 1 +
.../handler/component/FacetComponent.java | 1 +
.../handler/component/HighlightComponent.java | 1 +
.../component/QueryElevationComponent.java | 7 ++
.../solr/handler/component/ShardDoc.java | 4 +
.../solr/handler/component/ShardRequest.java | 1 +
.../solr/handler/component/ShardResponse.java | 1 +
.../component/TermVectorComponent.java | 8 ++
.../handler/component/TermsComponent.java | 5 +
.../highlight/DefaultSolrHighlighter.java | 3 +
.../apache/solr/highlight/GapFragmenter.java | 2 +
.../solr/highlight/RegexFragmenter.java | 1 +
.../solr/request/ServletSolrParams.java | 1 +
.../org/apache/solr/request/SimpleFacets.java | 14 +++
.../solr/request/SolrQueryRequestBase.java | 1 +
.../apache/solr/request/UnInvertedField.java | 4 +
.../solr/response/JSONResponseWriter.java | 14 +++
.../response/PHPSerializedResponseWriter.java | 1 +
.../org/apache/solr/response/PageTool.java | 1 +
.../solr/response/RubyResponseWriter.java | 2 +
.../response/SolrParamResourceLoader.java | 4 +
.../response/SolrVelocityResourceLoader.java | 4 +
.../org/apache/solr/response/XMLWriter.java | 2 +
.../solr/schema/AbstractSubTypeFieldType.java | 1 +
.../org/apache/solr/schema/BCDIntField.java | 6 ++
.../org/apache/solr/schema/BinaryField.java | 4 +
.../org/apache/solr/schema/BoolField.java | 9 ++
.../org/apache/solr/schema/ByteField.java | 3 +
.../org/apache/solr/schema/DateField.java | 21 ++++
.../org/apache/solr/schema/DoubleField.java | 3 +
.../apache/solr/schema/ExternalFileField.java | 4 +
.../org/apache/solr/schema/FieldType.java | 2 +
.../org/apache/solr/schema/FloatField.java | 3 +
.../org/apache/solr/schema/IndexSchema.java | 4 +
.../java/org/apache/solr/schema/IntField.java | 3 +
.../org/apache/solr/schema/LongField.java | 2 +
.../apache/solr/schema/RandomSortField.java | 7 ++
.../org/apache/solr/schema/ShortField.java | 2 +
.../solr/schema/SortableDoubleField.java | 18 ++++
.../solr/schema/SortableFloatField.java | 18 ++++
.../apache/solr/schema/SortableIntField.java | 18 ++++
.../apache/solr/schema/SortableLongField.java | 18 ++++
.../java/org/apache/solr/schema/StrField.java | 4 +
.../apache/solr/schema/StrFieldSource.java | 13 +++
.../org/apache/solr/schema/TextField.java | 3 +
.../org/apache/solr/schema/TrieField.java | 3 +
.../org/apache/solr/search/BitDocSet.java | 3 +
.../solr/search/BoostQParserPlugin.java | 5 +
.../org/apache/solr/search/DisMaxQParser.java | 2 +
.../solr/search/DisMaxQParserPlugin.java | 1 +
.../java/org/apache/solr/search/DocSet.java | 2 +
.../solr/search/DocSetHitCollector.java | 8 ++
.../search/ExtendedDismaxQParserPlugin.java | 8 ++
.../org/apache/solr/search/FastLRUCache.java | 1 +
.../solr/search/FieldQParserPlugin.java | 2 +
.../apache/solr/search/FunctionQParser.java | 1 +
.../solr/search/FunctionQParserPlugin.java | 1 +
.../search/FunctionRangeQParserPlugin.java | 2 +
.../java/org/apache/solr/search/LRUCache.java | 2 +
.../solr/search/LuceneQParserPlugin.java | 4 +
.../solr/search/LuceneQueryOptimizer.java | 1 +
.../MissingStringLastComparatorSource.java | 1 +
.../org/apache/solr/search/MutableValue.java | 1 +
.../solr/search/NestedQParserPlugin.java | 5 +
.../solr/search/OldLuceneQParserPlugin.java | 1 +
.../solr/search/PrefixQParserPlugin.java | 2 +
.../org/apache/solr/search/QueryParsing.java | 1 +
.../apache/solr/search/QueryResultKey.java | 2 +
.../apache/solr/search/RawQParserPlugin.java | 2 +
.../org/apache/solr/search/SolrCacheBase.java | 1 +
.../solr/search/SolrConstantScoreQuery.java | 3 +
.../apache/solr/search/SolrIndexSearcher.java | 24 ++++-
.../apache/solr/search/SortedIntDocSet.java | 1 +
.../solr/search/SpatialBoxQParserPlugin.java | 1 +
.../apache/solr/search/TermQParserPlugin.java | 2 +
.../apache/solr/search/ValueSourceParser.java | 96 +++++++++++++++++++
.../solr/search/function/BoostedQuery.java | 8 ++
.../solr/search/function/ByteFieldSource.java | 8 ++
.../search/function/ConstValueSource.java | 10 ++
.../search/function/DivFloatFunction.java | 2 +
.../search/function/DocFreqValueSource.java | 14 +++
.../function/DoubleConstValueSource.java | 10 ++
.../search/function/DoubleFieldSource.java | 8 ++
.../search/function/DualFloatFunction.java | 10 ++
.../search/function/FieldCacheSource.java | 3 +
.../solr/search/function/FileFloatSource.java | 14 +++
.../search/function/FloatFieldSource.java | 8 ++
.../solr/search/function/IntFieldSource.java | 8 ++
.../function/JoinDocFreqValueSource.java | 10 ++
.../search/function/LinearFloatFunction.java | 10 ++
.../solr/search/function/LongFieldSource.java | 8 ++
.../search/function/MaxFloatFunction.java | 10 ++
.../search/function/MultiFloatFunction.java | 10 ++
.../function/NumericFieldCacheSource.java | 2 +
.../solr/search/function/OrdFieldSource.java | 12 +++
.../search/function/PowFloatFunction.java | 2 +
.../search/function/ProductFloatFunction.java | 2 +
.../search/function/QueryValueSource.java | 9 ++
.../function/RangeMapFloatFunction.java | 10 ++
.../function/ReciprocalFloatFunction.java | 10 ++
.../function/ReverseOrdFieldSource.java | 12 +++
.../search/function/ScaleFloatFunction.java | 10 ++
.../search/function/ShortFieldSource.java | 8 ++
.../search/function/SimpleFloatFunction.java | 6 ++
.../solr/search/function/SingleFunction.java | 3 +
.../search/function/StringIndexDocValues.java | 1 +
.../search/function/SumFloatFunction.java | 1 +
.../solr/search/function/ValueSource.java | 10 ++
.../function/ValueSourceRangeFilter.java | 7 +-
.../search/function/VectorValueSource.java | 4 +
.../distance/GeohashHaversineFunction.java | 5 +
.../distance/HaversineConstFunction.java | 7 ++
.../function/distance/HaversineFunction.java | 6 ++
.../distance/SquaredEuclideanFunction.java | 2 +
.../distance/StringDistanceFunction.java | 5 +
.../distance/VectorDistanceFunction.java | 5 +
.../spelling/AbstractLuceneSpellChecker.java | 2 +
.../solr/spelling/FileBasedSpellChecker.java | 2 +
.../solr/spelling/IndexBasedSpellChecker.java | 2 +
.../solr/spelling/SpellingQueryConverter.java | 1 +
.../apache/solr/spelling/suggest/Lookup.java | 1 +
.../solr/update/CommitUpdateCommand.java | 1 +
.../solr/update/DeleteUpdateCommand.java | 1 +
.../solr/update/DirectUpdateHandler2.java | 9 ++
.../apache/solr/update/SolrIndexWriter.java | 2 +
.../org/apache/solr/update/UpdateCommand.java | 1 +
.../update/processor/Lookup3Signature.java | 2 +
.../solr/update/processor/MD5Signature.java | 3 +
.../processor/TextProfileSignature.java | 3 +
.../org/apache/solr/util/BoundedTreeSet.java | 2 +
.../org/apache/solr/util/SolrPluginUtils.java | 1 +
.../solrj/impl/BinaryRequestWriter.java | 5 +
.../solrj/impl/BinaryResponseParser.java | 4 +
.../client/solrj/impl/LBHttpSolrServer.java | 3 +
.../impl/StreamingBinaryResponseParser.java | 2 +
.../request/JavaBinUpdateRequestCodec.java | 3 +
.../client/solrj/response/FieldStatsInfo.java | 1 +
.../solr/BaseDistributedSearchTestCase.java | 6 ++
.../apache/solr/BasicFunctionalityTest.java | 5 +
.../test/org/apache/solr/SolrTestCaseJ4.java | 3 +
solr/src/test/org/apache/solr/TestTrie.java | 1 +
...estRemoveDuplicatesTokenFilterFactory.java | 1 +
.../TestReversedWildcardFilterFactory.java | 1 +
.../client/solrj/SolrExampleTestBase.java | 1 +
.../client/solrj/TestLBHttpSolrServer.java | 1 +
.../solrj/embedded/TestSolrProperties.java | 2 +
.../cloud/AbstractDistributedZkTestCase.java | 1 +
.../apache/solr/cloud/AbstractZkTestCase.java | 1 +
.../solr/cloud/CloudStateUpdateTest.java | 1 +
.../apache/solr/cloud/ZkControllerTest.java | 1 +
.../apache/solr/cloud/ZkSolrClientTest.java | 1 +
.../solr/core/AlternateDirectoryTest.java | 2 +
.../solr/core/DummyValueSourceParser.java | 4 +
.../solr/core/IndexReaderFactoryTest.java | 2 +
.../MockQuerySenderListenerReqHandler.java | 7 ++
.../solr/core/TestArbitraryIndexDir.java | 2 +
.../org/apache/solr/core/TestBadConfig.java | 3 +
.../apache/solr/core/TestJmxIntegration.java | 2 +
.../apache/solr/core/TestJmxMonitoredMap.java | 2 +
.../org/apache/solr/core/TestPropInject.java | 2 +
.../solr/core/TestSolrDeletionPolicy1.java | 1 +
.../apache/solr/core/TestXIncludeConfig.java | 2 +
.../apache/solr/handler/JsonLoaderTest.java | 5 +
.../apache/solr/handler/TestCSVLoader.java | 2 +
.../solr/handler/TestReplicationHandler.java | 2 +
.../component/SpellCheckComponentTest.java | 2 +
.../request/TestBinaryResponseWriter.java | 2 +
.../apache/solr/request/TestWriterPerf.java | 4 +
.../solr/schema/CustomSimilarityFactory.java | 1 +
.../org/apache/solr/schema/DateFieldTest.java | 1 +
.../apache/solr/schema/TestBinaryField.java | 1 +
.../apache/solr/search/FooQParserPlugin.java | 2 +
.../solr/search/TestExtendedDismaxParser.java | 4 +
.../apache/solr/search/TestFastLRUCache.java | 1 +
.../apache/solr/search/TestIndexSearcher.java | 1 +
.../apache/solr/search/TestQueryTypes.java | 4 +
.../apache/solr/search/TestQueryUtils.java | 4 +
.../apache/solr/search/TestRangeQuery.java | 1 +
.../apache/solr/search/TestSearchPerf.java | 4 +
.../test/org/apache/solr/search/TestSort.java | 3 +
.../search/function/NvlValueSourceParser.java | 8 +-
.../search/function/SortByFunctionTest.java | 2 +
.../apache/solr/servlet/CacheHeaderTest.java | 3 +
.../servlet/DirectSolrConnectionTest.java | 2 +
.../solr/servlet/NoCacheHeaderTest.java | 6 ++
.../spelling/IndexBasedSpellCheckerTest.java | 1 +
.../SpellPossibilityIteratorTest.java | 3 +-
.../apache/solr/update/AutoCommitTest.java | 2 +
.../DirectUpdateHandlerOptimizeTest.java | 2 +
.../solr/update/DirectUpdateHandlerTest.java | 1 +
.../solr/update/TestIndexingPerformance.java | 2 +
.../SignatureUpdateProcessorFactoryTest.java | 2 +
.../solr/util/AbstractSolrTestCase.java | 3 +
.../org/apache/solr/util/TestNumberUtils.java | 16 ++++
.../velocity/VelocityResponseWriterTest.java | 2 +
.../solr/servlet/LogLevelSelection.java | 5 +
357 files changed, 1322 insertions(+), 9 deletions(-)
diff --git a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocsAndPositionsEnum.java b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocsAndPositionsEnum.java
index 816e454673e..e4eea034bd1 100644
--- a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocsAndPositionsEnum.java
+++ b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocsAndPositionsEnum.java
@@ -82,14 +82,17 @@ public class InstantiatedDocsAndPositionsEnum extends DocsAndPositionsEnum {
return currentDoc.getTermPositions().length;
}
+ @Override
public int nextPosition() {
return currentDoc.getTermPositions()[++posUpto];
}
+ @Override
public boolean hasPayload() {
return currentDoc.getPayloads()[posUpto] != null;
}
+ @Override
public BytesRef getPayload() {
payload.bytes = currentDoc.getPayloads()[posUpto];
payload.length = payload.bytes.length;
diff --git a/lucene/contrib/misc/src/java/org/apache/lucene/store/WindowsDirectory.java b/lucene/contrib/misc/src/java/org/apache/lucene/store/WindowsDirectory.java
index 870ebfade1e..f1c3f74a117 100644
--- a/lucene/contrib/misc/src/java/org/apache/lucene/store/WindowsDirectory.java
+++ b/lucene/contrib/misc/src/java/org/apache/lucene/store/WindowsDirectory.java
@@ -64,6 +64,7 @@ public class WindowsDirectory extends FSDirectory {
super(path, null);
}
+ @Override
public IndexInput openInput(String name, int bufferSize) throws IOException {
ensureOpen();
return new WindowsIndexInput(new File(getDirectory(), name), Math.max(bufferSize, DEFAULT_BUFFERSIZE));
@@ -82,14 +83,17 @@ public class WindowsDirectory extends FSDirectory {
isOpen = true;
}
+ @Override
protected void readInternal(byte[] b, int offset, int length) throws IOException {
if (WindowsDirectory.read(fd, b, offset, length, getFilePointer()) != length)
throw new IOException("Read past EOF");
}
+ @Override
protected void seekInternal(long pos) throws IOException {
}
+ @Override
public synchronized void close() throws IOException {
// NOTE: we synchronize and track "isOpen" because Lucene sometimes closes IIs twice!
if (!isClone && isOpen) {
@@ -98,6 +102,7 @@ public class WindowsDirectory extends FSDirectory {
}
}
+ @Override
public long length() {
return length;
}
diff --git a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java
index 5d044b9ad4c..5cba05b3111 100644
--- a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java
+++ b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java
@@ -84,6 +84,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+ @Override
public boolean incrementToken() throws IOException {
if (inPhrase) {
inPhrase = false;
@@ -108,6 +109,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
public static final class QPTestAnalyzer extends Analyzer {
/** Filters MockTokenizer with StopFilter. */
+ @Override
public final TokenStream tokenStream(String fieldName, Reader reader) {
return new QPTestFilter(new MockTokenizer(reader, MockTokenizer.SIMPLE, true));
}
@@ -115,6 +117,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
private int originalMaxClauses;
+ @Override
public void setUp() throws Exception {
super.setUp();
originalMaxClauses = BooleanQuery.getMaxClauseCount();
@@ -627,6 +630,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
}
+ @Override
public void tearDown() {
BooleanQuery.setMaxClauseCount(originalMaxClauses);
}
diff --git a/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java b/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
index f47d2a71bc0..b504f1557b9 100644
--- a/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
+++ b/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
@@ -194,6 +194,7 @@ final class FreqProxTermsWriterPerField extends TermsHashConsumerPerField implem
return new FreqProxPostingsArray(size);
}
+ @Override
void copyTo(ParallelPostingsArray toArray, int numToCopy) {
assert toArray instanceof FreqProxPostingsArray;
FreqProxPostingsArray to = (FreqProxPostingsArray) toArray;
diff --git a/lucene/src/java/org/apache/lucene/index/MultiReader.java b/lucene/src/java/org/apache/lucene/index/MultiReader.java
index 0d3a082567b..c2682e40231 100644
--- a/lucene/src/java/org/apache/lucene/index/MultiReader.java
+++ b/lucene/src/java/org/apache/lucene/index/MultiReader.java
@@ -383,6 +383,7 @@ public class MultiReader extends IndexReader implements Cloneable {
return subReaders;
}
+ @Override
public ReaderContext getTopReaderContext() {
return topLevelContext;
}
diff --git a/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java b/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java
index f7d4a1885d4..51c92321f54 100644
--- a/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java
+++ b/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java
@@ -204,6 +204,7 @@ final class PerFieldCodecWrapper extends Codec {
}
}
+ @Override
public FieldsProducer fieldsProducer(SegmentReadState state)
throws IOException {
return new FieldsReader(state.dir, state.fieldInfos, state.segmentInfo,
diff --git a/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java b/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java
index 4938538d054..2b4e35e09cd 100644
--- a/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java
+++ b/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java
@@ -281,6 +281,7 @@ final class TermVectorsTermsWriterPerField extends TermsHashConsumerPerField {
int[] lastOffsets; // Last offset we saw
int[] lastPositions; // Last position where this term occurred
+ @Override
ParallelPostingsArray newInstance(int size) {
return new TermVectorsPostingsArray(size);
}
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsReader.java b/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsReader.java
index d4a6ac1bce7..e25364c33a3 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsReader.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsReader.java
@@ -646,6 +646,7 @@ public class BlockTermsReader extends FieldsProducer {
return SeekStatus.FOUND;
}
+ @Override
public long ord() {
if (!doOrd) {
throw new UnsupportedOperationException();
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermPositions.java b/lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermPositions.java
index f50d226741c..c642f6b1aaa 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermPositions.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermPositions.java
@@ -58,6 +58,7 @@ extends SegmentTermDocs {
this.proxStreamOrig = proxStream; // the proxStream will be cloned lazily when nextPosition() is called for the first time
}
+ @Override
final void seek(TermInfo ti, Term term) throws IOException {
super.seek(ti, term);
if (ti != null)
@@ -69,6 +70,7 @@ extends SegmentTermDocs {
needToLoadPayload = false;
}
+ @Override
public final void close() throws IOException {
super.close();
if (proxStream != null) proxStream.close();
@@ -100,11 +102,13 @@ extends SegmentTermDocs {
return delta;
}
+ @Override
protected final void skippingDoc() throws IOException {
// we remember to skip a document lazily
lazySkipProxCount += freq;
}
+ @Override
public final boolean next() throws IOException {
// we remember to skip the remaining positions of the current
// document lazily
@@ -118,12 +122,14 @@ extends SegmentTermDocs {
return false;
}
+ @Override
public final int read(final int[] docs, final int[] freqs) {
throw new UnsupportedOperationException("TermPositions does not support processing multiple documents in one call. Use TermDocs instead.");
}
/** Called by super.skipTo(). */
+ @Override
protected void skipProx(long proxPointer, int payloadLength) throws IOException {
// we save the pointer, we might have to skip there lazily
lazySkipPointer = proxPointer;
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfosReader.java b/lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfosReader.java
index adf0535390d..8205e73b972 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfosReader.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfosReader.java
@@ -67,15 +67,18 @@ public final class TermInfosReader {
this.term = t;
}
+ @Override
public boolean equals(Object other) {
CloneableTerm t = (CloneableTerm) other;
return this.term.equals(t.term);
}
+ @Override
public int hashCode() {
return term.hashCode();
}
+ @Override
public Object clone() {
return new CloneableTerm(term);
}
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/sep/IntIndexInput.java b/lucene/src/java/org/apache/lucene/index/codecs/sep/IntIndexInput.java
index 741272a329a..631476df0ba 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/sep/IntIndexInput.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/sep/IntIndexInput.java
@@ -48,6 +48,7 @@ public abstract class IntIndexInput implements Closeable {
public abstract void set(Index other);
+ @Override
public abstract Object clone();
}
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsReaderImpl.java b/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsReaderImpl.java
index 08e2781732c..b693db361c9 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsReaderImpl.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsReaderImpl.java
@@ -160,6 +160,7 @@ public class SepPostingsReaderImpl extends PostingsReaderBase {
return other;
}
+ @Override
public void copyFrom(TermState _other) {
super.copyFrom(_other);
SepTermState other = (SepTermState) _other;
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/simpletext/SimpleTextFieldsReader.java b/lucene/src/java/org/apache/lucene/index/codecs/simpletext/SimpleTextFieldsReader.java
index e40fba2f44e..ea74a6b6627 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/simpletext/SimpleTextFieldsReader.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/simpletext/SimpleTextFieldsReader.java
@@ -129,6 +129,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
fstEnum = new BytesRefFSTEnum<PairOutputs.Pair<Long,PairOutputs.Pair<Long,Long>>>(fst);
}
+ @Override
public SeekStatus seek(BytesRef text, boolean useCache /* ignored */) throws IOException {
//System.out.println("seek to text=" + text.utf8ToString());
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReader.java b/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReader.java
index 0e53a99d536..0c9dd4f5c86 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReader.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReader.java
@@ -99,12 +99,14 @@ public class StandardPostingsReader extends PostingsReaderBase {
ByteArrayDataInput bytesReader;
byte[] bytes;
+ @Override
public Object clone() {
StandardTermState other = new StandardTermState();
other.copyFrom(this);
return other;
}
+ @Override
public void copyFrom(TermState _other) {
super.copyFrom(_other);
StandardTermState other = (StandardTermState) _other;
@@ -118,6 +120,7 @@ public class StandardPostingsReader extends PostingsReaderBase {
// (rare!), they will be re-read from disk.
}
+ @Override
public String toString() {
return super.toString() + " freqFP=" + freqOffset + " proxFP=" + proxOffset + " skipOffset=" + skipOffset;
}
@@ -569,6 +572,7 @@ public class StandardPostingsReader extends PostingsReaderBase {
return doc;
}
+ @Override
public int nextPosition() throws IOException {
if (lazyProxPointer != -1) {
@@ -597,10 +601,12 @@ public class StandardPostingsReader extends PostingsReaderBase {
/** Returns the payload at this position, or null if no
* payload was indexed. */
+ @Override
public BytesRef getPayload() throws IOException {
throw new IOException("No payloads exist for this field!");
}
+ @Override
public boolean hasPayload() {
return false;
}
@@ -765,6 +771,7 @@ public class StandardPostingsReader extends PostingsReaderBase {
return doc;
}
+ @Override
public int nextPosition() throws IOException {
if (lazyProxPointer != -1) {
@@ -825,6 +832,7 @@ public class StandardPostingsReader extends PostingsReaderBase {
/** Returns the payload at this position, or null if no
* payload was indexed. */
+ @Override
public BytesRef getPayload() throws IOException {
assert lazyProxPointer == -1;
assert posPendingCount < freq;
@@ -842,6 +850,7 @@ public class StandardPostingsReader extends PostingsReaderBase {
return payload;
}
+ @Override
public boolean hasPayload() {
return payloadPending && payloadLength > 0;
}
diff --git a/lucene/src/java/org/apache/lucene/search/FuzzyTermsEnum.java b/lucene/src/java/org/apache/lucene/search/FuzzyTermsEnum.java
index 40795000c82..ee5e274ffaf 100644
--- a/lucene/src/java/org/apache/lucene/search/FuzzyTermsEnum.java
+++ b/lucene/src/java/org/apache/lucene/search/FuzzyTermsEnum.java
@@ -261,6 +261,7 @@ public final class FuzzyTermsEnum extends TermsEnum {
return actualEnum.docsAndPositions(skipDocs, reuse);
}
+ @Override
public void seek(BytesRef term, TermState state) throws IOException {
actualEnum.seek(term, state);
}
diff --git a/lucene/src/java/org/apache/lucene/search/TotalHitCountCollector.java b/lucene/src/java/org/apache/lucene/search/TotalHitCountCollector.java
index 533d69c65d3..b154091e27d 100644
--- a/lucene/src/java/org/apache/lucene/search/TotalHitCountCollector.java
+++ b/lucene/src/java/org/apache/lucene/search/TotalHitCountCollector.java
@@ -31,16 +31,20 @@ public class TotalHitCountCollector extends Collector {
return totalHits;
}
+ @Override
public void setScorer(Scorer scorer) {
}
+ @Override
public void collect(int doc) {
totalHits++;
}
+ @Override
public void setNextReader(AtomicReaderContext context) {
}
+ @Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
diff --git a/lucene/src/java/org/apache/lucene/search/cache/EntryCreator.java b/lucene/src/java/org/apache/lucene/search/cache/EntryCreator.java
index 0e0daff40cd..362cc83a71e 100644
--- a/lucene/src/java/org/apache/lucene/search/cache/EntryCreator.java
+++ b/lucene/src/java/org/apache/lucene/search/cache/EntryCreator.java
@@ -58,6 +58,7 @@ public abstract class EntryCreator<T> implements Serializable
// This can be removed
//------------------------------------------------------------------------
+ @Override
public boolean equals(Object obj) {
if( obj instanceof EntryCreator ) {
return getCacheKey().equals( ((EntryCreator)obj).getCacheKey() );
diff --git a/lucene/src/java/org/apache/lucene/search/function/MultiValueSource.java b/lucene/src/java/org/apache/lucene/search/function/MultiValueSource.java
index 7dbccb25a69..b3ec7681ad1 100644
--- a/lucene/src/java/org/apache/lucene/search/function/MultiValueSource.java
+++ b/lucene/src/java/org/apache/lucene/search/function/MultiValueSource.java
@@ -52,6 +52,7 @@ public final class MultiValueSource extends ValueSource {
return other.getValues(context);
}
+ @Override
public DocValues getValues(ReaderContext context) throws IOException {
if (context.isAtomic) {
return getValues((AtomicReaderContext) context);
diff --git a/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java b/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
index 19a771213ba..35356f30f7d 100644
--- a/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
@@ -219,6 +219,7 @@ public class PayloadNearQuery extends SpanNearQuery {
return true;
}
+ @Override
public float score() throws IOException {
return super.score()
diff --git a/lucene/src/java/org/apache/lucene/search/spans/SpanNearPayloadCheckQuery.java b/lucene/src/java/org/apache/lucene/search/spans/SpanNearPayloadCheckQuery.java
index 5fce135941b..2b17f627327 100644
--- a/lucene/src/java/org/apache/lucene/search/spans/SpanNearPayloadCheckQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/spans/SpanNearPayloadCheckQuery.java
@@ -72,6 +72,7 @@ public class SpanNearPayloadCheckQuery extends SpanPositionCheckQuery {
return AcceptStatus.NO;
}
+ @Override
public String toString(String field) {
StringBuilder buffer = new StringBuilder();
buffer.append("spanPayCheck(");
diff --git a/lucene/src/java/org/apache/lucene/search/spans/SpanPayloadCheckQuery.java b/lucene/src/java/org/apache/lucene/search/spans/SpanPayloadCheckQuery.java
index 69dbc306f19..086dad2f929 100644
--- a/lucene/src/java/org/apache/lucene/search/spans/SpanPayloadCheckQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/spans/SpanPayloadCheckQuery.java
@@ -74,6 +74,7 @@ public class SpanPayloadCheckQuery extends SpanPositionCheckQuery{
return AcceptStatus.YES;
}
+ @Override
public String toString(String field) {
StringBuilder buffer = new StringBuilder();
buffer.append("spanPayCheck(");
diff --git a/lucene/src/java/org/apache/lucene/util/BytesRef.java b/lucene/src/java/org/apache/lucene/util/BytesRef.java
index 1ec291a5c65..a90b6fb682d 100644
--- a/lucene/src/java/org/apache/lucene/util/BytesRef.java
+++ b/lucene/src/java/org/apache/lucene/util/BytesRef.java
@@ -209,6 +209,7 @@ public final class BytesRef implements Comparable, Externalizable {
}
/** Returns hex encoded bytes, eg [0x6c 0x75 0x63 0x65 0x6e 0x65] */
+ @Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append('[');
diff --git a/lucene/src/java/org/apache/lucene/util/DoubleBarrelLRUCache.java b/lucene/src/java/org/apache/lucene/util/DoubleBarrelLRUCache.java
index a0dd7c19f08..cdb958ef252 100644
--- a/lucene/src/java/org/apache/lucene/util/DoubleBarrelLRUCache.java
+++ b/lucene/src/java/org/apache/lucene/util/DoubleBarrelLRUCache.java
@@ -45,6 +45,7 @@ import java.util.Map;
final public class DoubleBarrelLRUCache {
public static abstract class CloneableKey {
+ @Override
abstract public Object clone();
}
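Any key for this cache must override clone() (and, to be usable in the backing maps, equals() and hashCode()). A minimal hypothetical key, mirroring the CloneableObject pattern in the TestDoubleBarrelLRUCache hunk further down:

  static class TermKey extends DoubleBarrelLRUCache.CloneableKey {
    private final String term;
    TermKey(String term) { this.term = term; }
    @Override
    public boolean equals(Object other) {
      return term.equals(((TermKey) other).term);
    }
    @Override
    public int hashCode() { return term.hashCode(); }
    @Override
    public Object clone() { return new TermKey(term); }  // cheap copy for the double-barrel swap
  }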
diff --git a/lucene/src/java/org/apache/lucene/util/IntsRef.java b/lucene/src/java/org/apache/lucene/util/IntsRef.java
index 1f284b5ea51..ee1bd2ed69c 100644
--- a/lucene/src/java/org/apache/lucene/util/IntsRef.java
+++ b/lucene/src/java/org/apache/lucene/util/IntsRef.java
@@ -123,6 +123,7 @@ public final class IntsRef implements Comparable {
}
}
+ @Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append('[');
diff --git a/lucene/src/java/org/apache/lucene/util/ReaderUtil.java b/lucene/src/java/org/apache/lucene/util/ReaderUtil.java
index 772b5ebf751..8d772880d0b 100644
--- a/lucene/src/java/org/apache/lucene/util/ReaderUtil.java
+++ b/lucene/src/java/org/apache/lucene/util/ReaderUtil.java
@@ -47,6 +47,7 @@ public final class ReaderUtil {
this.readerIndex = readerIndex;
}
+ @Override
public String toString() {
return "slice start=" + start + " length=" + length + " readerIndex=" + readerIndex;
}
diff --git a/lucene/src/java/org/apache/lucene/util/automaton/BasicOperations.java b/lucene/src/java/org/apache/lucene/util/automaton/BasicOperations.java
index ce1c19001b2..e7e9b301482 100644
--- a/lucene/src/java/org/apache/lucene/util/automaton/BasicOperations.java
+++ b/lucene/src/java/org/apache/lucene/util/automaton/BasicOperations.java
@@ -483,10 +483,12 @@ final public class BasicOperations {
starts.count = 0;
}
+ @Override
public boolean equals(Object other) {
return ((PointTransitions) other).point == point;
}
+ @Override
public int hashCode() {
return point;
}
@@ -563,6 +565,7 @@ final public class BasicOperations {
find(1+t.max).ends.add(t);
}
+ @Override
public String toString() {
StringBuilder s = new StringBuilder();
for(int i=0;i<count;i++) {
diff --git a/lucene/src/java/org/apache/lucene/util/automaton/fst/PairOutputs.java b/lucene/src/java/org/apache/lucene/util/automaton/fst/PairOutputs.java
@@ public class PairOutputs<A,B> extends Outputs<PairOutputs.Pair<A,B>> {
}
}
+ @Override
public int hashCode() {
return output1.hashCode() + output2.hashCode();
}
diff --git a/lucene/src/java/org/apache/lucene/util/packed/Packed32.java b/lucene/src/java/org/apache/lucene/util/packed/Packed32.java
index c8bb011c209..ff22ad7ef48 100644
--- a/lucene/src/java/org/apache/lucene/util/packed/Packed32.java
+++ b/lucene/src/java/org/apache/lucene/util/packed/Packed32.java
@@ -214,6 +214,7 @@ class Packed32 extends PackedInts.ReaderImpl implements PackedInts.Mutable {
Arrays.fill(blocks, 0);
}
+ @Override
public String toString() {
return "Packed32(bitsPerValue=" + bitsPerValue + ", maxPos=" + maxPos
+ ", elements.length=" + blocks.length + ")";
diff --git a/lucene/src/java/org/apache/lucene/util/packed/Packed64.java b/lucene/src/java/org/apache/lucene/util/packed/Packed64.java
index b3826676503..62e77934ef3 100644
--- a/lucene/src/java/org/apache/lucene/util/packed/Packed64.java
+++ b/lucene/src/java/org/apache/lucene/util/packed/Packed64.java
@@ -199,6 +199,7 @@ class Packed64 extends PackedInts.ReaderImpl implements PackedInts.Mutable {
| ((value << shifts[base + 2]) & writeMasks[base+2]);
}
+ @Override
public String toString() {
return "Packed64(bitsPerValue=" + bitsPerValue + ", size="
+ size() + ", maxPos=" + maxPos
diff --git a/lucene/src/java/org/apache/lucene/util/packed/PackedWriter.java b/lucene/src/java/org/apache/lucene/util/packed/PackedWriter.java
index 0cf054991ba..b2c86dd799f 100644
--- a/lucene/src/java/org/apache/lucene/util/packed/PackedWriter.java
+++ b/lucene/src/java/org/apache/lucene/util/packed/PackedWriter.java
@@ -106,6 +106,7 @@ class PackedWriter extends PackedInts.Writer {
}
}
+ @Override
public String toString() {
return "PackedWriter(written " + written + "/" + valueCount + " with "
+ bitsPerValue + " bits/value)";
diff --git a/lucene/src/test/org/apache/lucene/TestAssertions.java b/lucene/src/test/org/apache/lucene/TestAssertions.java
index 373fd3db271..ce51fd34484 100644
--- a/lucene/src/test/org/apache/lucene/TestAssertions.java
+++ b/lucene/src/test/org/apache/lucene/TestAssertions.java
@@ -35,34 +35,45 @@ public class TestAssertions extends LuceneTestCase {
}
static class TestAnalyzer1 extends Analyzer {
+ @Override
public final TokenStream tokenStream(String s, Reader r) { return null; }
+ @Override
public final TokenStream reusableTokenStream(String s, Reader r) { return null; }
}
static final class TestAnalyzer2 extends Analyzer {
+ @Override
public TokenStream tokenStream(String s, Reader r) { return null; }
+ @Override
public TokenStream reusableTokenStream(String s, Reader r) { return null; }
}
static class TestAnalyzer3 extends Analyzer {
+ @Override
public TokenStream tokenStream(String s, Reader r) { return null; }
+ @Override
public TokenStream reusableTokenStream(String s, Reader r) { return null; }
}
static class TestAnalyzer4 extends Analyzer {
+ @Override
public final TokenStream tokenStream(String s, Reader r) { return null; }
+ @Override
public TokenStream reusableTokenStream(String s, Reader r) { return null; }
}
static class TestTokenStream1 extends TokenStream {
+ @Override
public final boolean incrementToken() { return false; }
}
static final class TestTokenStream2 extends TokenStream {
+ @Override
public boolean incrementToken() { return false; }
}
static class TestTokenStream3 extends TokenStream {
+ @Override
public boolean incrementToken() { return false; }
}
diff --git a/lucene/src/test/org/apache/lucene/TestExternalCodecs.java b/lucene/src/test/org/apache/lucene/TestExternalCodecs.java
index 4252f752488..e7ff3954542 100644
--- a/lucene/src/test/org/apache/lucene/TestExternalCodecs.java
+++ b/lucene/src/test/org/apache/lucene/TestExternalCodecs.java
@@ -63,6 +63,7 @@ public class TestExternalCodecs extends LuceneTestCase {
return t2.length-t1.length;
}
+ @Override
public boolean equals(Object other) {
return this == other;
}
@@ -344,6 +345,7 @@ public class TestExternalCodecs extends LuceneTestCase {
return ramField.termToDocs.get(current).totalTermFreq;
}
+ @Override
public DocsEnum docs(Bits skipDocs, DocsEnum reuse) {
return new RAMDocsEnum(ramField.termToDocs.get(current), skipDocs);
}
diff --git a/lucene/src/test/org/apache/lucene/analysis/tokenattributes/TestCharTermAttributeImpl.java b/lucene/src/test/org/apache/lucene/analysis/tokenattributes/TestCharTermAttributeImpl.java
index bac72b5951d..1d2ab4371b7 100644
--- a/lucene/src/test/org/apache/lucene/analysis/tokenattributes/TestCharTermAttributeImpl.java
+++ b/lucene/src/test/org/apache/lucene/analysis/tokenattributes/TestCharTermAttributeImpl.java
@@ -227,6 +227,7 @@ public class TestCharTermAttributeImpl extends LuceneTestCase {
public char charAt(int i) { return longTestString.charAt(i); }
public int length() { return longTestString.length(); }
public CharSequence subSequence(int start, int end) { return longTestString.subSequence(start, end); }
+ @Override
public String toString() { return longTestString; }
});
assertEquals("4567890123456"+longTestString, t.toString());
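Only toString() gains @Override in this hunk, presumably because it overrides java.lang.Object; a 1.5-era javac rejected @Override on methods that merely implement an interface (charAt, length, subSequence), while Java 6 accepts it. In isolation:

  CharSequence cs = new CharSequence() {
    public char charAt(int i) { return 'x'; }            // @Override here fails under -source 1.5
    public int length() { return 1; }
    public CharSequence subSequence(int s, int e) { return this; }
    @Override                                            // overrides Object.toString(): legal everywhere
    public String toString() { return "x"; }
  };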
diff --git a/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java b/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java
index 7929a4519b5..a3f02482b93 100644
--- a/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java
+++ b/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java
@@ -36,6 +36,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
private String fieldName;
private boolean usePayload;
+ @Override
public void setUp() throws Exception {
super.setUp();
fieldName = "field" + random.nextInt();
diff --git a/lucene/src/test/org/apache/lucene/index/TestFilterIndexReader.java b/lucene/src/test/org/apache/lucene/index/TestFilterIndexReader.java
index 5f4dfd36e80..c17dc38b9aa 100644
--- a/lucene/src/test/org/apache/lucene/index/TestFilterIndexReader.java
+++ b/lucene/src/test/org/apache/lucene/index/TestFilterIndexReader.java
@@ -38,9 +38,11 @@ public class TestFilterIndexReader extends LuceneTestCase {
TestFields(Fields in) {
super(in);
}
+ @Override
public FieldsEnum iterator() throws IOException {
return new TestFieldsEnum(super.iterator());
}
+ @Override
public Terms terms(String field) throws IOException {
return new TestTerms(super.terms(field));
}
@@ -51,6 +53,7 @@ public class TestFilterIndexReader extends LuceneTestCase {
super(in);
}
+ @Override
public TermsEnum iterator() throws IOException {
return new TestTermsEnum(super.iterator());
}
@@ -61,6 +64,7 @@ public class TestFilterIndexReader extends LuceneTestCase {
super(in);
}
+ @Override
public TermsEnum terms() throws IOException {
return new TestTermsEnum(super.terms());
}
diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java
index 60f5e49fbb0..8607d8fed1b 100644
--- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java
+++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java
@@ -971,6 +971,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
setMaxBufferedDocs(2).
setReaderPooling(true).
setMergedSegmentWarmer(new IndexWriter.IndexReaderWarmer() {
+ @Override
public void warm(IndexReader r) throws IOException {
IndexSearcher s = new IndexSearcher(r);
TopDocs hits = s.search(new TermQuery(new Term("foo", "bar")), 10);
diff --git a/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java b/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
index 425e790784d..92c3689612a 100644
--- a/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
+++ b/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
@@ -53,6 +53,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase {
super(random, delegate);
}
+ @Override
public IndexInput openInput(String fileName) throws IOException {
IndexInput in = super.openInput(fileName);
if (fileName.endsWith(".frq"))
@@ -61,6 +62,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase {
}
}
+ @Override
@Before
public void setUp() throws Exception {
super.setUp();
diff --git a/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java b/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
index a18e2eaee48..c7312b45ab8 100644
--- a/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
+++ b/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
@@ -257,6 +257,7 @@ public class TestPerSegmentDeletes extends LuceneTestCase {
@Override
public void close() {}
+ @Override
public MergeSpecification findMerges(SegmentInfos segmentInfos)
throws CorruptIndexException, IOException {
MergeSpecification ms = new MergeSpecification();
diff --git a/lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java b/lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java
index 58e4e4d5b03..f5809dd1931 100644
--- a/lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java
@@ -40,6 +40,7 @@ public class TestAutomatonQuery extends LuceneTestCase {
private final String FN = "field";
+ @Override
public void setUp() throws Exception {
super.setUp();
directory = newDirectory();
@@ -65,6 +66,7 @@ public class TestAutomatonQuery extends LuceneTestCase {
writer.close();
}
+ @Override
public void tearDown() throws Exception {
searcher.close();
reader.close();
diff --git a/lucene/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java b/lucene/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java
index 29bec6c066a..b764dc0fc4f 100644
--- a/lucene/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java
+++ b/lucene/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java
@@ -41,6 +41,7 @@ public class TestAutomatonQueryUnicode extends LuceneTestCase {
private final String FN = "field";
+ @Override
public void setUp() throws Exception {
super.setUp();
directory = newDirectory();
@@ -85,6 +86,7 @@ public class TestAutomatonQueryUnicode extends LuceneTestCase {
writer.close();
}
+ @Override
public void tearDown() throws Exception {
searcher.close();
reader.close();
diff --git a/lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java b/lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java
index 254246bf210..65552c46f1f 100644
--- a/lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java
@@ -42,6 +42,7 @@ public class TestRegexpQuery extends LuceneTestCase {
private Directory directory;
private final String FN = "field";
+ @Override
public void setUp() throws Exception {
super.setUp();
directory = newDirectory();
@@ -56,6 +57,7 @@ public class TestRegexpQuery extends LuceneTestCase {
searcher = new IndexSearcher(reader);
}
+ @Override
public void tearDown() throws Exception {
searcher.close();
reader.close();
diff --git a/lucene/src/test/org/apache/lucene/search/cache/TestEntryCreators.java b/lucene/src/test/org/apache/lucene/search/cache/TestEntryCreators.java
index 4485adb82dc..fad6f63c3f2 100644
--- a/lucene/src/test/org/apache/lucene/search/cache/TestEntryCreators.java
+++ b/lucene/src/test/org/apache/lucene/search/cache/TestEntryCreators.java
@@ -54,6 +54,7 @@ public class TestEntryCreators extends LuceneTestCase {
this.parser = parser;
values = new Number[NUM_DOCS];
}
+ @Override
public String toString()
{
return field;
diff --git a/lucene/src/test/org/apache/lucene/util/LuceneJUnitDividingSelector.java b/lucene/src/test/org/apache/lucene/util/LuceneJUnitDividingSelector.java
index cf27a7267fc..5a9509c5a82 100644
--- a/lucene/src/test/org/apache/lucene/util/LuceneJUnitDividingSelector.java
+++ b/lucene/src/test/org/apache/lucene/util/LuceneJUnitDividingSelector.java
@@ -30,6 +30,7 @@ public class LuceneJUnitDividingSelector extends BaseExtendSelector {
/** Current part to accept. */
private int part;
+ @Override
public void setParameters(Parameter[] pParameters) {
super.setParameters(pParameters);
for (int j = 0; j < pParameters.length; j++) {
@@ -46,6 +47,7 @@ public class LuceneJUnitDividingSelector extends BaseExtendSelector {
}
}
+ @Override
public void verifySettings() {
super.verifySettings();
if (divisor <= 0 || part <= 0) {
@@ -56,6 +58,7 @@ public class LuceneJUnitDividingSelector extends BaseExtendSelector {
}
}
+ @Override
public boolean isSelected(File dir, String name, File path) {
counter = counter % divisor + 1;
return counter == part;
diff --git a/lucene/src/test/org/apache/lucene/util/TestBytesRefHash.java b/lucene/src/test/org/apache/lucene/util/TestBytesRefHash.java
index 553898a8a8c..2c82aea6ff3 100644
--- a/lucene/src/test/org/apache/lucene/util/TestBytesRefHash.java
+++ b/lucene/src/test/org/apache/lucene/util/TestBytesRefHash.java
@@ -40,6 +40,7 @@ public class TestBytesRefHash extends LuceneTestCase {
/**
*/
+ @Override
@Before
public void setUp() throws Exception {
super.setUp();
diff --git a/lucene/src/test/org/apache/lucene/util/TestDoubleBarrelLRUCache.java b/lucene/src/test/org/apache/lucene/util/TestDoubleBarrelLRUCache.java
index 03a935a6fef..952c218de3f 100644
--- a/lucene/src/test/org/apache/lucene/util/TestDoubleBarrelLRUCache.java
+++ b/lucene/src/test/org/apache/lucene/util/TestDoubleBarrelLRUCache.java
@@ -145,14 +145,17 @@ public class TestDoubleBarrelLRUCache extends LuceneTestCase {
this.value = value;
}
+ @Override
public boolean equals(Object other) {
return this.value.equals(((CloneableObject) other).value);
}
+ @Override
public int hashCode() {
return value.hashCode();
}
+ @Override
public Object clone() {
return new CloneableObject(value);
}
@@ -165,14 +168,17 @@ public class TestDoubleBarrelLRUCache extends LuceneTestCase {
this.value = value;
}
+ @Override
public boolean equals(Object other) {
return this.value.equals(((CloneableInteger) other).value);
}
+ @Override
public int hashCode() {
return value.hashCode();
}
+ @Override
public Object clone() {
return new CloneableInteger(value);
}
diff --git a/lucene/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java b/lucene/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java
index beac79df7c9..b6c1c4d4f9c 100644
--- a/lucene/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java
+++ b/lucene/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java
@@ -31,6 +31,7 @@ public class TestRecyclingByteBlockAllocator extends LuceneTestCase {
/**
*/
+ @Override
@Before
public void setUp() throws Exception {
super.setUp();
diff --git a/lucene/src/test/org/apache/lucene/util/automaton/fst/TestFSTs.java b/lucene/src/test/org/apache/lucene/util/automaton/fst/TestFSTs.java
index 4922a1e0d64..f7d54753271 100644
--- a/lucene/src/test/org/apache/lucene/util/automaton/fst/TestFSTs.java
+++ b/lucene/src/test/org/apache/lucene/util/automaton/fst/TestFSTs.java
@@ -59,11 +59,13 @@ public class TestFSTs extends LuceneTestCase {
private MockDirectoryWrapper dir;
+ @Override
public void setUp() throws IOException {
dir = newDirectory();
dir.setPreventDoubleWrite(false);
}
+ @Override
public void tearDown() throws IOException {
dir.close();
}
diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java
index 87591992e1f..ff3f20fb8a1 100644
--- a/modules/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java
+++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java
@@ -673,6 +673,7 @@ public class HTMLStripCharFilter extends BaseCharFilter {
+ @Override
public int read() throws IOException {
// TODO: Do we ever want to preserve CDATA sections?
// where do we have to worry about them?
@@ -741,6 +742,7 @@ public class HTMLStripCharFilter extends BaseCharFilter {
}
+ @Override
public int read(char cbuf[], int off, int len) throws IOException {
int i=0;
for (i=0; i<len; i++) {
*   output:"the-rain", "rain-in" ,"in-spain", "falls", "mainly"
*
*/
+ @Override
public boolean incrementToken() throws IOException {
while (input.incrementToken()) {
State current = captureState();
diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianCharFilter.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianCharFilter.java
index c1ed38acfa5..962f839d45c 100644
--- a/modules/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianCharFilter.java
+++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianCharFilter.java
@@ -32,6 +32,7 @@ public class PersianCharFilter extends CharFilter {
super(in);
}
+ @Override
public int read(char[] cbuf, int off, int len) throws IOException {
final int charsRead = super.read(cbuf, off, len);
if (charsRead > 0) {
diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceCharFilter.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceCharFilter.java
index 0ccbb85b074..77f5c95475f 100644
--- a/modules/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceCharFilter.java
+++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceCharFilter.java
@@ -113,6 +113,7 @@ public class PatternReplaceCharFilter extends BaseCharFilter {
}
}
+ @Override
public int read() throws IOException {
while( prepareReplaceBlock() ){
return replaceBlockBuffer.charAt( replaceBlockBufferOffset++ );
@@ -120,6 +121,7 @@ public class PatternReplaceCharFilter extends BaseCharFilter {
return -1;
}
+ @Override
public int read(char[] cbuf, int off, int len) throws IOException {
char[] tmp = new char[len];
int l = input.read(tmp, 0, len);
diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java
index ea8ba38c996..a74b3f8e9e7 100644
--- a/modules/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java
+++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java
@@ -78,6 +78,7 @@ public class SynonymMap {
}
+ @Override
public String toString() {
StringBuilder sb = new StringBuilder("<");
if (synonyms!=null) {
diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java
index 946f9787c4c..9f3a28ad638 100644
--- a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java
+++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java
@@ -47,6 +47,7 @@ public class TestRemoveDuplicatesTokenFilter extends BaseTokenStreamTestCase {
CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
+ @Override
public boolean incrementToken() {
if (toks.hasNext()) {
clearAttributes();
diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java
index 4e65f9b11a4..256cbacd1ca 100644
--- a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java
+++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java
@@ -87,6 +87,7 @@ public class TestTrimFilter extends BaseTokenStreamTestCase {
this(tokens.toArray(new Token[tokens.size()]));
}
+ @Override
public boolean incrementToken() throws IOException {
if (index >= tokens.length)
return false;
diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java
index c784130d439..3d081184e58 100644
--- a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java
+++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java
@@ -213,6 +213,7 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
/* analyzer that uses whitespace + wdf */
Analyzer a = new Analyzer() {
+ @Override
public TokenStream tokenStream(String field, Reader reader) {
return new WordDelimiterFilter(
new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader),
@@ -239,6 +240,7 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
/* analyzer that will consume tokens with large position increments */
Analyzer a2 = new Analyzer() {
+ @Override
public TokenStream tokenStream(String field, Reader reader) {
return new WordDelimiterFilter(
new LargePosIncTokenFilter(
@@ -271,6 +273,7 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
new int[] { 1, 11, 1 });
Analyzer a3 = new Analyzer() {
+ @Override
public TokenStream tokenStream(String field, Reader reader) {
StopFilter filter = new StopFilter(TEST_VERSION_CURRENT,
new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), StandardAnalyzer.STOP_WORDS_SET);
diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymFilter.java
index 2c68e047abf..29c26d6ff2b 100644
--- a/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymFilter.java
+++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymFilter.java
@@ -395,6 +395,7 @@ public class TestSynonymFilter extends BaseTokenStreamTestCase {
this(tokens.toArray(new Token[tokens.size()]));
}
+ @Override
public boolean incrementToken() throws IOException {
if (index >= tokens.length)
return false;
diff --git a/solr/contrib/clustering/src/main/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java b/solr/contrib/clustering/src/main/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java
index 126ea09c744..b0cb1981d89 100644
--- a/solr/contrib/clustering/src/main/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java
+++ b/solr/contrib/clustering/src/main/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java
@@ -77,6 +77,7 @@ public class CarrotClusteringEngine extends SearchClusteringEngine {
private String idFieldName;
+ @Override
@Deprecated
public Object cluster(Query query, DocList docList, SolrQueryRequest sreq) {
SolrIndexSearcher searcher = sreq.getSearcher();
@@ -90,6 +91,7 @@ public class CarrotClusteringEngine extends SearchClusteringEngine {
}
}
+ @Override
public Object cluster(Query query, SolrDocumentList solrDocList,
Map docIds, SolrQueryRequest sreq) {
try {
diff --git a/solr/contrib/clustering/src/main/java/org/apache/solr/handler/clustering/carrot2/LuceneLanguageModelFactory.java b/solr/contrib/clustering/src/main/java/org/apache/solr/handler/clustering/carrot2/LuceneLanguageModelFactory.java
index 8efd3ab1be6..857fccf48f3 100644
--- a/solr/contrib/clustering/src/main/java/org/apache/solr/handler/clustering/carrot2/LuceneLanguageModelFactory.java
+++ b/solr/contrib/clustering/src/main/java/org/apache/solr/handler/clustering/carrot2/LuceneLanguageModelFactory.java
@@ -69,7 +69,8 @@ public class LuceneLanguageModelFactory extends DefaultLanguageModelFactory {
/**
* Provide an {@link IStemmer} implementation for a given language.
*/
- protected IStemmer createStemmer(LanguageCode language) {
+ @Override
+ protected IStemmer createStemmer(LanguageCode language) {
switch (language) {
case ARABIC:
return ArabicStemmerFactory.createStemmer();
diff --git a/solr/contrib/clustering/src/test/java/org/apache/solr/handler/clustering/MockDocumentClusteringEngine.java b/solr/contrib/clustering/src/test/java/org/apache/solr/handler/clustering/MockDocumentClusteringEngine.java
index 90f0ab73e5a..77b3fcfba06 100644
--- a/solr/contrib/clustering/src/test/java/org/apache/solr/handler/clustering/MockDocumentClusteringEngine.java
+++ b/solr/contrib/clustering/src/test/java/org/apache/solr/handler/clustering/MockDocumentClusteringEngine.java
@@ -25,11 +25,13 @@ import org.apache.solr.search.DocSet;
*
**/
public class MockDocumentClusteringEngine extends DocumentClusteringEngine {
+ @Override
public NamedList cluster(DocSet docs, SolrParams solrParams) {
NamedList result = new NamedList();
return result;
}
+ @Override
public NamedList cluster(SolrParams solrParams) {
NamedList result = new NamedList();
return result;
diff --git a/solr/contrib/dataimporthandler/src/extras/main/java/org/apache/solr/handler/dataimport/MailEntityProcessor.java b/solr/contrib/dataimporthandler/src/extras/main/java/org/apache/solr/handler/dataimport/MailEntityProcessor.java
index 5b9374c744a..0231d1ec7e3 100644
--- a/solr/contrib/dataimporthandler/src/extras/main/java/org/apache/solr/handler/dataimport/MailEntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/extras/main/java/org/apache/solr/handler/dataimport/MailEntityProcessor.java
@@ -51,6 +51,7 @@ public class MailEntityProcessor extends EntityProcessorBase {
public SearchTerm getCustomSearch(Folder folder);
}
+ @Override
public void init(Context context) {
super.init(context);
// set attributes using XXX getXXXFromContext(attribute, defualtValue);
@@ -95,6 +96,7 @@ public class MailEntityProcessor extends EntityProcessorBase {
logConfig();
}
+ @Override
public Map nextRow() {
Message mail;
Map row = null;
diff --git a/solr/contrib/dataimporthandler/src/extras/main/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java b/solr/contrib/dataimporthandler/src/extras/main/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java
index 4e4ad41afbe..b7b31ab5abb 100644
--- a/solr/contrib/dataimporthandler/src/extras/main/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/extras/main/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java
@@ -92,6 +92,7 @@ public class TikaEntityProcessor extends EntityProcessorBase {
done = false;
}
+ @Override
public Map nextRow() {
if(done) return null;
Map row = new HashMap();
diff --git a/solr/contrib/dataimporthandler/src/extras/test/java/org/apache/solr/handler/dataimport/TestMailEntityProcessor.java b/solr/contrib/dataimporthandler/src/extras/test/java/org/apache/solr/handler/dataimport/TestMailEntityProcessor.java
index 8a1f1083d4e..2ac19b32192 100644
--- a/solr/contrib/dataimporthandler/src/extras/test/java/org/apache/solr/handler/dataimport/TestMailEntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/extras/test/java/org/apache/solr/handler/dataimport/TestMailEntityProcessor.java
@@ -191,18 +191,22 @@ public class TestMailEntityProcessor extends AbstractDataImportHandlerTestCase {
super(null, ".", null);
}
+ @Override
public boolean upload(SolrInputDocument doc) {
return docs.add(doc);
}
+ @Override
public void log(int event, String name, Object row) {
// Do nothing
}
+ @Override
public void doDeleteAll() {
deleteAllCalled = Boolean.TRUE;
}
+ @Override
public void commit(boolean b) {
commitCalled = Boolean.TRUE;
}
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinContentStreamDataSource.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinContentStreamDataSource.java
index 221d8eacbc7..1187f65e92d 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinContentStreamDataSource.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinContentStreamDataSource.java
@@ -39,10 +39,12 @@ public class BinContentStreamDataSource extends DataSource {
private InputStream in;
+ @Override
public void init(Context context, Properties initProps) {
this.context = (ContextImpl) context;
}
+ @Override
public InputStream getData(String query) {
contentStream = context.getDocBuilder().requestParameters.contentStream;
if (contentStream == null)
@@ -55,6 +57,7 @@ public class BinContentStreamDataSource extends DataSource {
}
}
+ @Override
public void close() {
if (contentStream != null) {
try {
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinFileDataSource.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinFileDataSource.java
index 4d4cdebc62b..e473ab8da32 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinFileDataSource.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinFileDataSource.java
@@ -43,10 +43,12 @@ import java.util.Properties;
public class BinFileDataSource extends DataSource{
protected String basePath;
+ @Override
public void init(Context context, Properties initProps) {
basePath = initProps.getProperty(FileDataSource.BASE_PATH);
}
+ @Override
public InputStream getData(String query) {
File f = FileDataSource.getFile(basePath,query);
try {
@@ -57,6 +59,7 @@ public class BinFileDataSource extends DataSource{
}
}
+ @Override
public void close() {
}
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinURLDataSource.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinURLDataSource.java
index 9d4d879c2ce..045d6fa3bd0 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinURLDataSource.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinURLDataSource.java
@@ -49,6 +49,7 @@ public class BinURLDataSource extends DataSource{
public BinURLDataSource() { }
+ @Override
public void init(Context context, Properties initProps) {
this.context = context;
this.initProps = initProps;
@@ -72,6 +73,7 @@ public class BinURLDataSource extends DataSource{
}
}
+ @Override
public InputStream getData(String query) {
URL url = null;
try {
@@ -89,6 +91,7 @@ public class BinURLDataSource extends DataSource{
}
}
+ @Override
public void close() { }
private String getInitPropWithReplacements(String propertyName) {
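The DataSource contract exercised by these hunks is just init(Context, Properties), getData(String), and close(). A hedged sketch of a custom source (hypothetical StringDataSource, not part of this patch) that serves fixed text as a Reader:

  import java.io.Reader;
  import java.io.StringReader;
  import java.util.Properties;

  public class StringDataSource extends DataSource<Reader> {
    private String text;
    @Override
    public void init(Context context, Properties initProps) {
      text = initProps.getProperty("text", "");  // "text" is an assumed property name
    }
    @Override
    public Reader getData(String query) {
      return new StringReader(text);             // ignores the query
    }
    @Override
    public void close() { }
  }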
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/CachedSqlEntityProcessor.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/CachedSqlEntityProcessor.java
index 69b7b2b6f18..864e772288e 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/CachedSqlEntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/CachedSqlEntityProcessor.java
@@ -38,6 +38,7 @@ import java.util.Map;
public class CachedSqlEntityProcessor extends SqlEntityProcessor {
private boolean isFirst;
+ @Override
@SuppressWarnings("unchecked")
public void init(Context context) {
super.init(context);
@@ -45,6 +46,7 @@ public class CachedSqlEntityProcessor extends SqlEntityProcessor {
isFirst = true;
}
+ @Override
public Map nextRow() {
if (dataSourceRowCache != null)
return getFromRowCacheTransformed();
@@ -60,6 +62,7 @@ public class CachedSqlEntityProcessor extends SqlEntityProcessor {
}
+ @Override
protected List<Map<String, Object>> getAllNonCachedRows() {
List<Map<String, Object>> rows = new ArrayList<Map<String, Object>>();
String q = getQuery();
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ClobTransformer.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ClobTransformer.java
index 5ebd3baa5dd..ae970d25fdb 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ClobTransformer.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ClobTransformer.java
@@ -37,6 +37,7 @@ import java.util.Map;
* @since solr 1.4
*/
public class ClobTransformer extends Transformer {
+ @Override
public Object transformRow(Map aRow, Context context) {
for (Map map : context.getAllEntityFields()) {
if (!TRUE.equals(map.get(CLOB))) continue;
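Every transformer in these hunks hangs off the single transformRow(Map, Context) hook. A minimal hypothetical example (LowerCaseTransformer, not part of this patch; assumes the usual DataImporter.COLUMN key and a lowerCase="true" field attribute):

  public class LowerCaseTransformer extends Transformer {
    @Override
    public Object transformRow(Map<String, Object> row, Context context) {
      for (Map<String, String> field : context.getAllEntityFields()) {
        if (!"true".equals(field.get("lowerCase"))) continue;   // assumed attribute
        String column = field.get(DataImporter.COLUMN);
        Object val = row.get(column);
        if (val != null) row.put(column, val.toString().toLowerCase());
      }
      return row;
    }
  }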
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ContentStreamDataSource.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ContentStreamDataSource.java
index 3b55fd6cf5e..58ed19ed595 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ContentStreamDataSource.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ContentStreamDataSource.java
@@ -39,10 +39,12 @@ public class ContentStreamDataSource extends DataSource {
private ContentStream contentStream;
private Reader reader;
+ @Override
public void init(Context context, Properties initProps) {
this.context = (ContextImpl) context;
}
+ @Override
public Reader getData(String query) {
contentStream = context.getDocBuilder().requestParameters.contentStream;
if (contentStream == null)
@@ -55,6 +57,7 @@ public class ContentStreamDataSource extends DataSource {
}
}
+ @Override
public void close() {
if (contentStream != null) {
try {
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ContextImpl.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ContextImpl.java
index 6dfa48276c9..bd726835e2d 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ContextImpl.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ContextImpl.java
@@ -71,22 +71,27 @@ public class ContextImpl extends Context {
parent = parentContext;
}
+ @Override
public String getEntityAttribute(String name) {
return entity == null ? null : entity.allAttributes.get(name);
}
+ @Override
public String getResolvedEntityAttribute(String name) {
return entity == null ? null : resolver.replaceTokens(entity.allAttributes.get(name));
}
+ @Override
public List<Map<String, String>> getAllEntityFields() {
return entity == null ? Collections.EMPTY_LIST : entity.allFieldsList;
}
+ @Override
public VariableResolver getVariableResolver() {
return resolver;
}
+ @Override
public DataSource getDataSource() {
if (ds != null) return ds;
if(entity == null) return null;
@@ -101,26 +106,32 @@ public class ContextImpl extends Context {
return entity.dataSrc;
}
+ @Override
public DataSource getDataSource(String name) {
return dataImporter.getDataSourceInstance(entity, name, this);
}
+ @Override
public boolean isRootEntity() {
return entity.isDocRoot;
}
+ @Override
public String currentProcess() {
return currProcess;
}
+ @Override
public Map getRequestParameters() {
return requestParams;
}
+ @Override
public EntityProcessor getEntityProcessor() {
return entity == null ? null : entity.processor;
}
+ @Override
public void setSessionAttribute(String name, Object val, String scope) {
if(name == null) return;
if (Context.SCOPE_ENTITY.equals(scope)) {
@@ -148,6 +159,7 @@ public class ContextImpl extends Context {
else entitySession.put(name, val);
}
+ @Override
public Object getSessionAttribute(String name, String scope) {
if (Context.SCOPE_ENTITY.equals(scope)) {
if (entitySession == null)
@@ -166,6 +178,7 @@ public class ContextImpl extends Context {
return null;
}
+ @Override
public Context getParentContext() {
return parent;
}
@@ -187,15 +200,18 @@ public class ContextImpl extends Context {
}
+ @Override
public SolrCore getSolrCore() {
return dataImporter == null ? null : dataImporter.getCore();
}
+ @Override
public Map getStats() {
return docBuilder != null ? docBuilder.importStatistics.getStatsSnapshot() : Collections.emptyMap();
}
+ @Override
public String getScript() {
if(dataImporter != null) {
DataConfig.Script script = dataImporter.getConfig().script;
@@ -204,6 +220,7 @@ public class ContextImpl extends Context {
return null;
}
+ @Override
public String getScriptLanguage() {
if (dataImporter != null) {
DataConfig.Script script = dataImporter.getConfig().script;
@@ -212,12 +229,14 @@ public class ContextImpl extends Context {
return null;
}
+ @Override
public void deleteDoc(String id) {
if(docBuilder != null){
docBuilder.writer.deleteDoc(id);
}
}
+ @Override
public void deleteDocByQuery(String query) {
if(docBuilder != null){
docBuilder.writer.deleteByQuery(query);
@@ -227,10 +246,12 @@ public class ContextImpl extends Context {
DocBuilder getDocBuilder(){
return docBuilder;
}
+ @Override
public Object resolve(String var) {
return resolver.resolve(var);
}
+ @Override
public String replaceTokens(String template) {
return resolver.replaceTokens(template);
}
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DataConfig.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DataConfig.java
index f548ff648a9..f0a9e412427 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DataConfig.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DataConfig.java
@@ -214,6 +214,7 @@ public class DataConfig {
public Map<String, String> allAttributes = new HashMap<String, String>() {
+ @Override
public String put(String key, String value) {
if (super.containsKey(key))
return super.get(key);
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DataImporter.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DataImporter.java
index 6d9206d43ed..45f8fcf1598 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DataImporter.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DataImporter.java
@@ -423,6 +423,7 @@ public class DataImporter {
}
static final ThreadLocal<AtomicLong> QUERY_COUNT = new ThreadLocal<AtomicLong>() {
+ @Override
protected AtomicLong initialValue() {
return new AtomicLong();
}
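The QUERY_COUNT idiom above gives each thread its own lazily created counter; the same pattern in isolation:

  // Each thread gets its own AtomicLong on first get().
  static final ThreadLocal<AtomicLong> COUNTER = new ThreadLocal<AtomicLong>() {
    @Override
    protected AtomicLong initialValue() {
      return new AtomicLong();
    }
  };
  // usage: COUNTER.get().incrementAndGet();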
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DateFormatTransformer.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DateFormatTransformer.java
index e2c6e221095..95c601e8911 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DateFormatTransformer.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DateFormatTransformer.java
@@ -45,6 +45,7 @@ public class DateFormatTransformer extends Transformer {
private static final Logger LOG = LoggerFactory
.getLogger(DateFormatTransformer.class);
+ @Override
@SuppressWarnings("unchecked")
public Object transformRow(Map aRow, Context context) {
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DebugLogger.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DebugLogger.java
index 77c1ea7669e..8ee0126dc25 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DebugLogger.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DebugLogger.java
@@ -60,6 +60,7 @@ class DebugLogger {
output = new NamedList();
debugStack = new Stack<DebugInfo>() {
+ @Override
public DebugInfo pop() {
if (size() == 1)
throw new DataImportHandlerException(
@@ -169,14 +170,17 @@ class DebugLogger {
DataSource wrapDs(final DataSource ds) {
return new DataSource() {
+ @Override
public void init(Context context, Properties initProps) {
ds.init(context, initProps);
}
+ @Override
public void close() {
ds.close();
}
+ @Override
public Object getData(String query) {
writer.log(SolrWriter.ENTITY_META, "query", query);
long start = System.currentTimeMillis();
@@ -203,6 +207,7 @@ class DebugLogger {
Transformer wrapTransformer(final Transformer t) {
return new Transformer() {
+ @Override
public Object transformRow(Map row, Context context) {
writer.log(SolrWriter.PRE_TRANSFORMER_ROW, null, row);
String tName = getTransformerName(t);
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DocBuilder.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DocBuilder.java
index e94ba8947f8..42bf6da9499 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DocBuilder.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DocBuilder.java
@@ -139,6 +139,7 @@ public class DocBuilder {
document = dataImporter.getConfig().document;
final AtomicLong startTime = new AtomicLong(System.currentTimeMillis());
statusMessages.put(TIME_ELAPSED, new Object() {
+ @Override
public String toString() {
return getTimeElapsedSince(startTime.get());
}
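TIME_ELAPSED above is an anonymous Object whose toString() is evaluated only when the status is rendered, so the message always reflects the current elapsed time. The trick in isolation:

  final long start = System.currentTimeMillis();
  Object elapsed = new Object() {
    @Override
    public String toString() {                 // recomputed at print time, not at store time
      return (System.currentTimeMillis() - start) + " ms";
    }
  };
  System.out.println(elapsed);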
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorBase.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorBase.java
index c2294bd6549..5d761194440 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorBase.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorBase.java
@@ -49,6 +49,7 @@ public class EntityProcessorBase extends EntityProcessor {
protected String onError = ABORT;
+ @Override
public void init(Context context) {
rowIterator = null;
this.context = context;
@@ -86,14 +87,17 @@ public class EntityProcessorBase extends EntityProcessor {
}
}
+ @Override
public Map nextModifiedRowKey() {
return null;
}
+ @Override
public Map nextDeletedRowKey() {
return null;
}
+ @Override
public Map nextModifiedParentRowKey() {
return null;
}
@@ -105,11 +109,13 @@ public class EntityProcessorBase extends EntityProcessor {
* @return a row where the key is the name of the field and value can be any Object or a Collection of objects. Return
* null to signal end of rows
*/
+ @Override
public Map nextRow() {
return null;// do not do anything
}
+ @Override
public void destroy() {
/*no op*/
}
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java
index c46ddcf9e6d..432e64ac767 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java
@@ -54,6 +54,7 @@ public class EntityProcessorWrapper extends EntityProcessor {
this.docBuilder = docBuilder;
}
+ @Override
public void init(Context context) {
rowcache = null;
this.context = context;
@@ -79,6 +80,7 @@ public class EntityProcessorWrapper extends EntityProcessor {
String[] transArr = transClasses.split(",");
transformers = new ArrayList<Transformer>() {
+ @Override
public boolean add(Transformer transformer) {
if (docBuilder != null && docBuilder.verboseDebug) {
transformer = docBuilder.writer.getDebugLogger().wrapTransformer(transformer);
@@ -135,6 +137,7 @@ public class EntityProcessorWrapper extends EntityProcessor {
o = clazz.newInstance();
}
+ @Override
public Object transformRow(Map aRow, Context context) {
try {
return meth.invoke(o, aRow);
@@ -223,6 +226,7 @@ public class EntityProcessorWrapper extends EntityProcessor {
&& Boolean.parseBoolean(oMap.get("$stopTransform").toString());
}
+ @Override
public Map nextRow() {
if (rowcache != null) {
return getFromRowCache();
@@ -252,6 +256,7 @@ public class EntityProcessorWrapper extends EntityProcessor {
}
}
+ @Override
public Map nextModifiedRowKey() {
Map row = delegate.nextModifiedRowKey();
row = applyTransformer(row);
@@ -259,6 +264,7 @@ public class EntityProcessorWrapper extends EntityProcessor {
return row;
}
+ @Override
public Map nextDeletedRowKey() {
Map row = delegate.nextDeletedRowKey();
row = applyTransformer(row);
@@ -266,10 +272,12 @@ public class EntityProcessorWrapper extends EntityProcessor {
return row;
}
+ @Override
public Map nextModifiedParentRowKey() {
return delegate.nextModifiedParentRowKey();
}
+ @Override
public void destroy() {
delegate.destroy();
}
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EvaluatorBag.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EvaluatorBag.java
index 9c4321a97d7..24e728d82c0 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EvaluatorBag.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EvaluatorBag.java
@@ -66,6 +66,7 @@ public class EvaluatorBag {
*/
public static Evaluator getSqlEscapingEvaluator() {
return new Evaluator() {
+ @Override
public String evaluate(String expression, Context context) {
List l = parseParams(expression, context.getVariableResolver());
if (l.size() != 1) {
@@ -90,6 +91,7 @@ public class EvaluatorBag {
*/
public static Evaluator getSolrQueryEscapingEvaluator() {
return new Evaluator() {
+ @Override
public String evaluate(String expression, Context context) {
List l = parseParams(expression, context.getVariableResolver());
if (l.size() != 1) {
@@ -109,6 +111,7 @@ public class EvaluatorBag {
*/
public static Evaluator getUrlEvaluator() {
return new Evaluator() {
+ @Override
public String evaluate(String expression, Context context) {
List l = parseParams(expression, context.getVariableResolver());
if (l.size() != 1) {
@@ -138,6 +141,7 @@ public class EvaluatorBag {
*/
public static Evaluator getDateFormatEvaluator() {
return new Evaluator() {
+ @Override
public String evaluate(String expression, Context context) {
List l = parseParams(expression, context.getVariableResolver());
if (l.size() != 2) {
@@ -288,6 +292,7 @@ public class EvaluatorBag {
}
+ @Override
public String toString() {
Object o = vr.resolve(varName);
return o == null ? null : o.toString();
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FieldReaderDataSource.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FieldReaderDataSource.java
index 8b2ae93c12c..b9d9ec74ab9 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FieldReaderDataSource.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FieldReaderDataSource.java
@@ -52,6 +52,7 @@ public class FieldReaderDataSource extends DataSource {
private String encoding;
private EntityProcessorWrapper entityProcessor;
+ @Override
public void init(Context context, Properties initProps) {
dataField = context.getEntityAttribute("dataField");
encoding = context.getEntityAttribute("encoding");
@@ -59,6 +60,7 @@ public class FieldReaderDataSource extends DataSource {
/*no op*/
}
+ @Override
public Reader getData(String query) {
Object o = entityProcessor.getVariableResolver().resolve(dataField);
if (o == null) {
@@ -111,6 +113,7 @@ public class FieldReaderDataSource extends DataSource {
}
}
+ @Override
public void close() {
}
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FieldStreamDataSource.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FieldStreamDataSource.java
index f92f7cb4b15..132367cc0fa 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FieldStreamDataSource.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FieldStreamDataSource.java
@@ -52,12 +52,14 @@ public class FieldStreamDataSource extends DataSource {
protected String dataField;
private EntityProcessorWrapper wrapper;
+ @Override
public void init(Context context, Properties initProps) {
dataField = context.getEntityAttribute("dataField");
wrapper = (EntityProcessorWrapper) context.getEntityProcessor();
/*no op*/
}
+ @Override
public InputStream getData(String query) {
Object o = wrapper.getVariableResolver().resolve(dataField);
if (o == null) {
@@ -90,6 +92,7 @@ public class FieldStreamDataSource extends DataSource {
}
+ @Override
public void close() {
}
}
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FileDataSource.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FileDataSource.java
index 64353ef3fa4..2f5a5aa1e2e 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FileDataSource.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FileDataSource.java
@@ -59,6 +59,7 @@ public class FileDataSource extends DataSource {
private static final Logger LOG = LoggerFactory.getLogger(FileDataSource.class);
+ @Override
public void init(Context context, Properties initProps) {
basePath = initProps.getProperty(BASE_PATH);
if (initProps.get(URLDataSource.ENCODING) != null)
@@ -79,6 +80,7 @@ public class FileDataSource extends DataSource {
* returned Reader
*
*/
+ @Override
public Reader getData(String query) {
File f = getFile(basePath,query);
try {
@@ -130,6 +132,7 @@ public class FileDataSource extends DataSource {
}
}
+ @Override
public void close() {
}
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FileListEntityProcessor.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FileListEntityProcessor.java
index 72924176731..7549af7dfbd 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FileListEntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FileListEntityProcessor.java
@@ -106,6 +106,7 @@ public class FileListEntityProcessor extends EntityProcessorBase {
private Pattern fileNamePattern, excludesPattern;
+ @Override
public void init(Context context) {
super.init(context);
fileName = context.getEntityAttribute(FILE_NAME);
@@ -195,6 +196,7 @@ public class FileListEntityProcessor extends EntityProcessorBase {
return Long.parseLong(sizeStr);
}
+ @Override
public Map nextRow() {
if (rowIterator != null)
return getNext();
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/JdbcDataSource.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/JdbcDataSource.java
index cb38e480bf0..f48ca2cda86 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/JdbcDataSource.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/JdbcDataSource.java
@@ -54,6 +54,7 @@ public class JdbcDataSource extends
private int maxRows = 0;
+ @Override
public void init(Context context, Properties initProps) {
Object o = initProps.get(CONVERT_TYPE);
if (o != null)
@@ -204,6 +205,7 @@ public class JdbcDataSource extends
}
}
+ @Override
public Iterator<Map<String, Object>> getData(String query) {
ResultSetIterator r = new ResultSetIterator(query);
return r.getIterator();
@@ -370,6 +372,7 @@ public class JdbcDataSource extends
}
}
+ @Override
protected void finalize() throws Throwable {
try {
if(!isClosed){
@@ -383,6 +386,7 @@ public class JdbcDataSource extends
private boolean isClosed = false;
+ @Override
public void close() {
try {
closeConnection();
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/LineEntityProcessor.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/LineEntityProcessor.java
index 0a8b201ab42..30e366316f7 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/LineEntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/LineEntityProcessor.java
@@ -64,6 +64,7 @@ public class LineEntityProcessor extends EntityProcessorBase {
/**
* Parses each of the entity attributes.
*/
+ @Override
public void init(Context context) {
super.init(context);
String s;
@@ -97,6 +98,7 @@ public class LineEntityProcessor extends EntityProcessorBase {
* from the url. However transformers can be used to create as
* many other fields as required.
*/
+ @Override
public Map nextRow() {
if (reader == null) {
reader = new BufferedReader((Reader) context.getDataSource().getData(url));
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/LogTransformer.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/LogTransformer.java
index d9d2f115d24..5a603a74049 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/LogTransformer.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/LogTransformer.java
@@ -35,6 +35,7 @@ import java.util.Map;
public class LogTransformer extends Transformer {
Logger LOG = LoggerFactory.getLogger(LogTransformer.class);
+ @Override
public Object transformRow(Map<String, Object> row, Context ctx) {
String expr = ctx.getEntityAttribute(LOG_TEMPLATE);
String level = ctx.replaceTokens(ctx.getEntityAttribute(LOG_LEVEL));
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/MockDataSource.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/MockDataSource.java
index 7b747d72a56..6fd7213b5a7 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/MockDataSource.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/MockDataSource.java
@@ -45,13 +45,16 @@ public class MockDataSource extends
cache.clear();
}
+ @Override
public void init(Context context, Properties initProps) {
}
+ @Override
public Iterator<Map<String, Object>> getData(String query) {
return cache.get(query);
}
+ @Override
public void close() {
cache.clear();
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/NumberFormatTransformer.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/NumberFormatTransformer.java
index d38ab75fcb0..36efecf5320 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/NumberFormatTransformer.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/NumberFormatTransformer.java
@@ -49,6 +49,7 @@ public class NumberFormatTransformer extends Transformer {
private static final Pattern localeRegex = Pattern.compile("^([a-z]{2})-([A-Z]{2})$");
+ @Override
@SuppressWarnings("unchecked")
public Object transformRow(Map<String, Object> row, Context context) {
for (Map fld : context.getAllEntityFields()) {
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/PlainTextEntityProcessor.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/PlainTextEntityProcessor.java
index 79a981875bc..2d32eee0122 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/PlainTextEntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/PlainTextEntityProcessor.java
@@ -40,11 +40,13 @@ public class PlainTextEntityProcessor extends EntityProcessorBase {
private static final Logger LOG = LoggerFactory.getLogger(PlainTextEntityProcessor.class);
private boolean ended = false;
+ @Override
public void init(Context context) {
super.init(context);
ended = false;
}
+ @Override
public Map<String, Object> nextRow() {
if (ended) return null;
DataSource ds = context.getDataSource();
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/RegexTransformer.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/RegexTransformer.java
index e5910093e73..429bb0cf2f3 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/RegexTransformer.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/RegexTransformer.java
@@ -43,6 +43,7 @@ import java.util.regex.Pattern;
public class RegexTransformer extends Transformer {
private static final Logger LOG = LoggerFactory.getLogger(RegexTransformer.class);
+ @Override
@SuppressWarnings("unchecked")
public Map<String, Object> transformRow(Map<String, Object> row,
Context ctx) {
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ScriptTransformer.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ScriptTransformer.java
index ba06f49b91d..547fc66cf2a 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ScriptTransformer.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ScriptTransformer.java
@@ -47,6 +47,7 @@ public class ScriptTransformer extends Transformer {
private String functionName;
+ @Override
public Object transformRow(Map<String, Object> row, Context context) {
try {
if (engine == null)
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/SqlEntityProcessor.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/SqlEntityProcessor.java
index 925a9569bf2..1748998720b 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/SqlEntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/SqlEntityProcessor.java
@@ -46,6 +46,7 @@ public class SqlEntityProcessor extends EntityProcessorBase {
protected DataSource<Iterator<Map<String, Object>>> dataSource;
+ @Override
@SuppressWarnings("unchecked")
public void init(Context context) {
super.init(context);
@@ -65,6 +66,7 @@ public class SqlEntityProcessor extends EntityProcessorBase {
}
}
+ @Override
public Map<String, Object> nextRow() {
if (rowIterator == null) {
String q = getQuery();
@@ -73,6 +75,7 @@ public class SqlEntityProcessor extends EntityProcessorBase {
return getNext();
}
+ @Override
public Map<String, Object> nextModifiedRowKey() {
if (rowIterator == null) {
String deltaQuery = context.getEntityAttribute(DELTA_QUERY);
@@ -83,6 +86,7 @@ public class SqlEntityProcessor extends EntityProcessorBase {
return getNext();
}
+ @Override
public Map<String, Object> nextDeletedRowKey() {
if (rowIterator == null) {
String deletedPkQuery = context.getEntityAttribute(DEL_PK_QUERY);
@@ -93,6 +97,7 @@ public class SqlEntityProcessor extends EntityProcessorBase {
return getNext();
}
+ @Override
public Map<String, Object> nextModifiedParentRowKey() {
if (rowIterator == null) {
String parentDeltaQuery = context.getEntityAttribute(PARENT_DELTA_QUERY);
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/TemplateTransformer.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/TemplateTransformer.java
index 8c5527983ff..6fd0665c700 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/TemplateTransformer.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/TemplateTransformer.java
@@ -51,6 +51,7 @@ public class TemplateTransformer extends Transformer {
private static final Logger LOG = LoggerFactory.getLogger(TemplateTransformer.class);
private Map<String, List<String>> templateVsVars = new HashMap<String, List<String>>();
+ @Override
@SuppressWarnings("unchecked")
public Object transformRow(Map<String, Object> row, Context context) {
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/URLDataSource.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/URLDataSource.java
index 274c120b270..234fb56f264 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/URLDataSource.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/URLDataSource.java
@@ -56,6 +56,7 @@ public class URLDataSource extends DataSource<Reader> {
public URLDataSource() {
}
+ @Override
public void init(Context context, Properties initProps) {
this.context = context;
this.initProps = initProps;
@@ -81,6 +82,7 @@ public class URLDataSource extends DataSource {
}
}
+ @Override
public Reader getData(String query) {
URL url = null;
try {
@@ -114,6 +116,7 @@ public class URLDataSource extends DataSource {
}
}
+ @Override
public void close() {
}
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/VariableResolverImpl.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/VariableResolverImpl.java
index b0675cc4627..8d39dd13bde 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/VariableResolverImpl.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/VariableResolverImpl.java
@@ -91,10 +91,12 @@ public class VariableResolverImpl extends VariableResolver {
container.remove(name);
}
+ @Override
public String replaceTokens(String template) {
return templateString.replaceTokens(template, this);
}
+ @Override
@SuppressWarnings("unchecked")
public Object resolve(String name) {
if (name == null)
diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/XPathEntityProcessor.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/XPathEntityProcessor.java
index 26196788dfe..e995fab3442 100644
--- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/XPathEntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/XPathEntityProcessor.java
@@ -80,6 +80,7 @@ public class XPathEntityProcessor extends EntityProcessorBase {
protected Thread publisherThread;
+ @Override
@SuppressWarnings("unchecked")
public void init(Context context) {
super.init(context);
@@ -171,6 +172,7 @@ public class XPathEntityProcessor extends EntityProcessorBase {
}
+ @Override
public Map<String, Object> nextRow() {
Map<String, Object> result;
@@ -398,6 +400,7 @@ public class XPathEntityProcessor extends EntityProcessorBase {
final AtomicBoolean isEnd = new AtomicBoolean(false);
final AtomicBoolean throwExp = new AtomicBoolean(true);
publisherThread = new Thread() {
+ @Override
public void run() {
try {
xpathReader.streamRecords(data, new XPathRecordReader.Handler() {
diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/AbstractDataImportHandlerTestCase.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/AbstractDataImportHandlerTestCase.java
index 07f78d0b412..8c3791f224a 100644
--- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/AbstractDataImportHandlerTestCase.java
+++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/AbstractDataImportHandlerTestCase.java
@@ -197,89 +197,110 @@ public abstract class AbstractDataImportHandlerTestCase extends
this.root = root;
}
+ @Override
public String getEntityAttribute(String name) {
return entityAttrs == null ? delegate.getEntityAttribute(name) : entityAttrs.get(name);
}
+ @Override
public String getResolvedEntityAttribute(String name) {
return entityAttrs == null ? delegate.getResolvedEntityAttribute(name) :
delegate.getVariableResolver().replaceTokens(entityAttrs.get(name));
}
+ @Override
public List<Map<String, String>> getAllEntityFields() {
return entityFields == null ? delegate.getAllEntityFields()
: entityFields;
}
+ @Override
public VariableResolver getVariableResolver() {
return delegate.getVariableResolver();
}
+ @Override
public DataSource getDataSource() {
return delegate.getDataSource();
}
+ @Override
public boolean isRootEntity() {
return root;
}
+ @Override
public String currentProcess() {
return delegate.currentProcess();
}
+ @Override
public Map<String, Object> getRequestParameters() {
return delegate.getRequestParameters();
}
+ @Override
public EntityProcessor getEntityProcessor() {
return null;
}
+ @Override
public void setSessionAttribute(String name, Object val, String scope) {
delegate.setSessionAttribute(name, val, scope);
}
+ @Override
public Object getSessionAttribute(String name, String scope) {
return delegate.getSessionAttribute(name, scope);
}
+ @Override
public Context getParentContext() {
return delegate.getParentContext();
}
+ @Override
public DataSource getDataSource(String name) {
return delegate.getDataSource(name);
}
+ @Override
public SolrCore getSolrCore() {
return delegate.getSolrCore();
}
+ @Override
public Map<String, Object> getStats() {
return delegate.getStats();
}
+ @Override
public String getScript() {
return script == null ? delegate.getScript() : script;
}
+ @Override
public String getScriptLanguage() {
return scriptlang == null ? delegate.getScriptLanguage() : scriptlang;
}
+ @Override
public void deleteDoc(String id) {
}
+ @Override
public void deleteDocByQuery(String query) {
}
+ @Override
public Object resolve(String var) {
return delegate.resolve(var);
}
+ @Override
public String replaceTokens(String template) {
return delegate.replaceTokens(template);
}
@@ -318,31 +339,37 @@ public abstract class AbstractDataImportHandlerTestCase extends
reset();
}
+ @Override
public void finish() throws IOException {
finishCalled = true;
super.finish();
}
+ @Override
public void processAdd(AddUpdateCommand cmd) throws IOException {
processAddCalled = true;
super.processAdd(cmd);
}
+ @Override
public void processCommit(CommitUpdateCommand cmd) throws IOException {
processCommitCalled = true;
super.processCommit(cmd);
}
+ @Override
public void processDelete(DeleteUpdateCommand cmd) throws IOException {
processDeleteCalled = true;
super.processDelete(cmd);
}
+ @Override
public void processMergeIndexes(MergeIndexesCommand cmd) throws IOException {
mergeIndexesCalled = true;
super.processMergeIndexes(cmd);
}
+ @Override
public void processRollback(RollbackUpdateCommand cmd) throws IOException {
rollbackCalled = true;
super.processRollback(cmd);
diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestCachedSqlEntityProcessor.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestCachedSqlEntityProcessor.java
index ceda6edd3ec..0037d6796ca 100644
--- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestCachedSqlEntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestCachedSqlEntityProcessor.java
@@ -158,6 +158,7 @@ public class TestCachedSqlEntityProcessor extends AbstractDataImportHandlerTestC
public static class DoubleTransformer extends Transformer {
+ @Override
public Object transformRow(Map<String, Object> row, Context context) {
List<Map<String, Object>> rows = new ArrayList<Map<String, Object>>();
rows.add(row);
@@ -169,6 +170,7 @@ public class TestCachedSqlEntityProcessor extends AbstractDataImportHandlerTestC
public static class UppercaseTransformer extends Transformer {
+ @Override
public Object transformRow(Map<String, Object> row, Context context) {
for (Map.Entry<String, Object> entry : row.entrySet()) {
Object val = entry.getValue();
diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestContentStreamDataSource.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestContentStreamDataSource.java
index c49be006377..692272d4e04 100644
--- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestContentStreamDataSource.java
+++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestContentStreamDataSource.java
@@ -43,6 +43,7 @@ public class TestContentStreamDataSource extends AbstractDataImportHandlerTestCa
SolrInstance instance = null;
JettySolrRunner jetty;
+ @Override
@Before
public void setUp() throws Exception {
super.setUp();
@@ -51,6 +52,7 @@ public class TestContentStreamDataSource extends AbstractDataImportHandlerTestCa
jetty = createJetty(instance);
}
+ @Override
@After
public void tearDown() throws Exception {
jetty.stop();
diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestDocBuilder.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestDocBuilder.java
index e9947e52e76..a16b7017ab4 100644
--- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestDocBuilder.java
+++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestDocBuilder.java
@@ -33,6 +33,7 @@ import java.util.*;
*/
public class TestDocBuilder extends AbstractDataImportHandlerTestCase {
+ @Override
@After
public void tearDown() throws Exception {
MockDataSource.clearCache();
@@ -200,22 +201,27 @@ public class TestDocBuilder extends AbstractDataImportHandlerTestCase {
super(null, ".",null);
}
+ @Override
public boolean upload(SolrInputDocument doc) {
return docs.add(doc);
}
+ @Override
public void log(int event, String name, Object row) {
// Do nothing
}
+ @Override
public void doDeleteAll() {
deleteAllCalled = Boolean.TRUE;
}
+ @Override
public void commit(boolean b) {
commitCalled = Boolean.TRUE;
}
+ @Override
public void finish() {
finishCalled = Boolean.TRUE;
}
diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestDocBuilder2.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestDocBuilder2.java
index f361eb20a43..4632318fa17 100644
--- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestDocBuilder2.java
+++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestDocBuilder2.java
@@ -252,6 +252,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
}
public static class MockTransformer extends Transformer {
+ @Override
public Object transformRow(Map<String, Object> row, Context context) {
assertTrue("Context gave incorrect data source", context.getDataSource("mockDs") instanceof MockDataSource2);
return row;
@@ -259,6 +260,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
}
public static class AddDynamicFieldTransformer extends Transformer {
+ @Override
public Object transformRow(Map<String, Object> row, Context context) {
// Add a dynamic field
row.put("dynamic_s", "test");
diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestEntityProcessorBase.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestEntityProcessorBase.java
index 2b7d3578e96..42b29610666 100644
--- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestEntityProcessorBase.java
+++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestEntityProcessorBase.java
@@ -57,6 +57,7 @@ public class TestEntityProcessorBase extends AbstractDataImportHandlerTestCase {
static class T1 extends Transformer {
+ @Override
public Object transformRow(Map<String, Object> aRow, Context context) {
aRow.put("T1", "T1 called");
return aRow;
@@ -66,6 +67,7 @@ public class TestEntityProcessorBase extends AbstractDataImportHandlerTestCase {
static class T2 extends Transformer {
+ @Override
public Object transformRow(Map<String, Object> aRow, Context context) {
aRow.put("T2", "T2 called");
return aRow;
diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestErrorHandling.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestErrorHandling.java
index 0f703815bae..b8e285dffe1 100644
--- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestErrorHandling.java
+++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestErrorHandling.java
@@ -78,19 +78,23 @@ public class TestErrorHandling extends AbstractDataImportHandlerTestCase {
public static class StringDataSource extends DataSource<Reader> {
public static String xml = "";
+ @Override
public void init(Context context, Properties initProps) {
}
+ @Override
public Reader getData(String query) {
return new StringReader(xml);
}
+ @Override
public void close() {
}
}
public static class ExceptionTransformer extends Transformer {
+ @Override
public Object transformRow(Map<String, Object> row, Context context) {
throw new RuntimeException("Test exception");
}
diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestEvaluatorBag.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestEvaluatorBag.java
index 18b30a36d7b..41ac1dc5d15 100644
--- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestEvaluatorBag.java
+++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestEvaluatorBag.java
@@ -39,6 +39,7 @@ public class TestEvaluatorBag extends AbstractDataImportHandlerTestCase {
Map<String, String> urlTests;
+ @Override
@Before
public void setUp() throws Exception {
super.setUp();
diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestJdbcDataSource.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestJdbcDataSource.java
index 68cc9ccc9ff..ac6626462ee 100644
--- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestJdbcDataSource.java
+++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestJdbcDataSource.java
@@ -57,6 +57,7 @@ public class TestJdbcDataSource extends AbstractDataImportHandlerTestCase {
String sysProp = System.getProperty("java.naming.factory.initial");
+ @Override
@Before
public void setUp() throws Exception {
super.setUp();
@@ -69,6 +70,7 @@ public class TestJdbcDataSource extends AbstractDataImportHandlerTestCase {
connection = mockControl.createMock(Connection.class);
}
+ @Override
@After
public void tearDown() throws Exception {
if (sysProp == null) {
diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestLineEntityProcessor.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestLineEntityProcessor.java
index c24fced0bbf..91f8d034cda 100644
--- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestLineEntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestLineEntityProcessor.java
@@ -207,12 +207,15 @@ public class TestLineEntityProcessor extends AbstractDataImportHandlerTestCase {
private DataSource<Reader> getDataSource(final String xml) {
return new DataSource<Reader>() {
+ @Override
public void init(Context context, Properties initProps) {
}
+ @Override
public void close() {
}
+ @Override
public Reader getData(String query) {
return new StringReader(xml);
}
diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestPlainTextEntityProcessor.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestPlainTextEntityProcessor.java
index 48a0b1b4214..e0a5b8bf39c 100644
--- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestPlainTextEntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestPlainTextEntityProcessor.java
@@ -42,15 +42,18 @@ public class TestPlainTextEntityProcessor extends AbstractDataImportHandlerTestC
public static class DS extends DataSource {
static String s = "hello world";
+ @Override
public void init(Context context, Properties initProps) {
}
+ @Override
public Object getData(String query) {
return new StringReader(s);
}
+ @Override
public void close() {
}
diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestSqlEntityProcessor.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestSqlEntityProcessor.java
index 7fc50fa11cd..0fbfb846eae 100644
--- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestSqlEntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestSqlEntityProcessor.java
@@ -135,19 +135,23 @@ public class TestSqlEntityProcessor extends AbstractDataImportHandlerTestCase {
private static DataSource<Iterator<Map<String, Object>>> getDs(
final List<Map<String, Object>> rows) {
return new DataSource<Iterator<Map<String, Object>>>() {
+ @Override
public Iterator