diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 275bbc24d0d..36a88ed5501 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -347,6 +347,9 @@ API Changes
be set at the constructor for non-contextual lookup.
(Boon Low, Tomás Fernández Löbbe)
+* LUCENE-6158: IndexWriter.addIndexes(IndexReader...) changed to
+ addIndexes(LeafReader...) (Robert Muir)
+
Bug Fixes
* LUCENE-5650: Enforce read-only access to any path outside the temporary
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
index 3cfc991ef54..a73bcc411f8 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
@@ -558,11 +558,11 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
public void testAddOldIndexesReader() throws IOException {
for (String name : oldNames) {
- IndexReader reader = DirectoryReader.open(oldIndexDirs.get(name));
+ DirectoryReader reader = DirectoryReader.open(oldIndexDirs.get(name));
Directory targetDir = newDirectory();
IndexWriter w = new IndexWriter(targetDir, newIndexWriterConfig(new MockAnalyzer(random())));
- w.addIndexes(reader);
+ TestUtil.addIndexesSlowly(w, reader);
w.close();
reader.close();
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddIndexesTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddIndexesTask.java
index 714a668150c..a7a5c7905a5 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddIndexesTask.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddIndexesTask.java
@@ -18,18 +18,21 @@ package org.apache.lucene.benchmark.byTask.tasks;
*/
import java.nio.file.Paths;
+import java.util.List;
import org.apache.lucene.benchmark.byTask.PerfRunData;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
/**
* Adds an input index to an existing index, using
* {@link IndexWriter#addIndexes(Directory...)} or
- * {@link IndexWriter#addIndexes(IndexReader...)}. The location of the input
+ * {@link IndexWriter#addIndexes(LeafReader...)}. The location of the input
* index is specified by the parameter {@link #ADDINDEXES_INPUT_DIR} and is
* assumed to be a directory on the file system.
*
@@ -63,11 +66,13 @@ public class AddIndexesTask extends PerfTask {
if (useAddIndexesDir) {
writer.addIndexes(inputDir);
} else {
- IndexReader r = DirectoryReader.open(inputDir);
- try {
- writer.addIndexes(r);
- } finally {
- r.close();
+ try (IndexReader r = DirectoryReader.open(inputDir)) {
+ LeafReader leaves[] = new LeafReader[r.leaves().size()];
+ int i = 0;
+ for (LeafReaderContext leaf : r.leaves()) {
+ leaves[i++] = leaf.reader();
+ }
+ writer.addIndexes(leaves);
}
}
return 1;
@@ -79,7 +84,7 @@ public class AddIndexesTask extends PerfTask {
* @param params
* {@code useAddIndexesDir=true} for using
* {@link IndexWriter#addIndexes(Directory...)} or {@code false} for
- * using {@link IndexWriter#addIndexes(IndexReader...)}. Defaults to
+ * using {@link IndexWriter#addIndexes(LeafReader...)}. Defaults to
* {@code true}.
*/
@Override
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
index 3f1ae7562df..7ac236aecd8 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
@@ -235,8 +235,8 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
public static final String SOURCE_MERGE = "merge";
/** Source of a segment which results from a flush. */
public static final String SOURCE_FLUSH = "flush";
- /** Source of a segment which results from a call to {@link #addIndexes(IndexReader...)}. */
- public static final String SOURCE_ADDINDEXES_READERS = "addIndexes(IndexReader...)";
+ /** Source of a segment which results from a call to {@link #addIndexes(LeafReader...)}. */
+ public static final String SOURCE_ADDINDEXES_READERS = "addIndexes(LeafReader...)";
/**
* Absolute hard maximum length for a term, in bytes once
@@ -2099,7 +2099,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
*
*
* NOTE: this method will forcefully abort all merges in progress. If other
- * threads are running {@link #forceMerge}, {@link #addIndexes(IndexReader[])}
+ * threads are running {@link #forceMerge}, {@link #addIndexes(LeafReader[])}
* or {@link #forceMergeDeletes} methods, they may receive
* {@link MergePolicy.MergeAbortedException}s.
*/
@@ -2497,7 +2497,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
* index.
*
*
- * NOTE: this method merges all given {@link IndexReader}s in one
+ * NOTE: this method merges all given {@link LeafReader}s in one
* merge. If you intend to merge a large number of readers, it may be better
* to call this method multiple times, each time with a small set of readers.
* In principle, if you use a merge policy with a {@code mergeFactor} or
@@ -2509,23 +2509,19 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
* @throws IOException
* if there is a low-level IO error
*/
- public void addIndexes(IndexReader... readers) throws IOException {
+ public void addIndexes(LeafReader... readers) throws IOException {
ensureOpen();
int numDocs = 0;
try {
if (infoStream.isEnabled("IW")) {
- infoStream.message("IW", "flush at addIndexes(IndexReader...)");
+ infoStream.message("IW", "flush at addIndexes(LeafReader...)");
}
flush(false, true);
String mergedName = newSegmentName();
- final List<LeafReader> mergeReaders = new ArrayList<>();
- for (IndexReader indexReader : readers) {
- numDocs += indexReader.numDocs();
- for (LeafReaderContext ctx : indexReader.leaves()) {
- mergeReaders.add(ctx.reader());
- }
+ for (LeafReader leaf : readers) {
+ numDocs += leaf.numDocs();
}
// Make sure adding the new documents to this index won't
@@ -2541,7 +2537,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
SegmentInfo info = new SegmentInfo(directory, Version.LATEST, mergedName, -1,
false, codec, null, StringHelper.randomId(), new HashMap<>());
- SegmentMerger merger = new SegmentMerger(mergeReaders, info, infoStream, trackingDir,
+ SegmentMerger merger = new SegmentMerger(Arrays.asList(readers), info, infoStream, trackingDir,
globalFieldNumberMap,
context);
diff --git a/lucene/core/src/java/org/apache/lucene/index/TrackingIndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/TrackingIndexWriter.java
index e1fb987976b..f90b5f0d74b 100644
--- a/lucene/core/src/java/org/apache/lucene/index/TrackingIndexWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/TrackingIndexWriter.java
@@ -165,9 +165,9 @@ public class TrackingIndexWriter {
return indexingGen.get();
}
- /** Calls {@link IndexWriter#addIndexes(IndexReader...)}
+ /** Calls {@link IndexWriter#addIndexes(LeafReader...)}
* and returns the generation that reflects this change. */
- public long addIndexes(IndexReader... readers) throws IOException {
+ public long addIndexes(LeafReader... readers) throws IOException {
writer.addIndexes(readers);
// Return gen as of when indexing finished:
return indexingGen.get();
diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BPostingsBytes.java b/lucene/core/src/test/org/apache/lucene/index/Test2BPostingsBytes.java
index 9e55181cb05..c0e18449e37 100644
--- a/lucene/core/src/test/org/apache/lucene/index/Test2BPostingsBytes.java
+++ b/lucene/core/src/test/org/apache/lucene/index/Test2BPostingsBytes.java
@@ -84,31 +84,29 @@ public class Test2BPostingsBytes extends LuceneTestCase {
w.close();
DirectoryReader oneThousand = DirectoryReader.open(dir);
- IndexReader subReaders[] = new IndexReader[1000];
+ DirectoryReader subReaders[] = new DirectoryReader[1000];
Arrays.fill(subReaders, oneThousand);
- MultiReader mr = new MultiReader(subReaders);
BaseDirectoryWrapper dir2 = newFSDirectory(createTempDir("2BPostingsBytes2"));
if (dir2 instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper)dir2).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
}
IndexWriter w2 = new IndexWriter(dir2,
new IndexWriterConfig(null));
- w2.addIndexes(mr);
+ TestUtil.addIndexesSlowly(w2, subReaders);
w2.forceMerge(1);
w2.close();
oneThousand.close();
DirectoryReader oneMillion = DirectoryReader.open(dir2);
- subReaders = new IndexReader[2000];
+ subReaders = new DirectoryReader[2000];
Arrays.fill(subReaders, oneMillion);
- mr = new MultiReader(subReaders);
BaseDirectoryWrapper dir3 = newFSDirectory(createTempDir("2BPostingsBytes3"));
if (dir3 instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper)dir3).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
}
IndexWriter w3 = new IndexWriter(dir3,
new IndexWriterConfig(null));
- w3.addIndexes(mr);
+ TestUtil.addIndexesSlowly(w3, subReaders);
w3.forceMerge(1);
w3.close();
oneMillion.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
index 1d3fb706730..470b498f6f7 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
@@ -652,7 +652,7 @@ public class TestAddIndexes extends LuceneTestCase {
IndexWriter writer2;
final List<Throwable> failures = new ArrayList<>();
volatile boolean didClose;
- final IndexReader[] readers;
+ final DirectoryReader[] readers;
final int NUM_COPY;
final static int NUM_THREADS = 5;
final Thread[] threads = new Thread[NUM_THREADS];
@@ -671,7 +671,7 @@ public class TestAddIndexes extends LuceneTestCase {
writer2.commit();
- readers = new IndexReader[NUM_COPY];
+ readers = new DirectoryReader[NUM_COPY];
for(int i=0;i<NUM_COPY;i++) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java
 List<Throwable> failures = new ArrayList<>();
- IndexReader[] readers;
+ DirectoryReader[] readers;
boolean didClose = false;
AtomicInteger count = new AtomicInteger(0);
AtomicInteger numaddIndexes = new AtomicInteger(0);
@@ -418,7 +418,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
writer.close();
- readers = new IndexReader[numDirs];
+ readers = new DirectoryReader[numDirs];
for (int i = 0; i < numDirs; i++)
readers[i] = DirectoryReader.open(addDir);
}
@@ -498,7 +498,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
numaddIndexes.incrementAndGet();
break;
case 2:
- mainWriter.addIndexes(readers);
+ TestUtil.addIndexesSlowly(mainWriter, readers);
break;
case 3:
mainWriter.commit();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java b/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java
index 466f072009b..7af304f93b1 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java
@@ -1196,7 +1196,7 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase {
writer.addIndexes(dir1);
} else {
DirectoryReader reader = DirectoryReader.open(dir1);
- writer.addIndexes(reader);
+ TestUtil.addIndexesSlowly(writer, reader);
reader.close();
}
writer.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java b/lucene/core/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java
index ea8baca74b6..9a2f5d962a1 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java
@@ -18,6 +18,8 @@ package org.apache.lucene.index;
*/
import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
@@ -65,11 +67,11 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase {
DirectoryReader.open(rd2));
// When unpatched, Lucene crashes here with a NoSuchElementException (caused by ParallelTermEnum)
- iwOut.addIndexes(cpr);
- iwOut.forceMerge(1);
-
- // 2nd try with a readerless parallel reader
- iwOut.addIndexes(new ParallelCompositeReader());
+ List<LeafReader> leaves = new ArrayList<>();
+ for (LeafReaderContext leaf : cpr.leaves()) {
+ leaves.add(leaf.reader());
+ }
+ iwOut.addIndexes(leaves.toArray(new LeafReader[0]));
iwOut.forceMerge(1);
iwOut.close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java b/lucene/core/src/test/org/apache/lucene/index/TestTermVectors.java
similarity index 96%
rename from lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java
rename to lucene/core/src/test/org/apache/lucene/index/TestTermVectors.java
index bac1e39b070..356353ea57e 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermVectors.java
@@ -1,4 +1,4 @@
-package org.apache.lucene.search;
+package org.apache.lucene.index;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
@@ -33,6 +33,7 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.util.English;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -154,8 +155,8 @@ public class TestTermVectors extends LuceneTestCase {
IndexWriter writer = createWriter(target);
for (Directory dir : input) {
- IndexReader r = DirectoryReader.open(dir);
- writer.addIndexes(r);
+ DirectoryReader r = DirectoryReader.open(dir);
+ TestUtil.addIndexesSlowly(writer, r);
r.close();
}
writer.forceMerge(1);
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyMergeUtils.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyMergeUtils.java
index bdd11e77d0b..13a34e3b770 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyMergeUtils.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyMergeUtils.java
@@ -56,7 +56,7 @@ public abstract class TaxonomyMergeUtils {
for (int i = 0; i < numReaders; i++) {
wrappedLeaves[i] = new OrdinalMappingLeafReader(leaves.get(i).reader(), ordinalMap, srcConfig);
}
- destIndexWriter.addIndexes(new MultiReader(wrappedLeaves));
+ destIndexWriter.addIndexes(wrappedLeaves);
// commit changes to taxonomy and index respectively.
destTaxoWriter.commit();
diff --git a/lucene/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java b/lucene/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java
index 67846067c19..f2ff43f7aae 100644
--- a/lucene/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java
+++ b/lucene/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java
@@ -33,7 +33,7 @@ import org.apache.lucene.util.Version;
/**
* This tool splits input index into multiple equal parts. The method employed
- * here uses {@link IndexWriter#addIndexes(IndexReader[])} where the input data
+ * here uses {@link IndexWriter#addIndexes(LeafReader[])} where the input data
* comes from the input index with artificially applied deletes to the document
* id-s that fall outside the selected partition.
* Note 1: Deletes are only applied to a buffered list of deleted docs and
@@ -102,7 +102,7 @@ public class MultiPassIndexSplitter {
System.err.println("Writing part " + (i + 1) + " ...");
// pass the subreaders directly, as our wrapper's numDocs/hasDeletetions are not up-to-date
final List<? extends FakeDeleteLeafIndexReader> sr = input.getSequentialSubReaders();
- w.addIndexes(sr.toArray(new IndexReader[sr.size()])); // TODO: maybe take List<IndexReader> here?
+ w.addIndexes(sr.toArray(new LeafReader[sr.size()])); // TODO: maybe take List<LeafReader> here?
w.close();
}
System.err.println("Done.");
diff --git a/lucene/misc/src/java/org/apache/lucene/index/PKIndexSplitter.java b/lucene/misc/src/java/org/apache/lucene/index/PKIndexSplitter.java
index a58a4794d30..e88ee0a5707 100644
--- a/lucene/misc/src/java/org/apache/lucene/index/PKIndexSplitter.java
+++ b/lucene/misc/src/java/org/apache/lucene/index/PKIndexSplitter.java
@@ -103,7 +103,7 @@ public class PKIndexSplitter {
final IndexWriter w = new IndexWriter(target, config);
try {
final List<LeafReaderContext> leaves = reader.leaves();
- final IndexReader[] subReaders = new IndexReader[leaves.size()];
+ final LeafReader[] subReaders = new LeafReader[leaves.size()];
int i = 0;
for (final LeafReaderContext ctx : leaves) {
subReaders[i++] = new DocumentFilteredLeafIndexReader(ctx, preserveFilter, negateFilter);
diff --git a/lucene/misc/src/test/org/apache/lucene/index/IndexSortingTest.java b/lucene/misc/src/test/org/apache/lucene/index/IndexSortingTest.java
index b6666cba60c..8d51b08451a 100644
--- a/lucene/misc/src/test/org/apache/lucene/index/IndexSortingTest.java
+++ b/lucene/misc/src/test/org/apache/lucene/index/IndexSortingTest.java
@@ -71,7 +71,7 @@ public class IndexSortingTest extends SorterTestBase {
Directory target = newDirectory();
IndexWriter writer = new IndexWriter(target, newIndexWriterConfig(null));
- IndexReader reader = SortingLeafReader.wrap(unsortedReader, sorter);
+ LeafReader reader = SortingLeafReader.wrap(unsortedReader, sorter);
writer.addIndexes(reader);
writer.close();
// NOTE: also closes unsortedReader
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java
index e8501230a0a..3477025dbb7 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java
@@ -207,7 +207,7 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase {
w.forceMerge(1);
w.commit();
w.close();
- IndexReader reader = DirectoryReader.open(dir);
+ DirectoryReader reader = DirectoryReader.open(dir);
Directory dir2 = newDirectory();
if (dir2 instanceof MockDirectoryWrapper) {
@@ -219,7 +219,8 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase {
mp.setNoCFSRatio(0);
cfg = new IndexWriterConfig(new MockAnalyzer(random())).setUseCompoundFile(false).setMergePolicy(mp);
w = new IndexWriter(dir2, cfg);
- w.addIndexes(reader);
+ TestUtil.addIndexesSlowly(w, reader);
+
w.commit();
w.close();
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java
index 822212bfd04..375429f3ce1 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java
@@ -647,7 +647,7 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat
Directory dir2 = newDirectory();
w = new RandomIndexWriter(random(), dir2);
- w.addIndexes(reader);
+ TestUtil.addIndexesSlowly(w.w, reader);
reader.close();
dir.close();
@@ -787,7 +787,7 @@ public abstract class BaseStoredFieldsFormatTestCase extends BaseIndexFileFormat
}
dirs[i] = newDirectory();
IndexWriter adder = new IndexWriter(dirs[i], new IndexWriterConfig(null));
- adder.addIndexes(reader);
+ TestUtil.addIndexesSlowly(adder, reader);
adder.commit();
adder.close();
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java b/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
index 3bd18e2f859..8daac204240 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
@@ -222,7 +222,7 @@ public class RandomIndexWriter implements Closeable {
w.addIndexes(dirs);
}
- public void addIndexes(IndexReader... readers) throws IOException {
+ public void addIndexes(LeafReader... readers) throws IOException {
LuceneTestCase.maybeChangeLiveIndexWriterConfig(r, w.getConfig());
w.addIndexes(readers);
}
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
index 2cda0f98752..636ad5619d3 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
@@ -29,6 +29,7 @@ import java.nio.CharBuffer;
import java.nio.file.FileSystem;
import java.nio.file.Files;
import java.nio.file.Path;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
@@ -68,6 +69,7 @@ import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.index.ConcurrentMergeScheduler;
+import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
@@ -872,6 +874,16 @@ public final class TestUtil {
return false;
}
}
+
+ public static void addIndexesSlowly(IndexWriter writer, DirectoryReader... readers) throws IOException {
+ List<LeafReader> leaves = new ArrayList<>();
+ for (DirectoryReader reader : readers) {
+ for (LeafReaderContext context : reader.leaves()) {
+ leaves.add(context.reader());
+ }
+ }
+ writer.addIndexes(leaves.toArray(new LeafReader[leaves.size()]));
+ }
/** just tries to configure things to keep the open file
* count lowish */
diff --git a/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java b/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
index 3943c7f9e41..72f9b5a2eb4 100644
--- a/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
+++ b/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
@@ -24,6 +24,8 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.BooleanClause;
@@ -467,9 +469,15 @@ public class DirectUpdateHandler2 extends UpdateHandler implements SolrCoreState
List<DirectoryReader> readers = cmd.readers;
if (readers != null && readers.size() > 0) {
+ List<LeafReader> leaves = new ArrayList<>();
+ for (DirectoryReader reader : readers) {
+ for (LeafReaderContext leaf : reader.leaves()) {
+ leaves.add(leaf.reader());
+ }
+ }
RefCounted<IndexWriter> iw = solrCoreState.getIndexWriter(core);
try {
- iw.get().addIndexes(readers.toArray(new IndexReader[readers.size()]));
+ iw.get().addIndexes(leaves.toArray(new LeafReader[leaves.size()]));
} finally {
iw.decref();
}
diff --git a/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java b/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
index 650c1a3264a..5adddfd0a6b 100644
--- a/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
+++ b/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
@@ -129,7 +129,7 @@ public class SolrIndexSplitter {
// This removes deletions but optimize might still be needed because sub-shards will have the same number of segments as the parent shard.
for (int segmentNumber = 0; segmentNumber