SOLR-6897: Nuke non-NRT mode from code and configuration

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1649810 13f79535-47bb-0310-9956-ffa450edef68
Shalin Shekhar Mangar 2015-01-06 14:28:08 +00:00
parent cd8fc589b9
commit e13e0de583
20 changed files with 45 additions and 364 deletions

View File

@@ -146,6 +146,10 @@ Upgrading from Solr 4.x
then it should be backwards compatible but you'll get a deprecation warning on startup. See
SOLR-6797.
* The <nrtMode> configuration in solrconfig.xml has been discontinued and should be removed from
solrconfig.xml. Solr defaults to using NRT searchers regardless of the value in configuration
and a warning is logged on startup if the solrconfig.xml has <nrtMode> specified.
Detailed Change List
----------------------
@@ -634,6 +638,8 @@ Other Changes
* SOLR-4839: Upgrade Jetty to 9.2.6.v20141205 and restlet-jee to 2.3.0
(Bill Bell, Timothy Potter, Uwe Schindler, shalin)
* SOLR-6897: Nuke non-NRT mode from code and configuration. (Hossman, shalin)
================== 4.10.3 ==================
Bug Fixes

View File

@@ -35,7 +35,6 @@ A solrconfig.xml snippet containing indexConfig settings for randomized testing.
<ramBufferSizeMB>${solr.tests.ramBufferSizeMB}</ramBufferSizeMB>
<mergeScheduler class="${solr.tests.mergeScheduler}" />
<nrtMode>${solr.tests.nrtMode:true}</nrtMode>
<writeLockTimeout>1000</writeLockTimeout>
<commitLockTimeout>10000</commitLockTimeout>

View File

@@ -20,6 +20,7 @@ package org.apache.solr.core;
import org.apache.lucene.util.Version;
import org.apache.solr.cloud.ZkSolrResourceLoader;
import org.apache.solr.common.SolrException;
import org.apache.solr.update.SolrIndexConfig;
import org.apache.solr.util.DOMUtil;
import org.apache.solr.util.SystemIdResolver;
import org.apache.solr.common.util.XMLErrorLogger;
@@ -162,6 +163,21 @@ public class Config {
}
}
/*
* Assert that assertCondition is true.
* If not, prints reason as log warning.
* If failCondition is true, then throw exception instead of warning
*/
public static void assertWarnOrFail(String reason, boolean assertCondition, boolean failCondition) {
if (assertCondition) {
return;
} else if (failCondition) {
throw new SolrException(SolrException.ErrorCode.FORBIDDEN, reason);
} else {
log.warn(reason);
}
}
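As a usage note (illustrative, not part of this patch): callers pass a condition that should hold plus a flag choosing warn-vs-fail, which is exactly how the <nrtMode> check added to SolrConfig below uses this helper. A minimal sketch, with the element path written out as a literal:

    // Hypothetical call site: warn at startup when a discontinued element is still present,
    // but keep loading the core (failCondition == false). Passing true instead makes
    // assertWarnOrFail throw SolrException(FORBIDDEN) and abort loading.
    assertWarnOrFail("The <nrtMode> config has been discontinued and NRT mode is always used by Solr.",
        getNode("indexConfig/nrtMode", false) == null,  // should hold: the element is absent
        false);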
protected Properties getSubstituteProperties() {
return loader.getCoreProperties();
}

View File

@@ -60,9 +60,8 @@ public abstract class IndexReaderFactory implements NamedListInitializedPlugin {
/**
* Creates a new IndexReader instance using the given IndexWriter.
* <p>
* This is used for opening the initial reader in NRT mode ({@code nrtMode=true}
* in solrconfig.xml)
*
* This is used for opening the initial reader in NRT mode
*
* @param writer IndexWriter
* @param core {@link SolrCore} instance where this reader will be used. NOTE:
* this SolrCore instance may not be fully configured yet, but basic things like

View File

@@ -193,7 +193,11 @@ public class SolrConfig extends Config implements MapSerializable{
defaultIndexConfig = mainIndexConfig = null;
indexConfigPrefix = "indexConfig";
}
nrtMode = getBool(indexConfigPrefix+"/nrtMode", true);
assertWarnOrFail("The <nrtMode> config has been discontinued and NRT mode is always used by Solr." +
" This config will be removed in future versions.", getNode(indexConfigPrefix + "/nrtMode", false) != null,
false
);
// Parse indexConfig section, using mainIndex as backup in case old config is used
indexConfig = new SolrIndexConfig(this, "indexConfig", mainIndexConfig);
@@ -419,7 +423,6 @@ public class SolrConfig extends Config implements MapSerializable{
public final int queryResultWindowSize;
public final int queryResultMaxDocsCached;
public final boolean enableLazyFieldLoading;
public final boolean nrtMode;
// DocSet
public final float hashSetInverseLoadFactor;
public final int hashDocSetMaxSize;

View File

@@ -441,7 +441,7 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
solrCoreState.increfSolrCoreState();
SolrCore currentCore;
boolean indexDirChange = !getNewIndexDir().equals(getIndexDir());
if (indexDirChange || !coreConfig.getSolrConfig().nrtMode) {
if (indexDirChange) {
// the directory is changing, don't pass on state
currentCore = null;
} else {
@@ -462,13 +462,6 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
}
// gets a non-caching searcher
public SolrIndexSearcher newSearcher(String name) throws IOException {
return new SolrIndexSearcher(this, getNewIndexDir(), getLatestSchema(), getSolrConfig().indexConfig,
name, false, directoryFactory);
}
private void initDirectoryFactory() {
DirectoryFactory dirFactory;
PluginInfo info = solrConfig.getPluginInfo(DirectoryFactory.class.getName());
@@ -852,13 +845,7 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
@Override
public DirectoryReader call() throws Exception {
if(getSolrConfig().nrtMode) {
// if in NRT mode, need to open from the previous writer
return indexReaderFactory.newReader(iw, core);
} else {
// if not NRT, need to create a new reader from the directory
return indexReaderFactory.newReader(iw.getDirectory(), core);
}
return indexReaderFactory.newReader(iw, core);
}
};
}
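For context (illustrative, not part of this patch): with the non-NRT branch gone, the reader creator always delegates to the IndexReaderFactory with the live writer, and for the standard case that amounts to opening the initial reader straight from the IndexWriter so uncommitted changes are searchable. A rough sketch, assuming Lucene 5.x-era signatures and a hypothetical class name:

    import java.io.IOException;

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;

    // Hypothetical factory sketch: open the initial near-real-time reader from the writer.
    public class NrtReaderFactorySketch {
      public DirectoryReader newReader(IndexWriter writer) throws IOException {
        // applyAllDeletes == true, matching the openIfChanged(..., true) call used later in SolrCore
        return DirectoryReader.open(writer, true);
      }
    }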
@@ -1500,7 +1487,6 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
SolrIndexSearcher tmp;
RefCounted<SolrIndexSearcher> newestSearcher = null;
boolean nrt = solrConfig.nrtMode && updateHandlerReopens;
openSearcherLock.lock();
try {
@@ -1509,7 +1495,7 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
String newIndexDirFile = null;
// if it's not a normal near-realtime update, check that paths haven't changed.
if (!nrt) {
if (!updateHandlerReopens) {
indexDirFile = getDirectoryFactory().normalize(getIndexDir());
newIndexDirFile = getDirectoryFactory().normalize(newIndexDir);
}
@@ -1521,7 +1507,7 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
}
}
if (newestSearcher != null && (nrt || indexDirFile.equals(newIndexDirFile))) {
if (newestSearcher != null && (updateHandlerReopens || indexDirFile.equals(newIndexDirFile))) {
DirectoryReader newReader;
DirectoryReader currentReader = newestSearcher.get().getRawReader();
@@ -1531,12 +1517,11 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
RefCounted<IndexWriter> writer = getUpdateHandler().getSolrCoreState()
.getIndexWriter(null);
try {
if (writer != null && solrConfig.nrtMode) {
if (writer != null) {
// if in NRT mode, open from the writer
newReader = DirectoryReader.openIfChanged(currentReader, writer.get(), true);
} else {
// verbose("start reopen without writer, reader=", currentReader);
// if not in NRT mode, just re-open the reader
newReader = DirectoryReader.openIfChanged(currentReader);
// verbose("reopen result", newReader);
}
@@ -1583,7 +1568,7 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
DirectoryReader newReader = newReaderCreator.call();
tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(),
(realtime ? "realtime":"main"), newReader, true, !realtime, true, directoryFactory);
} else if (solrConfig.nrtMode) {
} else {
RefCounted<IndexWriter> writer = getUpdateHandler().getSolrCoreState().getIndexWriter(this);
DirectoryReader newReader = null;
try {
@@ -1593,12 +1578,6 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
}
tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(),
(realtime ? "realtime":"main"), newReader, true, !realtime, true, directoryFactory);
} else {
// normal open that happens at startup
// verbose("non-reopen START:");
tmp = new SolrIndexSearcher(this, newIndexDir, getLatestSchema(), getSolrConfig().indexConfig,
"main", true, directoryFactory);
// verbose("non-reopen DONE: searcher=",tmp);
}
}
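The reopen path above follows the same pattern: whenever an IndexWriter is available, the current reader is refreshed near-real-time via DirectoryReader.openIfChanged(currentReader, writer.get(), true). A self-contained sketch of that call with a hypothetical helper name (not Solr code):

    import java.io.IOException;

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;

    // Hypothetical helper: refresh an NRT reader from its writer.
    // openIfChanged returns null when nothing has changed since 'current' was opened.
    final class NrtReopenSketch {
      static DirectoryReader reopenFromWriter(DirectoryReader current, IndexWriter writer) throws IOException {
        DirectoryReader newer = DirectoryReader.openIfChanged(current, writer, true); // applyAllDeletes
        return (newer != null) ? newer : current;
      }
    }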

View File

@@ -20,12 +20,10 @@ package org.apache.solr.update;
import org.apache.lucene.index.*;
import org.apache.lucene.index.IndexWriter.IndexReaderWarmer;
import org.apache.lucene.util.InfoStream;
import org.apache.lucene.util.PrintStreamInfoStream;
import org.apache.lucene.util.Version;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.Config;
import org.apache.solr.core.MapSerializable;
import org.apache.solr.core.SolrConfig;
import org.apache.solr.core.PluginInfo;
@@ -34,12 +32,11 @@ import org.apache.solr.util.SolrPluginUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileOutputStream;
import java.io.PrintStream;
import java.util.List;
import java.util.Map;
import static org.apache.solr.core.Config.assertWarnOrFail;
/**
* This config object encapsulates IndexWriter config params,
* defined in the &lt;indexConfig&gt; section of solrconfig.xml
@@ -127,13 +124,13 @@ public class SolrIndexConfig implements MapSerializable {
// Assert that end-of-life parameters or syntax is not in our config.
// Warn for luceneMatchVersion's before LUCENE_3_6, fail fast above
assertWarnOrFail("The <mergeScheduler>myclass</mergeScheduler> syntax is no longer supported in solrconfig.xml. Please use syntax <mergeScheduler class=\"myclass\"/> instead.",
!((solrConfig.getNode(prefix+"/mergeScheduler",false) != null) && (solrConfig.get(prefix+"/mergeScheduler/@class",null) == null)),
!((solrConfig.getNode(prefix + "/mergeScheduler", false) != null) && (solrConfig.get(prefix + "/mergeScheduler/@class", null) == null)),
true);
assertWarnOrFail("The <mergePolicy>myclass</mergePolicy> syntax is no longer supported in solrconfig.xml. Please use syntax <mergePolicy class=\"myclass\"/> instead.",
!((solrConfig.getNode(prefix+"/mergePolicy",false) != null) && (solrConfig.get(prefix+"/mergePolicy/@class",null) == null)),
!((solrConfig.getNode(prefix + "/mergePolicy", false) != null) && (solrConfig.get(prefix + "/mergePolicy/@class", null) == null)),
true);
assertWarnOrFail("The <luceneAutoCommit>true|false</luceneAutoCommit> parameter is no longer valid in solrconfig.xml.",
solrConfig.get(prefix+"/luceneAutoCommit", null) == null,
solrConfig.get(prefix + "/luceneAutoCommit", null) == null,
true);
defaultMergePolicyClassName = def.defaultMergePolicyClassName;
@@ -167,13 +164,10 @@ public class SolrIndexConfig implements MapSerializable {
}
}
mergedSegmentWarmerInfo = getPluginInfo(prefix + "/mergedSegmentWarmer", solrConfig, def.mergedSegmentWarmerInfo);
if (mergedSegmentWarmerInfo != null && solrConfig.nrtMode == false) {
throw new IllegalArgumentException("Supplying a mergedSegmentWarmer will do nothing since nrtMode is false");
}
assertWarnOrFail("Begining with Solr 5.0, <checkIntegrityAtMerge> option is no longer supported and should be removed from solrconfig.xml (these integrity checks are now automatic)",
(null == solrConfig.getNode(prefix+"/checkIntegrityAtMerge",false)),
true);
(null == solrConfig.getNode(prefix + "/checkIntegrityAtMerge", false)),
true);
}
@Override
public Map<String, Object> toMap() {
@@ -189,21 +183,6 @@ public class SolrIndexConfig implements MapSerializable {
return m;
}
/*
* Assert that assertCondition is true.
* If not, prints reason as log warning.
* If failCondition is true, then throw exception instead of warning
*/
private void assertWarnOrFail(String reason, boolean assertCondition, boolean failCondition) {
if(assertCondition) {
return;
} else if(failCondition) {
throw new SolrException(ErrorCode.FORBIDDEN, reason);
} else {
log.warn(reason);
}
}
private PluginInfo getPluginInfo(String path, SolrConfig solrConfig, PluginInfo def) {
List<PluginInfo> l = solrConfig.readPluginInfos(path, false, true);
return l.isEmpty() ? def : l.get(0);

View File

@@ -1,27 +0,0 @@
<?xml version="1.0" ?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<config>
<luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
<indexConfig>
<mergedSegmentWarmer class="org.apache.lucene.index.SimpleMergedSegmentWarmer"/>
<nrtMode>false</nrtMode> <!-- BAD -->
</indexConfig>
</config>

View File

@@ -35,7 +35,6 @@ A solrconfig.xml snippet containing indexConfig settings for randomized testing.
<ramBufferSizeMB>${solr.tests.ramBufferSizeMB}</ramBufferSizeMB>
<mergeScheduler class="${solr.tests.mergeScheduler}" />
<nrtMode>${solr.tests.nrtMode:true}</nrtMode>
<writeLockTimeout>1000</writeLockTimeout>
<commitLockTimeout>10000</commitLockTimeout>

View File

@@ -54,14 +54,12 @@ public class TestArbitraryIndexDir extends AbstractSolrTestCase{
@BeforeClass
public static void beforeClass() {
// this test wants to start solr, and then open a separate indexwriter of its own on the same dir.
System.setProperty("solr.tests.nrtMode", "false");
System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
savedFactory = System.getProperty("solr.DirectoryFactory");
System.setProperty("solr.directoryFactory", "org.apache.solr.core.MockFSDirectoryFactory");
}
@AfterClass
public static void afterClass() {
System.clearProperty("solr.tests.nrtMode");
if (savedFactory == null) {
System.clearProperty("solr.directoryFactory");
} else {
@@ -125,7 +123,7 @@ public class TestArbitraryIndexDir extends AbstractSolrTestCase{
iw.close();
//commit will cause searcher to open with the new index dir
assertU(commit());
assertU(commit());
h.getCoreContainer().reload(h.getCore().getName());
//new index dir contains just 1 doc.
assertQ("return doc with id 2",
req("id:2"),

View File

@@ -27,10 +27,6 @@ public class TestBadConfig extends AbstractBadConfigTestBase {
assertConfigs("bad_solrconfig.xml","schema.xml","unset.sys.property");
}
public void testSegmentMergerWithoutReopen() throws Exception {
assertConfigs("bad-solrconfig-warmer-no-reopen.xml", "schema12.xml",
"mergedSegmentWarmer");
}
public void testMultipleDirectoryFactories() throws Exception {
assertConfigs("bad-solrconfig-multiple-dirfactory.xml", "schema12.xml",
"directoryFactory");

View File

@@ -1,164 +0,0 @@
package org.apache.solr.core;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File;
import java.util.Collections;
import java.util.IdentityHashMap;
import java.util.Set;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.DirectoryReader;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.RefCounted;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class TestNonNRTOpen extends SolrTestCaseJ4 {
private static final Logger log = LoggerFactory.getLogger(TestNonNRTOpen.class);
@BeforeClass
public static void beforeClass() throws Exception {
// use a filesystem, because we need to create an index, then "start up solr"
System.setProperty("solr.directoryFactory", "solr.StandardDirectoryFactory");
// and dont delete it initially
System.setProperty("solr.test.leavedatadir", "true");
// turn off nrt
System.setProperty("solr.tests.nrtMode", "false");
// set these so that merges won't break the test
System.setProperty("solr.tests.maxBufferedDocs", "100000");
System.setProperty("solr.tests.mergePolicy", "org.apache.lucene.index.LogDocMergePolicy");
initCore("solrconfig-basic.xml", "schema-minimal.xml");
// add a doc
assertU(adoc("foo", "bar"));
assertU(commit());
File myDir = initCoreDataDir;
deleteCore();
// boot up again over the same index
initCoreDataDir = myDir;
initCore("solrconfig-basic.xml", "schema-minimal.xml");
// startup
assertNotNRT(1);
}
public void setUp() throws Exception {
super.setUp();
// delete all, then add initial doc
assertU(delQ("*:*"));
assertU(adoc("foo", "bar"));
assertU(commit());
}
@AfterClass
public static void afterClass() throws Exception {
// ensure we clean up after ourselves, this will fire before superclass...
System.clearProperty("solr.test.leavedatadir");
System.clearProperty("solr.directoryFactory");
System.clearProperty("solr.tests.maxBufferedDocs");
System.clearProperty("solr.tests.mergePolicy");
System.clearProperty("solr.tests.nrtMode");
}
public void testReaderIsNotNRT() {
// startup
assertNotNRT(1);
// core reload
String core = h.getCore().getName();
log.info("Reloading core: " + h.getCore().toString());
h.getCoreContainer().reload(core);
assertNotNRT(1);
// add a doc and commit
assertU(adoc("baz", "doc"));
assertU(commit());
assertNotNRT(2);
// add a doc and core reload
assertU(adoc("bazz", "doc2"));
log.info("Reloading core: " + h.getCore().toString());
h.getCoreContainer().reload(core);
assertNotNRT(3);
}
public void testSharedCores() {
// clear out any junk
assertU(optimize());
Set<Object> s1 = getCoreCacheKeys();
assertEquals(1, s1.size());
// add a doc, will go in a new segment
assertU(adoc("baz", "doc"));
assertU(commit());
Set<Object> s2 = getCoreCacheKeys();
assertEquals(2, s2.size());
assertTrue(s2.containsAll(s1));
// add two docs, will go in a new segment
assertU(adoc("foo", "doc"));
assertU(adoc("foo2", "doc"));
assertU(commit());
Set<Object> s3 = getCoreCacheKeys();
assertEquals(3, s3.size());
assertTrue(s3.containsAll(s2));
// delete a doc
assertU(delQ("foo2:doc"));
assertU(commit());
// same cores
assertEquals(s3, getCoreCacheKeys());
}
static void assertNotNRT(int maxDoc) {
SolrCore core = h.getCore();
log.info("Checking notNRT & maxDoc=" + maxDoc + " of core=" + core.toString());
RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
try {
SolrIndexSearcher s = searcher.get();
DirectoryReader ir = s.getRawReader();
assertEquals("SOLR-5815? : wrong maxDoc: core=" + core.toString() +" searcher=" + s.toString(),
maxDoc, ir.maxDoc());
assertFalse("SOLR-5815? : expected non-NRT reader, got: " + ir, ir.toString().contains(":nrt"));
} finally {
searcher.decref();
}
}
private Set<Object> getCoreCacheKeys() {
RefCounted<SolrIndexSearcher> searcher = h.getCore().getSearcher();
Set<Object> set = Collections.newSetFromMap(new IdentityHashMap<Object,Boolean>());
try {
DirectoryReader ir = searcher.get().getRawReader();
for (LeafReaderContext context : ir.leaves()) {
set.add(context.reader().getCoreCacheKey());
}
} finally {
searcher.decref();
}
return set;
}
}

View File

@@ -346,46 +346,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
/**
* Verify that things still work if an IW has not been opened (and hence the CommitPoints have not been communicated to the deletion policy)
*/
public void testNoWriter() throws Exception {
useFactory(null); // force a persistent directory
// read-only setting (no opening from indexwriter)
System.setProperty("solr.tests.nrtMode", "false");
try {
// stop and start so they see the new directory setting
slaveJetty.stop();
masterJetty.stop();
slaveJetty.start(true);
masterJetty.start(true);
index(slaveClient, "id", "123456");
slaveClient.commit();
slaveJetty.stop();
slaveJetty.start(true);
} finally {
System.clearProperty("solr.tests.nrtMode"); // dont mess with other tests
}
// Currently we open a writer on-demand. This is to test that we are correctly testing
// the code path when SolrDeletionPolicy.getLatestCommit() returns null.
// When we are using an ephemeral directory, an IW will always be opened to create the index and hence
// getLatestCommit will always be non-null.
CoreContainer cores = ((SolrDispatchFilter) slaveJetty.getDispatchFilter().getFilter()).getCores();
Collection<SolrCore> theCores = cores.getCores();
assertEquals(1, theCores.size());
SolrCore core = (SolrCore)theCores.toArray()[0];
assertNull( core.getDeletionPolicy().getLatestCommit() );
pullFromMasterToSlave(); // this will cause SnapPuller to be invoked and we will test when SolrDeletionPolicy.getLatestCommit() returns null
resetFactory();
}
/**
* Verify that empty commits and/or commits with openSearcher=false
* on the master do not cause subsequent replication problems on the slave
*/
public void testEmptyCommits() throws Exception {

View File

@@ -276,15 +276,6 @@
<unlockOnStartup>false</unlockOnStartup>
-->
<!-- If true, IndexReaders will be opened/reopened from the IndexWriter
instead of from the Directory. Hosts in a master/slave setup
should have this set to false while those in a SolrCloud
cluster need to be set to true. Default: true
-->
<!--
<nrtMode>true</nrtMode>
-->
<!-- Commit Deletion Policy
Custom deletion policies can be specified here. The class must
implement org.apache.lucene.index.IndexDeletionPolicy.

View File

@@ -279,15 +279,6 @@
<unlockOnStartup>false</unlockOnStartup>
-->
<!-- If true, IndexReaders will be opened/reopened from the IndexWriter
instead of from the Directory. Hosts in a master/slave setup
should have this set to false while those in a SolrCloud
cluster need to be set to true. Default: true
-->
<!--
<nrtMode>true</nrtMode>
-->
<!-- Commit Deletion Policy
Custom deletion policies can be specified here. The class must
implement org.apache.lucene.index.IndexDeletionPolicy.

View File

@@ -276,15 +276,6 @@
<unlockOnStartup>false</unlockOnStartup>
-->
<!-- If true, IndexReaders will be opened/reopened from the IndexWriter
instead of from the Directory. Hosts in a master/slave setup
should have this set to false while those in a SolrCloud
cluster need to be set to true. Default: true
-->
<!--
<nrtMode>true</nrtMode>
-->
<!-- Commit Deletion Policy
Custom deletion policies can be specified here. The class must
implement org.apache.lucene.index.IndexDeletionPolicy.

View File

@@ -276,15 +276,6 @@
<unlockOnStartup>false</unlockOnStartup>
-->
<!-- If true, IndexReaders will be opened/reopened from the IndexWriter
instead of from the Directory. Hosts in a master/slave setup
should have this set to false while those in a SolrCloud
cluster need to be set to true. Default: true
-->
<!--
<nrtMode>true</nrtMode>
-->
<!-- Commit Deletion Policy
Custom deletion policies can be specified here. The class must
implement org.apache.lucene.index.IndexDeletionPolicy.

View File

@@ -277,15 +277,6 @@
<unlockOnStartup>false</unlockOnStartup>
-->
<!-- If true, IndexReaders will be opened/reopened from the IndexWriter
instead of from the Directory. Hosts in a master/slave setup
should have this set to false while those in a SolrCloud
cluster need to be set to true. Default: true
-->
<!--
<nrtMode>true</nrtMode>
-->
<!-- Commit Deletion Policy
Custom deletion policies can be specified here. The class must
implement org.apache.lucene.index.IndexDeletionPolicy.

View File

@@ -256,15 +256,6 @@
<unlockOnStartup>false</unlockOnStartup>
-->
<!-- If true, IndexReaders will be opened/reopened from the IndexWriter
instead of from the Directory. Hosts in a master/slave setup
should have this set to false while those in a SolrCloud
cluster need to be set to true. Default: true
-->
<!--
<nrtMode>true</nrtMode>
-->
<!-- Commit Deletion Policy
Custom deletion policies can be specified here. The class must
implement org.apache.lucene.index.IndexDeletionPolicy.

View File

@@ -274,15 +274,6 @@
<unlockOnStartup>false</unlockOnStartup>
-->
<!-- If true, IndexReaders will be opened/reopened from the IndexWriter
instead of from the Directory. Hosts in a master/slave setup
should have this set to false while those in a SolrCloud
cluster need to be set to true. Default: true
-->
<!--
<nrtMode>true</nrtMode>
-->
<!-- Commit Deletion Policy
Custom deletion policies can be specified here. The class must
implement org.apache.lucene.index.IndexDeletionPolicy.