SOLR-12554: Expose IndexWriterConfig's ramPerThreadHardLimitMB

* When ramPerThreadHardLimitMB is not specified, Lucene's
  default value of 1945 MB is used. The specified value must be
  greater than 0 and less than 2048 MB.
This commit is contained in:
Munendra S N 2019-07-01 14:43:34 +05:30
parent dc16e2707b
commit 0e877aac34
10 changed files with 58 additions and 3 deletions

View File

@ -148,6 +148,8 @@ New Features
* SOLR-13569: AdminUI visual indication of prod/test/dev environment (janhoy)
* SOLR-12554: Expose IndexWriterConfig's ramPerThreadHardLimitMB through solrconfig.xml (Ishan Chattopadhyaya, Munendra S N)
Bug Fixes
----------------------

View File

@ -67,6 +67,7 @@ public class SolrIndexConfig implements MapSerializable {
public final int maxBufferedDocs;
public final double ramBufferSizeMB;
public final int ramPerThreadHardLimitMB;
public final int writeLockTimeout;
public final String lockType;
@ -85,6 +86,7 @@ public class SolrIndexConfig implements MapSerializable {
useCompoundFile = false;
maxBufferedDocs = -1;
ramBufferSizeMB = 100;
ramPerThreadHardLimitMB = -1;
writeLockTimeout = -1;
lockType = DirectoryFactory.LOCK_TYPE_NATIVE;
mergePolicyFactoryInfo = null;
@ -130,6 +132,9 @@ public class SolrIndexConfig implements MapSerializable {
maxBufferedDocs=solrConfig.getInt(prefix+"/maxBufferedDocs",def.maxBufferedDocs);
ramBufferSizeMB = solrConfig.getDouble(prefix+"/ramBufferSizeMB", def.ramBufferSizeMB);
// how do we validate the value??
ramPerThreadHardLimitMB = solrConfig.getInt(prefix+"/ramPerThreadHardLimitMB", def.ramPerThreadHardLimitMB);
writeLockTimeout=solrConfig.getInt(prefix+"/writeLockTimeout", def.writeLockTimeout);
lockType=solrConfig.get(prefix+"/lockType", def.lockType);
@ -179,6 +184,7 @@ public class SolrIndexConfig implements MapSerializable {
Map<String, Object> m = Utils.makeMap("useCompoundFile", useCompoundFile,
"maxBufferedDocs", maxBufferedDocs,
"ramBufferSizeMB", ramBufferSizeMB,
"ramPerThreadHardLimitMB", ramPerThreadHardLimitMB,
"writeLockTimeout", writeLockTimeout,
"lockType", lockType,
"infoStreamEnabled", infoStream != InfoStream.NO_OUTPUT);
@ -221,6 +227,10 @@ public class SolrIndexConfig implements MapSerializable {
if (ramBufferSizeMB != -1)
iwc.setRAMBufferSizeMB(ramBufferSizeMB);
if (ramPerThreadHardLimitMB != -1) {
iwc.setRAMPerThreadHardLimitMB(ramPerThreadHardLimitMB);
}
iwc.setSimilarity(schema.getSimilarity());
MergePolicy mergePolicy = buildMergePolicy(core.getResourceLoader(), schema);
iwc.setMergePolicy(mergePolicy);

View File

@ -32,6 +32,7 @@ A solrconfig.xml snippet containing indexConfig settings for randomized testing.
<maxBufferedDocs>${solr.tests.maxBufferedDocs}</maxBufferedDocs>
<ramBufferSizeMB>${solr.tests.ramBufferSizeMB}</ramBufferSizeMB>
<ramPerThreadHardLimitMB>${solr.tests.ramPerThreadHardLimitMB}</ramPerThreadHardLimitMB>
<mergeScheduler class="${solr.tests.mergeScheduler}" />

View File

@ -163,6 +163,15 @@
<!-- <ramBufferSizeMB>100</ramBufferSizeMB> -->
<!-- <maxBufferedDocs>1000</maxBufferedDocs> -->
<!-- Expert: ramPerThreadHardLimitMB sets the maximum amount of RAM that can be consumed
per thread before they are flushed. When limit is exceeded, this triggers a forced
flush even if ramBufferSizeMB has not been exceeded.
This is a safety limit to prevent Lucene's DocumentsWriterPerThread from address space
exhaustion due to its internal 32 bit signed integer based memory addressing.
The specified value should be greater than 0 and less than 2048MB. When not specified,
Solr uses Lucene's default value 1945. -->
<!-- <ramPerThreadHardLimitMB>1945</ramPerThreadHardLimitMB> -->
<!-- Expert: Merge Policy
The Merge Policy in Lucene controls how merging of segments is done.
The default since Solr/Lucene 3.3 is TieredMergePolicy.

View File

@ -154,6 +154,7 @@ public class TestConfig extends SolrTestCaseJ4 {
++numDefaultsTested; assertEquals("default maxBufferedDocs", -1, sic.maxBufferedDocs);
++numDefaultsTested; assertEquals("default ramBufferSizeMB", 100.0D, sic.ramBufferSizeMB, 0.0D);
++numDefaultsTested; assertEquals("default ramPerThreadHardLimitMB", -1, sic.ramPerThreadHardLimitMB);
++numDefaultsTested; assertEquals("default writeLockTimeout", -1, sic.writeLockTimeout);
++numDefaultsTested; assertEquals("default LockType", DirectoryFactory.LOCK_TYPE_NATIVE, sic.lockType);
@ -215,7 +216,7 @@ public class TestConfig extends SolrTestCaseJ4 {
Assert.assertEquals(-1, solrConfig.getUpdateHandlerInfo().autoCommmitMaxTime);
}
// sanity check that sys propertis are working as expected
// sanity check that sys properties are working as expected
public void testSanityCheckTestSysPropsAreUsed() throws Exception {
SolrConfig sc = new SolrConfig(new SolrResourceLoader(TEST_PATH().resolve("collection1")), "solrconfig-basic.xml", null);
@ -224,10 +225,10 @@ public class TestConfig extends SolrTestCaseJ4 {
assertEquals("ramBufferSizeMB sysprop",
Double.parseDouble(System.getProperty("solr.tests.ramBufferSizeMB")),
sic.ramBufferSizeMB, 0.0D);
assertEquals("ramPerThreadHardLimitMB sysprop",
Integer.parseInt(System.getProperty("solr.tests.ramPerThreadHardLimitMB")), sic.ramPerThreadHardLimitMB);
assertEquals("useCompoundFile sysprop",
Boolean.parseBoolean(System.getProperty("useCompoundFile")), sic.useCompoundFile);
}
}

View File

@ -179,6 +179,8 @@ public class SolrIndexConfigTest extends SolrTestCaseJ4 {
++mSizeExpected; assertTrue(m.get("ramBufferSizeMB") instanceof Double);
++mSizeExpected; assertTrue(m.get("ramPerThreadHardLimitMB") instanceof Integer);
++mSizeExpected; assertTrue(m.get("writeLockTimeout") instanceof Integer);
++mSizeExpected; assertTrue(m.get("lockType") instanceof String);

View File

@ -163,6 +163,15 @@
<!-- <ramBufferSizeMB>100</ramBufferSizeMB> -->
<!-- <maxBufferedDocs>1000</maxBufferedDocs> -->
<!-- Expert: ramPerThreadHardLimitMB sets the maximum amount of RAM that can be consumed
per thread before they are flushed. When limit is exceeded, this triggers a forced
flush even if ramBufferSizeMB has not been exceeded.
This is a safety limit to prevent Lucene's DocumentsWriterPerThread from address space
exhaustion due to its internal 32 bit signed integer based memory addressing.
The specified value should be greater than 0 and less than 2048MB. When not specified,
Solr uses Lucene's default value 1945. -->
<!-- <ramPerThreadHardLimitMB>1945</ramPerThreadHardLimitMB> -->
<!-- Expert: Merge Policy
The Merge Policy in Lucene controls how merging of segments is done.
The default since Solr/Lucene 3.3 is TieredMergePolicy.

View File

@ -165,6 +165,15 @@
<!-- <ramBufferSizeMB>100</ramBufferSizeMB> -->
<!-- <maxBufferedDocs>1000</maxBufferedDocs> -->
<!-- Expert: ramPerThreadHardLimitMB sets the maximum amount of RAM that can be consumed
per thread before they are flushed. When limit is exceeded, this triggers a forced
flush even if ramBufferSizeMB has not been exceeded.
This is a safety limit to prevent Lucene's DocumentsWriterPerThread from address space
exhaustion due to its internal 32 bit signed integer based memory addressing.
The specified value should be greater than 0 and less than 2048MB. When not specified,
Solr uses Lucene's default value 1945. -->
<!-- <ramPerThreadHardLimitMB>1945</ramPerThreadHardLimitMB> -->
<!-- Expert: Merge Policy
The Merge Policy in Lucene controls how merging of segments is done.
The default since Solr/Lucene 3.3 is TieredMergePolicy.

View File

@ -56,6 +56,17 @@ Controls whether newly written (and not yet merged) index segments should use th
<useCompoundFile>false</useCompoundFile>
----
=== ramPerThreadHardLimitMB
Sets the maximum memory (defined in megabytes) consumption per thread triggering a forced flush if exceeded. The given value must be greater than 0 and less than 2GB (2048 MB).
[source,xml]
----
<ramPerThreadHardLimitMB>1945</ramPerThreadHardLimitMB>
----
NOTE: This is an expert level parameter as it triggers forced flush even if <<ramBufferSizeMB>> has not been exceeded.
== Merging Index Segments
=== mergePolicyFactory

View File

@ -557,6 +557,7 @@ public abstract class SolrTestCaseJ4 extends SolrTestCase {
System.setProperty("useCompoundFile", String.valueOf(iwc.getUseCompoundFile()));
System.setProperty("solr.tests.maxBufferedDocs", String.valueOf(iwc.getMaxBufferedDocs()));
System.setProperty("solr.tests.ramPerThreadHardLimitMB", String.valueOf(iwc.getRAMPerThreadHardLimitMB()));
System.setProperty("solr.tests.ramBufferSizeMB", String.valueOf(iwc.getRAMBufferSizeMB()));
String mergeSchedulerClass = iwc.getMergeScheduler().getClass().getName();