Karl Wright 2018-03-18 16:15:57 -04:00
commit f282a855f2
34 changed files with 743 additions and 240 deletions

View File

@ -288,7 +288,13 @@
<target name="eclipse" depends="resolve" description="Setup Eclipse configuration">
<basename file="${basedir}" property="eclipseprojectname"/>
<copy file="dev-tools/eclipse/dot.project" tofile=".project" overwrite="false" encoding="UTF-8">
<copy file="dev-tools/eclipse/dot.project" tofile=".project" overwrite="false" encoding="UTF-8">
<filterset>
<filter token="ECLIPSEPROJECTNAME" value="${eclipseprojectname}"/>
</filterset>
</copy>
<copy overwrite="false" todir="eclipse-build" encoding="UTF-8">
<fileset dir="dev-tools/eclipse" includes="*.launch"/>
<filterset>
<filter token="ECLIPSEPROJECTNAME" value="${eclipseprojectname}"/>
</filterset>
@ -297,7 +303,7 @@
<copy todir=".settings/" overwrite="true">
<fileset dir="dev-tools/eclipse/dot.settings" includes="*.prefs" />
</copy>
<pathconvert property="eclipse.fileset.sourcefolders" pathsep="|" dirsep="/">
<dirset dir="${basedir}/lucene" includes="**/src/java, **/src/resources, **/src/test, **/src/test-files, **/src/examples" excludes="tools/**, build/**" />
<dirset dir="${basedir}/solr" includes="**/src/java, **/src/resources, **/src/test, **/src/test-files, **/src/examples" excludes="build/**" />
@ -309,10 +315,19 @@
<fileset dir="${basedir}/solr" includes="**/test-lib/*.jar,**/lib/*.jar" excludes="core/test-lib/*servlet-api*.jar, contrib/analysis-extras/**, test-framework/lib/junit*, test-framework/lib/ant*, test-framework/lib/randomizedtesting*, build/**, dist/**, package/**" />
<map from="${basedir}/" to=""/>
</pathconvert>
<pathconvert property="eclipse.fileset.webfolders" pathsep="|" dirsep="/">
<dirset dir="${basedir}/solr/server/contexts" excludes="**/*" />
<dirset dir="${basedir}/solr/server/etc" excludes="**/*" />
<dirset dir="${basedir}/solr/server/modules" excludes="**/*" />
<dirset dir="${basedir}/solr/server/solr" excludes="**/*" />
<dirset dir="${basedir}/solr/webapp/web" excludes="**/*" />
<map from="${basedir}/" to=""/>
</pathconvert>
<xslt in="${ant.file}" out=".classpath" style="dev-tools/eclipse/dot.classpath.xsl" force="true">
<outputproperty name="indent" value="yes"/>
<param name="eclipse.fileset.libs" expression="${eclipse.fileset.libs}"/>
<param name="eclipse.fileset.sourcefolders" expression="${eclipse.fileset.sourcefolders}"/>
<param name="eclipse.fileset.webfolders" expression="${eclipse.fileset.webfolders}"/>
</xslt>
<echo>

View File

@ -22,6 +22,7 @@
>
<xsl:param name="eclipse.fileset.sourcefolders"/>
<xsl:param name="eclipse.fileset.libs"/>
<xsl:param name="eclipse.fileset.webfolders"/>
<!--
NOTE: This template matches the root element of any given input XML document!
@ -54,7 +55,23 @@
<classpathentry excluding="src" including="conf/**" kind="src" path="lucene/benchmark"/>
</xsl:if>
</xsl:for-each>
<xsl:for-each select="str:split($eclipse.fileset.webfolders,'|')">
<xsl:sort select="text()" order="ascending" lang="en"/>
<classpathentry kind="src" path="{.}">
<xsl:attribute name="output">
<xsl:choose>
<xsl:when test="contains(.,'solr/webapp/web')">
<xsl:text>eclipse-build/solr-server/solr-webapp/webapp</xsl:text>
</xsl:when>
<xsl:otherwise>
<xsl:text>eclipse-build/solr-server/</xsl:text><xsl:value-of select="substring(text(), 13)"/>
</xsl:otherwise>
</xsl:choose>
</xsl:attribute>
</classpathentry>
</xsl:for-each>
<!-- the main resources folder is here (see above), so it's listed after the test-framework resources, making preflex-override work: -->
<classpathentry kind="output" path="eclipse-build/main"/>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8"/>

View File

@ -0,0 +1,20 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<launchConfiguration type="org.eclipse.jdt.launching.localJavaApplication">
<listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_PATHS">
<listEntry value="/@ECLIPSEPROJECTNAME@"/>
</listAttribute>
<listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_TYPES">
<listEntry value="4"/>
</listAttribute>
<booleanAttribute key="org.eclipse.jdt.launching.ATTR_USE_START_ON_FIRST_THREAD" value="true"/>
<listAttribute key="org.eclipse.jdt.launching.CLASSPATH">
<listEntry value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#13;&#10;&lt;runtimeClasspathEntry containerPath=&quot;org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8&quot; javaProject=&quot;@ECLIPSEPROJECTNAME@&quot; path=&quot;1&quot; type=&quot;4&quot;/&gt;&#13;&#10;"/>
<listEntry value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#13;&#10;&lt;runtimeClasspathEntry id=&quot;org.eclipse.jdt.launching.classpathentry.defaultClasspath&quot;&gt;&#13;&#10;&lt;memento exportedEntriesOnly=&quot;false&quot; project=&quot;@ECLIPSEPROJECTNAME@&quot;/&gt;&#13;&#10;&lt;/runtimeClasspathEntry&gt;&#13;&#10;"/>
<listEntry value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#13;&#10;&lt;runtimeClasspathEntry internalArchive=&quot;/@ECLIPSEPROJECTNAME@/solr/server/start.jar&quot; path=&quot;3&quot; type=&quot;2&quot;/&gt;&#13;&#10;"/>
</listAttribute>
<booleanAttribute key="org.eclipse.jdt.launching.DEFAULT_CLASSPATH" value="false"/>
<stringAttribute key="org.eclipse.jdt.launching.MAIN_TYPE" value="org.eclipse.jetty.start.Main"/>
<stringAttribute key="org.eclipse.jdt.launching.PROGRAM_ARGUMENTS" value="--module=http"/>
<stringAttribute key="org.eclipse.jdt.launching.PROJECT_ATTR" value="@ECLIPSEPROJECTNAME@"/>
<stringAttribute key="org.eclipse.jdt.launching.VM_ARGUMENTS" value="-Xms512m &#13;&#10;-Djetty.port=8983&#13;&#10;-DSTOP.PORT=7983 &#13;&#10;-DSTOP.KEY=solrrock&#13;&#10;-Dlog4j.configuration=&quot;file:${workspace_loc:@ECLIPSEPROJECTNAME@}/solr/server/resources/log4j.properties&quot;&#13;&#10;-Djetty.home=&quot;${workspace_loc:@ECLIPSEPROJECTNAME@}/eclipse-build/solr-server&quot;&#13;&#10;-Djetty.base=&quot;${workspace_loc:@ECLIPSEPROJECTNAME@}/eclipse-build/solr-server&quot;&#13;&#10;-Dsolr.log=&quot;${workspace_loc:@ECLIPSEPROJECTNAME@}/eclipse-build&quot;&#13;&#10;-Dsolr.log.dir=&quot;${workspace_loc:@ECLIPSEPROJECTNAME@}/eclipse-build&quot;&#13;&#10;-Dsolr.solr.home=&quot;${workspace_loc:@ECLIPSEPROJECTNAME@}/eclipse-build/solr-server/solr&quot;&#13;&#10;-DzkRun"/>
</launchConfiguration>

View File

@ -0,0 +1,20 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<launchConfiguration type="org.eclipse.jdt.launching.localJavaApplication">
<listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_PATHS">
<listEntry value="/@ECLIPSEPROJECTNAME@"/>
</listAttribute>
<listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_TYPES">
<listEntry value="4"/>
</listAttribute>
<booleanAttribute key="org.eclipse.jdt.launching.ATTR_USE_START_ON_FIRST_THREAD" value="true"/>
<listAttribute key="org.eclipse.jdt.launching.CLASSPATH">
<listEntry value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#13;&#10;&lt;runtimeClasspathEntry containerPath=&quot;org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8&quot; javaProject=&quot;@ECLIPSEPROJECTNAME@&quot; path=&quot;1&quot; type=&quot;4&quot;/&gt;&#13;&#10;"/>
<listEntry value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#13;&#10;&lt;runtimeClasspathEntry id=&quot;org.eclipse.jdt.launching.classpathentry.defaultClasspath&quot;&gt;&#13;&#10;&lt;memento exportedEntriesOnly=&quot;false&quot; project=&quot;@ECLIPSEPROJECTNAME@&quot;/&gt;&#13;&#10;&lt;/runtimeClasspathEntry&gt;&#13;&#10;"/>
<listEntry value="&lt;?xml version=&quot;1.0&quot; encoding=&quot;UTF-8&quot; standalone=&quot;no&quot;?&gt;&#13;&#10;&lt;runtimeClasspathEntry internalArchive=&quot;/@ECLIPSEPROJECTNAME@/solr/server/start.jar&quot; path=&quot;3&quot; type=&quot;2&quot;/&gt;&#13;&#10;"/>
</listAttribute>
<booleanAttribute key="org.eclipse.jdt.launching.DEFAULT_CLASSPATH" value="false"/>
<stringAttribute key="org.eclipse.jdt.launching.MAIN_TYPE" value="org.eclipse.jetty.start.Main"/>
<stringAttribute key="org.eclipse.jdt.launching.PROGRAM_ARGUMENTS" value="--module=http"/>
<stringAttribute key="org.eclipse.jdt.launching.PROJECT_ATTR" value="@ECLIPSEPROJECTNAME@"/>
<stringAttribute key="org.eclipse.jdt.launching.VM_ARGUMENTS" value="-Xms512m &#13;&#10;-Djetty.port=8983&#13;&#10;-DSTOP.PORT=7983 &#13;&#10;-DSTOP.KEY=solrrock&#13;&#10;-Dlog4j.configuration=&quot;file:${workspace_loc:@ECLIPSEPROJECTNAME@}/solr/server/resources/log4j.properties&quot;&#13;&#10;-Djetty.home=&quot;${workspace_loc:@ECLIPSEPROJECTNAME@}/eclipse-build/solr-server&quot;&#13;&#10;-Djetty.base=&quot;${workspace_loc:@ECLIPSEPROJECTNAME@}/eclipse-build/solr-server&quot;&#13;&#10;-Dsolr.log=&quot;${workspace_loc:@ECLIPSEPROJECTNAME@}/eclipse-build&quot;&#13;&#10;-Dsolr.log.dir=&quot;${workspace_loc:@ECLIPSEPROJECTNAME@}/eclipse-build&quot;&#13;&#10;-Dsolr.solr.home=&quot;${workspace_loc:@ECLIPSEPROJECTNAME@}/eclipse-build/solr-server/solr&quot;"/>
</launchConfiguration>

View File

@ -0,0 +1,17 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<launchConfiguration type="org.eclipse.jdt.junit.launchconfig">
<listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_PATHS">
<listEntry value="/@ECLIPSEPROJECTNAME@"/>
</listAttribute>
<listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_TYPES">
<listEntry value="4"/>
</listAttribute>
<stringAttribute key="org.eclipse.jdt.junit.CONTAINER" value="=@ECLIPSEPROJECTNAME@"/>
<booleanAttribute key="org.eclipse.jdt.junit.KEEPRUNNING_ATTR" value="false"/>
<stringAttribute key="org.eclipse.jdt.junit.TESTNAME" value=""/>
<stringAttribute key="org.eclipse.jdt.junit.TEST_KIND" value="org.eclipse.jdt.junit.loader.junit4"/>
<booleanAttribute key="org.eclipse.jdt.launching.ATTR_USE_START_ON_FIRST_THREAD" value="true"/>
<stringAttribute key="org.eclipse.jdt.launching.MAIN_TYPE" value=""/>
<stringAttribute key="org.eclipse.jdt.launching.PROJECT_ATTR" value="@ECLIPSEPROJECTNAME@"/>
<stringAttribute key="org.eclipse.jdt.launching.VM_ARGUMENTS" value="-ea"/>
</launchConfiguration>

View File

@ -19,10 +19,13 @@ package org.apache.lucene.index;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Locale;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.lucene.analysis.Analyzer;
@ -261,16 +264,30 @@ final class DocumentsWriter implements Closeable, Accountable {
return false; // we didn't flush anything here
}
/** Returns how many documents were aborted. */
synchronized long lockAndAbortAll(IndexWriter indexWriter) throws IOException {
/** Locks all currently active DWPT and aborts them.
* The returned Closeable should be closed once the locks for the aborted
* DWPTs can be released. */
synchronized Closeable lockAndAbortAll(IndexWriter indexWriter) throws IOException {
assert indexWriter.holdsFullFlushLock();
if (infoStream.isEnabled("DW")) {
infoStream.message("DW", "lockAndAbortAll");
}
// Make sure we move all pending tickets into the flush queue:
ticketQueue.forcePurge(indexWriter);
long abortedDocCount = 0;
boolean success = false;
List<ThreadState> threadStates = new ArrayList<>();
AtomicBoolean released = new AtomicBoolean(false);
final Closeable release = () -> {
assert indexWriter.holdsFullFlushLock();
if (released.compareAndSet(false, true)) { // only once
if (infoStream.isEnabled("DW")) {
infoStream.message("DW", "unlockAllAbortedThread");
}
perThreadPool.clearAbort();
for (ThreadState state : threadStates) {
state.unlock();
}
}
};
try {
deleteQueue.clear();
final int limit = perThreadPool.getMaxThreadStates();
@ -278,25 +295,31 @@ final class DocumentsWriter implements Closeable, Accountable {
for (int i = 0; i < limit; i++) {
final ThreadState perThread = perThreadPool.getThreadState(i);
perThread.lock();
abortedDocCount += abortThreadState(perThread);
threadStates.add(perThread);
abortThreadState(perThread);
}
deleteQueue.clear();
// jump over any possible in flight ops:
deleteQueue.skipSequenceNumbers(perThreadPool.getActiveThreadStateCount()+1);
deleteQueue.skipSequenceNumbers(perThreadPool.getActiveThreadStateCount() + 1);
flushControl.abortPendingFlushes();
flushControl.waitForFlush();
success = true;
return abortedDocCount;
} finally {
if (infoStream.isEnabled("DW")) {
infoStream.message("DW", "finished lockAndAbortAll success=" + success);
infoStream.message("DW", "finished lockAndAbortAll success=true");
}
if (success == false) {
return release;
} catch (Throwable t) {
if (infoStream.isEnabled("DW")) {
infoStream.message("DW", "finished lockAndAbortAll success=false");
}
try {
// if something happens here we unlock all states again
unlockAllAfterAbortAll(indexWriter);
release.close();
} catch (Throwable t1) {
t.addSuppressed(t1);
}
throw t;
}
}
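For illustration, a minimal sketch (placeholder names, not committed code) of how a caller is expected to use the new Closeable-returning lockAndAbortAll; IndexWriter.deleteAll later in this commit uses the same try-with-resources pattern:
try (Closeable release = docWriter.lockAndAbortAll(indexWriter)) {
  // all active DWPTs are locked and aborted inside this block, so index-wide state
  // (e.g. segmentInfos) can be reset without racing concurrent indexing
  resetIndexState(); // hypothetical helper
} // close() unlocks the aborted thread states exactly once (guarded by the AtomicBoolean above)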
@ -318,28 +341,6 @@ final class DocumentsWriter implements Closeable, Accountable {
return 0;
}
}
synchronized void unlockAllAfterAbortAll(IndexWriter indexWriter) {
assert indexWriter.holdsFullFlushLock();
if (infoStream.isEnabled("DW")) {
infoStream.message("DW", "unlockAll");
}
final int limit = perThreadPool.getMaxThreadStates();
perThreadPool.clearAbort();
for (int i = 0; i < limit; i++) {
try {
final ThreadState perThread = perThreadPool.getThreadState(i);
if (perThread.isHeldByCurrentThread()) {
perThread.unlock();
}
} catch (Throwable e) {
if (infoStream.isEnabled("DW")) {
infoStream.message("DW", "unlockAll: could not unlock state: " + i + " msg:" + e.getMessage());
}
// ignore & keep on unlocking
}
}
}
/** returns the maximum sequence number for all previously completed operations */
public long getMaxCompletedSequenceNumber() {

View File

@ -746,10 +746,9 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
checkpointNoSIS();
}
} catch (Throwable t) {
priorE = IOUtils.useOrSuppress(priorE, t);
if (doSave) {
throw IOUtils.rethrowAlways(t);
} else if (priorE == null) {
priorE = t;
throw t;
}
}
@ -766,10 +765,9 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
try {
rld.dropReaders();
} catch (Throwable t) {
priorE = IOUtils.useOrSuppress(priorE, t);
if (doSave) {
throw IOUtils.rethrowAlways(t);
} else if (priorE == null) {
priorE = t;
throw t;
}
}
}
@ -2447,8 +2445,6 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
}
private void rollbackInternalNoCommit() throws IOException {
boolean success = false;
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "rollback");
}
@ -2467,7 +2463,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
docWriter.abort(this); // don't sync on IW here
docWriter.flushControl.waitForFlush(); // wait for all concurrently running flushes
purge(true); // empty the flush ticket queue otherwise we might not have cleaned up all resources
synchronized(this) {
synchronized (this) {
if (pendingCommit != null) {
pendingCommit.rollbackCommit(directory);
@ -2488,8 +2484,8 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
int rollbackMaxDoc = segmentInfos.totalMaxDoc();
// now we need to adjust this back to the rolled back SI but don't set it to the absolute value
// otherwise we might hide internal bugs
adjustPendingNumDocs(-(totalMaxDoc-rollbackMaxDoc));
if (infoStream.isEnabled("IW") ) {
adjustPendingNumDocs(-(totalMaxDoc - rollbackMaxDoc));
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "rollback: infos=" + segString(segmentInfos));
}
@ -2510,45 +2506,53 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
// after we leave this sync block and before we enter the sync block in the finally clause below that sets closed:
closed = true;
IOUtils.close(writeLock); // release write lock
writeLock = null;
closed = true;
closing = false;
// So any "concurrently closing" threads wake up and see that the close has now completed:
notifyAll();
}
success = true;
} catch (VirtualMachineError tragedy) {
tragicEvent(tragedy, "rollbackInternal");
throw tragedy;
} finally {
if (success == false) {
} catch (Throwable throwable) {
try {
// Must not hold IW's lock while closing
// mergeScheduler: this can lead to deadlock,
// e.g. TestIW.testThreadInterruptDeadlock
IOUtils.closeWhileHandlingException(mergeScheduler);
}
synchronized(this) {
if (success == false) {
synchronized (this) {
// we tried to be nice about it: do the minimum
// don't leak a segments_N file if there is a pending commit
if (pendingCommit != null) {
try {
pendingCommit.rollbackCommit(directory);
deleter.decRef(pendingCommit);
} catch (Throwable t) {
throwable.addSuppressed(t);
}
pendingCommit = null;
}
// close all the closeables we can (but important is readerPool and writeLock to prevent leaks)
IOUtils.closeWhileHandlingException(readerPool, deleter, writeLock);
writeLock = null;
}
closed = true;
closing = false;
// So any "concurrently closing" threads wake up and see that the close has now completed:
notifyAll();
}
} catch (Throwable t) {
throwable.addSuppressed(t);
} finally {
if (throwable instanceof VirtualMachineError) {
try {
tragicEvent(throwable, "rollbackInternal");
} catch (Throwable t1){
throwable.addSuppressed(t1);
}
}
}
throw throwable;
}
}
@ -2599,42 +2603,42 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
* sure it's just like a fresh index.
*/
try {
synchronized (fullFlushLock) {
docWriter.lockAndAbortAll(this);
processEvents(false);
synchronized (this) {
try {
// Abort any running merges
abortMerges();
// Let merges run again
stopMerges = false;
adjustPendingNumDocs(-segmentInfos.totalMaxDoc());
// Remove all segments
segmentInfos.clear();
// Ask deleter to locate unreferenced files & remove them:
deleter.checkpoint(segmentInfos, false);
synchronized (fullFlushLock) {
try (Closeable release = docWriter.lockAndAbortAll(this)) {
processEvents(false);
synchronized (this) {
try {
// Abort any running merges
abortMerges();
// Let merges run again
stopMerges = false;
adjustPendingNumDocs(-segmentInfos.totalMaxDoc());
// Remove all segments
segmentInfos.clear();
// Ask deleter to locate unreferenced files & remove them:
deleter.checkpoint(segmentInfos, false);
/* don't refresh the deleter here since there might
* be concurrent indexing requests coming in opening
* files on the directory after we called DW#abort()
* if we do so these indexing requests might hit FNF exceptions.
* We will remove the files incrementally as we go...
*/
// Don't bother saving any changes in our segmentInfos
readerPool.dropAll(false);
// Mark that the index has changed
changeCount.incrementAndGet();
segmentInfos.changed();
globalFieldNumberMap.clear();
success = true;
long seqNo = docWriter.deleteQueue.getNextSequenceNumber();
docWriter.setLastSeqNo(seqNo);
return seqNo;
} finally {
docWriter.unlockAllAfterAbortAll(this);
if (!success) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "hit exception during deleteAll");
/* don't refresh the deleter here since there might
* be concurrent indexing requests coming in opening
* files on the directory after we called DW#abort()
* if we do so these indexing requests might hit FNF exceptions.
* We will remove the files incrementally as we go...
*/
// Don't bother saving any changes in our segmentInfos
readerPool.dropAll(false);
// Mark that the index has changed
changeCount.incrementAndGet();
segmentInfos.changed();
globalFieldNumberMap.clear();
success = true;
long seqNo = docWriter.deleteQueue.getNextSequenceNumber();
docWriter.setLastSeqNo(seqNo);
return seqNo;
} finally {
if (success == false) {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "hit exception during deleteAll");
}
}
}
}
@ -4065,26 +4069,22 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
deleteNewFiles(merge.info.files());
}
boolean success = false;
try {
// Must close before checkpoint, otherwise IFD won't be
// able to delete the held-open files from the merge
// readers:
closeMergeReaders(merge, false);
success = true;
} finally {
checkpoint();
} catch (Throwable t) {
// Must note the change to segmentInfos so any commits
// in-flight don't lose it (IFD will incRef/protect the
// new files we created):
if (success) {
try {
checkpoint();
} else {
try {
checkpoint();
} catch (Throwable t) {
// Ignore so we keep throwing original exception.
}
} catch (Throwable t1) {
t.addSuppressed(t1);
}
throw t;
}
if (infoStream.isEnabled("IW")) {
@ -4120,7 +4120,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
// in which case we must throw it so, for example, the
// rollbackTransaction code in addIndexes* is
// executed.
if (merge.isExternal) {
if (merge.isExternal) { // TODO can we simplify this and just throw all the time? this would simplify this a lot
throw (MergePolicy.MergeAbortedException) t;
}
} else {
@ -4427,9 +4427,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
readerPool.drop(rld.info);
}
} catch (Throwable t) {
if (th == null) {
th = t;
}
th = IOUtils.useOrSuppress(th, t);
}
merge.readers.set(i, null);
}
@ -4438,9 +4436,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
try {
merge.mergeFinished();
} catch (Throwable t) {
if (th == null) {
th = t;
}
th = IOUtils.useOrSuppress(th, t);
}
// If any error occurred, throw it.

View File

@ -122,6 +122,8 @@ public final class IOUtils {
if (object != null) {
object.close();
}
} catch (VirtualMachineError e) {
throw e;
} catch (Throwable t) {
}
}
@ -623,4 +625,17 @@ public final class IOUtils {
return desc;
}
}
/**
* Returns the second throwable if the first is null; otherwise adds the second as suppressed
* to the first and returns the first.
*/
public static <T extends Throwable> T useOrSuppress(T first, T second) {
if (first == null) {
return second;
} else {
first.addSuppressed(second);
}
return first;
}
}
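For illustration, a hedged sketch (hypothetical helper, not part of this commit) of the accumulate-and-rethrow pattern that useOrSuppress enables; IndexWriter.closeMergeReaders in this commit uses the same idiom:
// close everything, keep the first failure, attach later ones as suppressed
static void closeAllOrThrowFirst(java.util.List<java.io.Closeable> resources) throws java.io.IOException {
  Throwable th = null;
  for (java.io.Closeable c : resources) {
    try {
      c.close();
    } catch (Throwable t) {
      th = IOUtils.useOrSuppress(th, t); // first failure wins, the rest are suppressed
    }
  }
  if (th != null) {
    throw IOUtils.rethrowAlways(th); // always rethrows the accumulated failure
  }
}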

View File

@ -18,7 +18,8 @@ package org.apache.lucene.index;
import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.Lock;
@ -53,6 +54,7 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
// Used by test cases below
private static class IndexerThread extends Thread {
private final CyclicBarrier syncStart;
boolean diskFull;
Throwable error;
AlreadyClosedException ace;
@ -60,13 +62,20 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
boolean noErrors;
volatile int addCount;
public IndexerThread(IndexWriter writer, boolean noErrors) {
public IndexerThread(IndexWriter writer, boolean noErrors, CyclicBarrier syncStart) {
this.writer = writer;
this.noErrors = noErrors;
this.syncStart = syncStart;
}
@Override
public void run() {
try {
syncStart.await();
} catch (BrokenBarrierException | InterruptedException e) {
error = e;
throw new RuntimeException(e);
}
final Document doc = new Document();
FieldType customType = new FieldType(TextField.TYPE_STORED);
@ -79,7 +88,6 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
int idUpto = 0;
int fullCount = 0;
final long stopTime = System.currentTimeMillis() + 200;
do {
try {
@ -114,7 +122,6 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
// OK: abort closes the writer
break;
} catch (Throwable t) {
//t.printStackTrace(System.out);
if (noErrors) {
System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected Throwable:");
t.printStackTrace(System.out);
@ -122,7 +129,7 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
}
break;
}
} while(System.currentTimeMillis() < stopTime);
} while (true);
}
}
@ -133,7 +140,7 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
int NUM_THREADS = 3;
final int numIterations = TEST_NIGHTLY ? 10 : 3;
for(int iter=0;iter<numIterations;iter++) {
for (int iter=0;iter<numIterations;iter++) {
if (VERBOSE) {
System.out.println("\nTEST: iter=" + iter);
}
@ -149,15 +156,15 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).setSuppressExceptions();
dir.setMaxSizeInBytes(4*1024+20*iter);
CyclicBarrier syncStart = new CyclicBarrier(NUM_THREADS + 1);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, true);
for(int i=0;i<NUM_THREADS;i++)
for (int i = 0; i < NUM_THREADS; i++) {
threads[i] = new IndexerThread(writer, true, syncStart);
threads[i].start();
}
syncStart.await();
for(int i=0;i<NUM_THREADS;i++) {
for (int i = 0; i < NUM_THREADS; i++) {
// Without fix for LUCENE-1130: one of the
// threads will hang
threads[i].join();
@ -203,16 +210,17 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
);
((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).setSuppressExceptions();
CyclicBarrier syncStart = new CyclicBarrier(NUM_THREADS + 1);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, false);
for(int i=0;i<NUM_THREADS;i++)
for (int i = 0; i < NUM_THREADS; i++) {
threads[i] = new IndexerThread(writer, false, syncStart);
threads[i].start();
}
syncStart.await();
boolean done = false;
while(!done) {
while (!done) {
Thread.sleep(100);
for(int i=0;i<NUM_THREADS;i++)
// only stop when at least one thread has added a doc
@ -238,6 +246,8 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
// Without fix for LUCENE-1130: one of the
// threads will hang
threads[i].join();
// [DW] this is unreachable: once join() returns, a thread cannot be alive.
if (threads[i].isAlive())
fail("thread seems to be hung");
}
@ -266,7 +276,7 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
int NUM_THREADS = 3;
for(int iter=0;iter<2;iter++) {
for (int iter = 0; iter < 2; iter++) {
if (VERBOSE) {
System.out.println("TEST: iter=" + iter);
}
@ -282,20 +292,18 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
);
((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).setSuppressExceptions();
CyclicBarrier syncStart = new CyclicBarrier(NUM_THREADS + 1);
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
for(int i=0;i<NUM_THREADS;i++)
threads[i] = new IndexerThread(writer, true);
for(int i=0;i<NUM_THREADS;i++)
for (int i = 0; i < NUM_THREADS; i++) {
threads[i] = new IndexerThread(writer, true, syncStart);
threads[i].start();
Thread.sleep(10);
}
syncStart.await();
dir.failOn(failure);
failure.setDoFail();
for(int i=0;i<NUM_THREADS;i++) {
for (int i = 0; i < NUM_THREADS; i++) {
threads[i].join();
assertTrue("hit unexpected Throwable", threads[i].error == null);
}
@ -502,39 +510,30 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
// and closes before the second IndexWriter time's out trying to get the Lock,
// we should see both documents
public void testOpenTwoIndexWritersOnDifferentThreads() throws IOException, InterruptedException {
final Directory dir = newDirectory();
CountDownLatch oneIWConstructed = new CountDownLatch(1);
DelayedIndexAndCloseRunnable thread1 = new DelayedIndexAndCloseRunnable(
dir, oneIWConstructed);
DelayedIndexAndCloseRunnable thread2 = new DelayedIndexAndCloseRunnable(
dir, oneIWConstructed);
try (final Directory dir = newDirectory()) {
CyclicBarrier syncStart = new CyclicBarrier(2);
DelayedIndexAndCloseRunnable thread1 = new DelayedIndexAndCloseRunnable(dir, syncStart);
DelayedIndexAndCloseRunnable thread2 = new DelayedIndexAndCloseRunnable(dir, syncStart);
thread1.start();
thread2.start();
thread1.join();
thread2.join();
thread1.start();
thread2.start();
oneIWConstructed.await();
thread1.startIndexing();
thread2.startIndexing();
thread1.join();
thread2.join();
// ensure the directory is closed if we hit the timeout and throw assume
// TODO: can we improve this in LuceneTestCase? I don't know what the logic would be...
try {
assumeFalse("aborting test: timeout obtaining lock", thread1.failure instanceof LockObtainFailedException);
assumeFalse("aborting test: timeout obtaining lock", thread2.failure instanceof LockObtainFailedException);
if (thread1.failure instanceof LockObtainFailedException ||
thread2.failure instanceof LockObtainFailedException) {
// We only care about the situation when the two writers succeeded.
return;
}
assertFalse("Failed due to: " + thread1.failure, thread1.failed);
assertFalse("Failed due to: " + thread2.failure, thread2.failed);
// now verify that we have two documents in the index
IndexReader reader = DirectoryReader.open(dir);
assertEquals("IndexReader should have one document per thread running", 2,
reader.numDocs());
reader.close();
} finally {
dir.close();
}
}
@ -542,17 +541,12 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
private final Directory dir;
boolean failed = false;
Throwable failure = null;
private final CountDownLatch startIndexing = new CountDownLatch(1);
private CountDownLatch iwConstructed;
private CyclicBarrier syncStart;
public DelayedIndexAndCloseRunnable(Directory dir,
CountDownLatch iwConstructed) {
CyclicBarrier syncStart) {
this.dir = dir;
this.iwConstructed = iwConstructed;
}
public void startIndexing() {
this.startIndexing.countDown();
this.syncStart = syncStart;
}
@Override
@ -561,16 +555,14 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
Document doc = new Document();
Field field = newTextField("field", "testData", Field.Store.YES);
doc.add(field);
syncStart.await();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
iwConstructed.countDown();
startIndexing.await();
writer.addDocument(doc);
writer.close();
} catch (Throwable e) {
failed = true;
failure = e;
failure.printStackTrace(System.out);
return;
}
}
}

View File

@ -49,7 +49,6 @@ Apache UIMA 2.3.1
Apache ZooKeeper 3.4.11
Jetty 9.4.8.v20171121
Upgrade Notes
----------------------
@ -59,6 +58,8 @@ New Features
Bug Fixes
----------------------
* SOLR-12103: Raise CryptoKeys.DEFAULT_KEYPAIR_LENGTH from 1024 to 2048. (Mark Miller)
Optimizations
----------------------
@ -66,6 +67,9 @@ Optimizations
differential fetching now speeds up recovery times when full index replication is needed, but only
a few segments diverge. (Ishan Chattopadhyaya, Shaun Sabo, John Gallagher)
* SOLR-11731: LatLonPointSpatialField can now decode points from docValues when stored=false docValues=true,
albeit with maximum precision of 1.33cm (Karthik Ramachandran, David Smiley)
Other Changes
----------------------
@ -79,6 +83,12 @@ Other Changes
* SOLR-12091: Rename TimeSource.getTime to getTimeNs. (ab)
* SOLR-12101: ZkTestServer was not handling connection timeout settings properly. (Gus Heck via Mark Miller)
* SOLR-11331: Ability to run and debug standalone Solr and a single node SolrCloud server from Eclipse.
Also being able to run all Lucene and Solr tests as a configuration (Karthik Ramachandran via
Varun Thacker, Uwe Schindler)
================== 7.3.0 ==================
Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.
@ -327,6 +337,10 @@ Bug Fixes
* SOLR-12083: Fix RealTime GET to work on a cluster running CDCR when using Solr's in-place updates
(Amrit Sarkar, Varun Thacker)
* SOLR-12063: Fix PeerSync, Leader Election failures and CDCR checkpoint inconsistencies on a cluster running CDCR
(Amrit Sarkar, Varun Thacker)
* SOLR-12110: Replica which failed to register in Zk can become leader (Cao Manh Dat)
Optimizations
----------------------
@ -452,6 +466,9 @@ Other Changes
* SOLR-12098: Document the Lucene spins auto-detection and its effect on CMS dynamic defaults.
(Cassandra Targett, shalin)
* SOLR-12097: Document the diskType policy attribute and usage of disk space in Collection APIs.
(Cassandra Targett, shalin)
================== 7.2.1 ==================
Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

View File

@ -641,17 +641,20 @@
<target name="resolve" depends="resolve-example,resolve-server">
<sequential>
<ant dir="core" target="resolve" inheritall="false">
<propertyset refid="uptodate.and.compiled.properties"/>
</ant>
<ant dir="solrj" target="resolve" inheritall="false">
<propertyset refid="uptodate.and.compiled.properties"/>
</ant>
<ant dir="test-framework" target="resolve" inheritall="false">
<propertyset refid="uptodate.and.compiled.properties"/>
</ant>
<ant dir="server" target="resolve" inheritall="false">
<propertyset refid="uptodate.and.compiled.properties"/>
</ant>
<ant dir="solr-ref-guide" target="resolve" inheritall="false">
<propertyset refid="uptodate.and.compiled.properties"/>
</ant>
<contrib-crawl target="resolve"/>
</sequential>
</target>

View File

@ -1146,6 +1146,9 @@ public class ZkController {
// make sure we have an update cluster state right away
zkStateReader.forceUpdateCollection(collection);
return shardId;
} catch (Exception e) {
unregister(coreName, desc, false);
throw e;
} finally {
MDCLoggingContext.clear();
}
@ -1493,6 +1496,10 @@ public class ZkController {
}
public void unregister(String coreName, CoreDescriptor cd) throws Exception {
unregister(coreName, cd, true);
}
public void unregister(String coreName, CoreDescriptor cd, boolean removeCoreFromZk) throws Exception {
final String coreNodeName = cd.getCloudDescriptor().getCoreNodeName();
final String collection = cd.getCloudDescriptor().getCollectionName();
getCollectionTerms(collection).remove(cd.getCloudDescriptor().getShardId(), cd);
@ -1504,7 +1511,7 @@ public class ZkController {
}
final DocCollection docCollection = zkStateReader.getClusterState().getCollectionOrNull(collection);
Replica replica = (docCollection == null) ? null : docCollection.getReplica(coreNodeName);
if (replica == null || replica.getType() != Type.PULL) {
ElectionContext context = electionContexts.remove(new ContextKey(collection, coreNodeName));
@ -1514,14 +1521,15 @@ public class ZkController {
}
CloudDescriptor cloudDescriptor = cd.getCloudDescriptor();
zkStateReader.unregisterCore(cloudDescriptor.getCollectionName());
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION,
OverseerAction.DELETECORE.toLower(), ZkStateReader.CORE_NAME_PROP, coreName,
ZkStateReader.NODE_NAME_PROP, getNodeName(),
ZkStateReader.COLLECTION_PROP, cloudDescriptor.getCollectionName(),
ZkStateReader.BASE_URL_PROP, getBaseUrl(),
ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName);
overseerJobQueue.offer(Utils.toJSON(m));
if (removeCoreFromZk) {
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION,
OverseerAction.DELETECORE.toLower(), ZkStateReader.CORE_NAME_PROP, coreName,
ZkStateReader.NODE_NAME_PROP, getNodeName(),
ZkStateReader.COLLECTION_PROP, cloudDescriptor.getCollectionName(),
ZkStateReader.BASE_URL_PROP, getBaseUrl(),
ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName);
overseerJobQueue.offer(Utils.toJSON(m));
}
}
public void createCollection(String collection) throws Exception {

View File

@ -26,6 +26,7 @@ import java.util.Collection;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeoutException;
import java.util.function.Predicate;
import org.apache.solr.cloud.CurrentCoreDescriptorProvider;
import org.apache.solr.cloud.SolrZkServer;
@ -173,11 +174,16 @@ public class ZkContainer {
return zkRun.substring(0, zkRun.lastIndexOf('/'));
}
public static Predicate<CoreDescriptor> testing_beforeRegisterInZk;
public void registerInZk(final SolrCore core, boolean background, boolean skipRecovery) {
Runnable r = () -> {
MDCLoggingContext.setCore(core);
try {
try {
if (testing_beforeRegisterInZk != null) {
testing_beforeRegisterInZk.test(core.getCoreDescriptor());
}
zkController.register(core.getName(), core.getCoreDescriptor(), skipRecovery);
} catch (InterruptedException e) {
// Restore the interrupted status

View File

@ -18,11 +18,13 @@
package org.apache.solr.schema;
import java.io.IOException;
import java.math.BigDecimal;
import java.util.Objects;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.LatLonDocValuesField;
import org.apache.lucene.document.LatLonPoint;
import org.apache.lucene.geo.GeoEncodingUtils;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.queries.function.ValueSource;
@ -46,6 +48,8 @@ import org.locationtech.spatial4j.shape.Point;
import org.locationtech.spatial4j.shape.Rectangle;
import org.locationtech.spatial4j.shape.Shape;
import static java.math.RoundingMode.CEILING;
/**
* A spatial implementation based on Lucene's {@code LatLonPoint} and {@code LatLonDocValuesField}. The
* first is based on Lucene's "Points" API, which is a BKD Index. This field type is strictly limited to
@ -71,6 +75,26 @@ public class LatLonPointSpatialField extends AbstractSpatialFieldType implements
SchemaField schemaField = schema.getField(fieldName); // TODO change AbstractSpatialFieldType so we get schemaField?
return new LatLonPointSpatialStrategy(ctx, fieldName, schemaField.indexed(), schemaField.hasDocValues());
}
/**
* Decodes the docValues number into latitude and longitude components, formatting as "lat,lon".
* The encoding is governed by {@code LatLonDocValuesField}. The decimal output representation is reflective
* of the available precision.
* @param value Non-null; stored location field data
* @return Non-null; "lat, lon" with 6 decimal point precision
*/
public static String decodeDocValueToString(long value) {
final double latDouble = GeoEncodingUtils.decodeLatitude((int) (value >> 32));
final double lonDouble = GeoEncodingUtils.decodeLongitude((int) (value & 0xFFFFFFFFL));
// 7 decimal places maximizes our available precision to just over a centimeter; we have a test for it.
// CEILING round-trips (decode then re-encode then decode to get identical results). Others did not. It also
// reverses the "floor" that occurs when we encode.
BigDecimal latitudeDecoded = BigDecimal.valueOf(latDouble).setScale(7, CEILING);
BigDecimal longitudeDecoded = BigDecimal.valueOf(lonDouble).setScale(7, CEILING);
return latitudeDecoded.stripTrailingZeros().toPlainString() + ","
+ longitudeDecoded.stripTrailingZeros().toPlainString();
// return ((float)latDouble) + "," + ((float)lonDouble); crude but not quite as accurate
}
// TODO move to Lucene-spatial-extras once LatLonPoint & LatLonDocValuesField moves out of sandbox
public static class LatLonPointSpatialStrategy extends SpatialStrategy {

View File

@ -56,6 +56,7 @@ import org.apache.lucene.util.NumericUtils;
import org.apache.solr.common.SolrDocumentBase;
import org.apache.solr.core.SolrConfig;
import org.apache.solr.schema.BoolField;
import org.apache.solr.schema.LatLonPointSpatialField;
import org.apache.solr.schema.AbstractEnumField;
import org.apache.solr.schema.NumberType;
import org.apache.solr.schema.SchemaField;
@ -490,8 +491,16 @@ public class SolrDocumentFetcher {
long number = numericDv.nextValue();
Object value = decodeNumberFromDV(schemaField, number, true);
// return immediately if the number is not decodable, hence won't return an empty list.
if (value == null) return null;
else outValues.add(value);
if (value == null) {
return null;
}
// normally never true but LatLonPointSpatialField uses SORTED_NUMERIC even when single valued
else if (schemaField.multiValued() == false) {
return value;
}
else {
outValues.add(value);
}
}
assert outValues.size() > 0;
return outValues;
@ -515,6 +524,12 @@ public class SolrDocumentFetcher {
}
private Object decodeNumberFromDV(SchemaField schemaField, long value, boolean sortableNumeric) {
// note: This special-case is unfortunate; if we have to add any more, then perhaps the fieldType should
// have this method so that specific field types can customize it.
if (schemaField.getType() instanceof LatLonPointSpatialField) {
return LatLonPointSpatialField.decodeDocValueToString(value);
}
if (schemaField.getType().getNumberType() == null) {
log.warn("Couldn't decode docValues for field: [{}], schemaField: [{}], numberType is unknown",
schemaField.getName(), schemaField);

View File

@ -1431,8 +1431,11 @@ public class UpdateLog implements PluginInfoInitialized, SolrMetricProducer {
update.pointer = reader.position();
update.version = version;
if (oper == UpdateLog.UPDATE_INPLACE && entry.size() == 5) {
update.previousVersion = (Long) entry.get(UpdateLog.PREV_VERSION_IDX);
if (oper == UpdateLog.UPDATE_INPLACE) {
if ((update.log instanceof CdcrTransactionLog && entry.size() == 6) ||
(!(update.log instanceof CdcrTransactionLog) && entry.size() == 5)) {
update.previousVersion = (Long) entry.get(UpdateLog.PREV_VERSION_IDX);
}
}
updatesForLog.add(update);
updates.put(version, update);
@ -1440,7 +1443,7 @@ public class UpdateLog implements PluginInfoInitialized, SolrMetricProducer {
if (oper == UpdateLog.DELETE_BY_QUERY) {
deleteByQueryList.add(update);
} else if (oper == UpdateLog.DELETE) {
deleteList.add(new DeleteUpdate(version, (byte[])entry.get(entry.size()-1)));
deleteList.add(new DeleteUpdate(version, (byte[])entry.get(2)));
}
break;

View File

@ -287,8 +287,8 @@ public final class CryptoKeys {
// If this ever comes back to haunt us see the discussion at
// SOLR-9609 for background and code allowing this to go
// into security.json
private static final int DEFAULT_KEYPAIR_LENGTH = 1024;
// into security.json. Also see SOLR-12103.
private static final int DEFAULT_KEYPAIR_LENGTH = 2048;
public RSAKeyPair() {
KeyPairGenerator keyGen = null;

View File

@ -86,8 +86,11 @@
<field name="llp_idx" type="llp" indexed="true" docValues="false" />
<field name="llp_dv" type="llp" indexed="false" docValues="true" />
<field name="llp_1_dv_st" type="llp" indexed="false" docValues="true" stored="true" multiValued="false"/>
<field name="llp_N_dv_st" type="llp" indexed="false" docValues="true" stored="true" multiValued="true"/>
<field name="llp_1_dv" type="llp" indexed="false" docValues="true" stored="false" multiValued="false" useDocValuesAsStored="false"/>
<field name="llp_N_dv" type="llp" indexed="false" docValues="true" stored="false" multiValued="true" useDocValuesAsStored="false"/>
<field name="llp_1_dv_dvasst" type="llp" indexed="false" docValues="true" stored="false" multiValued="false" useDocValuesAsStored="true"/>
<field name="llp_N_dv_dvasst" type="llp" indexed="false" docValues="true" stored="false" multiValued="true" useDocValuesAsStored="true"/>
<dynamicField name="bboxD_*" type="bbox" indexed="true"/>
<dynamicField name="str_*" type="string" indexed="true" stored="true"/>

View File

@ -16,23 +16,41 @@
*/
package org.apache.solr.cloud;
import java.lang.invoke.MethodHandles;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.EnumSet;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.CoreStatus;
import org.apache.solr.cloud.overseer.OverseerAction;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.util.TimeSource;
import org.apache.solr.common.util.Utils;
import org.apache.solr.core.ZkContainer;
import org.apache.solr.util.TimeOut;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.solr.common.cloud.Replica.State.DOWN;
public class DeleteReplicaTest extends SolrCloudTestCase {
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@BeforeClass
public static void setupCluster() throws Exception {
configureCluster(4)
@ -141,5 +159,130 @@ public class DeleteReplicaTest extends SolrCloudTestCase {
}
@Test
public void raceConditionOnDeleteAndRegisterReplica() throws Exception {
final String collectionName = "raceDeleteReplica";
CollectionAdminRequest.createCollection(collectionName, "conf", 1, 2)
.process(cluster.getSolrClient());
waitForState("Expected 1x2 collections", collectionName, clusterShape(1, 2));
Slice shard1 = getCollectionState(collectionName).getSlice("shard1");
Replica leader = shard1.getLeader();
JettySolrRunner leaderJetty = getJettyForReplica(leader);
Replica replica1 = shard1.getReplicas(replica -> !replica.getName().equals(leader.getName())).get(0);
assertFalse(replica1.getName().equals(leader.getName()));
JettySolrRunner replica1Jetty = getJettyForReplica(replica1);
String replica1JettyNodeName = replica1Jetty.getNodeName();
Semaphore waitingForReplicaGetDeleted = new Semaphore(0);
// for safety, we only want this hook to be triggered one time
AtomicInteger times = new AtomicInteger(0);
ZkContainer.testing_beforeRegisterInZk = cd -> {
if (cd.getCloudDescriptor() == null) return false;
if (replica1.getName().equals(cd.getCloudDescriptor().getCoreNodeName())
&& collectionName.equals(cd.getCloudDescriptor().getCollectionName())) {
if (times.incrementAndGet() > 1) {
return false;
}
LOG.info("Running delete core {}",cd);
try {
ZkNodeProps m = new ZkNodeProps(
Overseer.QUEUE_OPERATION, OverseerAction.DELETECORE.toLower(),
ZkStateReader.CORE_NAME_PROP, replica1.getCoreName(),
ZkStateReader.NODE_NAME_PROP, replica1.getNodeName(),
ZkStateReader.COLLECTION_PROP, collectionName,
ZkStateReader.CORE_NODE_NAME_PROP, replica1.getName(),
ZkStateReader.BASE_URL_PROP, replica1.getBaseUrl());
Overseer.getStateUpdateQueue(cluster.getZkClient()).offer(Utils.toJSON(m));
boolean replicaDeleted = false;
TimeOut timeOut = new TimeOut(20, TimeUnit.SECONDS, TimeSource.NANO_TIME);
while (!timeOut.hasTimedOut()) {
try {
ZkStateReader stateReader = replica1Jetty.getCoreContainer().getZkController().getZkStateReader();
stateReader.forceUpdateCollection(collectionName);
Slice shard = stateReader.getClusterState().getCollection(collectionName).getSlice("shard1");
LOG.error("Datcm get slice on 211 {}", shard);
if (shard.getReplicas().size() == 1) {
replicaDeleted = true;
waitingForReplicaGetDeleted.release();
break;
}
Thread.sleep(500);
} catch (NullPointerException | SolrException e) {
e.printStackTrace();
Thread.sleep(500);
}
}
if (!replicaDeleted) {
fail("Timeout for waiting replica get deleted");
}
} catch (Exception e) {
e.printStackTrace();
fail("Failed to delete replica");
} finally {
//avoiding deadlock
waitingForReplicaGetDeleted.release();
}
return true;
}
return false;
};
try {
replica1Jetty.stop();
waitForNodeLeave(replica1JettyNodeName);
waitForState("Expected replica:"+replica1+" get down", collectionName, (liveNodes, collectionState)
-> collectionState.getSlice("shard1").getReplica(replica1.getName()).getState() == DOWN);
replica1Jetty.start();
waitingForReplicaGetDeleted.acquire();
} finally {
ZkContainer.testing_beforeRegisterInZk = null;
}
waitForState("Timeout for replica:"+replica1.getName()+" register itself as DOWN after failed to register", collectionName, (liveNodes, collectionState) -> {
Slice shard = collectionState.getSlice("shard1");
Replica replica = shard.getReplica(replica1.getName());
return replica != null && replica.getState() == DOWN;
});
CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
.process(cluster.getSolrClient());
waitForState("Expected 1x2 collections", collectionName, clusterShape(1, 2));
String leaderJettyNodeName = leaderJetty.getNodeName();
leaderJetty.stop();
waitForNodeLeave(leaderJettyNodeName);
waitForState("Expected new active leader", collectionName, (liveNodes, collectionState) -> {
Slice shard = collectionState.getSlice("shard1");
Replica newLeader = shard.getLeader();
return newLeader != null && newLeader.getState() == Replica.State.ACTIVE && !newLeader.getName().equals(leader.getName());
});
leaderJetty.start();
CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
}
private JettySolrRunner getJettyForReplica(Replica replica) {
for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
if (jetty.getNodeName().equals(replica.getNodeName())) return jetty;
}
throw new IllegalArgumentException("Can not find jetty for replica "+ replica);
}
private void waitForNodeLeave(String lostNodeName) throws InterruptedException {
ZkStateReader reader = cluster.getSolrClient().getZkStateReader();
TimeOut timeOut = new TimeOut(20, TimeUnit.SECONDS, TimeSource.NANO_TIME);
while (reader.getClusterState().getLiveNodes().contains(lostNodeName)) {
Thread.sleep(100);
if (timeOut.hasTimedOut()) fail("Wait for " + lostNodeName + " to leave failed!");
}
}
}

View File

@ -90,6 +90,7 @@ public class LeaderVoteWaitTimeoutTest extends SolrCloudTestCase {
}
@Test
@BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 17-Mar-2018
public void basicTest() throws Exception {
final String collectionName = "basicTest";
CollectionAdminRequest.createCollection(collectionName, 1, 1)
@ -126,6 +127,7 @@ public class LeaderVoteWaitTimeoutTest extends SolrCloudTestCase {
}
@Test
@BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 17-Mar-2018
public void testMostInSyncReplicasCanWinElection() throws Exception {
final String collectionName = "collection1";
CollectionAdminRequest.createCollection(collectionName, 1, 3)

View File

@ -251,6 +251,7 @@ public class MoveReplicaTest extends SolrCloudTestCase {
//Commented out 5-Dec-2017
// @AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/SOLR-11458")
@Test
@BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 17-Mar-2018 This JIRA is fixed, but this test still fails
public void testFailedMove() throws Exception {
String coll = getTestClass().getSimpleName() + "_failed_coll_" + inPlaceMove;
int REPLICATION = 2;

View File

@ -54,6 +54,7 @@ import static org.apache.solr.common.util.Utils.makeMap;
public class SSLMigrationTest extends AbstractFullDistribZkTestBase {
@Test
@BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 17-Mar-2018
public void test() throws Exception {
//Migrate from HTTP -> HTTPS -> HTTP
assertReplicaInformation("http");

View File

@ -41,6 +41,7 @@ public class TestLeaderElectionZkExpiry extends SolrTestCaseJ4 {
private static final int MIN_NODES = 4;
@Test
@BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 17-Mar-2018
public void testLeaderElectionWithZkExpiry() throws Exception {
String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
Path ccDir = createTempDir("testLeaderElectionWithZkExpiry-solr");

View File

@ -146,6 +146,7 @@ public class ScheduledMaintenanceTriggerTest extends SolrCloudTestCase {
}
@Test
@BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 17-Mar-2018
public void testInactiveShardCleanup() throws Exception {
String collection1 = getClass().getSimpleName() + "_collection1";
CollectionAdminRequest.Create create1 = CollectionAdminRequest.createCollection(collection1,

View File

@ -16,7 +16,10 @@
*/
package org.apache.solr.cloud.cdcr;
import java.util.Arrays;
import com.google.common.collect.ImmutableMap;
import org.apache.lucene.util.LuceneTestCase.Nightly;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.handler.CdcrParams;
import org.junit.Test;
@ -69,7 +72,7 @@ public class CdcrRequestHandlerTest extends BaseCdcrDistributedZkTest {
NamedList rsp = invokeCdcrAction(shardToLeaderJetty.get(SOURCE_COLLECTION).get(SHARD1), CdcrParams.CdcrAction.COLLECTIONCHECKPOINT);
assertEquals(-1l, rsp.get(CdcrParams.CHECKPOINT));
index(SOURCE_COLLECTION, getDoc(id, "a")); // shard 2
index(SOURCE_COLLECTION, getDoc(id, "a","test_i_dvo",10)); // shard 2
// only one document indexed in shard 2, the checkpoint must be still -1
rsp = invokeCdcrAction(shardToLeaderJetty.get(SOURCE_COLLECTION).get(SHARD1), CdcrParams.CdcrAction.COLLECTIONCHECKPOINT);
@ -97,17 +100,39 @@ public class CdcrRequestHandlerTest extends BaseCdcrDistributedZkTest {
expected = (Long) invokeCdcrAction(shardToLeaderJetty.get(SOURCE_COLLECTION).get(SHARD1), CdcrParams.CdcrAction.SHARDCHECKPOINT).get(CdcrParams.CHECKPOINT);
assertEquals(expected, checkpoint2);
// send a delete by id
long pre_op = (Long) invokeCdcrAction(shardToLeaderJetty.get(SOURCE_COLLECTION).get(SHARD2), CdcrParams.CdcrAction.SHARDCHECKPOINT).get(CdcrParams.CHECKPOINT);
deleteById(SOURCE_COLLECTION, Arrays.asList(new String[]{"c"})); //shard1
// document deleted in shard1, checkpoint should come from shard2
rsp = invokeCdcrAction(shardToLeaderJetty.get(SOURCE_COLLECTION).get(SHARD2), CdcrParams.CdcrAction.COLLECTIONCHECKPOINT);
long checkpoint3 = (Long) rsp.get(CdcrParams.CHECKPOINT);
expected = (Long) invokeCdcrAction(shardToLeaderJetty.get(SOURCE_COLLECTION).get(SHARD2), CdcrParams.CdcrAction.SHARDCHECKPOINT).get(CdcrParams.CHECKPOINT);
assertEquals(pre_op, expected);
assertEquals(expected, checkpoint3);
// send an in-place update
SolrInputDocument in_place_doc = new SolrInputDocument();
in_place_doc.setField(id, "a");
in_place_doc.setField("test_i_dvo", ImmutableMap.of("inc", 10)); //shard2
index(SOURCE_COLLECTION, in_place_doc);
// document updated in shard2, checkpoint should come from shard1
rsp = invokeCdcrAction(shardToLeaderJetty.get(SOURCE_COLLECTION).get(SHARD1), CdcrParams.CdcrAction.COLLECTIONCHECKPOINT);
long checkpoint4 = (Long) rsp.get(CdcrParams.CHECKPOINT);
expected = (Long) invokeCdcrAction(shardToLeaderJetty.get(SOURCE_COLLECTION).get(SHARD1), CdcrParams.CdcrAction.SHARDCHECKPOINT).get(CdcrParams.CHECKPOINT);
assertEquals(expected, checkpoint4);
// send a delete by query
deleteByQuery(SOURCE_COLLECTION, "*:*");
// all the checkpoints must come from the DBQ
rsp = invokeCdcrAction(shardToLeaderJetty.get(SOURCE_COLLECTION).get(SHARD2), CdcrParams.CdcrAction.COLLECTIONCHECKPOINT);
long checkpoint3 = (Long) rsp.get(CdcrParams.CHECKPOINT);
assertTrue(checkpoint3 > 0); // ensure that checkpoints from deletes are in absolute form
checkpoint3 = (Long) invokeCdcrAction(shardToLeaderJetty.get(SOURCE_COLLECTION).get(SHARD1), CdcrParams.CdcrAction.SHARDCHECKPOINT).get(CdcrParams.CHECKPOINT);
assertTrue(checkpoint3 > 0); // ensure that checkpoints from deletes are in absolute form
checkpoint3 = (Long) invokeCdcrAction(shardToLeaderJetty.get(SOURCE_COLLECTION).get(SHARD2), CdcrParams.CdcrAction.SHARDCHECKPOINT).get(CdcrParams.CHECKPOINT);
assertTrue(checkpoint3 > 0); // ensure that checkpoints from deletes are in absolute form
long checkpoint5= (Long) rsp.get(CdcrParams.CHECKPOINT);
assertTrue(checkpoint5 > 0); // ensure that checkpoints from deletes are in absolute form
checkpoint5 = (Long) invokeCdcrAction(shardToLeaderJetty.get(SOURCE_COLLECTION).get(SHARD1), CdcrParams.CdcrAction.SHARDCHECKPOINT).get(CdcrParams.CHECKPOINT);
assertTrue(checkpoint5 > 0); // ensure that checkpoints from deletes are in absolute form
checkpoint5 = (Long) invokeCdcrAction(shardToLeaderJetty.get(SOURCE_COLLECTION).get(SHARD2), CdcrParams.CdcrAction.SHARDCHECKPOINT).get(CdcrParams.CHECKPOINT);
assertTrue(checkpoint5 > 0); // ensure that checkpoints from deletes are in absolute form
// replication never started, lastProcessedVersion should be -1 for both shards
rsp = invokeCdcrAction(shardToLeaderJetty.get(SOURCE_COLLECTION).get(SHARD1), CdcrParams.CdcrAction.LASTPROCESSEDVERSION);

View File

@ -75,6 +75,7 @@ public class TestRecovery extends SolrTestCaseJ4 {
public static void beforeClass() throws Exception {
savedFactory = System.getProperty("solr.DirectoryFactory");
System.setProperty("solr.directoryFactory", "org.apache.solr.core.MockFSDirectoryFactory");
randomizeUpdateLogImpl();
initCore("solrconfig-tlog.xml","schema15.xml");
// validate that the schema was not changed to an unexpected state

View File

@ -16,15 +16,31 @@
*/
package org.apache.solr.search;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import com.carrotsearch.randomizedtesting.annotations.Repeat;
import org.apache.lucene.geo.GeoTestUtil;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.FacetParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.metrics.MetricsMap;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.util.SpatialUtils;
import org.apache.solr.util.TestUtils;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.locationtech.spatial4j.context.SpatialContext;
import org.locationtech.spatial4j.distance.DistanceUtils;
import org.locationtech.spatial4j.shape.Point;
//Unlike TestSolr4Spatial, not parametrized / not generic.
public class TestSolr4Spatial2 extends SolrTestCaseJ4 {
@@ -117,24 +133,122 @@ public class TestSolr4Spatial2 extends SolrTestCaseJ4 {
"q", "{!cache=false field f=" + fieldName + "}Intersects(" + polygonWKT + ")",
"sort", "id asc"), "/response/numFound==2");
}
@Test @Repeat(iterations = 10)
public void testLLPDecodeIsStableAndPrecise() throws Exception {
// test that LatLonPointSpatialField decode of docValue will round-trip (re-index then re-decode) to the same value
@SuppressWarnings({"resource", "IOResourceOpenedButNotSafelyClosed"})
SolrClient client = new EmbeddedSolrServer(h.getCore());// do NOT close it; it will close Solr
final String fld = "llp_1_dv_dvasst";
String ptOrig = GeoTestUtil.nextLatitude() + "," + GeoTestUtil.nextLongitude();
assertU(adoc("id", "0", fld, ptOrig));
assertU(commit());
// retrieve it (probably with less precision)
String ptDecoded1 = (String) client.query(params("q", "id:0")).getResults().get(0).get(fld);
// now write it back
assertU(adoc("id", "0", fld, ptDecoded1));
assertU(commit());
// retrieve it again and expect the same value
String ptDecoded2 = (String) client.query(params("q", "id:0")).getResults().get(0).get(fld);
assertEquals("orig:" + ptOrig, ptDecoded1, ptDecoded2);
// test that the representation is pretty accurate
final Point ptOrigObj = SpatialUtils.parsePoint(ptOrig, SpatialContext.GEO);
final Point ptDecodedObj = SpatialUtils.parsePoint(ptDecoded1, SpatialContext.GEO);
double deltaCentimeters = SpatialContext.GEO.calcDistance(ptOrigObj, ptDecodedObj) * DistanceUtils.DEG_TO_KM * 1000.0 * 100.0;
// //See javadocs of LatLonDocValuesField
// final Point absErrorPt = SpatialContext.GEO.getShapeFactory().pointXY(8.381903171539307E-8, 4.190951585769653E-8);
// double deltaCentimetersMax
// = SpatialContext.GEO.calcDistance(absErrorPt, 0,0) * DistanceUtils.DEG_TO_KM * 1000.0 * 100.0;
// // equals 1.0420371840922256 which is a bit lower than what we're able to do
assertTrue("deltaCm too high: " + deltaCentimeters, deltaCentimeters < 1.33);
}
@Test
public void testLatLonRetrieval() throws Exception {
assertU(adoc("id", "0",
"llp_1_dv_st", "-75,41",
"llp_1_dv", "-80,20",
"llp_1_dv_dvasst", "10,-30"));
final String ptHighPrecision = "40.2996543270,-74.0824956673";
final String ptLossOfPrecision = "40.2996544,-74.0824957"; // rounded version of the one above, losing precision
// "_1" is single, "_N" is multiValued
// "_dv" is docValues (otherwise not), "_dvasst" is useDocValuesAsStored (otherwise not)
// "_st" is stored" (otherwise not)
List<RetrievalCombo> combos = Arrays.asList(
new RetrievalCombo("llp_1_dv_st", ptHighPrecision),
new RetrievalCombo("llp_N_dv_st", Arrays.asList("-40,40", "-45,45")),
new RetrievalCombo("llp_N_dv_st", Arrays.asList("-40,40")), // multiValued but 1 value
new RetrievalCombo("llp_1_dv_dvasst", ptHighPrecision, ptLossOfPrecision),
// this one comes back in a different order since it gets sorted low to high
new RetrievalCombo("llp_N_dv_dvasst", Arrays.asList("-40,40", "-45,45"), Arrays.asList("-45,45", "-40,40")),
new RetrievalCombo("llp_N_dv_dvasst", Arrays.asList("-40,40")), // multiValued but 1 value
// edge cases. (note we sorted it as Lucene will internally)
new RetrievalCombo("llp_N_dv_dvasst", Arrays.asList(
"-90,180", "-90,-180",
"0,0", "0,180", "0,-180",
"90,0", "90,180", "90,-180")),
new RetrievalCombo("llp_1_dv", ptHighPrecision, ptLossOfPrecision),
new RetrievalCombo("llp_N_dv", Arrays.asList("-45,45", "-40,40"))
);
Collections.shuffle(combos, random());
// add and commit
for (RetrievalCombo combo : combos) {
SolrInputDocument doc = new SolrInputDocument();
doc.addField("id", "" + combo.id);
for (String indexValue : combo.indexValues) {
doc.addField(combo.fieldName, indexValue);
}
assertU(adoc(doc));
if (TestUtils.rarely()) { // induce segments to potentially change internal behavior
assertU(commit());
}
}
assertU(commit());
assertJQ(req("q","*:*", "fl","*"),
"response/docs/[0]/llp_1_dv_st=='-75,41'",
// Right now we do not support decoding point value from dv field
"!response/docs/[0]/llp_1_dv=='-80,20'",
"!response/docs/[0]/llp_1_dv_dvasst=='10,-30'");
assertJQ(req("q","*:*", "fl","llp_1_dv_st, llp_1_dv, llp_1_dv_dvasst"),
"response/docs/[0]/llp_1_dv_st=='-75,41'",
// Even when these fields are specified, we won't return them
"!response/docs/[0]/llp_1_dv=='-80,20'",
"!response/docs/[0]/llp_1_dv_dvasst=='10,-30'");
// create an assertJQ assertion string, once for fl=*, another for when the field is listed
List<String> assertJQsFlListed = new ArrayList<>();
List<String> assertJQsFlStar = new ArrayList<>();
for (RetrievalCombo combo : combos) {
String expect = "response/docs/[" + combo.id + "]/" + combo.fieldName + "==" + combo.expectReturnJSON;
assertJQsFlListed.add(expect);
if (combo.fieldName.endsWith("_dv")) {
expect = "response/docs/[" + combo.id + "]=={'id':'" + combo.id + "'}"; // only the id, nothing else
}
assertJQsFlStar.add(expect);
}
// check
assertJQ(req("q","*:*", "sort", "id asc",
"fl","*"),
assertJQsFlStar.toArray(new String[0]));
assertJQ(req("q","*:*", "sort", "id asc",
"fl", "id," + combos.stream().map(c -> c.fieldName).collect(Collectors.joining(","))),
assertJQsFlListed.toArray(new String[0]));
}
private static class RetrievalCombo {
static int idCounter = 0;
final int id = idCounter++;
final String fieldName;
final List<String> indexValues;
final String expectReturnJSON; //or null if not expected in response
RetrievalCombo(String fieldName, List<String> indexValues) { this(fieldName, indexValues, indexValues);}
RetrievalCombo(String fieldName, List<String> indexValues, List<String> returnValues) {
this.fieldName = fieldName;
this.indexValues = indexValues;
this.expectReturnJSON = returnValues.stream().collect(Collectors.joining("', '", "['", "']"));
}
RetrievalCombo(String fieldName, String indexValue) { this(fieldName, indexValue, indexValue); }
RetrievalCombo(String fieldName, String indexValue, String returnValue) {
this.fieldName = fieldName;
this.indexValues = Collections.singletonList(indexValue);
this.expectReturnJSON = "'" + returnValue + "'";
}
}
private void testRptWithGeometryField(String fieldName) throws Exception {

View File

@@ -47,6 +47,7 @@ public class TestStressRecovery extends TestRTGBase {
@BeforeClass
public static void beforeClass() throws Exception {
randomizeUpdateLogImpl();
initCore("solrconfig-tlog.xml","schema15.xml");
}

View File

@@ -245,6 +245,10 @@ This command allows for seamless splitting and requires no downtime. A shard bei
The split is performed by dividing the original shard's hash range into two equal partitions and dividing up the documents in the original shard according to the new sub-ranges. Two parameters discussed below, `ranges` and `split.key`, provide further control over how the split occurs.
The newly created shards will have as many replicas as the parent shard.
You must ensure that the node running the leader of the parent shard has enough free disk space (i.e., more than twice the index size) for the split to succeed. The API uses the Autoscaling framework to find nodes that can satisfy the disk requirements for the new replicas, but only when an Autoscaling policy is configured. Refer to the <<solrcloud-autoscaling-policy-preferences.adoc#solrcloud-autoscaling-policy-preferences,Autoscaling Policy and Preferences>> section for more details.
Shard splitting can be a long-running process. In order to avoid timeouts, you should run this as an <<Asynchronous Calls,asynchronous call>>.
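For example, a request along the following lines (the collection name, shard name, and request id are only placeholders) starts the split asynchronously; its progress can then be tracked with the REQUESTSTATUS action:
[source,text]
----
http://localhost:8983/solr/admin/collections?action=SPLITSHARD&collection=techproducts&shard=shard1&async=splitshard-1
----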
=== SPLITSHARD Parameters
@@ -960,6 +964,8 @@ http://localhost:8983/solr/admin/collections?action=DELETEREPLICA&collection=tes
Add a replica to a shard in a collection. The node name can be specified if the replica is to be created on a specific node.
The API uses the Autoscaling framework to find nodes that can satisfy the disk requirements for the new replica, but only when an Autoscaling policy is configured. Refer to the <<solrcloud-autoscaling-policy-preferences.adoc#solrcloud-autoscaling-policy-preferences,Autoscaling Policy and Preferences>> section for more details.
`/admin/collections?action=ADDREPLICA&collection=_collection_&shard=_shard_&node=_nodeName_`
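For illustration (the collection, shard, and node names are placeholders), the following call would create the new replica on a specific node:
[source,text]
----
http://localhost:8983/solr/admin/collections?action=ADDREPLICA&collection=techproducts&shard=shard1&node=192.168.1.21:8983_solr
----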
=== ADDREPLICA Parameters
@@ -2159,6 +2165,8 @@ This command recreates replicas in one node (the source) to another node(s) (the
For source replicas that are also shard leaders, the operation will wait for the number of seconds set with the `timeout` parameter to make sure there's an active replica that can become a leader (either an existing replica becoming a leader or the new replica completing recovery and becoming a leader).
The API uses the Autoscaling framework to find nodes that can satisfy the disk requirements for the new replicas, but only when an Autoscaling policy is configured. Refer to the <<solrcloud-autoscaling-policy-preferences.adoc#solrcloud-autoscaling-policy-preferences,Autoscaling Policy and Preferences>> section for more details.
`/admin/collections?action=REPLACENODE&sourceNode=_source-node_&targetNode=_target-node_`
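As a sketch (the node names are placeholders), the following call would move all replicas from `source-node` to `target-node`, waiting up to 300 seconds for leader hand-off:
[source,text]
----
http://localhost:8983/solr/admin/collections?action=REPLACENODE&sourceNode=source-node:8983_solr&targetNode=target-node:8983_solr&timeout=300
----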
=== REPLACENODE Parameters
@@ -2188,6 +2196,8 @@ This operation does not hold necessary locks on the replicas that belong to on t
This command moves a replica from one node to a new node. In the case of shared filesystems, the `dataDir` will be reused.
The API uses the Autoscaling framework to find nodes that can satisfy the disk requirements for the replica to be moved, but only when an Autoscaling policy is configured. Refer to the <<solrcloud-autoscaling-policy-preferences.adoc#solrcloud-autoscaling-policy-preferences,Autoscaling Policy and Preferences>> section for more details.
`/admin/collections?action=MOVEREPLICA&collection=collection&shard=shard&replica=replica&sourceNode=nodeName&targetNode=nodeName`
=== MOVEREPLICA Parameters

View File

@@ -371,7 +371,7 @@ Read more about model evolution in the <<LTR Lifecycle>> section of this page.
=== Training Example
Example training data and a demo 'train and upload model' script can be found in the `solr/contrib/ltr/example` folder in the https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git[Apache lucene-solr git repository] which is mirrored on https://github.com/apache/lucene-solr/tree/releases/lucene-solr/6.4.0/solr/contrib/ltr/example[github.com] (the `solr/contrib/ltr/example` folder is not shipped in the Solr binary release).
Example training data and a demo `train_and_upload_demo_model.py` script can be found in the `solr/contrib/ltr/example` folder in the https://git1-us-west.apache.org/repos/asf?p=lucene-solr.git;a=tree;f=solr/contrib/ltr/example[Apache lucene-solr Git repository] (mirrored on https://github.com/apache/lucene-solr/tree/releases/lucene-solr/{solr-docs-version}.0/solr/contrib/ltr/example[github.com]). This example folder is not shipped in the Solr binary release.
== Installation of LTR

View File

@@ -536,11 +536,11 @@ The `bin/solr auth enable` command makes several changes to enable Basic Authent
----
* Adds two lines to `bin/solr.in.sh` or `bin\solr.in.cmd` to set the authentication type, and the path to `basicAuth.conf`:
+
[source]
[source,subs="attributes"]
----
# The following lines added by ./solr for enabling BasicAuth
SOLR_AUTH_TYPE="basic"
SOLR_AUTHENTICATION_OPTS="-Dsolr.httpclient.config=/path/to/solr-6.6.0/server/solr/basicAuth.conf"
SOLR_AUTHENTICATION_OPTS="-Dsolr.httpclient.config=/path/to/solr-{solr-docs-version}.0/server/solr/basicAuth.conf"
----
* Creates the file `server/solr/basicAuth.conf` to store the credential information that is used with `bin/solr` commands.
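For reference, the changes described above are produced by an invocation along these lines (the credentials shown are placeholders, and the `basicAuth` type is assumed here):
[source,bash]
----
bin/solr auth enable -type basicAuth -credentials solr:SolrRocks
----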

View File

@@ -139,6 +139,11 @@ Any arbitrary system property set on the node on startup.
`metrics:<full-path-to-the metric>`::
Any arbitrary metric. For example, `metrics:solr.node:CONTAINER.fs.totalSpace`. Refer to the `key` parameter in the <<metrics-reporting.adoc#metrics-reporting, Metrics API>> section.
`diskType`::
The type of disk drive being used for Solr's `coreRootDirectory`. The only two supported values are `rotational` and `ssd`. Refer to the `coreRootDirectory` parameter in the <<format-of-solr-xml.adoc#solr-xml-parameters, Solr.xml Parameters>> section.
+
Its value is fetched from the Metrics API using the key `solr.node:CONTAINER.fs.coreRoot.spins`. The disk type is auto-detected by Lucene using various heuristics, and it is not guaranteed to be correct across all platforms or operating systems. Refer to the <<taking-solr-to-production.adoc#dynamic-defaults-for-concurrentmergescheduler, Dynamic defaults for ConcurrentMergeScheduler>> section for more details.
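For example, the metric backing this attribute can be inspected directly via the Metrics API (host and port are illustrative):
[source,text]
----
http://localhost:8983/solr/admin/metrics?key=solr.node:CONTAINER.fs.coreRoot.spins
----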
=== Policy Operators
Each attribute in the policy may specify one of the following operators along with the value.
@@ -192,12 +197,15 @@ Place all replicas in nodes with freedisk more than 500GB when possible. Here we
[source,json]
{"replica": 0, "freedisk": "<500", "strict" : false}
==== Try to Place all Replicas of type TLOG in SSD type file system
==== Try to Place all Replicas of type TLOG in nodes with SSD drives
[source,json]
{ "replica": 0, "sysprop.file_system" : "!ssd", "type" : "TLOG" }
{ "replica": 0, "diskType" : "!ssd", "type" : "TLOG" }
Please note that to use the `sysprop.file_system` attribute, all your SSD nodes must be started with the system property `-Dfile_system=ssd`.
==== Try to Place all Replicas of type PULL on nodes with rotational disk drives
[source,json]
{ "replica": 0, "diskType" : "!rotational", "type" : "PULL" }
[[collection-specific-policy]]
== Defining Collection-Specific Policies

View File

@@ -63,10 +63,12 @@ to add replicas on the live nodes to maintain the expected replication factor).
Refer to the section <<solrcloud-autoscaling-auto-add-replicas.adoc#solrcloud-autoscaling-auto-add-replicas, Autoscaling Automatically Adding Replicas>> to learn more about how the `.autoAddReplicas` trigger works.
This trigger supports one parameter:
This trigger supports one parameter, which is defined in the `<solrcloud>` section of `solr.xml`:
`autoReplicaFailoverWaitAfterExpiration`::
The minimum time in milliseconds to wait for initiating replacement of a replica after first noticing it not being live. This is important to prevent false positives while stopping or starting the cluster. The default is `120000` (2 minutes).
The minimum time in milliseconds to wait before initiating the replacement of a replica after first noticing that it is not live. This is important to prevent false positives while stopping or starting the cluster. The default is `120000` (2 minutes). The value provided for this parameter is used as the value for the `waitFor` parameter in the `.auto_add_replicas` trigger.
TIP: See <<format-of-solr-xml.adoc#the-solrcloud-element,The <solrcloud> Element>> for more details about how to work with `solr.xml`.
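A minimal sketch of how this parameter might be set in `solr.xml` (the value shown is simply the default):
[source,xml]
----
<solrcloud>
  <!-- wait 2 minutes before replacing replicas whose node is no longer live -->
  <int name="autoReplicaFailoverWaitAfterExpiration">120000</int>
</solrcloud>
----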
== Metric Trigger

View File

@@ -73,6 +73,9 @@ public class ZkTestServer {
private volatile Thread zooThread;
private int theTickTime = TICK_TIME;
// SOLR-12101 - provide defaults to avoid the maximum session timeout of 20 * tickTime (20 seconds when the tick time is 1000) enforced by our server instance
private int maxSessionTimeout = 60000;
private int minSessionTimeout = 3000;
static public enum LimitViolationAction {
IGNORE,
@@ -94,14 +97,14 @@ public class ZkTestServer {
} catch (JMException e) {
log.warn("Unable to register log4j JMX control", e);
}
ServerConfig config = new ServerConfig();
if (args.length == 1) {
config.parse(args[0]);
} else {
config.parse(args);
}
runFromConfig(config);
}
@@ -470,6 +473,8 @@ public class ZkTestServer {
this.dataDir = zkDir;
this.dataLogDir = zkDir;
this.tickTime = theTickTime;
this.maxSessionTimeout = ZkTestServer.this.maxSessionTimeout;
this.minSessionTimeout = ZkTestServer.this.minSessionTimeout;
}
public void setClientPort(int clientPort) {
@@ -555,13 +560,13 @@ public class ZkTestServer {
public static class HostPort {
String host;
int port;
HostPort(String host, int port) {
this.host = host;
this.port = port;
}
}
/**
* Send the 4letterword
* @param host the destination host
@@ -633,4 +638,20 @@ public class ZkTestServer {
public ZKServerMain.WatchLimiter getLimiter() {
return zkServer.getLimiter();
}
public int getMaxSessionTimeout() {
return maxSessionTimeout;
}
public int getMinSessionTimeout() {
return minSessionTimeout;
}
public void setMaxSessionTimeout(int maxSessionTimeout) {
this.maxSessionTimeout = maxSessionTimeout;
}
public void setMinSessionTimeout(int minSessionTimeout) {
this.minSessionTimeout = minSessionTimeout;
}
}