Merge branch 'master' into upgrade-master-to-8

Adrien Grand 2017-07-03 09:25:39 +02:00
commit e809e095f0
24 changed files with 177 additions and 229 deletions

View File

@@ -644,10 +644,14 @@ def verifyUnpacked(java, project, artifact, unpackPath, gitRevision, version, te
       textFiles.append('BUILD')
   for fileName in textFiles:
-    fileName += '.txt'
-    if fileName not in l:
-      raise RuntimeError('file "%s" is missing from artifact %s' % (fileName, artifact))
-    l.remove(fileName)
+    fileNameTxt = fileName + '.txt'
+    fileNameMd = fileName + '.md'
+    if fileNameTxt in l:
+      l.remove(fileNameTxt)
+    elif fileNameMd in l:
+      l.remove(fileNameMd)
+    else:
+      raise RuntimeError('file "%s".[txt|md] is missing from artifact %s' % (fileName, artifact))
 
   if project == 'lucene':
     if LUCENE_NOTICE is None:

View File

@@ -544,11 +544,11 @@ final class DocumentsWriter implements Closeable, Accountable {
           dwptSuccess = true;
         } finally {
           subtractFlushedNumDocs(flushingDocsInRam);
-          if (!flushingDWPT.pendingFilesToDelete().isEmpty()) {
+          if (flushingDWPT.pendingFilesToDelete().isEmpty() == false) {
            putEvent(new DeleteNewFilesEvent(flushingDWPT.pendingFilesToDelete()));
            hasEvents = true;
          }
-          if (!dwptSuccess) {
+          if (dwptSuccess == false) {
            putEvent(new FlushFailedEvent(flushingDWPT.getSegmentInfo()));
            hasEvents = true;
          }
@@ -582,6 +582,10 @@ final class DocumentsWriter implements Closeable, Accountable {
       flushingDWPT = flushControl.nextPendingFlush();
     }
 
+    if (hasEvents) {
+      writer.doAfterSegmentFlushed(false, false);
+    }
+
     // If deletes alone are consuming > 1/2 our RAM
     // buffer, force them all to apply now. This is to
     // prevent too-frequent flushing of a long tail of
@@ -605,7 +609,7 @@ final class DocumentsWriter implements Closeable, Accountable {
   void subtractFlushedNumDocs(int numFlushed) {
     int oldValue = numDocsInRAM.get();
-    while (!numDocsInRAM.compareAndSet(oldValue, oldValue - numFlushed)) {
+    while (numDocsInRAM.compareAndSet(oldValue, oldValue - numFlushed) == false) {
       oldValue = numDocsInRAM.get();
     }
     assert numDocsInRAM.get() >= 0;
@@ -726,10 +730,9 @@ final class DocumentsWriter implements Closeable, Accountable {
   static final class ApplyDeletesEvent implements Event {
     static final Event INSTANCE = new ApplyDeletesEvent();
-    private int instCount = 0;
     private ApplyDeletesEvent() {
-      assert instCount == 0;
-      instCount++;
+      // only one instance
     }
 
     @Override
@@ -740,10 +743,9 @@ final class DocumentsWriter implements Closeable, Accountable {
   static final class ForcedPurgeEvent implements Event {
     static final Event INSTANCE = new ForcedPurgeEvent();
-    private int instCount = 0;
     private ForcedPurgeEvent() {
-      assert instCount == 0;
-      instCount++;
+      // only one instance
     }
 
     @Override

View File

@@ -119,6 +119,10 @@ class DocumentsWriterFlushQueue {
     synchronized (this) {
       // finally remove the published ticket from the queue
       final FlushTicket poll = queue.poll();
+      // we hold the purgeLock so no other thread should have polled:
+      assert poll == head;
       ticketCount.decrementAndGet();
-      assert poll == head;
     }

View File

@@ -262,6 +262,8 @@ class FrozenBufferedUpdates {
     int totalSegmentCount = 0;
     long totalDelCount = 0;
+    boolean finished = false;
 
+    // Optimistic concurrency: assume we are free to resolve the deletes against all current segments in the index, despite that
+    // concurrent merges are running.  Once we are done, we check to see if a merge completed while we were running.  If so, we must retry
+    // resolving against the newly merged segment(s).  Eventually no merge finishes while we were running and we are done.
@@ -334,7 +336,7 @@ class FrozenBufferedUpdates {
       if (infoStream.isEnabled("BD")) {
         infoStream.message("BD", String.format(Locale.ROOT,
-            messagePrefix + "done apply del packet (%s) to %d segments; %d new deletes/updates; took %.3f sec",
+            messagePrefix + "done inner apply del packet (%s) to %d segments; %d new deletes/updates; took %.3f sec",
             this, segStates.length, delCount, (System.nanoTime() - iterStartNS) / 1000000000.));
       }
@@ -352,6 +354,13 @@ class FrozenBufferedUpdates {
       if (mergeGenCur == mergeGenStart) {
+
+        // Must do this while still holding IW lock else a merge could finish and skip carrying over our updates:
+        // Record that this packet is finished:
+        writer.bufferedUpdatesStream.finished(this);
+        finished = true;
+
         // No merge finished while we were applying, so we are done!
         break;
       }
@@ -367,8 +376,10 @@ class FrozenBufferedUpdates {
       iter++;
     }
 
+    if (finished == false) {
+      // Record that this packet is finished:
+      writer.bufferedUpdatesStream.finished(this);
+    }
+
     if (infoStream.isEnabled("BD")) {
       String message = String.format(Locale.ROOT,

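The "optimistic concurrency" comment added above is the heart of the new apply loop. A self-contained toy model of the pattern it describes follows; all names here are hypothetical, not the actual FrozenBufferedUpdates code:

    import java.util.concurrent.atomic.AtomicLong;

    class OptimisticDeleteResolver {
      // Hypothetical counter, bumped each time a merge commits.
      private final AtomicLong mergeGen = new AtomicLong();

      void onMergeFinished() {
        mergeGen.incrementAndGet();
      }

      // Resolve deletes without blocking merges; retry if a merge finished meanwhile.
      void resolve(Runnable resolveAgainstCurrentSegments) {
        while (true) {
          long genStart = mergeGen.get();
          resolveAgainstCurrentSegments.run(); // optimistic: merges keep running
          if (mergeGen.get() == genStart) {
            break; // no merge completed while we worked, so we are done
          }
          // else: a merge replaced segments under us; resolve again
        }
      }
    }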
View File

@@ -642,12 +642,13 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
       }
     }
 
-    void writeDocValuesUpdates(List<SegmentCommitInfo> infos) throws IOException {
+    void writeDocValuesUpdatesForMerge(List<SegmentCommitInfo> infos) throws IOException {
       boolean any = false;
       for (SegmentCommitInfo info : infos) {
         ReadersAndUpdates rld = get(info, false);
         if (rld != null) {
           any |= rld.writeFieldUpdates(directory, bufferedUpdatesStream.getCompletedDelGen(), infoStream);
+          rld.setIsMerging();
         }
       }
       if (any) {
@@ -4216,7 +4217,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
     // Must move the pending doc values updates to disk now, else the newly merged segment will not see them:
     // TODO: we could fix merging to pull the merged DV iterator so we don't have to move these updates to disk first, i.e. just carry them
     // in memory:
-    readerPool.writeDocValuesUpdates(merge.segments);
+    readerPool.writeDocValuesUpdatesForMerge(merge.segments);
 
     // Bind a new segment name here so even with
     // ConcurrentMergePolicy we keep deterministic segment

View File

@@ -808,14 +808,16 @@ class ReadersAndUpdates {
     return true;
   }
 
-  /** Returns a reader for merge, with the latest doc values updates and deletions. */
-  synchronized SegmentReader getReaderForMerge(IOContext context) throws IOException {
+  synchronized public void setIsMerging() {
+    // This ensures any newly resolved doc value updates while we are merging are
+    // saved for re-applying after this segment is done merging:
+    isMerging = true;
+    assert mergingDVUpdates.isEmpty();
+  }
+
+  /** Returns a reader for merge, with the latest doc values updates and deletions. */
+  synchronized SegmentReader getReaderForMerge(IOContext context) throws IOException {
 
     // We must carry over any still-pending DV updates because they were not
     // successfully written, e.g. because there was a hole in the delGens,

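setIsMerging() is what keeps concurrently resolved doc-values updates from being lost under a running merge: once the flag is set, every update that resolves is also queued in mergingDVUpdates for re-applying to the merged segment. A self-contained toy model of that buffering (hypothetical names, not the actual ReadersAndUpdates code):

    import java.util.ArrayList;
    import java.util.List;

    class MergingUpdatesBuffer {
      private boolean isMerging;
      private final List<String> mergingUpdates = new ArrayList<>();

      synchronized void setIsMerging() {
        // From here on, keep a copy of every update so it can be re-applied
        // to the merged segment once the merge finishes:
        isMerging = true;
        assert mergingUpdates.isEmpty();
      }

      synchronized void applyUpdate(String update) {
        // ... apply the update to the live segment as usual, then:
        if (isMerging) {
          mergingUpdates.add(update);
        }
      }

      synchronized List<String> finishMerge() {
        // The caller re-applies the returned updates to the newly merged segment:
        isMerging = false;
        List<String> carryOver = new ArrayList<>(mergingUpdates);
        mergingUpdates.clear();
        return carryOver;
      }
    }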
View File

@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.index;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+
+public class TestIndexManyDocuments extends LuceneTestCase {
+
+  public void test() throws Exception {
+    Directory dir = newFSDirectory(createTempDir());
+    IndexWriterConfig iwc = new IndexWriterConfig();
+    iwc.setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 2000));
+
+    int numDocs = atLeast(10000);
+
+    final IndexWriter w = new IndexWriter(dir, iwc);
+    final AtomicInteger count = new AtomicInteger();
+    Thread[] threads = new Thread[2];
+    for(int i=0;i<threads.length;i++) {
+      threads[i] = new Thread() {
+          @Override
+          public void run() {
+            while (count.getAndIncrement() < numDocs) {
+              Document doc = new Document();
+              doc.add(newTextField("field", "text", Field.Store.NO));
+              try {
+                w.addDocument(doc);
+              } catch (IOException ioe) {
+                throw new RuntimeException(ioe);
+              }
+            }
+          }
+        };
+      threads[i].start();
+    }
+
+    for (Thread thread : threads) {
+      thread.join();
+    }
+
+    assertEquals("lost " + (numDocs - w.maxDoc()) + " documents; maxBufferedDocs=" + iwc.getMaxBufferedDocs(), numDocs, w.maxDoc());
+    w.close();
+
+    IndexReader r = DirectoryReader.open(dir);
+    assertEquals(numDocs, r.maxDoc());
+    IOUtils.close(r, dir);
+  }
+}

View File

@@ -381,6 +381,7 @@ Other Changes
 * SOLR-10971: Randomize PointFields in CdcrBootstrapTest (hossman)
 * SOLR-10977: Randomize the usage of Points based numerics in schema15.xml and all impacted tests (hossman)
 * SOLR-10979: Randomize PointFields in schema-docValues*.xml and all affected tests (hossman)
+* SOLR-10989: Randomize PointFields and general cleanup in schema files where some Trie fields were unused (hossman)
 
 * SOLR-6807: Changed requestDispatcher's handleSelect to default to false, thus ignoring "qt".
   Simplified configs to not refer to handleSelect or "qt". Switch all tests that assumed true to assume false

View File

@@ -22,8 +22,8 @@
   <fieldType name="string" class="solr.StrField"/>
 
   <!-- BEGIN BAD STUFF -->
-  <fieldType name="ftAgain" class="solr.TrieIntField"/>
-  <fieldType name="ftAgain" class="solr.TrieIntField"/>
+  <fieldType name="ftAgain" class="solr.StrField"/>
+  <fieldType name="ftAgain" class="solr.StrField"/>
   <!-- END BAD STUFF -->
 
   <field name="id" type="string" indexed="true" stored="true" multiValued="false" required="false"/>

View File

@@ -27,46 +27,10 @@
 <schema name="test" version="1.2">
-  <!-- field type definitions... note that the "name" attribute is
-       just a label to be used by field definitions.  The "class"
-       attribute and any other attributes determine the real type and
-       behavior of the fieldType.
-    -->
-
-  <!--
-    Default numeric field types. For faster range queries, consider the tint/tfloat/tlong/tdouble types.
-  -->
-  <fieldType name="int" class="solr.TrieIntField" precisionStep="0" positionIncrementGap="0"/>
-  <fieldType name="float" class="solr.TrieFloatField" precisionStep="0" positionIncrementGap="0"/>
-  <fieldType name="long" class="solr.TrieLongField" precisionStep="0" positionIncrementGap="0"/>
-  <fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" positionIncrementGap="0"/>
-
-  <!--
-    Numeric field types that index each value at various levels of precision
-    to accelerate range queries when the number of values between the range
-    endpoints is large. See the javadoc for LegacyNumericRangeQuery for internal
-    implementation details.
-
-    Smaller precisionStep values (specified in bits) will lead to more tokens
-    indexed per value, slightly larger index size, and faster range queries.
-    A precisionStep of 0 disables indexing at different precision levels.
-  -->
-  <fieldType name="tint" class="solr.TrieIntField" precisionStep="8" positionIncrementGap="0"/>
-  <fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" positionIncrementGap="0"/>
-  <fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" positionIncrementGap="0"/>
-  <fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" positionIncrementGap="0"/>
-
   <fieldType name="binary" class="solr.BinaryField"/>
   <fieldType name="boolean" class="solr.BoolField" sortMissingLast="true"/>
   <fieldType name="string" class="solr.StrField" sortMissingLast="true"/>
 
   <!-- format for date is 1995-12-31T23:59:59.999Z and only the fractional
        seconds part (.999) is optional.
     -->
   <fieldType name="date" class="solr.TrieDateField" sortMissingLast="true"/>
 
   <field name="id" type="string" indexed="true" stored="true" multiValued="false" required="true"/>
   <field name="data" type="binary" stored="true"/>

View File

@@ -27,34 +27,10 @@
 <schema name="test" version="1.0">
-  <!-- field type definitions... note that the "name" attribute is
-       just a label to be used by field definitions.  The "class"
-       attribute and any other attributes determine the real type and
-       behavior of the fieldType.
-    -->
-
-  <!--
-    Default numeric field types. For faster range queries, consider the tint/tfloat/tlong/tdouble types.
-  -->
-  <fieldType name="int" class="solr.TrieIntField" precisionStep="0" positionIncrementGap="0"/>
-  <fieldType name="float" class="solr.TrieFloatField" precisionStep="0" positionIncrementGap="0"/>
-  <fieldType name="long" class="solr.TrieLongField" precisionStep="0" positionIncrementGap="0"/>
-  <fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" positionIncrementGap="0"/>
-
-  <!--
-    Numeric field types that index each value at various levels of precision
-    to accelerate range queries when the number of values between the range
-    endpoints is large. See the javadoc for LegacyNumericRangeQuery for internal
-    implementation details.
-
-    Smaller precisionStep values (specified in bits) will lead to more tokens
-    indexed per value, slightly larger index size, and faster range queries.
-    A precisionStep of 0 disables indexing at different precision levels.
-  -->
-  <fieldType name="tint" class="solr.TrieIntField" precisionStep="8" positionIncrementGap="0"/>
-  <fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" positionIncrementGap="0"/>
-  <fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" positionIncrementGap="0"/>
-  <fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" positionIncrementGap="0"/>
+  <fieldType name="int" class="${solr.tests.IntegerFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0" positionIncrementGap="0"/>
+  <fieldType name="float" class="${solr.tests.FloatFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0" positionIncrementGap="0"/>
+  <fieldType name="long" class="${solr.tests.LongFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0" positionIncrementGap="0"/>
+  <fieldType name="double" class="${solr.tests.DoubleFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0" positionIncrementGap="0"/>
 
   <!-- Field type demonstrating an Analyzer failure -->
   <fieldType name="failtype1" class="solr.TextField">
@@ -94,7 +70,7 @@
   <!-- format for date is 1995-12-31T23:59:59.999Z and only the fractional
        seconds part (.999) is optional.
     -->
-  <fieldType name="date" class="solr.TrieDateField" sortMissingLast="true"/>
+  <fieldType name="date" class="${solr.tests.DateFieldType}" docValues="${solr.tests.numeric.dv}" sortMissingLast="true"/>
 
   <!-- solr.TextField allows the specification of custom
        text analyzers specified as a tokenizer and a list

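The ${solr.tests.*FieldType} and ${solr.tests.numeric.dv} placeholders in this and the following schema files are resolved from system properties that the test framework sets before the core loads, picking Trie or Point numerics per run. A minimal sketch of that wiring, assuming the usual Trie-vs-Points coin flip (only the property and class names visible in the diffs are taken as given; the rest is illustrative):

    import java.util.Random;

    class RandomizedNumericsSketch {
      public static void main(String[] args) {
        boolean usePoints = new Random().nextBoolean();
        System.setProperty("solr.tests.IntegerFieldType",
            usePoints ? "solr.IntPointField" : "solr.TrieIntField");
        System.setProperty("solr.tests.FloatFieldType",
            usePoints ? "solr.FloatPointField" : "solr.TrieFloatField");
        System.setProperty("solr.tests.LongFieldType",
            usePoints ? "solr.LongPointField" : "solr.TrieLongField");
        System.setProperty("solr.tests.DoubleFieldType",
            usePoints ? "solr.DoublePointField" : "solr.TrieDoubleField");
        System.setProperty("solr.tests.DateFieldType",
            usePoints ? "solr.DatePointField" : "solr.TrieDateField");
        // Point fields support sorting and faceting only through docValues, so
        // docValues is forced on whenever Points are chosen:
        System.setProperty("solr.tests.numeric.dv", Boolean.toString(usePoints));
        // A schema line such as
        //   <fieldType name="int" class="${solr.tests.IntegerFieldType}"
        //              docValues="${solr.tests.numeric.dv}" .../>
        // then resolves to the chosen class when the core loads.
      }
    }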
View File

@@ -27,35 +27,10 @@
 <schema name="test" version="1.2">
-  <!-- field type definitions... note that the "name" attribute is
-       just a label to be used by field definitions.  The "class"
-       attribute and any other attributes determine the real type and
-       behavior of the fieldType.
-    -->
   <fieldType name="string" class="solr.StrField"/>
-
-  <!--
-    Default numeric field types. For faster range queries, consider the tint/tfloat/tlong/tdouble types.
-  -->
-  <fieldType name="int" class="solr.TrieIntField" precisionStep="0" positionIncrementGap="0"/>
-  <fieldType name="float" class="solr.TrieFloatField" precisionStep="0" positionIncrementGap="0"/>
-  <fieldType name="long" class="solr.TrieLongField" precisionStep="0" positionIncrementGap="0"/>
-  <fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" positionIncrementGap="0"/>
-
-  <!--
-    Numeric field types that index each value at various levels of precision
-    to accelerate range queries when the number of values between the range
-    endpoints is large. See the javadoc for LegacyNumericRangeQuery for internal
-    implementation details.
-
-    Smaller precisionStep values (specified in bits) will lead to more tokens
-    indexed per value, slightly larger index size, and faster range queries.
-    A precisionStep of 0 disables indexing at different precision levels.
-  -->
-  <fieldType name="tint" class="solr.TrieIntField" precisionStep="8" positionIncrementGap="0"/>
-  <fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" positionIncrementGap="0"/>
-  <fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" positionIncrementGap="0"/>
-  <fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" positionIncrementGap="0"/>
+  <fieldType name="int" class="${solr.tests.IntegerFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0" positionIncrementGap="0"/>
+  <fieldType name="long" class="${solr.tests.LongFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0" positionIncrementGap="0"/>
 
   <fieldType name="customfield" class="org.apache.solr.schema.MyCrazyCustomField" multiValued="true"
              positionIncrementGap="100">

View File

@@ -17,52 +17,15 @@
 -->
 <schema name="test-non-stored-docvalues" version="1.6">
-  <!-- attribute "name" is the name of this schema and is only used for display purposes.
-       version="x.y" is Solr's version number for the schema syntax and
-       semantics.  It should not normally be changed by applications.
-
-       1.0: multiValued attribute did not exist, all fields are multiValued
-            by nature
-       1.1: multiValued attribute introduced, false by default
-       1.2: omitTermFreqAndPositions attribute introduced, true by default
-            except for text fields.
-       1.3: removed optional field compress feature
-       1.4: autoGeneratePhraseQueries attribute introduced to drive QueryParser
-            behavior when a single string produces multiple tokens.  Defaults
-            to off for version >= 1.4
-       1.5: omitNorms defaults to true for primitive field types
-            (int, float, boolean, string...)
-       1.6: useDocValuesAsStored defaults to true.
-    -->
-
-  <!-- field type definitions... note that the "name" attribute is
-       just a label to be used by field definitions.  The "class"
-       attribute and any other attributes determine the real type and
-       behavior of the fieldType.
-    -->
-  <fieldType name="int" class="solr.TrieIntField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
-  <fieldType name="float" class="solr.TrieFloatField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
-  <fieldType name="long" class="solr.TrieLongField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
-  <fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
-
-  <fieldType name="tint" class="solr.TrieIntField" precisionStep="8" omitNorms="true" positionIncrementGap="0"/>
-  <fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" omitNorms="true" positionIncrementGap="0"/>
-  <fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" omitNorms="true" positionIncrementGap="0"/>
-  <fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" omitNorms="true" positionIncrementGap="0"/>
+  <fieldType name="int" class="${solr.tests.IntegerFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
+  <fieldType name="float" class="${solr.tests.FloatFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
+  <fieldType name="long" class="${solr.tests.LongFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
+  <fieldType name="double" class="${solr.tests.DoubleFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
 
   <fieldType name="boolean" class="solr.BoolField" sortMissingLast="true"/>
   <fieldType name="string" class="solr.StrField" sortMissingLast="true"/>
 
   <!-- format for date is 1995-12-31T23:59:59.999Z and only the fractional
        seconds part (.999) is optional.
     -->
-  <fieldType name="date" class="solr.TrieDateField" precisionStep="0"/>
-  <fieldType name="tdate" class="solr.TrieDateField" precisionStep="6"/>
-  <fieldType name="tdatedv" class="solr.TrieDateField" precisionStep="6" docValues="true"/>
+  <fieldType name="date" class="${solr.tests.DateFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0"/>
 
   <fieldType name="enumField" class="solr.EnumField" enumsConfig="enumsConfig.xml" enumName="severity"/>

View File

@@ -23,7 +23,6 @@
 -->
 <schema name="test" version="1.0">
   <fieldType name="string" class="solr.StrField"/>
-  <fieldType name="int" class="solr.TrieIntField" precisionStep="0"/>
   <fieldType name="text" class="solr.TextField">
     <analyzer>
       <tokenizer class="solr.StandardTokenizerFactory"/>

View File

@@ -27,7 +27,6 @@
 <schema name="test" version="1.2">
-  <fieldType name="integer" class="solr.TrieIntField" precisionStep="0"/>
   <fieldType name="string" class="solr.StrField" sortMissingLast="true"/>
 
   <field name="id" type="string" indexed="true" stored="true" multiValued="false" required="false"/>

View File

@@ -27,7 +27,6 @@
 <schema name="test" version="1.2">
-  <fieldType name="integer" class="solr.TrieIntField" precisionStep="0"/>
   <fieldType name="string" class="solr.StrField" sortMissingLast="true"/>
 
   <field name="id" type="string" indexed="true" stored="true" multiValued="false" required="false"/>

View File

@@ -27,34 +27,10 @@
 <schema name="test" version="1.0">
-  <!-- field type definitions... note that the "name" attribute is
-       just a label to be used by field definitions.  The "class"
-       attribute and any other attributes determine the real type and
-       behavior of the fieldType.
-    -->
-
-  <!--
-    Default numeric field types. For faster range queries, consider the tint/tfloat/tlong/tdouble types.
-  -->
-  <fieldType name="int" class="solr.TrieIntField" precisionStep="0" positionIncrementGap="0"/>
-  <fieldType name="float" class="solr.TrieFloatField" precisionStep="0" positionIncrementGap="0"/>
-  <fieldType name="long" class="solr.TrieLongField" precisionStep="0" positionIncrementGap="0"/>
-  <fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" positionIncrementGap="0"/>
-
-  <!--
-    Numeric field types that index each value at various levels of precision
-    to accelerate range queries when the number of values between the range
-    endpoints is large. See the javadoc for LegacyNumericRangeQuery for internal
-    implementation details.
-
-    Smaller precisionStep values (specified in bits) will lead to more tokens
-    indexed per value, slightly larger index size, and faster range queries.
-    A precisionStep of 0 disables indexing at different precision levels.
-  -->
-  <fieldType name="tint" class="solr.TrieIntField" precisionStep="8" positionIncrementGap="0"/>
-  <fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" positionIncrementGap="0"/>
-  <fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" positionIncrementGap="0"/>
-  <fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" positionIncrementGap="0"/>
+  <fieldType name="int" class="${solr.tests.IntegerFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0" positionIncrementGap="0"/>
+  <fieldType name="float" class="${solr.tests.FloatFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0" positionIncrementGap="0"/>
+  <fieldType name="long" class="${solr.tests.LongFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0" positionIncrementGap="0"/>
+  <fieldType name="double" class="${solr.tests.DoubleFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0" positionIncrementGap="0"/>
 
   <!-- Field type demonstrating an Analyzer failure -->
   <fieldType name="failtype1" class="solr.TextField">
@@ -77,7 +53,7 @@
   <!-- format for date is 1995-12-31T23:59:59.999Z and only the fractional
        seconds part (.999) is optional.
     -->
-  <fieldType name="date" class="solr.TrieDateField" sortMissingLast="true"/>
+  <fieldType name="date" class="${solr.tests.DateFieldType}" docValues="${solr.tests.numeric.dv}" sortMissingLast="true"/>
 
   <!-- solr.TextField allows the specification of custom
        text analyzers specified as a tokenizer and a list

View File

@@ -22,10 +22,7 @@
 -->
 <schema name="test" version="1.0">
-  <fieldType name="long" class="solr.TrieLongField"/>
-  <fieldType name="integer" class="solr.TrieIntField" precisionStep="0"/>
+  <fieldType name="long" class="${solr.tests.LongFieldType}" docValues="${solr.tests.numeric.dv}"/>
   <fieldType name="string" class="solr.StrField"/>
   <fieldType name="text" class="solr.TextField">

View File

@@ -27,27 +27,8 @@ more concise example.
 <schema name="test" version="1.0">
-  <!-- field type definitions... note that the "name" attribute is
-       just a label to be used by field definitions.  The "class"
-       attribute and any other attributes determine the real type and
-       behavior of the fieldType.
-    -->
   <fieldType name="string" class="solr.StrField" />
-
-  <!--
-    Default numeric field types. For faster range queries, consider the tint/tfloat/tlong/tdouble types.
-  -->
-  <fieldType name="int" class="solr.TrieIntField" precisionStep="0" positionIncrementGap="0"/>
-  <fieldType name="float" class="solr.TrieFloatField" precisionStep="0" positionIncrementGap="0"/>
-  <fieldType name="long" class="solr.TrieLongField" precisionStep="0" positionIncrementGap="0"/>
-  <fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" positionIncrementGap="0"/>
-
-  <!--
-    Numeric field types that index each value at various levels of precision
-    to accelerate range queries when the number of values between the range
-    endpoints is large. See the javadoc for LegacyNumericRangeQuery for internal
-    implementation details.
-  -->
+  <fieldType name="long" class="${solr.tests.LongFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0" positionIncrementGap="0"/>
 
   <!-- Seperate analyzers for index and query time -->

View File

@@ -16,15 +16,14 @@
    limitations under the License.
 -->
 <schema name="example" version="1.6">
-  <field name="_version_" type="${solr.tests.longClass:plong}" indexed="false" docValues="true" stored="true"/>
+  <field name="_version_" type="long" indexed="false" docValues="true" stored="true"/>
   <field name="id" type="string" indexed="true" stored="true" required="true" multiValued="false"/>
   <field name="text" type="text_general" indexed="true" stored="false" multiValued="true"/>
   <field name="signatureField" type="string" indexed="true" stored="false"/>
   <dynamicField name="*_sS" type="string" indexed="false" stored="true"/>
   <uniqueKey>id</uniqueKey>
   <fieldType name="string" class="solr.StrField" sortMissingLast="true"/>
-  <fieldType name="long" class="solr.TrieLongField" precisionStep="0" positionIncrementGap="0"/>
-  <fieldType name="plong" class="solr.LongPointField" positionIncrementGap="0"/>
+  <fieldType name="long" class="${solr.tests.LongFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0" positionIncrementGap="0"/>
   <fieldType name="text_general" class="solr.TextField" positionIncrementGap="100">
     <analyzer>
       <tokenizer class="solr.StandardTokenizerFactory"/>

View File

@@ -16,15 +16,14 @@
    limitations under the License.
 -->
 <schema name="example" version="1.6">
-  <field name="_version_" type="${solr.tests.longClass:plong}" indexed="true" docValues="false" stored="true" />
+  <field name="_version_" type="long" indexed="true" docValues="false" stored="true" />
   <field name="id" type="string" indexed="true" stored="true" required="true" multiValued="false"/>
   <field name="text" type="text_general" indexed="true" stored="false" multiValued="true"/>
   <field name="signatureField" type="string" indexed="true" stored="false"/>
   <dynamicField name="*_sS" type="string" indexed="false" stored="true"/>
   <uniqueKey>id</uniqueKey>
   <fieldType name="string" class="solr.StrField" sortMissingLast="true"/>
-  <fieldType name="long" class="solr.TrieLongField" precisionStep="0" positionIncrementGap="0"/>
-  <fieldType name="plong" class="solr.LongPointField" positionIncrementGap="0"/>
+  <fieldType name="long" class="${solr.tests.LongFieldType}" docValues="${solr.tests.numeric.dv}" precisionStep="0" positionIncrementGap="0"/>
   <fieldType name="text_general" class="solr.TextField" positionIncrementGap="100">
     <analyzer>
       <tokenizer class="solr.StandardTokenizerFactory"/>

View File

@@ -28,6 +28,8 @@ import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.schema.SchemaField;
+import org.apache.solr.schema.TrieIntField;
+import org.apache.solr.schema.IntPointField;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -39,18 +41,36 @@ import org.slf4j.LoggerFactory;
  * to the indexed facet results as if it were just another faceting method.
  */
 @Slow
+@SolrTestCaseJ4.SuppressPointFields(bugUrl="Test explicitly compares Trie to Points, randomization defeats the point")
 public class TestRandomDVFaceting extends SolrTestCaseJ4 {
 
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   @BeforeClass
   public static void beforeTests() throws Exception {
-    // we need DVs on point fields to compute stats & facets
-    // but test also has hard coded assumptions about these field types *NOT* having DV when *NOT* points
-    // so use docvalue if and only if we are using points...
-    System.setProperty(NUMERIC_DOCVALUES_SYSPROP, System.getProperty(NUMERIC_POINTS_SYSPROP));
+    // This tests explicitly compares Trie DV with non-DV Trie with DV Points
+    // so we don't want randomized DocValues on all Trie fields
+    System.setProperty(NUMERIC_DOCVALUES_SYSPROP, "false");
+
     initCore("solrconfig-basic.xml","schema-docValuesFaceting.xml");
+
+    assertEquals("DocValues: Schema assumptions are broken",
+                 false, h.getCore().getLatestSchema().getField("foo_i").hasDocValues());
+    assertEquals("DocValues: Schema assumptions are broken",
+                 true, h.getCore().getLatestSchema().getField("foo_i_dv").hasDocValues());
+    assertEquals("DocValues: Schema assumptions are broken",
+                 true, h.getCore().getLatestSchema().getField("foo_i_p").hasDocValues());
+
+    assertEquals("Type: Schema assumptions are broken",
+                 TrieIntField.class,
+                 h.getCore().getLatestSchema().getField("foo_i").getType().getClass());
+    assertEquals("Type: Schema assumptions are broken",
+                 TrieIntField.class,
+                 h.getCore().getLatestSchema().getField("foo_i_dv").getType().getClass());
+    assertEquals("Type: Schema assumptions are broken",
+                 IntPointField.class,
+                 h.getCore().getLatestSchema().getField("foo_i_p").getType().getClass());
   }
 
   int indexSize;

View File

@@ -55,20 +55,25 @@ public class DirectoryFactoryTest extends LuceneTestCase {
     rdf.init(new NamedList());
 
     // No solr.data.home property set. Absolute instanceDir
-    assertEquals("/tmp/inst1/data", rdf.getDataHome(new CoreDescriptor("core_name", Paths.get("/tmp/inst1"), cp, zkAware)));
+    assertDataHome("/tmp/inst1/data", "/tmp/inst1", rdf, cc);
 
     // Simulate solr.data.home set in solrconfig.xml <directoryFactory> tag
     NamedList args = new NamedList();
     args.add("solr.data.home", "/solrdata/");
     rdf.init(args);
-    assertEquals("/solrdata/inst_dir/data", rdf.getDataHome(new CoreDescriptor("core_name", Paths.get("inst_dir"), cp, zkAware)));
+    assertDataHome("/solrdata/inst_dir/data", "inst_dir", rdf, cc);
 
     // solr.data.home set with System property, and relative path
     System.setProperty("solr.data.home", "solrdata");
     rdf.init(new NamedList());
-    assertEquals("/solr/home/solrdata/inst_dir/data", rdf.getDataHome(new CoreDescriptor("core_name", Paths.get("inst_dir"), cp, zkAware)));
+    assertDataHome("/solr/home/solrdata/inst_dir/data", "inst_dir", rdf, cc);
 
     // Test parsing last component of instanceDir, and using custom dataDir
-    assertEquals("/solr/home/solrdata/myinst/mydata", rdf.getDataHome(new CoreDescriptor("core_name", Paths.get("/path/to/myinst"), cp, zkAware, "dataDir", "mydata")));
+    assertDataHome("/solr/home/solrdata/myinst/mydata", "/path/to/myinst", rdf, cc, "dataDir", "mydata");
   }
 
+  private void assertDataHome(String expected, String instanceDir, RAMDirectoryFactory rdf, MockCoreContainer cc, String... properties) throws IOException {
+    String dataHome = rdf.getDataHome(new CoreDescriptor("core_name", Paths.get(instanceDir), cc.containerProperties, cc.isZooKeeperAware(), properties));
+    assertEquals(Paths.get(expected).toAbsolutePath(), Paths.get(dataHome).toAbsolutePath());
+  }