Merge remote-tracking branch 'origin/master' into gradle-master

This commit is contained in:
Dawid Weiss 2020-01-09 11:56:02 +01:00
commit 0674fada65
27 changed files with 130 additions and 262 deletions

View File

@ -120,7 +120,8 @@ Optimizations
Bug Fixes
---------------------
(No changes)
* LUCENE-9084: Fix potential deadlock due to circular synchronization in AnalyzingInfixSuggester (Paul Ward)
Other
---------------------

View File

@ -368,7 +368,8 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable {
};
}
private synchronized void ensureOpen() throws IOException {
private void ensureOpen() throws IOException {
synchronized (searcherMgrLock) {
if (writer == null) {
if (DirectoryReader.indexExists(dir)) {
// Already built; open it:
@ -376,7 +377,7 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable {
} else {
writer = new IndexWriter(dir, getIndexWriterConfig(getGramAnalyzer(), IndexWriterConfig.OpenMode.CREATE));
}
synchronized (searcherMgrLock) {
SearcherManager oldSearcherMgr = searcherMgr;
searcherMgr = new SearcherManager(writer, null);
if (oldSearcherMgr != null) {

View File

@ -210,8 +210,6 @@ Bug Fixes
* SOLR-13089: Fix lsof edge cases in the solr CLI script (Martijn Koster via janhoy)
* SOLR-11746: Fixed existence query support for numeric point fields. (Kai Chan, hossman, Houston Putman)
Other Changes
---------------------
@ -224,6 +222,8 @@ Other Changes
* SOLR-13778: Solrj client will retry requests on SSLException with a suppressed SocketException
(very likely a hard-closed socket connection).
* SOLR-14169: Fix 20 Resource Leak warnings in SolrJ's apache/solr/common (Andras Salamon via Tomás Fernández Löbbe)
================== 8.4.0 ==================
Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

View File

@ -266,7 +266,7 @@ public class ICUCollationField extends FieldType {
}
@Override
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
public Query getRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
String f = field.getName();
BytesRef low = part1 == null ? null : getCollationKey(f, part1);
BytesRef high = part2 == null ? null : getCollationKey(f, part2);

View File

@ -1187,14 +1187,10 @@ public abstract class SolrQueryParserBase extends QueryBuilder {
// called from parser
protected Query getWildcardQuery(String field, String termStr) throws SyntaxError {
checkNullField(field);
// *:* -> MatchAllDocsQuery
if ("*".equals(termStr)) {
if ("*".equals(field) || getExplicitField() == null) {
// '*:*' and '*' -> MatchAllDocsQuery
return newMatchAllDocsQuery();
} else {
// 'foo:*' -> empty prefix query
return getPrefixQuery(field, "");
}
}

View File

@ -316,7 +316,7 @@ public abstract class AbstractSpatialFieldType<T extends SpatialStrategy> extend
}
@Override
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
public Query getRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
if (!minInclusive || !maxInclusive)
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Both sides of spatial range query must be inclusive: " + field.getName());
Point p1 = SpatialUtils.parsePointSolrException(part1, ctx);

View File

@ -236,7 +236,7 @@ public class CollationField extends FieldType {
}
@Override
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
public Query getRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
String f = field.getName();
BytesRef low = part1 == null ? null : getCollationKey(f, part1);
BytesRef high = part2 == null ? null : getCollationKey(f, part2);

View File

@ -251,7 +251,7 @@ public class CurrencyFieldType extends FieldType implements SchemaAware, Resourc
CurrencyValue valueDefault;
valueDefault = value.convertTo(provider, defaultCurrency);
return getRangeQueryInternal(parser, field, valueDefault, valueDefault, true, true);
return getRangeQuery(parser, field, valueDefault, valueDefault, true, true);
}
/**
@ -317,7 +317,7 @@ public class CurrencyFieldType extends FieldType implements SchemaAware, Resourc
}
@Override
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String part1, String part2, final boolean minInclusive, final boolean maxInclusive) {
public Query getRangeQuery(QParser parser, SchemaField field, String part1, String part2, final boolean minInclusive, final boolean maxInclusive) {
final CurrencyValue p1 = CurrencyValue.parse(part1, defaultCurrency);
final CurrencyValue p2 = CurrencyValue.parse(part2, defaultCurrency);
@ -327,10 +327,10 @@ public class CurrencyFieldType extends FieldType implements SchemaAware, Resourc
": range queries only supported when upper and lower bound have same currency.");
}
return getRangeQueryInternal(parser, field, p1, p2, minInclusive, maxInclusive);
return getRangeQuery(parser, field, p1, p2, minInclusive, maxInclusive);
}
private Query getRangeQueryInternal(QParser parser, SchemaField field, final CurrencyValue p1, final CurrencyValue p2, final boolean minInclusive, final boolean maxInclusive) {
public Query getRangeQuery(QParser parser, SchemaField field, final CurrencyValue p1, final CurrencyValue p2, final boolean minInclusive, final boolean maxInclusive) {
String currencyCode = (p1 != null) ? p1.getCurrencyCode() :
(p2 != null) ? p2.getCurrencyCode() : defaultCurrency;

View File

@ -143,7 +143,7 @@ public class DateRangeField extends AbstractSpatialPrefixTreeFieldType<NumberRan
}
@Override
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String startStr, String endStr, boolean minInclusive, boolean maxInclusive) {
public Query getRangeQuery(QParser parser, SchemaField field, String startStr, String endStr, boolean minInclusive, boolean maxInclusive) {
if (parser == null) {//null when invoked by SimpleFacets. But getQueryFromSpatialArgs expects to get localParams.
final SolrRequestInfo requestInfo = SolrRequestInfo.getRequestInfo();
parser = new QParser("", null, requestInfo.getReq().getParams(), requestInfo.getReq()) {

View File

@ -63,13 +63,13 @@ public class EnumField extends AbstractEnumField {
}
@Override
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive, boolean maxInclusive) {
public Query getRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive, boolean maxInclusive) {
Integer minValue = enumMapping.stringValueToIntValue(min);
Integer maxValue = enumMapping.stringValueToIntValue(max);
if (field.multiValued() && field.hasDocValues() && !field.indexed()) {
// for the multi-valued dv-case, the default rangeimpl over toInternal is correct
return super.getSpecializedRangeQuery(parser, field, minValue.toString(), maxValue.toString(), minInclusive, maxInclusive);
return super.getRangeQuery(parser, field, minValue.toString(), maxValue.toString(), minInclusive, maxInclusive);
}
Query query = null;
final boolean matchOnly = field.hasDocValues() && !field.indexed();

View File

@ -57,7 +57,7 @@ public class EnumFieldType extends AbstractEnumField {
}
@Override
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive, boolean maxInclusive) {
public Query getRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive, boolean maxInclusive) {
Integer minValue = enumMapping.stringValueToIntValue(min);
Integer maxValue = enumMapping.stringValueToIntValue(max);

View File

@ -43,7 +43,6 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DocValuesFieldExistsQuery;
import org.apache.lucene.search.DocValuesRewriteMethod;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.PrefixQuery;
@ -458,13 +457,11 @@ public abstract class FieldType extends FieldProperties {
*
* @param parser the {@link org.apache.solr.search.QParser} calling the method
* @param sf the schema field
* @param termStr the term string for prefix query, if blank then this query should match all docs with this field
* @param termStr the term string for prefix query
* @return a Query instance to perform prefix search
*
*/
public Query getPrefixQuery(QParser parser, SchemaField sf, String termStr) {
if ("".equals(termStr)) {
return getRangeQuery(parser, sf, null, null, true, true);
}
PrefixQuery query = new PrefixQuery(new Term(sf.getName(), termStr));
query.setRewriteMethod(sf.getType().getRewriteMethod(parser, sf));
return query;
@ -850,34 +847,7 @@ public abstract class FieldType extends FieldProperties {
return null;
}
/**
* Returns a Query instance for doing range searches on this field type. {@link org.apache.solr.search.SolrQueryParser}
currently passes part1 and part2 as null if they are '*'. minInclusive and maxInclusive are currently both
true when called by SolrQueryParser, but that may change in the future. Also, other QueryParser implementations may have
* different semantics.
* <p>
* If the field has docValues enabled, and the range query has '*'s or nulls on either side, then a {@link org.apache.lucene.search.DocValuesFieldExistsQuery} is returned.
*
* Sub-classes should override the "getSpecializedRangeQuery" method to provide their own range query implementation. They should strive to
* handle nulls in part1 and/or part2 as well as unequal minInclusive and maxInclusive parameters gracefully.
*
*
* @param parser the {@link org.apache.solr.search.QParser} calling the method
* @param field the schema field
* @param part1 the lower boundary of the range, nulls are allowed.
* @param part2 the upper boundary of the range, nulls are allowed
* @param minInclusive whether the minimum of the range is inclusive or not
* @param maxInclusive whether the maximum of the range is inclusive or not
* @return a Query instance to perform range search according to given parameters
*
*/
public Query getRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
if (field.hasDocValues() && part1 == null && part2 == null) {
return new DocValuesFieldExistsQuery(field.getName());
} else {
return getSpecializedRangeQuery(parser, field, part1, part2, minInclusive, maxInclusive);
}
}
/**
* Returns a Query instance for doing range searches on this field type. {@link org.apache.solr.search.SolrQueryParser}
@ -897,11 +867,10 @@ public abstract class FieldType extends FieldProperties {
* @return a Query instance to perform range search according to given parameters
*
*/
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
public Query getRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
// TODO: change these all to use readableToIndexed/bytes instead (e.g. for unicode collation)
final BytesRef miValue = part1 == null ? null : new BytesRef(toInternal(part1));
final BytesRef maxValue = part2 == null ? null : new BytesRef(toInternal(part2));
if (field.hasDocValues() && !field.indexed()) {
return SortedSetDocValuesField.newSlowRangeQuery(
field.getName(),
@ -922,6 +891,7 @@ public abstract class FieldType extends FieldProperties {
* @param field The {@link org.apache.solr.schema.SchemaField} of the field to search
* @param externalVal The String representation of the value to search
* @return The {@link org.apache.lucene.search.Query} instance. This implementation returns a {@link org.apache.lucene.search.TermQuery} but overriding queries may not
*
*/
public Query getFieldQuery(QParser parser, SchemaField field, String externalVal) {
BytesRefBuilder br = new BytesRefBuilder();

View File

@ -103,7 +103,7 @@ public class LatLonType extends AbstractSubTypeFieldType implements SpatialQuery
@Override
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
public Query getRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
Point p1 = SpatialUtils.parsePointSolrException(part1, SpatialContext.GEO);
Point p2 = SpatialUtils.parsePointSolrException(part2, SpatialContext.GEO);

View File

@ -165,7 +165,7 @@ public abstract class PointField extends NumericFieldType {
boolean maxInclusive);
@Override
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
public Query getRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
boolean maxInclusive) {
if (!field.indexed() && field.hasDocValues()) {
return getDocValuesRangeQuery(parser, field, min, max, minInclusive, maxInclusive);
@ -222,9 +222,6 @@ public abstract class PointField extends NumericFieldType {
@Override
public Query getPrefixQuery(QParser parser, SchemaField sf, String termStr) {
if ("".equals(termStr)) {
return super.getPrefixQuery(parser, sf, termStr);
}
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Can't run prefix queries on numeric fields");
}

View File

@ -128,7 +128,7 @@ public class PointType extends CoordinateFieldType implements SpatialQueryable {
/**
* Care should be taken in calling this with higher order dimensions for performance reasons.
*/
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
public Query getRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
//Query could look like: [x1,y1 TO x2,y2] for 2 dimension, but could look like: [x1,y1,z1 TO x2,y2,z2], and can be extrapolated to n-dimensions
//thus, this query essentially creates a box, cube, etc.
String[] p1 = parseCommaSeparatedList(part1, dimension);

View File

@ -158,7 +158,7 @@ public class TextField extends FieldType {
}
@Override
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
public Query getRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
Analyzer multiAnalyzer = getMultiTermAnalyzer();
BytesRef lower = analyzeMultiTerm(field.getName(), part1, multiAnalyzer);
BytesRef upper = analyzeMultiTerm(field.getName(), part2, multiAnalyzer);

View File

@ -298,10 +298,10 @@ public class TrieField extends NumericFieldType {
}
@Override
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive, boolean maxInclusive) {
public Query getRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive, boolean maxInclusive) {
if (field.multiValued() && field.hasDocValues() && !field.indexed()) {
// for the multi-valued dv-case, the default rangeimpl over toInternal is correct
return super.getSpecializedRangeQuery(parser, field, min, max, minInclusive, maxInclusive);
return super.getRangeQuery(parser, field, min, max, minInclusive, maxInclusive);
}
int ps = precisionStep;
Query query;

View File

@ -693,26 +693,16 @@
<dynamicField name="*_ds_dv" type="double" indexed="true" stored="true" docValues="true" multiValued="true"/>
<dynamicField name="*_d_dvo" type="double" indexed="false" stored="true" docValues="true"/>
<dynamicField name="*_dt" type="date" indexed="true" stored="true"/>
<dynamicField name="*_dts" type="date" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_dt_dv" type="date" indexed="true" stored="true" docValues="true" multiValued="false"/>
<dynamicField name="*_dts_dv" type="date" indexed="true" stored="true" docValues="true" multiValued="true"/>
<dynamicField name="*_dt_dvo" type="date" indexed="false" stored="true" docValues="true"/>
<dynamicField name="*_s1" type="string" indexed="true" stored="true" multiValued="false"/>
<!-- :TODO: why are these identical?!?!?! -->
<dynamicField name="*_s" type="string" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_ss" type="string" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_s_dv" type="string" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_sdv" type="string" indexed="false" stored="false" docValues="true" useDocValuesAsStored="true"/>
<dynamicField name="*_ss_dv" type="string" indexed="true" stored="true" docValues="true" multiValued="true"/>
<dynamicField name="*_bdv" type="boolean" indexed="false" stored="false" docValues="true" useDocValuesAsStored="true"/>
<dynamicField name="*_t" type="text" indexed="true" stored="true"/>
<dynamicField name="*_tt" type="text" indexed="true" stored="true"/>
<dynamicField name="*_b" type="boolean" indexed="true" stored="true"/>
<dynamicField name="*_bs" type="boolean" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_bdv" type="boolean" indexed="false" stored="false" docValues="true" useDocValuesAsStored="true"/>
<dynamicField name="*_b_dv" type="boolean" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_bs_dv" type="boolean" indexed="true" stored="true" docValues="true" multiValued="true"/>
<dynamicField name="*_dt" type="date" indexed="true" stored="true"/>
<dynamicField name="*_pi" type="pint" indexed="true" multiValued="false"/>
<dynamicField name="*_pl" type="plong" indexed="true" multiValued="false"/>

View File

@ -16,7 +16,6 @@
*/
package org.apache.solr.search;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
@ -95,23 +94,6 @@ public class QueryEqualityTest extends SolrTestCaseJ4 {
" +apache +solr");
}
public void testQueryLuceneAllDocsWithField() throws Exception {
// for all "primitive" types, 'foo:*' should be functionally equivalent to "foo:[* TO *]"
// whatever implementation/optimizations exist for one syntax, should exist for the other syntax as well
// (regardless of docValues, multivalued, etc...)
for (String field : Arrays.asList("foo_sI", "foo_sS", "foo_s1", "foo_s",
"t_foo", "tv_foo", "tv_mv_foo",
"foo_b",
"foo_i", "foo_is", "foo_i_dvo",
"foo_l", "foo_ll", "foo_l_dvo",
"foo_f", "foo_f_dvo",
"foo_d",
"foo_dt")) {
assertQueryEquals("lucene", field + ":*", field + ":[* TO *]");
}
}
public void testQueryPrefix() throws Exception {
SolrQueryRequest req = req("myField","foo_s");
try {

View File

@ -17,7 +17,6 @@
package org.apache.solr.search;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
@ -29,7 +28,6 @@ import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.DocValuesFieldExistsQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PointInSetQuery;
import org.apache.lucene.search.Query;
@ -37,7 +35,6 @@ import org.apache.lucene.search.TermInSetQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.MapSolrParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.SolrParams;
@ -47,9 +44,7 @@ import org.apache.solr.metrics.SolrMetricManager;
import org.apache.solr.parser.QueryParser;
import org.apache.solr.query.FilterQuery;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.SchemaField;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@ -64,13 +59,6 @@ public class TestSolrQueryParser extends SolrTestCaseJ4 {
createIndex();
}
private static final List<String> HAS_VAL_FIELDS = new ArrayList<String>(31);
@AfterClass
public static void afterClass() throws Exception {
HAS_VAL_FIELDS.clear();
}
public static void createIndex() {
String v;
v = "how now brown cow";
@ -86,54 +74,11 @@ public class TestSolrQueryParser extends SolrTestCaseJ4 {
assertU(adoc("id", "20", "syn", "wifi ATM"));
{ // make a doc that has a value in *lots* of fields that no other doc has
SolrInputDocument doc = sdoc("id", "999");
// numbers...
for (String t : Arrays.asList("i", "l", "f", "d")) {
for (String s : Arrays.asList("", "s", "_dv", "s_dv", "_dvo")) {
final String f = "has_val_" + t + s;
HAS_VAL_FIELDS.add(f);
doc.addField(f, "42");
}
}
// boolean...
HAS_VAL_FIELDS.add("has_val_b");
doc.addField("has_val_b", "false");
// dates (and strings/text -- they don't care about the format)...
for (String s : Arrays.asList("dt", "s", "s1", "t")) {
final String f = "has_val_" + s;
HAS_VAL_FIELDS.add(f);
doc.addField(f, "2019-01-12T00:00:00Z");
}
assertU(adoc(doc));
}
assertU(adoc("id", "30", "shingle23", "A B X D E"));
assertU(commit());
}
public void testDocsWithValuesInField() throws Exception {
assertEquals("someone changed the test setup of HAS_VAL_FIELDS, w/o updating the sanity check",
25, HAS_VAL_FIELDS.size());
for (String f : HAS_VAL_FIELDS) {
// for all of these fields, these 2 syntaxes should be functionally equivalent
// in matching the one doc that contains these fields
for (String q : Arrays.asList( f + ":*", f + ":[* TO *]" )) {
assertJQ(req("q", q)
, "/response/numFound==1"
, "/response/docs/[0]/id=='999'"
);
// the same syntaxes should be valid even if no doc has the field...
assertJQ(req("q", "bogus___" + q)
, "/response/numFound==0"
);
}
}
}
@Test
public void testPhrase() {
// "text" field's type has WordDelimiterGraphFilter (WDGFF) and autoGeneratePhraseQueries=true
@ -1208,36 +1153,4 @@ public class TestSolrQueryParser extends SolrTestCaseJ4 {
}
@Test
public void testFieldExistsQueries() throws SyntaxError {
SolrQueryRequest req = req();
IndexSchema indexSchema = h.getCore().getLatestSchema();
String[] fieldSuffix = new String[] {
"ti", "tf", "td", "tl", "tdt",
"pi", "pf", "pd", "pl", "pdt",
"i", "f", "d", "l", "dt", "s", "b",
"is", "fs", "ds", "ls", "dts", "ss", "bs",
"i_dv", "f_dv", "d_dv", "l_dv", "dt_dv", "s_dv", "b_dv",
"is_dv", "fs_dv", "ds_dv", "ls_dv", "dts_dv", "ss_dv", "bs_dv",
"i_dvo", "f_dvo", "d_dvo", "l_dvo", "dt_dvo",
"t"
};
String[] existenceQueries = new String[] {
"*", "[* TO *]"
};
for (String existenceQuery : existenceQueries) {
for (String suffix : fieldSuffix) {
String field = "foo_" + suffix;
String query = field + ":" + existenceQuery;
QParser qParser = QParser.getParser(query, req);
if (indexSchema.getField(field).hasDocValues()) {
assertTrue("Field has docValues, so existence query \"" + query + "\" should return DocValuesFieldExistsQuery", qParser.getQuery() instanceof DocValuesFieldExistsQuery);
} else {
assertFalse("Field doesn't have docValues, so existence query \"" + query + "\" should not return DocValuesFieldExistsQuery", qParser.getQuery() instanceof DocValuesFieldExistsQuery);
}
}
}
}
}

View File

@ -322,13 +322,13 @@ Comments may be nested.
Solr's standard query parser originated as a variation of Lucene's "classic" QueryParser. It diverges in the following ways:
* A `*` may be used for either or both endpoints to specify an open-ended range query, or by itself as an existence query.
* A `*` may be used for either or both endpoints to specify an open-ended range query
** `field:[* TO 100]` finds all field values less than or equal to 100
** `field:[100 TO *]` finds all field values greater than or equal to 100
** `field:*` or `field:[* TO *]` finds all documents where the field exists (i.e. has a value)
** `field:[* TO *]` matches all documents with the field
* Pure negative queries (all clauses prohibited) are allowed (only as a top-level clause)
** `-inStock:false` finds all field values where inStock is not false
** `-field:*` or `-field:[* TO *]` finds all documents without a value for the field
** `-field:[* TO *]` finds all documents without a value for field
* Support for embedded Solr queries (sub-queries) using any type of query parser as a nested clause using the local-params syntax.
** `inStock:true OR {!dismax qf='name manu' v='ipod'}`
+

View File

@ -94,8 +94,8 @@ public class ZkNodeProps implements JSONWriter.Writable {
public static ZkNodeProps load(byte[] bytes) {
Map<String, Object> props = null;
if (bytes[0] == 2) {
try {
props = (Map<String, Object>) new JavaBinCodec().unmarshal(bytes);
try (JavaBinCodec jbc = new JavaBinCodec()) {
props = (Map<String, Object>) jbc.unmarshal(bytes);
} catch (IOException e) {
throw new RuntimeException("Unable to parse javabin content");
}

View File

@ -203,10 +203,10 @@ public class Utils {
}
public static Writer writeJson(Object o, Writer writer, boolean indent) throws IOException {
new SolrJSONWriter(writer)
.setIndent(indent)
.writeObj(o)
.close();
try (SolrJSONWriter jsonWriter = new SolrJSONWriter(writer)) {
jsonWriter.setIndent(indent)
.writeObj(o);
}
return writer;
}

View File

@ -47,7 +47,7 @@ public class ContentStreamTest extends SolrTestCaseJ4 {
public void testFileStream() throws IOException {
File file = new File(createTempDir().toFile(), "README");
try (InputStream is = new SolrResourceLoader().openResource("solrj/README");
try (SolrResourceLoader srl = new SolrResourceLoader(); InputStream is = srl.openResource("solrj/README");
FileOutputStream os = new FileOutputStream(file)) {
assertNotNull(is);
IOUtils.copy(is, os);
@ -70,7 +70,7 @@ public class ContentStreamTest extends SolrTestCaseJ4 {
public void testFileStreamGZIP() throws IOException {
File file = new File(createTempDir().toFile(), "README.gz");
try (InputStream is = new SolrResourceLoader().openResource("solrj/README");
try (SolrResourceLoader srl = new SolrResourceLoader(); InputStream is = srl.openResource("solrj/README");
FileOutputStream os = new FileOutputStream(file);
GZIPOutputStream zos = new GZIPOutputStream(os)) {
IOUtils.copy(is, zos);
@ -95,7 +95,7 @@ public class ContentStreamTest extends SolrTestCaseJ4 {
public void testURLStream() throws IOException {
File file = new File(createTempDir().toFile(), "README");
try (InputStream is = new SolrResourceLoader().openResource("solrj/README");
try (SolrResourceLoader srl = new SolrResourceLoader(); InputStream is = srl.openResource("solrj/README");
FileOutputStream os = new FileOutputStream(file)) {
IOUtils.copy(is, os);
}
@ -124,7 +124,7 @@ public class ContentStreamTest extends SolrTestCaseJ4 {
public void testURLStreamGZIP() throws IOException {
File file = new File(createTempDir().toFile(), "README.gz");
try (InputStream is = new SolrResourceLoader().openResource("solrj/README");
try (SolrResourceLoader srl = new SolrResourceLoader(); InputStream is = srl.openResource("solrj/README");
FileOutputStream os = new FileOutputStream(file);
GZIPOutputStream zos = new GZIPOutputStream(os)) {
IOUtils.copy(is, zos);
@ -149,7 +149,7 @@ public class ContentStreamTest extends SolrTestCaseJ4 {
public void testURLStreamCSVGZIPExtention() throws IOException {
File file = new File(createTempDir().toFile(), "README.CSV.gz");
try (InputStream is = new SolrResourceLoader().openResource("solrj/README");
try (SolrResourceLoader srl = new SolrResourceLoader(); InputStream is = srl.openResource("solrj/README");
FileOutputStream os = new FileOutputStream(file);
GZIPOutputStream zos = new GZIPOutputStream(os)) {
IOUtils.copy(is, zos);
@ -174,7 +174,7 @@ public class ContentStreamTest extends SolrTestCaseJ4 {
public void testURLStreamJSONGZIPExtention() throws IOException {
File file = new File(createTempDir().toFile(), "README.json.gzip");
try (InputStream is = new SolrResourceLoader().openResource("solrj/README");
try (SolrResourceLoader srl = new SolrResourceLoader(); InputStream is = srl.openResource("solrj/README");
FileOutputStream os = new FileOutputStream(file);
GZIPOutputStream zos = new GZIPOutputStream(os)) {
IOUtils.copy(is, zos);

View File

@ -40,22 +40,22 @@ import static org.apache.solr.common.util.Utils.NEW_LINKED_HASHMAP_FUN;
public class TestFastJavabinDecoder extends SolrTestCaseJ4 {
public void testTagRead() throws Exception {
BinaryRequestWriter.BAOS baos = new BinaryRequestWriter.BAOS();
FastOutputStream faos = FastOutputStream.wrap(baos);
JavaBinCodec codec = new JavaBinCodec(faos, null);
try (JavaBinCodec codec = new JavaBinCodec(faos, null)) {
codec.writeVal(10);
codec.writeVal(100);
codec.writeVal("Hello!");
}
faos.flushBuffer();
faos.close();
FastInputStream fis = new FastInputStream(null, baos.getbuf(), 0, baos.size());
FastJavaBinDecoder.StreamCodec scodec = new FastJavaBinDecoder.StreamCodec(fis);
try (FastJavaBinDecoder.StreamCodec scodec = new FastJavaBinDecoder.StreamCodec(fis)) {
scodec.start();
Tag tag = scodec.getTag();
assertEquals(Tag._SINT, tag);
@ -67,6 +67,7 @@ public class TestFastJavabinDecoder extends SolrTestCaseJ4 {
assertEquals(Tag._STR, tag);
assertEquals("Hello!", scodec.readStr(fis));
}
}
public void testSimple() throws IOException {
String sampleObj = "{k : v , " +
@ -78,10 +79,14 @@ public class TestFastJavabinDecoder extends SolrTestCaseJ4 {
Map m = (Map) Utils.fromJSONString(sampleObj);
BinaryRequestWriter.BAOS baos = new BinaryRequestWriter.BAOS();
new JavaBinCodec().marshal(m, baos);
Map m2 = (Map) new JavaBinCodec().unmarshal(new FastInputStream(null, baos.getbuf(), 0, baos.size()));
try (JavaBinCodec jbc = new JavaBinCodec()) {
jbc.marshal(m, baos);
}
Map m2;
try (JavaBinCodec jbc = new JavaBinCodec()) {
m2 = (Map) jbc.unmarshal(new FastInputStream(null, baos.getbuf(), 0, baos.size()));
}
LinkedHashMap fastMap = (LinkedHashMap) new FastJavaBinDecoder()
.withInputStream(new FastInputStream(null, baos.getbuf(), 0, baos.size()))
.decode(FastJavaBinDecoder.getEntryListener());
@ -113,7 +118,6 @@ public class TestFastJavabinDecoder extends SolrTestCaseJ4 {
((Map) m2.get("mapk")).remove("k2");
assertEquals(Utils.writeJson(m2, new StringWriter(), true).toString(),
Utils.writeJson(newMap, new StringWriter(), true).toString());
}
public void testFastJavabinStreamingDecoder() throws IOException {
@ -121,8 +125,13 @@ public class TestFastJavabinDecoder extends SolrTestCaseJ4 {
try (InputStream is = getClass().getResourceAsStream("/solrj/javabin_sample.bin")) {
IOUtils.copy(is, baos);
}
SimpleOrderedMap o = (SimpleOrderedMap) new JavaBinCodec().unmarshal(baos.toByteArray());
SolrDocumentList list = (SolrDocumentList) o.get("response");
SolrDocumentList list;
try (JavaBinCodec jbc = new JavaBinCodec()) {
SimpleOrderedMap o = (SimpleOrderedMap) jbc.unmarshal(baos.toByteArray());
list = (SolrDocumentList) o.get("response");
}
System.out.println(" " + list.getNumFound() + " , " + list.getStart() + " , " + list.getMaxScore());
class Pojo {
long _idx;
@ -172,10 +181,7 @@ public class TestFastJavabinDecoder extends SolrTestCaseJ4 {
assertEquals((Float) doc.get("price"), pojo.price, 0.001);
}
});
parser.processResponse(new FastInputStream(null, baos.getbuf(), 0, baos.size()), null);
}
public void testParsingWithChildDocs() throws IOException {
@ -195,7 +201,9 @@ public class TestFastJavabinDecoder extends SolrTestCaseJ4 {
orderedMap.add("response", sdocs);
BinaryRequestWriter.BAOS baos = new BinaryRequestWriter.BAOS();
new JavaBinCodec().marshal(orderedMap, baos);
try (JavaBinCodec jbc = new JavaBinCodec()) {
jbc.marshal(orderedMap, baos);
}
boolean[] useListener = new boolean[1];
useListener[0] = true;

View File

@ -42,10 +42,9 @@ public class TestSolrJsonWriter extends SolrTestCaseJ4 {
.add("v632"));
});
new SolrJSONWriter(writer)
.setIndent(true)
.writeObj(map)
.close();
try (SolrJSONWriter jsonWriter = new SolrJSONWriter(writer)) {
jsonWriter.setIndent(true).writeObj(map);
}
Object o = Utils.fromJSONString(writer.toString());
assertEquals("v1", Utils.getObjectByPath(o, true, "k1"));
assertEquals(1l, Utils.getObjectByPath(o, true, "k2"));

View File

@ -35,9 +35,10 @@ public class Utf8CharSequenceTest extends SolrTestCaseJ4 {
ByteArrayUtf8CharSequence utf8 = new ByteArrayUtf8CharSequence(sb.toString());
ByteArrayOutputStream baos = new ByteArrayOutputStream();
byte[] buf = new byte[256];
FastOutputStream fos = new FastOutputStream(baos, buf, 0);
try (FastOutputStream fos = new FastOutputStream(baos, buf, 0)) {
fos.writeUtf8CharSeq(utf8);
fos.flush();
}
byte[] result = baos.toByteArray();
ByteArrayUtf8CharSequence utf81 = new ByteArrayUtf8CharSequence(result, 0, result.length);
assertTrue(utf81.equals(utf8));
@ -50,14 +51,18 @@ public class Utf8CharSequenceTest extends SolrTestCaseJ4 {
Map m0 = new HashMap();
m0.put("str", utf8);
baos.reset();
new JavaBinCodec().marshal(m0, baos);
try (JavaBinCodec jbc = new JavaBinCodec()) {
jbc.marshal(m0, baos);
}
result = baos.toByteArray();
Map m1 = (Map) new JavaBinCodec()
try (JavaBinCodec jbc = new JavaBinCodec()) {
Map m1 = (Map) jbc
.setReadStringAsCharSeq(true)
.unmarshal(new ByteArrayInputStream(result));
utf81 = (ByteArrayUtf8CharSequence) m1.get("str");
assertTrue(utf81.equals(utf8));
}
}
public void testUnMarshal() throws IOException {
NamedList nl = new NamedList();
@ -78,16 +83,22 @@ public class Utf8CharSequenceTest extends SolrTestCaseJ4 {
nl.add("key_long", sb.toString());
nl.add("key5", "5" + str);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
new JavaBinCodec().marshal(nl, baos);
try (JavaBinCodec jbc = new JavaBinCodec()) {
jbc.marshal(nl, baos);
}
byte[] bytes = baos.toByteArray();
NamedList nl1 = (NamedList) new JavaBinCodec()
NamedList nl1;
try (JavaBinCodec jbc = new JavaBinCodec()) {
nl1 = (NamedList) jbc
.setReadStringAsCharSeq(true)
.unmarshal(new ByteArrayInputStream( bytes, 0, bytes.length));
.unmarshal(new ByteArrayInputStream(bytes, 0, bytes.length));
}
byte[] buf = ((ByteArrayUtf8CharSequence) nl1.getVal(0)).getBuf();
ByteArrayUtf8CharSequence valLong = (ByteArrayUtf8CharSequence) nl1.get("key_long");
assertFalse(valLong.getBuf() == buf);
for (int i = 1; i < 6; i++) {
ByteArrayUtf8CharSequence val = (ByteArrayUtf8CharSequence) nl1.get("key" + i);
assertEquals(buf, val.getBuf());