SOLR-349: new functions

git-svn-id: https://svn.apache.org/repos/asf/lucene/solr/trunk@576683 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Yonik Seeley 2007-09-18 04:04:38 +00:00
parent deafb3207e
commit efe1da6895
19 changed files with 1602 additions and 38 deletions

View File

@ -127,6 +127,10 @@ New Features
a new random UUID.
(Thomas Peuss via hossman)
24. SOLR-349: New FunctionQuery functions: sum, product, div, pow, log,
sqrt, abs, scale, map. Constants may now be used as a value source.
(yonik)
Changes in runtime behavior
Optimizations

View File

@ -32,6 +32,8 @@ import org.apache.solr.schema.FieldType;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.HashMap;
import java.util.regex.Pattern;
import java.util.logging.Level;
import java.io.IOException;
@ -485,30 +487,43 @@ public class QueryParsing {
return out;
}
private static ValueSource parseValSource(StrParser sp, IndexSchema schema) throws ParseException {
String id = sp.getId();
if (sp.opt("(")) {
// a function: could contain a fieldname or another function.
ValueSource vs=null;
if (id.equals("ord")) {
private abstract static class VSParser {
abstract ValueSource parse(StrParser sp, IndexSchema schema) throws ParseException;
}
private static Map<String, VSParser> vsParsers = new HashMap<String, VSParser>();
static {
vsParsers.put("ord", new VSParser() {
ValueSource parse(StrParser sp, IndexSchema schema) throws ParseException {
String field = sp.getId();
vs = new OrdFieldSource(field);
} else if (id.equals("rord")) {
return new OrdFieldSource(field);
}
});
vsParsers.put("rord", new VSParser() {
ValueSource parse(StrParser sp, IndexSchema schema) throws ParseException {
String field = sp.getId();
vs = new ReverseOrdFieldSource(field);
} else if (id.equals("linear")) {
return new ReverseOrdFieldSource(field);
}
});
vsParsers.put("linear", new VSParser() {
ValueSource parse(StrParser sp, IndexSchema schema) throws ParseException {
ValueSource source = parseValSource(sp, schema);
sp.expect(",");
float slope = sp.getFloat();
sp.expect(",");
float intercept = sp.getFloat();
vs = new LinearFloatFunction(source,slope,intercept);
} else if (id.equals("max")) {
return new LinearFloatFunction(source,slope,intercept);
}
});
vsParsers.put("max", new VSParser() {
ValueSource parse(StrParser sp, IndexSchema schema) throws ParseException {
ValueSource source = parseValSource(sp, schema);
sp.expect(",");
float val = sp.getFloat();
vs = new MaxFloatFunction(source,val);
} else if (id.equals("recip")) {
return new MaxFloatFunction(source,val);
}
});
vsParsers.put("recip", new VSParser() {
ValueSource parse(StrParser sp, IndexSchema schema) throws ParseException {
ValueSource source = parseValSource(sp,schema);
sp.expect(",");
float m = sp.getFloat();
@ -516,10 +531,125 @@ public class QueryParsing {
float a = sp.getFloat();
sp.expect(",");
float b = sp.getFloat();
vs = new ReciprocalFloatFunction(source,m,a,b);
} else {
return new ReciprocalFloatFunction(source,m,a,b);
}
});
vsParsers.put("scale", new VSParser() {
ValueSource parse(StrParser sp, IndexSchema schema) throws ParseException {
ValueSource source = parseValSource(sp,schema);
sp.expect(",");
float min = sp.getFloat();
sp.expect(",");
float max = sp.getFloat();
return new ScaleFloatFunction(source,min,max);
}
});
vsParsers.put("pow", new VSParser() {
ValueSource parse(StrParser sp, IndexSchema schema) throws ParseException {
ValueSource a = parseValSource(sp,schema);
sp.expect(",");
ValueSource b = parseValSource(sp,schema);
return new PowFloatFunction(a,b);
}
});
vsParsers.put("div", new VSParser() {
ValueSource parse(StrParser sp, IndexSchema schema) throws ParseException {
ValueSource a = parseValSource(sp,schema);
sp.expect(",");
ValueSource b = parseValSource(sp,schema);
return new DivFloatFunction(a,b);
}
});
vsParsers.put("map", new VSParser() {
ValueSource parse(StrParser sp, IndexSchema schema) throws ParseException {
ValueSource source = parseValSource(sp,schema);
sp.expect(",");
float min = sp.getFloat();
sp.expect(",");
float max = sp.getFloat();
sp.expect(",");
float target = sp.getFloat();
return new RangeMapFloatFunction(source,min,max,target);
}
});
vsParsers.put("sqrt", new VSParser() {
ValueSource parse(StrParser sp, IndexSchema schema) throws ParseException {
ValueSource source = parseValSource(sp,schema);
return new SimpleFloatFunction(source) {
protected String name() {
return "sqrt";
}
protected float func(int doc, DocValues vals) {
return (float)Math.sqrt(vals.floatVal(doc));
}
};
}
});
vsParsers.put("log", new VSParser() {
ValueSource parse(StrParser sp, IndexSchema schema) throws ParseException {
ValueSource source = parseValSource(sp,schema);
return new SimpleFloatFunction(source) {
protected String name() {
return "log";
}
protected float func(int doc, DocValues vals) {
return (float)Math.log10(vals.floatVal(doc));
}
};
}
});
vsParsers.put("abs", new VSParser() {
  ValueSource parse(StrParser sp, IndexSchema schema) throws ParseException {
    ValueSource source = parseValSource(sp,schema);
    return new SimpleFloatFunction(source) {
      protected String name() {
        // was "log" (copy/paste error): mislabeled abs() in description()/toString()
        // and made abs(x) compare equal to log(x) via SimpleFloatFunction.equals()
        return "abs";
      }
      protected float func(int doc, DocValues vals) {
        return (float)Math.abs(vals.floatVal(doc));
      }
    };
  }
});
vsParsers.put("sum", new VSParser() {
ValueSource parse(StrParser sp, IndexSchema schema) throws ParseException {
List<ValueSource> sources = parseValueSourceList(sp,schema);
return new SumFloatFunction(sources.toArray(new ValueSource[sources.size()]));
}
});
vsParsers.put("product", new VSParser() {
ValueSource parse(StrParser sp, IndexSchema schema) throws ParseException {
List<ValueSource> sources = parseValueSourceList(sp,schema);
return new ProductFloatFunction(sources.toArray(new ValueSource[sources.size()]));
}
});
}
/** Parses a comma-separated list of value sources, stopping at (but not
 * consuming) the closing ')'.
 */
private static List<ValueSource> parseValueSourceList(StrParser sp, IndexSchema schema) throws ParseException {
  List<ValueSource> sources = new ArrayList<ValueSource>(3);
  while (true) {
    sources.add(parseValSource(sp,schema));
    if (sp.peek() == ')') break;
    sp.expect(",");
  }
  return sources;
}
private static ValueSource parseValSource(StrParser sp, IndexSchema schema) throws ParseException {
int ch = sp.peek();
if (ch>='0' && ch<='9' || ch=='.' || ch=='+' || ch=='-') {
return new ConstValueSource(sp.getFloat());
}
String id = sp.getId();
if (sp.opt("(")) {
// a function... look it up.
VSParser argParser = vsParsers.get(id);
if (argParser==null) {
throw new ParseException("Unknown function " + id + " in FunctionQuery(" + sp + ")");
}
ValueSource vs = argParser.parse(sp, schema);
sp.expect(")");
return vs;
}

View File

@ -0,0 +1,166 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search.function;
import org.apache.lucene.search.*;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.ToStringUtils;
import java.io.IOException;
import java.util.Set;
/**
 * Query whose score is the score of a wrapped sub-query multiplied by the
 * per-document value of a {@link ValueSource}.
 */
public class BoostedQuery extends Query {
  private Query q;
  // NOTE(review): originally commented "optional, can be null", but
  // scorer(), equals() and hashCode() all dereference it unconditionally,
  // so a null boostVal would throw NPE.  Treat it as required.
  private ValueSource boostVal;

  /**
   * @param subQuery the query supplying the base relevance score
   * @param boostVal value source whose per-document value multiplies the score
   */
  public BoostedQuery(Query subQuery, ValueSource boostVal) {
    this.q = subQuery;
    this.boostVal = boostVal;
  }

  /** @return the wrapped sub-query */
  public Query getQuery() { return q; }

  /** @return the value source used as the score multiplier */
  public ValueSource getValueSource() { return boostVal; }

  public Query rewrite(IndexReader reader) throws IOException {
    // Rewrite the sub-query but keep the boost wrapper.  The original code
    // returned q.rewrite(reader) directly, silently discarding the boost.
    Query newQ = q.rewrite(reader);
    if (newQ == q) return this;
    BoostedQuery bq = (BoostedQuery)this.clone();
    bq.q = newQ;
    return bq;
  }

  public void extractTerms(Set terms) {
    q.extractTerms(terms);
  }

  protected Weight createWeight(Searcher searcher) throws IOException {
    return new BoostedQuery.BoostedWeight(searcher);
  }

  private class BoostedWeight implements Weight {
    Searcher searcher;
    Weight weight;  // weight of the wrapped sub-query

    public BoostedWeight(Searcher searcher) throws IOException {
      this.searcher = searcher;
      this.weight = q.weight(searcher);
    }

    public Query getQuery() {
      return BoostedQuery.this;
    }

    public float getValue() {
      return getBoost();
    }

    public float sumOfSquaredWeights() throws IOException {
      float sum = weight.sumOfSquaredWeights();
      sum *= getBoost() * getBoost();
      return sum;
    }

    public void normalize(float norm) {
      norm *= getBoost();
      weight.normalize(norm);
    }

    public Scorer scorer(IndexReader reader) throws IOException {
      Scorer subQueryScorer = weight.scorer(reader);
      return new BoostedQuery.CustomScorer(getSimilarity(searcher), reader, this, subQueryScorer, boostVal);
    }

    public Explanation explain(IndexReader reader, int doc) throws IOException {
      return scorer(reader).explain(doc);
    }
  }

  private class CustomScorer extends Scorer {
    private final BoostedQuery.BoostedWeight weight;
    private final float qWeight;   // query-normalized weight value
    private final Scorer scorer;   // scorer of the wrapped sub-query
    private final DocValues vals;  // per-document boost values
    private final IndexReader reader;

    private CustomScorer(Similarity similarity, IndexReader reader, BoostedQuery.BoostedWeight w,
        Scorer scorer, ValueSource vs) throws IOException {
      super(similarity);
      this.weight = w;
      this.qWeight = w.getValue();
      this.scorer = scorer;
      this.reader = reader;
      this.vals = vs.getValues(reader);
    }

    public boolean next() throws IOException {
      return scorer.next();
    }

    public int doc() {
      return scorer.doc();
    }

    // score = queryWeight * subQueryScore * boostVal(doc)
    public float score() throws IOException {
      return qWeight * scorer.score() * vals.floatVal(scorer.doc());
    }

    public boolean skipTo(int target) throws IOException {
      return scorer.skipTo(target);
    }

    public Explanation explain(int doc) throws IOException {
      Explanation subQueryExpl = weight.weight.explain(reader,doc);
      if (!subQueryExpl.isMatch()) {
        return subQueryExpl;
      }
      float sc = subQueryExpl.getValue() * vals.floatVal(doc);
      Explanation res = new ComplexExplanation(
        true, sc, BoostedQuery.this.toString() + ", product of:");
      res.addDetail(subQueryExpl);
      res.addDetail(vals.explain(doc));
      return res;
    }
  }

  public String toString(String field) {
    StringBuilder sb = new StringBuilder();
    sb.append("boost(").append(q.toString(field)).append(',').append(boostVal).append(')');
    sb.append(ToStringUtils.boost(getBoost()));
    return sb.toString();
  }

  public boolean equals(Object o) {
    // null check added: equals(null) must return false, not throw NPE
    if (o == null || getClass() != o.getClass()) return false;
    BoostedQuery other = (BoostedQuery)o;
    return this.getBoost() == other.getBoost()
           && this.q.equals(other.q)
           && this.boostVal.equals(other.boostVal);
  }

  public int hashCode() {
    int h = q.hashCode();
    h ^= (h << 17) | (h >>> 16);
    h += boostVal.hashCode();
    h ^= (h << 8) | (h >>> 25);
    h += Float.floatToIntBits(getBoost());
    return h;
  }
}

View File

@ -0,0 +1,70 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search.function;
import org.apache.lucene.index.IndexReader;
import java.io.IOException;
/**
 * A {@link ValueSource} that yields the same constant float for every document.
 */
public class ConstValueSource extends ValueSource {
  final float constant;

  public ConstValueSource(float constant) {
    this.constant = constant;
  }

  public String description() {
    return "const(" + constant + ")";
  }

  public DocValues getValues(IndexReader reader) throws IOException {
    return new DocValues() {
      public float floatVal(int doc) {
        return constant;
      }
      public int intVal(int doc) {
        return (int)constant;
      }
      public long longVal(int doc) {
        return (long)constant;
      }
      public double doubleVal(int doc) {
        return constant;
      }
      public String strVal(int doc) {
        return Float.toString(constant);
      }
      public String toString(int doc) {
        return description();
      }
    };
  }

  public int hashCode() {
    return 31 * Float.floatToIntBits(constant);
  }

  public boolean equals(Object o) {
    if (ConstValueSource.class != o.getClass()) return false;
    ConstValueSource that = (ConstValueSource)o;
    return that.constant == this.constant;
  }
}

View File

@ -0,0 +1,38 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search.function;
/** Function dividing the value of "a" by the value of "b".
 */
public class DivFloatFunction extends DualFloatFunction {
  /**
   * @param a the numerator.
   * @param b the denominator.
   */
  public DivFloatFunction(ValueSource a, ValueSource b) {
    super(a,b);
  }

  protected String name() {
    return "div";
  }

  protected float func(int doc, DocValues aVals, DocValues bVals) {
    float numerator = aVals.floatVal(doc);
    float denominator = bVals.floatVal(doc);
    return numerator / denominator;
  }
}

View File

@ -33,13 +33,17 @@ public class FunctionQuery extends Query {
ValueSource func;
/**
*
* @param func defines the function to be used for scoring
*/
public FunctionQuery(ValueSource func) {
this.func=func;
}
/** @return The associated ValueSource */
public ValueSource getValueSource() {
return func;
}
public Query rewrite(IndexReader reader) throws IOException {
return this;
}
@ -169,7 +173,7 @@ public class FunctionQuery extends Query {
/** Returns a hash code value for this object. */
public int hashCode() {
return func.hashCode() ^ Float.floatToIntBits(getBoost());
return func.hashCode()*31 + Float.floatToIntBits(getBoost());
}
}

View File

@ -0,0 +1,105 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search.function;
import org.apache.lucene.index.IndexReader;
import java.io.IOException;
/** Function computing the base "a" raised to the power "b".
 */
public class PowFloatFunction extends DualFloatFunction {
  /**
   * @param a the base.
   * @param b the exponent.
   */
  public PowFloatFunction(ValueSource a, ValueSource b) {
    super(a,b);
  }

  protected String name() {
    return "pow";
  }

  protected float func(int doc, DocValues aVals, DocValues bVals) {
    double base = aVals.floatVal(doc);
    double exponent = bVals.floatVal(doc);
    return (float)Math.pow(base, exponent);
  }
}
/** Base class for float functions of exactly two {@link ValueSource}
 * arguments; subclasses supply the name and the per-document combination.
 */
abstract class DualFloatFunction extends ValueSource {
  protected final ValueSource a;
  protected final ValueSource b;

  /**
   * @param a the first function argument.
   * @param b the second function argument.
   */
  public DualFloatFunction(ValueSource a, ValueSource b) {
    this.a = a;
    this.b = b;
  }

  /** Function label used in descriptions and equality/hashing. */
  protected abstract String name();

  /** Combines the two per-document values into this function's result. */
  protected abstract float func(int doc, DocValues aVals, DocValues bVals);

  public String description() {
    return name() + "(" + a.description() + "," + b.description() + ")";
  }

  public DocValues getValues(IndexReader reader) throws IOException {
    final DocValues aVals = a.getValues(reader);
    final DocValues bVals = b.getValues(reader);
    return new DocValues() {
      public float floatVal(int doc) {
        return func(doc, aVals, bVals);
      }
      public int intVal(int doc) {
        return (int)floatVal(doc);
      }
      public long longVal(int doc) {
        return (long)floatVal(doc);
      }
      public double doubleVal(int doc) {
        return floatVal(doc);
      }
      public String strVal(int doc) {
        return Float.toString(floatVal(doc));
      }
      public String toString(int doc) {
        return name() + '(' + aVals.toString(doc) + ',' + bVals.toString(doc) + ')';
      }
    };
  }

  // mix both sources and the name so e.g. div(a,b) != pow(a,b)
  public int hashCode() {
    int h = a.hashCode();
    h ^= (h << 13) | (h >>> 20);
    h += b.hashCode();
    h ^= (h << 23) | (h >>> 10);
    h += name().hashCode();
    return h;
  }

  public boolean equals(Object o) {
    if (this.getClass() != o.getClass()) return false;
    DualFloatFunction other = (DualFloatFunction)o;
    return this.a.equals(other.a)
        && this.b.equals(other.b);
  }
}

View File

@ -0,0 +1,39 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search.function;
/**
 * <code>ProductFloatFunction</code> returns the product of its components.
 */
public class ProductFloatFunction extends MultiFloatFunction {
  public ProductFloatFunction(ValueSource[] sources) {
    super(sources);
  }

  protected String name() {
    return "product";
  }

  protected float func(int doc, DocValues[] valsArr) {
    float product = 1.0f;
    for (int i = 0; i < valsArr.length; i++) {
      product *= valsArr[i].floatVal(doc);
    }
    return product;
  }
}

View File

@ -0,0 +1,136 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search.function;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.Scorer;
import org.apache.solr.common.SolrException;
import java.io.IOException;
/**
 * <code>QueryValueSource</code> returns the relevance score of the query,
 * or <code>defVal</code> for documents the query does not match.
 */
public class QueryValueSource extends ValueSource {
  final Query q;
  final float defVal;  // value used for non-matching documents

  public QueryValueSource(Query q, float defVal) {
    this.q = q;
    this.defVal = defVal;
  }

  public String description() {
    return "query(" + q + ",def=" + defVal + ")";
  }

  public DocValues getValues(IndexReader reader) throws IOException {
    return new QueryDocValues(reader, q, defVal);
  }

  public int hashCode() {
    // include defVal so instances differing only in the default don't collide
    return q.hashCode() * 29 + Float.floatToIntBits(defVal);
  }

  public boolean equals(Object o) {
    if (QueryValueSource.class != o.getClass()) return false;
    QueryValueSource other = (QueryValueSource)o;
    // defVal affects the produced values, so it must participate in equality
    // (the original compared only q, conflating distinct sources)
    return this.q.equals(other.q)
        && this.defVal == other.defVal;
  }
}
/** DocValues that expose the relevance score of a query as a per-document
 * float, returning defVal for documents the query does not match.
 * Optimized for (mostly) increasing docid access; an out-of-order request
 * recreates the scorer from the beginning.
 */
class QueryDocValues extends DocValues {
  final Query q;
  final IndexReader reader;
  final IndexSearcher searcher;
  final Weight weight;
  final float defVal;  // value returned for non-matching documents

  Scorer scorer;
  int scorerDoc; // the document the scorer is on

  // the last document requested... start off with high value
  // to trigger a scorer reset on first access.
  int lastDocRequested=Integer.MAX_VALUE;

  public QueryDocValues(IndexReader reader, Query q, float defVal) throws IOException {
    this.reader = reader;
    this.q = q;
    this.defVal = defVal;
    searcher = new IndexSearcher(reader);
    weight = q.weight(searcher);
  }

  public float floatVal(int doc) {
    try {
      if (doc < lastDocRequested) {
        // out-of-order access.... reset scorer.
        scorer = weight.scorer(reader);
        boolean more = scorer.next();
        if (more) {
          scorerDoc = scorer.doc();
        } else {
          // pretend we skipped to the end
          scorerDoc = Integer.MAX_VALUE;
        }
      }
      lastDocRequested = doc;

      if (scorerDoc < doc) {
        // advance toward doc; skipTo() may land on or beyond it
        boolean more = scorer.skipTo(doc);
        if (more) {
          scorerDoc = scorer.doc();
        } else {
          // pretend we skipped to the end
          scorerDoc = Integer.MAX_VALUE;
        }
      }

      if (scorerDoc > doc) {
        // query doesn't match this document... either because we hit the
        // end (Integer.MAX_VALUE), or because the next doc is after this doc.
        return defVal;
      }

      // a match!
      return scorer.score();
    } catch (IOException e) {
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "caught exception in QueryDocVals("+q+") doc="+doc, e);
    }
  }

  public int intVal(int doc) {
    return (int)floatVal(doc);
  }
  public long longVal(int doc) {
    return (long)floatVal(doc);
  }
  public double doubleVal(int doc) {
    return (double)floatVal(doc);
  }
  public String strVal(int doc) {
    return Float.toString(floatVal(doc));
  }
  public String toString(int doc) {
    return "query(" + q + ",def=" + defVal + ")=" + floatVal(doc);
  }
}

View File

@ -0,0 +1,93 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search.function;
import org.apache.lucene.index.IndexReader;
import java.io.IOException;
/**
 * <code>RangeMapFloatFunction</code> maps values of the wrapped
 * {@link org.apache.solr.search.function.ValueSource} that fall within
 * [min,max] (inclusive) to the constant <code>target</code>; values
 * outside the range pass through unchanged.
 * <br>
 * Normally used as an argument to a {@link org.apache.solr.search.function.FunctionQuery}
 * (the original javadoc was copied from LinearFloatFunction and described
 * the wrong class.)
 *
 * @version $Id$
 */
public class RangeMapFloatFunction extends ValueSource {
  protected final ValueSource source;
  protected final float min;
  protected final float max;
  protected final float target;

  /**
   * @param source the wrapped value source
   * @param min    lower bound (inclusive) of the range to remap
   * @param max    upper bound (inclusive) of the range to remap
   * @param target value substituted for anything inside [min,max]
   */
  public RangeMapFloatFunction(ValueSource source, float min, float max, float target) {
    this.source = source;
    this.min = min;
    this.max = max;
    this.target = target;
  }

  public String description() {
    return "map(" + source.description() + "," + min + "," + max + "," + target + ")";
  }

  public DocValues getValues(IndexReader reader) throws IOException {
    final DocValues vals = source.getValues(reader);
    return new DocValues() {
      public float floatVal(int doc) {
        float val = vals.floatVal(doc);
        return (val>=min && val<=max) ? target : val;
      }
      public int intVal(int doc) {
        return (int)floatVal(doc);
      }
      public long longVal(int doc) {
        return (long)floatVal(doc);
      }
      public double doubleVal(int doc) {
        return (double)floatVal(doc);
      }
      public String strVal(int doc) {
        return Float.toString(floatVal(doc));
      }
      public String toString(int doc) {
        return "map(" + vals.toString(doc) + ",min=" + min + ",max=" + max + ",target=" + target + ")";
      }
    };
  }

  public int hashCode() {
    int h = source.hashCode();
    h ^= (h << 10) | (h >>> 23);
    h += Float.floatToIntBits(min);  // was a bare no-op statement: min's bits were computed and discarded
    h ^= (h << 14) | (h >>> 19);
    h += Float.floatToIntBits(max);
    h ^= (h << 13) | (h >>> 20);
    h += Float.floatToIntBits(target);
    return h;
  }

  public boolean equals(Object o) {
    if (RangeMapFloatFunction.class != o.getClass()) return false;
    RangeMapFloatFunction other = (RangeMapFloatFunction)o;
    return this.min == other.min
        && this.max == other.max
        && this.target == other.target
        && this.source.equals(other.source);
  }
}

View File

@ -0,0 +1,121 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search.function;
import org.apache.lucene.index.IndexReader;
import java.io.IOException;
/**
 * Linearly rescales the values of the wrapped source so they fall between
 * min and max.
 * <p>The current implementation makes a full pass over the source values to
 * discover their actual range.
 * <p>Deleted documents and documents with no value cannot be told apart and
 * both contribute 0.0, so even when all real values are greater than 0.0 the
 * effective source minimum can still be 0.0.  Applying a map() function
 * first is a workaround to move 0.0 into the real range.
 */
public class ScaleFloatFunction extends ValueSource {
  protected final ValueSource source;
  protected final float min;
  protected final float max;

  public ScaleFloatFunction(ValueSource source, float min, float max) {
    this.source = source;
    this.min = min;
    this.max = max;
  }

  public String description() {
    return "scale(" + source.description() + "," + min + "," + max + ")";
  }

  public DocValues getValues(IndexReader reader) throws IOException {
    final DocValues vals = source.getValues(reader);
    int maxDoc = reader.maxDoc();

    // First pass: discover the actual range of the source values.
    // (this doesn't take deleted docs into account)
    // Future alternatives include asking a DocValues for its min/max, or
    // caching the values in a float[] during this pass (memory-intensive).
    float lo = 0.0f;
    float hi = 0.0f;
    if (maxDoc > 0) {
      lo = hi = vals.floatVal(0);
    }
    for (int docId = 0; docId < maxDoc; docId++) {
      float v = vals.floatVal(docId);
      if (v < lo) {
        lo = v;
      } else if (v > hi) {
        hi = v;
      }
    }

    // degenerate range (all values identical) maps everything to min
    final float scale = (hi - lo == 0) ? 0 : (max - min) / (hi - lo);
    final float minSource = lo;
    final float maxSource = hi;

    return new DocValues() {
      public float floatVal(int doc) {
        return (vals.floatVal(doc) - minSource) * scale + min;
      }
      public int intVal(int doc) {
        return (int)floatVal(doc);
      }
      public long longVal(int doc) {
        return (long)floatVal(doc);
      }
      public double doubleVal(int doc) {
        return floatVal(doc);
      }
      public String strVal(int doc) {
        return Float.toString(floatVal(doc));
      }
      public String toString(int doc) {
        return "scale(" + vals.toString(doc) + ",toMin=" + min + ",toMax=" + max
                + ",fromMin=" + minSource
                + ",fromMax=" + maxSource
                + ")";
      }
    };
  }

  public int hashCode() {
    return (Float.floatToIntBits(min) * 29 + Float.floatToIntBits(max)) * 29
        + source.hashCode();
  }

  public boolean equals(Object o) {
    if (ScaleFloatFunction.class != o.getClass()) return false;
    ScaleFloatFunction that = (ScaleFloatFunction)o;
    return this.min == that.min
        && this.max == that.max
        && this.source.equals(that.source);
  }
}

View File

@ -0,0 +1,74 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search.function;
import org.apache.lucene.index.IndexReader;
import java.io.IOException;
/** Base class for float functions taking exactly one value-source argument.
 */
public abstract class SimpleFloatFunction extends ValueSource {
  protected final ValueSource source;

  public SimpleFloatFunction(ValueSource source) {
    this.source = source;
  }

  /** Function label used in descriptions and equality checks. */
  protected abstract String name();

  /** Computes this function's value for one document. */
  protected abstract float func(int doc, DocValues vals);

  public String description() {
    return name() + '(' + source.description() + ')';
  }

  public DocValues getValues(IndexReader reader) throws IOException {
    final DocValues vals = source.getValues(reader);
    return new DocValues() {
      public float floatVal(int doc) {
        return func(doc, vals);
      }
      public int intVal(int doc) {
        return (int)floatVal(doc);
      }
      public long longVal(int doc) {
        return (long)floatVal(doc);
      }
      public double doubleVal(int doc) {
        return floatVal(doc);
      }
      public String strVal(int doc) {
        return Float.toString(floatVal(doc));
      }
      public String toString(int doc) {
        return name() + '(' + vals.toString(doc) + ')';
      }
    };
  }

  public int hashCode() {
    return name().hashCode() + source.hashCode();
  }

  public boolean equals(Object o) {
    if (this.getClass() != o.getClass()) return false;
    SimpleFloatFunction that = (SimpleFloatFunction)o;
    return this.name().equals(that.name())
        && this.source.equals(that.source);
  }
}

View File

@ -0,0 +1,125 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search.function;
import org.apache.lucene.index.IndexReader;
import java.io.IOException;
import java.util.Arrays;
/**
 * <code>SumFloatFunction</code> returns the sum of its components.
 */
public class SumFloatFunction extends MultiFloatFunction {
  public SumFloatFunction(ValueSource[] sources) {
    super(sources);
  }

  @Override
  protected String name() {
    return "sum";
  }

  protected float func(int doc, DocValues[] valsArr) {
    float total = 0.0f;
    for (int i = 0; i < valsArr.length; i++) {
      total += valsArr[i].floatVal(doc);
    }
    return total;
  }
}
/** Abstract base for a simple float function of multiple value sources. */
abstract class MultiFloatFunction extends ValueSource {
  protected final ValueSource[] sources;

  public MultiFloatFunction(ValueSource[] sources) {
    this.sources = sources;
  }

  /** Function name used in description() and toString(doc), e.g. "sum". */
  abstract protected String name();

  /** Computes this function's value for a doc from all component values. */
  abstract protected float func(int doc, DocValues[] valsArr);

  /** Returns name(src1,src2,...) built from the component sources. */
  public String description() {
    StringBuilder sb = new StringBuilder();
    // append pieces separately rather than concatenating inside append()
    sb.append(name()).append('(');
    boolean firstTime = true;
    for (ValueSource source : sources) {
      if (firstTime) {
        firstTime = false;
      } else {
        sb.append(',');
      }
      sb.append(source);
    }
    sb.append(')');
    return sb.toString();
  }

  public DocValues getValues(IndexReader reader) throws IOException {
    final DocValues[] valsArr = new DocValues[sources.length];
    for (int i=0; i<sources.length; i++) {
      valsArr[i] = sources[i].getValues(reader);
    }
    return new DocValues() {
      public float floatVal(int doc) {
        return func(doc, valsArr);
      }
      // the integral and double views are straight casts of the float result
      public int intVal(int doc) {
        return (int)floatVal(doc);
      }
      public long longVal(int doc) {
        return (long)floatVal(doc);
      }
      public double doubleVal(int doc) {
        return (double)floatVal(doc);
      }
      public String strVal(int doc) {
        return Float.toString(floatVal(doc));
      }
      public String toString(int doc) {
        StringBuilder sb = new StringBuilder();
        sb.append(name()).append('(');
        boolean firstTime = true;
        for (DocValues vals : valsArr) {
          if (firstTime) {
            firstTime = false;
          } else {
            sb.append(',');
          }
          sb.append(vals.toString(doc));
        }
        sb.append(')');
        return sb.toString();
      }
    };
  }

  public int hashCode() {
    return Arrays.hashCode(sources) + name().hashCode();
  }

  public boolean equals(Object o) {
    // null guard: equals(null) must return false per the Object contract,
    // not throw a NullPointerException from getClass()
    if (o == null || this.getClass() != o.getClass()) return false;
    MultiFloatFunction other = (MultiFloatFunction)o;
    return this.name().equals(other.name())
           && Arrays.equals(this.sources, other.sources);
  }
}

View File

@ -42,7 +42,7 @@ public abstract class ValueSource implements Serializable {
public abstract String description();
public String toString() {
return getClass().getName() + ":" + description();
return description();
}
}

View File

@ -71,7 +71,7 @@ public class DirectUpdateHandler extends UpdateHandler {
protected void openWriter() throws IOException {
if (writer==null) {
writer = createMainIndexWriter("DirectUpdateHandler");
writer = createMainIndexWriter("DirectUpdateHandler", false);
}
}

View File

@ -25,6 +25,7 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.MatchAllDocsQuery;
import java.util.TreeMap;
import java.util.Map;
@ -173,10 +174,19 @@ public class DirectUpdateHandler2 extends UpdateHandler {
tracker = new CommitTracker();
}
// must only be called when iwCommit lock held
/**
 * Removes all documents by discarding the current index: closes the writer
 * and searcher, then recreates the main index writer with
 * removeAllExisting=true. Must only be called while holding the iwCommit lock
 * (see the note above openWriter).
 */
private void deleteAll() throws IOException {
SolrCore.log.info("REMOVING ALL DOCUMENTS FROM INDEX");
closeWriter();
closeSearcher();
pset.clear(); // ignore docs marked for deletion since we are removing all
writer = createMainIndexWriter("DirectUpdateHandler2", true); // true => wipe existing index data
}
// must only be called when iwCommit lock held
protected void openWriter() throws IOException {
if (writer==null) {
writer = createMainIndexWriter("DirectUpdateHandler2");
writer = createMainIndexWriter("DirectUpdateHandler2", false);
}
}
@ -337,32 +347,41 @@ public class DirectUpdateHandler2 extends UpdateHandler {
}
boolean madeIt=false;
boolean delAll=false;
try {
Query q = QueryParsing.parseQuery(cmd.query, schema);
delAll = MatchAllDocsQuery.class == q.getClass();
int totDeleted = 0;
iwCommit.lock();
try {
// we need to do much of the commit logic (mainly doing queued
// deletes since deleteByQuery can throw off our counts.
doDeletions();
closeWriter();
openSearcher();
if (delAll) {
deleteAll();
} else {
// we need to do much of the commit logic (mainly doing queued
// deletes since deleteByQuery can throw off our counts.
doDeletions();
// if we want to count the number of docs that were deleted, then
// we need a new instance of the DeleteHitCollector
final DeleteHitCollector deleter = new DeleteHitCollector(searcher);
searcher.search(q, null, deleter);
totDeleted = deleter.deleted;
closeWriter();
openSearcher();
// if we want to count the number of docs that were deleted, then
// we need a new instance of the DeleteHitCollector
final DeleteHitCollector deleter = new DeleteHitCollector(searcher);
searcher.search(q, null, deleter);
totDeleted = deleter.deleted;
}
} finally {
iwCommit.unlock();
}
if (SolrCore.log.isLoggable(Level.FINE)) {
SolrCore.log.fine("docs deleted by query:" + totDeleted);
}
numDocsDeleted.getAndAdd(totDeleted);
if (!delAll) {
if (SolrCore.log.isLoggable(Level.FINE)) {
SolrCore.log.fine("docs deleted by query:" + totDeleted);
}
numDocsDeleted.getAndAdd(totDeleted);
}
madeIt=true;
if( tracker.timeUpperBound > 0 ) {

View File

@ -117,8 +117,8 @@ public abstract class UpdateHandler implements SolrInfoMBean {
core.getInfoRegistry().put("updateHandler", this);
}
protected SolrIndexWriter createMainIndexWriter(String name) throws IOException {
SolrIndexWriter writer = new SolrIndexWriter(name,core.getIndexDir(), false, schema, core.getSolrConfig().mainIndexConfig);
protected SolrIndexWriter createMainIndexWriter(String name, boolean removeAllExisting) throws IOException {
SolrIndexWriter writer = new SolrIndexWriter(name,core.getIndexDir(), removeAllExisting, schema, core.getSolrConfig().mainIndexConfig);
return writer;
}

View File

@ -0,0 +1,137 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search.function;
import org.apache.solr.util.AbstractSolrTestCase;
import java.util.ArrayList;
import java.util.List;
/**
 * Tests the FunctionQuery functions (sum, product, div, pow, log, sqrt,
 * abs, map, scale, plus constant value sources) against both a plain float
 * field and a sortable float field.
 */
public class TestFunctionQuery extends AbstractSolrTestCase {

  public String getSchemaFile() { return "schema11.xml"; }
  public String getSolrConfigFile() { return "solrconfig.xml"; }
  public String getCoreName() { return "basic"; }

  public void setUp() throws Exception {
    // if you override setUp or tearDown, you better call
    // the super classes version
    super.setUp();
  }

  public void tearDown() throws Exception {
    // if you override setUp or tearDown, you better call
    // the super classes version
    super.tearDown();
  }

  /** Indexes one document per value, using the value as both id and field value. */
  void createIndex(String field, float... values) {
    for (float val : values) {
      String s = Float.toString(val);
      assertU(adoc("id", s, field, s));
    }
    assertU(optimize()); // squeeze out any possible deleted docs
  }

  /**
   * Replaces \0 in the template with the field name and wraps the result
   * in _val_:"..." (escaping embedded quotes) to form a parseable query.
   */
  public String func(String field, String template) {
    StringBuilder sb = new StringBuilder("_val_:\"");
    for (char ch : template.toCharArray()) {
      if (ch == '\0') {
        sb.append(field);
        continue;
      }
      if (ch == '"') sb.append('\\');
      sb.append(ch);
    }
    sb.append('"');
    return sb.toString();
  }

  /**
   * Runs the templated function query and asserts each
   * (fieldValue, expectedScore) pair from the results varargs.
   */
  void singleTest(String field, String funcTemplate, float... results) {
    String parseableQuery = func(field, funcTemplate);

    List<String> tests = new ArrayList<String>();

    // Construct xpaths like the following:
    // "//doc[./float[@name='foo_pf']='10.0' and ./float[@name='score']='10.0']"
    for (int i = 0; i < results.length; i += 2) {
      String xpath = "//doc[./float[@name='" + field + "']='"
              + results[i] + "' and ./float[@name='score']='"
              + results[i + 1] + "']";
      tests.add(xpath);
    }

    assertQ(req("q", parseableQuery
            ,"fl", "*,score"
            )
            , tests.toArray(new String[tests.size()])
    );
  }

  void doTest(String field) {
    float[] vals = new float[] {
      100,-4,0,10,25,5
    };
    createIndex(field, vals);

    // test identity (straight field value)
    singleTest(field, "\0", 10,10);

    // test constant score
    singleTest(field,"1.414213", 10, 1.414213f);
    singleTest(field,"-1.414213", 10, -1.414213f);

    singleTest(field,"sum(\0,1)", 10, 11);
    singleTest(field,"sum(\0,\0)", 10, 20);
    singleTest(field,"sum(\0,\0,5)", 10, 25);

    singleTest(field,"product(\0,1)", 10, 10);
    singleTest(field,"product(\0,-2,-4)", 10, 80);

    singleTest(field,"log(\0)",10,1, 100,2);
    singleTest(field,"sqrt(\0)",100,10, 25,5, 0,0);
    singleTest(field,"abs(\0)",10,10, -4,4);
    singleTest(field,"pow(\0,\0)",0,1, 5,3125);
    singleTest(field,"pow(\0,0.5)",100,10, 25,5, 0,0);
    singleTest(field,"div(1,\0)",-4,-.25f, 10,.1f, 100,.01f);
    singleTest(field,"div(1,1)",-4,1, 10,1);

    singleTest(field,"sqrt(abs(\0))",-4,2);
    singleTest(field,"sqrt(sum(29,\0))",-4,5);

    singleTest(field,"map(\0,0,0,500)",10,10, -4,-4, 0,500);
    singleTest(field,"map(\0,-4,5,500)",100,100, -4,500, 0,500, 5,500, 10,10, 25,25);

    singleTest(field,"scale(\0,-1,1)",-4,-1, 100,1, 0,-0.9230769f);
    singleTest(field,"scale(\0,-10,1000)",-4,-10, 100,1000, 0,28.846153f);
  }

  public void testFunctions() {
    doTest("foo_pf"); // a plain float field
    doTest("foo_f");  // a sortable float field
  }
}

View File

@ -0,0 +1,303 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!--
This is the Solr schema file. This file should be named "schema.xml" and
should be in the conf directory under the solr home
(i.e. ./solr/conf/schema.xml by default)
or located where the classloader for the Solr webapp can find it.
This example schema is the recommended starting point for users.
It should be kept correct and concise, usable out-of-the-box.
For more information, on how to customize this file, please see
http://wiki.apache.org/solr/SchemaXml
-->
<schema name="example" version="1.1">
<!-- attribute "name" is the name of this schema and is only used for display purposes.
Applications should change this to reflect the nature of the search collection.
version="1.1" is Solr's version number for the schema syntax and semantics. It should
not normally be changed by applications.
1.0: multiValued attribute did not exist, all fields are multiValued by nature
1.1: multiValued attribute introduced, false by default -->
<types>
<!-- field type definitions. The "name" attribute is
just a label to be used by field definitions. The "class"
attribute and any other attributes determine the real
behavior of the fieldType.
Class names starting with "solr" refer to java classes in the
org.apache.solr.analysis package.
-->
<!-- The StrField type is not analyzed, but indexed/stored verbatim.
- StrField and TextField support an optional compressThreshold which
limits compression (if enabled in the derived fields) to values which
exceed a certain size (in characters).
-->
<fieldType name="string" class="solr.StrField" sortMissingLast="true" omitNorms="true"/>
<!-- boolean type: "true" or "false" -->
<fieldType name="boolean" class="solr.BoolField" sortMissingLast="true" omitNorms="true"/>
<!-- The optional sortMissingLast and sortMissingFirst attributes are
currently supported on types that are sorted internally as strings.
- If sortMissingLast="true", then a sort on this field will cause documents
without the field to come after documents with the field,
regardless of the requested sort order (asc or desc).
- If sortMissingFirst="true", then a sort on this field will cause documents
without the field to come before documents with the field,
regardless of the requested sort order.
- If sortMissingLast="false" and sortMissingFirst="false" (the default),
then default lucene sorting will be used which places docs without the
field first in an ascending sort and last in a descending sort.
-->
<!-- numeric field types that store and index the text
value verbatim (and hence don't support range queries, since the
lexicographic ordering isn't equal to the numeric ordering) -->
<fieldType name="integer" class="solr.IntField" omitNorms="true"/>
<fieldType name="long" class="solr.LongField" omitNorms="true"/>
<fieldType name="float" class="solr.FloatField" omitNorms="true"/>
<fieldType name="double" class="solr.DoubleField" omitNorms="true"/>
<!-- Numeric field types that manipulate the value into
a string value that isn't human-readable in its internal form,
but with a lexicographic ordering the same as the numeric ordering,
so that range queries work correctly. -->
<fieldType name="sint" class="solr.SortableIntField" sortMissingLast="true" omitNorms="true"/>
<fieldType name="slong" class="solr.SortableLongField" sortMissingLast="true" omitNorms="true"/>
<fieldType name="sfloat" class="solr.SortableFloatField" sortMissingLast="true" omitNorms="true"/>
<fieldType name="sdouble" class="solr.SortableDoubleField" sortMissingLast="true" omitNorms="true"/>
<!-- The format for this date field is of the form 1995-12-31T23:59:59Z, and
is a more restricted form of the canonical representation of dateTime
http://www.w3.org/TR/xmlschema-2/#dateTime
The trailing "Z" designates UTC time and is mandatory.
Optional fractional seconds are allowed: 1995-12-31T23:59:59.999Z
All other components are mandatory.
Expressions can also be used to denote calculations that should be
performed relative to "NOW" to determine the value, ie...
NOW/HOUR
... Round to the start of the current hour
NOW-1DAY
... Exactly 1 day prior to now
NOW/DAY+6MONTHS+3DAYS
... 6 months and 3 days in the future from the start of
the current day
Consult the DateField javadocs for more information.
-->
<fieldType name="date" class="solr.DateField" sortMissingLast="true" omitNorms="true"/>
<!-- The "RandomSortField" is not used to store or search any
data. You can declare fields of this type it in your schema
to generate pseudo-random orderings of your docs for sorting
purposes. The ordering is generated based on the field name
and the version of the index, As long as the index version
remains unchanged, and the same field name is reused,
the ordering of the docs will be consistent.
If you want different pseudo-random orderings of documents,
for the same version of the index, use a dynamicField and
change the name
-->
<fieldType name="random" class="solr.RandomSortField" indexed="true" />
<!-- solr.TextField allows the specification of custom text analyzers
specified as a tokenizer and a list of token filters. Different
analyzers may be specified for indexing and querying.
The optional positionIncrementGap puts space between multiple fields of
this type on the same document, with the purpose of preventing false phrase
matching across fields.
For more info on customizing your analyzer chain, please see
http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters
-->
<!-- One can also specify an existing Analyzer class that has a
default constructor via the class attribute on the analyzer element
<fieldType name="text_greek" class="solr.TextField">
<analyzer class="org.apache.lucene.analysis.el.GreekAnalyzer"/>
</fieldType>
-->
<!-- A text field that only splits on whitespace for exact matching of words -->
<fieldType name="text_ws" class="solr.TextField" positionIncrementGap="100">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
</analyzer>
</fieldType>
<!-- A text field that uses WordDelimiterFilter to enable splitting and matching of
words on case-change, alpha numeric boundaries, and non-alphanumeric chars,
so that a query of "wifi" or "wi fi" could match a document containing "Wi-Fi".
Synonyms and stopwords are customized by external files, and stemming is enabled.
Duplicate tokens at the same position (which may result from Stemmed Synonyms or
WordDelim parts) are removed.
-->
<fieldType name="text" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<!-- in this example, we will only use synonyms at query time
<filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
-->
<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EnglishPorterFilterFactory" protected="protwords.txt"/>
<filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EnglishPorterFilterFactory" protected="protwords.txt"/>
<filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
</analyzer>
</fieldType>
<!-- Less flexible matching, but less false matches. Probably not ideal for product names,
but may be good for SKUs. Can insert dashes in the wrong place and still match. -->
<fieldType name="textTight" class="solr.TextField" positionIncrementGap="100" >
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EnglishPorterFilterFactory" protected="protwords.txt"/>
<filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
</analyzer>
</fieldType>
<!-- This is an example of using the KeywordTokenizer along
with various TokenFilterFactories to produce a sortable field
that does not include some properties of the source text
-->
<fieldType name="alphaOnlySort" class="solr.TextField" sortMissingLast="true" omitNorms="true">
<analyzer>
<!-- KeywordTokenizer does no actual tokenizing, so the entire
input string is preserved as a single token
-->
<tokenizer class="solr.KeywordTokenizerFactory"/>
<!-- The LowerCase TokenFilter does what you expect, which can be useful
when you want your sorting to be case insensitive
-->
<filter class="solr.LowerCaseFilterFactory" />
<!-- The TrimFilter removes any leading or trailing whitespace -->
<filter class="solr.TrimFilterFactory" />
<!-- The PatternReplaceFilter gives you the flexibility to use
Java Regular expression to replace any sequence of characters
matching a pattern with an arbitrary replacement string,
which may include back references to portions of the original
string matched by the pattern.
See the Java Regular Expression documentation for more
information on pattern and replacement string syntax.
http://java.sun.com/j2se/1.5.0/docs/api/java/util/regex/package-summary.html
-->
<filter class="solr.PatternReplaceFilterFactory"
pattern="([^a-z])" replacement="" replace="all"
/>
</analyzer>
</fieldType>
<!-- since fields of this type are by default not stored or indexed, any data added to
them will be ignored outright
-->
<fieldtype name="ignored" stored="false" indexed="false" class="solr.StrField" />
</types>
<fields>
<!-- Valid attributes for fields:
name: mandatory - the name for the field
type: mandatory - the name of a previously defined type from the <types> section
indexed: true if this field should be indexed (searchable or sortable)
stored: true if this field should be retrievable
compressed: [false] if this field should be stored using gzip compression
(this will only apply if the field type is compressible; among
the standard field types, only TextField and StrField are)
multiValued: true if this field may contain multiple values per document
omitNorms: (expert) set to true to omit the norms associated with
this field (this disables length normalization and index-time
boosting for the field, and saves some memory). Only full-text
fields or fields that need an index-time boost need norms.
termVectors: [false] set to true to store the term vector for a given field.
When using MoreLikeThis, fields used for similarity should be stored for
best performance.
-->
<!-- for testing, a type that does a transform to see if it's correctly done everywhere -->
<field name="id" type="sfloat" indexed="true" stored="true" required="true" />
<field name="text" type="text" indexed="true" stored="false" />
<!-- Dynamic field definitions. If a field name is not found, dynamicFields
will be used if the name matches any of the patterns.
RESTRICTION: the glob-like pattern in the name attribute must have
a "*" only at the start or the end.
EXAMPLE: name="*_i" will match any field ending in _i (like myid_i, z_i)
Longer patterns will be matched first. if equal size patterns
both match, the first appearing in the schema will be used. -->
<dynamicField name="*_s" type="string" indexed="true" stored="true"/>
<dynamicField name="*_i" type="sint" indexed="true" stored="true"/>
<dynamicField name="*_l" type="slong" indexed="true" stored="true"/>
<dynamicField name="*_f" type="sfloat" indexed="true" stored="true"/>
<dynamicField name="*_d" type="sdouble" indexed="true" stored="true"/>
<dynamicField name="*_pi" type="integer" indexed="true" stored="true"/>
<dynamicField name="*_pl" type="long" indexed="true" stored="true"/>
<dynamicField name="*_pf" type="float" indexed="true" stored="true"/>
<dynamicField name="*_pd" type="double" indexed="true" stored="true"/>
<dynamicField name="*_t" type="text" indexed="true" stored="true"/>
<dynamicField name="*_b" type="boolean" indexed="true" stored="true"/>
<dynamicField name="*_dt" type="date" indexed="true" stored="true"/>
<dynamicField name="*_random" type="random" />
<!-- uncomment the following to ignore any fields that don't already match an existing
field name or dynamic field, rather than reporting them as an error.
alternately, change the type="ignored" to some other type e.g. "text" if you want
unknown fields indexed and/or stored by default -->
<!--dynamicField name="*" type="ignored" /-->
</fields>
<!-- Field to use to determine and enforce document uniqueness.
Unless this field is marked with required="false", it will be a required field
-->
<uniqueKey>id</uniqueKey>
<!-- field for the QueryParser to use when an explicit fieldname is absent -->
<defaultSearchField>text</defaultSearchField>
</schema>