merge up to 924414

git-svn-id: https://svn.apache.org/repos/asf/lucene/solr/branches/solr@924451 13f79535-47bb-0310-9956-ffa450edef68
Yonik Seeley 2010-03-17 19:59:42 +00:00
commit cfa432ba68
10 changed files with 277 additions and 136 deletions

View File

@@ -69,27 +69,27 @@
<target name="compileTests" depends="compile">
<solr-javac destdir="target/test-classes"
classpathref="test.classpath">
<src path="src/test/java" />
<src path="src/test" />
</solr-javac>
</target>
<target name="test" depends="compileTests">
<mkdir dir="${junit.output.dir}"/>
<junit printsummary="on"
<junit printsummary="true" showoutput="true"
haltonfailure="no"
errorProperty="tests.failed"
failureProperty="tests.failed"
dir="src/test/resources/"
dir="src/main"
>
<formatter type="brief" usefile="false" if="junit.details"/>
<formatter type="brief" usefile="false"/>
<classpath refid="test.classpath"/>
<formatter type="xml"/>
<!--<formatter type="xml" usefile="false"/>-->
<batchtest fork="yes" todir="${junit.output.dir}" unless="testcase">
<fileset dir="src/test/java" includes="${junit.includes}"/>
<fileset dir="src/test" includes="${junit.includes}"/>
</batchtest>
<batchtest fork="yes" todir="${junit.output.dir}" if="testcase">
<fileset dir="src/test/java" includes="**/${testcase}.java"/>
<fileset dir="src/test" includes="**/${testcase}.java"/>
</batchtest>
</junit>
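For reference: the if="testcase"/unless="testcase" pair on the batchtest elements above makes this target run the full suite by default, and a single test class when the testcase property is defined on the Ant command line, e.g.

    ant test -Dtestcase=VelocityResponseWriterTest

The class name here is only an illustration; any test class under src/test matching the pattern works.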

View File

@@ -15,8 +15,9 @@
* limitations under the License.
*/
package org.apache.solr.request;
package org.apache.solr.response;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.search.DocSlice;
import org.apache.solr.common.SolrDocumentList;

View File

@@ -15,8 +15,9 @@
* limitations under the License.
*/
package org.apache.solr.request;
package org.apache.solr.response;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.velocity.runtime.resource.loader.ResourceLoader;
import org.apache.velocity.runtime.resource.Resource;
import org.apache.velocity.exception.ResourceNotFoundException;

View File

@@ -14,7 +14,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.request;
package org.apache.solr.response;
import org.apache.velocity.runtime.resource.loader.ResourceLoader;
import org.apache.velocity.runtime.resource.Resource;

View File

@@ -15,37 +15,32 @@
* limitations under the License.
*/
package org.apache.solr.request;
package org.apache.solr.response;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.client.solrj.SolrResponse;
import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.client.solrj.response.SolrResponseBase;
import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer;
import org.apache.solr.response.QueryResponseWriter;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.velocity.Template;
import org.apache.velocity.VelocityContext;
import org.apache.velocity.tools.generic.*;
import org.apache.velocity.app.VelocityEngine;
import org.apache.velocity.tools.generic.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.StringWriter;
import java.io.Writer;
import java.io.*;
import java.util.Properties;
public class VelocityResponseWriter implements QueryResponseWriter {
// TODO: maybe pass this Logger to the template for logging from there?
private static final Logger log = LoggerFactory.getLogger(VelocityResponseWriter.class);
public void write(Writer writer, SolrQueryRequest request, SolrQueryResponse response) throws IOException {
VelocityEngine engine = getEngine(request); // TODO: have HTTP headers available for configuring engine
// TODO: Add layout capability, render to string buffer, then render layout
Template template = getTemplate(engine, request);
VelocityContext context = new VelocityContext();
@@ -67,10 +62,11 @@ public class VelocityResponseWriter implements QueryResponseWriter {
rsp.setResponse(parsedResponse);
// page only injected if QueryResponse works
context.put("page",new PageTool(request,response)); // page tool only makes sense for a SearchHandler request... *sigh*
context.put("page", new PageTool(request, response)); // page tool only makes sense for a SearchHandler request... *sigh*
} catch (ClassCastException e) {
// known edge case where QueryResponse's extraction assumes "response" is a SolrDocumentList
// (AnalysisRequestHandler emits a "response")
e.printStackTrace();
rsp = new SolrResponseBase();
rsp.setResponse(parsedResponse);
}
@@ -78,22 +74,23 @@ public class VelocityResponseWriter implements QueryResponseWriter {
// Velocity context tools - TODO: make these pluggable
context.put("esc", new EscapeTool());
context.put("sort", new SortTool());
context.put("number", new NumberTool());
context.put("list", new ListTool());
context.put("date", new ComparisonDateTool());
context.put("list", new ListTool());
context.put("math", new MathTool());
context.put("number", new NumberTool());
context.put("sort", new SortTool());
context.put("engine", engine); // for $engine.resourceExists(...)
String layout_template = request.getParams().get("v.layout");
String json_wrapper = request.getParams().get("v.json");
boolean wrap_response = (layout_template != null) || (json_wrapper !=null);
boolean wrap_response = (layout_template != null) || (json_wrapper != null);
// create output, optionally wrap it into a json object
if (wrap_response) {
StringWriter stringWriter = new StringWriter();
template.merge(context, stringWriter);
if (layout_template != null) {
context.put("content", stringWriter.toString());
stringWriter = new StringWriter();
@@ -103,7 +100,7 @@ public class VelocityResponseWriter implements QueryResponseWriter {
throw new IOException(e.getMessage());
}
}
if (json_wrapper != null) {
writer.write(request.getParams().get("v.json") + "(");
writer.write(getJSONWrap(stringWriter.toString()));
@@ -126,28 +123,30 @@ public class VelocityResponseWriter implements QueryResponseWriter {
engine.setProperty(VelocityEngine.FILE_RESOURCE_LOADER_PATH, baseDir.getAbsolutePath());
engine.setProperty("params.resource.loader.instance", new SolrParamResourceLoader(request));
SolrVelocityResourceLoader resourceLoader =
new SolrVelocityResourceLoader(request.getCore().getSolrConfig().getResourceLoader());
new SolrVelocityResourceLoader(request.getCore().getSolrConfig().getResourceLoader());
engine.setProperty("solr.resource.loader.instance", resourceLoader);
// TODO: Externalize Velocity properties
engine.setProperty(VelocityEngine.RESOURCE_LOADER, "params,file,solr");
String propFile = request.getParams().get("v.properties");
try{
if( propFile == null )
try {
if (propFile == null)
engine.init();
else{
else {
InputStream is = null;
try{
is = resourceLoader.getResourceStream( propFile );
try {
is = resourceLoader.getResourceStream(propFile);
Properties props = new Properties();
props.load( is );
engine.init( props );
props.load(is);
engine.init(props);
}
finally{
if( is != null ) is.close();
finally {
if (is != null) is.close();
}
}
}
catch( Exception e ){
throw new RuntimeException( e );
catch (Exception e) {
throw new RuntimeException(e);
}
return engine;
@@ -176,9 +175,9 @@ public class VelocityResponseWriter implements QueryResponseWriter {
}
public String getContentType(SolrQueryRequest request, SolrQueryResponse response) {
return request.getParams().get("v.contentType","text/html");
return request.getParams().get("v.contentType", "text/html");
}
private String getJSONWrap(String xmlResult) { // TODO: maybe noggit or Solr's JSON utilities can make this cleaner?
// escape the double quotes and backslashes
String replace1 = xmlResult.replaceAll("\\\\", "\\\\\\\\");
@@ -188,7 +187,7 @@ public class VelocityResponseWriter implements QueryResponseWriter {
// wrap it in a JSON object
return "{\"result\":\"" + replaced + "\"}";
}
public void init(NamedList args) {
}
}
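Taken together, the parameters handled in write() compose as follows: the template named by v.template is rendered first; if v.layout is set, that output is handed to the layout template as $content; if v.json is set, the final output is wrapped in a JavaScript callback around the {"result":"..."} object built by getJSONWrap(). Assuming the writer is registered under wt=velocity, and with invented host, query, and template names, a request exercising all three would look like:

    http://localhost:8983/solr/select?q=solr&wt=velocity&v.template=browse&v.layout=layout&v.json=myCallback

and would emit myCallback(...) with the rendered, JSON-escaped page as its argument.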

View File

@@ -27,15 +27,31 @@
For more information, on how to customize this file, please see
http://wiki.apache.org/solr/SchemaXml
PERFORMANCE NOTE: this schema includes many optional features and should not
be used for benchmarking. To improve performance one could
- set stored="false" for all fields possible (esp large fields) when you
only need to search on the field but don't need to return the original
value.
- set indexed="false" if you don't need to search on the field, but only
return the field as a result of searching on other indexed fields.
- remove all unneeded copyField statements
- for best index size and searching performance, set "index" to false
for all general text fields, use copyField to copy them to the
catchall "text" field, and use that for searching.
- For maximum indexing performance, use the StreamingUpdateSolrServer
java client.
-->
<schema name="example" version="1.1">
<schema name="example" version="1.2">
<!-- attribute "name" is the name of this schema and is only used for display purposes.
Applications should change this to reflect the nature of the search collection.
version="1.1" is Solr's version number for the schema syntax and semantics. It should
version="1.2" is Solr's version number for the schema syntax and semantics. It should
not normally be changed by applications.
1.0: multiValued attribute did not exist, all fields are multiValued by nature
1.1: multiValued attribute introduced, false by default -->
1.1: multiValued attribute introduced, false by default
1.2: omitTermFreqAndPositions attribute introduced, true by default except for text fields.
-->
<types>
<!-- field type definitions. The "name" attribute is
@@ -58,6 +74,7 @@
<!-- The optional sortMissingLast and sortMissingFirst attributes are
currently supported on types that are sorted internally as strings.
This includes "string","boolean","sint","slong","sfloat","sdouble","pdate"
- If sortMissingLast="true", then a sort on this field will cause documents
without the field to come after documents with the field,
regardless of the requested sort order (asc or desc).
@@ -69,25 +86,29 @@
field first in an ascending sort and last in a descending sort.
-->
<!-- Default numeric field types. For faster range queries, consider the tint/tfloat/tlong/tdouble types.
Note: the statistics component does not yet work with these field types.
-->
<fieldType name="int" class="solr.TrieIntField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="float" class="solr.TrieFloatField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="long" class="solr.TrieLongField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
<!-- numeric field types that store and index the text
value verbatim (and hence don't support range queries, since the
lexicographic ordering isn't equal to the numeric ordering) -->
<fieldType name="integer" class="solr.IntField" omitNorms="true"/>
<fieldType name="long" class="solr.LongField" omitNorms="true"/>
<fieldType name="float" class="solr.FloatField" omitNorms="true"/>
<fieldType name="double" class="solr.DoubleField" omitNorms="true"/>
<!--
Numeric field types that index each value at various levels of precision
to accelerate range queries when the number of values between the range
endpoints is large. See the javadoc for NumericRangeQuery for internal
implementation details.
Smaller precisionStep values (specified in bits) will lead to more tokens
indexed per value, slightly larger index size, and faster range queries.
<!-- Numeric field types that manipulate the value into
a string value that isn't human-readable in its internal form,
but with a lexicographic ordering the same as the numeric ordering,
so that range queries work correctly. -->
<fieldType name="sint" class="solr.SortableIntField" sortMissingLast="true" omitNorms="true"/>
<fieldType name="slong" class="solr.SortableLongField" sortMissingLast="true" omitNorms="true"/>
<fieldType name="sfloat" class="solr.SortableFloatField" sortMissingLast="true" omitNorms="true"/>
<fieldType name="sdouble" class="solr.SortableDoubleField" sortMissingLast="true" omitNorms="true"/>
Note: faceting does not currently work for these fields.
-->
<fieldType name="tint" class="solr.TrieIntField" precisionStep="8" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" omitNorms="true" positionIncrementGap="0"/>
<!-- The format for this date field is of the form 1995-12-31T23:59:59Z, and
is a more restricted form of the canonical representation of dateTime
@@ -109,17 +130,43 @@
Consult the DateField javadocs for more information.
-->
<fieldType name="date" class="solr.DateField" sortMissingLast="true" omitNorms="true"/>
<fieldType name="date" class="solr.TrieDateField" omitNorms="true" precisionStep="0" positionIncrementGap="0"/>
<!-- A Trie based date field for faster date range queries and date faceting. -->
<fieldType name="tdate" class="solr.TrieDateField" omitNorms="true" precisionStep="6" positionIncrementGap="0"/>
<!-- plain numeric field types that store and index the text
value verbatim (and hence don't support range queries, since the
lexicographic ordering isn't equal to the numeric ordering)
These should only be used for compatibility with existing indexes.
Use Trie based fields instead.
-->
<fieldType name="pint" class="solr.IntField" omitNorms="true"/>
<fieldType name="plong" class="solr.LongField" omitNorms="true"/>
<fieldType name="pfloat" class="solr.FloatField" omitNorms="true"/>
<fieldType name="pdouble" class="solr.DoubleField" omitNorms="true"/>
<fieldType name="pdate" class="solr.DateField" sortMissingLast="true" omitNorms="true"/>
<!--
These types should only be used for back compatibility with existing
indexes, or if "sortMissingLast" functionality is needed. Use Trie based fields instead.
-->
<fieldType name="sint" class="solr.SortableIntField" sortMissingLast="true" omitNorms="true"/>
<fieldType name="slong" class="solr.SortableLongField" sortMissingLast="true" omitNorms="true"/>
<fieldType name="sfloat" class="solr.SortableFloatField" sortMissingLast="true" omitNorms="true"/>
<fieldType name="sdouble" class="solr.SortableDoubleField" sortMissingLast="true" omitNorms="true"/>
<!-- The "RandomSortField" is not used to store or search any
data. You can declare fields of this type it in your schema
to generate psuedo-random orderings of your docs for sorting
to generate pseudo-random orderings of your docs for sorting
purposes. The ordering is generated based on the field name
and the version of the index, As long as the index version
remains unchanged, and the same field name is reused,
the ordering of the docs will be consistent.
If you want differend psuedo-random orderings of documents,
If you want different psuedo-random orderings of documents,
for the same version of the index, use a dynamicField and
change the name
-->
@@ -155,8 +202,6 @@
words on case-change, alpha numeric boundaries, and non-alphanumeric chars,
so that a query of "wifi" or "wi fi" could match a document containing "Wi-Fi".
Synonyms and stopwords are customized by external files, and stemming is enabled.
Duplicate tokens at the same position (which may result from Stemmed Synonyms or
WordDelim parts) are removed.
-->
<fieldType name="text" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
@@ -175,8 +220,7 @@
/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EnglishPorterFilterFactory" protected="protwords.txt"/>
<filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
<filter class="solr.SnowballPorterFilterFactory" language="English" protected="protwords.txt"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
@@ -188,8 +232,7 @@
/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EnglishPorterFilterFactory" protected="protwords.txt"/>
<filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
<filter class="solr.SnowballPorterFilterFactory" language="English" protected="protwords.txt"/>
</analyzer>
</fieldType>
@@ -203,28 +246,41 @@
<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EnglishPorterFilterFactory" protected="protwords.txt"/>
<filter class="solr.SnowballPorterFilterFactory" language="English" protected="protwords.txt"/>
<!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
possible with WordDelimiterFilter in conjuncton with stemming. -->
<filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
</analyzer>
</fieldType>
<!--
Setup simple analysis for spell checking
-->
<fieldType name="textSpell" class="solr.TextField" positionIncrementGap="100" >
<analyzer>
<tokenizer class="solr.StandardTokenizerFactory"/>
<!-- A general unstemmed text field - good if one does not know the language of the field -->
<fieldType name="textgen" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" enablePositionIncrements="false" />
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
<filter class="solr.StopFilterFactory"
ignoreCase="true"
words="stopwords.txt"
enablePositionIncrements="true"
/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
</analyzer>
</fieldType>
<!-- charFilter + "CharStream aware" WhitespaceTokenizer -->
<!-- charFilter + WhitespaceTokenizer -->
<!--
<fieldType name="textCharNorm" class="solr.TextField" positionIncrementGap="100" >
<analyzer>
<charFilter class="solr.MappingCharFilterFactory" mapping="mapping-ISOLatin1Accent.txt"/>
<tokenizer class="solr.CharStreamAwareWhitespaceTokenizerFactory"/>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
</analyzer>
</fieldType>
-->
@@ -248,11 +304,11 @@
<!-- The PatternReplaceFilter gives you the flexibility to use
Java Regular expression to replace any sequence of characters
matching a pattern with an arbitrary replacement string,
which may include back refrences to portions of the orriginal
which may include back references to portions of the original
string matched by the pattern.
See the Java Regular Expression documentation for more
infomation on pattern and replacement string syntax.
information on pattern and replacement string syntax.
http://java.sun.com/j2se/1.5.0/docs/api/java/util/regex/package-summary.html
-->
@@ -267,13 +323,46 @@
<tokenizer class="solr.StandardTokenizerFactory"/>
<filter class="solr.DoubleMetaphoneFilterFactory" inject="false"/>
</analyzer>
</fieldtype>
</fieldtype>
<!-- since fields of this type are by default not stored or indexed, any data added to
them will be ignored outright
-->
<fieldtype name="ignored" stored="false" indexed="false" class="solr.StrField" />
<fieldtype name="payloads" stored="false" indexed="true" class="solr.TextField" >
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<!--
The DelimitedPayloadTokenFilter can put payloads on tokens... for example,
a token of "foo|1.4" would be indexed as "foo" with a payload of 1.4f
Attributes of the DelimitedPayloadTokenFilterFactory :
"delimiter" - a one character delimiter. Default is | (pipe)
"encoder" - how to encode the following value into a playload
float -> org.apache.lucene.analysis.payloads.FloatEncoder,
integer -> o.a.l.a.p.IntegerEncoder
identity -> o.a.l.a.p.IdentityEncoder
Fully Qualified class name implementing PayloadEncoder, Encoder must have a no arg constructor.
-->
<filter class="solr.DelimitedPayloadTokenFilterFactory" encoder="float"/>
</analyzer>
</fieldtype>
<!-- lowercases the entire field value, keeping it as a single token. -->
<fieldType name="lowercase" class="solr.TextField" positionIncrementGap="100">
<analyzer>
<tokenizer class="solr.KeywordTokenizerFactory"/>
<filter class="solr.LowerCaseFilterFactory" />
</analyzer>
</fieldType>
<!-- since fields of this type are by default not stored or indexed,
any data added to them will be ignored outright. -->
<fieldtype name="ignored" stored="false" indexed="false" multiValued="true" class="solr.StrField" />
<fieldType name="html_text" class="solr.TextField">
<analyzer>
<charFilter class="solr.HTMLStripCharFilterFactory"/>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
</analyzer>
</fieldType>
</types>
@@ -281,7 +370,8 @@
<fields>
<!-- Valid attributes for fields:
name: mandatory - the name for the field
type: mandatory - the name of a previously defined type from the <types> section
type: mandatory - the name of a previously defined type from the
<types> section
indexed: true if this field should be indexed (searchable or sortable)
stored: true if this field should be retrievable
compressed: [false] if this field should be stored using gzip compression
@@ -292,39 +382,35 @@
this field (this disables length normalization and index-time
boosting for the field, and saves some memory). Only full-text
fields or fields that need an index-time boost need norms.
termVectors: [false] set to true to store the term vector for a given field.
When using MoreLikeThis, fields used for similarity should be stored for
best performance.
termPositions: Store position information with the term vector. This will increase storage costs.
termOffsets: Store offset information with the term vector. This will increase storage costs.
termVectors: [false] set to true to store the term vector for a
given field.
When using MoreLikeThis, fields used for similarity should be
stored for best performance.
termPositions: Store position information with the term vector.
This will increase storage costs.
termOffsets: Store offset information with the term vector. This
will increase storage costs.
default: a value that should be used if no value is specified
when adding a document.
-->
<field name="id" type="string" indexed="true" stored="true" required="true" />
<field name="sku" type="textTight" indexed="true" stored="true" omitNorms="true"/>
<field name="name" type="text" indexed="true" stored="true"/>
<field name="nameSort" type="string" indexed="true" stored="false"/>
<field name="name" type="textgen" indexed="true" stored="true"/>
<field name="alphaNameSort" type="alphaOnlySort" indexed="true" stored="false"/>
<field name="manu" type="text" indexed="true" stored="true" omitNorms="true"/>
<field name="cat" type="text_ws" indexed="true" stored="true" multiValued="true" omitNorms="true" termVectors="true" />
<field name="features" type="text" indexed="true" stored="true" multiValued="true" termVectors="true" termPositions="true" termOffsets="true"/>
<field name="includes" type="text" indexed="true" stored="true"/>
<field name="manu" type="textgen" indexed="true" stored="true" omitNorms="true"/>
<field name="cat" type="text_ws" indexed="true" stored="true" multiValued="true" omitNorms="true" />
<field name="features" type="text" indexed="true" stored="true" multiValued="true"/>
<field name="includes" type="text" indexed="true" stored="true" termVectors="true" termPositions="true" termOffsets="true" />
<field name="weight" type="sfloat" indexed="true" stored="true"/>
<field name="price" type="sfloat" indexed="true" stored="true"/>
<!-- "default" values can be specified for fields, indicating which
value should be used if no value is specified when adding a document.
-->
<field name="popularity" type="sint" indexed="true" stored="true" default="0"/>
<field name="inStock" type="boolean" indexed="true" stored="true"/>
<field name="weight" type="float" indexed="true" stored="true"/>
<field name="price" type="float" indexed="true" stored="true"/>
<field name="popularity" type="int" indexed="true" stored="true" />
<field name="inStock" type="boolean" indexed="true" stored="true" />
<!-- Some sample docs exists solely to demonstrate the spellchecker
functionality, this is the only field they container.
Typically you might build the spellchecker of "catchall" type field
containing all of the text in each document
-->
<field name="word" type="string" indexed="true" stored="true"/>
<field name="title" type="text" indexed="true" stored="true"/>
<!-- catchall field, containing all other searchable text fields (implemented
via copyField further on in this schema -->
<field name="text" type="text" indexed="true" stored="false" multiValued="true"/>
@@ -333,12 +419,16 @@
results by manufacturer. copied from "manu" via copyField -->
<field name="manu_exact" type="string" indexed="true" stored="false"/>
<!-- Here, default is used to create a "timestamp" field indicating
When each document was indexed.
<field name="payloads" type="payloads" indexed="true" stored="true"/>
<!-- Uncommenting the following will create a "timestamp" field using
a default value of "NOW" to indicate when each document was indexed.
-->
<!--
<field name="timestamp" type="date" indexed="true" stored="true" default="NOW" multiValued="false"/>
-->
<field name="spell" type="textSpell" indexed="true" stored="true" multiValued="true"/>
<!-- Dynamic field definitions. If a field name is not found, dynamicFields
will be used if the name matches any of the patterns.
RESTRICTION: the glob-like pattern in the name attribute must have
@@ -346,23 +436,34 @@
EXAMPLE: name="*_i" will match any field ending in _i (like myid_i, z_i)
Longer patterns will be matched first. if equal size patterns
both match, the first appearing in the schema will be used. -->
<dynamicField name="*_i" type="sint" indexed="true" stored="true"/>
<dynamicField name="*_i" type="int" indexed="true" stored="true"/>
<dynamicField name="*_s" type="string" indexed="true" stored="true"/>
<dynamicField name="*_l" type="slong" indexed="true" stored="true"/>
<dynamicField name="*_l" type="long" indexed="true" stored="true"/>
<dynamicField name="*_t" type="text" indexed="true" stored="true"/>
<dynamicField name="*_b" type="boolean" indexed="true" stored="true"/>
<dynamicField name="*_f" type="sfloat" indexed="true" stored="true"/>
<dynamicField name="*_d" type="sdouble" indexed="true" stored="true"/>
<dynamicField name="*_f" type="float" indexed="true" stored="true"/>
<dynamicField name="*_d" type="double" indexed="true" stored="true"/>
<dynamicField name="*_dt" type="date" indexed="true" stored="true"/>
<dynamicField name="*" type="string" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="random*" type="random" />
<!-- some trie-coded dynamic fields for faster range queries -->
<dynamicField name="*_ti" type="tint" indexed="true" stored="true"/>
<dynamicField name="*_tl" type="tlong" indexed="true" stored="true"/>
<dynamicField name="*_tf" type="tfloat" indexed="true" stored="true"/>
<dynamicField name="*_td" type="tdouble" indexed="true" stored="true"/>
<dynamicField name="*_tdt" type="tdate" indexed="true" stored="true"/>
<dynamicField name="*_pi" type="pint" indexed="true" stored="true"/>
<dynamicField name="ignored_*" type="ignored" multiValued="true"/>
<dynamicField name="attr_*" type="textgen" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="random_*" type="random" />
<!-- uncomment the following to ignore any fields that don't already match an existing
field name or dynamic field, rather than reporting them as an error.
alternately, change the type="ignored" to some other type e.g. "text" if you want
unknown fields indexed and/or stored by default -->
<!--dynamicField name="*" type="ignored" /-->
<!--dynamicField name="*" type="ignored" multiValued="true" /-->
</fields>
@@ -380,20 +481,24 @@
<!-- copyField commands copy one field to another at the time a document
is added to the index. It's used either to index the same field differently,
or to add multiple fields to the same field for easier/faster searching. -->
<copyField source="id" dest="sku"/>
<copyField source="incubationdate_dt" dest="incubationdate_s"/>
<copyField source="cat" dest="text"/>
<copyField source="name" dest="text"/>
<copyField source="name" dest="nameSort"/>
<copyField source="name" dest="alphaNameSort"/>
<copyField source="manu" dest="text"/>
<copyField source="features" dest="text"/>
<copyField source="includes" dest="text"/>
<copyField source="manu" dest="manu_exact"/>
<!-- Above, multiple source fields are copied to the [text] field.
Another way to map multiple source fields to the same
destination field is to use the dynamic field syntax.
copyField also supports a maxChars to copy setting. -->
<!-- <copyField source="*_t" dest="text" maxChars="3000"/> -->
<copyField source="name" dest="spell"/>
<!-- copy name to alphaNameSort, a field designed for sorting by name -->
<!-- <copyField source="name" dest="alphaNameSort"/> -->
<!-- Similarity is the scoring routine for each document vs. a query.
A custom similarity may be specified here, but the default is fine
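As a quick illustration of the field changes above (the plain int/float types, the new trie *_ti/*_tl/*_tf/*_td/*_tdt dynamic fields, and the payloads field), a document indexed through SolrJ might look like the following sketch. The field values and the server variable (any SolrServer instance, e.g. the EmbeddedSolrServer this contrib already imports) are invented for illustration:

    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", "EX1");                                   // required "string" field
    doc.addField("name", "Example widget");                      // now a "textgen" field
    doc.addField("price", 19.95f);                               // plain "float" (was "sfloat")
    doc.addField("popularity_ti", 10);                           // *_ti -> tint, fast numeric range queries
    doc.addField("manufacturedate_tdt", "2010-03-17T00:00:00Z"); // *_tdt -> tdate, fast date ranges/faceting
    doc.addField("payloads", "widget|1.4 gadget|0.5");           // term|float pairs per DelimitedPayloadTokenFilterFactory
    server.add(doc);
    server.commit();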

View File

@@ -337,7 +337,7 @@
-->
<requestDispatcher handleSelect="true" >
<!--Make sure your system has some authentication before enabling remote streaming! -->
<requestParsers enableRemoteStreaming="false" multipartUploadLimitInKB="2048" />
<requestParsers enableRemoteStreaming="true" multipartUploadLimitInKB="2048" />
<!-- Set HTTP caching related parameters (for proxy caches and clients).
@@ -398,6 +398,9 @@
</requestHandler>
<requestHandler name="/mlt" class="solr.MoreLikeThisHandler"/>
<requestHandler name="/analysis/field" class="solr.FieldAnalysisRequestHandler" />
<requestHandler name="/itas" class="solr.SearchHandler">
<lst name="defaults">
<str name="v.template">browse</str>

View File

@@ -0,0 +1,8 @@
<html>
<head>
<title>Solritas</title>
</head>
<body>
$content
</body>
</html>
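The $content reference in this new layout is populated by VelocityResponseWriter above (context.put("content", stringWriter.toString())), so whichever template v.template selects is rendered into the body of this page whenever v.layout points at this layout.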

View File

@@ -1 +0,0 @@
Yes, tests are forthcoming! Sorry Hudson.

View File

@@ -0,0 +1,25 @@
package org.apache.solr.velocity;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.response.VelocityResponseWriter;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.util.AbstractSolrTestCase;
import java.io.StringWriter;
import java.io.IOException;
public class VelocityResponseWriterTest extends AbstractSolrTestCase {
public String getSchemaFile() { return "schema.xml"; }
public String getSolrConfigFile() { return "solrconfig.xml"; }
public void testTemplateName() throws IOException {
org.apache.solr.response.VelocityResponseWriter vrw = new VelocityResponseWriter();
SolrQueryRequest req = req("v.template","custom", "v.template.custom","$response.response.response_data");
SolrQueryResponse rsp = new SolrQueryResponse();
StringWriter buf = new StringWriter();
rsp.add("response_data", "testing");
vrw.write(buf, req, rsp);
assertEquals("testing", buf.toString());
}
}
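A natural follow-on test, sketched here under the same helpers (it is not part of this commit), would exercise the v.json wrapper the same way, asserting only on the callback prefix since the exact escaped body depends on getJSONWrap():

    public void testJSONWrapper() throws IOException {
      VelocityResponseWriter vrw = new VelocityResponseWriter();
      SolrQueryRequest req = req("v.template", "custom",
          "v.template.custom", "$response.response.response_data",
          "v.json", "foo");
      SolrQueryResponse rsp = new SolrQueryResponse();
      StringWriter buf = new StringWriter();
      rsp.add("response_data", "testing");
      vrw.write(buf, req, rsp);
      // write() emits the callback name, "(", then the JSON-wrapped rendered output
      assertTrue(buf.toString().startsWith("foo("));
    }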