SOLR-3 Removed src/apps containing the legacy SolrTest app

git-svn-id: https://svn.apache.org/repos/asf/incubator/solr/trunk@480683 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Chris M. Hostetter 2006-11-29 18:58:27 +00:00
parent 1900dbebbb
commit c5dbbf9f4a
13 changed files with 2 additions and 1735 deletions

View File

@@ -152,5 +152,7 @@ Other Changes
7. backslash escape * in ssh command used in snappuller for zsh compatibility, SOLR-63
8. check solr return code in admin scripts, SOLR-62
9. Updated to Lucene 2.0 nightly build 2006-11-15, SVN revision 475069
10. Removed src/apps containing the legacy "SolrTest" app (hossman, SOLR-3)
2006/01/17 Solr open sourced, moves to Apache Incubator

View File

@@ -192,7 +192,6 @@
encoding="utf8"
classpathref="test.compile.classpath">
<src path="${src}/test" />
<src path="${src}/apps/SolrTest/src" />
</javac>
</target>
@@ -201,27 +200,10 @@
description="Runs the unit tests."
depends="compileTests, junit" />
<target name="legacyTest"
depends="compileTests" >
<!-- DEPRECATED: no description so it doesn't show up in project help -->
<java classname="SolrTest" fork="true" dir="src/apps/SolrTest" failonerror="true">
<arg line="-test newtest.txt -qargs qt=test"/>
<classpath>
<path refid="test.run.classpath" />
</classpath>
</java>
</target>
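<!-- (illustrative note, not part of the original build file: the legacyTest
     target above amounts to running, from the src/apps/SolrTest directory,
     java -cp <entries of test.run.classpath> SolrTest -test newtest.txt -qargs qt=test
     where the actual classpath contents depend on the build) -->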
<target name="junit" depends="compileTests">
<!-- no description so it doesn't show up in -projecthelp -->
<mkdir dir="${junit.output.dir}"/>
<!-- :TODO: either SolrCore needs a way to specify the
solrconfig.xml, or all tests are going to need to use the same
conf file; either way we need a specific run directory for
the tests.
-->
<junit printsummary="withOutAndErr"
haltonfailure="no"
errorProperty="tests.failed"

View File

@@ -1,39 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#change the query cache size to 3 and the autowarm size to 2 for this test
<commit/>
val_s:A
val_s:B
val_s:C
val_s:D
#B,C,D should be in cache
val_s:A
#miss, now C,D,A should be in cache
<commit/>
#should see old{lookups=5, hits=0, size=3}, new{size=2}
#now D,A should be autowarmed in new
val_s:C
#miss, now cache=D,A,C
<commit/>
#should see old{lookups=1, hits=0, size=3}, new{size=2}
#now A,C should be autowarmed in new
val_s:A
val_s:C
<commit/>
#should see old{lookups=2, hits=2, size=0}
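The expected statistics in the comments above follow from LRU eviction plus autowarming: on commit a new cache is created and seeded with the most recently used entries of the old one. A minimal sketch of that mechanism in Java (illustrative only; this is not Solr's actual LRUCache, and every name here is hypothetical):

import java.util.LinkedHashMap;
import java.util.Map;

/**
 * Minimal sketch of an LRU cache with autowarming; illustrative only,
 * not Solr's actual LRUCache. All names here are hypothetical.
 */
class AutowarmLRU<K, V> {
  private final int maxSize;
  private final LinkedHashMap<K, V> map;
  long lookups, hits;

  AutowarmLRU(int maxSize) {
    this.maxSize = maxSize;
    // accessOrder=true iterates least-recently-used first
    this.map = new LinkedHashMap<K, V>(16, 0.75f, true) {
      @Override
      protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        return size() > AutowarmLRU.this.maxSize;
      }
    };
  }

  synchronized V get(K key) {
    lookups++;
    V v = map.get(key);
    if (v != null) hits++;
    return v;
  }

  synchronized void put(K key, V value) {
    map.put(key, value);
  }

  /** On commit: seed a fresh cache with the most recently used entries. */
  synchronized AutowarmLRU<K, V> warmNew(int autowarmCount) {
    AutowarmLRU<K, V> fresh = new AutowarmLRU<>(maxSize);
    int skip = Math.max(0, map.size() - autowarmCount);
    int i = 0;
    for (Map.Entry<K, V> e : map.entrySet()) {
      if (i++ >= skip) fresh.put(e.getKey(), e.getValue());
    }
    return fresh;
  }
}

With size 3 and autowarmCount 2 this reproduces the expectations above: the first block performs five lookups with no hits (A was evicted before it was asked for again), and each new cache starts with the two most recently used entries of the old one.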

View File

@@ -1,19 +0,0 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
PATH=c:/cygwin/bin
c:/cygwin/bin/bash.exe -c "echo handler called... cwd=`pwd` MYVAR=%MYVAR% > commit.outfile"
exit 33
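rem (illustrative note, not part of the original script: this is the kind of
rem external hook that solrconfig.xml's RunExecutableListener can run on
rem postCommit; MYVAR would arrive via the listener's <arr name="env"> entries,
rem and the nonzero exit code gives the caller a return value to check)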

View File

@@ -1 +0,0 @@
userName:Alex;startDate top 2;

View File

@@ -1,571 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#compact the index, keep things from getting out of hand
<optimize/>
#test query
qlkciyopsbgzyvkylsjhchghjrdf %//result[@numFound="0"]
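#(format note, not an original line: each non-XML, non-comment line is a query;
# an optional %%name=val&name2=val2 token supplies extra request params, and
# each %xpath token is an XPath assertion that must match the XML response)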
#test escaping of ";"
<delete><id>42</id></delete>
<add><doc><field name="id">42</field><field name="val_s">aa;bb</field></doc></add>
<commit/>
id:42 AND val_s:aa\;bb %//*[@numFound="1"]
id:42 AND val_s:"aa;bb" %//*[@numFound="1"]
id:42 AND val_s:"aa" %//*[@numFound="0"]
#test allowDups default of false
<delete><id>42</id></delete>
<add><doc><field name="id">42</field><field name="val_s">AAA</field></doc></add>
<add><doc><field name="id">42</field><field name="val_s">BBB</field></doc></add>
<commit/>
id:42 %//*[@numFound="1"] %//str[.="BBB"]
<add><doc><field name="id">42</field><field name="val_s">CCC</field></doc></add>
<add><doc><field name="id">42</field><field name="val_s">DDD</field></doc></add>
<commit/>
id:42 %//*[@numFound="1"] %//str[.="DDD"]
<delete><id>42</id></delete>
#test deletes
<delete><query>id:[100 TO 110]</query></delete>
<add allowDups="false"><doc><field name="id">101</field></doc></add>
<add allowDups="false"><doc><field name="id">101</field></doc></add>
<add allowDups="true"><doc><field name="id">105</field></doc></add>
<add allowDups="false"><doc><field name="id">102</field></doc></add>
<add allowDups="true"><doc><field name="id">103</field></doc></add>
<add allowDups="false"><doc><field name="id">101</field></doc></add>
<commit/>
id:[100 TO 110] %//*[@numFound="4"]
<delete><id>102</id></delete>
<commit/>
id:[100 TO 110] %//*[@numFound="3"]
<delete><query>id:105</query></delete>
<commit/>
id:[100 TO 110] %//*[@numFound="2"]
<delete><query>id:[100 TO 110]</query></delete>
<commit/>
id:[100 TO 110] %//*[@numFound="0"]
#test range
<delete><id>44</id></delete>
<add allowDups="true"><doc><field name="id">44</field><field name="val_s">apple</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="val_s">banana</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="val_s">pear</field></doc></add>
<commit/>
val_s:[a TO z] %//*[@numFound="3"] %*[count(//doc)=3] %//*[@start="0"]
val_s:[a TO z] %%start=2&limit=5 %//*[@numFound="3"] %*[count(//doc)=1] %*//doc[1]/str[.="pear"] %//*[@start="2"]
val_s:[a TO z] %%start=3&limit=5 %//*[@numFound="3"] %*[count(//doc)=0]
val_s:[a TO z] %%start=4&limit=5 %//*[@numFound="3"] %*[count(//doc)=0]
val_s:[a TO z] %%start=25&limit=5 %//*[@numFound="3"] %*[count(//doc)=0]
val_s:[a TO z] %%start=0&limit=1 %//*[@numFound="3"] %*[count(//doc)=1] %*//doc[1]/str[.="apple"]
val_s:[a TO z] %%start=0&limit=2 %//*[@numFound="3"] %*[count(//doc)=2] %*//doc[2]/str[.="banana"]
val_s:[a TO z] %%start=1&limit=1 %//*[@numFound="3"] %*[count(//doc)=1] %*//doc[1]/str[.="banana"]
val_s:[a TO z] %%start=3&limit=1 %//*[@numFound="3"] %*[count(//doc)=0]
val_s:[a TO z] %%start=4&limit=1 %//*[@numFound="3"] %*[count(//doc)=0]
val_s:[a TO z] %%start=1&limit=0 %//*[@numFound="3"] %*[count(//doc)=0]
val_s:[a TO z] %%start=0&limit=0 %//*[@numFound="3"] %*[count(//doc)=0]
val_s:[a TO z];val_s asc %%start=0&limit=0 %//*[@numFound="3"] %*[count(//doc)=0]
val_s:[a TO z];val_s desc %%start=0&limit=0 %//*[@numFound="3"] %*[count(//doc)=0]
val_s:[a TO b] %//*[@numFound="1"]
val_s:[a TO cat] %//*[@numFound="2"]
val_s:[a TO *] %//*[@numFound="3"]
val_s:[* TO z] %//*[@numFound="3"]
val_s:[* TO *] %//*[@numFound="3"]
val_s:[apple TO pear] %//*[@numFound="3"]
val_s:[bear TO boar] %//*[@numFound="0"]
val_s:[a TO a] %//*[@numFound="0"]
val_s:[apple TO apple] %//*[@numFound="1"]
val_s:{apple TO pear} %//*[@numFound="1"]
val_s:{a TO z} %//*[@numFound="3"]
val_s:{* TO *} %//*[@numFound="3"]
#test rangequery within a boolean query
id:44 AND val_s:[a TO z] %//*[@numFound="3"]
id:44 OR val_s:[a TO z] %//*[@numFound="3"]
val_s:[a TO b] OR val_s:[b TO z] %//*[@numFound="3"]
+val_s:[a TO b] -val_s:[b TO z] %//*[@numFound="1"]
-val_s:[a TO b] +val_s:[b TO z] %//*[@numFound="2"]
val_s:[a TO c] AND val_s:[apple TO z] %//*[@numFound="2"]
val_s:[a TO c] AND val_s:[a TO apple] %//*[@numFound="1"]
id:44 AND (val_s:[a TO c] AND val_s:[a TO apple]) %//*[@numFound="1"]
(val_s:[apple TO apple] OR val_s:[a TO c]) AND (val_s:[b TO c] OR val_s:[b TO b]) %//*[@numFound="1"] %//str[.="banana"]
(val_s:[apple TO apple] AND val_s:[a TO c]) OR (val_s:[p TO z] AND val_s:[a TO z]) %//*[@numFound="2"] %//str[.="apple"] %//str[.="pear"]
#check for docs that appear more than once in a range
<add allowDups="true"><doc><field name="id">44</field><field name="val_s">apple</field><field name="val_s">banana</field></doc></add>
<commit/>
val_s:[* TO *] OR val_s:[* TO *] %//*[@numFound="4"]
val_s:[* TO *] AND val_s:[* TO *] %//*[@numFound="4"]
val_s:[* TO *] %//*[@numFound="4"]
#<delete><id>44</id></delete>
<add overwritePending="true" overwriteCommitted="true"><doc><field name="id">44</field><field name="text">red riding hood</field></doc></add>
<commit/>
id:44 AND red %//@numFound[.="1"] %*[count(//doc)=1]
id:44 AND ride %//@numFound[.="1"]
id:44 AND blue %//@numFound[.="0"]
#allow duplicates
<delete><id>44</id></delete>
<add allowDups="true" overwriteCommitted="false" overwritePending="false"><doc><field name="id">44</field><field name="text">red riding hood</field></doc></add>
<add allowDups="true" overwriteCommitted="false" overwritePending="false"><doc><field name="id">44</field><field name="text">big bad wolf</field></doc></add>
<commit/>
id:44 %//@numFound[.="2"]
id:44 AND red %//@numFound[.="1"] %*[count(//doc)=1]
id:44 AND wolf %//@numFound[.="1"] %*[count(//doc)=1]
+id:44 red wolf %//@numFound[.="2"]
#test removal of multiples w/o adding anything else
<delete><id>44</id></delete>
<commit/>
id:44 %//@numFound[.="0"]
#untokenized string type
<delete><id>44</id></delete>
<add><doc><field name="id">44</field><field name="ssto">and a 10.4 ?</field></doc></add>
<commit/>
id:44 %//str[.="and a 10.4 ?"]
<delete><id>44</id></delete>
<add><doc><field name="id">44</field><field name="sind">abc123</field></doc></add>
<commit/>
#TODO: how to search for something with spaces....
sind:abc123 %//@numFound[.="1"] %*[count(//@name[.="sind"])=0] %*[count(//@name[.="id"])=1]
<delete><id>44</id></delete>
<delete><id>44</id></delete>
<add><doc><field name="id">44</field><field name="sindsto">abc123</field></doc></add>
<commit/>
#TODO: how to search for something with spaces....
sindsto:abc123 %//str[.="abc123"]
#test output of multivalued fields
<delete><id>44</id></delete>
<add><doc><field name="id">44</field><field name="title">yonik3</field><field name="title" boost="2">yonik4</field></doc></add>
<commit></commit>
id:44 %//arr[@name="title"][./str="yonik3" and ./str="yonik4"] %*[count(//@name[.="title"])=1]
title:yonik3 %//@numFound[.>"0"]
title:yonik4 %//@numFound[.>"0"]
title:yonik5 %//@numFound[.="0"]
<delete><query>title:yonik4</query></delete>
<commit/>
id:44 %//@numFound[.="0"]
#not visible until commit
<delete><id>44</id></delete>
<commit/>
<add><doc><field name="id">44</field></doc></add>
id:44 %//@numFound[.="0"]
<commit/>
id:44 %//@numFound[.="1"]
#test configurable stop words
<delete><id>44</id></delete>
<add><doc><field name="id">44</field><field name="teststop">world stopworda view</field></doc></add>
<commit/>
+id:44 +teststop:world %//@numFound[.="1"]
teststop:stopworda %//@numFound[.="0"]
#test ignoreCase stop words
<delete><id>44</id></delete>
<add><doc><field name="id">44</field><field name="stopfilt">world AnD view</field></doc></add>
<commit/>
+id:44 +stopfilt:world %//@numFound[.="1"]
stopfilt:"and" %//@numFound[.="0"]
stopfilt:"AND" %//@numFound[.="0"]
stopfilt:"AnD" %//@numFound[.="0"]
#test dynamic field types
<delete fromPending="true" fromCommitted="true"><id>44</id></delete>
<add><doc><field name="id">44</field><field name="gack_i">51778</field><field name="t_name">cats</field></doc></add>
<commit/>
#test if the dyn fields got added
id:44 %*[count(//doc/*)>=3] %//int[@name="gack_i"][.="51778"] %//str[@name="t_name"][.="cats"]
#now test if we can query by a dynamic field (requires analyzer support)
t_name:cat %//str[@name="t_name" and .="cats"]
#check that deleteByQuery works for dynamic fields
<delete><query>t_name:cat</query></delete>
<commit/>
t_name:cat %//@numFound[.="0"]
#test that longest dynamic field match happens first
<add><doc><field name="id">44</field><field name="xaa">mystr</field><field name="xaaa">12321</field></doc></add>
<commit/>
id:44 %//str[@name="xaa"][.="mystr"] %//int[@name="xaaa"][.="12321"]
#test integer ranges and sorting
<delete><id>44</id></delete>
<add allowDups="true"><doc><field name="id">44</field><field name="num_i">1234567890</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_i">10</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_i">1</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_i">2</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_i">15</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_i">-1</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_i">-987654321</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_i">2147483647</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_i">-2147483648</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_i">0</field></doc></add>
<commit/>
id:44 %*[count(//doc)=10]
num_i:2147483647 %//@numFound[.="1"] %//int[.="2147483647"]
num_i:"-2147483648" %//@numFound[.="1"] %//int[.="-2147483648"]
id:44;num_i asc; %//doc[1]/int[.="-2147483648"] %//doc[last()]/int[.="2147483647"]
id:44;num_i desc; %//doc[1]/int[.="2147483647"] %//doc[last()]/int[.="-2147483648"]
num_i:[0 TO 9] %*[count(//doc)=3]
num_i:[-2147483648 TO 2147483647] %*[count(//doc)=10]
num_i:[-10 TO -1] %*[count(//doc)=1]
#test long ranges and sorting
<delete><id>44</id></delete>
<add allowDups="true"><doc><field name="id">44</field><field name="num_l">1234567890</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_l">10</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_l">1</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_l">2</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_l">15</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_l">-1</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_l">-987654321</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_l">9223372036854775807</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_l">-9223372036854775808</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_l">0</field></doc></add>
<commit/>
id:44 %*[count(//doc)=10]
num_l:9223372036854775807 %//@numFound[.="1"] %//long[.="9223372036854775807"]
num_l:"-9223372036854775808" %//@numFound[.="1"] %//long[.="-9223372036854775808"]
id:44;num_l asc; %//doc[1]/long[.="-9223372036854775808"] %//doc[last()]/long[.="9223372036854775807"]
id:44;num_l desc; %//doc[1]/long[.="9223372036854775807"] %//doc[last()]/long[.="-9223372036854775808"]
num_l:[-1 TO 9] %*[count(//doc)=4]
num_l:[-9223372036854775808 TO 9223372036854775807] %*[count(//doc)=10]
num_l:[-10 TO -1] %*[count(//doc)=1]
#test binary float ranges and sorting
<delete><id>44</id></delete>
<add allowDups="true"><doc><field name="id">44</field><field name="num_f">1.4142135</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_f">Infinity</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_f">-Infinity</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_f">NaN</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_f">2</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_f">-1</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_f">-987654321</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_f">-999999.99</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_f">-1e20</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_f">0</field></doc></add>
<commit/>
id:44 %*[count(//doc)=10]
num_f:Infinity %//@numFound[.="1"] %//float[.="Infinity"]
num_f:"-Infinity" %//@numFound[.="1"] %//float[.="-Infinity"]
num_f:"NaN" %//@numFound[.="1"] %//float[.="NaN"]
num_f:"-1e20" %//@numFound[.="1"]
id:44;num_f asc; %//doc[1]/float[.="-Infinity"] %//doc[last()]/float[.="NaN"]
id:44;num_f desc; %//doc[1]/float[.="NaN"] %//doc[last()]/float[.="-Infinity"]
num_f:[-1 TO 2] %*[count(//doc)=4]
num_f:[-Infinity TO Infinity] %*[count(//doc)=9]
#test binary double ranges and sorting
<delete><id>44</id></delete>
<add allowDups="true"><doc><field name="id">44</field><field name="num_d">1.4142135</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_d">Infinity</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_d">-Infinity</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_d">NaN</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_d">2</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_d">-1</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_d">1e-100</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_d">-999999.99</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_d">-1e100</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="num_d">0</field></doc></add>
<commit/>
id:44 %*[count(//doc)=10]
num_d:Infinity %//@numFound[.="1"] %//double[.="Infinity"]
num_d:"-Infinity" %//@numFound[.="1"] %//double[.="-Infinity"]
num_d:"NaN" %//@numFound[.="1"] %//double[.="NaN"]
num_d:"-1e100" %//@numFound[.="1"]
num_d:"1e-100" %//@numFound[.="1"]
id:44;num_d asc; %//doc[1]/double[.="-Infinity"] %//doc[last()]/double[.="NaN"]
id:44;num_d desc; %//doc[1]/double[.="NaN"] %//doc[last()]/double[.="-Infinity"]
num_d:[-1 TO 2] %*[count(//doc)=5]
num_d:[-Infinity TO Infinity] %*[count(//doc)=9]
#test sorting on multiple fields
<delete><id>44</id></delete>
<add allowDups="true"><doc><field name="id">44</field><field name="a_i">10</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="a_i">1</field><field name="b_i">100</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="a_i">-1</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="a_i">15</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="a_i">1</field><field name="b_i">50</field></doc></add>
<add allowDups="true"><doc><field name="id">44</field><field name="a_i">0</field></doc></add>
<commit/>
id:44 %*[count(//doc)=6]
id:44; a_i asc,b_i desc %*[count(//doc)=6] %//doc[3]/int[.="100"] %//doc[4]/int[.="50"]
id:44;a_i asc , b_i asc; %*[count(//doc)=6] %//doc[3]/int[.="50"] %//doc[4]/int[.="100"]
id:44;a_i asc; %*[count(//doc)=6] %//doc[1]/int[.="-1"] %//doc[last()]/int[.="15"]
id:44;a_i asc , score top; %*[count(//doc)=6] %//doc[1]/int[.="-1"] %//doc[last()]/int[.="15"]
id:44; score top , a_i top, b_i bottom ; %*[count(//doc)=6] %//doc[last()]/int[.="-1"] %//doc[1]/int[.="15"] %//doc[3]/int[.="50"] %//doc[4]/int[.="100"]
#test sorting with some docs missing the sort field
<delete><query>id_i:[1000 TO 1010]</query></delete>
<add allowDups="true"><doc><field name="id_i">1000</field><field name="a_i">1</field></doc></add>
<add allowDups="true"><doc><field name="id_i">1001</field><field name="a_i">10</field></doc></add>
<add allowDups="true"><doc><field name="id_i">1002</field><field name="a_i">1</field><field name="b_i">100</field></doc></add>
<add allowDups="true"><doc><field name="id_i">1003</field><field name="a_i">-1</field></doc></add>
<add allowDups="true"><doc><field name="id_i">1004</field><field name="a_i">15</field></doc></add>
<add allowDups="true"><doc><field name="id_i">1005</field><field name="a_i">1</field><field name="b_i">50</field></doc></add>
<add allowDups="true"><doc><field name="id_i">1006</field><field name="a_i">0</field></doc></add>
<commit/>
id_i:[1000 TO 1010] %*[count(//doc)=7]
id_i:[1000 TO 1010]; b_i asc %*[count(//doc)=7] %//doc[1]/int[.="50"] %//doc[2]/int[.="100"]
id_i:[1000 TO 1010]; b_i desc %*[count(//doc)=7] %//doc[1]/int[.="100"] %//doc[2]/int[.="50"]
id_i:[1000 TO 1010]; a_i asc,b_i desc %*[count(//doc)=7] %//doc[3]/int[.="100"] %//doc[4]/int[.="50"] %//doc[5]/int[.="1000"]
id_i:[1000 TO 1010]; a_i asc,b_i asc %*[count(//doc)=7] %//doc[3]/int[.="50"] %//doc[4]/int[.="100"] %//doc[5]/int[.="1000"]
#test prefix query
<delete><query>val_s:[* TO *]</query></delete>
<add><doc><field name="id">100</field><field name="val_s">apple</field></doc></add>
<add><doc><field name="id">101</field><field name="val_s">banana</field></doc></add>
<add><doc><field name="id">102</field><field name="val_s">apple</field></doc></add>
<add><doc><field name="id">103</field><field name="val_s">pearing</field></doc></add>
<add><doc><field name="id">104</field><field name="val_s">pear</field></doc></add>
<add><doc><field name="id">105</field><field name="val_s">appalling</field></doc></add>
<add><doc><field name="id">106</field><field name="val_s">pearson</field></doc></add>
<add><doc><field name="id">107</field><field name="val_s">port</field></doc></add>
<commit/>
val_s:a* %//*[@numFound="3"]
val_s:p* %//*[@numFound="4"]
#val_s:* %//*[@numFound="8"]
<delete><query>id:[100 TO 110]</query></delete>
#test copyField functionality
<add><doc><field name="id">42</field><field name="title">How Now4 brown Cows</field></doc></add>
<commit/>
id:42 AND title:Now %*[count(//doc)=0]
id:42 AND title_lettertok:Now %*[count(//doc)=1]
id:42 AND title:cow %*[count(//doc)=0]
id:42 AND title_stemmed:cow %*[count(//doc)=1]
id:42 AND text:cow %*[count(//doc)=1]
#test slop
<add><doc><field name="id">42</field><field name="text">foo bar</field></doc></add>
<commit/>
id:42 AND text:"foo bar" %*[count(//doc)=1]
id:42 AND text:"foo" %*[count(//doc)=1]
id:42 AND text:"bar" %*[count(//doc)=1]
id:42 AND text:"bar foo" %*[count(//doc)=0]
id:42 AND text:"bar foo"~2 %*[count(//doc)=1]
#intra-word delimiter testing (WordDelimiterFilter)
<add><doc><field name="id">42</field><field name="subword">foo bar</field></doc></add>
<commit/>
id:42 AND subword:"foo bar" %*[count(//doc)=1]
id:42 AND subword:"foo" %*[count(//doc)=1]
id:42 AND subword:"bar" %*[count(//doc)=1]
id:42 AND subword:"bar foo" %*[count(//doc)=0]
id:42 AND subword:"bar foo"~2 %*[count(//doc)=1]
id:42 AND subword:"foo/bar" %*[count(//doc)=1]
id:42 AND subword:"foobar" %*[count(//doc)=0]
<add><doc><field name="id">42</field><field name="subword">foo-bar</field></doc></add>
<commit/>
id:42 AND subword:"foo bar" %*[count(//doc)=1]
id:42 AND subword:"foo" %*[count(//doc)=1]
id:42 AND subword:"bar" %*[count(//doc)=1]
id:42 AND subword:"bar foo" %*[count(//doc)=0]
id:42 AND subword:"bar foo"~2 %*[count(//doc)=1]
id:42 AND subword:"foo/bar" %*[count(//doc)=1]
id:42 AND subword:foobar %*[count(//doc)=1]
<add><doc><field name="id">42</field><field name="subword">Canon PowerShot SD500 7MP</field></doc></add>
<commit/>
id:42 AND subword:"power-shot" %*[count(//doc)=1]
id:42 AND subword:"power shot sd 500" %*[count(//doc)=1]
id:42 AND subword:"powershot" %*[count(//doc)=1]
id:42 AND subword:"SD-500" %*[count(//doc)=1]
id:42 AND subword:"SD500" %*[count(//doc)=1]
id:42 AND subword:"SD500-7MP" %*[count(//doc)=1]
id:42 AND subword:"PowerShotSD500-7MP" %*[count(//doc)=1]
<add><doc><field name="id">42</field><field name="subword">Wi-Fi</field></doc></add>
<commit/>
id:42 AND subword:wifi %*[count(//doc)=1]
id:42 AND subword:wi+=fi %*[count(//doc)=1]
id:42 AND subword:wi+=fi %*[count(//doc)=1]
id:42 AND subword:WiFi %*[count(//doc)=1]
id:42 AND subword:"wi fi" %*[count(//doc)=1]
<add><doc><field name="id">42</field><field name="subword">'I.B.M' A's,B's,C's</field></doc></add>
<commit/>
id:42 AND subword:"'I.B.M.'" %*[count(//doc)=1]
id:42 AND subword:I.B.M %*[count(//doc)=1]
id:42 AND subword:IBM %*[count(//doc)=1]
id:42 AND subword:I--B--M %*[count(//doc)=1]
id:42 AND subword:"I B M" %*[count(//doc)=1]
id:42 AND subword:IBM's %*[count(//doc)=1]
id:42 AND subword:IBM'sx %*[count(//doc)=0]
#this one fails since IBM and ABC are separated by two tokens
#id:42 AND subword:IBM's-ABC's %*[count(//doc)=1]
id:42 AND subword:"IBM's-ABC's"~2 %*[count(//doc)=1]
id:42 AND subword:"A's B's-C's" %*[count(//doc)=1]
<add><doc><field name="id">42</field><field name="subword">Sony KDF-E50A10</field></doc></add>
<commit/>
#check for exact match:
# Sony KDF E/KDFE 50 A 10 (this is how it's indexed)
# Sony KDF E 50 A 10 (and how it's queried)
id:42 AND subword:"Sony KDF-E50A10" %*[count(//doc)=1]
id:42 AND subword:10 %*[count(//doc)=1]
id:42 AND subword:Sony %*[count(//doc)=1]
#this one fails without slop since Sony and KDFE have a token in between
#id:42 AND subword:SonyKDFE50A10 %*[count(//doc)=1]
id:42 AND subword:"SonyKDFE50A10"~10 %*[count(//doc)=1]
id:42 AND subword:"Sony KDF E-50-A-10" %*[count(//doc)=1]
<add><doc><field name="id">42</field><field name="subword">http://www.yahoo.com</field></doc></add>
<commit/>
id:42 AND subword:yahoo %*[count(//doc)=1]
id:42 AND subword:www.yahoo.com %*[count(//doc)=1]
id:42 AND subword:http://www.yahoo.com %*[count(//doc)=1]
<add><doc><field name="id">42</field><field name="subword">--Q 1-- W2 E-3 Ok xY 4R 5-T *6-Y- 7-8-- 10A-B</field></doc></add>
<commit/>
id:42 AND subword:Q %*[count(//doc)=1]
id:42 AND subword:1 %*[count(//doc)=1]
id:42 AND subword:"w 2" %*[count(//doc)=1]
id:42 AND subword:"e 3" %*[count(//doc)=1]
id:42 AND subword:"o k" %*[count(//doc)=0]
id:42 AND subword:"ok" %*[count(//doc)=1]
id:42 AND subword:"x y" %*[count(//doc)=1]
id:42 AND subword:"xy" %*[count(//doc)=1]
id:42 AND subword:"4 r" %*[count(//doc)=1]
id:42 AND subword:"5 t" %*[count(//doc)=1]
id:42 AND subword:"5 t" %*[count(//doc)=1]
id:42 AND subword:"6 y" %*[count(//doc)=1]
id:42 AND subword:"7 8" %*[count(//doc)=1]
id:42 AND subword:"78" %*[count(//doc)=1]
id:42 AND subword:"10 A+B" %*[count(//doc)=1]
<add><doc><field name="id">42</field><field name="subword">FooBarBaz</field></doc></add>
<add><doc><field name="id">42</field><field name="subword">FooBar10</field></doc></add>
<add><doc><field name="id">42</field><field name="subword">10FooBar</field></doc></add>
<add><doc><field name="id">42</field><field name="subword">BAZ</field></doc></add>
<add><doc><field name="id">42</field><field name="subword">10</field></doc></add>
<add><doc><field name="id">42</field><field name="subword">Mark, I found what's the problem! It turns to be from the latest schema. I found tons of exceptions in the resin.stdout that prevented the builder from performing. It's all coming from the WordDelimiterFilter which was just added to the latest schema: [2005-08-29 15:11:38.375] java.lang.IndexOutOfBoundsException: Index: 3, Size: 3 673804 [2005-08-29 15:11:38.375] at java.util.ArrayList.RangeCheck(ArrayList.java:547) 673805 [2005-08-29 15:11:38.375] at java.util.ArrayList.get(ArrayList.java:322) 673806 [2005-08-29 15:11:38.375] at solr.analysis.WordDelimiterFilter.addCombos(WordDelimiterFilter.java:349) 673807 [2005-08-29 15:11:38.375] at solr.analysis.WordDelimiterFilter.next(WordDelimiterFilter.java:325) 673808 [2005-08-29 15:11:38.375] at org.apache.lucene.analysis.LowerCaseFilter.next(LowerCaseFilter.java:32) 673809 [2005-08-29 15:11:38.375] at org.apache.lucene.analysis.StopFilter.next(StopFilter.java:98) 673810 [2005-08-29 15:11:38.375] at solr.EnglishPorterFilter.next(TokenizerFactory.java:163) 673811 [2005-08-29 15:11:38.375] at org.apache.lucene.index.DocumentWriter.invertDocument(DocumentWriter.java:143) 673812 [2005-08-29 15:11:38.375] at org.apache.lucene.index.DocumentWriter.addDocument(DocumentWriter.java:81) 673813 [2005-08-29 15:11:38.375] at org.apache.lucene.index.IndexWriter.addDocument(IndexWriter.java:307) 673814 [2005-08-29 15:11:38.375] at org.apache.lucene.index.IndexWriter.addDocument(IndexWriter.java:294) 673815 [2005-08-29 15:11:38.375] at solr.DirectUpdateHandler2.doAdd(DirectUpdateHandler2.java:170) 673816 [2005-08-29 15:11:38.375] at solr.DirectUpdateHandler2.overwriteBoth(DirectUpdateHandler2.java:317) 673817 [2005-08-29 15:11:38.375] at solr.DirectUpdateHandler2.addDoc(DirectUpdateHandler2.java:191) 673818 [2005-08-29 15:11:38.375] at solr.SolrCore.update(SolrCore.java:795) 673819 [2005-08-29 15:11:38.375] at solrserver.SolrServlet.doPost(SolrServlet.java:71) 673820 [2005-08-29 15:11:38.375] at javax.servlet.http.HttpServlet.service(HttpServlet.java:154) 673821 [2005-08-29 15:11:38.375] at javax.servlet.http.HttpServlet.service(HttpServlet.java:92) 673822 [2005-08-29 15:11:38.375] at com.caucho.server.dispatch.ServletFilterChain.doFilter(ServletFilterChain.java:99) 673823 [2005-08-29 15:11:38.375] at com.caucho.server.cache.CacheFilterChain.doFilter(CacheFilterChain.java:188) 673824 [2005-08-29 15:11:38.375] at com.caucho.server.webapp.WebAppFilterChain.doFilter(WebAppFilterChain.java:163) 673825 [2005-08-29 15:11:38.375] at com.caucho.server.dispatch.ServletInvocation.service(ServletInvocation.java:208) 673826 [2005-08-29 15:11:38.375] at com.caucho.server.http.HttpRequest.handleRequest(HttpRequest.java:259) 673827 [2005-08-29 15:11:38.375] at com.caucho.server.port.TcpConnection.run(TcpConnection.java:363) 673828 [2005-08-29 15:11:38.375] at com.caucho.util.ThreadPool.runTasks(ThreadPool.java:490) 673829 [2005-08-29 15:11:38.375] at com.caucho.util.ThreadPool.run(ThreadPool.java:423) 673830 [2005-08-29 15:11:38.375] at java.lang.Thread.run(Thread.java:595) With the previous schema I'm able to perform a successful full build: http://c12-ssa-dev40-so-mas1.cnet.com:5078/select/?stylesheet=q=docTypeversion=2.0start=0rows=10indent=on Do you want to rollback to the previous schema version</field></doc></add>
#
<delete fromPending="true" fromCommitted="true"><id>44</id></delete>
<add><doc><field name="id">44</field><field name="fname_s">Yonik</field><field name="here_b">true</field><field name="iq_l">10000000000</field><field name="description_t">software engineer</field><field name="ego_d">1e100</field><field name="pi_f">3.1415962</field><field name="when_dt">2005-03-18T01:14:34Z</field><field name="arr_f">1.414213562</field><field name="arr_f">.999</field></doc></add>
<commit/>
id:44
id:44 %%fl=fname_s,arr_f %//str[.="Yonik"] %//float[.="1.4142135"]
id:44 %%fl= %//str[.="Yonik"] %//float[.="1.4142135"]
#test addition of score field
id:44 %%fl=score %//str[.="Yonik"] %//float[.="1.4142135"] %//float[@name="score"] %*[count(//doc/*)=10]
id:44 %%fl=*,score %//str[.="Yonik"] %//float[.="1.4142135"] %//float[@name="score"] %*[count(//doc/*)=10]
id:44 %%fl=* %//str[.="Yonik"] %//float[.="1.4142135"] %*[count(//doc/*)>=9]
#test maxScore
id:44 %%fl=score %//result[@maxScore>0]
id:44;id desc; %%fl=score %//result[@maxScore>0]
id:44; %%fl=score %//@maxScore = //doc/float[@name="score"]
id:44;id desc; %%fl=score %//@maxScore = //doc/float[@name="score"]
id:44;id desc; %%fl=score&limit=0 %//result[@maxScore>0]
# test schema field attribute inheritance and overriding
<delete><id>44</id></delete>
<add><doc><field name="id">44</field><field name="shouldbestored">hi</field></doc></add>
<commit/>
id:44 %//*[@name="shouldbestored"]
+id:44 +shouldbestored:hi %//*[@numFound="1"]
<delete><id>44</id></delete>
<add><doc><field name="id">44</field><field name="shouldbeunstored">hi</field></doc></add>
<commit/>
id:44 %not(//*[@name="shouldbeunstored"])
+id:44 +shouldbeunstored:hi %//*[@numFound="1"]
<delete><id>44</id></delete>
<add><doc><field name="id">44</field><field name="shouldbeunindexed">hi</field></doc></add>
<commit/>
id:44 %//*[@name="shouldbeunindexed"]
# this should result in an error... how to check for that?
#+id:44 +shouldbeunindexed:hi %//*[@numFound="0"]
#test spaces between XML elements because that can introduce extra XML events that
#can mess up parsing (and it has in the past)
<delete> <id>44</id> </delete>
<add> <doc> <field name="id">44</field> <field name="shouldbestored">hi</field> </doc> </add>
<commit />
#test adding multiple docs per add command
<delete><query>id:[0 TO 99]</query></delete>
<add><doc><field name="id">1</field></doc><doc><field name="id">2</field></doc></add>
<commit/>
id:[0 TO 99] %//*[@numFound="2"]
#test synonym filter
<delete><query>id:[10 TO 100]</query></delete>
<add><doc><field name="id">10</field><field name="syn">a</field></doc></add>
<add><doc><field name="id">11</field><field name="syn">b</field></doc></add>
<add><doc><field name="id">12</field><field name="syn">c</field></doc></add>
<add><doc><field name="id">13</field><field name="syn">foo</field></doc></add>
<commit/>
id:10 AND syn:a %//*[@numFound="1"]
id:10 AND syn:aa %//*[@numFound="1"]
id:11 AND syn:b %//*[@numFound="1"]
id:11 AND syn:b1 %//*[@numFound="1"]
id:11 AND syn:b2 %//*[@numFound="1"]
id:12 AND syn:c %//*[@numFound="1"]
id:12 AND syn:c1 %//*[@numFound="1"]
id:12 AND syn:c2 %//*[@numFound="1"]
id:13 AND syn:foo %//*[@numFound="1"]
id:13 AND syn:bar %//*[@numFound="1"]
id:13 AND syn:baz %//*[@numFound="1"]
#test position increment gaps between field values
<delete><id>44</id></delete>
<delete><id>45</id></delete>
<add><doc><field name="id">44</field><field name="textgap">aa bb cc</field><field name="textgap">dd ee ff</field></doc></add>
<add><doc><field name="id">45</field><field name="text">aa bb cc</field><field name="text">dd ee ff</field></doc></add>
<commit/>
+id:44 +textgap:"aa bb cc" %//*[@numFound="1"]
+id:44 +textgap:"dd ee ff" %//*[@numFound="1"]
+id:44 +textgap:"cc dd" %//*[@numFound="0"]
+id:44 +textgap:"cc dd"~100 %//*[@numFound="1"]
+id:44 +textgap:"bb cc dd ee"~90 %//*[@numFound="0"]
+id:44 +textgap:"bb cc dd ee"~100 %//*[@numFound="1"]
+id:45 +text:"cc dd" %//*[@numFound="1"]
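#(illustrative note, not an original line: with positionIncrementGap="100" on
# the textgap type, the second value's tokens start roughly 100 positions after
# the first value's, so "cc dd" only matches once the phrase slop spans the
# gap, while the plain "text" field has no gap and matches the phrase directly)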
#trigger output of custom value test
values %%qt=test
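#(illustrative note, not an original line: qt=test dispatches this request to
# the requestHandler named "test" registered in solrconfig.xml)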

View File

@@ -1,16 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cats
ridding

View File

@@ -1,363 +0,0 @@
<?xml version="1.0" ?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- The Solr schema file. This file should be named "schema.xml" and
should be located where the classloader for the Solr webapp can find it.
$Id$
$Source: /cvs/main/searching/solr-configs/test/WEB-INF/classes/schema.xml,v $
$Name: $
-->
<schema name="test" version="1.0">
<types>
<!-- field type definitions... note that the "name" attribute is
just a label to be used by field definitions. The "class"
attribute and any other attributes determine the real type and
behavior of the fieldtype.
-->
<!-- numeric field types that store and index the text
value verbatim (and hence don't sort correctly or support range queries).
These are provided more for backward compatibility, allowing one
to create a schema that matches an existing lucene index.
-->
<fieldtype name="integer" class="solr.IntField"/>
<fieldtype name="long" class="solr.LongField"/>
<fieldtype name="float" class="solr.FloatField"/>
<fieldtype name="double" class="solr.DoubleField"/>
<!-- numeric field types that manipulate the value into
a string value that isn't human-readable in its internal form,
but sorts correctly and supports range queries.
If sortMissingLast="true" then a sort on this field will cause documents
without the field to come after documents with the field,
regardless of the requested sort order.
If sortMissingFirst="true" then a sort on this field will cause documents
without the field to come before documents with the field,
regardless of the requested sort order.
If sortMissingLast="false" and sortMissingFirst="false" (the default),
then default lucene sorting will be used which places docs without the field
first in an ascending sort and last in a descending sort.
-->
<fieldtype name="sint" class="solr.SortableIntField" sortMissingLast="true"/>
<fieldtype name="slong" class="solr.SortableLongField" sortMissingLast="true"/>
<fieldtype name="sfloat" class="solr.SortableFloatField" sortMissingLast="true"/>
<fieldtype name="sdouble" class="solr.SortableDoubleField" sortMissingLast="true"/>
<!-- bcd versions of the sortable numeric types may provide smaller
storage space and support very large numbers.
-->
<fieldtype name="bcdint" class="solr.BCDIntField" sortMissingLast="true"/>
<fieldtype name="bcdlong" class="solr.BCDLongField" sortMissingLast="true"/>
<fieldtype name="bcdstr" class="solr.BCDStrField" sortMissingLast="true"/>
<fieldtype name="boolean" class="solr.BoolField" sortMissingLast="true"/>
<fieldtype name="string" class="solr.StrField" sortMissingLast="true"/>
<!-- format for date is 1995-12-31T23:59:59.999Z and only the fractional
seconds part (.999) is optional.
-->
<fieldtype name="date" class="solr.DateField" sortMissingLast="true"/>
<!-- solr.TextField allows the specification of custom
text analyzers specified as a tokenizer and a list
of token filters.
-->
<fieldtype name="text" class="solr.TextField">
<analyzer>
<tokenizer class="solr.StandardTokenizerFactory"/>
<filter class="solr.StandardFilterFactory"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.StopFilterFactory"/>
<!-- lucene PorterStemFilterFactory deprecated
<filter class="solr.PorterStemFilterFactory"/>
-->
<filter class="solr.EnglishPorterFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="nametext" class="solr.TextField">
<analyzer class="org.apache.lucene.analysis.WhitespaceAnalyzer"/>
</fieldtype>
<fieldtype name="teststop" class="solr.TextField">
<analyzer>
<tokenizer class="solr.LowerCaseTokenizerFactory"/>
<filter class="solr.StandardFilterFactory"/>
<filter class="solr.StopFilterFactory" words="stopwords.txt"/>
</analyzer>
</fieldtype>
<!-- fieldtypes in this section isolate tokenizers and tokenfilters for testing -->
<fieldtype name="lowertok" class="solr.TextField">
<analyzer><tokenizer class="solr.LowerCaseTokenizerFactory"/></analyzer>
</fieldtype>
<fieldtype name="standardtok" class="solr.TextField">
<analyzer><tokenizer class="solr.StandardTokenizerFactory"/></analyzer>
</fieldtype>
<fieldtype name="lettertok" class="solr.TextField">
<analyzer><tokenizer class="solr.LetterTokenizerFactory"/></analyzer>
</fieldtype>
<fieldtype name="whitetok" class="solr.TextField">
<analyzer><tokenizer class="solr.WhitespaceTokenizerFactory"/></analyzer>
</fieldtype>
<fieldtype name="HTMLstandardtok" class="solr.TextField">
<analyzer><tokenizer class="solr.HTMLStripStandardTokenizerFactory"/></analyzer>
</fieldtype>
<fieldtype name="HTMLwhitetok" class="solr.TextField">
<analyzer><tokenizer class="solr.HTMLStripWhitespaceTokenizerFactory"/></analyzer>
</fieldtype>
<fieldtype name="standardtokfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.StandardTokenizerFactory"/>
<filter class="solr.StandardFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="standardfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.StandardFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="lowerfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="porterfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldtype>
<!-- fieldtype name="snowballfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.SnowballPorterFilterFactory"/>
</analyzer>
</fieldtype -->
<fieldtype name="engporterfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.EnglishPorterFilterFactory"/>
</analyzer>
</fieldtype>
<fieldtype name="custengporterfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.EnglishPorterFilterFactory" protected="protwords.txt"/>
</analyzer>
</fieldtype>
<fieldtype name="stopfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.StopFilterFactory" ignoreCase="true"/>
</analyzer>
</fieldtype>
<fieldtype name="custstopfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.StopFilterFactory" words="stopwords.txt"/>
</analyzer>
</fieldtype>
<fieldtype name="lengthfilt" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.LengthFilterFactory" min="2" max="5"/>
</analyzer>
</fieldtype>
<fieldtype name="subword" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.StopFilterFactory"/>
<filter class="solr.EnglishPorterFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.StopFilterFactory"/>
<filter class="solr.EnglishPorterFilterFactory"/>
</analyzer>
</fieldtype>
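<!-- (illustrative note, not part of the original schema: with the asymmetric
settings above, index time adds catenated tokens, e.g. "PowerShot" yields
power, shot, and powershot, while query time only splits, so queries like
"power-shot", "powershot", and "power shot" can all match the same
indexed document) -->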
<!-- more flexible in matching skus, but more chance of a false match -->
<fieldtype name="skutype1" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldtype>
<!-- less flexible in matching skus, but less chance of a false match -->
<fieldtype name="skutype2" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldtype>
<!-- for testing the synonym filter -->
<fieldtype name="syn" class="solr.TextField">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter name="syn" class="solr.SynonymFilterFactory" synonyms="synonyms.txt"/>
</analyzer>
</fieldtype>
<fieldtype name="unstored" class="solr.StrField" indexed="true" stored="false"/>
<fieldtype name="textgap" class="solr.TextField" multiValued="true" positionIncrementGap="100">
<analyzer>
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldtype>
</types>
<fields>
<field name="id" type="integer" indexed="true" stored="true"/>
<field name="name" type="nametext" indexed="true" stored="true"/>
<field name="text" type="text" indexed="true" stored="false"/>
<field name="subject" type="text" indexed="true" stored="true"/>
<field name="title" type="nametext" indexed="true" stored="true"/>
<field name="weight" type="float" indexed="true" stored="true"/>
<field name="bday" type="date" indexed="true" stored="true"/>
<field name="title_stemmed" type="text" indexed="true" stored="false"/>
<field name="title_lettertok" type="lettertok" indexed="true" stored="false"/>
<field name="syn" type="syn" indexed="true" stored="true"/>
<!-- to test property inheritance and overriding -->
<field name="shouldbeunstored" type="unstored" />
<field name="shouldbestored" type="unstored" stored="true"/>
<field name="shouldbeunindexed" type="unstored" indexed="false" stored="true"/>
<!-- test different combinations of indexed and stored -->
<field name="bind" type="boolean" indexed="true" stored="false"/>
<field name="bsto" type="boolean" indexed="false" stored="true"/>
<field name="bindsto" type="boolean" indexed="true" stored="true"/>
<field name="isto" type="integer" indexed="false" stored="true"/>
<field name="iind" type="integer" indexed="true" stored="false"/>
<field name="ssto" type="string" indexed="false" stored="true"/>
<field name="sind" type="string" indexed="true" stored="false"/>
<field name="sindsto" type="string" indexed="true" stored="true"/>
<!-- fields to test individual tokenizers and tokenfilters -->
<field name="teststop" type="teststop" indexed="true" stored="true"/>
<field name="lowertok" type="lowertok" indexed="true" stored="true"/>
<field name="standardtok" type="standardtok" indexed="true" stored="true"/>
<field name="HTMLstandardtok" type="HTMLstandardtok" indexed="true" stored="true"/>
<field name="lettertok" type="lettertok" indexed="true" stored="true"/>
<field name="whitetok" type="whitetok" indexed="true" stored="true"/>
<field name="HTMLwhitetok" type="HTMLwhitetok" indexed="true" stored="true"/>
<field name="standardtokfilt" type="standardtokfilt" indexed="true" stored="true"/>
<field name="standardfilt" type="standardfilt" indexed="true" stored="true"/>
<field name="lowerfilt" type="lowerfilt" indexed="true" stored="true"/>
<field name="porterfilt" type="porterfilt" indexed="true" stored="true"/>
<field name="engporterfilt" type="engporterfilt" indexed="true" stored="true"/>
<field name="custengporterfilt" type="custengporterfilt" indexed="true" stored="true"/>
<field name="stopfilt" type="stopfilt" indexed="true" stored="true"/>
<field name="custstopfilt" type="custstopfilt" indexed="true" stored="true"/>
<field name="lengthfilt" type="lengthfilt" indexed="true" stored="true"/>
<field name="subword" type="subword" indexed="true" stored="true"/>
<field name="sku1" type="skutype1" indexed="true" stored="true"/>
<field name="sku2" type="skutype2" indexed="true" stored="true"/>
<field name="textgap" type="textgap" indexed="true" stored="true"/>
<!-- Dynamic field definitions. If a field name is not found, dynamicFields
will be used if the name matches any of the patterns.
RESTRICTION: the glob-like pattern in the name attribute must have
a "*" only at the start or the end.
EXAMPLE: name="*_i" will match any field ending in _i (like myid_i, z_i)
Longer patterns will be matched first. If equal-size patterns
both match, the first one appearing in the schema will be used.
-->
<dynamicField name="*_i" type="sint" indexed="true" stored="true"/>
<dynamicField name="*_s" type="string" indexed="true" stored="true"/>
<dynamicField name="*_l" type="slong" indexed="true" stored="true"/>
<dynamicField name="*_t" type="text" indexed="true" stored="true"/>
<dynamicField name="*_b" type="boolean" indexed="true" stored="true"/>
<dynamicField name="*_f" type="sfloat" indexed="true" stored="true"/>
<dynamicField name="*_d" type="sdouble" indexed="true" stored="true"/>
<dynamicField name="*_dt" type="date" indexed="true" stored="true"/>
<dynamicField name="*_bcd" type="bcdstr" indexed="true" stored="true"/>
<dynamicField name="*_sI" type="string" indexed="true" stored="false"/>
<dynamicField name="*_sS" type="string" indexed="false" stored="true"/>
<dynamicField name="t_*" type="text" indexed="true" stored="true"/>
<!-- for testing to ensure that longer patterns are matched first -->
<dynamicField name="*aa" type="string" indexed="true" stored="true"/>
<dynamicField name="*aaa" type="integer" indexed="false" stored="true"/>
</fields>
<defaultSearchField>text</defaultSearchField>
<uniqueKey>id</uniqueKey>
<!-- copyField commands copy one field to another at the time a document
is added to the index. It's used either to index the same field different
ways, or to add multiple fields to the same field for easier/faster searching.
-->
<copyField source="title" dest="title_stemmed"/>
<copyField source="title" dest="title_lettertok"/>
<copyField source="title" dest="text"/>
<copyField source="subject" dest="text"/>
<!-- Similarity is the scoring routine for each document vs a query.
A custom similarity may be specified here, but the default is fine
for most applications.
-->
<!-- <similarity class="org.apache.lucene.search.DefaultSimilarity"/> -->
</schema>

View File

@@ -1,208 +0,0 @@
<?xml version="1.0" ?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- $Id$
$Source$
$Name$
-->
<config>
<!-- Used to specify an alternate directory to hold all index data.
It defaults to "index" if not present, and should probably
not be changed if replication is in use. -->
<!--
<indexDir>index</indexDir>
-->
<indexDefaults>
<!-- Values here affect all index writers and act as a default
unless overridden. -->
<useCompoundFile>false</useCompoundFile>
<mergeFactor>10</mergeFactor>
<maxBufferedDocs>1000</maxBufferedDocs>
<maxMergeDocs>2147483647</maxMergeDocs>
<maxFieldLength>10000</maxFieldLength>
<!-- these are global... can't currently override per index -->
<writeLockTimeout>1000</writeLockTimeout>
<commitLockTimeout>10000</commitLockTimeout>
</indexDefaults>
<mainIndex>
<!-- lucene options specific to the main on-disk lucene index -->
<useCompoundFile>false</useCompoundFile>
<mergeFactor>10</mergeFactor>
<maxBufferedDocs>1000</maxBufferedDocs>
<maxMergeDocs>2147483647</maxMergeDocs>
<maxFieldLength>10000</maxFieldLength>
<unlockOnStartup>true</unlockOnStartup>
</mainIndex>
<updateHandler class="solr.DirectUpdateHandler2">
<!-- autocommit pending docs if certain criteria are met -->
<autocommit> <!-- NOTE: autocommit not implemented yet -->
<maxDocs>10000</maxDocs>
<maxSec>3600</maxSec>
</autocommit>
<!-- represents a lower bound on the frequency that commits may
occur (in seconds). NOTE: not yet implemented
-->
<commitIntervalLowerBound>0</commitIntervalLowerBound>
<!-- The RunExecutableListener executes an external command.
exe - the name of the executable to run
dir - dir to use as the current working directory. default="."
wait - the calling thread waits until the executable returns. default="true"
args - the arguments to pass to the program. default=nothing
env - environment variables to set. default=nothing
-->
<!-- A postCommit event is fired after every commit
<listener event="postCommit" class="solr.RunExecutableListener">
<str name="exe">/var/opt/resin3/__PORT__/scripts/solr/snapshooter</str>
<str name="dir">/var/opt/resin3/__PORT__</str>
<bool name="wait">true</bool>
<arr name="args"> <str>arg1</str> <str>arg2</str> </arr>
<arr name="env"> <str>MYVAR=val1</str> </arr>
</listener>
-->
</updateHandler>
<query>
<!-- Maximum number of clauses in a boolean query... can affect
range or wildcard queries that expand to big boolean
queries. An exception is thrown if exceeded.
-->
<maxBooleanClauses>1024</maxBooleanClauses>
<!-- Cache specification for Filters or DocSets - unordered set of *all* documents
that match a particular query.
-->
<filterCache
class="solr.search.LRUCache"
size="512"
initialSize="512"
autowarmCount="256"/>
<queryResultCache
class="solr.search.LRUCache"
size="512"
initialSize="512"
autowarmCount="1024"/>
<documentCache
class="solr.search.LRUCache"
size="512"
initialSize="512"
autowarmCount="0"/>
<!--
<cache name="myUserCache"
class="solr.search.LRUCache"
size="4096"
initialSize="1024"
autowarmCount="1024"
regenerator="MyRegenerator"
/>
-->
<useFilterForSortedQuery>true</useFilterForSortedQuery>
<queryResultWindowSize>10</queryResultWindowSize>
<!-- set maxSize low to exercise both types of sets -->
<HashDocSet maxSize="3" loadFactor="0.75"/>
<!-- boolToFilterOptimizer converts boolean clauses with zero boost
into cached filters if the number of docs selected by the clause exceeds
the threshold (represented as a fraction of the total index)
-->
<boolTofilterOptimizer enabled="true" cacheSize="32" threshold=".05"/>
<!-- a newSearcher event is fired whenever a new searcher is being prepared
and there is a current searcher handling requests (aka registered). -->
<!-- QuerySenderListener takes an array of NamedList and executes a
local query request for each NamedList in sequence. -->
<!--
<listener event="newSearcher" class="solr.QuerySenderListener">
<arr name="queries">
<lst> <str name="q">solr</str> <str name="start">0</str> <str name="rows">10</str> </lst>
<lst> <str name="q">rocks</str> <str name="start">0</str> <str name="rows">10</str> </lst>
</arr>
</listener>
-->
<!-- a firstSearcher event is fired whenever a new searcher is being
prepared but there is no current registered searcher to handle
requests or to gain prewarming data from. -->
<!--
<listener event="firstSearcher" class="solr.QuerySenderListener">
<arr name="queries">
<lst> <str name="q">fast_warm</str> <str name="start">0</str> <str name="rows">10</str> </lst>
</arr>
</listener>
-->
</query>
<!-- HashDocSet, configured inside <query> above, is an alternate set
representation that uses an integer hash to store filters (sets of docids).
If the set cardinality is <= maxSize elements, a HashDocSet is used instead
of the bitset-based BitDocSet. -->
<!-- requestHandler plugins... incoming queries will be dispatched to the
correct handler based on the qt (query type) param matching the
name of registered handlers.
The "standard" request handler is the default and will be used if qt
is not specified in the request.
-->
<requestHandler name="standard" class="solr.StandardRequestHandler" />
<requestHandler name="old" class="solr.tst.OldRequestHandler" >
<int name="myparam">1000</int>
<float name="ratio">1.4142135</float>
<arr name="myarr"><int>1</int><int>2</int></arr>
<str>foo</str>
</requestHandler>
<requestHandler name="oldagain" class="solr.tst.OldRequestHandler" >
<lst name="lst1"> <str name="op">sqrt</str> <int name="val">2</int> </lst>
<lst name="lst2"> <str name="op">log</str> <float name="val">10</float> </lst>
</requestHandler>
<requestHandler name="test" class="solr.tst.TestRequestHandler" />
<admin>
<defaultQuery>solr</defaultQuery>
<gettableFiles>solrconfig.xml schema.xml admin-extra.html</gettableFiles>
</admin>
</config>

View File

@ -1,16 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
stopworda
stopwordb

View File

@ -1,20 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
a => aa
b => b1 b2
c => c1,c2
a\=>a => b\=>b
a\,a => b\,b
foo,bar,baz

View File

@ -1,398 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.solr.core.SolrCore;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.request.*;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathFactory;
import javax.xml.xpath.XPathConstants;
import java.io.*;
import java.util.*;
import java.util.logging.Logger;
import java.util.logging.Level;
import java.util.logging.Handler;
import java.util.logging.ConsoleHandler;
import org.w3c.dom.Document;
/**
* User: Yonik Seeley
* Date: Aug 16, 2004
*/
public class SolrTest extends Thread {
static SolrCore core;
static String[] requestDict;
static String[] updateDict;
static String[] testDict;
static List<Integer> testDictLineno;
static List<Integer> lineno;
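// Reads one request per line from filename, skipping lines of length <= 1
// and recording each kept line's 1-based line number in the parallel
// 'lineno' list for error reporting.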
public static String[] readDict(String filename) throws IOException {
BufferedReader br = new BufferedReader(new FileReader(filename));
ArrayList<String> lst = new ArrayList<String>(1024);
lineno = new ArrayList<Integer>(1024);
String line;
int lineNum=0;
while ((line = br.readLine())!=null) {
lineNum++;
if (line.length() <= 1) continue;
lst.add(line);
lineno.add(lineNum);
}
br.close();
return lst.toArray(new String[lst.size()]);
}
public static boolean verbose=false;
static boolean doValidate=true;
static int countdown;
static synchronized boolean runAgain() {
return countdown-- > 0;
}
// statistics per client
int numReq=0;
int numErr=0;
int numBodyChars=0;
boolean isWriter=false;
boolean sequenceTest=false;
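// Two modes: with -test, replay the test dictionary in order and validate
// each response; otherwise issue random queries (or updates, for writers)
// until the shared request countdown is exhausted.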
public void run() {
if (sequenceTest) {
try {
for (int i=0; i<testDict.length; i++) {
String s = testDict[i];
int lineno = testDictLineno.get(i);
String req;
String test=null;
String params=null;
char[] resp;
if (s.length()<2 || s.startsWith("#")) continue; // skip blank and comment lines
System.out.println("LINE=" + lineno + " EXECUTING " + s);
int endQuery = s.length();
int startParams = s.indexOf("%%");
int endParams = s.length();
int endTests = s.length();
if (startParams > 0) {
endQuery = startParams;
endParams = s.length();
}
int startTests = s.indexOf('%', startParams+2);
if (startTests > 0) {
if (endQuery == s.length()) endQuery = startTests;
endParams = startTests;
}
req = s.substring(0,endQuery).trim();
if (startParams > 0) params = s.substring(startParams+2,endParams).trim();
if (startTests > 0) test = s.substring(startTests+1,endTests).trim();
// System.out.println("###req=" + req);
// System.out.println("###params=" + params);
// System.out.println("###tests=" + test);
if (req.startsWith("<")) {
resp = doUpdate(req);
} else {
resp = doReq(req,params);
}
if (doValidate) {
validate(req,test,resp);
} else {
System.out.println("#### no validation performed");
}
}
} catch (RuntimeException e) {
numErr++;
throw(e);
}
System.out.println(">>>>>>>>>>>>>>>>>>>>>>>> SUCCESS <<<<<<<<<<<<<<<<<<<<<<<<<<");
}
else {
while(runAgain()) {
if (isWriter) doUpdate(updateDict[(int)(Math.random()*updateDict.length)]);
else doReq(requestDict[(int)(Math.random()*requestDict.length)], null);
}
}
}
private DocumentBuilder builder;
private XPath xpath = XPathFactory.newInstance().newXPath();
{
try {
builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
} catch (ParserConfigurationException e) {
e.printStackTrace();
}
}
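// Parses the response and evaluates each %-separated XPath expression as a
// boolean against it, failing the run on the first expression that is false.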
private void validate(String req, String test, char[] resp) {
if (test==null || test.length()==0) return;
Document document=null;
try {
// the response declares UTF-8 in its XML prolog, so re-encode the
// char[] as UTF-8 bytes before handing it to the parser.
document = builder.parse(new ByteArrayInputStream(new String(resp).getBytes("UTF-8")));
// document = builder.parse(new String(resp));
} catch (Exception e) {
System.out.println("ERROR parsing '" + new String(resp) + "'");
throw new RuntimeException(e);
}
String[] tests = test.split("%");
for (String xp : tests) {
Boolean bool=false;
xp=xp.trim();
try {
bool = (Boolean) xpath.evaluate(xp, document, XPathConstants.BOOLEAN);
} catch (Exception e) {
System.out.println("##################ERROR EVALUATING XPATH '" + xp + "'");
throw new RuntimeException(e);
}
if (!bool) {
System.out.println("##################ERROR");
System.out.println("req="+req);
System.out.println("xp="+xp);
throw new RuntimeException("test failed.");
}
}
}
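// Feeds an XML update request (add/delete/commit/optimize) straight to the
// core and returns the raw response characters.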
public char[] doUpdate(String req) {
try {
// String lucene=updateDict[(int)(Math.random()*updateDict.length)];
String lucene=req;
StringReader ureq = new StringReader(lucene);
CharArrayWriter writer = new CharArrayWriter(32000);
core.update(ureq, writer);
if (verbose) System.out.println("UPDATE RESPONSE:'" + writer + "'");
// if (verbose) System.out.println("BODY chars read:" + writer.size());
this.numBodyChars+=writer.size();
this.numReq++;
return writer.toCharArray();
} catch (Exception e) {
this.numErr++;
e.printStackTrace();
}
return null;
}
static XMLResponseWriter xmlwriter = new XMLResponseWriter();
static SolrRequestHandler handler =
// new OldRequestHandler();
new StandardRequestHandler();
static String qargs = null; // default query arguments
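// Executes a query through the local core: splits 'params' into name=value
// pairs, routes start/limit/qt specially, and serializes the response with
// the XMLResponseWriter.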
public char[] doReq(String req, String params) {
int start=0;
int limit=10;
String handler="standard";
//handler="test";
Map<String,String> args = new HashMap<String,String>();
args.put("indent", "on");
args.put("debugQuery", "on");
args.put("version", "2.0");
if (qargs != null) {
if (params==null) params=qargs;
else params = qargs + '&' + params;
}
if (params != null) {
String[] plist = params.split("&");
for (String decl : plist) {
String[] nv = decl.split("=");
if (nv.length==1) {
nv = new String[] { nv[0], "" };
}
if (nv[0].equals("start")) {
start=Integer.parseInt(nv[1]);
}
else if (nv[0].equals("limit")) {
limit=Integer.parseInt(nv[1]);
}
else if (nv[0].equals("qt")) {
handler = nv[1];
} else {
args.put(nv[0], nv[1]);
}
}
}
try {
// String lucene=requestDict[(int)(Math.random()*requestDict.length)];
String lucene=req;
CharArrayWriter writer = new CharArrayWriter(32000);
System.out.println("start="+start+" limit="+limit+" handler="+handler);
LocalSolrQueryRequest qreq = new LocalSolrQueryRequest(core,lucene,handler,start,limit,args);
SolrQueryResponse qrsp = new SolrQueryResponse();
try {
core.execute(qreq,qrsp);
if (qrsp.getException() != null) throw qrsp.getException();
// handler.handleRequest(qreq,qrsp);
xmlwriter.write(writer,qreq,qrsp);
} finally {
qreq.close();
}
if (verbose) System.out.println("GOT:'" + writer + "'");
if (verbose) System.out.println("BODY chars read:" + writer.size());
this.numBodyChars+=writer.size();
this.numReq++;
return writer.toCharArray();
} catch (Exception e) {
this.numErr++;
e.printStackTrace();
}
return null;
}
public static void main(String[] args) throws Exception {
int readers=1;
int requests=1;
int writers=0;
Logger log = Logger.getLogger("org.apache.solr");
log.setUseParentHandlers(false);
log.setLevel(Level.FINEST);
Handler handler = new ConsoleHandler();
handler.setLevel(Level.FINEST);
log.addHandler(handler);
String filename="dict.txt";
String updateFilename="update_dict.txt";
String dataDir =null;
String schemaFile=null;
String testFile=null;
int i=0; String arg;
while (i < args.length && args[i].startsWith("-")) {
arg = args[i++];
if (arg.equals("-verbose")) {
verbose=true;
} else if (arg.equals("-dict")) {
filename=args[i++];
} else if (arg.equals("-data")) {
dataDir =args[i++];
} else if (arg.equals("-readers")) {
readers=Integer.parseInt(args[i++]);
} else if (arg.equals("-numRequests")) {
requests=Integer.parseInt(args[i++]);
} else if (arg.equals("-writers")) {
writers=Integer.parseInt(args[i++]);
b_writers=true;
} else if (arg.equals("-schema")) {
schemaFile=args[i++];
} else if (arg.equals("-test")) {
testFile=args[i++];
} else if (arg.equals("-noValidate")) {
doValidate=false;
} else if (arg.equals("-qargs")) {
qargs=args[i++];
} else {
System.out.println("Unknown option: " + arg);
return;
}
}
try {
IndexSchema schema = schemaFile==null ? null : new IndexSchema(schemaFile);
countdown = requests;
core=new SolrCore(dataDir,schema);
try {
if (testFile != null) {
testDict = readDict(testFile);
testDictLineno = lineno;
} else {
if (readers > 0) requestDict = readDict(filename);
if (writers > 0) updateDict = readDict(updateFilename);
}
} catch (IOException e) {
e.printStackTrace();
System.out.println("Can't read "+filename);
return;
}
SolrTest[] clients = new SolrTest[readers+writers];
for (i=0; i<readers; i++) {
clients[i] = new SolrTest();
if (testFile != null) clients[i].sequenceTest=true;
clients[i].start();
}
for (i=readers; i<readers+writers; i++) {
clients[i] = new SolrTest();
clients[i].isWriter = true;
clients[i].start();
}
for (i=0; i<readers; i++) {
clients[i].join();
}
for (i=readers; i<readers+writers; i++) {
clients[i].join();
}
core.close();
core=null;
if (testFile!=null) {
if (clients[0].numErr == 0) {
System.out.println(">>>>>>>>>>>>>>>>>>>>>>>> SUCCESS <<<<<<<<<<<<<<<<<<<<<<<<<<");
} else {
System.exit(1);
}
}
} catch (Throwable e) {
if (core!=null) {try{core.close();} catch (Throwable th){}}
e.printStackTrace();
System.exit(1);
}
}
}

View File

@ -1,66 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<delete><query>id:[* TO *]</query></delete>
<optimize/>
<delete><query>id:[0 TO 9]</query></delete>
<commit/>
<add><doc><field name="id">3</field></doc></add>
<add><doc><field name="id">1</field></doc></add>
<add><doc><field name="id">7</field></doc></add>
<add><doc><field name="id">0</field></doc></add>
<add><doc><field name="id">5</field></doc></add>
<commit/>
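#_val_:"..." is function-query syntax: documents are scored by the named
#function over the field rather than by relevance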
_val_:"linear(id,2,3)"
+id:[ 0 TO 5 ] +_val_:"linear(id,2,3)"^0.1
+id:[ 0 TO 5 ] +_val_:"linear(rord(id),2,3)"^0.1
+id:[ 0 TO 5 ] +_val_:"recip(rord(id),2,3,4)"^0.1
+id:[ 0 TO 5 ] +_val_:"linear(linear(rord(id),6,5),2,3)"^0.1
#<delete><query>id:[0 TO 9]</query></delete>
#<commit/>
<delete><query>weight:[* TO *]</query></delete>
<commit/>
<add><doc><field name="id">10</field><field name="weight">3</field></doc></add>
<add><doc><field name="id">11</field><field name="weight">1</field></doc></add>
<add><doc><field name="id">12</field><field name="weight">7</field></doc></add>
<add><doc><field name="id">13</field><field name="weight">0</field></doc></add>
<add><doc><field name="id">14</field><field name="weight">5</field></doc></add>
<commit/>
+id:[10 TO 14] +_val_:weight^2
+id:[10 TO 14] +_val_:"ord(weight)"^2
+id:[10 TO 14] +_val_:"rord(weight)"^2
#+id:[10 TO 14] +weight:_int_^2
#+id:[10 TO 14] +weight:_ord_^2
#+id:[10 TO 14] +weight:_rord_^2
<add><doc><field name="id">10</field><field name="q_i">2</field></doc></add>
<add><doc><field name="id">11</field><field name="q_f">3.14159</field></doc></add>
<add><doc><field name="id">12</field><field name="q_l">900</field></doc></add>
<add><doc><field name="id">13</field><field name="q_d">.1</field></doc></add>
<add><doc><field name="id">14</field><field name="q_dt">2005-01-01T01:01:01Z</field></doc></add>
<commit/>
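# each line below: request %%params %xpath-assertion (each assertion must
# evaluate true against the XML response)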
_val_:q_i %%fl=score %//@maxScore = //doc/float[@name="score"] %//doc/float[@name="score"] = "2.0"
_val_:q_f %%fl=score %//@maxScore = //doc/float[@name="score"] %//doc/float[@name="score"] = "3.14159"
_val_:q_l %%fl=score %//@maxScore = //doc/float[@name="score"] %//doc/float[@name="score"] = "900.0"
_val_:q_d %%fl=score %//@maxScore = //doc/float[@name="score"] %//doc/float[@name="score"] = "0.1"
_val_:q_dt %%fl=score %//@maxScore = //doc/float[@name="score"] %//doc/float[@name="score"] = "1.0"