LUCENE-6732: Remove tabs in JS and XML files

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1695395 13f79535-47bb-0310-9956-ffa450edef68
Uwe Schindler 2015-08-11 23:00:39 +00:00
parent f0654e355e
commit 1331a57e3d
117 changed files with 19777 additions and 19777 deletions


@ -131,8 +131,7 @@
import org.apache.tools.ant.BuildException;
def extensions = [
-'java', 'jflex', 'py', 'pl', 'g4', 'jj', 'html'
-// TODO: js, xml
+'java', 'jflex', 'py', 'pl', 'g4', 'jj', 'html', 'js', 'xml'
];
def invalidPatterns = [
(~$/@author\b/$) : '@author javadoc tag',
@ -170,6 +169,7 @@
include(name: '*.' + it)
}
exclude(name: '**/build/**')
+exclude(name: 'build.xml') // ourselves :-)
}
}.each{ f ->
def text = f.getText('UTF-8');
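
The hunk above is from the build's source-pattern validation script: it lists the file extensions to scan (this commit adds 'js' and 'xml'), walks every matching file outside build directories, and checks each file's text against forbidden patterns such as tab characters. A rough standalone Java sketch of the same idea, with illustrative names only, not code from the Lucene build:

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class TabChecker {
  // The extension list the Groovy snippet checks after this commit.
  private static final Set<String> EXTENSIONS = new HashSet<>(Arrays.asList(
      "java", "jflex", "py", "pl", "g4", "jj", "html", "js", "xml"));

  public static void main(String[] args) throws IOException {
    // (The real task also excludes build/ directories and the build file itself.)
    Files.walk(Paths.get(args[0]))
        .filter(Files::isRegularFile)
        .filter(TabChecker::hasCheckedExtension)
        .forEach(TabChecker::reportTabs);
  }

  private static boolean hasCheckedExtension(Path p) {
    String name = p.getFileName().toString();
    int dot = name.lastIndexOf('.');
    return dot >= 0 && EXTENSIONS.contains(name.substring(dot + 1));
  }

  private static void reportTabs(Path file) {
    try {
      List<String> lines = Files.readAllLines(file, StandardCharsets.UTF_8);
      for (int i = 0; i < lines.size(); i++) {
        if (lines.get(i).indexOf('\t') >= 0) {
          System.out.println(file + ":" + (i + 1) + ": tab character");
        }
      }
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    }
  }
}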


@ -21,7 +21,7 @@
<option name="PACKAGES_TO_USE_IMPORT_ON_DEMAND">
<value />
</option>
<option name="IMPORT_LAYOUT_TABLE">
<option name="IMPORT_LAYOUT_TABLE">
<value>
<package name="javax" withSubpackages="true" static="false" />
<package name="java" withSubpackages="true" static="false" />


@ -21,12 +21,12 @@
<description>
Additional Analyzers
- common: Additional Analyzers
- icu: Analyzers that use functionality from ICU
- kuromoji: Japanese Morphological Analyzer
- morfologik: Morfologik Stemmer
- smartcn: Smart Analyzer for Simplified Chinese Text
- stempel: Algorithmic Stemmer for Polish
- uima: UIMA Analysis module
</description>
@ -120,15 +120,15 @@
<target name="-dist-maven">
<forall-analyzers target="-dist-maven"/>
</target>
<target name="-validate-maven-dependencies">
<forall-analyzers target="-validate-maven-dependencies"/>
</target>
<target name="javadocs">
<forall-analyzers target="javadocs"/>
</target>
<target name="javadocs-index.html">
<forall-analyzers target="javadocs-index.html"/>
@ -149,7 +149,7 @@
<target name="-append-module-dependencies-properties">
<forall-analyzers target="-append-module-dependencies-properties"/>
</target>
<target name="check-forbidden-apis">
<forall-analyzers target="check-forbidden-apis"/>
</target>


@ -28,7 +28,7 @@
<property name="rat.additional-includes" value="src/tools/**"/>
<import file="../analysis-module-build.xml"/>
<target name="jflex" depends="-install-jflex,clean-jflex,-jflex-StandardAnalyzer,-jflex-UAX29URLEmailTokenizer,
-jflex-wiki-tokenizer,-jflex-HTMLStripCharFilter"/>
@ -122,7 +122,7 @@
fork="true"
failonerror="true">
<classpath>
<pathelement location="${build.dir}/classes/tools"/>
</classpath>
<arg value="${tld.zones}"/>
<arg value="${tld.output}"/>


@ -59,7 +59,7 @@
</target>
<property name="gennorm2.src.files"
value="nfc.txt nfkc.txt nfkc_cf.txt BasicFoldings.txt DiacriticFolding.txt DingbatFolding.txt HanRadicalFolding.txt NativeDigitFolding.txt"/>
value="nfc.txt nfkc.txt nfkc_cf.txt BasicFoldings.txt DiacriticFolding.txt DingbatFolding.txt HanRadicalFolding.txt NativeDigitFolding.txt"/>
<property name="gennorm2.tmp" value="${build.dir}/gennorm2/utr30.tmp"/>
<property name="gennorm2.dst" value="${resources.dir}/org/apache/lucene/analysis/icu/utr30.nrm"/>
<target name="gennorm2" depends="gen-utr30-data-files">
@ -85,7 +85,7 @@ are part of the ICU4C package. See http://site.icu-project.org/ </echo>
<property name="rbbi.src.dir" location="src/data/uax29"/>
<property name="rbbi.dst.dir" location="${resources.dir}/org/apache/lucene/analysis/icu/segmentation"/>
<target name="genrbbi" depends="compile-tools">
<mkdir dir="${rbbi.dst.dir}"/>
<java
@ -94,8 +94,8 @@ are part of the ICU4C package. See http://site.icu-project.org/ </echo>
fork="true"
failonerror="true">
<classpath>
<path refid="icujar"/>
<pathelement location="${build.dir}/classes/tools"/>
<path refid="icujar"/>
<pathelement location="${build.dir}/classes/tools"/>
</classpath>
<assertions>
<enable package="org.apache.lucene"/>


@ -29,6 +29,6 @@
<pathelement path="${analyzers-common.jar}"/>
<path refid="base.classpath"/>
</path>
<target name="compile-core" depends="jar-analyzers-common, common.compile-core"/>
</project>


@ -22,7 +22,7 @@
<description>
Analysis integration with Apache UIMA
</description>
<property name="tests.userdir" value="src/test-files"/>
<!-- TODO: why is this limited to one JVM? -->
<property name="tests.jvms.override" value="1" />


@ -46,9 +46,9 @@
<available file="temp/enwiki-20070527-pages-articles.xml.bz2" property="enwiki.exists"/>
<available file="temp/enwiki-20070527-pages-articles.xml" property="enwiki.expanded"/>
<available file="${working.dir}/enwiki.txt" property="enwiki.extracted"/>
<available file="temp/${top.100k.words.archive.filename}"
<available file="temp/${top.100k.words.archive.filename}"
property="top.100k.words.archive.present"/>
<available file="${working.dir}/top100k-out"
<available file="${working.dir}/top100k-out"
property="top.100k.word.files.expanded"/>
</target>
@ -137,27 +137,27 @@
<untar src="temp/mini_newsgroups.tar" dest="${working.dir}"/>
</target>
<property name="top.100k.words.archive.filename"
value="top.100k.words.de.en.fr.uk.wikipedia.2009-11.tar.bz2"/>
<property name="top.100k.words.archive.base.url"
value="http://people.apache.org/~rmuir/wikipedia"/>
<target name="get-top-100k-words-archive" unless="top.100k.words.archive.present">
<mkdir dir="temp"/>
<get src="${top.100k.words.archive.base.url}/${top.100k.words.archive.filename}"
dest="temp/${top.100k.words.archive.filename}"/>
</target>
<target name="expand-top-100k-word-files" unless="top.100k.word.files.expanded">
<mkdir dir="${working.dir}/top100k-out"/>
<untar src="temp/${top.100k.words.archive.filename}"
overwrite="true" compression="bzip2" dest="${working.dir}/top100k-out"/>
</target>
<target name="top-100k-wiki-word-files" depends="check-files">
<mkdir dir="${working.dir}"/>
<antcall target="get-top-100k-words-archive"/>
<antcall target="expand-top-100k-word-files"/>
</target>
<property name="top.100k.words.archive.filename"
value="top.100k.words.de.en.fr.uk.wikipedia.2009-11.tar.bz2"/>
<property name="top.100k.words.archive.base.url"
value="http://people.apache.org/~rmuir/wikipedia"/>
<target name="get-top-100k-words-archive" unless="top.100k.words.archive.present">
<mkdir dir="temp"/>
<get src="${top.100k.words.archive.base.url}/${top.100k.words.archive.filename}"
dest="temp/${top.100k.words.archive.filename}"/>
</target>
<target name="expand-top-100k-word-files" unless="top.100k.word.files.expanded">
<mkdir dir="${working.dir}/top100k-out"/>
<untar src="temp/${top.100k.words.archive.filename}"
overwrite="true" compression="bzip2" dest="${working.dir}/top100k-out"/>
</target>
<target name="top-100k-wiki-word-files" depends="check-files">
<mkdir dir="${working.dir}"/>
<antcall target="get-top-100k-words-archive"/>
<antcall target="expand-top-100k-word-files"/>
</target>
<target name="get-files" depends="check-files">
<mkdir dir="temp"/>
<antcall target="get-reuters"/>
@ -222,44 +222,44 @@
</java>
</target>
<property name="collation.alg.file" location="conf/collation.alg"/>
<property name="collation.output.file"
value="${working.dir}/collation.benchmark.output.txt"/>
<property name="collation.jira.output.file"
value="${working.dir}/collation.bm2jira.output.txt"/>
<path id="collation.runtime.classpath">
<path refid="run.classpath"/>
<property name="collation.alg.file" location="conf/collation.alg"/>
<property name="collation.output.file"
value="${working.dir}/collation.benchmark.output.txt"/>
<property name="collation.jira.output.file"
value="${working.dir}/collation.bm2jira.output.txt"/>
<path id="collation.runtime.classpath">
<path refid="run.classpath"/>
<pathelement path="${analyzers-icu.jar}"/>
</path>
<target name="collation" depends="compile,jar-analyzers-icu,top-100k-wiki-word-files">
<echo>Running benchmark with alg file: ${collation.alg.file}</echo>
<java fork="true" classname="org.apache.lucene.benchmark.byTask.Benchmark"
maxmemory="${task.mem}" output="${collation.output.file}">
<classpath refid="collation.runtime.classpath"/>
<arg file="${collation.alg.file}"/>
</java>
<echo>Benchmark output is in file: ${collation.output.file}</echo>
<echo>Converting to JIRA table format...</echo>
<exec executable="${perl.exe}" output="${collation.jira.output.file}" failonerror="true">
<arg value="-CSD"/>
<arg value="scripts/collation.bm2jira.pl"/>
<arg value="${collation.output.file}"/>
</exec>
<echo>Benchmark output in JIRA table format is in file: ${collation.jira.output.file}</echo>
</target>
<arg value="scripts/collation.bm2jira.pl"/>
<arg value="${collation.output.file}"/>
</exec>
<echo>Benchmark output in JIRA table format is in file: ${collation.jira.output.file}</echo>
</target>
<property name="shingle.alg.file" location="conf/shingle.alg"/>
<property name="shingle.output.file"
value="${working.dir}/shingle.benchmark.output.txt"/>
<property name="shingle.jira.output.file"
value="${working.dir}/shingle.bm2jira.output.txt"/>
<path id="shingle.runtime.classpath">
<path refid="run.classpath"/>
</path>
<target name="shingle" depends="compile,get-files">
<echo>Running benchmark with alg file: ${shingle.alg.file}</echo>
<java fork="true" classname="org.apache.lucene.benchmark.byTask.Benchmark"


@ -78,7 +78,7 @@
<!-- Validation here depends on compile-tools: but we want to compile modules' tools too -->
<target name="compile-tools" depends="common.compile-tools">
<modules-crawl target="compile-tools" failonerror="true"/>
</target>
<target name="check-licenses" depends="compile-tools,resolve,load-custom-tasks" description="Validate license stuff.">
@ -97,7 +97,7 @@
<lib-versions-check-macro dir="${common.dir}/.."
centralized.versions.file="${common.dir}/ivy-versions.properties"
ivy.settings.file="${common.dir}/ivy-settings.xml"
ivy.resolution-cache.dir="${ivy.resolution-cache.dir}"
common.build.dir="${common.build.dir}"
ignore.conflicts.file="${common.dir}/ivy-ignore-conflicts.properties"/>
</target>
@ -237,7 +237,7 @@
<fileset dir="site/html"/>
</copy>
</target>
<target name="javadocs-modules" description="Generate javadoc for modules classes">
<modules-crawl target="javadocs"
failonerror="true"/>
@ -281,7 +281,7 @@
<patternset refid="binary.build.dist.patterns"/>
</zipfileset>
</zip>
<make-checksums file="${dist.dir}/lucene-${version}.zip"/>
</target>
<!-- ================================================================== -->
@ -435,7 +435,7 @@
<modules-crawl target="-validate-maven-dependencies"/>
</sequential>
</target>
<!-- ================================================================== -->
<!-- support for signing the artifacts using gpg -->
<!-- ================================================================== -->


@ -221,7 +221,7 @@
<include name="**/lib/*.jar"/>
</fileset>
</path>
<property name="changes.src.dir" location="${common.dir}/site/changes"/>
<property name="changes.target.dir" location="${common.dir}/build/docs/changes"/>
@ -555,7 +555,7 @@
</pathconvert>
<macrodef name="m2-deploy" description="Builds a Maven artifact">
<element name="artifact-attachments" optional="yes"/>
<element name="artifact-attachments" optional="yes"/>
<element name="parent-poms" optional="yes"/>
<element name="credentials" optional="yes"/>
<attribute name="pom.xml"/>
@ -613,7 +613,7 @@
</macrodef>
<macrodef name="build-manifest" description="Builds a manifest file">
<attribute name="title"/>
<attribute name="title"/>
<attribute name="implementation.title"/>
<attribute name="manifest.file" default="${manifest.file}"/>
<element name="additional-manifest-attributes" optional="true"/>
@ -666,11 +666,11 @@
</manifest>
</sequential>
</macrodef>
<macrodef name="jarify" description="Builds a JAR file">
<attribute name="basedir" default="${build.dir}/classes/java"/>
<attribute name="destfile" default="${build.dir}/${final.name}.jar"/>
<attribute name="title" default="Lucene Search Engine: ${ant.project.name}"/>
<attribute name="basedir" default="${build.dir}/classes/java"/>
<attribute name="destfile" default="${build.dir}/${final.name}.jar"/>
<attribute name="title" default="Lucene Search Engine: ${ant.project.name}"/>
<attribute name="excludes" default="**/pom.xml,**/*.iml"/>
<attribute name="metainf.source.dir" default="${common.dir}"/>
<attribute name="implementation.title" default="org.apache.lucene"/>
@ -685,7 +685,7 @@
<jarify-additional-manifest-attributes />
</additional-manifest-attributes>
</build-manifest>
<jar destfile="@{destfile}"
basedir="@{basedir}"
manifest="@{manifest.file}"
@ -703,7 +703,7 @@
<attribute name="module-src-name" default="@{name}"/>
<sequential>
<uptodate property="@{property}" targetfile="@{jarfile}">
<srcfiles dir="${common.dir}/@{module-src-name}/src/java" includes="**/*.java"/>
</uptodate>
</sequential>
</macrodef>
@ -711,7 +711,7 @@
<property name="lucene-core.jar" value="${common.dir}/build/core/lucene-core-${version}.jar"/>
<target name="check-lucene-core-uptodate" unless="lucene-core.uptodate">
<uptodate property="lucene-core.uptodate" targetfile="${lucene-core.jar}">
<srcfiles dir="${common.dir}/core/src/java" includes="**/*.java"/>
</uptodate>
</target>
<target name="jar-lucene-core" unless="lucene-core.uptodate" depends="check-lucene-core-uptodate">
@ -794,12 +794,12 @@
</target>
<macrodef name="compile-test-macro" description="Compiles junit tests.">
<attribute name="srcdir"/>
<attribute name="destdir"/>
<attribute name="test.classpath"/>
<attribute name="srcdir"/>
<attribute name="destdir"/>
<attribute name="test.classpath"/>
<attribute name="javac.source" default="${javac.source}"/>
<attribute name="javac.target" default="${javac.target}"/>
<sequential>
<compile
srcdir="@{srcdir}"
destdir="@{destdir}"
@ -812,7 +812,7 @@
<copy todir="@{destdir}">
<fileset dir="@{srcdir}" excludes="**/*.java"/>
</copy>
</sequential>
</macrodef>
<target name="test-updatecache" description="Overwrite tests' timings cache for balancing." depends="install-junit4-taskdef">
@ -1626,7 +1626,7 @@ ${tests-output}/junit4-*.suites - per-JVM executed suites
<!-- TODO, this is really unintuitive how we depend on a target that does not exist -->
<target name="javadocs">
<fail message="You must redefine the javadocs task to do something!!!!!"/>
</target>
<target name="install-maven-tasks" unless="maven-tasks.uptodate" depends="ivy-availability-check,ivy-configure">
@ -1821,7 +1821,7 @@ ${ant.project.name}.test.dependencies=${test.classpath.list}
</target>
<target name="rat-sources" depends="rat-sources-typedef"
description="runs the tasks over source and test files">
description="runs the tasks over source and test files">
<!-- create a temp file for the log to go to -->
<tempfile property="rat.sources.logfile"
prefix="rat"
@ -2052,7 +2052,7 @@ ${ant.project.name}.test.dependencies=${test.classpath.list}
<macrodef name="invoke-javadoc">
<element name="sources" optional="yes"/>
<attribute name="destdir"/>
<attribute name="title" default="${Name} ${version} API"/>
<attribute name="title" default="${Name} ${version} API"/>
<attribute name="overview" default="${src.dir}/overview.html"/>
<attribute name="linksource" default="no"/>
<sequential>
@ -2085,7 +2085,7 @@ ${ant.project.name}.test.dependencies=${test.classpath.list}
description="WARNING: This API is experimental and might change in incompatible ways in the next release."/>
<tag name="lucene.internal"
description="NOTE: This API is for internal purposes only and might change in incompatible ways in the next release."/>
<link offline="true" packagelistLoc="${javadoc.dir}"/>
<link offline="true" packagelistLoc="${javadoc.dir}"/>
<link offline="true" href="${javadoc.link}" packagelistLoc="${javadoc.packagelist.dir}/java8"/>
<bottom><![CDATA[
<i>Copyright &copy; ${year} Apache Software Foundation. All Rights Reserved.</i>
@ -2104,9 +2104,9 @@ ${ant.project.name}.test.dependencies=${test.classpath.list}
})();
</script>
]]></bottom>
<sources />
<classpath refid="javadoc.classpath"/>
<arg line="${javadoc.doclint.args}"/>
</javadoc>
@ -2429,7 +2429,7 @@ ${ant.project.name}.test.dependencies=${test.classpath.list}
public final class PegDownFilter extends ChainableReaderFilter {
@Override
public String filter(String markdownSource) {
PegDownProcessor processor = new PegDownProcessor(
Extensions.ABBREVIATIONS | Extensions.AUTOLINKS |
Extensions.FENCED_CODE_BLOCKS | Extensions.SMARTS
@ -2479,13 +2479,13 @@ ${ant.project.name}.test.dependencies=${test.classpath.list}
</macrodef>
<target name="regenerate"/>
<macrodef name="check-broken-links">
<attribute name="dir"/>
<sequential>
<exec dir="." executable="${python32.exe}" failonerror="true">
<!-- Tell Python not to write any bytecode cache into the filesystem: -->
<arg value="-B"/>
<arg value="${dev-tools.dir}/scripts/checkJavadocLinks.py"/>
<arg value="@{dir}"/>
</exec>
@ -2497,8 +2497,8 @@ ${ant.project.name}.test.dependencies=${test.classpath.list}
<attribute name="level" default="class"/>
<sequential>
<exec dir="." executable="${python32.exe}" failonerror="true">
<!-- Tell Python not to write any bytecode cache into the filesystem: -->
<arg value="-B"/>
<arg value="${dev-tools.dir}/scripts/checkJavaDocs.py"/>
<arg value="@{dir}"/>
<arg value="@{level}"/>


@ -65,20 +65,20 @@
<target name="-dist-maven" depends="-dist-maven-src-java"/>
<macrodef name="createLevAutomaton">
<attribute name="n"/>
<sequential>
<attribute name="n"/>
<sequential>
<exec dir="src/java/org/apache/lucene/util/automaton"
executable="${python.exe}" failonerror="true">
<!-- Tell Python not to write any bytecode cache into the filesystem: -->
<arg value="-B"/>
<arg value="createLevAutomata.py"/>
<arg value="@{n}"/>
<arg value="True"/>
</exec>
<exec dir="src/java/org/apache/lucene/util/automaton"
executable="${python.exe}" failonerror="true">
<!-- Tell Python not to write any bytecode cache into the filesystem: -->
<arg value="-B"/>
<arg value="createLevAutomata.py"/>
<arg value="@{n}"/>
<arg value="False"/>


@ -47,9 +47,9 @@
<links>
<link href="../analyzers-common"/>
<link href="../queryparser"/>
<link href="../queries"/>
<link href="../facet"/>
<link href="../expressions"/>
<link href="../queries"/>
<link href="../facet"/>
<link href="../expressions"/>
</links>
</invoke-module-javadoc>
</target>


@ -16,34 +16,34 @@
limitations under the License.
-->
<web-app id="WebApp_ID" version="2.4" xmlns="http://java.sun.com/xml/ns/j2ee" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://java.sun.com/xml/ns/j2ee http://java.sun.com/xml/ns/j2ee/web-app_2_4.xsd">
<display-name>
LuceneXmlQueryWebDemo</display-name>
<servlet>
<description>
Servlet demonstrating XMLQueryParser</description>
<display-name>
FormBasedXmlQueryDemo</display-name>
<servlet-name>FormBasedXmlQueryDemo</servlet-name>
<servlet-class>
org.apache.lucene.xmlparser.webdemo.FormBasedXmlQueryDemo</servlet-class>
<init-param>
<description>
Name of query file held in /WEB-INF</description>
<param-name>xslFile</param-name>
<param-value>query.xsl</param-value>
</init-param>
<init-param>
<description>
Default field used in standard Lucene QueryParser used in UserQuery tag</description>
<param-name>defaultStandardQueryParserField</param-name>
<param-value>jobDescription</param-value>
</init-param>
</servlet>
<servlet-mapping>
<servlet-name>FormBasedXmlQueryDemo</servlet-name>
<url-pattern>/FormBasedXmlQueryDemo</url-pattern>
</servlet-mapping>
<welcome-file-list>
<welcome-file>index.jsp</welcome-file>
</welcome-file-list>
</web-app>


@ -63,7 +63,7 @@
<target name="javadocs" depends="compile-core,javadocs-lucene-core,check-javadocs-uptodate"
unless="javadocs-uptodate-${name}">
<invoke-module-javadoc/>
</target>
<macrodef name="invoke-module-javadoc">
<!-- additional links for dependencies to other modules -->
@ -74,7 +74,7 @@
<mkdir dir="${javadoc.dir}/${name}"/>
<invoke-javadoc
destdir="${javadoc.dir}/${name}"
title="${Name} ${version} ${name} API"
title="${Name} ${version} ${name} API"
linksource="@{linksource}">
<sources>
<link href="../core/"/>
@ -130,7 +130,7 @@
</ant>
<property name="queryparser-javadocs.uptodate" value="true"/>
</target>
<property name="join.jar" value="${common.dir}/build/join/lucene-join-${version}.jar"/>
<target name="check-join-uptodate" unless="join.uptodate">
<module-uptodate name="join" jarfile="${join.jar}" property="join.uptodate"/>
@ -138,9 +138,9 @@
<target name="jar-join" unless="join.uptodate" depends="check-join-uptodate">
<ant dir="${common.dir}/join" target="jar-core" inheritAll="false">
<propertyset refid="uptodate.and.compiled.properties"/>
</ant>
<property name="join.uptodate" value="true"/>
</target>
<property name="join-javadoc.jar" value="${common.dir}/build/join/lucene-join-${version}-javadoc.jar"/>
<target name="check-join-javadocs-uptodate" unless="join-javadocs.uptodate">
@ -180,7 +180,7 @@
<module-uptodate name="queries" jarfile="${queries.jar}" property="queries.uptodate"/>
</target>
<target name="jar-queries" unless="queries.uptodate" depends="check-queries-uptodate">
<ant dir="${common.dir}/queries" target="jar-core" inheritAll="false">
<ant dir="${common.dir}/queries" target="jar-core" inheritAll="false">
<propertyset refid="uptodate.and.compiled.properties"/>
</ant>
<property name="queries.uptodate" value="true"/>
@ -268,7 +268,7 @@
<module-uptodate name="analysis/icu" jarfile="${analyzers-icu.jar}" property="analyzers-icu.uptodate"/>
</target>
<target name="jar-analyzers-icu" unless="analyzers-icu.uptodate" depends="check-analyzers-icu-uptodate">
<ant dir="${common.dir}/analysis/icu" target="jar-core" inheritAll="false">
<propertyset refid="uptodate.and.compiled.properties"/>
</ant>
<property name="analyzers-icu.uptodate" value="true"/>
@ -290,7 +290,7 @@
<module-uptodate name="analysis/phonetic" jarfile="${analyzers-phonetic.jar}" property="analyzers-phonetic.uptodate"/>
</target>
<target name="jar-analyzers-phonetic" unless="analyzers-phonetic.uptodate" depends="check-analyzers-phonetic-uptodate">
<ant dir="${common.dir}/analysis/phonetic" target="jar-core" inheritAll="false">
<propertyset refid="uptodate.and.compiled.properties"/>
</ant>
</target>
@ -311,7 +311,7 @@
<module-uptodate name="analysis/smartcn" jarfile="${analyzers-smartcn.jar}" property="analyzers-smartcn.uptodate"/>
</target>
<target name="jar-analyzers-smartcn" unless="analyzers-smartcn.uptodate" depends="check-analyzers-smartcn-uptodate">
<ant dir="${common.dir}/analysis/smartcn" target="jar-core" inheritAll="false">
<propertyset refid="uptodate.and.compiled.properties"/>
</ant>
<property name="analyzers-smartcn.uptodate" value="true"/>
@ -333,7 +333,7 @@
<module-uptodate name="analysis/stempel" jarfile="${analyzers-stempel.jar}" property="analyzers-stempel.uptodate"/>
</target>
<target name="jar-analyzers-stempel" unless="analyzers-stempel.uptodate" depends="check-analyzers-stempel-uptodate">
<ant dir="${common.dir}/analysis/stempel" target="jar-core" inheritAll="false">
<propertyset refid="uptodate.and.compiled.properties"/>
</ant>
<property name="analyzers-stempel.uptodate" value="true"/>
@ -355,7 +355,7 @@
<module-uptodate name="analysis/kuromoji" jarfile="${analyzers-kuromoji.jar}" property="analyzers-kuromoji.uptodate"/>
</target>
<target name="jar-analyzers-kuromoji" unless="analyzers-kuromoji.uptodate" depends="check-analyzers-kuromoji-uptodate">
<ant dir="${common.dir}/analysis/kuromoji" target="jar-core" inheritAll="false">
<propertyset refid="uptodate.and.compiled.properties"/>
</ant>
<property name="analyzers-kuromoji.uptodate" value="true"/>
@ -491,7 +491,7 @@
<module-uptodate name="grouping" jarfile="${grouping.jar}" property="grouping.uptodate"/>
</target>
<target name="jar-grouping" unless="grouping.uptodate" depends="check-grouping-uptodate">
<ant dir="${common.dir}/grouping" target="jar-core" inheritAll="false">
<propertyset refid="uptodate.and.compiled.properties"/>
</ant>
<property name="grouping.uptodate" value="true"/>
@ -557,7 +557,7 @@
<module-uptodate name="misc" jarfile="${misc.jar}" property="misc.uptodate"/>
</target>
<target name="jar-misc" unless="misc.uptodate" depends="check-misc-uptodate">
<ant dir="${common.dir}/misc" target="jar-core" inheritAll="false">
<propertyset refid="uptodate.and.compiled.properties"/>
</ant>
<property name="misc.uptodate" value="true"/>
@ -579,7 +579,7 @@
<module-uptodate name="sandbox" jarfile="${sandbox.jar}" property="sandbox.uptodate"/>
</target>
<target name="jar-sandbox" unless="sandbox.uptodate" depends="check-sandbox-uptodate">
<ant dir="${common.dir}/sandbox" target="jar-core" inheritAll="false">
<propertyset refid="uptodate.and.compiled.properties"/>
</ant>
<property name="sandbox.uptodate" value="true"/>
@ -601,7 +601,7 @@
<module-uptodate name="spatial3d" jarfile="${spatial3d.jar}" property="spatial3d.uptodate"/>
</target>
<target name="jar-spatial3d" unless="spatial3d.uptodate" depends="check-spatial3d-uptodate">
<ant dir="${common.dir}/spatial3d" target="jar-core" inheritAll="false">
<propertyset refid="uptodate.and.compiled.properties"/>
</ant>
<property name="spatial3d.uptodate" value="true"/>
@ -623,7 +623,7 @@
<module-uptodate name="spatial" jarfile="${spatial.jar}" property="spatial.uptodate"/>
</target>
<target name="jar-spatial" unless="spatial.uptodate" depends="check-spatial-uptodate">
<ant dir="${common.dir}/spatial" target="jar-core" inheritAll="false">
<propertyset refid="uptodate.and.compiled.properties"/>
</ant>
<property name="spatial.uptodate" value="true"/>
@ -645,7 +645,7 @@
<module-uptodate name="suggest" jarfile="${suggest.jar}" property="suggest.uptodate"/>
</target>
<target name="jar-suggest" unless="suggest.uptodate" depends="check-suggest-uptodate">
<ant dir="${common.dir}/suggest" target="jar-core" inheritAll="false">
<propertyset refid="uptodate.and.compiled.properties"/>
</ant>
<property name="suggest.uptodate" value="true"/>


@ -63,19 +63,19 @@
<!-- Change the incorrect public ctors for QueryParser to be protected instead -->
<replaceregexp file="src/java/org/apache/lucene/queryparser/classic/QueryParser.java"
byline="true"
match="public QueryParser\(CharStream "
replace="protected QueryParser(CharStream "/>
<replaceregexp file="src/java/org/apache/lucene/queryparser/classic/QueryParser.java"
byline="true"
match="public QueryParser\(QueryParserTokenManager "
replace="protected QueryParser(QueryParserTokenManager "/>
<generalReplaces dir="src/java/org/apache/lucene/queryparser/classic"/>
</sequential>
</target>
<target name="javacc-surround" depends="resolve-javacc" description="generate surround query parser">
<invoke-javacc target="src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.jj"
outputDir="src/java/org/apache/lucene/queryparser/surround/parser"
/>
<generalReplaces dir="src/java/org/apache/lucene/queryparser/surround/parser"/>


@ -16,13 +16,13 @@
limitations under the License.
-->
<BooleanQuery fieldName="contents">
<Clause occurs="should">
<TermQuery>merger</TermQuery>
</Clause>
<Clause occurs="mustnot">
<TermQuery>sumitomo</TermQuery>
</Clause>
<Clause occurs="must">
<TermQuery>bank</TermQuery>
</Clause>
<Clause occurs="should">
<TermQuery>merger</TermQuery>
</Clause>
<Clause occurs="mustnot">
<TermQuery>sumitomo</TermQuery>
</Clause>
<Clause occurs="must">
<TermQuery>bank</TermQuery>
</Clause>
</BooleanQuery>
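
For reference, this test query corresponds one-to-one to a programmatic BooleanQuery; a sketch using the BooleanQuery.Builder style from Lucene trunk of that era, where each occurs attribute maps onto a BooleanClause.Occur value:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

BooleanQuery.Builder builder = new BooleanQuery.Builder();
builder.add(new TermQuery(new Term("contents", "merger")), Occur.SHOULD);     // occurs="should"
builder.add(new TermQuery(new Term("contents", "sumitomo")), Occur.MUST_NOT); // occurs="mustnot"
builder.add(new TermQuery(new Term("contents", "bank")), Occur.MUST);         // occurs="must"
Query query = builder.build();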


@ -16,18 +16,18 @@
limitations under the License.
-->
<BoostingQuery>
<!-- Find docs about banks, preferably merger info and preferably not "World bank" -->
<Query>
<BooleanQuery fieldName="contents">
<Clause occurs="should">
<TermQuery>merger</TermQuery>
</Clause>
<Clause occurs="must">
<TermQuery>bank</TermQuery>
</Clause>
</BooleanQuery>
</Query>
<BoostQuery boost="0.01">
<UserQuery>"world bank"</UserQuery>
</BoostQuery>
</BoostingQuery>


@ -16,36 +16,36 @@
limitations under the License.
-->
<BooleanQuery fieldName="contents">
<Clause occurs="should">
<TermQuery>merger</TermQuery>
</Clause>
<Clause occurs="mustnot">
<TermQuery >sumitomo</TermQuery>
</Clause>
<Clause occurs="filter">
<!--
CachedFilter elements can contain any Query or Filter.
CachedFilters are cached in an LRU Cache keyed on the contained query/filter object.
Using this will speed up overall performance for repeated uses of the same expensive
query/filter. The sorts of queries likely to benefit from caching need not necessarily be
complex - e.g. simple TermQuerys with a large DF (document frequency) can be expensive
on large indexes. A good example of this might be a term query on a field with only 2 possible
values - "true" or "false". In a large index, querying or filtering on this field requires
reading millions of document ids from disk which can more usefully be cached as a
QueryFilter bitset.
For Queries/Filters to be cached and reused the object must implement hashcode and
equals methods correctly so that duplicate queries/filters can be detected in the cache.
The CoreParser.maxNumCachedFilters property can be used to control the size
of the LRU Cache established during the construction of CoreParser instances.
-->
<CachedQuery>
<!-- Example query to be cached for fast, repeated use -->
<TermQuery fieldName="contents">bank</TermQuery>
<!-- Alternatively, a filter object can be cached ....
<RangeFilter fieldName="date" lowerTerm="19870409" upperTerm="19870412"/>
-->
</CachedQuery>
</Clause>
</BooleanQuery>
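
Test files like this one are consumed by the XML query parser. A minimal sketch of loading such a file through CoreParser (org.apache.lucene.queryparser.xml); the class and resource names here are placeholders:

import java.io.InputStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.xml.CoreParser;
import org.apache.lucene.search.Query;

// "contents" is the default field; the analyzer is applied to text inside
// elements such as TermsQuery and UserQuery.
CoreParser parser = new CoreParser("contents", new StandardAnalyzer());
try (InputStream xml = SomeTest.class.getResourceAsStream("CachedFilter.xml")) { // placeholder names
  Query query = parser.parse(xml); // yields a ready-to-run Lucene Query
}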


@ -16,5 +16,5 @@
limitations under the License.
-->
<ConstantScoreQuery>
<RangeQuery fieldName="date" lowerTerm="19870409" upperTerm="19870412"/>
</ConstantScoreQuery>


@ -17,8 +17,8 @@
-->
<DisjunctionMaxQuery>
<TermQuery fieldName="a">merger</TermQuery>
<DisjunctionMaxQuery tieBreaker="1.2">
<TermQuery fieldName="b">verger</TermQuery>
</DisjunctionMaxQuery>
<TermQuery fieldName="a">merger</TermQuery>
<DisjunctionMaxQuery tieBreaker="1.2">
<TermQuery fieldName="b">verger</TermQuery>
</DisjunctionMaxQuery>
</DisjunctionMaxQuery>


@ -16,8 +16,8 @@
limitations under the License.
-->
<FuzzyLikeThisQuery>
<!-- Matches on misspelt "Sumitomo" bank -->
<Field fieldName="contents">
Sumitimo bank
</Field>
</FuzzyLikeThisQuery>


@ -17,17 +17,17 @@
-->
<LikeThisQuery percentTermsToMatch="5" stopWords="Reuter" minDocFreq="2">
IRAQI TROOPS REPORTED PUSHING BACK IRANIANS Iraq said today its troops were pushing Iranian forces out of
positions they had initially occupied when they launched a new offensive near the southern port of
Basra early yesterday. A High Command communique said Iraqi troops had won a significant victory
and were continuing to advance. Iraq said it had foiled a three-pronged thrust some 10 km
(six miles) from Basra, but admitted the Iranians had occupied ground held by the Mohammed al-Qassem
unit, one of three divisions attacked. The communique said Iranian Revolutionary Guards were under
assault from warplanes, helicopter gunships, heavy artillery and tanks. "Our forces are continuing
their advance until they purge the last foothold" occupied by the Iranians, it said.
(Iran said its troops had killed or wounded more than 4,000 Iraqis and were stabilising their new positions.)
The Baghdad communique said Iraqi planes also destroyed oil installations at Iran's southwestern Ahvaz field
during a raid today. It denied an Iranian report that an Iraqi jet was shot down.
Iraq also reported a naval battle at the northern tip of the Gulf. Iraqi naval units and forces defending an
offshore terminal sank six Iranian out of 28 Iranian boats attempting to attack an offshore terminal,
the communique said. Reuter 3;
</LikeThisQuery>
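
The LikeThisQuery element is backed by Lucene's "more like this" support. Roughly, the direct API equivalent looks like the sketch below (org.apache.lucene.queries.mlt); indexReader stands in for an already-open IndexReader:

import java.io.StringReader;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queries.mlt.MoreLikeThis;
import org.apache.lucene.search.Query;

MoreLikeThis mlt = new MoreLikeThis(indexReader);
mlt.setAnalyzer(new StandardAnalyzer()); // required when liking free text rather than a stored doc
mlt.setFieldNames(new String[] {"contents"});
mlt.setMinDocFreq(2);                    // mirrors minDocFreq="2" above
Query like = mlt.like("contents", new StringReader("IRAQI TROOPS REPORTED PUSHING BACK IRANIANS ..."));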


@ -16,10 +16,10 @@
limitations under the License.
-->
<BooleanQuery>
<Clause occurs="must">
<MatchAllDocsQuery/>
</Clause>
<Clause occurs="filter">
<RangeQuery fieldName="date" lowerTerm="19870409" upperTerm="19870412"/>
</Clause>
<Clause occurs="must">
<MatchAllDocsQuery/>
</Clause>
<Clause occurs="filter">
<RangeQuery fieldName="date" lowerTerm="19870409" upperTerm="19870412"/>
</Clause>
</BooleanQuery>


@ -16,21 +16,21 @@
limitations under the License.
-->
<!--
This query was added to demonstrate nested boolean queries - there
was a bug in the XML parser which added ALL child <Clause> tags to
the top level tags ie. took child and grandchild elements instead
of just child elements. This was due to the use of the
Element.getElementsByTagName() call in BooleanQueryBuilder
-->
<BooleanQuery fieldName="contents">
<Clause occurs="should">
<BooleanQuery fieldName="contents">
<Clause occurs="must">
<TermQuery>doesNotExistButShouldBeOKBecauseOtherClauseExists</TermQuery>
</Clause>
</BooleanQuery>
</Clause>
<Clause occurs="should">
<TermQuery>bank</TermQuery>
</Clause>
</BooleanQuery>
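
The bug recorded in that comment is a classic DOM pitfall: Element.getElementsByTagName() returns every matching descendant, not just direct children, so nested Clause elements leaked into the outer query. A minimal sketch of the direct-children-only traversal that avoids it (plain org.w3c.dom; the method name is illustrative):

import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;

static void forEachDirectClause(Element booleanQueryElement) {
  NodeList children = booleanQueryElement.getChildNodes();
  for (int i = 0; i < children.getLength(); i++) {
    Node node = children.item(i);
    // Only direct Clause children are handled here; a nested BooleanQuery
    // contributes its own clauses when it is recursively parsed.
    if (node instanceof Element && "Clause".equals(node.getNodeName())) {
      Element clause = (Element) node;
      // ... build a BooleanClause from this element only
    }
  }
}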


@ -16,16 +16,16 @@
limitations under the License.
-->
<BooleanQuery fieldName="contents">
<Clause occurs="should">
<TermQuery>merger</TermQuery>
</Clause>
<Clause occurs="mustnot">
<TermQuery >sumitomo</TermQuery>
</Clause>
<Clause occurs="must">
<TermQuery>bank</TermQuery>
</Clause>
<Clause occurs="must">
<NumericRangeQuery fieldName="date2" lowerTerm="19870409" upperTerm="19870412"/>
</Clause>
</BooleanQuery>


@ -16,40 +16,40 @@
limitations under the License.
-->
<SpanOr fieldName="contents">
<SpanNear slop="8" inOrder="false" >
<SpanOr>
<SpanTerm>killed</SpanTerm>
<SpanTerm>died</SpanTerm>
<SpanTerm>dead</SpanTerm>
</SpanOr>
<SpanOr>
<!-- a less verbose way of declaring SpanTerm declarations - these are analyzed
into a series of Tokens which are added as SpanTerm elements of a SpanOr
-->
<SpanOrTerms>miner miners</SpanOrTerms>
<!-- finds mine near worker or workers -->
<SpanNear slop="6" inOrder="false">
<SpanTerm>mine</SpanTerm>
<SpanOrTerms>worker workers</SpanOrTerms>
<SpanNear slop="8" inOrder="false" >
<SpanOr>
<SpanTerm>killed</SpanTerm>
<SpanTerm>died</SpanTerm>
<SpanTerm>dead</SpanTerm>
</SpanOr>
<SpanOr>
<!-- a less verbose way of declaring SpanTerm declarations - these are analyzed
into a series of Tokens which are added as SpanTerm elements of a SpanOr
-->
<SpanOrTerms>miner miners</SpanOrTerms>
<!-- finds mine near worker or workers -->
<SpanNear slop="6" inOrder="false">
<SpanTerm>mine</SpanTerm>
<SpanOrTerms>worker workers</SpanOrTerms>
<BoostingTermQuery>heavy</BoostingTermQuery>
</SpanNear>
</SpanOr>
</SpanNear>
<SpanFirst end="10">
<SpanOrTerms>fire burn</SpanOrTerms>
</SpanFirst>
<!-- Other Span examples....
<SpanNot>
<Include>
<SpanNear slop="2" inOrder="2">
<SpanTerm>social</SpanTerm>
<SpanTerm>services</SpanTerm>
</SpanNear>
</Include>
<Exclude>
<SpanTerm>public</SpanTerm>
</Exclude>
</SpanNot>
-->
</SpanOr>
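
As a sketch, the outer shape of this query expressed directly in the spans API (org.apache.lucene.search.spans); only part of the XML is reproduced, and the analysis step behind SpanOrTerms is elided:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.spans.SpanFirstQuery;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;

SpanQuery killedDiedDead = new SpanOrQuery(
    new SpanTermQuery(new Term("contents", "killed")),
    new SpanTermQuery(new Term("contents", "died")),
    new SpanTermQuery(new Term("contents", "dead")));
SpanQuery miners = new SpanOrQuery(
    new SpanTermQuery(new Term("contents", "miner")),
    new SpanTermQuery(new Term("contents", "miners")));
SpanQuery near = new SpanNearQuery(
    new SpanQuery[] {killedDiedDead, miners}, 8, false); // slop="8" inOrder="false"
SpanQuery fireBurn = new SpanFirstQuery(
    new SpanOrQuery(
        new SpanTermQuery(new Term("contents", "fire")),
        new SpanTermQuery(new Term("contents", "burn"))),
    10); // end="10"
SpanQuery query = new SpanOrQuery(near, fireBurn);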


@ -15,12 +15,12 @@
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- TermsQuery uses an analyzer to tokenize text and creates a BooleanQuery with nested
"should" TermQueries for each of the tokens encountered. This can be used for user input
which may include content or characters that would otherwise be illegal query syntax when
using the standard lucene query parser. Of course the downside is that none of the query
operators (AND NOT ~ ^ : etc) will have an effect. For some scenarios queries are
not formed by people familiar with Lucene query syntax and they can inadvertently type illegal
query syntax so in these cases this is an appropriate and simple alternative
-->
<TermsQuery fieldName="contents">sumitomo bank</TermsQuery>
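
A sketch of what that comment describes, assuming the BooleanQuery.Builder API: run the analyzer over the raw user text and OR together one TermQuery per token, so none of the input is ever interpreted as query syntax:

import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

static Query termsQuery(Analyzer analyzer, String field, String userText) throws IOException {
  BooleanQuery.Builder builder = new BooleanQuery.Builder();
  try (TokenStream ts = analyzer.tokenStream(field, userText)) {
    CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      // One SHOULD clause per analyzed token, e.g. "sumitomo" and "bank".
      builder.add(new TermQuery(new Term(field, termAtt.toString())), BooleanClause.Occur.SHOULD);
    }
    ts.end();
  }
  return builder.build();
}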


@ -16,10 +16,10 @@
limitations under the License.
-->
<BooleanQuery>
<Clause occurs="must">
<UserQuery>"Bank of England"</UserQuery>
</Clause>
<Clause occurs="filter">
<RangeQuery fieldName="date" lowerTerm="19870409" upperTerm="19870412"/>
</Clause>
</BooleanQuery>


@ -19,7 +19,7 @@
<project name="DTDDocAnt" default="main">
<import file="../../lucene/module-build.xml"/>
<description>
This file generates DTDdocumentation


@ -27,17 +27,17 @@
<import file="../module-build.xml"/>
<path id="classpath">
<fileset dir="lib" />
<fileset dir="lib" />
<pathelement path="${facet.jar}"/>
<path refid="base.classpath"/>
</path>
<target name="resolve" depends="common.resolve">
<sequential>
<!-- javax.servlet jar -->
<ivy:retrieve conf="servlet" log="download-only" type="orbit" symlink="${ivy.symlink}"/>
</sequential>
</target>
<target name="resolve" depends="common.resolve">
<sequential>
<!-- javax.servlet jar -->
<ivy:retrieve conf="servlet" log="download-only" type="orbit" symlink="${ivy.symlink}"/>
</sequential>
</target>
<target name="init" depends="module-build.init,jar-facet"/>


@ -20,7 +20,7 @@
<info organisation="org.apache.lucene" module="replicator"/>
<configurations defaultconfmapping="http->master;jetty->master;start->master;servlet->master;logging->master">
<conf name="http" description="httpclient jars" transitive="false"/>
<conf name="http" description="httpclient jars" transitive="false"/>
<conf name="jetty" description="jetty jars" transitive="false"/>
<conf name="start" description="jetty start jar" transitive="false"/>
<conf name="servlet" description="servlet-api jar" transitive="false"/>


@ -22,7 +22,7 @@
<description>
Auto-suggest and Spellchecking support
</description>
<!-- just a list of words for testing suggesters -->
<property name="rat.excludes" value="**/Top50KWiki.utf8,**/stop-snowball.txt"/>
@ -40,8 +40,8 @@
<invoke-module-javadoc>
<links>
<link href="../analyzers-common"/>
<link href="../queries"/>
<link href="../misc"/>
<link href="../queries"/>
<link href="../misc"/>
</links>
</invoke-module-javadoc>
</target>


@ -89,7 +89,7 @@
<attribute name="dir"/>
<attribute name="centralized.versions.file"/>
<attribute name="ivy.settings.file"/>
<attribute name="ivy.resolution-cache.dir"/>
<attribute name="ivy.resolution-cache.dir"/>
<attribute name="common.build.dir"/>
<attribute name="ignore.conflicts.file"/>
<sequential>
@ -101,7 +101,7 @@
<echo>Lib versions check under: @{dir}</echo>
<libversions centralizedVersionsFile="@{centralized.versions.file}"
ivySettingsFile="@{ivy.settings.file}"
ivyResolutionCacheDir="@{ivy.resolution-cache.dir}"
commonBuildDir="@{common.build.dir}"
ignoreConflictsFile="@{ignore.conflicts.file}">
<fileset dir="@{dir}">


@ -95,7 +95,7 @@
<!--LUCENE-3286: Luke is incompatible with new XML QP location and target is not flexible
when it comes to incompatible changes. Update when Luke has updated.
<target name="compile-xml-query-parser">
<ant dir="${common.dir}/queryparser" target="compile-core" inheritAll="false">
<propertyset refid="uptodate.and.compiled.properties"/>
</ant>
</target>
@ -378,7 +378,7 @@
<include name="webapps" />
<include name="solr-webapp/**/*" />
<exclude name="**/.gitignore" />
</fileset>
</delete>
</target>
@ -679,7 +679,7 @@
<contrib-crawl target="-validate-maven-dependencies"/>
</sequential>
</target>
<!-- ========================================================================= -->
<!-- ========================= COMMITTERS' HELPERS =========================== -->
<!-- ========================================================================= -->
@ -717,37 +717,37 @@
<!-- spanish -->
<copy verbose="true" file="${analysis-common.res.dir}/snowball/spanish_stop.txt"
tofile="${analysis.conf.dest}/stopwords_es.txt"/>
<!-- basque -->
<copy verbose="true" file="${analysis-common.res.dir}/eu/stopwords.txt"
tofile="${analysis.conf.dest}/stopwords_eu.txt"/>
<!-- persian -->
<copy verbose="true" file="${analysis-common.res.dir}/fa/stopwords.txt"
tofile="${analysis.conf.dest}/stopwords_fa.txt"/>
<!-- finnish -->
<copy verbose="true" file="${analysis-common.res.dir}/snowball/finnish_stop.txt"
tofile="${analysis.conf.dest}/stopwords_fi.txt"/>
<!-- french -->
<copy verbose="true" file="${analysis-common.res.dir}/snowball/french_stop.txt"
tofile="${analysis.conf.dest}/stopwords_fr.txt"/>
<!-- irish -->
<copy verbose="true" file="${analysis-common.res.dir}/ga/stopwords.txt"
tofile="${analysis.conf.dest}/stopwords_ga.txt"/>
<!-- galician -->
<copy verbose="true" file="${analysis-common.res.dir}/gl/stopwords.txt"
tofile="${analysis.conf.dest}/stopwords_gl.txt"/>
<!-- hindi -->
<copy verbose="true" file="${analysis-common.res.dir}/hi/stopwords.txt"
tofile="${analysis.conf.dest}/stopwords_hi.txt"/>
<!-- hungarian -->
<copy verbose="true" file="${analysis-common.res.dir}/snowball/hungarian_stop.txt"
tofile="${analysis.conf.dest}/stopwords_hu.txt"/>
<!-- armenian -->
<copy verbose="true" file="${analysis-common.res.dir}/hy/stopwords.txt"
tofile="${analysis.conf.dest}/stopwords_hy.txt"/>
<!-- indonesian -->
<copy verbose="true" file="${analysis-common.res.dir}/id/stopwords.txt"
tofile="${analysis.conf.dest}/stopwords_id.txt"/>
<!-- italian -->
<copy verbose="true" file="${analysis-common.res.dir}/snowball/italian_stop.txt"
tofile="${analysis.conf.dest}/stopwords_it.txt"/>
<!-- japanese -->
@ -755,31 +755,31 @@
tofile="${analysis.conf.dest}/stopwords_ja.txt"/>
<copy verbose="true" file="${analysis-kuromoji.res.dir}/ja/stoptags.txt"
tofile="${analysis.conf.dest}/stoptags_ja.txt"/>
<!-- latvian -->
<copy verbose="true" file="${analysis-common.res.dir}/lv/stopwords.txt"
tofile="${analysis.conf.dest}/stopwords_lv.txt"/>
<!-- dutch -->
<copy verbose="true" file="${analysis-common.res.dir}/snowball/dutch_stop.txt"
tofile="${analysis.conf.dest}/stopwords_nl.txt"/>
<!-- norwegian -->
<copy verbose="true" file="${analysis-common.res.dir}/snowball/norwegian_stop.txt"
tofile="${analysis.conf.dest}/stopwords_no.txt"/>
<!-- portuguese -->
<copy verbose="true" file="${analysis-common.res.dir}/snowball/portuguese_stop.txt"
tofile="${analysis.conf.dest}/stopwords_pt.txt"/>
<!-- romanian -->
<copy verbose="true" file="${analysis-common.res.dir}/ro/stopwords.txt"
tofile="${analysis.conf.dest}/stopwords_ro.txt"/>
<!-- russian -->
<copy verbose="true" file="${analysis-common.res.dir}/snowball/russian_stop.txt"
tofile="${analysis.conf.dest}/stopwords_ru.txt"/>
<!-- swedish -->
<copy verbose="true" file="${analysis-common.res.dir}/snowball/swedish_stop.txt"
tofile="${analysis.conf.dest}/stopwords_sv.txt"/>
<!-- thai -->
<copy verbose="true" file="${analysis-common.res.dir}/th/stopwords.txt"
tofile="${analysis.conf.dest}/stopwords_th.txt"/>
<!-- turkish -->
<copy verbose="true" file="${analysis-common.res.dir}/tr/stopwords.txt"
tofile="${analysis.conf.dest}/stopwords_tr.txt"/>
</target>


@ -76,21 +76,21 @@
<property name="fullnamever" value="${final.name}"/>
<path id="additional.dependencies">
<fileset dir="${common-solr.dir}/core/lib" excludes="${common.classpath.excludes}"/>
<fileset dir="${common-solr.dir}/solrj/lib" excludes="${common.classpath.excludes}"/>
<fileset dir="${common-solr.dir}/server/lib" excludes="${common.classpath.excludes}"/>
<fileset dir="${common-solr.dir}/example/example-DIH/solr/db/lib" excludes="${common.classpath.excludes}"/>
<fileset dir="lib" excludes="${common.classpath.excludes}" erroronmissingdir="false"/>
<fileset dir="${common-solr.dir}/core/lib" excludes="${common.classpath.excludes}"/>
<fileset dir="${common-solr.dir}/solrj/lib" excludes="${common.classpath.excludes}"/>
<fileset dir="${common-solr.dir}/server/lib" excludes="${common.classpath.excludes}"/>
<fileset dir="${common-solr.dir}/example/example-DIH/solr/db/lib" excludes="${common.classpath.excludes}"/>
<fileset dir="lib" excludes="${common.classpath.excludes}" erroronmissingdir="false"/>
</path>
<path id="solr.lucene.libs">
<!-- List of jars that will be used as the foundation for both
the base classpath, as well as copied into the lucene-libs dir
in the release.
-->
<!-- NOTE: lucene-core is explicitly not included because of the
base.classpath (compilation & tests are done directly against
the class files w/o needing to build the jar)
-->
<pathelement location="${analyzers-common.jar}"/>
<pathelement location="${analyzers-kuromoji.jar}"/>
@ -128,8 +128,8 @@
<exclude name="randomizedtesting-runner-*.jar" />
<exclude name="ant*.jar" />
</fileset>
<pathelement path="src/test-files"/>
<path refid="test.base.classpath"/>
<pathelement path="src/test-files"/>
<path refid="test.base.classpath"/>
</path>
<path id="test.classpath" refid="solr.test.base.classpath"/>
@ -168,11 +168,11 @@
</target>
<target name="prep-lucene-jars"
depends="jar-lucene-core, jar-backward-codecs, jar-analyzers-phonetic, jar-analyzers-kuromoji, jar-codecs,jar-expressions, jar-suggest, jar-highlighter, jar-memory,
jar-misc, jar-spatial, jar-grouping, jar-queries, jar-queryparser, jar-join, jar-sandbox">
<property name="solr.deps.compiled" value="true"/>
depends="jar-lucene-core, jar-backward-codecs, jar-analyzers-phonetic, jar-analyzers-kuromoji, jar-codecs,jar-expressions, jar-suggest, jar-highlighter, jar-memory,
jar-misc, jar-spatial, jar-grouping, jar-queries, jar-queryparser, jar-join, jar-sandbox">
<property name="solr.deps.compiled" value="true"/>
</target>
<target name="lucene-jars-to-solr"
depends="-lucene-jars-to-solr-not-for-package,-lucene-jars-to-solr-package"/>
@ -249,7 +249,7 @@
<!-- create javadocs for the current module -->
<target name="javadocs" depends="compile-core,define-lucene-javadoc-url,lucene-javadocs,javadocs-solr-core,check-javadocs-uptodate" unless="javadocs-uptodate-${name}">
<sequential>
<mkdir dir="${javadoc.dir}/${name}"/>
<solr-invoke-javadoc>
<solrsources>
@ -424,7 +424,7 @@
<ant dir="${common-solr.dir}/test-framework" target="compile-core" inheritAll="false">
<propertyset refid="uptodate.and.compiled.properties"/>
</ant>
<property name="solr.core.compiled" value="true"/>
<property name="solr.core.compiled" value="true"/>
<property name="solr.test.framework.compiled" value="true"/>
</target>
@ -469,15 +469,15 @@
</target>
<target name="compile-contrib" description="Compile contrib modules">
<contrib-crawl target="compile-core"/>
<contrib-crawl target="compile-core"/>
</target>
<target name="compile-test-contrib" description="Compile contrib modules' tests">
<contrib-crawl target="compile-test"/>
<contrib-crawl target="compile-test"/>
</target>
<target name="javadocs-contrib" description="Compile contrib modules">
<contrib-crawl target="javadocs"/>
<contrib-crawl target="javadocs"/>
</target>
<target name="jar-contrib" description="Jar contrib modules">


@ -1,285 +1,285 @@
<?xml version="1.0" encoding="UTF-8"?>
<analyticsRequestEnvelope stats="true" olap="true">
<analyticsRequest>
<name>Add Request</name>
<statistic>
<expression>sum(int(int_id))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>unique(long(long_ld))</expression>
<name>unique</name>
</statistic>
<statistic>
<expression>add(sum(int(int_id)),unique(long(long_ld)))</expression>
<name>add sum and unique</name>
</statistic>
<statistic>
<expression>mean(int(int_id))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>count(long(long_ld))</expression>
<name>count</name>
</statistic>
<statistic>
<expression>median(int(int_id))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>add(mean(int(int_id)),count(long(long_ld)),median(int(int_id)))</expression>
<name>add mean and count and median</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Multiply Request</name>
<statistic>
<expression>sum(int(int_id))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>unique(long(long_ld))</expression>
<name>unique</name>
</statistic>
<statistic>
<expression>mult(sum(int(int_id)),unique(long(long_ld)))</expression>
<name>multiply sum and unique</name>
</statistic>
<statistic>
<expression>mean(int(int_id))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>count(long(long_ld))</expression>
<name>count</name>
</statistic>
<statistic>
<expression>median(int(int_id))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>mult(mean(int(int_id)),count(long(long_ld)),median(int(int_id)))</expression>
<name>multiply mean and count and median</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Divide Request</name>
<statistic>
<expression>sum(int(int_id))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>unique(long(long_ld))</expression>
<name>unique</name>
</statistic>
<statistic>
<expression>div(sum(int(int_id)),unique(long(long_ld)))</expression>
<name>divide sum by unique</name>
</statistic>
<statistic>
<expression>mean(int(int_id))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>count(long(long_ld))</expression>
<name>count</name>
</statistic>
<statistic>
<expression>div(mean(int(int_id)),count(long(long_ld)))</expression>
<name>divide mean by count</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Power Request</name>
<statistic>
<expression>sum(int(int_id))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>unique(long(long_ld))</expression>
<name>unique</name>
</statistic>
<statistic>
<expression>pow(sum(int(int_id)),unique(long(long_ld)))</expression>
<name>power sum by unique</name>
</statistic>
<statistic>
<expression>mean(int(int_id))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>count(long(long_ld))</expression>
<name>count</name>
</statistic>
<statistic>
<expression>pow(mean(int(int_id)),count(long(long_ld)))</expression>
<name>power mean by count</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Negate Request</name>
<statistic>
<expression>sum(int(int_id))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>neg(sum(int(int_id)))</expression>
<name>negate of sum</name>
</statistic>
<statistic>
<expression>count(long(long_ld))</expression>
<name>count</name>
</statistic>
<statistic>
<expression>neg(count(long(long_ld)))</expression>
<name>negate of count</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Const Num Request</name>
<statistic>
<expression>const_num(8)</expression>
<name>constant 8</name>
</statistic>
<statistic>
<expression>const_num(10)</expression>
<name>constant 10</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Date Math Request</name>
<statistic>
<expression>median(date(date_dtd))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>const_str(+2YEARS)</expression>
<name>constant str median</name>
</statistic>
<statistic>
<expression>date_math(median(date(date_dtd)),const_str(+2YEARS))</expression>
<name>date math median</name>
</statistic>
<statistic>
<expression>max(date(date_dtd))</expression>
<name>max</name>
</statistic>
<statistic>
<expression>const_str(+2MONTHS)</expression>
<name>constant str max</name>
</statistic>
<statistic>
<expression>date_math(max(date(date_dtd)),const_str(+2MONTHS))</expression>
<name>date math max</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Constant Date Request</name>
<statistic>
<expression>const_str(1800-12-31T23:59:59Z)</expression>
<name>const str 1</name>
</statistic>
<statistic>
<expression>const_date(1800-12-31T23:59:59Z)</expression>
<name>const date 1</name>
</statistic>
<statistic>
<expression>const_str(1804-06-30T23:59:59Z)</expression>
<name>const str 2</name>
</statistic>
<statistic>
<expression>const_date(1804-06-30T23:59:59Z)</expression>
<name>const date 2</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Constant String Request</name>
<statistic>
<expression>const_str(this is the first)</expression>
<name>const str 1</name>
</statistic>
<statistic>
<expression>const_str(this is the second)</expression>
<name>const str 2</name>
</statistic>
<statistic>
<expression>const_str(this is the third)</expression>
<name>const str 3</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Concatenate Request</name>
<statistic>
<expression>const_str(this is the first)</expression>
<name>const str min</name>
</statistic>
<statistic>
<expression>min(str(string_sd))</expression>
<name>min</name>
</statistic>
<statistic>
<expression>concat(const_str(this is the first),min(str(string_sd)))</expression>
<name>concat const and min</name>
</statistic>
<statistic>
<expression>const_str(this is the second)</expression>
<name>const str max</name>
</statistic>
<statistic>
<expression>max(str(string_sd))</expression>
<name>max</name>
</statistic>
<statistic>
<expression>concat(const_str(this is the second),max(str(string_sd)))</expression>
<name>concat const and max</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Reverse Request</name>
<statistic>
<expression>min(str(string_sd))</expression>
<name>min</name>
</statistic>
<statistic>
<expression>rev(min(str(string_sd)))</expression>
<name>reverse min</name>
</statistic>
<statistic>
<expression>max(str(string_sd))</expression>
<name>max</name>
</statistic>
<statistic>
<expression>rev(max(str(string_sd)))</expression>
<name>reverse max</name>
</statistic>
</analyticsRequest>
</analyticsRequestEnvelope>
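A note on how the composite statistics above are built: the operands of add, mult, div, pow, neg, concat, and date_math are always complete sub-expressions spelled out in full; the <name> elements are output labels only and are never referenced by later expressions. For example, the add request effectively declares:

<statistic>
  <!-- The operands repeat the full leaf expressions; they are not
       references to the "sum" and "unique" names declared earlier. -->
  <expression>add(sum(int(int_id)),unique(long(long_ld)))</expression>
  <name>add sum and unique</name>
</statistic>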

@ -1,101 +1,101 @@
<?xml version="1.0" encoding="UTF-8"?>
<analyticsRequestEnvelope stats="true" olap="true">
<analyticsRequest>
<name>sort request</name>
<statistic>
<expression>mean(int(int_id))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>median(int(int_id))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>count(int(int_id))</expression>
<name>count</name>
</statistic>
<statistic>
<expression>perc(20,int(int_id))</expression>
<name>perc_20</name>
</statistic>
<fieldFacet>
<field>long_ld</field>
<sortSpecification>
<statName>mean</statName>
<direction>asc</direction>
</sortSpecification>
</fieldFacet>
<fieldFacet>
<field>float_fd</field>
<sortSpecification>
<statName>median</statName>
<direction>desc</direction>
</sortSpecification>
</fieldFacet>
<fieldFacet>
<field>double_dd</field>
<sortSpecification>
<statName>count</statName>
<direction>asc</direction>
</sortSpecification>
</fieldFacet>
<fieldFacet>
<field>string_sd</field>
<sortSpecification>
<statName>perc_20</statName>
<direction>desc</direction>
</sortSpecification>
</fieldFacet>
</analyticsRequest>
<analyticsRequest>
<name>limit request</name>
<statistic>
<expression>mean(int(int_id))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>median(int(int_id))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>count(int(int_id))</expression>
<name>count</name>
</statistic>
<statistic>
<expression>perc(20,int(int_id))</expression>
<name>perc_20</name>
</statistic>
<fieldFacet limit="5">
<field>long_ld</field>
<sortSpecification>
<statName>mean</statName>
<direction>asc</direction>
</sortSpecification>
</fieldFacet>
<fieldFacet limit="3">
<field>float_fd</field>
<sortSpecification>
<statName>median</statName>
<direction>desc</direction>
</sortSpecification>
</fieldFacet>
<fieldFacet limit="7">
<field>double_dd</field>
<sortSpecification>
<statName>count</statName>
<direction>asc</direction>
</sortSpecification>
</fieldFacet>
<fieldFacet limit="1">
<field>string_sd</field>
<sortSpecification>
<statName>perc_20</statName>
<direction>desc</direction>
</sortSpecification>
</fieldFacet>
</analyticsRequest>
</analyticsRequestEnvelope>
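The sort and limit requests above tie facets to statistics by name: each <statName> must match the <name> of a statistic declared in the same request, and the limit attribute presumably caps how many facet values come back. Condensed to the essential shape:

<statistic>
  <expression>perc(20,int(int_id))</expression>
  <name>perc_20</name>                  <!-- label referenced below -->
</statistic>
<fieldFacet limit="1">                  <!-- presumably: at most one facet value returned -->
  <field>string_sd</field>
  <sortSpecification>
    <statName>perc_20</statName>        <!-- must match the <name> above -->
    <direction>desc</direction>         <!-- asc or desc -->
  </sortSpecification>
</fieldFacet>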

@ -1,496 +1,496 @@
<?xml version="1.0" encoding="UTF-8"?>
<analyticsRequestEnvelope stats="true" olap="true">
<analyticsRequest>
<name>sum</name>
<statistic>
<expression>sum(int(int_id))</expression>
<name>int</name>
</statistic>
<statistic>
<expression>sum(long(long_ld))</expression>
<name>long</name>
</statistic>
<statistic>
<expression>sum(float(float_fd))</expression>
<name>float</name>
</statistic>
<statistic>
<expression>sum(double(double_dd))</expression>
<name>double</name>
</statistic>
<fieldFacet>
<field>string_sd</field>
</fieldFacet>
<fieldFacet>
<field>date_dtd</field>
</fieldFacet>
</analyticsRequest>
<analyticsRequest>
<name>mean</name>
<statistic>
<expression>mean(int(int_id))</expression>
<name>int</name>
</statistic>
<statistic>
<expression>mean(long(long_ld))</expression>
<name>long</name>
</statistic>
<statistic>
<expression>mean(float(float_fd))</expression>
<name>float</name>
</statistic>
<statistic>
<expression>mean(double(double_dd))</expression>
<name>double</name>
</statistic>
<fieldFacet>
<field>string_sd</field>
</fieldFacet>
<fieldFacet>
<field>date_dtd</field>
</fieldFacet>
</analyticsRequest>
<analyticsRequest>
<name>sumOfSquares</name>
<statistic>
<expression>sumofsquares(int(int_id))</expression>
<name>int</name>
</statistic>
<statistic>
<expression>sumofsquares(long(long_ld))</expression>
<name>long</name>
</statistic>
<statistic>
<expression>sumofsquares(float(float_fd))</expression>
<name>float</name>
</statistic>
<statistic>
<expression>sumofsquares(double(double_dd))</expression>
<name>double</name>
</statistic>
<fieldFacet>
<field>string_sd</field>
</fieldFacet>
<fieldFacet>
<field>date_dtd</field>
</fieldFacet>
</analyticsRequest>
<analyticsRequest>
<name>stddev</name>
<statistic>
<expression>stddev(int(int_id))</expression>
<name>int</name>
</statistic>
<statistic>
<expression>stddev(long(long_ld))</expression>
<name>long</name>
</statistic>
<statistic>
<expression>stddev(float(float_fd))</expression>
<name>float</name>
</statistic>
<statistic>
<expression>stddev(double(double_dd))</expression>
<name>double</name>
</statistic>
<fieldFacet>
<field>string_sd</field>
</fieldFacet>
<fieldFacet>
<field>date_dtd</field>
</fieldFacet>
</analyticsRequest>
<analyticsRequest>
<name>median</name>
<statistic>
<expression>median(int(int_id))</expression>
<name>int</name>
</statistic>
<statistic>
<expression>median(long(long_ld))</expression>
<name>long</name>
</statistic>
<statistic>
<expression>median(float(float_fd))</expression>
<name>float</name>
</statistic>
<statistic>
<expression>median(double(double_dd))</expression>
<name>double</name>
</statistic>
<fieldFacet>
<field>string_sd</field>
</fieldFacet>
<fieldFacet>
<field>date_dtd</field>
</fieldFacet>
</analyticsRequest>
<analyticsRequest>
<name>perc_20 numeric</name>
<statistic>
<expression>perc(20,int(int_id))</expression>
<name>int</name>
</statistic>
<statistic>
<expression>perc(20,long(long_ld))</expression>
<name>long</name>
</statistic>
<statistic>
<expression>perc(20,float(float_fd))</expression>
<name>float</name>
</statistic>
<statistic>
<expression>perc(20,double(double_dd))</expression>
<name>double</name>
</statistic>
<fieldFacet>
<field>string_sd</field>
</fieldFacet>
<fieldFacet>
<field>date_dtd</field>
</fieldFacet>
</analyticsRequest>
<analyticsRequest>
<name>perc_20</name>
<statistic>
<expression>perc(20,str(string_sd))</expression>
<name>str</name>
</statistic>
<statistic>
<expression>perc(20,date(date_dtd))</expression>
<name>date</name>
</statistic>
<fieldFacet>
<field>int_id</field>
</fieldFacet>
<fieldFacet>
<field>long_ld</field>
</fieldFacet>
</analyticsRequest>
<analyticsRequest>
<name>perc_60 numeric</name>
<statistic>
<expression>perc(60,int(int_id))</expression>
<name>int</name>
</statistic>
<statistic>
<expression>perc(60,long(long_ld))</expression>
<name>long</name>
</statistic>
<statistic>
<expression>perc(60,float(float_fd))</expression>
<name>float</name>
</statistic>
<statistic>
<expression>perc(60,double(double_dd))</expression>
<name>double</name>
</statistic>
<fieldFacet>
<field>string_sd</field>
</fieldFacet>
<fieldFacet>
<field>date_dtd</field>
</fieldFacet>
</analyticsRequest>
<analyticsRequest>
<name>perc_60</name>
<statistic>
<expression>perc(60,str(string_sd))</expression>
<name>str</name>
</statistic>
<statistic>
<expression>perc(60,date(date_dtd))</expression>
<name>date</name>
</statistic>
<fieldFacet>
<field>int_id</field>
</fieldFacet>
<fieldFacet>
<field>long_ld</field>
</fieldFacet>
</analyticsRequest>
<analyticsRequest>
<name>min numeric</name>
<statistic>
<expression>min(int(int_id))</expression>
<name>int</name>
</statistic>
<statistic>
<expression>min(long(long_ld))</expression>
<name>long</name>
</statistic>
<statistic>
<expression>min(float(float_fd))</expression>
<name>float</name>
</statistic>
<statistic>
<expression>min(double(double_dd))</expression>
<name>double</name>
</statistic>
<fieldFacet>
<field>string_sd</field>
</fieldFacet>
<fieldFacet>
<field>date_dtd</field>
</fieldFacet>
</analyticsRequest>
<analyticsRequest>
<name>min</name>
<statistic>
<expression>min(str(string_sd))</expression>
<name>str</name>
</statistic>
<statistic>
<expression>min(date(date_dtd))</expression>
<name>date</name>
</statistic>
<fieldFacet>
<field>int_id</field>
</fieldFacet>
<fieldFacet>
<field>long_ld</field>
</fieldFacet>
</analyticsRequest>
<analyticsRequest>
<name>max numeric</name>
<statistic>
<expression>max(int(int_id))</expression>
<name>int</name>
</statistic>
<statistic>
<expression>max(long(long_ld))</expression>
<name>long</name>
</statistic>
<statistic>
<expression>max(float(float_fd))</expression>
<name>float</name>
</statistic>
<statistic>
<expression>max(double(double_dd))</expression>
<name>double</name>
</statistic>
<fieldFacet>
<field>string_sd</field>
</fieldFacet>
<fieldFacet>
<field>date_dtd</field>
</fieldFacet>
</analyticsRequest>
<analyticsRequest>
<name>max</name>
<statistic>
<expression>max(str(string_sd))</expression>
<name>str</name>
</statistic>
<statistic>
<expression>max(date(date_dtd))</expression>
<name>date</name>
</statistic>
<fieldFacet>
<field>int_id</field>
</fieldFacet>
<fieldFacet>
<field>long_ld</field>
</fieldFacet>
</analyticsRequest>
<analyticsRequest>
<name>count numeric</name>
<statistic>
<expression>count(int(int_id))</expression>
<name>int</name>
</statistic>
<statistic>
<expression>count(long(long_ld))</expression>
<name>long</name>
</statistic>
<statistic>
<expression>count(float(float_fd))</expression>
<name>float</name>
</statistic>
<statistic>
<expression>count(double(double_dd))</expression>
<name>double</name>
</statistic>
<fieldFacet>
<field>string_sd</field>
</fieldFacet>
<fieldFacet>
<field>date_dtd</field>
</fieldFacet>
</analyticsRequest>
<analyticsRequest>
<name>count</name>
<statistic>
<expression>count(str(string_sd))</expression>
<name>str</name>
</statistic>
<statistic>
<expression>count(date(date_dtd))</expression>
<name>date</name>
</statistic>
<fieldFacet>
<field>int_id</field>
</fieldFacet>
<fieldFacet>
<field>long_ld</field>
</fieldFacet>
</analyticsRequest>
<analyticsRequest>
<name>unique numeric</name>
<statistic>
<expression>unique(int(int_id))</expression>
<name>int</name>
</statistic>
<statistic>
<expression>unique(long(long_ld))</expression>
<name>long</name>
</statistic>
<statistic>
<expression>unique(float(float_fd))</expression>
<name>float</name>
</statistic>
<statistic>
<expression>unique(double(double_dd))</expression>
<name>double</name>
</statistic>
<fieldFacet>
<field>string_sd</field>
</fieldFacet>
<fieldFacet>
<field>date_dtd</field>
</fieldFacet>
</analyticsRequest>
<analyticsRequest>
<name>unique</name>
<statistic>
<expression>unique(str(string_sd))</expression>
<name>str</name>
</statistic>
<statistic>
<expression>unique(date(date_dtd))</expression>
<name>date</name>
</statistic>
<fieldFacet>
<field>int_id</field>
</fieldFacet>
<fieldFacet>
<field>long_ld</field>
</fieldFacet>
</analyticsRequest>
<analyticsRequest>
<name>missing numeric</name>
<statistic>
<expression>missing(int{int_id})</expression>
<name>int</name>
</statistic>
<statistic>
<expression>missing(long{long_ld})</expression>
<name>long</name>
</statistic>
<statistic>
<expression>missing(float{float_fd})</expression>
<name>float</name>
</statistic>
<statistic>
<expression>missing(double{double_dd})</expression>
<name>double</name>
</statistic>
<fieldFacet>
<field>string_sd</field>
</fieldFacet>
<fieldFacet>
<field>date_dtd</field>
</fieldFacet>
</analyticsRequest>
<analyticsRequest>
<name>missing</name>
<statistic>
<expression>missing(str{string_sd})</expression>
<name>str</name>
</statistic>
<statistic>
<expression>missing(date{date_dtd})</expression>
<name>date</name>
</statistic>
<fieldFacet>
<field>int_id</field>
</fieldFacet>
<fieldFacet>
<field>long_ld</field>
</fieldFacet>
</analyticsRequest>
<analyticsRequest>
<name>multivalued</name>
<statistic>
<expression>mean(int(int_id))</expression>
<name>mean</name>
</statistic>
<fieldFacet>
<field>long_ldm</field>
</fieldFacet>
<fieldFacet>
<field>string_sdm</field>
</fieldFacet>
<fieldFacet>
<field>date_dtdm</field>
</fieldFacet>
</analyticsRequest>
<analyticsRequest>
<name>missing facet</name>
<statistic>
<expression>mean(int(int_id))</expression>
<name>mean</name>
</statistic>
<fieldFacet>
<field>date_dtd</field>
</fieldFacet>
<fieldFacet showMissing="true">
<field>string_sd</field>
</fieldFacet>
<fieldFacet showMissing="true">
<field>date_dtdm</field>
</fieldFacet>
</analyticsRequest>
</analyticsRequestEnvelope>
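Two details of this file are easy to miss: the missing(...) statistics wrap their field in braces (int{int_id}) where every other function uses parentheses (int(int_id)), and the closing "missing facet" request opts individual facets into missing-value buckets. A minimal sketch of the latter, assuming showMissing="true" adds a bucket for documents lacking the field:

<fieldFacet>                            <!-- default: no missing-value bucket -->
  <field>date_dtd</field>
</fieldFacet>
<fieldFacet showMissing="true">         <!-- assumed: adds a missing-value bucket -->
  <field>string_sd</field>
</fieldFacet>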

@ -1,246 +1,246 @@
<?xml version="1.0" encoding="UTF-8"?>
<analyticsRequestEnvelope stats="true" olap="true">
<analyticsRequest>
<name>Add Request</name>
<statistic>
<expression>sum(add(int(int_id),float(float_fd)))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>sum(double(add_if_dd))</expression>
<name>sum calced</name>
</statistic>
<statistic>
<expression>mean(add(long(long_ld),double(double_dd),float(float_fd)))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>mean(double(add_ldf_dd))</expression>
<name>mean calced</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Multiply Request</name>
<statistic>
<expression>sum(mult(int(int_id),float(float_fd)))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>sum(double(mult_if_dd))</expression>
<name>sum calced</name>
</statistic>
<statistic>
<expression>mean(mult(long(long_ld),double(double_dd),float(float_fd)))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>mean(double(mult_ldf_dd))</expression>
<name>mean calced</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Divide Request</name>
<statistic>
<expression>sum(div(int(int_id),float(float_fd)))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>sum(double(div_if_dd))</expression>
<name>sum calced</name>
</statistic>
<statistic>
<expression>mean(div(long(long_ld),double(double_dd)))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>mean(double(div_ld_dd))</expression>
<name>mean calced</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Power Request</name>
<statistic>
<expression>sum(pow(int(int_id),float(float_fd)))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>sum(double(pow_if_dd))</expression>
<name>sum calced</name>
</statistic>
<statistic>
<expression>mean(pow(long(long_ld),double(double_dd)))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>mean(double(pow_ld_dd))</expression>
<name>mean calced</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Negate Request</name>
<statistic>
<expression>sum(neg(int(int_id)))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>sum(double(neg_i_dd))</expression>
<name>sum calced</name>
</statistic>
<statistic>
<expression>mean(neg(long(long_ld)))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>mean(double(neg_l_dd))</expression>
<name>mean calced</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Const Num Request</name>
<statistic>
<expression>sum(const_num(8))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>sum(double(const_8_dd))</expression>
<name>sum calced</name>
</statistic>
<statistic>
<expression>mean(const_num(10))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>mean(double(const_10_dd))</expression>
<name>mean calced</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Date Math Request</name>
<statistic>
<expression>median(date_math(date(date_dtd),const_str(+2YEARS)))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>median(date(dm_2y_dtd))</expression>
<name>median calced</name>
</statistic>
<statistic>
<expression>max(date_math(date(date_dtd),const_str(+2MONTHS)))</expression>
<name>max</name>
</statistic>
<statistic>
<expression>max(date(dm_2m_dtd))</expression>
<name>max calced</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Constant Date Request</name>
<statistic>
<expression>median(const_date(1800-06-30T23:59:59Z))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>median(date(const_00_dtd))</expression>
<name>median calced</name>
</statistic>
<statistic>
<expression>max(const_date(1804-06-30T23:59:59Z))</expression>
<name>max</name>
</statistic>
<statistic>
<expression>max(date(const_04_dtd))</expression>
<name>max calced</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Constant String Request</name>
<statistic>
<expression>min(const_str(this is the first))</expression>
<name>min</name>
</statistic>
<statistic>
<expression>min(str(const_first_sd))</expression>
<name>min calced</name>
</statistic>
<statistic>
<expression>max(const_str(this is the second))</expression>
<name>max</name>
</statistic>
<statistic>
<expression>max(str(const_second_sd))</expression>
<name>max calced</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Concatenate Request</name>
<statistic>
<expression>min(concat(const_str(this is the first),str(string_sd)))</expression>
<name>min</name>
</statistic>
<statistic>
<expression>min(str(concat_first_sd))</expression>
<name>min calced</name>
</statistic>
<statistic>
<expression>max(concat(const_str(this is the second),str(string_sd)))</expression>
<name>max</name>
</statistic>
<statistic>
<expression>max(str(concat_second_sd))</expression>
<name>max calced</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Reverse Request</name>
<statistic>
<expression>min(rev(str(string_sd)))</expression>
<name>min</name>
</statistic>
<statistic>
<expression>min(str(rev_sd))</expression>
<name>min calced</name>
</statistic>
<statistic>
<expression>max(rev(str(string_sd)))</expression>
<name>max</name>
</statistic>
<statistic>
<expression>max(str(rev_sd))</expression>
<name>max calced</name>
</statistic>
</analyticsRequest>
</analyticsRequestEnvelope>
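Every request in this file pairs a query-time computation with a "calced" twin read from a stored field (add_if_dd, mult_ldf_dd, dm_2y_dtd, and so on); the test presumably indexes the pre-computed value so the two statistics can be asserted equal. The recurring pattern:

<statistic>
  <expression>sum(add(int(int_id),float(float_fd)))</expression>
  <name>sum</name>                      <!-- computed at query time -->
</statistic>
<statistic>
  <!-- add_if_dd is assumed to hold int_id + float_fd, precomputed at index time -->
  <expression>sum(double(add_if_dd))</expression>
  <name>sum calced</name>
</statistic>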

@ -1,310 +1,310 @@
<?xml version="1.0" encoding="UTF-8"?>
<analyticsRequestEnvelope stats="true" olap="true">
<analyticsRequest>
<name>Sum Request</name>
<statistic>
<expression>sum(int(int_id))</expression>
<name>int_id</name>
</statistic>
<statistic>
<expression>sum(long(long_ld))</expression>
<name>long_ld</name>
</statistic>
<statistic>
<expression>sum(float(float_fd))</expression>
<name>float_fd</name>
</statistic>
<statistic>
<expression>sum(double(double_dd))</expression>
<name>double_dd</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>SumOfSquares Request</name>
<statistic>
<expression>sumofsquares(int(int_id))</expression>
<name>int_id</name>
</statistic>
<statistic>
<expression>sumofsquares(long(long_ld))</expression>
<name>long_ld</name>
</statistic>
<statistic>
<expression>sumofsquares(float(float_fd))</expression>
<name>float_fd</name>
</statistic>
<statistic>
<expression>sumofsquares(double(double_dd))</expression>
<name>double_dd</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Mean Request</name>
<statistic>
<expression>mean(int(int_id))</expression>
<name>int_id</name>
</statistic>
<statistic>
<expression>mean(long(long_ld))</expression>
<name>long_ld</name>
</statistic>
<statistic>
<expression>mean(float(float_fd))</expression>
<name>float_fd</name>
</statistic>
<statistic>
<expression>mean(double(double_dd))</expression>
<name>double_dd</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Stddev Request</name>
<statistic>
<expression>stddev(int(int_id))</expression>
<name>int_id</name>
</statistic>
<statistic>
<expression>stddev(long(long_ld))</expression>
<name>long_ld</name>
</statistic>
<statistic>
<expression>stddev(float(float_fd))</expression>
<name>float_fd</name>
</statistic>
<statistic>
<expression>stddev(double(double_dd))</expression>
<name>double_dd</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Median Request</name>
<statistic>
<expression>median(int(int_id))</expression>
<name>int_id</name>
</statistic>
<statistic>
<expression>median(long(long_ld))</expression>
<name>long_ld</name>
</statistic>
<statistic>
<expression>median(float(float_fd))</expression>
<name>float_fd</name>
</statistic>
<statistic>
<expression>median(double(double_dd))</expression>
<name>double_dd</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Perc 20 Request</name>
<statistic>
<expression>perc(20,int(int_id))</expression>
<name>int_id</name>
</statistic>
<statistic>
<expression>perc(20,long(long_ld))</expression>
<name>long_ld</name>
</statistic>
<statistic>
<expression>perc(20,float(float_fd))</expression>
<name>float_fd</name>
</statistic>
<statistic>
<expression>perc(20,double(double_dd))</expression>
<name>double_dd</name>
</statistic>
<statistic>
<expression>perc(20,date(date_dtd))</expression>
<name>date_dtd</name>
</statistic>
<statistic>
<expression>perc(20,str(string_sd))</expression>
<name>string_sd</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Perc 60 Request</name>
<statistic>
<expression>perc(60,int(int_id))</expression>
<name>int_id</name>
</statistic>
<statistic>
<expression>perc(60,long(long_ld))</expression>
<name>long_ld</name>
</statistic>
<statistic>
<expression>perc(60,float(float_fd))</expression>
<name>float_fd</name>
</statistic>
<statistic>
<expression>perc(60,double(double_dd))</expression>
<name>double_dd</name>
</statistic>
<statistic>
<expression>perc(60,date(date_dtd))</expression>
<name>date_dtd</name>
</statistic>
<statistic>
<expression>perc(60,str(string_sd))</expression>
<name>string_sd</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Min Request</name>
<statistic>
<expression>min(int(int_id))</expression>
<name>int_id</name>
</statistic>
<statistic>
<expression>min(long(long_ld))</expression>
<name>long_ld</name>
</statistic>
<statistic>
<expression>min(float(float_fd))</expression>
<name>float_fd</name>
</statistic>
<statistic>
<expression>min(double(double_dd))</expression>
<name>double_dd</name>
</statistic>
<statistic>
<expression>min(date(date_dtd))</expression>
<name>date_dtd</name>
</statistic>
<statistic>
<expression>min(str(string_sd))</expression>
<name>string_sd</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Max Request</name>
<statistic>
<expression>max(int(int_id))</expression>
<name>int_id</name>
</statistic>
<statistic>
<expression>max(long(long_ld))</expression>
<name>long_ld</name>
</statistic>
<statistic>
<expression>max(float(float_fd))</expression>
<name>float_fd</name>
</statistic>
<statistic>
<expression>max(double(double_dd))</expression>
<name>double_dd</name>
</statistic>
<statistic>
<expression>max(date(date_dtd))</expression>
<name>date_dtd</name>
</statistic>
<statistic>
<expression>max(str(string_sd))</expression>
<name>string_sd</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Unique Request</name>
<statistic>
<expression>unique(int(int_id))</expression>
<name>int_id</name>
</statistic>
<statistic>
<expression>unique(long(long_ld))</expression>
<name>long_ld</name>
</statistic>
<statistic>
<expression>unique(float(float_fd))</expression>
<name>float_fd</name>
</statistic>
<statistic>
<expression>unique(double(double_dd))</expression>
<name>double_dd</name>
</statistic>
<statistic>
<expression>unique(date(date_dtd))</expression>
<name>date_dtd</name>
</statistic>
<statistic>
<expression>unique(str(string_sd))</expression>
<name>string_sd</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Count Request</name>
<statistic>
<expression>count(int(int_id))</expression>
<name>int_id</name>
</statistic>
<statistic>
<expression>count(long(long_ld))</expression>
<name>long_ld</name>
</statistic>
<statistic>
<expression>count(float(float_fd))</expression>
<name>float_fd</name>
</statistic>
<statistic>
<expression>count(double(double_dd))</expression>
<name>double_dd</name>
</statistic>
<statistic>
<expression>count(date(date_dtd))</expression>
<name>date_dtd</name>
</statistic>
<statistic>
<expression>count(str(string_sd))</expression>
<name>string_sd</name>
</statistic>
</analyticsRequest>
<analyticsRequest>
<name>Missing Request</name>
<statistic>
<expression>missing(int{int_id})</expression>
<name>int_id</name>
</statistic>
<statistic>
<expression>missing(long{long_ld})</expression>
<name>long_ld</name>
</statistic>
<statistic>
<expression>missing(float{float_fd})</expression>
<name>float_fd</name>
</statistic>
<statistic>
<expression>missing(double{double_dd})</expression>
<name>double_dd</name>
</statistic>
<statistic>
<expression>missing(date{date_dtd})</expression>
<name>date_dtd</name>
</statistic>
<statistic>
<expression>missing(str{string_sd})</expression>
<name>string_sd</name>
</statistic>
</analyticsRequest>
</analyticsRequestEnvelope>
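Aside: each <analyticsRequest> above pairs an <expression> with a <name> under which that statistic's result is keyed in the response. A minimal sketch of the envelope shape, reusing the int_id field from the fixtures (the request and result names are illustrative):

  <?xml version="1.0" encoding="UTF-8"?>
  <analyticsRequestEnvelope stats="true" olap="true">
    <analyticsRequest>
      <name>minimal request</name>
      <statistic>
        <!-- result is returned under the key "my_sum" -->
        <expression>sum(int(int_id))</expression>
        <name>my_sum</name>
      </statistic>
    </analyticsRequest>
  </analyticsRequestEnvelope>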


@ -1,94 +1,94 @@
<?xml version="1.0" encoding="UTF-8"?>
<analyticsRequestEnvelope stats="true" olap="true">
<analyticsRequest>
<name>int request</name>
<statistic>
<expression>sum(int(int_id))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>mean(int(int_id))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>median(int(int_id))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>perc(8,int(int_id))</expression>
<name>perc_8</name>
</statistic>
<queryFacet>
<name>float1</name>
<query>float_fd:[* TO 50]</query>
</queryFacet>
<queryFacet>
<name>float2</name>
<query>float_fd:[* TO 30]</query>
</queryFacet>
</analyticsRequest>
<analyticsRequest>
<name>long request</name>
<statistic>
<expression>sum(long(long_ld))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>mean(long(long_ld))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>median(long(long_ld))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>perc(8,long(long_ld))</expression>
<name>perc_8</name>
</statistic>
<queryFacet>
<name>string</name>
<query>string_sd:abc1</query>
<query>string_sd:abc2</query>
</queryFacet>
</analyticsRequest>
<analyticsRequest>
<name>float request</name>
<statistic>
<expression>sum(float(float_fd))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>mean(float(float_fd))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>median(float(float_fd))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>perc(8,float(float_fd))</expression>
<name>perc_8</name>
</statistic>
<queryFacet>
<name>long and double</name>
<query>long_ld:[20 TO *]</query>
<query>long_ld:[30 TO *]</query>
<query>double_dd:[* TO 50]</query>
</queryFacet>
</analyticsRequest>
<analyticsRequest>
<name>int request</name>
<statistic>
<expression>sum(int(int_id))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>mean(int(int_id))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>median(int(int_id))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>perc(8,int(int_id))</expression>
<name>perc_8</name>
</statistic>
<queryFacet>
<name>float1</name>
<query>float_fd:[* TO 50]</query>
</queryFacet>
<queryFacet>
<name>float2</name>
<query>float_fd:[* TO 30]</query>
</queryFacet>
</analyticsRequest>
<analyticsRequest>
<name>long request</name>
<statistic>
<expression>sum(long(long_ld))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>mean(long(long_ld))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>median(long(long_ld))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>perc(8,long(long_ld))</expression>
<name>perc_8</name>
</statistic>
<queryFacet>
<name>string</name>
<query>string_sd:abc1</query>
<query>string_sd:abc2</query>
</queryFacet>
</analyticsRequest>
<analyticsRequest>
<name>float request</name>
<statistic>
<expression>sum(float(float_fd))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>mean(float(float_fd))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>median(float(float_fd))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>perc(8,float(float_fd))</expression>
<name>perc_8</name>
</statistic>
<queryFacet>
<name>long and double</name>
<query>long_ld:[20 TO *]</query>
<query>long_ld:[30 TO *]</query>
<query>double_dd:[* TO 50]</query>
</queryFacet>
</analyticsRequest>
</analyticsRequestEnvelope>


@ -1,319 +1,319 @@
<?xml version="1.0" encoding="UTF-8"?>
<analyticsRequestEnvelope stats="true" olap="true">
<analyticsRequest>
<name>regular int</name>
<statistic>
<expression>mean(int(int_id))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>sum(int(int_id))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>median(int(int_id))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>count(int(int_id))</expression>
<name>count</name>
</statistic>
<statistic>
<expression>sumofsquares(int(int_id))</expression>
<name>sumOfSquares</name>
</statistic>
<rangeFacet hardend="false">
<field>long_ld</field>
<start>5</start>
<end>30</end>
<gap>5</gap>
<includeBoundary>lower</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="false">
<field>double_dd</field>
<start>3</start>
<end>39</end>
<gap>7</gap>
<includeBoundary>upper</includeBoundary>
<includeBoundary>outer</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="false">
<field>date_dtd</field>
<start>1007-01-01T23:59:59Z</start>
<end>1044-01-01T23:59:59Z</end>
<gap>+7YEARS</gap>
<includeBoundary>lower</includeBoundary>
<includeBoundary>edge</includeBoundary>
<includeBoundary>outer</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
</analyticsRequest>
<analyticsRequest>
<name>regular float</name>
<statistic>
<expression>mean(float(float_fd))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>sum(float(float_fd))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>median(float(float_fd))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>count(float(float_fd))</expression>
<name>count</name>
</statistic>
<statistic>
<expression>sumofsquares(float(float_fd))</expression>
<name>sumOfSquares</name>
</statistic>
<rangeFacet hardend="false">
<field>long_ld</field>
<start>0</start>
<end>29</end>
<gap>4</gap>
<includeBoundary>all</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="false">
<field>double_dd</field>
<start>4</start>
<end>47</end>
<gap>11</gap>
<includeBoundary>edge</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="false">
<field>date_dtd</field>
<start>1004-01-01T23:59:59Z</start>
<end>1046-01-01T23:59:59Z</end>
<gap>+5YEARS</gap>
<includeBoundary>upper</includeBoundary>
<includeBoundary>edge</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
</analyticsRequest>
<analyticsRequest>
<name>hardend int</name>
<statistic>
<expression>mean(int(int_id))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>sum(int(int_id))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>median(int(int_id))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>count(int(int_id))</expression>
<name>count</name>
</statistic>
<statistic>
<expression>sumofsquares(int(int_id))</expression>
<name>sumOfSquares</name>
</statistic>
<rangeFacet hardend="true">
<field>long_ld</field>
<start>5</start>
<end>30</end>
<gap>5</gap>
<includeBoundary>lower</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="true">
<field>double_dd</field>
<start>3</start>
<end>39</end>
<gap>7</gap>
<includeBoundary>upper</includeBoundary>
<includeBoundary>outer</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="true">
<field>date_dtd</field>
<start>1007-01-01T23:59:59Z</start>
<end>1044-01-01T23:59:59Z</end>
<gap>+7YEARS</gap>
<includeBoundary>lower</includeBoundary>
<includeBoundary>edge</includeBoundary>
<includeBoundary>outer</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
</analyticsRequest>
<analyticsRequest>
<name>hardend float</name>
<statistic>
<expression>mean(float(float_fd))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>sum(float(float_fd))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>median(float(float_fd))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>count(float(float_fd))</expression>
<name>count</name>
</statistic>
<statistic>
<expression>sumofsquares(float(float_fd))</expression>
<name>sumOfSquares</name>
</statistic>
<rangeFacet hardend="true">
<field>long_ld</field>
<start>0</start>
<end>29</end>
<gap>4</gap>
<includeBoundary>all</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="true">
<field>double_dd</field>
<start>4</start>
<end>47</end>
<gap>11</gap>
<includeBoundary>edge</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="true">
<field>date_dtd</field>
<start>1004-01-01T23:59:59Z</start>
<end>1046-01-01T23:59:59Z</end>
<gap>+5YEARS</gap>
<includeBoundary>upper</includeBoundary>
<includeBoundary>edge</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
</analyticsRequest>
<analyticsRequest>
<name>multigap int</name>
<statistic>
<expression>mean(int(int_id))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>sum(int(int_id))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>median(int(int_id))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>count(int(int_id))</expression>
<name>count</name>
</statistic>
<statistic>
<expression>sumofsquares(int(int_id))</expression>
<name>sumOfSquares</name>
</statistic>
<rangeFacet hardend="false">
<field>long_ld</field>
<start>5</start>
<end>30</end>
<gap>4</gap>
<gap>2</gap>
<gap>6</gap>
<gap>3</gap>
<includeBoundary>lower</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="false">
<field>double_dd</field>
<start>3</start>
<end>39</end>
<gap>3</gap>
<gap>1</gap>
<gap>7</gap>
<includeBoundary>upper</includeBoundary>
<includeBoundary>outer</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="false">
<field>date_dtd</field>
<start>1007-01-01T23:59:59Z</start>
<end>1044-01-01T23:59:59Z</end>
<gap>+2YEARS</gap>
<gap>+7YEARS</gap>
<includeBoundary>lower</includeBoundary>
<includeBoundary>edge</includeBoundary>
<includeBoundary>outer</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
</analyticsRequest>
<analyticsRequest>
<name>multigap float</name>
<statistic>
<expression>mean(float(float_fd))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>sum(float(float_fd))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>median(float(float_fd))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>count(float(float_fd))</expression>
<name>count</name>
</statistic>
<statistic>
<expression>sumofsquares(float(float_fd))</expression>
<name>sumOfSquares</name>
</statistic>
<rangeFacet hardend="false">
<field>long_ld</field>
<start>0</start>
<end>29</end>
<gap>1</gap>
<gap>4</gap>
<includeBoundary>all</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="false">
<field>double_dd</field>
<start>4</start>
<end>47</end>
<gap>2</gap>
<gap>3</gap>
<gap>11</gap>
<includeBoundary>edge</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="false">
<field>date_dtd</field>
<start>1004-01-01T23:59:59Z</start>
<end>1046-01-01T23:59:59Z</end>
<gap>+4YEARS</gap>
<gap>+5YEARS</gap>
<includeBoundary>upper</includeBoundary>
<includeBoundary>edge</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
</analyticsRequest>
<analyticsRequest>
<name>regular int</name>
<statistic>
<expression>mean(int(int_id))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>sum(int(int_id))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>median(int(int_id))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>count(int(int_id))</expression>
<name>count</name>
</statistic>
<statistic>
<expression>sumofsquares(int(int_id))</expression>
<name>sumOfSquares</name>
</statistic>
<rangeFacet hardend="false">
<field>long_ld</field>
<start>5</start>
<end>30</end>
<gap>5</gap>
<includeBoundary>lower</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="false">
<field>double_dd</field>
<start>3</start>
<end>39</end>
<gap>7</gap>
<includeBoundary>upper</includeBoundary>
<includeBoundary>outer</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="false">
<field>date_dtd</field>
<start>1007-01-01T23:59:59Z</start>
<end>1044-01-01T23:59:59Z</end>
<gap>+7YEARS</gap>
<includeBoundary>lower</includeBoundary>
<includeBoundary>edge</includeBoundary>
<includeBoundary>outer</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
</analyticsRequest>
<analyticsRequest>
<name>regular float</name>
<statistic>
<expression>mean(float(float_fd))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>sum(float(float_fd))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>median(float(float_fd))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>count(float(float_fd))</expression>
<name>count</name>
</statistic>
<statistic>
<expression>sumofsquares(float(float_fd))</expression>
<name>sumOfSquares</name>
</statistic>
<rangeFacet hardend="false">
<field>long_ld</field>
<start>0</start>
<end>29</end>
<gap>4</gap>
<includeBoundary>all</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="false">
<field>double_dd</field>
<start>4</start>
<end>47</end>
<gap>11</gap>
<includeBoundary>edge</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="false">
<field>date_dtd</field>
<start>1004-01-01T23:59:59Z</start>
<end>1046-01-01T23:59:59Z</end>
<gap>+5YEARS</gap>
<includeBoundary>upper</includeBoundary>
<includeBoundary>edge</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
</analyticsRequest>
<analyticsRequest>
<name>hardend int</name>
<statistic>
<expression>mean(int(int_id))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>sum(int(int_id))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>median(int(int_id))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>count(int(int_id))</expression>
<name>count</name>
</statistic>
<statistic>
<expression>sumofsquares(int(int_id))</expression>
<name>sumOfSquares</name>
</statistic>
<rangeFacet hardend="true">
<field>long_ld</field>
<start>5</start>
<end>30</end>
<gap>5</gap>
<includeBoundary>lower</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="true">
<field>double_dd</field>
<start>3</start>
<end>39</end>
<gap>7</gap>
<includeBoundary>upper</includeBoundary>
<includeBoundary>outer</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="true">
<field>date_dtd</field>
<start>1007-01-01T23:59:59Z</start>
<end>1044-01-01T23:59:59Z</end>
<gap>+7YEARS</gap>
<includeBoundary>lower</includeBoundary>
<includeBoundary>edge</includeBoundary>
<includeBoundary>outer</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
</analyticsRequest>
<analyticsRequest>
<name>hardend float</name>
<statistic>
<expression>mean(float(float_fd))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>sum(float(float_fd))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>median(float(float_fd))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>count(float(float_fd))</expression>
<name>count</name>
</statistic>
<statistic>
<expression>sumofsquares(float(float_fd))</expression>
<name>sumOfSquares</name>
</statistic>
<rangeFacet hardend="true">
<field>long_ld</field>
<start>0</start>
<end>29</end>
<gap>4</gap>
<includeBoundary>all</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="true">
<field>double_dd</field>
<start>4</start>
<end>47</end>
<gap>11</gap>
<includeBoundary>edge</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="true">
<field>date_dtd</field>
<start>1004-01-01T23:59:59Z</start>
<end>1046-01-01T23:59:59Z</end>
<gap>+5YEARS</gap>
<includeBoundary>upper</includeBoundary>
<includeBoundary>edge</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
</analyticsRequest>
<analyticsRequest>
<name>multigap int</name>
<statistic>
<expression>mean(int(int_id))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>sum(int(int_id))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>median(int(int_id))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>count(int(int_id))</expression>
<name>count</name>
</statistic>
<statistic>
<expression>sumofsquares(int(int_id))</expression>
<name>sumOfSquares</name>
</statistic>
<rangeFacet hardend="false">
<field>long_ld</field>
<start>5</start>
<end>30</end>
<gap>4</gap>
<gap>2</gap>
<gap>6</gap>
<gap>3</gap>
<includeBoundary>lower</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="false">
<field>double_dd</field>
<start>3</start>
<end>39</end>
<gap>3</gap>
<gap>1</gap>
<gap>7</gap>
<includeBoundary>upper</includeBoundary>
<includeBoundary>outer</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="false">
<field>date_dtd</field>
<start>1007-01-01T23:59:59Z</start>
<end>1044-01-01T23:59:59Z</end>
<gap>+2YEARS</gap>
<gap>+7YEARS</gap>
<includeBoundary>lower</includeBoundary>
<includeBoundary>edge</includeBoundary>
<includeBoundary>outer</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
</analyticsRequest>
<analyticsRequest>
<name>multigap float</name>
<statistic>
<expression>mean(float(float_fd))</expression>
<name>mean</name>
</statistic>
<statistic>
<expression>sum(float(float_fd))</expression>
<name>sum</name>
</statistic>
<statistic>
<expression>median(float(float_fd))</expression>
<name>median</name>
</statistic>
<statistic>
<expression>count(float(float_fd))</expression>
<name>count</name>
</statistic>
<statistic>
<expression>sumofsquares(float(float_fd))</expression>
<name>sumOfSquares</name>
</statistic>
<rangeFacet hardend="false">
<field>long_ld</field>
<start>0</start>
<end>29</end>
<gap>1</gap>
<gap>4</gap>
<includeBoundary>all</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="false">
<field>double_dd</field>
<start>4</start>
<end>47</end>
<gap>2</gap>
<gap>3</gap>
<gap>11</gap>
<includeBoundary>edge</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
<rangeFacet hardend="false">
<field>date_dtd</field>
<start>1004-01-01T23:59:59Z</start>
<end>1046-01-01T23:59:59Z</end>
<gap>+4YEARS</gap>
<gap>+5YEARS</gap>
<includeBoundary>upper</includeBoundary>
<includeBoundary>edge</includeBoundary>
<otherRange>all</otherRange>
</rangeFacet>
</analyticsRequest>
</analyticsRequestEnvelope>
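Aside: the range facets above vary three knobs: hardend (whether the last bucket is clipped at <end> or allowed to run a full gap past it), one or several <gap> values, and the includeBoundary/otherRange bucketing options. A hedged sketch, reusing long_ld from the fixtures and assuming multi-gap facets consume the gaps in order with the last gap repeating (as in ordinary Solr range faceting):

  <rangeFacet hardend="false">
    <field>long_ld</field>
    <start>5</start>
    <end>30</end>
    <gap>4</gap>  <!-- bucket [5,9)  -->
    <gap>2</gap>  <!-- bucket [9,11) -->
    <gap>6</gap>  <!-- [11,17); assuming the last gap repeats, 6-wide buckets follow until 30 is passed -->
    <includeBoundary>lower</includeBoundary>  <!-- each bucket includes its lower bound -->
    <otherRange>all</otherRange>              <!-- also count before/after/between ranges -->
  </rangeFacet>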


@ -31,17 +31,17 @@
<useCompoundFile>${useCompoundFile:false}</useCompoundFile>
</indexConfig>
<!-- Enables JMX if and only if an existing MBeanServer is found, use
this if you want to configure JMX through JVM parameters. Remove
this to disable exposing Solr configuration and statistics to JMX.
If you want to connect to a particular server, specify the agentId
e.g. <jmx agentId="myAgent" />
If you want to start a new MBeanServer, specify the serviceUrl
e.g <jmx serviceurl="service:jmx:rmi:///jndi/rmi://localhost:9999/solr" />
For more details see http://wiki.apache.org/solr/SolrJmx
<!-- Enables JMX if and only if an existing MBeanServer is found, use
this if you want to configure JMX through JVM parameters. Remove
this to disable exposing Solr configuration and statistics to JMX.
If you want to connect to a particular server, specify the agentId
e.g. <jmx agentId="myAgent" />
If you want to start a new MBeanServer, specify the serviceUrl
e.g <jmx serviceurl="service:jmx:rmi:///jndi/rmi://localhost:9999/solr" />
For more details see http://wiki.apache.org/solr/SolrJmx
-->
<jmx />


@ -30,7 +30,7 @@
classpath.property="solr-dataimporthandler.jar"/>
<target name="compile-solr-dataimporthandler" unless="solr-dataimporthandler.uptodate">
<ant dir="${common-solr.dir}/contrib/dataimporthandler" target="compile-core" inheritAll="false">
<ant dir="${common-solr.dir}/contrib/dataimporthandler" target="compile-core" inheritAll="false">
<propertyset refid="uptodate.and.compiled.properties"/>
</ant>
</target>
@ -40,7 +40,7 @@
we should probably fix this, the same issue exists in modules
-->
<target name="compile-solr-dataimporthandler-tests">
<ant dir="${common-solr.dir}/contrib/dataimporthandler" target="compile-test" inheritAll="false">
<ant dir="${common-solr.dir}/contrib/dataimporthandler" target="compile-test" inheritAll="false">
<propertyset refid="uptodate.and.compiled.properties"/>
</ant>
</target>


@ -18,7 +18,7 @@
-->
<project name="solr-dataimporthandler" default="default">
<description>
Data Import Handler
</description>


@ -25,13 +25,13 @@
query="SELECT CODE, COUNTRY_NAME FROM COUNTRIES"
>
<field column="CODE" name="DO_NOT_INDEX" />
<field column="CODE" name="DO_NOT_INDEX" />
</entity>
<entity
name="Sports"
processor="SqlEntityProcessor"
dataSource="hsqldb"
dataSource="hsqldb"
query="SELECT PERSON_ID, SPORT_NAME FROM PEOPLE_SPORTS WHERE PERSON_ID=${People.ID}"
/>
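Aside: the ${People.ID} placeholder above is resolved against the current row of the enclosing entity, so the Sports query runs once per People row. A minimal sketch of that parent/child shape (table, column, and connection details are illustrative):

  <dataConfig>
    <dataSource name="hsqldb" type="JdbcDataSource"
                driver="org.hsqldb.jdbcDriver" url="jdbc:hsqldb:mem:test"/>
    <document>
      <entity name="People" dataSource="hsqldb" query="SELECT ID, NAME FROM PEOPLE">
        <field column="NAME" name="name"/>
        <!-- child entity: executed once per parent row, with ${People.ID}
             substituted from the current People row -->
        <entity name="Sports" dataSource="hsqldb"
                query="SELECT SPORT_NAME FROM PEOPLE_SPORTS WHERE PERSON_ID=${People.ID}"/>
      </entity>
    </document>
  </dataConfig>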


@ -1,9 +1,9 @@
<dataConfig>
<dataSource type="MockDataSource" />
<document>
<entity name="x" query="select * from x">
<field column="id" />
<field column="desc" />
</entity>
</document>
<dataSource type="MockDataSource" />
<document>
<entity name="x" query="select * from x">
<field column="id" />
<field column="desc" />
</entity>
</document>
</dataConfig>


@ -1,10 +1,10 @@
<dataConfig>
<dataSource type="MockDataSource" />
<dataSource name="mockDs" type="TestDocBuilder2$MockDataSource2" />
<document>
<entity name="x" query="select * from x" transformer="TestDocBuilder2$MockTransformer">
<field column="id" />
<field column="desc" />
</entity>
</document>
<dataSource type="MockDataSource" />
<dataSource name="mockDs" type="TestDocBuilder2$MockDataSource2" />
<document>
<entity name="x" query="select * from x" transformer="TestDocBuilder2$MockTransformer">
<field column="id" />
<field column="desc" />
</entity>
</document>
</dataConfig>


@ -240,7 +240,7 @@
</requestHandler>
<requestHandler name="/dataimport" class="org.apache.solr.handler.dataimport.DataImportHandler">
<lst name="defaults">
<lst name="defaults">
<str name="dots.in.hsqldb.driver">org.hsqldb.jdbcDriver</str>
</lst>
</requestHandler>
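Aside: besides handler-local defaults like the dots.in.hsqldb.driver entry above, a typical registration also points the handler at its data config. A minimal sketch (the data-config.xml file name is illustrative):

  <requestHandler name="/dataimport" class="org.apache.solr.handler.dataimport.DataImportHandler">
    <lst name="defaults">
      <!-- DIH configuration file, resolved relative to the core's conf directory -->
      <str name="config">data-config.xml</str>
    </lst>
  </requestHandler>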


@ -1,9 +1,9 @@
<dataConfig>
<dataSource type="MockDataSource"/>
<document>
<entity name="x" query="select * from x">
<field column="id" />
<field column="desc" />
</entity>
</document>
<document>
<entity name="x" query="select * from x">
<field column="id" />
<field column="desc" />
</entity>
</document>
</dataConfig>


@ -159,7 +159,7 @@
is not specified in the request.
-->
<requestHandler name="standard" class="solr.StandardRequestHandler">
<bool name="httpCaching">true</bool>
<bool name="httpCaching">true</bool>
</requestHandler>
<requestHandler name="dismax" class="solr.SearchHandler" >
<lst name="defaults">


@ -23,9 +23,9 @@
Language Identifier contrib for extracting language from a document being indexed
</description>
<import file="../contrib-build.xml"/>
<import file="../contrib-build.xml"/>
<path id="classpath">
<path id="classpath">
<fileset dir="../extraction/lib" excludes="${common.classpath.excludes}"/>
<fileset dir="lib" excludes="${common.classpath.excludes}"/>
<path refid="solr.base.classpath"/>


@ -62,36 +62,36 @@
</requestHandler>
<updateRequestProcessorChain name="lang_id">
<processor class="org.apache.solr.update.processor.TikaLanguageIdentifierUpdateProcessorFactory">
<!-- Can take defaults, invariants and appends just like req handlers-->
<lst name="defaults">
<bool name="langid">true</bool>
<str name="langid.fl">name,subject</str>
<bool name="langid.map">true</bool>
<str name="langid.langField">language_s</str>
<str name="langid.langsField">language_sm</str>
<str name="langid.map.lcmap">th:thai</str>
<float name="threshold">0.5</float>
<str name="langid.fallback">fallback</str>
</lst>
</processor>
<processor class="org.apache.solr.update.processor.TikaLanguageIdentifierUpdateProcessorFactory">
<!-- Can take defaults, invariants and appends just like req handlers-->
<lst name="defaults">
<bool name="langid">true</bool>
<str name="langid.fl">name,subject</str>
<bool name="langid.map">true</bool>
<str name="langid.langField">language_s</str>
<str name="langid.langsField">language_sm</str>
<str name="langid.map.lcmap">th:thai</str>
<float name="threshold">0.5</float>
<str name="langid.fallback">fallback</str>
</lst>
</processor>
<processor class="solr.RunUpdateProcessorFactory" />
</updateRequestProcessorChain>
<updateRequestProcessorChain name="lang_id_alt">
<processor class="org.apache.solr.update.processor.LangDetectLanguageIdentifierUpdateProcessorFactory">
<!-- Can take defaults, invariants and appends just like req handlers-->
<lst name="defaults">
<bool name="langid">true</bool>
<str name="langid.fl">name,subject</str>
<bool name="langid.map">true</bool>
<str name="langid.langField">language_s</str>
<str name="langid.langsField">language_sm</str>
<str name="langid.map.lcmap">th:thai</str>
<float name="threshold">0.5</float>
<str name="langid.fallback">fallback</str>
</lst>
</processor>
<processor class="org.apache.solr.update.processor.LangDetectLanguageIdentifierUpdateProcessorFactory">
<!-- Can take defaults, invariants and appends just like req handlers-->
<lst name="defaults">
<bool name="langid">true</bool>
<str name="langid.fl">name,subject</str>
<bool name="langid.map">true</bool>
<str name="langid.langField">language_s</str>
<str name="langid.langsField">language_sm</str>
<str name="langid.map.lcmap">th:thai</str>
<float name="threshold">0.5</float>
<str name="langid.fallback">fallback</str>
</lst>
</processor>
<processor class="solr.RunUpdateProcessorFactory" />
</updateRequestProcessorChain>
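Aside: defining an updateRequestProcessorChain does not by itself activate it; updates have to select the chain, either per request via the update.chain parameter or through a handler default. A minimal sketch wiring the lang_id chain into the update handler:

  <requestHandler name="/update" class="solr.UpdateRequestHandler">
    <lst name="defaults">
      <!-- route all updates through the lang_id chain defined above -->
      <str name="update.chain">lang_id</str>
    </lst>
  </requestHandler>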


@ -184,8 +184,8 @@
<!-- sortMissingLast and sortMissingFirst attributes are optional attributes are
currently supported on types that are sorted internally as strings
and on numeric types.
This includes "string","boolean", and, as of 3.5 (and 4.x),
int, float, long, date, double, including the "Trie" variants.
This includes "string","boolean", and, as of 3.5 (and 4.x),
int, float, long, date, double, including the "Trie" variants.
- If sortMissingLast="true", then a sort on this field will cause documents
without the field to come after documents with the field,
regardless of the requested sort order (asc or desc).
@ -293,9 +293,9 @@
<!-- A general text field that has reasonable, generic
cross-language defaults: it tokenizes with StandardTokenizer,
removes stop words from case-insensitive "stopwords.txt"
(empty by default), and down cases. At query time only, it
also applies synonyms. -->
removes stop words from case-insensitive "stopwords.txt"
(empty by default), and down cases. At query time only, it
also applies synonyms. -->
<fieldType name="text_general" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.StandardTokenizerFactory"/>
@ -331,11 +331,11 @@
/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<filter class="solr.EnglishMinimalStemFilterFactory"/>
-->
-->
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
<analyzer type="query">
@ -347,23 +347,23 @@
/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<filter class="solr.EnglishMinimalStemFilterFactory"/>
-->
-->
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldType>
<!-- A text field with defaults appropriate for English, plus
aggressive word-splitting and autophrase features enabled.
This field is just like text_en, except it adds
WordDelimiterFilter to enable splitting and matching of
words on case-change, alpha numeric boundaries, and
non-alphanumeric chars. This means certain compound word
cases will work, for example query "wi fi" will match
document "WiFi" or "wi-fi".
aggressive word-splitting and autophrase features enabled.
This field is just like text_en, except it adds
WordDelimiterFilter to enable splitting and matching of
words on case-change, alpha numeric boundaries, and
non-alphanumeric chars. This means certain compound word
cases will work, for example query "wi fi" will match
document "WiFi" or "wi-fi".
-->
<fieldType name="text_en_splitting" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
<analyzer type="index">
@ -415,7 +415,7 @@
</fieldType>
<!-- Just like text_general except it reverses the characters of
each token, to enable more efficient leading wildcard queries. -->
each token, to enable more efficient leading wildcard queries. -->
<fieldType name="text_general_rev" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.StandardTokenizerFactory"/>
@ -492,10 +492,10 @@
a token of "foo|1.4" would be indexed as "foo" with a payload of 1.4f
Attributes of the DelimitedPayloadTokenFilterFactory :
"delimiter" - a one character delimiter. Default is | (pipe)
"encoder" - how to encode the following value into a payload
float -> org.apache.lucene.analysis.payloads.FloatEncoder,
integer -> o.a.l.a.p.IntegerEncoder
identity -> o.a.l.a.p.IdentityEncoder
"encoder" - how to encode the following value into a payload
float -> org.apache.lucene.analysis.payloads.FloatEncoder,
integer -> o.a.l.a.p.IntegerEncoder
identity -> o.a.l.a.p.IdentityEncoder
Fully Qualified class name implementing PayloadEncoder, Encoder must have a no arg constructor.
-->
<filter class="solr.DelimitedPayloadTokenFilterFactory" encoder="float"/>
@ -516,10 +516,10 @@
-->
<fieldType name="descendent_path" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
</analyzer>
<analyzer type="query">
<tokenizer class="solr.KeywordTokenizerFactory" />
<tokenizer class="solr.KeywordTokenizerFactory" />
</analyzer>
</fieldType>
<!--
@ -528,10 +528,10 @@
-->
<fieldType name="ancestor_path" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.KeywordTokenizerFactory" />
<tokenizer class="solr.KeywordTokenizerFactory" />
</analyzer>
<analyzer type="query">
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
</analyzer>
</fieldType>
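Aside: the two path types above are mirror images. With descendent_path the stored value is split at index time and the query is kept whole, so a query finds a path and everything below it; ancestor_path swaps the analyzers, so a query finds a path and everything above it. A hedged sketch (the field name and paths are illustrative):

  <!-- type="descendent_path": indexing path=/aaa/bbb/ccc produces the tokens
       /aaa, /aaa/bbb and /aaa/bbb/ccc, so the keyword-analyzed query
       path:/aaa/bbb matches this document (that path and all its descendants).
       type="ancestor_path": the stored path stays one token and the *query*
       is split instead, so path:/aaa/bbb/ccc matches documents stored at
       /aaa, /aaa/bbb or /aaa/bbb/ccc. -->
  <field name="path" type="descendent_path" indexed="true" stored="true"/>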


@ -955,7 +955,7 @@
<!-- maximum threshold of documents a query term can appear to be considered for correction -->
<float name="maxQueryFrequency">0.01</float>
<!-- uncomment this to require suggestions to occur in 1% of the documents
<float name="thresholdTokenFrequency">.01</float>
<float name="thresholdTokenFrequency">.01</float>
-->
</lst>


@ -196,8 +196,8 @@
<!-- sortMissingLast and sortMissingFirst attributes are optional attributes are
currently supported on types that are sorted internally as strings
and on numeric types.
This includes "string","boolean", and, as of 3.5 (and 4.x),
int, float, long, date, double, including the "Trie" variants.
This includes "string","boolean", and, as of 3.5 (and 4.x),
int, float, long, date, double, including the "Trie" variants.
- If sortMissingLast="true", then a sort on this field will cause documents
without the field to come after documents with the field,
regardless of the requested sort order (asc or desc).
@ -308,9 +308,9 @@
<!-- A general text field that has reasonable, generic
cross-language defaults: it tokenizes with StandardTokenizer,
removes stop words from case-insensitive "stopwords.txt"
(empty by default), and down cases. At query time only, it
also applies synonyms. -->
removes stop words from case-insensitive "stopwords.txt"
(empty by default), and down cases. At query time only, it
also applies synonyms. -->
<fieldType name="text_general" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.StandardTokenizerFactory"/>
@ -346,11 +346,11 @@
/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<filter class="solr.EnglishMinimalStemFilterFactory"/>
-->
-->
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
<analyzer type="query">
@ -362,23 +362,23 @@
/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<filter class="solr.EnglishMinimalStemFilterFactory"/>
-->
-->
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldType>
<!-- A text field with defaults appropriate for English, plus
aggressive word-splitting and autophrase features enabled.
This field is just like text_en, except it adds
WordDelimiterFilter to enable splitting and matching of
words on case-change, alpha numeric boundaries, and
non-alphanumeric chars. This means certain compound word
cases will work, for example query "wi fi" will match
document "WiFi" or "wi-fi".
aggressive word-splitting and autophrase features enabled.
This field is just like text_en, except it adds
WordDelimiterFilter to enable splitting and matching of
words on case-change, alpha numeric boundaries, and
non-alphanumeric chars. This means certain compound word
cases will work, for example query "wi fi" will match
document "WiFi" or "wi-fi".
-->
<fieldType name="text_en_splitting" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
<analyzer type="index">
@ -430,7 +430,7 @@
</fieldType>
<!-- Just like text_general except it reverses the characters of
each token, to enable more efficient leading wildcard queries. -->
each token, to enable more efficient leading wildcard queries. -->
<fieldType name="text_general_rev" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.StandardTokenizerFactory"/>
@ -505,10 +505,10 @@
a token of "foo|1.4" would be indexed as "foo" with a payload of 1.4f
Attributes of the DelimitedPayloadTokenFilterFactory :
"delimiter" - a one character delimiter. Default is | (pipe)
"encoder" - how to encode the following value into a payload
float -> org.apache.lucene.analysis.payloads.FloatEncoder,
integer -> o.a.l.a.p.IntegerEncoder
identity -> o.a.l.a.p.IdentityEncoder
"encoder" - how to encode the following value into a payload
float -> org.apache.lucene.analysis.payloads.FloatEncoder,
integer -> o.a.l.a.p.IntegerEncoder
identity -> o.a.l.a.p.IdentityEncoder
Fully Qualified class name implementing PayloadEncoder, Encoder must have a no arg constructor.
-->
<filter class="solr.DelimitedPayloadTokenFilterFactory" encoder="float"/>
@ -529,10 +529,10 @@
-->
<fieldType name="descendent_path" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
</analyzer>
<analyzer type="query">
<tokenizer class="solr.KeywordTokenizerFactory" />
<tokenizer class="solr.KeywordTokenizerFactory" />
</analyzer>
</fieldType>
<!--
@ -541,10 +541,10 @@
-->
<fieldType name="ancestor_path" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.KeywordTokenizerFactory" />
<tokenizer class="solr.KeywordTokenizerFactory" />
</analyzer>
<analyzer type="query">
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
</analyzer>
</fieldType>


@ -974,7 +974,7 @@
<!-- maximum threshold of documents a query term can appear to be considered for correction -->
<float name="maxQueryFrequency">0.01</float>
<!-- uncomment this to require suggestions to occur in 1% of the documents
<float name="thresholdTokenFrequency">.01</float>
<float name="thresholdTokenFrequency">.01</float>
-->
</lst>


@ -196,8 +196,8 @@
<!-- sortMissingLast and sortMissingFirst attributes are optional attributes are
currently supported on types that are sorted internally as strings
and on numeric types.
This includes "string","boolean", and, as of 3.5 (and 4.x),
int, float, long, date, double, including the "Trie" variants.
This includes "string","boolean", and, as of 3.5 (and 4.x),
int, float, long, date, double, including the "Trie" variants.
- If sortMissingLast="true", then a sort on this field will cause documents
without the field to come after documents with the field,
regardless of the requested sort order (asc or desc).
@ -308,9 +308,9 @@
<!-- A general text field that has reasonable, generic
cross-language defaults: it tokenizes with StandardTokenizer,
removes stop words from case-insensitive "stopwords.txt"
(empty by default), and down cases. At query time only, it
also applies synonyms. -->
removes stop words from case-insensitive "stopwords.txt"
(empty by default), and down cases. At query time only, it
also applies synonyms. -->
<fieldType name="text_general" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.StandardTokenizerFactory"/>
@ -346,11 +346,11 @@
/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<filter class="solr.EnglishMinimalStemFilterFactory"/>
-->
-->
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
<analyzer type="query">
@ -362,23 +362,23 @@
/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<filter class="solr.EnglishMinimalStemFilterFactory"/>
-->
-->
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldType>
<!-- A text field with defaults appropriate for English, plus
aggressive word-splitting and autophrase features enabled.
This field is just like text_en, except it adds
WordDelimiterFilter to enable splitting and matching of
words on case-change, alpha numeric boundaries, and
non-alphanumeric chars. This means certain compound word
cases will work, for example query "wi fi" will match
document "WiFi" or "wi-fi".
aggressive word-splitting and autophrase features enabled.
This field is just like text_en, except it adds
WordDelimiterFilter to enable splitting and matching of
words on case-change, alpha numeric boundaries, and
non-alphanumeric chars. This means certain compound word
cases will work, for example query "wi fi" will match
document "WiFi" or "wi-fi".
-->
<fieldType name="text_en_splitting" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
<analyzer type="index">
@ -430,7 +430,7 @@
</fieldType>
<!-- Just like text_general except it reverses the characters of
each token, to enable more efficient leading wildcard queries. -->
each token, to enable more efficient leading wildcard queries. -->
<fieldType name="text_general_rev" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.StandardTokenizerFactory"/>
@ -505,10 +505,10 @@
a token of "foo|1.4" would be indexed as "foo" with a payload of 1.4f
Attributes of the DelimitedPayloadTokenFilterFactory :
"delimiter" - a one character delimiter. Default is | (pipe)
"encoder" - how to encode the following value into a payload
float -> org.apache.lucene.analysis.payloads.FloatEncoder,
integer -> o.a.l.a.p.IntegerEncoder
identity -> o.a.l.a.p.IdentityEncoder
"encoder" - how to encode the following value into a payload
float -> org.apache.lucene.analysis.payloads.FloatEncoder,
integer -> o.a.l.a.p.IntegerEncoder
identity -> o.a.l.a.p.IdentityEncoder
Fully Qualified class name implementing PayloadEncoder, Encoder must have a no arg constructor.
-->
<filter class="solr.DelimitedPayloadTokenFilterFactory" encoder="float"/>
@ -529,10 +529,10 @@
-->
<fieldType name="descendent_path" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
</analyzer>
<analyzer type="query">
<tokenizer class="solr.KeywordTokenizerFactory" />
<tokenizer class="solr.KeywordTokenizerFactory" />
</analyzer>
</fieldType>
<!--
@ -541,10 +541,10 @@
-->
<fieldType name="ancestor_path" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.KeywordTokenizerFactory" />
<tokenizer class="solr.KeywordTokenizerFactory" />
</analyzer>
<analyzer type="query">
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
</analyzer>
</fieldType>


@ -978,7 +978,7 @@
<!-- maximum threshold of documents a query term can appear to be considered for correction -->
<float name="maxQueryFrequency">0.01</float>
<!-- uncomment this to require suggestions to occur in 1% of the documents
<float name="thresholdTokenFrequency">.01</float>
<float name="thresholdTokenFrequency">.01</float>
-->
</lst>


@ -151,8 +151,8 @@
<!-- sortMissingLast and sortMissingFirst attributes are optional attributes are
currently supported on types that are sorted internally as strings
and on numeric types.
This includes "string","boolean", and, as of 3.5 (and 4.x),
int, float, long, date, double, including the "Trie" variants.
This includes "string","boolean", and, as of 3.5 (and 4.x),
int, float, long, date, double, including the "Trie" variants.
- If sortMissingLast="true", then a sort on this field will cause documents
without the field to come after documents with the field,
regardless of the requested sort order (asc or desc).
@ -260,9 +260,9 @@
<!-- A general text field that has reasonable, generic
cross-language defaults: it tokenizes with StandardTokenizer,
removes stop words from case-insensitive "stopwords.txt"
(empty by default), and down cases. At query time only, it
also applies synonyms. -->
removes stop words from case-insensitive "stopwords.txt"
(empty by default), and down cases. At query time only, it
also applies synonyms. -->
<fieldType name="text_general" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.StandardTokenizerFactory"/>
@ -298,11 +298,11 @@
/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<filter class="solr.EnglishMinimalStemFilterFactory"/>
-->
-->
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
<analyzer type="query">
@ -314,23 +314,23 @@
/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<filter class="solr.EnglishMinimalStemFilterFactory"/>
-->
-->
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldType>
<!-- A text field with defaults appropriate for English, plus
aggressive word-splitting and autophrase features enabled.
This field is just like text_en, except it adds
WordDelimiterFilter to enable splitting and matching of
words on case-change, alpha numeric boundaries, and
non-alphanumeric chars. This means certain compound word
cases will work, for example query "wi fi" will match
document "WiFi" or "wi-fi".
aggressive word-splitting and autophrase features enabled.
This field is just like text_en, except it adds
WordDelimiterFilter to enable splitting and matching of
words on case-change, alpha numeric boundaries, and
non-alphanumeric chars. This means certain compound word
cases will work, for example query "wi fi" will match
document "WiFi" or "wi-fi".
-->
<fieldType name="text_en_splitting" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
<analyzer type="index">
@ -382,7 +382,7 @@
</fieldType>
<!-- Just like text_general except it reverses the characters of
each token, to enable more efficient leading wildcard queries. -->
each token, to enable more efficient leading wildcard queries. -->
<fieldType name="text_general_rev" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.StandardTokenizerFactory"/>
@ -459,10 +459,10 @@
a token of "foo|1.4" would be indexed as "foo" with a payload of 1.4f
Attributes of the DelimitedPayloadTokenFilterFactory :
"delimiter" - a one character delimiter. Default is | (pipe)
"encoder" - how to encode the following value into a payload
float -> org.apache.lucene.analysis.payloads.FloatEncoder,
integer -> o.a.l.a.p.IntegerEncoder
identity -> o.a.l.a.p.IdentityEncoder
"encoder" - how to encode the following value into a payload
float -> org.apache.lucene.analysis.payloads.FloatEncoder,
integer -> o.a.l.a.p.IntegerEncoder
identity -> o.a.l.a.p.IdentityEncoder
Fully Qualified class name implementing PayloadEncoder, Encoder must have a no arg constructor.
-->
<filter class="solr.DelimitedPayloadTokenFilterFactory" encoder="float"/>
@ -483,10 +483,10 @@
-->
<fieldType name="descendent_path" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
</analyzer>
<analyzer type="query">
<tokenizer class="solr.KeywordTokenizerFactory" />
<tokenizer class="solr.KeywordTokenizerFactory" />
</analyzer>
</fieldType>
<!--
@ -495,10 +495,10 @@
-->
<fieldType name="ancestor_path" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.KeywordTokenizerFactory" />
<tokenizer class="solr.KeywordTokenizerFactory" />
</analyzer>
<analyzer type="query">
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
</analyzer>
</fieldType>


@ -955,7 +955,7 @@
<!-- maximum threshold of documents a query term can appear to be considered for correction -->
<float name="maxQueryFrequency">0.01</float>
<!-- uncomment this to require suggestions to occur in 1% of the documents
<float name="thresholdTokenFrequency">.01</float>
<float name="thresholdTokenFrequency">.01</float>
-->
</lst>


@ -977,7 +977,7 @@
<!-- maximum threshold of documents a query term can appear to be considered for correction -->
<float name="maxQueryFrequency">0.01</float>
<!-- uncomment this to require suggestions to occur in 1% of the documents
<float name="thresholdTokenFrequency">.01</float>
<float name="thresholdTokenFrequency">.01</float>
-->
</lst>


@ -17,32 +17,32 @@
-->
<oaidc:dc xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:oaidc="http://www.openarchives.org/OAI/2.0/oai_dc/">
<dc:title>Tika test document</dc:title>
<dc:title>Tika test document</dc:title>
<dc:creator>Rida Benjelloun</dc:creator>
<dc:creator>Rida Benjelloun</dc:creator>
<dc:subject>Java</dc:subject>
<dc:subject>Java</dc:subject>
<dc:subject>XML</dc:subject>
<dc:subject>XML</dc:subject>
<dc:subject>XSLT</dc:subject>
<dc:subject>XSLT</dc:subject>
<dc:subject>JDOM</dc:subject>
<dc:subject>JDOM</dc:subject>
<dc:subject>Indexation</dc:subject>
<dc:subject>Indexation</dc:subject>
<dc:description>Framework d'indexation des documents XML, HTML, PDF etc.. </dc:description>
<dc:description>Framework d'indexation des documents XML, HTML, PDF etc.. </dc:description>
<dc:identifier>http://www.apache.org</dc:identifier>
<dc:identifier>http://www.apache.org</dc:identifier>
<dc:date>2000-12-01T00:00:00.000Z</dc:date>
<dc:date>2000-12-01T00:00:00.000Z</dc:date>
<dc:type>test</dc:type>
<dc:type>test</dc:type>
<dc:format>application/msword</dc:format>
<dc:format>application/msword</dc:format>
<dc:language>Fr</dc:language>
<dc:language>Fr</dc:language>
<dc:rights>Archimède et Lius à Châteauneuf testing chars en été</dc:rights>
<dc:rights>Archimède et Lius à Châteauneuf testing chars en été</dc:rights>
</oaidc:dc>


@ -1,21 +1,21 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<analysisEngineDescription xmlns="http://uima.apache.org/resourceSpecifier">
<frameworkImplementation>org.apache.uima.java</frameworkImplementation>
@ -24,9 +24,9 @@
<analysisEngineMetaData>
<name>Hidden Markov Model - Part of Speech Tagger</name>
<description>A configuration of the HmmTaggerAnnotator that looks for
parts of speech of identified tokens within existing
Sentence and Token annotations. See also
WhitespaceTokenizer.xml.</description>
parts of speech of identified tokens within existing
Sentence and Token annotations. See also
WhitespaceTokenizer.xml.</description>
<version>1.0</version>
<vendor>The Apache Software Foundation</vendor>
<configurationParameters>
@ -55,7 +55,7 @@
<featureDescription>
<name>posTag</name>
<description>contains part-of-speech of a
corresponding token</description>
corresponding token</description>
<rangeTypeName>uima.cas.String</rangeTypeName>
</featureDescription>
</features>


@ -1,22 +1,22 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<analysisEngineDescription xmlns="http://uima.apache.org/resourceSpecifier">


@ -1,115 +1,115 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
***************************************************************
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
***************************************************************
***************************************************************
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
***************************************************************
-->
<analysisEngineDescription
xmlns="http://uima.apache.org/resourceSpecifier">
<frameworkImplementation>
org.apache.uima.java
</frameworkImplementation>
<primitive>true</primitive>
<annotatorImplementationName>
org.apache.uima.annotator.WhitespaceTokenizer
</annotatorImplementationName>
xmlns="http://uima.apache.org/resourceSpecifier">
<frameworkImplementation>
org.apache.uima.java
</frameworkImplementation>
<primitive>true</primitive>
<annotatorImplementationName>
org.apache.uima.annotator.WhitespaceTokenizer
</annotatorImplementationName>
<analysisEngineMetaData>
<name>WhitespaceTokenizer</name>
<description>
creates token and sentence annotations for whitespace
separated languages
</description>
<version>1.0</version>
<vendor>The Apache Software Foundation</vendor>
<analysisEngineMetaData>
<name>WhitespaceTokenizer</name>
<description>
creates token and sentence annotations for whitespace
separated languages
</description>
<version>1.0</version>
<vendor>The Apache Software Foundation</vendor>
<configurationParameters>
<configurationParameter>
<name>SofaNames</name>
<description>
The Sofa names the annotator should work on. If no
names are specified, the annotator works on the
default sofa.
</description>
<type>String</type>
<multiValued>true</multiValued>
<mandatory>false</mandatory>
</configurationParameter>
<configurationParameters>
<configurationParameter>
<name>SofaNames</name>
<description>
The Sofa names the annotator should work on. If no
names are specified, the annotator works on the
default sofa.
</description>
<type>String</type>
<multiValued>true</multiValued>
<mandatory>false</mandatory>
</configurationParameter>
</configurationParameters>
</configurationParameters>
<configurationParameterSettings>
<!--
<nameValuePair>
<name>SofaNames</name>
<value>
<array>
<string>sofaName</string>
</array>
</value>
</nameValuePair>
-->
</configurationParameterSettings>
<configurationParameterSettings>
<!--
<nameValuePair>
<name>SofaNames</name>
<value>
<array>
<string>sofaName</string>
</array>
</value>
</nameValuePair>
-->
</configurationParameterSettings>
<typeSystemDescription>
<typeDescription>
<name>org.apache.uima.TokenAnnotation</name>
<description>Single token annotation</description>
<supertypeName>uima.tcas.Annotation</supertypeName>
<features>
<featureDescription>
<name>tokenType</name>
<description>token type</description>
<rangeTypeName>uima.cas.String</rangeTypeName>
</featureDescription>
</features>
</typeDescription>
<typeSystemDescription>
<typeDescription>
<name>org.apache.uima.TokenAnnotation</name>
<description>Single token annotation</description>
<supertypeName>uima.tcas.Annotation</supertypeName>
<features>
<featureDescription>
<name>tokenType</name>
<description>token type</description>
<rangeTypeName>uima.cas.String</rangeTypeName>
</featureDescription>
</features>
</typeDescription>
<typeDescription>
<name>org.apache.uima.SentenceAnnotation</name>
<description>sentence annotation</description>
<supertypeName>uima.tcas.Annotation</supertypeName>
<features>
<typeDescription>
<name>org.apache.uima.SentenceAnnotation</name>
<description>sentence annotation</description>
<supertypeName>uima.tcas.Annotation</supertypeName>
<features>
</features>
</typeDescription>
</typeSystemDescription>
</features>
</typeDescription>
</typeSystemDescription>
<fsIndexes />
<fsIndexes />
<capabilities>
<capability>
<inputs />
<outputs>
<type>org.apache.uima.TokenAnnotation</type>
<feature>
org.apache.uima.TokenAnnotation:tokentype
</feature>
<type>org.apache.uima.SentenceAnnotation</type>
</outputs>
<languagesSupported>
<language>x-unspecified</language>
</languagesSupported>
</capability>
</capabilities>
<capabilities>
<capability>
<inputs />
<outputs>
<type>org.apache.uima.TokenAnnotation</type>
<feature>
org.apache.uima.TokenAnnotation:tokentype
</feature>
<type>org.apache.uima.SentenceAnnotation</type>
</outputs>
<languagesSupported>
<language>x-unspecified</language>
</languagesSupported>
</capability>
</capabilities>
</analysisEngineMetaData>
</analysisEngineMetaData>
</analysisEngineDescription>

View File

@ -50,7 +50,7 @@ Test for HighlighterMaxOffsetTest which requires the use of ReversedWildcardFilt
<!-- Just like text_general except it reverses the characters of
each token, to enable more efficient leading wildcard queries. -->
each token, to enable more efficient leading wildcard queries. -->
<fieldType name="text_general_rev" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.StandardTokenizerFactory"/>
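For context, the reversal trick above makes a leading-wildcard query such as *foo answerable as a cheap prefix query oof* over reversed tokens. A minimal sketch of such a chain follows, assuming the stock solr.ReversedWildcardFilterFactory that this test schema's comment refers to; the fieldType name and the withOriginal attribute value are illustrative assumptions, not lines from this diff:

<fieldType name="text_rev_sketch" class="solr.TextField" positionIncrementGap="100">
  <analyzer type="index">
    <tokenizer class="solr.StandardTokenizerFactory"/>
    <filter class="solr.LowerCaseFilterFactory"/>
    <!-- indexes each token reversed; withOriginal="true" keeps the forward token too -->
    <filter class="solr.ReversedWildcardFilterFactory" withOriginal="true"/>
  </analyzer>
  <analyzer type="query">
    <tokenizer class="solr.StandardTokenizerFactory"/>
    <filter class="solr.LowerCaseFilterFactory"/>
  </analyzer>
</fieldType>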
View File
@ -31,23 +31,23 @@
</fieldType>
<fieldType name="phrase_suggest" class="solr.TextField">
<analyzer>
<tokenizer class="solr.KeywordTokenizerFactory"/>
<filter class="solr.PatternReplaceFilterFactory"
pattern="([^\p{L}\p{M}\p{N}\p{Cs}]*[\p{L}\p{M}\p{N}\p{Cs}\_]+:)|([^\p{L}\p{M}\p{N}\p{Cs}])+"
replacement=" " replace="all"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.TrimFilterFactory"/>
</analyzer>
</fieldType>
<fieldType name="ja_suggest" class="solr.TextField">
<analyzer>
<tokenizer class="solr.JapaneseTokenizerFactory" mode="normal"/>
<filter class="solr.CJKWidthFilterFactory"/>
<filter class="solr.JapaneseReadingFormFilterFactory" useRomaji="true"/>
</analyzer>
</fieldType>
<analyzer>
<tokenizer class="solr.KeywordTokenizerFactory"/>
<filter class="solr.PatternReplaceFilterFactory"
pattern="([^\p{L}\p{M}\p{N}\p{Cs}]*[\p{L}\p{M}\p{N}\p{Cs}\_]+:)|([^\p{L}\p{M}\p{N}\p{Cs}])+"
replacement=" " replace="all"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.TrimFilterFactory"/>
</analyzer>
</fieldType>
<fieldType name="ja_suggest" class="solr.TextField">
<analyzer>
<tokenizer class="solr.JapaneseTokenizerFactory" mode="normal"/>
<filter class="solr.CJKWidthFilterFactory"/>
<filter class="solr.JapaneseReadingFormFilterFactory" useRomaji="true"/>
</analyzer>
</fieldType>
</types>
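To make the phrase_suggest chain above concrete, here is a hypothetical trace; the input string is invented, and KeywordTokenizer keeps the whole input as a single token:

<!--
  input token (KeywordTokenizer):           "title:Apache-Lucene"
  PatternReplaceFilter (prefix and punct):  " Apache Lucene"
  LowerCaseFilter:                          " apache lucene"
  TrimFilter:                               "apache lucene"
-->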
<fields>
View File
@ -46,22 +46,22 @@
</analyzer>
</fieldType>
<fieldType name="spellText" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.StandardTokenizerFactory"/>
<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
<filter class="solr.StandardFilterFactory"/>
<filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.StandardTokenizerFactory"/>
<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
<filter class="solr.StandardFilterFactory"/>
<filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
</analyzer>
</fieldType>
<fieldType name="spellText" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.StandardTokenizerFactory"/>
<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
<filter class="solr.StandardFilterFactory"/>
<filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.StandardTokenizerFactory"/>
<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
<filter class="solr.StandardFilterFactory"/>
<filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
</analyzer>
</fieldType>
</types>
View File
@ -508,7 +508,7 @@
<field name="standardfilt" type="standardfilt" indexed="true" stored="true"/>
<field name="lowerfilt" type="lowerfilt" indexed="true" stored="true"/>
<field name="lowerfilt1" type="lowerfilt" indexed="true" stored="true"/>
<field name="lowerfilt1and2" type="lowerfilt" indexed="true" stored="true"/>
<field name="lowerfilt1and2" type="lowerfilt" indexed="true" stored="true"/>
<field name="patterntok" type="patterntok" indexed="true" stored="true"/>
<field name="patternreplacefilt" type="patternreplacefilt" indexed="true" stored="true"/>
<field name="porterfilt" type="porterfilt" indexed="true" stored="true"/>
@ -665,22 +665,22 @@
<copyField source="title" dest="title_stringNoNorms"/>
<copyField source="title" dest="text"/>
<copyField source="subject" dest="text"/>
<copyField source="subject" dest="text"/>
<copyField source="lowerfilt1" dest="lowerfilt1and2"/>
<copyField source="lowerfilt" dest="lowerfilt1and2"/>
<copyField source="lowerfilt1" dest="lowerfilt1and2"/>
<copyField source="lowerfilt" dest="lowerfilt1and2"/>
<copyField source="*_t" dest="text"/>
<copyField source="*_t" dest="text"/>
<copyField source="id" dest="range_facet_l"/>
<copyField source="range_facet_f" dest="range_facet_d"/>
<copyField source="range_facet_f1" dest="range_facet_f1_dv"/>
<copyField source="id" dest="range_facet_l_dv"/>
<copyField source="id" dest="range_facet_i_dv"/>
<copyField source="range_facet_f" dest="range_facet_f_dv"/>
<copyField source="range_facet_f" dest="range_facet_d_dv"/>
<copyField source="bday" dest="range_facet_dt_dv"/>
<copyField source="id" dest="range_facet_l"/>
<copyField source="range_facet_f" dest="range_facet_d"/>
<copyField source="range_facet_f1" dest="range_facet_f1_dv"/>
<copyField source="id" dest="range_facet_l_dv"/>
<copyField source="id" dest="range_facet_i_dv"/>
<copyField source="range_facet_f" dest="range_facet_f_dv"/>
<copyField source="range_facet_f" dest="range_facet_d_dv"/>
<copyField source="bday" dest="range_facet_dt_dv"/>
<!-- dynamic destination -->
<copyField source="*_dynamic" dest="dynamic_*"/>
View File
@ -336,46 +336,46 @@ valued. -->
EXAMPLE: name="*_i" will match any field ending in _i (like myid_i, z_i)
Longer patterns will be matched first. If equal-size patterns
both match, the first appearing in the schema will be used. -->
<dynamicField name="*_s" type="string" indexed="true" stored="true"/>
<dynamicField name="*_s" type="string" indexed="true" stored="true"/>
<dynamicField name="*_s_dv" type="string" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_ss" type="string" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_sS" type="string" indexed="false" stored="true"/>
<dynamicField name="*_i" type="int" indexed="true" stored="true"/>
<dynamicField name="*_ii" type="int" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_l" type="long" indexed="true" stored="true"/>
<dynamicField name="*_f" type="float" indexed="true" stored="true"/>
<dynamicField name="*_d" type="double" indexed="true" stored="true"/>
<dynamicField name="*_ss" type="string" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_sS" type="string" indexed="false" stored="true"/>
<dynamicField name="*_i" type="int" indexed="true" stored="true"/>
<dynamicField name="*_ii" type="int" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_l" type="long" indexed="true" stored="true"/>
<dynamicField name="*_f" type="float" indexed="true" stored="true"/>
<dynamicField name="*_d" type="double" indexed="true" stored="true"/>
<dynamicField name="*_ti" type="tint" indexed="true" stored="true"/>
<dynamicField name="*_ti_dv" type="int" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_ti_ni_dv" type="int" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_tl" type="tlong" indexed="true" stored="true"/>
<dynamicField name="*_tl_dv" type="tlong" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_tl_ni_dv" type="tlong" indexed="false" stored="true" docValues="true"/>
<dynamicField name="*_tf" type="tfloat" indexed="true" stored="true"/>
<dynamicField name="*_tf_dv" type="tfloat" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_tf_ni_dv" type="tfloat" indexed="false" stored="true" docValues="true"/>
<dynamicField name="*_td" type="tdouble" indexed="true" stored="true"/>
<dynamicField name="*_td_dv" type="tdouble" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_td_ni_dv" type="tdouble" indexed="false" stored="true" docValues="true"/>
<dynamicField name="*_tdt" type="tdate" indexed="true" stored="true"/>
<dynamicField name="*_tdt_dv" type="tdate" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_tdt_ni_dv" type="tdate" indexed="false" stored="true" docValues="true"/>
<dynamicField name="*_ti" type="tint" indexed="true" stored="true"/>
<dynamicField name="*_ti_dv" type="int" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_ti_ni_dv" type="int" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_tl" type="tlong" indexed="true" stored="true"/>
<dynamicField name="*_tl_dv" type="tlong" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_tl_ni_dv" type="tlong" indexed="false" stored="true" docValues="true"/>
<dynamicField name="*_tf" type="tfloat" indexed="true" stored="true"/>
<dynamicField name="*_tf_dv" type="tfloat" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_tf_ni_dv" type="tfloat" indexed="false" stored="true" docValues="true"/>
<dynamicField name="*_td" type="tdouble" indexed="true" stored="true"/>
<dynamicField name="*_td_dv" type="tdouble" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_td_ni_dv" type="tdouble" indexed="false" stored="true" docValues="true"/>
<dynamicField name="*_tdt" type="tdate" indexed="true" stored="true"/>
<dynamicField name="*_tdt_dv" type="tdate" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_tdt_ni_dv" type="tdate" indexed="false" stored="true" docValues="true"/>
<dynamicField name="*_tis" type="tints" indexed="true" stored="true"/>
<dynamicField name="*_tis_dv" type="tints" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_tis_ni_dv" type="tints" indexed="false" stored="true" docValues="true"/>
<dynamicField name="*_tls" type="tlongs" indexed="true" stored="true"/>
<dynamicField name="*_tls_dv" type="tlongs" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_tls_ni_dv" type="tlongs" indexed="false" stored="true" docValues="true"/>
<dynamicField name="*_tfs" type="tfloats" indexed="true" stored="true"/>
<dynamicField name="*_tfs_dv" type="tfloats" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_tis" type="tints" indexed="true" stored="true"/>
<dynamicField name="*_tis_dv" type="tints" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_tis_ni_dv" type="tints" indexed="false" stored="true" docValues="true"/>
<dynamicField name="*_tls" type="tlongs" indexed="true" stored="true"/>
<dynamicField name="*_tls_dv" type="tlongs" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_tls_ni_dv" type="tlongs" indexed="false" stored="true" docValues="true"/>
<dynamicField name="*_tfs" type="tfloats" indexed="true" stored="true"/>
<dynamicField name="*_tfs_dv" type="tfloats" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_tfs_ni_dv" type="tfloats" indexed="false" stored="true" docValues="true"/>
<dynamicField name="*_tds" type="tdoubles" indexed="true" stored="true"/>
<dynamicField name="*_tds_dv" type="tdoubles" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_tds" type="tdoubles" indexed="true" stored="true"/>
<dynamicField name="*_tds_dv" type="tdoubles" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_tds_ni_dv" type="tdoubles" indexed="false" stored="true" docValues="true"/>
<dynamicField name="*_tdts" type="tdates" indexed="true" stored="true"/>
<dynamicField name="*_tdts_dv" type="tdates" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_tdts" type="tdates" indexed="true" stored="true"/>
<dynamicField name="*_tdts_dv" type="tdates" indexed="true" stored="true" docValues="true"/>
<dynamicField name="*_tdts_ni_dv" type="tdates" indexed="false" stored="true" docValues="true"/>
<dynamicField name="*_t" type="text" indexed="true" stored="true"/>
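As a concrete illustration of the suffix matching described above, a hypothetical update document (field names and values invented) would be typed like this:

<!-- weight_tf matches *_tf -> tfloat; category_s matches *_s -> string -->
<add>
  <doc>
    <field name="id">1</field>
    <field name="weight_tf">1.5</field>
    <field name="category_s">books</field>
  </doc>
</add>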
View File
@ -310,12 +310,12 @@
<!-- Create a string version of author for faceting -->
<copyField source="author" dest="author_s"/>
<!-- Above, multiple source fields are copied to the [text] field.
Another way to map multiple source fields to the same
destination field is to use the dynamic field syntax.
copyField also supports a maxChars to copy setting. -->
Another way to map multiple source fields to the same
destination field is to use the dynamic field syntax.
copyField also supports a maxChars to copy setting. -->
<!-- <copyField source="*_t" dest="text" maxChars="3000"/> -->
<!-- copy name to alphaNameSort, a field designed for sorting by name -->
@ -342,8 +342,8 @@
<!-- sortMissingLast and sortMissingFirst attributes are optional attributes that are
currently supported on types that are sorted internally as strings
and on numeric types.
This includes "string","boolean", and, as of 3.5 (and 4.x),
int, float, long, date, double, including the "Trie" variants.
This includes "string","boolean", and, as of 3.5 (and 4.x),
int, float, long, date, double, including the "Trie" variants.
- If sortMissingLast="true", then a sort on this field will cause documents
without the field to come after documents with the field,
regardless of the requested sort order (asc or desc).
@ -454,9 +454,9 @@
<!-- A general text field that has reasonable, generic
cross-language defaults: it tokenizes with StandardTokenizer,
removes stop words from case-insensitive "stopwords.txt"
(empty by default), and down cases. At query time only, it
also applies synonyms. -->
removes stop words from case-insensitive "stopwords.txt"
(empty by default), and down cases. At query time only, it
also applies synonyms. -->
<fieldType name="text_general" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.StandardTokenizerFactory"/>
@ -492,11 +492,11 @@
words="stopwords.txt"
/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<filter class="solr.EnglishMinimalStemFilterFactory"/>
-->
-->
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
<analyzer type="query">
@ -507,23 +507,23 @@
words="stopwords.txt"
/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<filter class="solr.EnglishMinimalStemFilterFactory"/>
-->
-->
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldType>
<!-- A text field with defaults appropriate for English, plus
aggressive word-splitting and autophrase features enabled.
This field is just like text_en, except it adds
WordDelimiterFilter to enable splitting and matching of
words on case-change, alpha numeric boundaries, and
non-alphanumeric chars. This means certain compound word
cases will work, for example query "wi fi" will match
document "WiFi" or "wi-fi".
aggressive word-splitting and autophrase features enabled.
This field is just like text_en, except it adds
WordDelimiterFilter to enable splitting and matching of
words on case-change, alpha numeric boundaries, and
non-alphanumeric chars. This means certain compound word
cases will work, for example query "wi fi" will match
document "WiFi" or "wi-fi".
-->
<fieldType name="text_en_splitting" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
<analyzer type="index">
@ -574,7 +574,7 @@
</fieldType>
<!-- Just like text_general except it reverses the characters of
each token, to enable more efficient leading wildcard queries. -->
each token, to enable more efficient leading wildcard queries. -->
<fieldType name="text_general_rev" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.StandardTokenizerFactory"/>
@ -649,10 +649,10 @@
a token of "foo|1.4" would be indexed as "foo" with a payload of 1.4f
Attributes of the DelimitedPayloadTokenFilterFactory:
"delimiter" - a one character delimiter. Default is | (pipe)
"encoder" - how to encode the following value into a payload
float -> org.apache.lucene.analysis.payloads.FloatEncoder,
integer -> o.a.l.a.p.IntegerEncoder
identity -> o.a.l.a.p.IdentityEncoder
"encoder" - how to encode the following value into a payload
float -> org.apache.lucene.analysis.payloads.FloatEncoder,
integer -> o.a.l.a.p.IntegerEncoder
identity -> o.a.l.a.p.IdentityEncoder
Fully qualified class name implementing PayloadEncoder; the Encoder must have a no-arg constructor.
-->
<filter class="solr.DelimitedPayloadTokenFilterFactory" encoder="float"/>
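To illustrate the delimiter/encoder contract described above, a hedged example; the field value and the assumption of a whitespace tokenizer are invented, not taken from this hunk:

<!-- a value of "rice|1.5 bread|0.7", tokenized on whitespace, yields the
     tokens "rice" and "bread" carrying the payloads 1.5f and 0.7f produced
     by the float encoder configured above. -->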
@ -673,10 +673,10 @@
-->
<fieldType name="descendent_path" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
</analyzer>
<analyzer type="query">
<tokenizer class="solr.KeywordTokenizerFactory" />
<tokenizer class="solr.KeywordTokenizerFactory" />
</analyzer>
</fieldType>
<!--
@ -685,10 +685,10 @@
-->
<fieldType name="ancestor_path" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.KeywordTokenizerFactory" />
<tokenizer class="solr.KeywordTokenizerFactory" />
</analyzer>
<analyzer type="query">
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
</analyzer>
</fieldType>
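A worked example for the two path fieldTypes above (the path itself is invented): PathHierarchyTokenizer with delimiter="/" expands "/a/b/c" into the tokens "/a", "/a/b" and "/a/b/c".

<!-- descendent_path: documents are expanded at index time, so the single
     keyword query token "/a/b" matches anything stored at or below "/a/b"
     (e.g. "/a/b/c").
     ancestor_path: the query is expanded instead, so a query for "/a/b/c"
     matches documents whose whole path is "/a", "/a/b" or "/a/b/c". -->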
View File
@ -41,8 +41,8 @@
class="solr.XMLResponseWriter" />
<requestHandler name="standard" class="solr.StandardRequestHandler">
<bool name="httpCaching">true</bool>
<arr name="first-components">
<bool name="httpCaching">true</bool>
<arr name="first-components">
<str>component1</str>
</arr>
<arr name="last-components">
View File
@ -38,8 +38,8 @@
<deletionPolicy class="org.apache.solr.core.FakeDeletionPolicy">
<str name="var1">value1</str>
<str name="var2">value2</str>
</deletionPolicy>
<str name="var2">value2</str>
</deletionPolicy>
</indexConfig>
<requestHandler name="standard" class="solr.StandardRequestHandler"></requestHandler>
View File
@ -24,8 +24,8 @@
<directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
<searchComponent name="componentThatAddsHeader" class="org.apache.solr.servlet.ResponseHeaderTest$ComponentThatAddsHeader"/>
<requestHandler name="/withHeaders" class="solr.StandardRequestHandler">
<arr name="first-components">
<str>componentThatAddsHeader</str>
</arr>
<arr name="first-components">
<str>componentThatAddsHeader</str>
</arr>
</requestHandler>
</config>
View File
@ -27,9 +27,9 @@
class="org.apache.solr.search.DelayingSearchComponent"/>
<requestHandler name="/select" class="solr.SearchHandler">
<arr name="first-components">
<str>delayingSearchComponent</str>
</arr>
<arr name="first-components">
<str>delayingSearchComponent</str>
</arr>
</requestHandler>
<requestDispatcher handleSelect="true" >
View File
@ -150,7 +150,7 @@
<!-- Suggester properties -->
<str name="separator"> </str>
<str name="suggestFreeTextAnalyzerFieldType">text</str>
<int name="ngrams">2</int>
<int name="ngrams">2</int>
</lst>
</searchComponent>
View File
@ -37,9 +37,9 @@
<!-- Log retrievedDocs -->
<requestHandler name="withlog" class="org.apache.solr.handler.component.SearchHandler">
<lst name="defaults">
<str name="defType">dismax</str>
</lst>
<lst name="defaults">
<str name="defType">dismax</str>
</lst>
<arr name="last-components">
<str>responselog</str>
</arr>
View File
@ -41,9 +41,9 @@
</requestHandler>
<requestHandler name="/replication" class="solr.ReplicationHandler">
<lst name="slave">
<str name="masterUrl">http://127.0.0.1:TEST_PORT/solr/collection1</str>
<str name="pollInterval">00:00:01</str>
<lst name="slave">
<str name="masterUrl">http://127.0.0.1:TEST_PORT/solr/collection1</str>
<str name="pollInterval">00:00:01</str>
<str name="compression">COMPRESSION</str>
</lst>
</requestHandler>
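With the slave section above the core polls the master once per second; for reference, replication state can be inspected through the ReplicationHandler's details command, e.g. (host and port invented):

  http://127.0.0.1:8983/solr/collection1/replication?command=details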
View File
@ -88,11 +88,11 @@
<float name="thresholdTokenFrequency">.29</float>
</lst>
<lst name="spellchecker">
<str name="name">multipleFields</str>
<str name="field">lowerfilt1and2</str>
<str name="spellcheckIndexDir">spellcheckerMultipleFields</str>
<str name="buildOnCommit">true</str>
</lst>
<str name="name">multipleFields</str>
<str name="field">lowerfilt1and2</str>
<str name="spellcheckIndexDir">spellcheckerMultipleFields</str>
<str name="buildOnCommit">true</str>
</lst>
<!-- Example of using different distance measure -->
<lst name="spellchecker">
<str name="name">jarowinkler</str>
@ -156,13 +156,13 @@
</arr>
</requestHandler>
<requestHandler name="spellCheckCompRH1" class="org.apache.solr.handler.component.SearchHandler">
<lst name="defaults">
<str name="defType">dismax</str>
<str name="qf">lowerfilt1^1</str>
</lst>
<arr name="last-components">
<str>spellcheck</str>
</arr>
<lst name="defaults">
<str name="defType">dismax</str>
<str name="qf">lowerfilt1^1</str>
</lst>
<arr name="last-components">
<str>spellcheck</str>
</arr>
</requestHandler>
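For illustration, a request that exercises this handler and its spellcheck last-component might look like the following; host, port and the misspelled query value are invented:

  http://localhost:8983/solr/collection1/spellCheckCompRH1?q=fauth&spellcheck=true&spellcheck.collate=true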
<requestHandler name="spellCheckWithWordbreak" class="org.apache.solr.handler.component.SearchHandler">
<lst name="defaults">
View File
@ -31,7 +31,7 @@
<searchComponent class="solr.SuggestComponent" name="suggest">
<!-- Suggest component (default index based dictionary) -->
<!-- Suggest component (default index based dictionary) -->
<lst name="suggester">
<str name="name">suggest_fuzzy_with_high_freq_dict</str>
<str name="lookupImpl">FuzzyLookupFactory</str>
@ -42,8 +42,8 @@
<float name="threshold">0.0</float>
</lst>
<!-- Suggest component (default file based dictionary) -->
<!-- Suggest component (default file based dictionary) -->
<lst name="suggester">
<str name="name">suggest_fuzzy_file_based</str>
<str name="lookupImpl">FuzzyLookupFactory</str>
@ -53,7 +53,7 @@
<str name="buildOnCommit">true</str>
</lst>
<!-- Suggest component (Document Dictionary) -->
<!-- Suggest component (Document Dictionary) -->
<lst name="suggester">
<str name="name">suggest_fuzzy_doc_dict</str>
<str name="lookupImpl">FuzzyLookupFactory</str>
@ -65,7 +65,7 @@
<str name="buildOnStartup">false</str>
</lst>
<!-- Suggest component (Document Expression Dictionary) -->
<!-- Suggest component (Document Expression Dictionary) -->
<lst name="suggester">
<str name="name">suggest_fuzzy_doc_expr_dict</str>
<str name="dictionaryImpl">DocumentExpressionDictionaryFactory</str>
View File
@ -1,52 +1,52 @@
<?xml version="1.0" ?>
<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor
license agreements. See the NOTICE file distributed with this work for additional
information regarding copyright ownership. The ASF licenses this file to
You under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of
the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
OF ANY KIND, either express or implied. See the License for the specific
language governing permissions and limitations under the License. -->
license agreements. See the NOTICE file distributed with this work for additional
information regarding copyright ownership. The ASF licenses this file to
You under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of
the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
OF ANY KIND, either express or implied. See the License for the specific
language governing permissions and limitations under the License. -->
<!-- This is a "kitchen sink" config file that tests can use. When writing
a new test, feel free to add *new* items (plugins, config options, etc...)
as long as they don't break any existing tests. If you need to test something
esoteric, please add a new "solrconfig-your-esoteric-purpose.xml" config file.
Note in particular that this test is used by MinimalSchemaTest, so anything
added to this file needs to work correctly even if there is no uniqueKey
or defaultSearchField. -->
a new test, feel free to add *new* items (plugins, config options, etc...)
as long as they don't break any existing tests. If you need to test something
esoteric, please add a new "solrconfig-your-esoteric-purpose.xml" config file.
Note in particular that this test is used by MinimalSchemaTest, so anything
added to this file needs to work correctly even if there is no uniqueKey
or defaultSearchField. -->
<config>
<dataDir>${solr.data.dir:}</dataDir>
<dataDir>${solr.data.dir:}</dataDir>
<directoryFactory name="DirectoryFactory"
class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}" />
<directoryFactory name="DirectoryFactory"
class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}" />
<luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
<luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
<xi:include href="solrconfig.snippet.randomindexconfig.xml"
xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="solrconfig.snippet.randomindexconfig.xml"
xmlns:xi="http://www.w3.org/2001/XInclude" />
<updateHandler class="solr.DirectUpdateHandler2">
<commitWithin>
<softCommit>${solr.commitwithin.softcommit:true}</softCommit>
</commitWithin>
<updateHandler class="solr.DirectUpdateHandler2">
<commitWithin>
<softCommit>${solr.commitwithin.softcommit:true}</softCommit>
</commitWithin>
</updateHandler>
<requestHandler name="/select" class="solr.SearchHandler">
<lst name="defaults">
<str name="echoParams">explicit</str>
<str name="indent">true</str>
<str name="df">text</str>
</lst>
</updateHandler>
<requestHandler name="/select" class="solr.SearchHandler">
<lst name="defaults">
<str name="echoParams">explicit</str>
<str name="indent">true</str>
<str name="df">text</str>
</lst>
</requestHandler>
<queryResponseWriter name="javabin"
</requestHandler>
<queryResponseWriter name="javabin"
class="solr.TestTolerantSearch$BadResponseWriter" />
</config>
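The commitWithin block above makes client-requested deadlines default to soft commits; a hypothetical update exercising it (document values invented):

<add commitWithin="5000">
  <!-- must become searchable within 5 seconds; with softCommit=true this is
       satisfied by a soft commit rather than a hard commit -->
  <doc>
    <field name="id">1</field>
  </doc>
</add>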
View File
@ -83,7 +83,7 @@
-->
<updateLog enable="${enable.update.log:true}">
<str name="dir">${solr.ulog.dir:}</str>
<str name="dir">${solr.ulog.dir:}</str>
</updateLog>
<commitWithin>
@ -198,7 +198,7 @@
is not specified in the request.
-->
<requestHandler name="standard" class="solr.StandardRequestHandler">
<bool name="httpCaching">true</bool>
<bool name="httpCaching">true</bool>
</requestHandler>
<requestHandler name="dismax" class="solr.SearchHandler" >
@ -269,11 +269,11 @@
<int name="maxChanges">10</int>
</lst>
<lst name="spellchecker">
<str name="name">multipleFields</str>
<str name="field">lowerfilt1and2</str>
<str name="spellcheckIndexDir">spellcheckerMultipleFields</str>
<str name="buildOnCommit">false</str>
</lst>
<str name="name">multipleFields</str>
<str name="field">lowerfilt1and2</str>
<str name="spellcheckIndexDir">spellcheckerMultipleFields</str>
<str name="buildOnCommit">false</str>
</lst>
<!-- Example of using different distance measure -->
<lst name="spellchecker">
<str name="name">jarowinkler</str>
@ -377,13 +377,13 @@
</arr>
</requestHandler>
<requestHandler name="spellCheckCompRH1" class="org.apache.solr.handler.component.SearchHandler">
<lst name="defaults">
<str name="defType">dismax</str>
<str name="qf">lowerfilt1^1</str>
</lst>
<arr name="last-components">
<str>spellcheck</str>
</arr>
<lst name="defaults">
<str name="defType">dismax</str>
<str name="qf">lowerfilt1^1</str>
</lst>
<arr name="last-components">
<str>spellcheck</str>
</arr>
</requestHandler>
<requestHandler name="mltrh" class="org.apache.solr.handler.component.SearchHandler">
View File
@ -292,9 +292,9 @@
<copyField source="author" dest="author_s"/>
<!-- Above, multiple source fields are copied to the [text] field.
Another way to map multiple source fields to the same
destination field is to use the dynamic field syntax.
copyField also supports a maxChars to copy setting. -->
Another way to map multiple source fields to the same
destination field is to use the dynamic field syntax.
copyField also supports a maxChars to copy setting. -->
<!-- <copyField source="*_t" dest="text" maxChars="3000"/> -->
@ -322,8 +322,8 @@
<!-- sortMissingLast and sortMissingFirst attributes are optional attributes that are
currently supported on types that are sorted internally as strings
and on numeric types.
This includes "string","boolean", and, as of 3.5 (and 4.x),
int, float, long, date, double, including the "Trie" variants.
This includes "string","boolean", and, as of 3.5 (and 4.x),
int, float, long, date, double, including the "Trie" variants.
- If sortMissingLast="true", then a sort on this field will cause documents
without the field to come after documents with the field,
regardless of the requested sort order (asc or desc).
@ -434,9 +434,9 @@
<!-- A general text field that has reasonable, generic
cross-language defaults: it tokenizes with StandardTokenizer,
removes stop words from case-insensitive "stopwords.txt"
(empty by default), and down cases. At query time only, it
also applies synonyms. -->
removes stop words from case-insensitive "stopwords.txt"
(empty by default), and down cases. At query time only, it
also applies synonyms. -->
<fieldType name="text_general" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.StandardTokenizerFactory"/>
@ -472,11 +472,11 @@
words="lang/stopwords_en.txt"
/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<filter class="solr.EnglishMinimalStemFilterFactory"/>
-->
-->
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
<analyzer type="query">
@ -487,23 +487,23 @@
words="lang/stopwords_en.txt"
/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<filter class="solr.EnglishMinimalStemFilterFactory"/>
-->
-->
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldType>
<!-- A text field with defaults appropriate for English, plus
aggressive word-splitting and autophrase features enabled.
This field is just like text_en, except it adds
WordDelimiterFilter to enable splitting and matching of
words on case-change, alpha numeric boundaries, and
non-alphanumeric chars. This means certain compound word
cases will work, for example query "wi fi" will match
document "WiFi" or "wi-fi".
aggressive word-splitting and autophrase features enabled.
This field is just like text_en, except it adds
WordDelimiterFilter to enable splitting and matching of
words on case-change, alpha numeric boundaries, and
non-alphanumeric chars. This means certain compound word
cases will work, for example query "wi fi" will match
document "WiFi" or "wi-fi".
-->
<fieldType name="text_en_splitting" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
<analyzer type="index">
@ -554,7 +554,7 @@
</fieldType>
<!-- Just like text_general except it reverses the characters of
each token, to enable more efficient leading wildcard queries. -->
each token, to enable more efficient leading wildcard queries. -->
<fieldType name="text_general_rev" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.StandardTokenizerFactory"/>
@ -629,10 +629,10 @@
a token of "foo|1.4" would be indexed as "foo" with a payload of 1.4f
Attributes of the DelimitedPayloadTokenFilterFactory:
"delimiter" - a one character delimiter. Default is | (pipe)
"encoder" - how to encode the following value into a payload
float -> org.apache.lucene.analysis.payloads.FloatEncoder,
integer -> o.a.l.a.p.IntegerEncoder
identity -> o.a.l.a.p.IdentityEncoder
"encoder" - how to encode the following value into a payload
float -> org.apache.lucene.analysis.payloads.FloatEncoder,
integer -> o.a.l.a.p.IntegerEncoder
identity -> o.a.l.a.p.IdentityEncoder
Fully qualified class name implementing PayloadEncoder; the Encoder must have a no-arg constructor.
-->
<filter class="solr.DelimitedPayloadTokenFilterFactory" encoder="float"/>
@ -653,10 +653,10 @@
-->
<fieldType name="descendent_path" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
</analyzer>
<analyzer type="query">
<tokenizer class="solr.KeywordTokenizerFactory" />
<tokenizer class="solr.KeywordTokenizerFactory" />
</analyzer>
</fieldType>
<!--
@ -665,10 +665,10 @@
-->
<fieldType name="ancestor_path" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.KeywordTokenizerFactory" />
<tokenizer class="solr.KeywordTokenizerFactory" />
</analyzer>
<analyzer type="query">
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
</analyzer>
</fieldType>
View File
@ -1066,7 +1066,7 @@
<!-- maximum threshold of documents a query term can appear in to be considered for correction -->
<float name="maxQueryFrequency">0.01</float>
<!-- uncomment this to require suggestions to occur in 1% of the documents
<float name="thresholdTokenFrequency">.01</float>
<float name="thresholdTokenFrequency">.01</float>
-->
</lst>
@ -1157,7 +1157,7 @@
</requestHandler>
<searchComponent name="suggest" class="solr.SuggestComponent">
<lst name="suggester">
<lst name="suggester">
<str name="name">mySuggester</str>
<str name="lookupImpl">FuzzyLookupFactory</str> <!-- org.apache.solr.spelling.suggest.fst -->
<str name="dictionaryImpl">DocumentDictionaryFactory</str> <!-- org.apache.solr.spelling.suggest.HighFrequencyDictionaryFactory -->
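For illustration, a suggester configured like this is typically exposed through a request handler wired to the suggest component; a hypothetical request (handler path, core name and query are invented):

  http://localhost:8983/solr/techproducts/suggest?suggest=true&suggest.dictionary=mySuggester&suggest.q=elec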
View File
@ -211,10 +211,10 @@
<copyField source="allTo" dest="text"/>
<!-- Above, multiple source fields are copied to the [text] field.
Another way to map multiple source fields to the same
destination field is to use the dynamic field syntax.
copyField also supports a maxChars to copy setting. -->
Another way to map multiple source fields to the same
destination field is to use the dynamic field syntax.
copyField also supports a maxChars to copy setting. -->
<!-- <copyField source="*_t" dest="text" maxChars="3000"/> -->
<!-- copy name to alphaNameSort, a field designed for sorting by name -->
@ -241,8 +241,8 @@
<!-- sortMissingLast and sortMissingFirst attributes are optional attributes that are
currently supported on types that are sorted internally as strings
and on numeric types.
This includes "string","boolean", and, as of 3.5 (and 4.x),
int, float, long, date, double, including the "Trie" variants.
This includes "string","boolean", and, as of 3.5 (and 4.x),
int, float, long, date, double, including the "Trie" variants.
- If sortMissingLast="true", then a sort on this field will cause documents
without the field to come after documents with the field,
regardless of the requested sort order (asc or desc).
@ -353,9 +353,9 @@
<!-- A general text field that has reasonable, generic
cross-language defaults: it tokenizes with StandardTokenizer,
removes stop words from case-insensitive "stopwords.txt"
(empty by default), and down cases. At query time only, it
also applies synonyms. -->
removes stop words from case-insensitive "stopwords.txt"
(empty by default), and down cases. At query time only, it
also applies synonyms. -->
<fieldType name="text_general" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.StandardTokenizerFactory"/>
@ -391,11 +391,11 @@
words="lang/stopwords_en.txt"
/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<filter class="solr.EnglishMinimalStemFilterFactory"/>
-->
-->
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
<analyzer type="query">
@ -406,23 +406,23 @@
words="lang/stopwords_en.txt"
/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<filter class="solr.EnglishMinimalStemFilterFactory"/>
-->
-->
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldType>
<!-- A text field with defaults appropriate for English, plus
aggressive word-splitting and autophrase features enabled.
This field is just like text_en, except it adds
WordDelimiterFilter to enable splitting and matching of
words on case-change, alpha numeric boundaries, and
non-alphanumeric chars. This means certain compound word
cases will work, for example query "wi fi" will match
document "WiFi" or "wi-fi".
aggressive word-splitting and autophrase features enabled.
This field is just like text_en, except it adds
WordDelimiterFilter to enable splitting and matching of
words on case-change, alpha numeric boundaries, and
non-alphanumeric chars. This means certain compound word
cases will work, for example query "wi fi" will match
document "WiFi" or "wi-fi".
-->
<fieldType name="text_en_splitting" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
<analyzer type="index">
@ -473,7 +473,7 @@
</fieldType>
<!-- Just like text_general except it reverses the characters of
each token, to enable more efficient leading wildcard queries. -->
each token, to enable more efficient leading wildcard queries. -->
<fieldType name="text_general_rev" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.StandardTokenizerFactory"/>
@ -548,10 +548,10 @@
a token of "foo|1.4" would be indexed as "foo" with a payload of 1.4f
Attributes of the DelimitedPayloadTokenFilterFactory:
"delimiter" - a one character delimiter. Default is | (pipe)
"encoder" - how to encode the following value into a payload
float -> org.apache.lucene.analysis.payloads.FloatEncoder,
integer -> o.a.l.a.p.IntegerEncoder
identity -> o.a.l.a.p.IdentityEncoder
"encoder" - how to encode the following value into a payload
float -> org.apache.lucene.analysis.payloads.FloatEncoder,
integer -> o.a.l.a.p.IntegerEncoder
identity -> o.a.l.a.p.IdentityEncoder
Fully qualified class name implementing PayloadEncoder; the Encoder must have a no-arg constructor.
-->
<filter class="solr.DelimitedPayloadTokenFilterFactory" encoder="float"/>
@ -572,10 +572,10 @@
-->
<fieldType name="descendent_path" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
</analyzer>
<analyzer type="query">
<tokenizer class="solr.KeywordTokenizerFactory" />
<tokenizer class="solr.KeywordTokenizerFactory" />
</analyzer>
</fieldType>
<!--
@ -584,10 +584,10 @@
-->
<fieldType name="ancestor_path" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.KeywordTokenizerFactory" />
<tokenizer class="solr.KeywordTokenizerFactory" />
</analyzer>
<analyzer type="query">
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
</analyzer>
</fieldType>
View File
@ -1068,7 +1068,7 @@
<!-- maximum threshold of documents a query term can appear in to be considered for correction -->
<float name="maxQueryFrequency">0.01</float>
<!-- uncomment this to require suggestions to occur in 1% of the documents
<float name="thresholdTokenFrequency">.01</float>
<float name="thresholdTokenFrequency">.01</float>
-->
</lst>
@ -1159,7 +1159,7 @@
</requestHandler>
<searchComponent name="suggest" class="solr.SuggestComponent">
<lst name="suggester">
<lst name="suggester">
<str name="name">mySuggester</str>
<str name="lookupImpl">FuzzyLookupFactory</str> <!-- org.apache.solr.spelling.suggest.fst -->
<str name="dictionaryImpl">DocumentDictionaryFactory</str> <!-- org.apache.solr.spelling.suggest.HighFrequencyDictionaryFactory -->
View File
@ -7,11 +7,11 @@
processor="XPathEntityProcessor"
forEach="/rss/channel/item"
transformer="DateFormatTransformer">
<field column="source" xpath="/rss/channel/title" commonField="true" />
<field column="source-link" xpath="/rss/channel/link" commonField="true" />
<field column="subject" xpath="/rss/channel/subject" commonField="true" />
<field column="title" xpath="/rss/channel/item/title" />
<field column="link" xpath="/rss/channel/item/link" />
<field column="description" xpath="/rss/channel/item/description" />
View File
@ -215,10 +215,10 @@
<copyField source="item-subject" dest="text"/>
<!-- Above, multiple source fields are copied to the [text] field.
Another way to map multiple source fields to the same
destination field is to use the dynamic field syntax.
copyField also supports a maxChars to copy setting. -->
Another way to map multiple source fields to the same
destination field is to use the dynamic field syntax.
copyField also supports a maxChars to copy setting. -->
<!-- <copyField source="*_t" dest="text" maxChars="3000"/> -->
<!-- copy name to alphaNameSort, a field designed for sorting by name -->
@ -272,8 +272,8 @@
<!-- sortMissingLast and sortMissingFirst attributes are optional attributes that are
currently supported on types that are sorted internally as strings
and on numeric types.
This includes "string","boolean", and, as of 3.5 (and 4.x),
int, float, long, date, double, including the "Trie" variants.
This includes "string","boolean", and, as of 3.5 (and 4.x),
int, float, long, date, double, including the "Trie" variants.
- If sortMissingLast="true", then a sort on this field will cause documents
without the field to come after documents with the field,
regardless of the requested sort order (asc or desc).
@ -384,9 +384,9 @@
<!-- A general text field that has reasonable, generic
cross-language defaults: it tokenizes with StandardTokenizer,
removes stop words from case-insensitive "stopwords.txt"
(empty by default), and down cases. At query time only, it
also applies synonyms. -->
removes stop words from case-insensitive "stopwords.txt"
(empty by default), and down cases. At query time only, it
also applies synonyms. -->
<fieldType name="text_general" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.StandardTokenizerFactory"/>
@ -422,11 +422,11 @@
words="lang/stopwords_en.txt"
/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<filter class="solr.EnglishMinimalStemFilterFactory"/>
-->
-->
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
<analyzer type="query">
@ -437,23 +437,23 @@
words="lang/stopwords_en.txt"
/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<filter class="solr.EnglishMinimalStemFilterFactory"/>
-->
-->
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldType>
<!-- A text field with defaults appropriate for English, plus
aggressive word-splitting and autophrase features enabled.
This field is just like text_en, except it adds
WordDelimiterFilter to enable splitting and matching of
words on case-change, alpha numeric boundaries, and
non-alphanumeric chars. This means certain compound word
cases will work, for example query "wi fi" will match
document "WiFi" or "wi-fi".
aggressive word-splitting and autophrase features enabled.
This field is just like text_en, except it adds
WordDelimiterFilter to enable splitting and matching of
words on case-change, alpha numeric boundaries, and
non-alphanumeric chars. This means certain compound word
cases will work, for example query "wi fi" will match
document "WiFi" or "wi-fi".
-->
<fieldType name="text_en_splitting" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
<analyzer type="index">
@ -504,7 +504,7 @@
</fieldType>
<!-- Just like text_general except it reverses the characters of
each token, to enable more efficient leading wildcard queries. -->
each token, to enable more efficient leading wildcard queries. -->
<fieldType name="text_general_rev" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.StandardTokenizerFactory"/>
@ -579,10 +579,10 @@
a token of "foo|1.4" would be indexed as "foo" with a payload of 1.4f
Attributes of the DelimitedPayloadTokenFilterFactory:
"delimiter" - a one character delimiter. Default is | (pipe)
"encoder" - how to encode the following value into a payload
float -> org.apache.lucene.analysis.payloads.FloatEncoder,
integer -> o.a.l.a.p.IntegerEncoder
identity -> o.a.l.a.p.IdentityEncoder
"encoder" - how to encode the following value into a payload
float -> org.apache.lucene.analysis.payloads.FloatEncoder,
integer -> o.a.l.a.p.IntegerEncoder
identity -> o.a.l.a.p.IdentityEncoder
Fully qualified class name implementing PayloadEncoder; the Encoder must have a no-arg constructor.
-->
<filter class="solr.DelimitedPayloadTokenFilterFactory" encoder="float"/>
@ -603,10 +603,10 @@
-->
<fieldType name="descendent_path" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
</analyzer>
<analyzer type="query">
<tokenizer class="solr.KeywordTokenizerFactory" />
<tokenizer class="solr.KeywordTokenizerFactory" />
</analyzer>
</fieldType>
<!--
@ -615,10 +615,10 @@
-->
<fieldType name="ancestor_path" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.KeywordTokenizerFactory" />
<tokenizer class="solr.KeywordTokenizerFactory" />
</analyzer>
<analyzer type="query">
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
<tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
</analyzer>
</fieldType>
View File
@ -1065,7 +1065,7 @@
<!-- maximum threshold of documents a query term can appear in to be considered for correction -->
<float name="maxQueryFrequency">0.01</float>
<!-- uncomment this to require suggestions to occur in 1% of the documents
<float name="thresholdTokenFrequency">.01</float>
<float name="thresholdTokenFrequency">.01</float>
-->
</lst>
@ -1156,7 +1156,7 @@
</requestHandler>
<searchComponent name="suggest" class="solr.SuggestComponent">
<lst name="suggester">
<lst name="suggester">
<str name="name">mySuggester</str>
<str name="lookupImpl">FuzzyLookupFactory</str> <!-- org.apache.solr.spelling.suggest.fst -->
<str name="dictionaryImpl">DocumentDictionaryFactory</str> <!-- org.apache.solr.spelling.suggest.HighFrequencyDictionaryFactory -->
View File
@ -290,12 +290,12 @@
<!-- Create a string version of author for faceting -->
<copyField source="author" dest="author_s"/>
<!-- Above, multiple source fields are copied to the [text] field.
Another way to map multiple source fields to the same
destination field is to use the dynamic field syntax.
copyField also supports a maxChars to copy setting. -->
Another way to map multiple source fields to the same
destination field is to use the dynamic field syntax.
copyField also supports a maxChars to copy setting. -->
<!-- <copyField source="*_t" dest="text" maxChars="3000"/> -->
<!-- copy name to alphaNameSort, a field designed for sorting by name -->
@ -322,8 +322,8 @@
<!-- sortMissingLast and sortMissingFirst attributes are optional attributes that are
currently supported on types that are sorted internally as strings
and on numeric types.
This includes "string","boolean", and, as of 3.5 (and 4.x),
int, float, long, date, double, including the "Trie" variants.
This includes "string","boolean", and, as of 3.5 (and 4.x),
int, float, long, date, double, including the "Trie" variants.
- If sortMissingLast="true", then a sort on this field will cause documents
without the field to come after documents with the field,
regardless of the requested sort order (asc or desc).
@ -434,9 +434,9 @@
<!-- A general text field that has reasonable, generic
cross-language defaults: it tokenizes with StandardTokenizer,
removes stop words from case-insensitive "stopwords.txt"
(empty by default), and down cases. At query time only, it
also applies synonyms. -->
removes stop words from case-insensitive "stopwords.txt"
(empty by default), and down cases. At query time only, it
also applies synonyms. -->
<fieldType name="text_general" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.StandardTokenizerFactory"/>
@ -472,11 +472,11 @@
words="lang/stopwords_en.txt"
/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.EnglishPossessiveFilterFactory"/>
<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
<filter class="solr.EnglishMinimalStemFilterFactory"/>
-->
-->
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
<analyzer type="query">
@@ -487,23 +487,23 @@
                words="lang/stopwords_en.txt"
                />
        <filter class="solr.LowerCaseFilterFactory"/>
-       <filter class="solr.EnglishPossessiveFilterFactory"/>
+       <filter class="solr.EnglishPossessiveFilterFactory"/>
        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
-       <!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
+       <!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
        <filter class="solr.EnglishMinimalStemFilterFactory"/>
-       -->
+       -->
        <filter class="solr.PorterStemFilterFactory"/>
      </analyzer>
    </fieldType>
    <!-- A text field with defaults appropriate for English, plus
-     aggressive word-splitting and autophrase features enabled.
-     This field is just like text_en, except it adds
-     WordDelimiterFilter to enable splitting and matching of
-     words on case-change, alpha numeric boundaries, and
-     non-alphanumeric chars. This means certain compound word
-     cases will work, for example query "wi fi" will match
-     document "WiFi" or "wi-fi".
+     aggressive word-splitting and autophrase features enabled.
+     This field is just like text_en, except it adds
+     WordDelimiterFilter to enable splitting and matching of
+     words on case-change, alpha numeric boundaries, and
+     non-alphanumeric chars. This means certain compound word
+     cases will work, for example query "wi fi" will match
+     document "WiFi" or "wi-fi".
       -->
    <fieldType name="text_en_splitting" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
      <analyzer type="index">
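
(The behavior described in that comment comes from WordDelimiterFilter; a typical index-time configuration, sketched from the stock Solr examples rather than from this hunk, would be:)

  <filter class="solr.WordDelimiterFilterFactory"
          generateWordParts="1" generateNumberParts="1"
          catenateWords="1" catenateNumbers="1" catenateAll="0"
          splitOnCaseChange="1"/>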
@@ -554,7 +554,7 @@
    </fieldType>
    <!-- Just like text_general except it reverses the characters of
-     each token, to enable more efficient leading wildcard queries. -->
+     each token, to enable more efficient leading wildcard queries. -->
    <fieldType name="text_general_rev" class="solr.TextField" positionIncrementGap="100">
      <analyzer type="index">
        <tokenizer class="solr.StandardTokenizerFactory"/>
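
(text_general_rev gets this property from an index-time-only ReversedWildcardFilter, which additionally indexes a reversed form of each token so a leading-wildcard query can run as an ordinary prefix query over the reversed terms; a sketch along the lines of the stock example:)

  <filter class="solr.ReversedWildcardFilterFactory" withOriginal="true"
          maxPosAsterisk="3" maxPosQuestion="2" maxFractionAsterisk="0.33"/>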
@@ -629,10 +629,10 @@
         a token of "foo|1.4" would be indexed as "foo" with a payload of 1.4f
         Attributes of the DelimitedPayloadTokenFilterFactory :
          "delimiter" - a one-character delimiter. Default is | (pipe)
-         "encoder" - how to encode the following value into a payload
-            float -> org.apache.lucene.analysis.payloads.FloatEncoder,
-            integer -> o.a.l.a.p.IntegerEncoder
-            identity -> o.a.l.a.p.IdentityEncoder
+         "encoder" - how to encode the following value into a payload
+            float -> org.apache.lucene.analysis.payloads.FloatEncoder,
+            integer -> o.a.l.a.p.IntegerEncoder
+            identity -> o.a.l.a.p.IdentityEncoder
            Fully qualified class name implementing PayloadEncoder; the Encoder must have a no-arg constructor.
         -->
        <filter class="solr.DelimitedPayloadTokenFilterFactory" encoder="float"/>
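
(Putting it together: a field type that applies the filter, and a document value it would parse; type and field names are illustrative:)

  <fieldType name="payloads" class="solr.TextField" indexed="true" stored="false">
    <analyzer>
      <tokenizer class="solr.WhitespaceTokenizerFactory"/>
      <filter class="solr.DelimitedPayloadTokenFilterFactory" encoder="float"/>
    </analyzer>
  </fieldType>
  <!-- "big" is indexed with payload 100.0f, "small" with 1.0f -->
  <field name="payloads">big|100 small|1</field>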
@@ -653,10 +653,10 @@
      -->
    <fieldType name="descendent_path" class="solr.TextField">
      <analyzer type="index">
-       <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
+       <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
      </analyzer>
      <analyzer type="query">
-       <tokenizer class="solr.KeywordTokenizerFactory" />
+       <tokenizer class="solr.KeywordTokenizerFactory" />
      </analyzer>
    </fieldType>
    <!--
@@ -665,10 +665,10 @@
      -->
    <fieldType name="ancestor_path" class="solr.TextField">
      <analyzer type="index">
-       <tokenizer class="solr.KeywordTokenizerFactory" />
+       <tokenizer class="solr.KeywordTokenizerFactory" />
      </analyzer>
      <analyzer type="query">
-       <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
+       <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
      </analyzer>
    </fieldType>
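
(The two types are mirror images. As a hedged illustration with assumed field names: under descendent_path, a document whose path is "/usr/local/bin" is indexed as the tokens /usr, /usr/local, and /usr/local/bin, so the keyword query "/usr/local" matches it and everything below /usr/local; under ancestor_path the roles are swapped, so a query for "/usr/local/bin" matches documents whose stored path is one of its ancestors.)

  <field name="file_path" type="descendent_path" indexed="true" stored="true"/>
  <field name="category_path" type="ancestor_path" indexed="true" stored="true"/>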

View File

@@ -1066,7 +1066,7 @@
       <!-- maximum threshold of documents a query term can appear to be considered for correction -->
       <float name="maxQueryFrequency">0.01</float>
       <!-- uncomment this to require suggestions to occur in 1% of the documents
-      <float name="thresholdTokenFrequency">.01</float>
+      <float name="thresholdTokenFrequency">.01</float>
       -->
     </lst>
@@ -1157,7 +1157,7 @@
   </requestHandler>
   <searchComponent name="suggest" class="solr.SuggestComponent">
-    <lst name="suggester">
+    <lst name="suggester">
       <str name="name">mySuggester</str>
       <str name="lookupImpl">FuzzyLookupFactory</str>      <!-- org.apache.solr.spelling.suggest.fst -->
       <str name="dictionaryImpl">DocumentDictionaryFactory</str>     <!-- org.apache.solr.spelling.suggest.HighFrequencyDictionaryFactory -->

View File

@@ -201,8 +201,8 @@
    <!-- sortMissingLast and sortMissingFirst are optional attributes,
         currently supported on types that are sorted internally as strings
         and on numeric types.
-      This includes "string","boolean", and, as of 3.5 (and 4.x),
-      int, float, long, date, double, including the "Trie" variants.
+      This includes "string","boolean", and, as of 3.5 (and 4.x),
+      int, float, long, date, double, including the "Trie" variants.
       - If sortMissingLast="true", then a sort on this field will cause documents
         without the field to come after documents with the field,
         regardless of the requested sort order (asc or desc).
@@ -332,31 +332,31 @@
      <analyzer type="index">
        <tokenizer class="solr.StandardTokenizerFactory"/>
        <filter class="solr.LowerCaseFilterFactory"/>
-       <filter class="solr.EnglishPossessiveFilterFactory"/>
-       <!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
+       <filter class="solr.EnglishPossessiveFilterFactory"/>
+       <!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
        <filter class="solr.EnglishMinimalStemFilterFactory"/>
-       -->
+       -->
        <filter class="solr.PorterStemFilterFactory"/>
      </analyzer>
      <analyzer type="query">
        <tokenizer class="solr.StandardTokenizerFactory"/>
        <filter class="solr.LowerCaseFilterFactory"/>
-       <filter class="solr.EnglishPossessiveFilterFactory"/>
-       <!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
+       <filter class="solr.EnglishPossessiveFilterFactory"/>
+       <!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
        <filter class="solr.EnglishMinimalStemFilterFactory"/>
-       -->
+       -->
        <filter class="solr.PorterStemFilterFactory"/>
      </analyzer>
    </fieldType>
    <!-- A text field with defaults appropriate for English, plus
-     aggressive word-splitting and autophrase features enabled.
-     This field is just like text_en, except it adds
-     WordDelimiterFilter to enable splitting and matching of
-     words on case-change, alpha numeric boundaries, and
-     non-alphanumeric chars. This means certain compound word
-     cases will work, for example query "wi fi" will match
-     document "WiFi" or "wi-fi".
+     aggressive word-splitting and autophrase features enabled.
+     This field is just like text_en, except it adds
+     WordDelimiterFilter to enable splitting and matching of
+     words on case-change, alpha numeric boundaries, and
+     non-alphanumeric chars. This means certain compound word
+     cases will work, for example query "wi fi" will match
+     document "WiFi" or "wi-fi".
       -->
    <fieldType name="text_en_splitting" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
      <analyzer type="index">
@@ -388,7 +388,7 @@
    </fieldType>
    <!-- Just like text_general except it reverses the characters of
-     each token, to enable more efficient leading wildcard queries. -->
+     each token, to enable more efficient leading wildcard queries. -->
    <fieldType name="text_general_rev" class="solr.TextField" positionIncrementGap="100">
      <analyzer type="index">
        <tokenizer class="solr.StandardTokenizerFactory"/>
@@ -460,10 +460,10 @@
         a token of "foo|1.4" would be indexed as "foo" with a payload of 1.4f
         Attributes of the DelimitedPayloadTokenFilterFactory :
          "delimiter" - a one-character delimiter. Default is | (pipe)
-         "encoder" - how to encode the following value into a payload
-            float -> org.apache.lucene.analysis.payloads.FloatEncoder,
-            integer -> o.a.l.a.p.IntegerEncoder
-            identity -> o.a.l.a.p.IdentityEncoder
+         "encoder" - how to encode the following value into a payload
+            float -> org.apache.lucene.analysis.payloads.FloatEncoder,
+            integer -> o.a.l.a.p.IntegerEncoder
+            identity -> o.a.l.a.p.IdentityEncoder
            Fully qualified class name implementing PayloadEncoder; the Encoder must have a no-arg constructor.
         -->
        <filter class="solr.DelimitedPayloadTokenFilterFactory" encoder="float"/>
@@ -484,10 +484,10 @@
      -->
    <fieldType name="descendent_path" class="solr.TextField">
      <analyzer type="index">
-       <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
+       <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
      </analyzer>
      <analyzer type="query">
-       <tokenizer class="solr.KeywordTokenizerFactory" />
+       <tokenizer class="solr.KeywordTokenizerFactory" />
      </analyzer>
    </fieldType>
    <!--
@@ -496,10 +496,10 @@
      -->
    <fieldType name="ancestor_path" class="solr.TextField">
      <analyzer type="index">
-       <tokenizer class="solr.KeywordTokenizerFactory" />
+       <tokenizer class="solr.KeywordTokenizerFactory" />
      </analyzer>
      <analyzer type="query">
-       <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
+       <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
      </analyzer>
    </fieldType>

View File

@ -1068,7 +1068,7 @@
<!-- maximum threshold of documents a query term can appear to be considered for correction -->
<float name="maxQueryFrequency">0.01</float>
<!-- uncomment this to require suggestions to occur in 1% of the documents
<float name="thresholdTokenFrequency">.01</float>
<float name="thresholdTokenFrequency">.01</float>
-->
</lst>
@ -1159,7 +1159,7 @@
</requestHandler>
<searchComponent name="suggest" class="solr.SuggestComponent">
<lst name="suggester">
<lst name="suggester">
<str name="name">mySuggester</str>
<str name="lookupImpl">FuzzyLookupFactory</str> <!-- org.apache.solr.spelling.suggest.fst -->
<str name="dictionaryImpl">DocumentDictionaryFactory</str> <!-- org.apache.solr.spelling.suggest.HighFrequencyDictionaryFactory -->

View File

@@ -24,7 +24,7 @@
   <field name="manu_id_s">corsair</field>
   <field name="cat">electronics</field>
   <field name="cat">memory</field>
-  <field name="features">CAS latency 2, 2-3-3-6 timing, 2.75v, unbuffered, heat-spreader</field>
+  <field name="features">CAS latency 2, 2-3-3-6 timing, 2.75v, unbuffered, heat-spreader</field>
   <field name="price">185.00</field>
   <field name="popularity">5</field>
   <field name="inStock">true</field>
@@ -62,7 +62,7 @@
   <field name="manu_id_s">corsair</field>
   <field name="cat">electronics</field>
   <field name="cat">memory</field>
-  <field name="features">CAS latency 3, 2.7v</field>
+  <field name="features">CAS latency 3, 2.7v</field>
   <!-- note: price & popularity is missing on this one -->
   <field name="popularity">0</field>
   <field name="inStock">true</field>

View File

@@ -1052,7 +1052,7 @@
       <!-- maximum threshold of documents a query term can appear to be considered for correction -->
       <float name="maxQueryFrequency">0.01</float>
       <!-- uncomment this to require suggestions to occur in 1% of the documents
-      <float name="thresholdTokenFrequency">.01</float>
+      <float name="thresholdTokenFrequency">.01</float>
       -->
     </lst>

File diff suppressed because it is too large

View File

@@ -8,25 +8,25 @@
  */
 (function($)
 {
-  var settings;
+  var settings;
   $.fn.tx3TagCloud = function(options)
   {
-    //
-    // DEFAULT SETTINGS
-    //
-    settings = $.extend({
-      multiplier : 1
-    }, options);
-    main(this);
+    //
+    // DEFAULT SETTINGS
+    //
+    settings = $.extend({
+      multiplier : 1
+    }, options);
+    main(this);
   }
   function main(element)
   {
-    // adding style attr
-    element.addClass("tx3-tag-cloud");
-    addListElementFontSize(element);
+    // adding style attr
+    element.addClass("tx3-tag-cloud");
+    addListElementFontSize(element);
   }
 /**
@@ -35,36 +35,36 @@
  */
 function addListElementFontSize(element)
 {
-  var hDataWeight = -9007199254740992;
-  var lDataWeight = 9007199254740992;
-  $.each(element.find("li"), function(){
-    cDataWeight = getDataWeight(this);
-    if (cDataWeight == undefined)
-    {
-      logWarning("No \"data-weight\" attribute defined on <li> element");
-    }
-    else
-    {
-      hDataWeight = cDataWeight > hDataWeight ? cDataWeight : hDataWeight;
-      lDataWeight = cDataWeight < lDataWeight ? cDataWeight : lDataWeight;
-    }
-  });
-  $.each(element.find("li"), function(){
-    var dataWeight = getDataWeight(this);
-    var percent = Math.abs((dataWeight - lDataWeight)/(lDataWeight - hDataWeight));
-    $(this).css('font-size', (1 + (percent * settings['multiplier'])) + "em");
-  });
+  var hDataWeight = -9007199254740992;
+  var lDataWeight = 9007199254740992;
+  $.each(element.find("li"), function(){
+    cDataWeight = getDataWeight(this);
+    if (cDataWeight == undefined)
+    {
+      logWarning("No \"data-weight\" attribute defined on <li> element");
+    }
+    else
+    {
+      hDataWeight = cDataWeight > hDataWeight ? cDataWeight : hDataWeight;
+      lDataWeight = cDataWeight < lDataWeight ? cDataWeight : lDataWeight;
+    }
+  });
+  $.each(element.find("li"), function(){
+    var dataWeight = getDataWeight(this);
+    var percent = Math.abs((dataWeight - lDataWeight)/(lDataWeight - hDataWeight));
+    $(this).css('font-size', (1 + (percent * settings['multiplier'])) + "em");
+  });
 }
 function getDataWeight(element)
 {
-  return parseInt($(element).attr("data-weight"));
+  return parseInt($(element).attr("data-weight"));
 }
 function logWarning(message)
 {
-  console.log("[WARNING] " + Date.now() + " : " + message);
+  console.log("[WARNING] " + Date.now() + " : " + message);
 }
 }(jQuery));
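
(Usage, for orientation, with made-up ids and weights: the plugin expects an unordered list whose items carry data-weight attributes, initialized with something like $("#tags").tx3TagCloud({ multiplier: 2 }). Given the scaling above, the heaviest item renders at (1 + multiplier)em and the lightest at 1em.)

  <ul id="tags">
    <li data-weight="15">lucene</li>
    <li data-weight="9">solr</li>
    <li data-weight="3">ant</li>
  </ul>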

View File

@@ -40,7 +40,7 @@
     <sequential>
       <!-- jetty libs in lib/ -->
       <ivy:retrieve conf="jetty,servlet" type="jar" log="download-only" symlink="${ivy.symlink}"
-                    pattern="lib/[artifact]-[revision].[ext]" sync="true"/>
+                    pattern="lib/[artifact]-[revision].[ext]" sync="true"/>
       <ivy:retrieve conf="logging" type="jar,bundle" log="download-only" symlink="${ivy.symlink}"
                     pattern="lib/ext/[artifact]-[revision].[ext]" sync="true"/>
       <!-- start.jar - we don't use sync=true here, we don't own the dir, but
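
(The [artifact]-[revision].[ext] tokens in the retrieve pattern are expanded per resolved artifact; a hypothetical jetty-server 8.1.10 jar, for instance, would land at:)

  lib/jetty-server-8.1.10.jar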

View File

@@ -194,8 +194,8 @@
    <!-- sortMissingLast and sortMissingFirst are optional attributes,
         currently supported on types that are sorted internally as strings
         and on numeric types.
-      This includes "string","boolean", and, as of 3.5 (and 4.x),
-      int, float, long, date, double, including the "Trie" variants.
+      This includes "string","boolean", and, as of 3.5 (and 4.x),
+      int, float, long, date, double, including the "Trie" variants.
       - If sortMissingLast="true", then a sort on this field will cause documents
         without the field to come after documents with the field,
         regardless of the requested sort order (asc or desc).
@@ -306,9 +306,9 @@
    <!-- A general text field that has reasonable, generic
         cross-language defaults: it tokenizes with StandardTokenizer,
-       removes stop words from case-insensitive "stopwords.txt"
-       (empty by default), and down cases. At query time only, it
-       also applies synonyms. -->
+       removes stop words from case-insensitive "stopwords.txt"
+       (empty by default), and down cases. At query time only, it
+       also applies synonyms. -->
    <fieldType name="text_general" class="solr.TextField" positionIncrementGap="100">
      <analyzer type="index">
        <tokenizer class="solr.StandardTokenizerFactory"/>
@@ -344,11 +344,11 @@
                words="lang/stopwords_en.txt"
                />
        <filter class="solr.LowerCaseFilterFactory"/>
-       <filter class="solr.EnglishPossessiveFilterFactory"/>
+       <filter class="solr.EnglishPossessiveFilterFactory"/>
        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
-       <!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
+       <!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
        <filter class="solr.EnglishMinimalStemFilterFactory"/>
-       -->
+       -->
        <filter class="solr.PorterStemFilterFactory"/>
      </analyzer>
      <analyzer type="query">
@@ -359,23 +359,23 @@
                words="lang/stopwords_en.txt"
                />
        <filter class="solr.LowerCaseFilterFactory"/>
-       <filter class="solr.EnglishPossessiveFilterFactory"/>
+       <filter class="solr.EnglishPossessiveFilterFactory"/>
        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
-       <!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
+       <!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
        <filter class="solr.EnglishMinimalStemFilterFactory"/>
-       -->
+       -->
        <filter class="solr.PorterStemFilterFactory"/>
      </analyzer>
    </fieldType>
    <!-- A text field with defaults appropriate for English, plus
-     aggressive word-splitting and autophrase features enabled.
-     This field is just like text_en, except it adds
-     WordDelimiterFilter to enable splitting and matching of
-     words on case-change, alpha numeric boundaries, and
-     non-alphanumeric chars. This means certain compound word
-     cases will work, for example query "wi fi" will match
-     document "WiFi" or "wi-fi".
+     aggressive word-splitting and autophrase features enabled.
+     This field is just like text_en, except it adds
+     WordDelimiterFilter to enable splitting and matching of
+     words on case-change, alpha numeric boundaries, and
+     non-alphanumeric chars. This means certain compound word
+     cases will work, for example query "wi fi" will match
+     document "WiFi" or "wi-fi".
       -->
    <fieldType name="text_en_splitting" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
      <analyzer type="index">
@@ -426,7 +426,7 @@
    </fieldType>
    <!-- Just like text_general except it reverses the characters of
-     each token, to enable more efficient leading wildcard queries. -->
+     each token, to enable more efficient leading wildcard queries. -->
    <fieldType name="text_general_rev" class="solr.TextField" positionIncrementGap="100">
      <analyzer type="index">
        <tokenizer class="solr.StandardTokenizerFactory"/>

Some files were not shown because too many files have changed in this diff