From 1b44e0b9a50b88b94a3af377cc60b643698da9d7 Mon Sep 17 00:00:00 2001 From: Steven Rowe Date: Mon, 24 Jan 2011 19:33:14 +0000 Subject: [PATCH 001/185] added support for maven artifact generation of the new Solr UIMA contrib; the top-level get-maven-poms target now forces copying of all of the source pom.xml files, even if the source is not newer than the target files, so that version changes will always take effect when specified through the -Dversion ant cmdline option git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1062936 13f79535-47bb-0310-9956-ffa450edef68 --- build.xml | 2 +- dev-tools/maven/pom.xml.template | 124 +++++++++++++++-- dev-tools/maven/solr/contrib/pom.xml.template | 1 + .../maven/solr/contrib/uima/pom.xml.template | 128 ++++++++++++++++++ lucene/build.xml | 2 +- modules/build.xml | 2 +- solr/build.xml | 4 +- solr/contrib/uima/solr-uima-pom.xml.template | 115 ---------------- 8 files changed, 248 insertions(+), 130 deletions(-) create mode 100644 dev-tools/maven/solr/contrib/uima/pom.xml.template delete mode 100644 solr/contrib/uima/solr-uima-pom.xml.template diff --git a/build.xml b/build.xml index 9f52b014c2d..05d3df35af1 100644 --- a/build.xml +++ b/build.xml @@ -41,7 +41,7 @@ - + diff --git a/dev-tools/maven/pom.xml.template b/dev-tools/maven/pom.xml.template index 06f20640719..174df33cfd1 100644 --- a/dev-tools/maven/pom.xml.template +++ b/dev-tools/maven/pom.xml.template @@ -41,6 +41,10 @@ 4.0.0 yyyy-MM-dd HH:mm:ss 1.5 + 6.1.26 + 1.5.5 + 0.8 + ${project.version} JIRA @@ -246,12 +250,37 @@ org.apache.tika tika-core - 0.8 + ${tika.version} org.apache.tika tika-parsers - 0.8 + ${tika.version} + + + org.apache.solr + uima-alchemy-annotator + ${uima.version} + + + org.apache.solr + uima-OpenCalaisAnnotator + ${uima.version} + + + org.apache.solr + uima-Tagger + ${uima.version} + + + org.apache.solr + uima-WhitespaceTokenizer + ${uima.version} + + + org.apache.solr + uima-uimaj-core + ${uima.version} org.apache.velocity @@ -286,12 
+315,12 @@ org.mortbay.jetty jetty - 6.1.26 + ${jetty.version} org.mortbay.jetty jetty-util - 6.1.26 + ${jetty.version} org.mortbay.jetty @@ -301,7 +330,7 @@ org.mortbay.jetty jsp-2.1-jetty - 6.1.26 + ${jetty.version} org.mortbay.jetty @@ -311,23 +340,28 @@ org.slf4j jcl-over-slf4j - 1.5.5 + ${slf4j.version} org.slf4j log4j-over-slf4j - 1.5.5 + ${slf4j.version} org.slf4j slf4j-api - 1.5.5 + ${slf4j.version} org.slf4j slf4j-jdk14 - 1.5.5 + ${slf4j.version} + + org.slf4j + slf4j-simple + ${slf4j.version} + xerces xercesImpl @@ -480,7 +514,7 @@ org.mortbay.jetty maven-jetty-plugin - 6.1.26 + ${jetty.version} org.codehaus.gmaven @@ -681,6 +715,76 @@ solr/lib/apache-solr-noggit-r944541.jar + + install-solr-uima-alchemy-annotator + install + + install-file + + + org.apache.solr + uima-alchemy-annotator + ${uima.version} + jar + solr/contrib/uima/lib/uima-an-alchemy.jar + + + + install-solr-uima-OpenCalaisAnnotator + install + + install-file + + + org.apache.solr + uima-OpenCalaisAnnotator + ${uima.version} + jar + solr/contrib/uima/lib/uima-an-calais.jar + + + + install-solr-uima-Tagger + install + + install-file + + + org.apache.solr + uima-Tagger + ${uima.version} + jar + solr/contrib/uima/lib/uima-an-tagger.jar + + + + install-solr-uima-WhitespaceTokenizer + install + + install-file + + + org.apache.solr + uima-WhitespaceTokenizer + ${uima.version} + jar + solr/contrib/uima/lib/uima-an-wst.jar + + + + install-solr-uima-uimaj-core + install + + install-file + + + org.apache.solr + uima-uimaj-core + ${uima.version} + jar + solr/contrib/uima/lib/uima-core.jar + + diff --git a/dev-tools/maven/solr/contrib/pom.xml.template b/dev-tools/maven/solr/contrib/pom.xml.template index bd05a43d3aa..e4a731aa6a7 100644 --- a/dev-tools/maven/solr/contrib/pom.xml.template +++ b/dev-tools/maven/solr/contrib/pom.xml.template @@ -35,6 +35,7 @@ clustering dataimporthandler extraction + uima ../build/solr-contrib-aggregator diff --git a/dev-tools/maven/solr/contrib/uima/pom.xml.template 
b/dev-tools/maven/solr/contrib/uima/pom.xml.template new file mode 100644 index 00000000000..a7802c5b21b --- /dev/null +++ b/dev-tools/maven/solr/contrib/uima/pom.xml.template @@ -0,0 +1,128 @@ + + + 4.0.0 + + org.apache.solr + solr-parent + @version@ + ../../pom.xml + + org.apache.solr + solr-uima + jar + Apache Solr - UIMA integration + Apache Solr - UIMA integration + + solr/contrib/uima + build + 4.0 + + + + ${project.groupId} + solr-core + ${project.version} + + + ${project.groupId} + solr-core + ${project.version} + test-jar + test + + + ${project.groupId} + solr-solrj + ${project.version} + + + org.apache.lucene + lucene-core + ${project.version} + test-jar + test + + + commons-digester + commons-digester + + + commons-lang + commons-lang + + + org.apache.solr + uima-alchemy-annotator + + + org.apache.solr + uima-OpenCalaisAnnotator + + + org.apache.solr + uima-Tagger + + + org.apache.solr + uima-WhitespaceTokenizer + + + org.apache.solr + uima-uimaj-core + + + org.slf4j + slf4j-simple + + + junit + junit + test + + + + ${build-directory} + ${build-directory}/classes + ${build-directory}/test-classes + + + src/main/resources + + + + + src/test/resources + + + + + org.apache.maven.plugins + maven-surefire-plugin + + + ../../../../testlogging.properties + + + + + + diff --git a/lucene/build.xml b/lucene/build.xml index 96e00855c45..31b3b2e3011 100644 --- a/lucene/build.xml +++ b/lucene/build.xml @@ -391,7 +391,7 @@ - + diff --git a/modules/build.xml b/modules/build.xml index cc191bb2c55..ccaa9f92a89 100644 --- a/modules/build.xml +++ b/modules/build.xml @@ -56,7 +56,7 @@ - + diff --git a/solr/build.xml b/solr/build.xml index cf6c7f655e9..a29cb50ab62 100644 --- a/solr/build.xml +++ b/solr/build.xml @@ -846,7 +846,7 @@ - + - 4.0.0 - org.apache.solr - solr-uima - 0.0.2-SNAPSHOT - Solr - UIMA integration - - 2.3.1-SNAPSHOT - - - - org.apache.solr - solr-core - 1.4.1 - - - org.apache.uima - uimaj-core - ${uimaVersion} - - - org.apache.uima - alchemy-annotator - 
${uimaVersion} - - - org.apache.uima - OpenCalaisAnnotator - ${uimaVersion} - - - junit - junit - 4.7 - jar - test - - - org.slf4j - slf4j-simple - 1.5.5 - - - org.apache.uima - WhitespaceTokenizer - ${uimaVersion} - - - org.apache.uima - Tagger - ${uimaVersion} - - - - - - - com.googlecode.maven-gcu-plugin - maven-gcu-plugin - 1.0 - - - - - - org.apache.maven.plugins - maven-compiler-plugin - 2.3.1 - - 1.5 - 1.5 - - - - com.googlecode.maven-gcu-plugin - maven-gcu-plugin - 1.0 - - googlecode - true - ${project.artifactId} - - - ${project.build.directory}/${project.artifactId}-${project.version}.${project.packaging} - ${project.name} sources bundle ${project.version} - - - - - - - - - - - \ No newline at end of file From 4a639880601d446394b6f9fd90a1087aae64b531 Mon Sep 17 00:00:00 2001 From: "Chris M. Hostetter" Date: Mon, 24 Jan 2011 23:03:58 +0000 Subject: [PATCH 002/185] SOLR-1240: followup: use NL instead of SOM for counts list git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063052 13f79535-47bb-0310-9956-ffa450edef68 --- solr/src/java/org/apache/solr/request/SimpleFacets.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/solr/src/java/org/apache/solr/request/SimpleFacets.java b/solr/src/java/org/apache/solr/request/SimpleFacets.java index 9d5e49b648c..2a959a3e87a 100644 --- a/solr/src/java/org/apache/solr/request/SimpleFacets.java +++ b/solr/src/java/org/apache/solr/request/SimpleFacets.java @@ -1031,7 +1031,7 @@ public class SimpleFacets { final String f = sf.getName(); final NamedList res = new SimpleOrderedMap(); - final NamedList counts = new SimpleOrderedMap(); + final NamedList counts = new NamedList(); res.add("counts", counts); final T start = calc.getValue(required.getFieldParam(f,FacetParams.FACET_RANGE_START)); From a20e2cd89adb210f21607e419db942cc83b4be8f Mon Sep 17 00:00:00 2001 From: Adriano Crestani Campos Date: Tue, 25 Jan 2011 01:22:35 +0000 Subject: [PATCH 003/185] applying patches from LUCENE-2855 and 
LUCENE-2867 to trunk git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063095 13f79535-47bb-0310-9956-ffa450edef68 --- .../core/builders/QueryTreeBuilder.java | 35 ++++++++-------- .../queryParser/core/config/FieldConfig.java | 21 +++++----- .../core/config/QueryConfigHandler.java | 4 +- .../queryParser/core/nodes/QueryNode.java | 23 +++++++---- .../queryParser/core/nodes/QueryNodeImpl.java | 41 +++++++++++-------- .../queryParser/core/util/StringUtils.java | 33 +++++++++++++++ .../config/FieldBoostMapFCListener.java | 2 +- .../config/FieldDateResolutionFCListener.java | 2 +- .../MultiTermRewriteMethodAttribute.java | 2 +- .../processors/BoostQueryNodeProcessor.java | 4 +- .../ParametricRangeQueryNodeProcessor.java | 11 ++++- .../core/builders/TestQueryTreeBuilder.java | 31 ++++++++++++++ .../spans/SpansQueryConfigHandler.java | 2 +- 13 files changed, 148 insertions(+), 63 deletions(-) create mode 100644 lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/util/StringUtils.java create mode 100644 lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/core/builders/TestQueryTreeBuilder.java diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/builders/QueryTreeBuilder.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/builders/QueryTreeBuilder.java index 9cae3ac0d68..fe20a8452bb 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/builders/QueryTreeBuilder.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/builders/QueryTreeBuilder.java @@ -61,7 +61,7 @@ public class QueryTreeBuilder implements QueryBuilder { private HashMap, QueryBuilder> queryNodeBuilders; - private HashMap fieldNameBuilders; + private HashMap fieldNameBuilders; /** * {@link QueryTreeBuilder} constructor. @@ -73,28 +73,25 @@ public class QueryTreeBuilder implements QueryBuilder { /** * Associates a field name with a builder. 
* - * @param fieldName - * the field name - * @param builder - * the builder to be associated + * @param fieldName the field name + * @param builder the builder to be associated */ public void setBuilder(CharSequence fieldName, QueryBuilder builder) { if (this.fieldNameBuilders == null) { - this.fieldNameBuilders = new HashMap(); + this.fieldNameBuilders = new HashMap(); } - this.fieldNameBuilders.put(fieldName, builder); + this.fieldNameBuilders.put(fieldName.toString(), builder); + } /** * Associates a class with a builder * - * @param queryNodeClass - * the class - * @param builder - * the builder to be associated + * @param queryNodeClass the class + * @param builder the builder to be associated */ public void setBuilder(Class queryNodeClass, QueryBuilder builder) { @@ -135,8 +132,13 @@ public class QueryTreeBuilder implements QueryBuilder { QueryBuilder builder = null; if (this.fieldNameBuilders != null && node instanceof FieldableNode) { + CharSequence field = ((FieldableNode) node).getField(); - builder = this.fieldNameBuilders.get(((FieldableNode) node).getField()); + if (field != null) { + field = field.toString(); + } + + builder = this.fieldNameBuilders.get(field); } @@ -203,14 +205,13 @@ public class QueryTreeBuilder implements QueryBuilder { * Builds some kind of object from a query tree. Each node in the query tree * is built using an specific builder associated to it. 
* - * @param queryNode - * the query tree root node + * @param queryNode the query tree root node * * @return the built object * - * @throws QueryNodeException - * if some node builder throws a {@link QueryNodeException} or if - * there is a node which had no builder associated to it + * @throws QueryNodeException if some node builder throws a + * {@link QueryNodeException} or if there is a node which had no + * builder associated to it */ public Object build(QueryNode queryNode) throws QueryNodeException { process(queryNode); diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/config/FieldConfig.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/config/FieldConfig.java index 1036a86950f..7c582972bc7 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/config/FieldConfig.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/config/FieldConfig.java @@ -28,17 +28,15 @@ import org.apache.lucene.util.AttributeSource; */ public class FieldConfig extends AttributeSource { - private CharSequence fieldName; - + private String fieldName; + /** * Constructs a {@link FieldConfig} * - * @param fieldName - * the field name, it cannot be null - * @throws IllegalArgumentException - * if the field name is null + * @param fieldName the field name, it cannot be null + * @throws IllegalArgumentException if the field name is null */ - public FieldConfig(CharSequence fieldName) { + public FieldConfig(String fieldName) { if (fieldName == null) { throw new IllegalArgumentException("field name should not be null!"); @@ -53,13 +51,14 @@ public class FieldConfig extends AttributeSource { * * @return the field name */ - public CharSequence getFieldName() { + public String getField() { return this.fieldName; } - + @Override - public String toString(){ - return ""; + public String toString() { + return ""; } } diff --git 
a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/config/QueryConfigHandler.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/config/QueryConfigHandler.java index ad22c198928..f50ff443eea 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/config/QueryConfigHandler.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/config/QueryConfigHandler.java @@ -46,7 +46,7 @@ import org.apache.lucene.util.AttributeSource; public abstract class QueryConfigHandler extends AttributeSource { private LinkedList listeners = new LinkedList(); - + /** * Returns an implementation of * {@link FieldConfig} for a specific field name. If the implemented @@ -60,7 +60,7 @@ public abstract class QueryConfigHandler extends AttributeSource { * configuration or null, if the implemented * {@link QueryConfigHandler} has no configuration for that field */ - public FieldConfig getFieldConfig(CharSequence fieldName) { + public FieldConfig getFieldConfig(String fieldName) { FieldConfig fieldConfig = new FieldConfig(fieldName); for (FieldConfigListener listener : this.listeners) { diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/QueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/QueryNode.java index f1afdaec59b..fac89f59f10 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/QueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/QueryNode.java @@ -43,14 +43,14 @@ public interface QueryNode extends Serializable { public boolean isLeaf(); /** verify if a node contains a tag */ - public boolean containsTag(CharSequence tagName); - + public boolean containsTag(String tagName); + /** * @param tagName * @return of stored on under that tag name */ - public Object getTag(CharSequence tagName); - + public Object getTag(String tagName); + 
public QueryNode getParent(); /** @@ -81,15 +81,20 @@ public interface QueryNode extends Serializable { * @param tagName * @param value */ - public void setTag(CharSequence tagName, Object value); - + public void setTag(String tagName, Object value); + /** * Unset a tag. tagName will be converted to lowercase. * * @param tagName */ - public void unsetTag(CharSequence tagName); - - public Map getTags(); + public void unsetTag(String tagName); + + /** + * Returns a map containing all tags attached to this query node. + * + * @return a map containing all tags attached to this query node + */ + public Map getTagMap(); } diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/QueryNodeImpl.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/QueryNodeImpl.java index e6316886838..6b48cabb869 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/QueryNodeImpl.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/QueryNodeImpl.java @@ -25,6 +25,7 @@ import java.util.ResourceBundle; import org.apache.lucene.messages.NLS; import org.apache.lucene.queryParser.core.messages.QueryParserMessages; +import org.apache.lucene.queryParser.core.util.StringUtils; /** * A {@link QueryNodeImpl} is the default implementation of the interface @@ -40,7 +41,7 @@ public abstract class QueryNodeImpl implements QueryNode, Cloneable { private boolean isLeaf = true; - private Hashtable tags = new Hashtable(); + private Hashtable tags = new Hashtable(); private List clauses = null; @@ -117,7 +118,7 @@ public abstract class QueryNodeImpl implements QueryNode, Cloneable { clone.isLeaf = this.isLeaf; // Reset all tags - clone.tags = new Hashtable(); + clone.tags = new Hashtable(); // copy children if (this.clauses != null) { @@ -151,19 +152,20 @@ public abstract class QueryNodeImpl implements QueryNode, Cloneable { return this.clauses; } - public void 
setTag(CharSequence tagName, Object value) { - this.tags.put(tagName.toString().toLowerCase(), value); + public void setTag(String tagName, Object value) { + this.tags.put(tagName.toLowerCase(), value); } - public void unsetTag(CharSequence tagName) { - this.tags.remove(tagName.toString().toLowerCase()); + public void unsetTag(String tagName) { + this.tags.remove(tagName.toLowerCase()); } - public boolean containsTag(CharSequence tagName) { - return this.tags.containsKey(tagName.toString().toLowerCase()); + /** verify if a node contains a tag */ + public boolean containsTag(String tagName) { + return this.tags.containsKey(tagName); } - public Object getTag(CharSequence tagName) { + public Object getTag(String tagName) { return this.tags.get(tagName.toString().toLowerCase()); } @@ -189,16 +191,20 @@ public abstract class QueryNodeImpl implements QueryNode, Cloneable { /** * This method is use toQueryString to detect if fld is the default field * - * @param fld - * - field name + * @param fld - field name * @return true if fld is the default field */ + // TODO: remove this method, it's commonly used by {@link + // #toQueryString(org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax)} + // to figure out what is the default field, however, {@link + // #toQueryString(org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax)} + // should receive the default field value directly by parameter protected boolean isDefaultField(CharSequence fld) { if (this.toQueryStringIgnoreFields) return true; if (fld == null) return true; - if (QueryNodeImpl.PLAINTEXT_FIELD_NAME.equals(fld.toString())) + if (QueryNodeImpl.PLAINTEXT_FIELD_NAME.equals(StringUtils.toString(fld))) return true; return false; } @@ -216,12 +222,13 @@ public abstract class QueryNodeImpl implements QueryNode, Cloneable { } /** - * @see org.apache.lucene.queryParser.core.nodes.QueryNode#getTag(CharSequence) - * @return a Map with all tags for this QueryNode + * Returns a map containing all tags attached to 
this query node. + * + * @return a map containing all tags attached to this query node */ - @SuppressWarnings( { "unchecked" }) - public Map getTags() { - return (Map) this.tags.clone(); + @SuppressWarnings("unchecked") + public Map getTagMap() { + return (Map) this.tags.clone(); } } // end class QueryNodeImpl diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/util/StringUtils.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/util/StringUtils.java new file mode 100644 index 00000000000..fe0e51b9e8c --- /dev/null +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/util/StringUtils.java @@ -0,0 +1,33 @@ +package org.apache.lucene.queryParser.core.util; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +final public class StringUtils { + + public static String toString(Object obj) { + + if (obj != null) { + return obj.toString(); + + } else { + return null; + } + + } + +} diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldBoostMapFCListener.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldBoostMapFCListener.java index 5e8399e82e9..ab93ee9d531 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldBoostMapFCListener.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldBoostMapFCListener.java @@ -47,7 +47,7 @@ public class FieldBoostMapFCListener implements FieldConfigListener { FieldBoostMapAttribute fieldBoostMapAttr = this.config.getAttribute(FieldBoostMapAttribute.class); BoostAttribute boostAttr = fieldConfig.addAttribute(BoostAttribute.class); - Float boost = fieldBoostMapAttr.getFieldBoostMap().get(fieldConfig.getFieldName()); + Float boost = fieldBoostMapAttr.getFieldBoostMap().get(fieldConfig.getField()); if (boost != null) { boostAttr.setBoost(boost.floatValue()); diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionFCListener.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionFCListener.java index 7d6b66c8510..0bdb9abc8ac 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionFCListener.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionFCListener.java @@ -53,7 +53,7 @@ public class FieldDateResolutionFCListener implements FieldConfigListener { FieldDateResolutionMapAttribute dateResMapAttr = this.config .addAttribute(FieldDateResolutionMapAttribute.class); dateRes = dateResMapAttr.getFieldDateResolutionMap().get( - 
fieldConfig.getFieldName().toString()); + fieldConfig.getField()); } if (dateRes == null) { diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/MultiTermRewriteMethodAttribute.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/MultiTermRewriteMethodAttribute.java index 84924e34599..bb0559427df 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/MultiTermRewriteMethodAttribute.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/MultiTermRewriteMethodAttribute.java @@ -32,7 +32,7 @@ import org.apache.lucene.util.Attribute; */ public interface MultiTermRewriteMethodAttribute extends Attribute { - public static final CharSequence TAG_ID = "MultiTermRewriteMethodAttribute"; + public static final String TAG_ID = "MultiTermRewriteMethodAttribute"; public void setMultiTermRewriteMethod(MultiTermQuery.RewriteMethod method); diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/BoostQueryNodeProcessor.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/BoostQueryNodeProcessor.java index 57f0cc59f75..5232ffa0184 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/BoostQueryNodeProcessor.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/BoostQueryNodeProcessor.java @@ -26,6 +26,7 @@ import org.apache.lucene.queryParser.core.nodes.BoostQueryNode; import org.apache.lucene.queryParser.core.nodes.FieldableNode; import org.apache.lucene.queryParser.core.nodes.QueryNode; import org.apache.lucene.queryParser.core.processors.QueryNodeProcessorImpl; +import org.apache.lucene.queryParser.core.util.StringUtils; import org.apache.lucene.queryParser.standard.config.BoostAttribute; /** @@ -49,7 +50,8 @@ public class BoostQueryNodeProcessor extends 
QueryNodeProcessorImpl { QueryConfigHandler config = getQueryConfigHandler(); if (config != null) { - FieldConfig fieldConfig = config.getFieldConfig(fieldNode.getField()); + CharSequence field = fieldNode.getField(); + FieldConfig fieldConfig = config.getFieldConfig(StringUtils.toString(field)); if (fieldConfig != null && fieldConfig.hasAttribute(BoostAttribute.class)) { BoostAttribute boostAttr = fieldConfig.getAttribute(BoostAttribute.class); diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/ParametricRangeQueryNodeProcessor.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/ParametricRangeQueryNodeProcessor.java index 0947475d637..fc2e5fa020b 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/ParametricRangeQueryNodeProcessor.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/ParametricRangeQueryNodeProcessor.java @@ -97,8 +97,15 @@ public class ParametricRangeQueryNodeProcessor extends QueryNodeProcessorImpl { } - FieldConfig fieldConfig = getQueryConfigHandler().getFieldConfig( - parametricRangeNode.getField()); + CharSequence field = parametricRangeNode.getField(); + String fieldStr = null; + + if (field != null) { + fieldStr = field.toString(); + } + + FieldConfig fieldConfig = getQueryConfigHandler() + .getFieldConfig(fieldStr); if (fieldConfig != null) { diff --git a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/core/builders/TestQueryTreeBuilder.java b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/core/builders/TestQueryTreeBuilder.java new file mode 100644 index 00000000000..f456d298b67 --- /dev/null +++ b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/core/builders/TestQueryTreeBuilder.java @@ -0,0 +1,31 @@ +package org.apache.lucene.queryParser.core.builders; + +import junit.framework.Assert; + +import 
org.apache.lucene.queryParser.core.QueryNodeException; +import org.apache.lucene.queryParser.core.nodes.FieldQueryNode; +import org.apache.lucene.queryParser.core.nodes.QueryNode; +import org.apache.lucene.queryParser.core.util.UnescapedCharSequence; +import org.apache.lucene.util.LuceneTestCase; +import org.junit.Test; + +public class TestQueryTreeBuilder extends LuceneTestCase { + + @Test + public void testSetFieldBuilder() throws QueryNodeException { + QueryTreeBuilder qtb = new QueryTreeBuilder(); + qtb.setBuilder("field", new DummyBuilder()); + Object result = qtb.build(new FieldQueryNode(new UnescapedCharSequence("field"), "foo", 0, 0)); + Assert.assertEquals("OK", result); + + } + + private static class DummyBuilder implements QueryBuilder { + + public Object build(QueryNode queryNode) throws QueryNodeException { + return "OK"; + } + + } + +} diff --git a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/spans/SpansQueryConfigHandler.java b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/spans/SpansQueryConfigHandler.java index 520d4efeb73..b614938f9ee 100644 --- a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/spans/SpansQueryConfigHandler.java +++ b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/spans/SpansQueryConfigHandler.java @@ -33,7 +33,7 @@ public class SpansQueryConfigHandler extends QueryConfigHandler { } @Override - public FieldConfig getFieldConfig(CharSequence fieldName) { + public FieldConfig getFieldConfig(String fieldName) { // there is no field configuration, always return null return null; From 913a9e4491ea5208b82e17324825da0aa4851149 Mon Sep 17 00:00:00 2001 From: Shai Erera Date: Tue, 25 Jan 2011 13:34:48 +0000 Subject: [PATCH 004/185] LUCENE-1250: fix equals() to check for null and not fail on NPE (trunk) git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063272 13f79535-47bb-0310-9956-ffa450edef68 --- 
.../apache/lucene/search/function/OrdFieldSource.java | 4 +++- .../lucene/search/function/ReverseOrdFieldSource.java | 4 +++- .../org/apache/lucene/search/function/TestOrdValues.java | 9 +++++++++ 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/lucene/src/java/org/apache/lucene/search/function/OrdFieldSource.java b/lucene/src/java/org/apache/lucene/search/function/OrdFieldSource.java index e7817da359e..4a23a1c5d9e 100644 --- a/lucene/src/java/org/apache/lucene/search/function/OrdFieldSource.java +++ b/lucene/src/java/org/apache/lucene/search/function/OrdFieldSource.java @@ -99,7 +99,9 @@ public class OrdFieldSource extends ValueSource { /*(non-Javadoc) @see java.lang.Object#equals(java.lang.Object) */ @Override public boolean equals(Object o) { - if (o.getClass() != OrdFieldSource.class) return false; + if (o == this) return true; + if (o == null) return false; + if (o.getClass() != OrdFieldSource.class) return false; OrdFieldSource other = (OrdFieldSource)o; return this.field.equals(other.field); } diff --git a/lucene/src/java/org/apache/lucene/search/function/ReverseOrdFieldSource.java b/lucene/src/java/org/apache/lucene/search/function/ReverseOrdFieldSource.java index 30e339d5724..bb01ca4a4b5 100644 --- a/lucene/src/java/org/apache/lucene/search/function/ReverseOrdFieldSource.java +++ b/lucene/src/java/org/apache/lucene/search/function/ReverseOrdFieldSource.java @@ -107,7 +107,9 @@ public class ReverseOrdFieldSource extends ValueSource { /*(non-Javadoc) @see java.lang.Object#equals(java.lang.Object) */ @Override public boolean equals(Object o) { - if (o.getClass() != ReverseOrdFieldSource.class) return false; + if (o == this) return true; + if (o == null) return false; + if (o.getClass() != ReverseOrdFieldSource.class) return false; ReverseOrdFieldSource other = (ReverseOrdFieldSource)o; return this.field.equals(other.field); } diff --git a/lucene/src/test/org/apache/lucene/search/function/TestOrdValues.java 
b/lucene/src/test/org/apache/lucene/search/function/TestOrdValues.java index 706eca76f43..07b07f1c317 100644 --- a/lucene/src/test/org/apache/lucene/search/function/TestOrdValues.java +++ b/lucene/src/test/org/apache/lucene/search/function/TestOrdValues.java @@ -253,5 +253,14 @@ public class TestOrdValues extends FunctionTestSetup { private String testName() { return getClass().getName() + "." + getName(); } + + // LUCENE-1250 + public void testEqualsNull() throws Exception { + OrdFieldSource ofs = new OrdFieldSource("f"); + assertFalse(ofs.equals(null)); + + ReverseOrdFieldSource rofs = new ReverseOrdFieldSource("f"); + assertFalse(rofs.equals(null)); + } } From 5642eb07c4169f669c29645e56761b8fcf771b06 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Tue, 25 Jan 2011 15:40:51 +0000 Subject: [PATCH 005/185] LUCENE-2010: drop segments that are 100% deleted docs in IW/IR commit git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063323 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/CHANGES.txt | 6 ++++ .../apache/lucene/index/BufferedDeletes.java | 6 ++-- .../apache/lucene/index/DirectoryReader.java | 3 ++ .../org/apache/lucene/index/IndexReader.java | 9 ++++- .../org/apache/lucene/index/IndexWriter.java | 13 ++++++++ .../org/apache/lucene/index/SegmentInfos.java | 13 ++++++++ .../apache/lucene/index/TestAddIndexes.java | 4 +-- .../apache/lucene/index/TestIndexReader.java | 8 +---- .../lucene/index/TestIndexReaderReopen.java | 1 - .../apache/lucene/index/TestIndexWriter.java | 8 ----- .../index/TestIndexWriterExceptions.java | 33 ++++++++++--------- .../org/apache/lucene/search/QueryUtils.java | 11 +++++++ 12 files changed, 77 insertions(+), 38 deletions(-) diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index 90c498d9581..44d90a02240 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -486,6 +486,9 @@ Changes in runtime behavior * LUCENE-2829: Improve the performance of "primary key" lookup use case (running a TermQuery that matches 
one document) on a multi-segment index. (Robert Muir, Mike McCandless) + +* LUCENE-2010: Segments with 100% deleted documents are now removed on + IndexReader or IndexWriter commit. (Uwe Schindler, Mike McCandless) API Changes @@ -905,6 +908,9 @@ Optimizations * LUCENE-2824: Optimize BufferedIndexInput to do less bounds checks. (Robert Muir) +* LUCENE-2010: Segments with 100% deleted documents are now removed on + IndexReader or IndexWriter commit. (Uwe Schindler, Mike McCandless) + Build * LUCENE-2124: Moved the JDK-based collation support from contrib/collation diff --git a/lucene/src/java/org/apache/lucene/index/BufferedDeletes.java b/lucene/src/java/org/apache/lucene/index/BufferedDeletes.java index 8be325a64a9..0be1dd2ba30 100644 --- a/lucene/src/java/org/apache/lucene/index/BufferedDeletes.java +++ b/lucene/src/java/org/apache/lucene/index/BufferedDeletes.java @@ -270,9 +270,9 @@ class BufferedDeletes { } private synchronized long applyDeletes(IndexWriter.ReaderPool readerPool, - SegmentInfo info, - SegmentDeletes coalescedDeletes, - SegmentDeletes segmentDeletes) throws IOException { + SegmentInfo info, + SegmentDeletes coalescedDeletes, + SegmentDeletes segmentDeletes) throws IOException { assert readerPool.infoIsLive(info); assert coalescedDeletes == null || coalescedDeletes.docIDs.size() == 0; diff --git a/lucene/src/java/org/apache/lucene/index/DirectoryReader.java b/lucene/src/java/org/apache/lucene/index/DirectoryReader.java index 9da85ca5e6a..f339133b7ca 100644 --- a/lucene/src/java/org/apache/lucene/index/DirectoryReader.java +++ b/lucene/src/java/org/apache/lucene/index/DirectoryReader.java @@ -710,6 +710,9 @@ class DirectoryReader extends IndexReader implements Cloneable { for (int i = 0; i < subReaders.length; i++) subReaders[i].commit(); + // Remove segments that contain only 100% deleted docs: + segmentInfos.pruneDeletedSegments(); + // Sync all files we just wrote directory.sync(segmentInfos.files(directory, false)); 
segmentInfos.commit(directory); diff --git a/lucene/src/java/org/apache/lucene/index/IndexReader.java b/lucene/src/java/org/apache/lucene/index/IndexReader.java index eb953c687ba..7f1b736cf1e 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexReader.java +++ b/lucene/src/java/org/apache/lucene/index/IndexReader.java @@ -1163,7 +1163,14 @@ public abstract class IndexReader implements Cloneable,Closeable { return n; } - /** Undeletes all documents currently marked as deleted in this index. + /** Undeletes all documents currently marked as deleted in + * this index. + * + *

NOTE: this is only a best-effort process. For + * example, if all documents in a given segment were + * deleted, Lucene now drops that segment from the index, + * which means its documents will not be recovered by this + * method. * * @throws StaleReaderException if the index has changed * since this reader was opened diff --git a/lucene/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/src/java/org/apache/lucene/index/IndexWriter.java index e746427eec9..710822bd15d 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexWriter.java +++ b/lucene/src/java/org/apache/lucene/index/IndexWriter.java @@ -3276,6 +3276,15 @@ public class IndexWriter implements Closeable { } } + private boolean keepFullyDeletedSegments; + + /** Only for testing. + * + * @lucene.internal */ + void keepFullyDeletedSegments() { + keepFullyDeletedSegments = true; + } + // called only from assert private boolean filesExist(SegmentInfos toSync) throws IOException { Collection files = toSync.files(directory, false); @@ -3334,6 +3343,10 @@ public class IndexWriter implements Closeable { readerPool.commit(); toSync = (SegmentInfos) segmentInfos.clone(); + if (!keepFullyDeletedSegments) { + toSync.pruneDeletedSegments(); + } + assert filesExist(toSync); if (commitUserData != null) diff --git a/lucene/src/java/org/apache/lucene/index/SegmentInfos.java b/lucene/src/java/org/apache/lucene/index/SegmentInfos.java index 896e6222266..493279ee17b 100644 --- a/lucene/src/java/org/apache/lucene/index/SegmentInfos.java +++ b/lucene/src/java/org/apache/lucene/index/SegmentInfos.java @@ -308,6 +308,19 @@ public final class SegmentInfos extends Vector { } } + /** Prunes any segment whose docs are all deleted. 
*/ + public void pruneDeletedSegments() { + int segIdx = 0; + while(segIdx < size()) { + final SegmentInfo info = info(segIdx); + if (info.getDelCount() == info.docCount) { + remove(segIdx); + } else { + segIdx++; + } + } + } + /** * Returns a copy of this instance, also copying each * SegmentInfo. diff --git a/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java b/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java index c02f4fa6f26..52d5b7d7d46 100755 --- a/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java +++ b/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java @@ -428,7 +428,7 @@ public class TestAddIndexes extends LuceneTestCase { ); writer.addIndexes(aux, new MockDirectoryWrapper(random, new RAMDirectory(aux))); - assertEquals(1060, writer.maxDoc()); + assertEquals(1020, writer.maxDoc()); assertEquals(1000, writer.getDocCount(0)); writer.close(); dir.close(); @@ -480,7 +480,7 @@ public class TestAddIndexes extends LuceneTestCase { ); writer.addIndexes(aux, aux2); - assertEquals(1060, writer.maxDoc()); + assertEquals(1040, writer.maxDoc()); assertEquals(1000, writer.getDocCount(0)); writer.close(); dir.close(); diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexReader.java b/lucene/src/test/org/apache/lucene/index/TestIndexReader.java index 41fb07fbf73..ef87922f311 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexReader.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexReader.java @@ -360,7 +360,7 @@ public class TestIndexReader extends LuceneTestCase // CREATE A NEW READER and re-test reader = IndexReader.open(dir, false); - assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm)); + assertEquals("deleted docFreq", 0, reader.docFreq(searchTerm)); assertTermDocsCount("deleted termDocs", reader, searchTerm, 0); reader.close(); reader2.close(); @@ -697,7 +697,6 @@ public class TestIndexReader extends LuceneTestCase // CREATE A NEW READER and re-test reader = IndexReader.open(dir, false); 
- assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm)); assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm2)); assertTermDocsCount("deleted termDocs", reader, searchTerm, 0); assertTermDocsCount("deleted termDocs", reader, searchTerm2, 100); @@ -838,7 +837,6 @@ public class TestIndexReader extends LuceneTestCase writer.close(); IndexReader reader = IndexReader.open(dir, false); reader.deleteDocument(0); - reader.deleteDocument(1); reader.close(); reader = IndexReader.open(dir, false); reader.undeleteAll(); @@ -855,7 +853,6 @@ public class TestIndexReader extends LuceneTestCase writer.close(); IndexReader reader = IndexReader.open(dir, false); reader.deleteDocument(0); - reader.deleteDocument(1); reader.close(); reader = IndexReader.open(dir, false); reader.undeleteAll(); @@ -1290,9 +1287,6 @@ public class TestIndexReader extends LuceneTestCase // Open another reader to confirm that everything is deleted reader2 = IndexReader.open(dir, false); - assertEquals("reopened 2", 100, reader2.docFreq(searchTerm1)); - assertEquals("reopened 2", 100, reader2.docFreq(searchTerm2)); - assertEquals("reopened 2", 100, reader2.docFreq(searchTerm3)); assertTermDocsCount("reopened 2", reader2, searchTerm1, 0); assertTermDocsCount("reopened 2", reader2, searchTerm2, 0); assertTermDocsCount("reopened 2", reader2, searchTerm3, 100); diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java b/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java index c6bdd8c380f..e7d87a640ca 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java @@ -1211,7 +1211,6 @@ public class TestIndexReaderReopen extends LuceneTestCase { IndexReader r = IndexReader.open(dir, false); assertEquals(0, r.numDocs()); - assertEquals(4, r.maxDoc()); Collection commits = IndexReader.listCommits(dir); for (final IndexCommit commit : commits) { diff --git 
a/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java index 554fa5bc165..a24bab5a878 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java @@ -101,19 +101,12 @@ public class TestIndexWriter extends LuceneTestCase { } reader.close(); - // test doc count before segments are merged/index is optimized - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())); - assertEquals(100, writer.maxDoc()); - writer.close(); - reader = IndexReader.open(dir, true); - assertEquals(100, reader.maxDoc()); assertEquals(60, reader.numDocs()); reader.close(); // optimize the index and check that the new doc count is correct writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())); - assertEquals(100, writer.maxDoc()); assertEquals(60, writer.numDocs()); writer.optimize(); assertEquals(60, writer.maxDoc()); @@ -1431,7 +1424,6 @@ public class TestIndexWriter extends LuceneTestCase { w.close(); IndexReader ir = IndexReader.open(dir, true); - assertEquals(1, ir.maxDoc()); assertEquals(0, ir.numDocs()); ir.close(); diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java index 45ae58ce989..da56333555e 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java @@ -567,24 +567,25 @@ public class TestIndexWriterExceptions extends LuceneTestCase { System.out.println("TEST: open reader"); } IndexReader reader = IndexReader.open(dir, true); - int expected = 3+(1-i)*2; - assertEquals(expected, reader.docFreq(new Term("contents", "here"))); - assertEquals(expected, reader.maxDoc()); - int numDel = 0; - final Bits delDocs = MultiFields.getDeletedDocs(reader); - 
assertNotNull(delDocs); - for(int j=0;j Date: Tue, 25 Jan 2011 16:12:35 +0000 Subject: [PATCH 006/185] LUCENE-2888: Several DocsEnum / DocsAndPositionsEnum return wrong docID when next() / advance(int) return NO_MORE_DOCS git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063332 13f79535-47bb-0310-9956-ffa450edef68 --- .../index/codecs/preflex/PreFlexFields.java | 26 +- .../pulsing/PulsingPostingsReaderImpl.java | 8 +- .../lucene/index/TestDocsAndPositions.java | 327 ++++++++++++++++++ 3 files changed, 345 insertions(+), 16 deletions(-) create mode 100644 lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java diff --git a/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java b/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java index fe90eac93be..f7bbef7906c 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java @@ -980,7 +980,7 @@ public class PreFlexFields extends FieldsProducer { private final class PreDocsEnum extends DocsEnum { final private SegmentTermDocs docs; - + private int docID = -1; PreDocsEnum() throws IOException { docs = new SegmentTermDocs(freqStream, getTermsDict(), fieldInfos); } @@ -998,18 +998,18 @@ public class PreFlexFields extends FieldsProducer { @Override public int nextDoc() throws IOException { if (docs.next()) { - return docs.doc(); + return docID = docs.doc(); } else { - return NO_MORE_DOCS; + return docID = NO_MORE_DOCS; } } @Override public int advance(int target) throws IOException { if (docs.skipTo(target)) { - return docs.doc(); + return docID = docs.doc(); } else { - return NO_MORE_DOCS; + return docID = NO_MORE_DOCS; } } @@ -1020,7 +1020,7 @@ public class PreFlexFields extends FieldsProducer { @Override public int docID() { - return docs.doc(); + return docID; } @Override @@ -1036,7 +1036,7 @@ public class PreFlexFields extends FieldsProducer { private 
final class PreDocsAndPositionsEnum extends DocsAndPositionsEnum { final private SegmentTermPositions pos; - + private int docID = -1; PreDocsAndPositionsEnum() throws IOException { pos = new SegmentTermPositions(freqStream, proxStream, getTermsDict(), fieldInfos); } @@ -1054,18 +1054,18 @@ public class PreFlexFields extends FieldsProducer { @Override public int nextDoc() throws IOException { if (pos.next()) { - return pos.doc(); + return docID = pos.doc(); } else { - return NO_MORE_DOCS; + return docID = NO_MORE_DOCS; } } @Override public int advance(int target) throws IOException { if (pos.skipTo(target)) { - return pos.doc(); + return docID = pos.doc(); } else { - return NO_MORE_DOCS; + return docID = NO_MORE_DOCS; } } @@ -1076,16 +1076,18 @@ public class PreFlexFields extends FieldsProducer { @Override public int docID() { - return pos.doc(); + return docID; } @Override public int nextPosition() throws IOException { + assert docID != NO_MORE_DOCS; return pos.nextPosition(); } @Override public boolean hasPayload() { + assert docID != NO_MORE_DOCS; return pos.isPayloadAvailable(); } diff --git a/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingPostingsReaderImpl.java b/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingPostingsReaderImpl.java index cb76b6a65df..6adab4d9f19 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingPostingsReaderImpl.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingPostingsReaderImpl.java @@ -261,7 +261,7 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase { while(true) { if (postings.eof()) { //System.out.println("PR END"); - return NO_MORE_DOCS; + return docID = NO_MORE_DOCS; } final int code = postings.readVInt(); @@ -319,7 +319,7 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase { if (doc >= target) return doc; } - return NO_MORE_DOCS; + return docID = NO_MORE_DOCS; } } @@ -368,7 +368,7 @@ public class PulsingPostingsReaderImpl 
extends PostingsReaderBase { if (postings.eof()) { //System.out.println("PR END"); - return NO_MORE_DOCS; + return docID = NO_MORE_DOCS; } final int code = postings.readVInt(); @@ -406,7 +406,7 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase { return doc; } } - return NO_MORE_DOCS; + return docID = NO_MORE_DOCS; } @Override diff --git a/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java b/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java new file mode 100644 index 00000000000..654e33dfb52 --- /dev/null +++ b/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java @@ -0,0 +1,327 @@ +package org.apache.lucene.index; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; + +import org.apache.lucene.analysis.MockAnalyzer; +import org.apache.lucene.analysis.MockTokenizer; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.index.IndexReader.AtomicReaderContext; +import org.apache.lucene.index.IndexReader.ReaderContext; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.Bits; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.ReaderUtil; + +public class TestDocsAndPositions extends LuceneTestCase { + private String fieldName; + private boolean usePayload; + + public void setUp() throws Exception { + super.setUp(); + fieldName = "field" + random.nextInt(); + usePayload = random.nextBoolean(); + } + + /** + * Simple testcase for {@link DocsAndPositionsEnum} + */ + public void testPositionsSimple() throws IOException { + Directory directory = newDirectory(); + RandomIndexWriter writer = new RandomIndexWriter(random, directory, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer( + MockTokenizer.WHITESPACE, true, usePayload))); + for (int i = 0; i < 39; i++) { + Document doc = new Document(); + doc.add(newField(fieldName, "1 2 3 4 5 6 7 8 9 10 " + + "1 2 3 4 5 6 7 8 9 10 " + "1 2 3 4 5 6 7 8 9 10 " + + "1 2 3 4 5 6 7 8 9 10", Field.Store.YES, Field.Index.ANALYZED)); + writer.addDocument(doc); + } + IndexReader reader = writer.getReader(); + writer.close(); + + for (int i = 0; i < 39 * RANDOM_MULTIPLIER; i++) { + BytesRef bytes = new BytesRef("1"); + ReaderContext topReaderContext = reader.getTopReaderContext(); + AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext); + for (AtomicReaderContext atomicReaderContext : leaves) { + DocsAndPositionsEnum docsAndPosEnum = getDocsAndPositions( + atomicReaderContext.reader, bytes, null); + assertNotNull(docsAndPosEnum); + if 
(atomicReaderContext.reader.maxDoc() == 0) { + continue; + } + final int advance = docsAndPosEnum.advance(random.nextInt(atomicReaderContext.reader.maxDoc())); + do { + String msg = "Advanced to: " + advance + " current doc: " + + docsAndPosEnum.docID() + " usePayloads: " + usePayload; + assertEquals(msg, 4, docsAndPosEnum.freq()); + assertEquals(msg, 0, docsAndPosEnum.nextPosition()); + assertEquals(msg, 4, docsAndPosEnum.freq()); + assertEquals(msg, 10, docsAndPosEnum.nextPosition()); + assertEquals(msg, 4, docsAndPosEnum.freq()); + assertEquals(msg, 20, docsAndPosEnum.nextPosition()); + assertEquals(msg, 4, docsAndPosEnum.freq()); + assertEquals(msg, 30, docsAndPosEnum.nextPosition()); + } while (docsAndPosEnum.nextDoc() != DocsAndPositionsEnum.NO_MORE_DOCS); + } + } + reader.close(); + directory.close(); + } + + public DocsAndPositionsEnum getDocsAndPositions(IndexReader reader, + BytesRef bytes, Bits skipDocs) throws IOException { + return reader.termPositionsEnum(null, fieldName, bytes); + } + + public DocsEnum getDocsEnum(IndexReader reader, BytesRef bytes, + boolean freqs, Bits skipDocs) throws IOException { + int randInt = random.nextInt(10); + if (randInt == 0) { // once in a while throw in a positions enum + return getDocsAndPositions(reader, bytes, skipDocs); + } else { + return reader.termDocsEnum(skipDocs, fieldName, bytes); + } + } + + /** + * this test indexes random numbers within a range into a field and checks + * their occurrences by searching for a number from that range selected at + * random. All positions for that number are saved up front and compared to + * the enums positions. 
+ */ + public void testRandomPositons() throws IOException { + Directory dir = newDirectory(); + RandomIndexWriter writer = new RandomIndexWriter(random, dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer( + MockTokenizer.WHITESPACE, true, usePayload))); + int numDocs = 131; + int max = 1051; + int term = random.nextInt(max); + Integer[][] positionsInDoc = new Integer[numDocs][]; + for (int i = 0; i < numDocs; i++) { + Document doc = new Document(); + ArrayList positions = new ArrayList(); + StringBuilder builder = new StringBuilder(); + for (int j = 0; j < 3049; j++) { + int nextInt = random.nextInt(max); + builder.append(nextInt).append(" "); + if (nextInt == term) { + positions.add(Integer.valueOf(j)); + } + } + doc.add(newField(fieldName, builder.toString(), Field.Store.YES, + Field.Index.ANALYZED)); + positionsInDoc[i] = positions.toArray(new Integer[0]); + writer.addDocument(doc); + } + + IndexReader reader = writer.getReader(); + writer.close(); + + for (int i = 0; i < 39 * RANDOM_MULTIPLIER; i++) { + BytesRef bytes = new BytesRef("" + term); + ReaderContext topReaderContext = reader.getTopReaderContext(); + AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext); + for (AtomicReaderContext atomicReaderContext : leaves) { + DocsAndPositionsEnum docsAndPosEnum = getDocsAndPositions( + atomicReaderContext.reader, bytes, null); + assertNotNull(docsAndPosEnum); + int initDoc = 0; + int maxDoc = atomicReaderContext.reader.maxDoc(); + // initially advance or do next doc + if (random.nextBoolean()) { + initDoc = docsAndPosEnum.nextDoc(); + } else { + initDoc = docsAndPosEnum.advance(random.nextInt(maxDoc)); + } + // now run through the scorer and check if all positions are there... 
+ do { + int docID = docsAndPosEnum.docID(); + if (docID == DocsAndPositionsEnum.NO_MORE_DOCS) { + break; + } + Integer[] pos = positionsInDoc[atomicReaderContext.docBase + docID]; + assertEquals(pos.length, docsAndPosEnum.freq()); + // number of positions read should be random - don't read all of them + // allways + final int howMany = random.nextInt(20) == 0 ? pos.length + - random.nextInt(pos.length) : pos.length; + for (int j = 0; j < howMany; j++) { + assertEquals("iteration: " + i + " initDoc: " + initDoc + " doc: " + + docID + " base: " + atomicReaderContext.docBase + + " positions: " + Arrays.toString(pos) + " usePayloads: " + + usePayload, pos[j].intValue(), docsAndPosEnum.nextPosition()); + } + + if (random.nextInt(10) == 0) { // once is a while advance + docsAndPosEnum + .advance(docID + 1 + random.nextInt((maxDoc - docID))); + } + + } while (docsAndPosEnum.nextDoc() != DocsAndPositionsEnum.NO_MORE_DOCS); + } + + } + reader.close(); + dir.close(); + } + + public void testRandomDocs() throws IOException { + Directory dir = newDirectory(); + RandomIndexWriter writer = new RandomIndexWriter(random, dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer( + MockTokenizer.WHITESPACE, true, usePayload))); + int numDocs = 499; + int max = 15678; + int term = random.nextInt(max); + int[] freqInDoc = new int[numDocs]; + for (int i = 0; i < numDocs; i++) { + Document doc = new Document(); + StringBuilder builder = new StringBuilder(); + for (int j = 0; j < 199; j++) { + int nextInt = random.nextInt(max); + builder.append(nextInt).append(" "); + if (nextInt == term) { + freqInDoc[i]++; + } + } + doc.add(newField(fieldName, builder.toString(), Field.Store.YES, + Field.Index.ANALYZED)); + writer.addDocument(doc); + } + + IndexReader reader = writer.getReader(); + writer.close(); + + for (int i = 0; i < 39 * RANDOM_MULTIPLIER; i++) { + BytesRef bytes = new BytesRef("" + term); + ReaderContext topReaderContext = reader.getTopReaderContext(); + 
AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext); + for (AtomicReaderContext context : leaves) { + int maxDoc = context.reader.maxDoc(); + DocsEnum docsAndPosEnum = getDocsEnum(context.reader, bytes, true, null); + if (findNext(freqInDoc, context.docBase, context.docBase + maxDoc) == Integer.MAX_VALUE) { + assertNull(docsAndPosEnum); + continue; + } + assertNotNull(docsAndPosEnum); + docsAndPosEnum.nextDoc(); + for (int j = 0; j < maxDoc; j++) { + if (freqInDoc[context.docBase + j] != 0) { + assertEquals(j, docsAndPosEnum.docID()); + assertEquals(docsAndPosEnum.freq(), freqInDoc[context.docBase +j]); + if (i % 2 == 0 && random.nextInt(10) == 0) { + int next = findNext(freqInDoc, context.docBase+j+1, context.docBase + maxDoc) - context.docBase; + int advancedTo = docsAndPosEnum.advance(next); + if (next >= maxDoc) { + assertEquals(DocsEnum.NO_MORE_DOCS, advancedTo); + } else { + assertTrue("advanced to: " +advancedTo + " but should be <= " + next, next >= advancedTo); + } + } else { + docsAndPosEnum.nextDoc(); + } + } + } + assertEquals("docBase: " + context.docBase + " maxDoc: " + maxDoc + " " + docsAndPosEnum.getClass(), DocsEnum.NO_MORE_DOCS, docsAndPosEnum.docID()); + } + + } + + reader.close(); + dir.close(); + } + + private static int findNext(int[] docs, int pos, int max) { + for (int i = pos; i < max; i++) { + if( docs[i] != 0) { + return i; + } + } + return Integer.MAX_VALUE; + } + + /** + * tests retrieval of positions for terms that have a large number of + * occurrences to force test of buffer refill during positions iteration. 
+ */ + public void testLargeNumberOfPositions() throws IOException { + Directory dir = newDirectory(); + RandomIndexWriter writer = new RandomIndexWriter(random, dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer( + MockTokenizer.WHITESPACE, true, usePayload))); + int howMany = 1000; + for (int i = 0; i < 39; i++) { + Document doc = new Document(); + StringBuilder builder = new StringBuilder(); + for (int j = 0; j < howMany; j++) { + if (j % 2 == 0) { + builder.append("even "); + } else { + builder.append("odd "); + } + } + doc.add(newField(fieldName, builder.toString(), Field.Store.YES, + Field.Index.ANALYZED)); + writer.addDocument(doc); + } + + // now do seaches + IndexReader reader = writer.getReader(); + writer.close(); + + for (int i = 0; i < 39 * RANDOM_MULTIPLIER; i++) { + BytesRef bytes = new BytesRef("even"); + + ReaderContext topReaderContext = reader.getTopReaderContext(); + AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext); + for (AtomicReaderContext atomicReaderContext : leaves) { + DocsAndPositionsEnum docsAndPosEnum = getDocsAndPositions( + atomicReaderContext.reader, bytes, null); + assertNotNull(docsAndPosEnum); + + int initDoc = 0; + int maxDoc = atomicReaderContext.reader.maxDoc(); + // initially advance or do next doc + if (random.nextBoolean()) { + initDoc = docsAndPosEnum.nextDoc(); + } else { + initDoc = docsAndPosEnum.advance(random.nextInt(maxDoc)); + } + String msg = "Iteration: " + i + " initDoc: " + initDoc + " payloads: " + + usePayload; + assertEquals(howMany / 2, docsAndPosEnum.freq()); + for (int j = 0; j < howMany; j += 2) { + assertEquals("position missmatch index: " + j + " with freq: " + + docsAndPosEnum.freq() + " -- " + msg, j, + docsAndPosEnum.nextPosition()); + } + } + } + reader.close(); + dir.close(); + } + +} From 27fe1ca874fbafd79b9efac54ab1a1882c80c469 Mon Sep 17 00:00:00 2001 From: Grant Ingersoll Date: Tue, 25 Jan 2011 16:14:59 +0000 Subject: [PATCH 007/185] SOLR-482: add some improved 
exception information for CSV files that fail git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063333 13f79535-47bb-0310-9956-ffa450edef68 --- solr/CHANGES.txt | 2 ++ .../solr/handler/CSVRequestHandler.java | 27 ++++++++++++++++--- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index 4aa8fac6833..e4cc0d2eb93 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -161,6 +161,8 @@ Bug Fixes * SOLR-2127: Fixed serialization of default core and indentation of solr.xml when serializing. (Ephraim Ofir, Mark Miller) +* SOLR-482: Provide more exception handling in CSVLoader (gsingers) + Other Changes ---------------------- diff --git a/solr/src/java/org/apache/solr/handler/CSVRequestHandler.java b/solr/src/java/org/apache/solr/handler/CSVRequestHandler.java index 823b4502017..4a67d9ab909 100755 --- a/solr/src/java/org/apache/solr/handler/CSVRequestHandler.java +++ b/solr/src/java/org/apache/solr/handler/CSVRequestHandler.java @@ -305,12 +305,27 @@ abstract class CSVLoader extends ContentStreamLoader { private void input_err(String msg, String[] line, int lineno) { StringBuilder sb = new StringBuilder(); - sb.append(errHeader+", line="+lineno + ","+msg+"\n\tvalues={"); - for (String val: line) { sb.append("'"+val+"',"); } + sb.append(errHeader).append(", line=").append(lineno).append(",").append(msg).append("\n\tvalues={"); + for (String val: line) { + sb.append("'").append(val).append("',"); } sb.append('}'); throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,sb.toString()); } + private void input_err(String msg, String[] lines, int lineNo, Throwable e) { + StringBuilder sb = new StringBuilder(); + sb.append(errHeader).append(", line=").append(lineNo).append(",").append(msg).append("\n\tvalues={"); + if (lines != null) { + for (String val : lines) { + sb.append("'").append(val).append("',"); + } + } else { + sb.append("NO LINES AVAILABLE"); + } + sb.append('}'); + throw new SolrException( 
SolrException.ErrorCode.BAD_REQUEST,sb.toString(), e); + } + /** load the CSV input */ public void load(SolrQueryRequest req, SolrQueryResponse rsp, ContentStream stream) throws IOException { errHeader = "CSVLoader: input=" + stream.getSourceInfo(); @@ -341,7 +356,13 @@ abstract class CSVLoader extends ContentStreamLoader { // read the rest of the CSV file for(;;) { int line = parser.getLineNumber(); // for error reporting in MT mode - String[] vals = parser.getLine(); + String[] vals = null; + try { + vals = parser.getLine(); + } catch (IOException e) { + //Catch the exception and rethrow it with more line information + input_err("can't read line: " + line, null, line, e); + } if (vals==null) break; if (vals.length != fields.length) { From dfb9a0faf84163aedf479a82fa4be2a0b8810f1c Mon Sep 17 00:00:00 2001 From: "Chris M. Hostetter" Date: Tue, 25 Jan 2011 16:28:04 +0000 Subject: [PATCH 008/185] SOLR-2320: Fixed ReplicationHandler detail reporting for masters git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063339 13f79535-47bb-0310-9956-ffa450edef68 --- solr/CHANGES.txt | 3 + .../solr/handler/ReplicationHandler.java | 10 +- .../solr/conf/solrconfig-repeater.xml | 93 +++++++++ .../solr/handler/TestReplicationHandler.java | 183 +++++++++++++----- 4 files changed, 239 insertions(+), 50 deletions(-) create mode 100644 solr/src/test-files/solr/conf/solrconfig-repeater.xml diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index e4cc0d2eb93..1235a429653 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -163,6 +163,9 @@ Bug Fixes * SOLR-482: Provide more exception handling in CSVLoader (gsingers) +* SOLR-2320: Fixed ReplicationHandler detail reporting for masters + (hossman) + Other Changes ---------------------- diff --git a/solr/src/java/org/apache/solr/handler/ReplicationHandler.java b/solr/src/java/org/apache/solr/handler/ReplicationHandler.java index d08cdb94cd2..188175a28a2 100644 --- a/solr/src/java/org/apache/solr/handler/ReplicationHandler.java 
+++ b/solr/src/java/org/apache/solr/handler/ReplicationHandler.java @@ -687,12 +687,12 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw LOG.error("Exception while writing replication details: ", e); } } - if (isMaster) - details.add("master", master); - if (isSlave && showSlaveDetails) - details.add("slave", slave); - } + + if (isMaster) + details.add("master", master); + if (isSlave && showSlaveDetails) + details.add("slave", slave); NamedList snapshotStats = snapShootDetails; if (snapshotStats != null) diff --git a/solr/src/test-files/solr/conf/solrconfig-repeater.xml b/solr/src/test-files/solr/conf/solrconfig-repeater.xml new file mode 100644 index 00000000000..4584dfaba45 --- /dev/null +++ b/solr/src/test-files/solr/conf/solrconfig-repeater.xml @@ -0,0 +1,93 @@ + + + + + + + + ${tests.luceneMatchVersion:LUCENE_CURRENT} + + ${solr.data.dir:./solr/data} + + + false + 10 + 32 + 2147483647 + 10000 + 1000 + 10000 + + 1000 + 10000 + + single + + + + false + 10 + 32 + 2147483647 + 10000 + + true + + + + + + + true + + + + + + + + + + + + + + + + commit + schema.xml + + + http://localhost:TEST_PORT/solr/replication + 00:00:01 + + + + + + + + + max-age=30, public + + + + diff --git a/solr/src/test/org/apache/solr/handler/TestReplicationHandler.java b/solr/src/test/org/apache/solr/handler/TestReplicationHandler.java index addd3d445e1..b3f68ede013 100644 --- a/solr/src/test/org/apache/solr/handler/TestReplicationHandler.java +++ b/solr/src/test/org/apache/solr/handler/TestReplicationHandler.java @@ -25,9 +25,11 @@ import org.apache.lucene.store.SimpleFSDirectory; import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.TestDistributedSearch; import org.apache.solr.client.solrj.SolrServer; +import org.apache.solr.client.solrj.SolrRequest; import org.apache.solr.client.solrj.SolrServerException; import org.apache.solr.client.solrj.embedded.JettySolrRunner; import org.apache.solr.client.solrj.impl.CommonsHttpSolrServer; +import 
org.apache.solr.client.solrj.request.QueryRequest; import org.apache.solr.client.solrj.response.QueryResponse; import org.apache.solr.common.SolrDocument; import org.apache.solr.common.SolrDocumentList; @@ -42,6 +44,8 @@ import org.junit.Test; import java.io.*; import java.net.URL; +import java.util.Map; +import java.util.HashMap; /** * Test for ReplicationHandler @@ -53,7 +57,6 @@ public class TestReplicationHandler extends SolrTestCaseJ4 { private static final String CONF_DIR = "." + File.separator + "solr" + File.separator + "conf" + File.separator; - private static final String SLAVE_CONFIG = CONF_DIR + "solrconfig-slave.xml"; static JettySolrRunner masterJetty, slaveJetty; static SolrServer masterClient, slaveClient; @@ -157,6 +160,80 @@ public class TestReplicationHandler extends SolrTestCaseJ4 { return res; } + private NamedList getDetails(SolrServer s) throws Exception { + + + ModifiableSolrParams params = new ModifiableSolrParams(); + params.set("command","details"); + params.set("qt","/replication"); + QueryRequest req = new QueryRequest(params); + + NamedList res = s.request(req); + + assertNotNull("null response from server", res); + + @SuppressWarnings("unchecked") NamedList details + = (NamedList) res.get("details"); + + assertNotNull("null details", details); + + return details; + } + + @Test + public void testDetails() throws Exception { + { + NamedList details = getDetails(masterClient); + + assertEquals("master isMaster?", + "true", details.get("isMaster")); + assertEquals("master isSlave?", + "false", details.get("isSlave")); + assertNotNull("master has master section", + details.get("master")); + } + { + NamedList details = getDetails(slaveClient); + + assertEquals("slave isMaster?", + "false", details.get("isMaster")); + assertEquals("slave isSlave?", + "true", details.get("isSlave")); + assertNotNull("slave has slave section", + details.get("slave")); + } + + SolrInstance repeater = null; + JettySolrRunner repeaterJetty = null; + SolrServer 
repeaterClient = null; + try { + repeater = new SolrInstance("repeater", masterJetty.getLocalPort()); + repeater.setUp(); + repeaterJetty = createJetty(repeater); + repeaterClient = createNewSolrServer(repeaterJetty.getLocalPort()); + + + NamedList details = getDetails(repeaterClient); + + assertEquals("repeater isMaster?", + "true", details.get("isMaster")); + assertEquals("repeater isSlave?", + "true", details.get("isSlave")); + assertNotNull("repeater has master section", + details.get("master")); + assertNotNull("repeater has slave section", + details.get("slave")); + + } finally { + try { + if (repeaterJetty != null) repeaterJetty.stop(); + } catch (Exception e) { /* :NOOP: */ } + try { + if (repeater != null) repeater.tearDown(); + } catch (Exception e) { /* :NOOP: */ } + } + } + @Test public void testReplicateAfterWrite2Slave() throws Exception { clearIndexWithReplication(); @@ -250,14 +327,15 @@ public class TestReplicationHandler extends SolrTestCaseJ4 { masterClient.commit(); //change the schema on master - copyFile(getFile(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml")); + master.copyConfigFile(CONF_DIR + "schema-replication2.xml", "schema.xml"); masterJetty.stop(); masterJetty = createJetty(master); masterClient = createNewSolrServer(masterJetty.getLocalPort()); - copyFile(getFile(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort()); + slave.setTestPort(masterJetty.getLocalPort()); + slave.copyConfigFile(slave.getSolrConfigFile(), "solrconfig.xml"); slaveJetty.stop(); slaveJetty = createJetty(slave); @@ -349,7 +427,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 { public void testSnapPullWithMasterUrl() throws Exception { //change solrconfig on slave //this has no entry for pollinginterval - copyFile(getFile(CONF_DIR + "solrconfig-slave1.xml"), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort()); + slave.copyConfigFile(CONF_DIR + 
"solrconfig-slave1.xml", "solrconfig.xml"); slaveJetty.stop(); slaveJetty = createJetty(slave); slaveClient = createNewSolrServer(slaveJetty.getLocalPort()); @@ -386,7 +464,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 { // NOTE: at this point, the slave is not polling any more // restore it. - copyFile(getFile(CONF_DIR + "solrconfig-slave.xml"), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort()); + slave.copyConfigFile(CONF_DIR + "solrconfig-slave.xml", "solrconfig.xml"); slaveJetty.stop(); slaveJetty = createJetty(slave); slaveClient = createNewSolrServer(slaveJetty.getLocalPort()); @@ -410,15 +488,16 @@ public class TestReplicationHandler extends SolrTestCaseJ4 { assertEquals(nDocs, masterQueryResult.getNumFound()); //change solrconfig having 'replicateAfter startup' option on master - copyFile(getFile(CONF_DIR + "solrconfig-master2.xml"), - new File(master.getConfDir(), "solrconfig.xml")); + master.copyConfigFile(CONF_DIR + "solrconfig-master2.xml", + "solrconfig.xml"); masterJetty.stop(); masterJetty = createJetty(master); masterClient = createNewSolrServer(masterJetty.getLocalPort()); - copyFile(getFile(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort()); + slave.setTestPort(masterJetty.getLocalPort()); + slave.copyConfigFile(slave.getSolrConfigFile(), "solrconfig.xml"); //start slave slaveJetty = createJetty(slave); @@ -435,11 +514,14 @@ public class TestReplicationHandler extends SolrTestCaseJ4 { // NOTE: the master only replicates after startup now! // revert that change. 
- copyFile(getFile(CONF_DIR + "solrconfig-master.xml"), new File(master.getConfDir(), "solrconfig.xml")); + master.copyConfigFile(CONF_DIR + "solrconfig-master.xml", "solrconfig.xml"); masterJetty.stop(); masterJetty = createJetty(master); masterClient = createNewSolrServer(masterJetty.getLocalPort()); - copyFile(getFile(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort()); + + slave.setTestPort(masterJetty.getLocalPort()); + slave.copyConfigFile(slave.getSolrConfigFile(), "solrconfig.xml"); + //start slave slaveJetty.stop(); slaveJetty = createJetty(slave); @@ -477,20 +559,24 @@ public class TestReplicationHandler extends SolrTestCaseJ4 { masterClient.commit(); //change solrconfig on master - copyFile(getFile(CONF_DIR + "solrconfig-master1.xml"), new File(master.getConfDir(), "solrconfig.xml")); + master.copyConfigFile(CONF_DIR + "solrconfig-master1.xml", + "solrconfig.xml"); //change schema on master - copyFile(getFile(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema.xml")); + master.copyConfigFile(CONF_DIR + "schema-replication2.xml", + "schema.xml"); //keep a copy of the new schema - copyFile(getFile(CONF_DIR + "schema-replication2.xml"), new File(master.getConfDir(), "schema-replication2.xml")); + master.copyConfigFile(CONF_DIR + "schema-replication2.xml", + "schema-replication2.xml"); masterJetty.stop(); masterJetty = createJetty(master); masterClient = createNewSolrServer(masterJetty.getLocalPort()); - copyFile(getFile(SLAVE_CONFIG), new File(slave.getConfDir(), "solrconfig.xml"), masterJetty.getLocalPort()); + slave.setTestPort(masterJetty.getLocalPort()); + slave.copyConfigFile(slave.getSolrConfigFile(), "solrconfig.xml"); slaveJetty.stop(); slaveJetty = createJetty(slave); @@ -521,12 +607,12 @@ public class TestReplicationHandler extends SolrTestCaseJ4 { @Test public void testBackup() throws Exception { masterJetty.stop(); - copyFile(getFile(CONF_DIR + "solrconfig-master1.xml"), new 
File(master.getConfDir(), "solrconfig.xml")); + master.copyConfigFile(CONF_DIR + "solrconfig-master1.xml", + "solrconfig.xml"); masterJetty = createJetty(master); masterClient = createNewSolrServer(masterJetty.getLocalPort()); - nDocs--; masterClient.deleteByQuery("*:*"); for (int i = 0; i < nDocs; i++) @@ -646,19 +732,22 @@ public class TestReplicationHandler extends SolrTestCaseJ4 { private static class SolrInstance { - String name; - Integer masterPort; - File homeDir; - File confDir; - File dataDir; + private String name; + private Integer testPort; + private File homeDir; + private File confDir; + private File dataDir; /** - * if masterPort is null, this instance is a master -- otherwise this instance is a slave, and assumes the master is - * on localhost at the specified port. + * @param name used to pick new solr home dir, as well as which + * "solrconfig-${name}.xml" file gets copied + * to solrconfig.xml in new conf dir. + * @param testPort if not null, used as a replacement for + * TEST_PORT in the cloned config files. 
*/ - public SolrInstance(String name, Integer port) { + public SolrInstance(String name, Integer testPort) { this.name = name; - this.masterPort = port; + this.testPort = testPort; } public String getHomeDir() { @@ -678,43 +767,47 @@ public class TestReplicationHandler extends SolrTestCaseJ4 { } public String getSolrConfigFile() { - String fname = ""; - if (null == masterPort) - fname = CONF_DIR + "solrconfig-master.xml"; - else - fname = SLAVE_CONFIG; - return fname; + return CONF_DIR + "solrconfig-"+name+".xml"; + } + + /** If it needs to change */ + public void setTestPort(Integer testPort) { + this.testPort = testPort; } public void setUp() throws Exception { System.setProperty("solr.test.sys.prop1", "propone"); System.setProperty("solr.test.sys.prop2", "proptwo"); - File home = new File(TEMP_DIR, - getClass().getName() + "-" + System.currentTimeMillis()); + File home = new File(TEMP_DIR, + getClass().getName() + "-" + + System.currentTimeMillis()); + - if (null == masterPort) { - homeDir = new File(home, "master"); - dataDir = new File(homeDir, "data"); - confDir = new File(homeDir, "conf"); - } else { - homeDir = new File(home, "slave"); - dataDir = new File(homeDir, "data"); - confDir = new File(homeDir, "conf"); - } + homeDir = new File(home, name); + dataDir = new File(homeDir, "data"); + confDir = new File(homeDir, "conf"); homeDir.mkdirs(); dataDir.mkdirs(); confDir.mkdirs(); File f = new File(confDir, "solrconfig.xml"); - copyFile(getFile(getSolrConfigFile()), f, masterPort); - f = new File(confDir, "schema.xml"); - copyFile(getFile(getSchemaFile()), f); + copyConfigFile(getSolrConfigFile(), "solrconfig.xml"); + copyConfigFile(getSchemaFile(), "schema.xml"); } public void tearDown() throws Exception { AbstractSolrTestCase.recurseDelete(homeDir); } + + public void copyConfigFile(String srcFile, String destFile) + throws IOException { + + copyFile(getFile(srcFile), + new File(confDir, destFile), + testPort); + } + } } From 
a4a913d01b256060d02004174a3763b642e51733 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Tue, 25 Jan 2011 22:00:14 +0000 Subject: [PATCH 009/185] LUCENE-792: add test for PrecedenceQueryParser NOT bug git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063478 13f79535-47bb-0310-9956-ffa450edef68 --- .../queryParser/precedence/TestPrecedenceQueryParser.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java index cf0c8876064..5d044b9ad4c 100644 --- a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java +++ b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java @@ -567,6 +567,12 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { // too many boolean clauses, so ParseException is expected } } + + // LUCENE-792 + public void testNOT() throws Exception { + Analyzer a = new MockAnalyzer(MockTokenizer.WHITESPACE, false); + assertQueryEquals("NOT foo AND bar", a, "-foo +bar"); + } /** * This test differs from the original QueryParser, showing how the precedence From fcf6e305b4806cecee626b13c69bf27f8a85bbd6 Mon Sep 17 00:00:00 2001 From: Uwe Schindler Date: Tue, 25 Jan 2011 22:26:59 +0000 Subject: [PATCH 010/185] LUCENE-1472: Removed synchronization from static DateTools methods by using a ThreadLocal. Also converted DateTools.Resolution to a Java 5 enum. 
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063493 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/CHANGES.txt | 4 + .../org/apache/lucene/document/DateTools.java | 199 +++++++++--------- 2 files changed, 105 insertions(+), 98 deletions(-) diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index 44d90a02240..843def76ff1 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -911,6 +911,10 @@ Optimizations * LUCENE-2010: Segments with 100% deleted documents are now removed on IndexReader or IndexWriter commit. (Uwe Schindler, Mike McCandless) +* LUCENE-1472: Removed synchronization from static DateTools methods + by using a ThreadLocal. Also converted DateTools.Resolution to a + Java 5 enum (this should not break backwards). (Uwe Schindler) + Build * LUCENE-2124: Moved the JDK-based collation support from contrib/collation diff --git a/lucene/src/java/org/apache/lucene/document/DateTools.java b/lucene/src/java/org/apache/lucene/document/DateTools.java index 68cb2dfdf25..ddac753b688 100644 --- a/lucene/src/java/org/apache/lucene/document/DateTools.java +++ b/lucene/src/java/org/apache/lucene/document/DateTools.java @@ -47,28 +47,37 @@ import org.apache.lucene.util.NumericUtils; // for javadocs */ public class DateTools { - private final static TimeZone GMT = TimeZone.getTimeZone("GMT"); + private static final class DateFormats { + final static TimeZone GMT = TimeZone.getTimeZone("GMT"); - private static final SimpleDateFormat YEAR_FORMAT = new SimpleDateFormat("yyyy", Locale.US); - private static final SimpleDateFormat MONTH_FORMAT = new SimpleDateFormat("yyyyMM", Locale.US); - private static final SimpleDateFormat DAY_FORMAT = new SimpleDateFormat("yyyyMMdd", Locale.US); - private static final SimpleDateFormat HOUR_FORMAT = new SimpleDateFormat("yyyyMMddHH", Locale.US); - private static final SimpleDateFormat MINUTE_FORMAT = new SimpleDateFormat("yyyyMMddHHmm", Locale.US); - private static final SimpleDateFormat SECOND_FORMAT = new 
SimpleDateFormat("yyyyMMddHHmmss", Locale.US); - private static final SimpleDateFormat MILLISECOND_FORMAT = new SimpleDateFormat("yyyyMMddHHmmssSSS", Locale.US); - static { - // times need to be normalized so the value doesn't depend on the - // location the index is created/used: - YEAR_FORMAT.setTimeZone(GMT); - MONTH_FORMAT.setTimeZone(GMT); - DAY_FORMAT.setTimeZone(GMT); - HOUR_FORMAT.setTimeZone(GMT); - MINUTE_FORMAT.setTimeZone(GMT); - SECOND_FORMAT.setTimeZone(GMT); - MILLISECOND_FORMAT.setTimeZone(GMT); + final SimpleDateFormat YEAR_FORMAT = new SimpleDateFormat("yyyy", Locale.US); + final SimpleDateFormat MONTH_FORMAT = new SimpleDateFormat("yyyyMM", Locale.US); + final SimpleDateFormat DAY_FORMAT = new SimpleDateFormat("yyyyMMdd", Locale.US); + final SimpleDateFormat HOUR_FORMAT = new SimpleDateFormat("yyyyMMddHH", Locale.US); + final SimpleDateFormat MINUTE_FORMAT = new SimpleDateFormat("yyyyMMddHHmm", Locale.US); + final SimpleDateFormat SECOND_FORMAT = new SimpleDateFormat("yyyyMMddHHmmss", Locale.US); + final SimpleDateFormat MILLISECOND_FORMAT = new SimpleDateFormat("yyyyMMddHHmmssSSS", Locale.US); + { + // times need to be normalized so the value doesn't depend on the + // location the index is created/used: + YEAR_FORMAT.setTimeZone(GMT); + MONTH_FORMAT.setTimeZone(GMT); + DAY_FORMAT.setTimeZone(GMT); + HOUR_FORMAT.setTimeZone(GMT); + MINUTE_FORMAT.setTimeZone(GMT); + SECOND_FORMAT.setTimeZone(GMT); + MILLISECOND_FORMAT.setTimeZone(GMT); + } + + final Calendar calInstance = Calendar.getInstance(GMT); } - - private static final Calendar calInstance = Calendar.getInstance(GMT); + + private static final ThreadLocal FORMATS = new ThreadLocal() { + @Override + protected DateFormats initialValue() { + return new DateFormats(); + } + }; // cannot create, the class has static methods only private DateTools() {} @@ -82,7 +91,7 @@ public class DateTools { * @return a string in format yyyyMMddHHmmssSSS or shorter, * depending on resolution; using GMT as 
timezone */ - public static synchronized String dateToString(Date date, Resolution resolution) { + public static String dateToString(Date date, Resolution resolution) { return timeToString(date.getTime(), resolution); } @@ -95,24 +104,20 @@ public class DateTools { * @return a string in format yyyyMMddHHmmssSSS or shorter, * depending on resolution; using GMT as timezone */ - public static synchronized String timeToString(long time, Resolution resolution) { - calInstance.setTimeInMillis(round(time, resolution)); - Date date = calInstance.getTime(); + public static String timeToString(long time, Resolution resolution) { + final DateFormats formats = FORMATS.get(); - if (resolution == Resolution.YEAR) { - return YEAR_FORMAT.format(date); - } else if (resolution == Resolution.MONTH) { - return MONTH_FORMAT.format(date); - } else if (resolution == Resolution.DAY) { - return DAY_FORMAT.format(date); - } else if (resolution == Resolution.HOUR) { - return HOUR_FORMAT.format(date); - } else if (resolution == Resolution.MINUTE) { - return MINUTE_FORMAT.format(date); - } else if (resolution == Resolution.SECOND) { - return SECOND_FORMAT.format(date); - } else if (resolution == Resolution.MILLISECOND) { - return MILLISECOND_FORMAT.format(date); + formats.calInstance.setTimeInMillis(round(time, resolution)); + final Date date = formats.calInstance.getTime(); + + switch (resolution) { + case YEAR: return formats.YEAR_FORMAT.format(date); + case MONTH:return formats.MONTH_FORMAT.format(date); + case DAY: return formats.DAY_FORMAT.format(date); + case HOUR: return formats.HOUR_FORMAT.format(date); + case MINUTE: return formats.MINUTE_FORMAT.format(date); + case SECOND: return formats.SECOND_FORMAT.format(date); + case MILLISECOND: return formats.MILLISECOND_FORMAT.format(date); } throw new IllegalArgumentException("unknown resolution " + resolution); @@ -128,7 +133,7 @@ public class DateTools { * @throws ParseException if dateString is not in the * expected format */ - public 
static synchronized long stringToTime(String dateString) throws ParseException { + public static long stringToTime(String dateString) throws ParseException { return stringToDate(dateString).getTime(); } @@ -142,21 +147,23 @@ public class DateTools { * @throws ParseException if dateString is not in the * expected format */ - public static synchronized Date stringToDate(String dateString) throws ParseException { + public static Date stringToDate(String dateString) throws ParseException { + final DateFormats formats = FORMATS.get(); + if (dateString.length() == 4) { - return YEAR_FORMAT.parse(dateString); + return formats.YEAR_FORMAT.parse(dateString); } else if (dateString.length() == 6) { - return MONTH_FORMAT.parse(dateString); + return formats.MONTH_FORMAT.parse(dateString); } else if (dateString.length() == 8) { - return DAY_FORMAT.parse(dateString); + return formats.DAY_FORMAT.parse(dateString); } else if (dateString.length() == 10) { - return HOUR_FORMAT.parse(dateString); + return formats.HOUR_FORMAT.parse(dateString); } else if (dateString.length() == 12) { - return MINUTE_FORMAT.parse(dateString); + return formats.MINUTE_FORMAT.parse(dateString); } else if (dateString.length() == 14) { - return SECOND_FORMAT.parse(dateString); + return formats.SECOND_FORMAT.parse(dateString); } else if (dateString.length() == 17) { - return MILLISECOND_FORMAT.parse(dateString); + return formats.MILLISECOND_FORMAT.parse(dateString); } throw new ParseException("Input is not valid date string: " + dateString, 0); } @@ -170,7 +177,7 @@ public class DateTools { * @return the date with all values more precise than resolution * set to 0 or 1 */ - public static synchronized Date round(Date date, Resolution resolution) { + public static Date round(Date date, Resolution resolution) { return new Date(round(date.getTime(), resolution)); } @@ -184,67 +191,63 @@ public class DateTools { * @return the date with all values more precise than resolution * set to 0 or 1, expressed as 
milliseconds since January 1, 1970, 00:00:00 GMT */ - public static synchronized long round(long time, Resolution resolution) { + public static long round(long time, Resolution resolution) { + final Calendar calInstance = FORMATS.get().calInstance; calInstance.setTimeInMillis(time); - if (resolution == Resolution.YEAR) { - calInstance.set(Calendar.MONTH, 0); - calInstance.set(Calendar.DAY_OF_MONTH, 1); - calInstance.set(Calendar.HOUR_OF_DAY, 0); - calInstance.set(Calendar.MINUTE, 0); - calInstance.set(Calendar.SECOND, 0); - calInstance.set(Calendar.MILLISECOND, 0); - } else if (resolution == Resolution.MONTH) { - calInstance.set(Calendar.DAY_OF_MONTH, 1); - calInstance.set(Calendar.HOUR_OF_DAY, 0); - calInstance.set(Calendar.MINUTE, 0); - calInstance.set(Calendar.SECOND, 0); - calInstance.set(Calendar.MILLISECOND, 0); - } else if (resolution == Resolution.DAY) { - calInstance.set(Calendar.HOUR_OF_DAY, 0); - calInstance.set(Calendar.MINUTE, 0); - calInstance.set(Calendar.SECOND, 0); - calInstance.set(Calendar.MILLISECOND, 0); - } else if (resolution == Resolution.HOUR) { - calInstance.set(Calendar.MINUTE, 0); - calInstance.set(Calendar.SECOND, 0); - calInstance.set(Calendar.MILLISECOND, 0); - } else if (resolution == Resolution.MINUTE) { - calInstance.set(Calendar.SECOND, 0); - calInstance.set(Calendar.MILLISECOND, 0); - } else if (resolution == Resolution.SECOND) { - calInstance.set(Calendar.MILLISECOND, 0); - } else if (resolution == Resolution.MILLISECOND) { - // don't cut off anything - } else { - throw new IllegalArgumentException("unknown resolution " + resolution); + switch (resolution) { + case YEAR: + calInstance.set(Calendar.MONTH, 0); + calInstance.set(Calendar.DAY_OF_MONTH, 1); + calInstance.set(Calendar.HOUR_OF_DAY, 0); + calInstance.set(Calendar.MINUTE, 0); + calInstance.set(Calendar.SECOND, 0); + calInstance.set(Calendar.MILLISECOND, 0); + break; + case MONTH: + calInstance.set(Calendar.DAY_OF_MONTH, 1); + calInstance.set(Calendar.HOUR_OF_DAY, 0); + 
calInstance.set(Calendar.MINUTE, 0); + calInstance.set(Calendar.SECOND, 0); + calInstance.set(Calendar.MILLISECOND, 0); + break; + case DAY: + calInstance.set(Calendar.HOUR_OF_DAY, 0); + calInstance.set(Calendar.MINUTE, 0); + calInstance.set(Calendar.SECOND, 0); + calInstance.set(Calendar.MILLISECOND, 0); + break; + case HOUR: + calInstance.set(Calendar.MINUTE, 0); + calInstance.set(Calendar.SECOND, 0); + calInstance.set(Calendar.MILLISECOND, 0); + break; + case MINUTE: + calInstance.set(Calendar.SECOND, 0); + calInstance.set(Calendar.MILLISECOND, 0); + break; + case SECOND: + calInstance.set(Calendar.MILLISECOND, 0); + break; + case MILLISECOND: + // don't cut off anything + break; + default: + throw new IllegalArgumentException("unknown resolution " + resolution); } return calInstance.getTimeInMillis(); } /** Specifies the time granularity. */ - public static class Resolution { + public static enum Resolution { - public static final Resolution YEAR = new Resolution("year"); - public static final Resolution MONTH = new Resolution("month"); - public static final Resolution DAY = new Resolution("day"); - public static final Resolution HOUR = new Resolution("hour"); - public static final Resolution MINUTE = new Resolution("minute"); - public static final Resolution SECOND = new Resolution("second"); - public static final Resolution MILLISECOND = new Resolution("millisecond"); + YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, MILLISECOND; - private String resolution; - - private Resolution() { - } - - private Resolution(String resolution) { - this.resolution = resolution; - } - + /** this method returns the name of the resolution + * in lowercase (for backwards compatibility) */ @Override public String toString() { - return resolution; + return super.toString().toLowerCase(Locale.ENGLISH); } } From 323ea4134d277535176ddebc6fc23d58f961e6b3 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Tue, 25 Jan 2011 22:42:37 +0000 Subject: [PATCH 011/185] LUCENE-2474: add expert 
ReaderFinishedListener API, for external caches to evict entries for readers git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063498 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/CHANGES.txt | 5 ++ .../instantiated/InstantiatedIndexReader.java | 2 + .../lucene/index/memory/MemoryIndex.java | 2 + .../apache/lucene/index/DirectoryReader.java | 25 +++++--- .../lucene/index/FilterIndexReader.java | 23 ++++--- .../org/apache/lucene/index/IndexReader.java | 61 +++++++++++++++++++ .../org/apache/lucene/index/IndexWriter.java | 9 +++ .../org/apache/lucene/index/MultiReader.java | 25 ++++++-- .../apache/lucene/index/ParallelReader.java | 19 +++++- .../apache/lucene/index/SegmentReader.java | 20 +++--- .../apache/lucene/search/FieldCacheImpl.java | 9 +++ .../apache/lucene/index/TestIndexReader.java | 38 ++++++++++++ 12 files changed, 205 insertions(+), 33 deletions(-) diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index 843def76ff1..f40b16255a9 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -812,6 +812,11 @@ New features * LUCENE-2864: Add getMaxTermFrequency (maximum within-document TF) to FieldInvertState so that it can be used in Similarity.computeNorm. (Robert Muir) + +* LUCENE-2474: Added expert ReaderFinishedListener API to + IndexReader, to allow apps that maintain external per-segment caches + to evict entries when a segment is finished. 
(Shay Banon, Yonik + Seeley, Mike McCandless) Optimizations diff --git a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java index 7cece688d33..301ff986fc2 100644 --- a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java +++ b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java @@ -19,6 +19,7 @@ package org.apache.lucene.store.instantiated; import java.io.IOException; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -52,6 +53,7 @@ public class InstantiatedIndexReader extends IndexReader { public InstantiatedIndexReader(InstantiatedIndex index) { super(); this.index = index; + readerFinishedListeners = Collections.synchronizedSet(new HashSet()); } /** diff --git a/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java b/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java index 3fc82b7651b..437d313b9c9 100644 --- a/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java +++ b/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java @@ -25,6 +25,7 @@ import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.Map; @@ -758,6 +759,7 @@ public class MemoryIndex implements Serializable { private MemoryIndexReader() { super(); // avoid as much superclass baggage as possible + readerFinishedListeners = Collections.synchronizedSet(new HashSet()); } private Info getInfo(String fieldName) { diff --git a/lucene/src/java/org/apache/lucene/index/DirectoryReader.java 
b/lucene/src/java/org/apache/lucene/index/DirectoryReader.java index f339133b7ca..aa372be4b66 100644 --- a/lucene/src/java/org/apache/lucene/index/DirectoryReader.java +++ b/lucene/src/java/org/apache/lucene/index/DirectoryReader.java @@ -37,8 +37,6 @@ import org.apache.lucene.index.codecs.CodecProvider; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.search.FieldCache; // not great (circular); used only to purge FieldCache entry on close - /** * An IndexReader which reads indexes with multiple segments. */ @@ -106,6 +104,7 @@ class DirectoryReader extends IndexReader implements Cloneable { } else { this.codecs = codecs; } + readerFinishedListeners = Collections.synchronizedSet(new HashSet()); // To reduce the chance of hitting FileNotFound // (and having to retry), we open segments in @@ -117,6 +116,7 @@ class DirectoryReader extends IndexReader implements Cloneable { boolean success = false; try { readers[i] = SegmentReader.get(readOnly, sis.info(i), termInfosIndexDivisor); + readers[i].readerFinishedListeners = readerFinishedListeners; success = true; } finally { if (!success) { @@ -146,6 +146,7 @@ class DirectoryReader extends IndexReader implements Cloneable { } else { this.codecs = codecs; } + readerFinishedListeners = writer.getReaderFinishedListeners(); // IndexWriter synchronizes externally before calling // us, which ensures infos will not change; so there's @@ -160,6 +161,7 @@ class DirectoryReader extends IndexReader implements Cloneable { final SegmentInfo info = infos.info(i); assert info.dir == dir; readers[i] = writer.readerPool.getReadOnlyClone(info, true, termInfosIndexDivisor); + readers[i].readerFinishedListeners = readerFinishedListeners; success = true; } finally { if (!success) { @@ -182,11 +184,14 @@ class DirectoryReader extends IndexReader implements Cloneable { /** This constructor is only used for {@link #reopen()} */ DirectoryReader(Directory directory, SegmentInfos infos, 
SegmentReader[] oldReaders, int[] oldStarts, - boolean readOnly, boolean doClone, int termInfosIndexDivisor, CodecProvider codecs) throws IOException { + boolean readOnly, boolean doClone, int termInfosIndexDivisor, CodecProvider codecs, + Collection readerFinishedListeners) throws IOException { this.directory = directory; this.readOnly = readOnly; this.segmentInfos = infos; this.termInfosIndexDivisor = termInfosIndexDivisor; + this.readerFinishedListeners = readerFinishedListeners; + if (codecs == null) { this.codecs = CodecProvider.getDefault(); } else { @@ -232,8 +237,10 @@ class DirectoryReader extends IndexReader implements Cloneable { // this is a new reader; in case we hit an exception we can close it safely newReader = SegmentReader.get(readOnly, infos.info(i), termInfosIndexDivisor); + newReader.readerFinishedListeners = readerFinishedListeners; } else { newReader = newReaders[i].reopenSegment(infos.info(i), doClone, readOnly); + assert newReader.readerFinishedListeners == readerFinishedListeners; } if (newReader == newReaders[i]) { // this reader will be shared between the old and the new one, @@ -357,6 +364,7 @@ class DirectoryReader extends IndexReader implements Cloneable { writeLock = null; hasChanges = false; } + assert newReader.readerFinishedListeners != null; return newReader; } @@ -391,7 +399,9 @@ class DirectoryReader extends IndexReader implements Cloneable { // TODO: right now we *always* make a new reader; in // the future we could have write make some effort to // detect that no changes have occurred - return writer.getReader(); + IndexReader reader = writer.getReader(); + reader.readerFinishedListeners = readerFinishedListeners; + return reader; } private IndexReader doReopen(final boolean openReadOnly, IndexCommit commit) throws CorruptIndexException, IOException { @@ -458,7 +468,7 @@ class DirectoryReader extends IndexReader implements Cloneable { private synchronized DirectoryReader doReopen(SegmentInfos infos, boolean doClone, boolean 
openReadOnly) throws CorruptIndexException, IOException { DirectoryReader reader; - reader = new DirectoryReader(directory, infos, subReaders, starts, openReadOnly, doClone, termInfosIndexDivisor, codecs); + reader = new DirectoryReader(directory, infos, subReaders, starts, openReadOnly, doClone, termInfosIndexDivisor, codecs, readerFinishedListeners); return reader; } @@ -808,11 +818,6 @@ class DirectoryReader extends IndexReader implements Cloneable { } } - // NOTE: only needed in case someone had asked for - // FieldCache for top-level reader (which is generally - // not a good idea): - FieldCache.DEFAULT.purge(this); - if (writer != null) { // Since we just closed, writer may now be able to // delete unused files: diff --git a/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java b/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java index f7874dc4ec0..6dc2f48227e 100644 --- a/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java +++ b/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java @@ -22,13 +22,14 @@ import org.apache.lucene.document.FieldSelector; import org.apache.lucene.index.IndexReader.ReaderContext; import org.apache.lucene.store.Directory; import org.apache.lucene.util.Bits; -import org.apache.lucene.search.FieldCache; // not great (circular); used only to purge FieldCache entry on close import org.apache.lucene.util.BytesRef; import java.io.IOException; import java.util.Collection; import java.util.Map; import java.util.Comparator; +import java.util.HashSet; +import java.util.Collections; /** A FilterIndexReader contains another IndexReader, which it * uses as its basic source of data, possibly transforming the data along the @@ -286,6 +287,7 @@ public class FilterIndexReader extends IndexReader { public FilterIndexReader(IndexReader in) { super(); this.in = in; + readerFinishedListeners = Collections.synchronizedSet(new HashSet()); } @Override @@ -391,11 +393,6 @@ public class FilterIndexReader extends 
IndexReader { @Override protected void doClose() throws IOException { in.close(); - - // NOTE: only needed in case someone had asked for - // FieldCache for top-level reader (which is generally - // not a good idea): - FieldCache.DEFAULT.purge(this); } @@ -454,4 +451,16 @@ public class FilterIndexReader extends IndexReader { buffer.append(')'); return buffer.toString(); } -} \ No newline at end of file + + @Override + public void addReaderFinishedListener(ReaderFinishedListener listener) { + super.addReaderFinishedListener(listener); + in.addReaderFinishedListener(listener); + } + + @Override + public void removeReaderFinishedListener(ReaderFinishedListener listener) { + super.removeReaderFinishedListener(listener); + in.removeReaderFinishedListener(listener); + } +} diff --git a/lucene/src/java/org/apache/lucene/index/IndexReader.java b/lucene/src/java/org/apache/lucene/index/IndexReader.java index 7f1b736cf1e..29d7869f214 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexReader.java +++ b/lucene/src/java/org/apache/lucene/index/IndexReader.java @@ -34,6 +34,7 @@ import java.io.IOException; import java.io.Closeable; import java.util.Collection; import java.util.List; +import java.util.HashSet; import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; @@ -81,6 +82,65 @@ import java.util.concurrent.atomic.AtomicInteger; */ public abstract class IndexReader implements Cloneable,Closeable { + /** + * A custom listener that's invoked when the IndexReader + * is finished. + * + *

For a SegmentReader, this listener is called only + * once all SegmentReaders sharing the same core are + * closed. At this point it is safe for apps to evict + * this reader from any caches keyed on {@link + * #getCoreCacheKey}. This is the same interface that + * {@link FieldCache} uses, internally, to evict + * entries.

+ * + *

For other readers, this listener is called when they + * are closed.

+ * + * @lucene.experimental + */ + public static interface ReaderFinishedListener { + public void finished(IndexReader reader); + } + + // Impls must set this if they may call add/removeReaderFinishedListener: + protected volatile Collection readerFinishedListeners; + + /** Expert: adds a {@link ReaderFinishedListener}. The + * provided listener is also added to any sub-readers, if + * this is a composite reader. Also, any reader reopened + * or cloned from this one will also copy the listeners at + * the time of reopen. + * + * @lucene.experimental */ + public void addReaderFinishedListener(ReaderFinishedListener listener) { + readerFinishedListeners.add(listener); + } + + /** Expert: remove a previously added {@link ReaderFinishedListener}. + * + * @lucene.experimental */ + public void removeReaderFinishedListener(ReaderFinishedListener listener) { + readerFinishedListeners.remove(listener); + } + + protected void notifyReaderFinishedListeners() { + // Defensive (should never be null -- all impls must set + // this): + if (readerFinishedListeners != null) { + + // Clone the set so that we don't have to sync on + // readerFinishedListeners while invoking them: + for(ReaderFinishedListener listener : new HashSet(readerFinishedListeners)) { + listener.finished(this); + } + } + } + + protected void readerFinished() { + notifyReaderFinishedListeners(); + } + /** * Constants describing field properties, for example used for * {@link IndexReader#getFieldNames(FieldOption)}. 
@@ -195,6 +255,7 @@ public abstract class IndexReader implements Cloneable,Closeable { refCount.incrementAndGet(); } } + readerFinished(); } } diff --git a/lucene/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/src/java/org/apache/lucene/index/IndexWriter.java index 710822bd15d..b7573b5b4e4 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexWriter.java +++ b/lucene/src/java/org/apache/lucene/index/IndexWriter.java @@ -30,6 +30,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.Collections; import java.util.concurrent.atomic.AtomicInteger; import org.apache.lucene.analysis.Analyzer; @@ -365,6 +366,13 @@ public class IndexWriter implements Closeable { return r; } + // Used for all SegmentReaders we open + private final Collection readerFinishedListeners = Collections.synchronizedSet(new HashSet()); + + Collection getReaderFinishedListeners() throws IOException { + return readerFinishedListeners; + } + /** Holds shared SegmentReader instances. IndexWriter uses * SegmentReaders for 1) applying deletes, 2) doing * merges, 3) handing out a real-time reader. 
This pool @@ -574,6 +582,7 @@ public class IndexWriter implements Closeable { // synchronized // Returns a ref, which we xfer to readerMap: sr = SegmentReader.get(false, info.dir, info, readBufferSize, doOpenStores, termsIndexDivisor); + sr.readerFinishedListeners = readerFinishedListeners; if (info.dir == directory) { // Only pool if reader is not external diff --git a/lucene/src/java/org/apache/lucene/index/MultiReader.java b/lucene/src/java/org/apache/lucene/index/MultiReader.java index 8a5dca94f22..1e95cb272d9 100644 --- a/lucene/src/java/org/apache/lucene/index/MultiReader.java +++ b/lucene/src/java/org/apache/lucene/index/MultiReader.java @@ -20,10 +20,11 @@ package org.apache.lucene.index; import java.io.IOException; import java.util.Collection; import java.util.Map; +import java.util.HashSet; +import java.util.Collections; import org.apache.lucene.document.Document; import org.apache.lucene.document.FieldSelector; -import org.apache.lucene.search.FieldCache; // not great (circular); used only to purge FieldCache entry on close import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.ReaderUtil; @@ -82,6 +83,7 @@ public class MultiReader extends IndexReader implements Cloneable { } } starts[subReaders.length] = maxDoc; + readerFinishedListeners = Collections.synchronizedSet(new HashSet()); return ReaderUtil.buildReaderContext(this); } @@ -345,11 +347,6 @@ public class MultiReader extends IndexReader implements Cloneable { subReaders[i].close(); } } - - // NOTE: only needed in case someone had asked for - // FieldCache for top-level reader (which is generally - // not a good idea): - FieldCache.DEFAULT.purge(this); } @Override @@ -389,4 +386,20 @@ public class MultiReader extends IndexReader implements Cloneable { public ReaderContext getTopReaderContext() { return topLevelContext; } + + @Override + public void addReaderFinishedListener(ReaderFinishedListener listener) { + 
super.addReaderFinishedListener(listener); + for(IndexReader sub : subReaders) { + sub.addReaderFinishedListener(listener); + } + } + + @Override + public void removeReaderFinishedListener(ReaderFinishedListener listener) { + super.removeReaderFinishedListener(listener); + for(IndexReader sub : subReaders) { + sub.removeReaderFinishedListener(listener); + } + } } diff --git a/lucene/src/java/org/apache/lucene/index/ParallelReader.java b/lucene/src/java/org/apache/lucene/index/ParallelReader.java index b1ffd23834b..8b789e02058 100644 --- a/lucene/src/java/org/apache/lucene/index/ParallelReader.java +++ b/lucene/src/java/org/apache/lucene/index/ParallelReader.java @@ -22,7 +22,6 @@ import org.apache.lucene.document.FieldSelector; import org.apache.lucene.document.FieldSelectorResult; import org.apache.lucene.document.Fieldable; import org.apache.lucene.util.Bits; -import org.apache.lucene.search.FieldCache; // not great (circular); used only to purge FieldCache entry on close import org.apache.lucene.util.BytesRef; import java.io.IOException; @@ -73,6 +72,7 @@ public class ParallelReader extends IndexReader { public ParallelReader(boolean closeSubReaders) throws IOException { super(); this.incRefReaders = !closeSubReaders; + readerFinishedListeners = Collections.synchronizedSet(new HashSet()); } /** {@inheritDoc} */ @@ -529,8 +529,6 @@ public class ParallelReader extends IndexReader { readers.get(i).close(); } } - - FieldCache.DEFAULT.purge(this); } @Override @@ -548,6 +546,21 @@ public class ParallelReader extends IndexReader { return topLevelReaderContext; } + @Override + public void addReaderFinishedListener(ReaderFinishedListener listener) { + super.addReaderFinishedListener(listener); + for (IndexReader reader : readers) { + reader.addReaderFinishedListener(listener); + } + } + + @Override + public void removeReaderFinishedListener(ReaderFinishedListener listener) { + super.removeReaderFinishedListener(listener); + for (IndexReader reader : readers) { + 
reader.removeReaderFinishedListener(listener); + } + } } diff --git a/lucene/src/java/org/apache/lucene/index/SegmentReader.java b/lucene/src/java/org/apache/lucene/index/SegmentReader.java index 462ceaaceb8..ac36827bfc6 100644 --- a/lucene/src/java/org/apache/lucene/index/SegmentReader.java +++ b/lucene/src/java/org/apache/lucene/index/SegmentReader.java @@ -38,7 +38,6 @@ import org.apache.lucene.util.BitVector; import org.apache.lucene.util.Bits; import org.apache.lucene.util.CloseableThreadLocal; import org.apache.lucene.index.codecs.FieldsProducer; -import org.apache.lucene.search.FieldCache; // not great (circular); used only to purge FieldCache entry on close import org.apache.lucene.util.BytesRef; /** @@ -183,13 +182,9 @@ public class SegmentReader extends IndexReader implements Cloneable { storeCFSReader.close(); } - // Force FieldCache to evict our entries at this - // point. If the exception occurred while - // initializing the core readers, then - // origInstance will be null, and we don't want - // to call FieldCache.purge (it leads to NPE): + // Now, notify any ReaderFinished listeners: if (origInstance != null) { - FieldCache.DEFAULT.purge(origInstance); + origInstance.notifyReaderFinishedListeners(); } } } @@ -633,6 +628,7 @@ public class SegmentReader extends IndexReader implements Cloneable { clone.si = si; clone.readBufferSize = readBufferSize; clone.pendingDeleteCount = pendingDeleteCount; + clone.readerFinishedListeners = readerFinishedListeners; if (!openReadOnly && hasChanges) { // My pending changes transfer to the new reader @@ -1203,4 +1199,14 @@ public class SegmentReader extends IndexReader implements Cloneable { public int getTermInfosIndexDivisor() { return core.termsIndexDivisor; } + + @Override + protected void readerFinished() { + // Do nothing here -- we have more careful control on + // when to notify that a SegmentReader has finished, + // because a given core is shared across many cloned + // SegmentReaders. 
We only notify once that core is no + // longer used (all SegmentReaders sharing it have been + // closed). + } } diff --git a/lucene/src/java/org/apache/lucene/search/FieldCacheImpl.java b/lucene/src/java/org/apache/lucene/search/FieldCacheImpl.java index b583dc6fe78..971d7459840 100644 --- a/lucene/src/java/org/apache/lucene/search/FieldCacheImpl.java +++ b/lucene/src/java/org/apache/lucene/search/FieldCacheImpl.java @@ -137,6 +137,13 @@ public class FieldCacheImpl implements FieldCache { // Made Public so that public Object getValue() { return value; } } + final static IndexReader.ReaderFinishedListener purgeReader = new IndexReader.ReaderFinishedListener() { + // @Override -- not until Java 1.6 + public void finished(IndexReader reader) { + FieldCache.DEFAULT.purge(reader); + } + }; + /** Expert: Internal cache. */ final static class Cache { Cache() { @@ -171,8 +178,10 @@ public class FieldCacheImpl implements FieldCache { // Made Public so that synchronized (readerCache) { innerCache = readerCache.get(readerKey); if (innerCache == null) { + // First time this reader is using FieldCache innerCache = new HashMap,Object>(); readerCache.put(readerKey, innerCache); + reader.addReaderFinishedListener(purgeReader); value = null; } else { value = innerCache.get(key); diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexReader.java b/lucene/src/test/org/apache/lucene/index/TestIndexReader.java index ef87922f311..01b73877385 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexReader.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexReader.java @@ -1905,4 +1905,42 @@ public class TestIndexReader extends LuceneTestCase dir.close(); } } + + // LUCENE-2474 + public void testReaderFinishedListener() throws Exception { + Directory dir = newDirectory(); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); + ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(3); + 
writer.setInfoStream(VERBOSE ? System.out : null); + writer.addDocument(new Document()); + writer.commit(); + writer.addDocument(new Document()); + writer.commit(); + final IndexReader reader = writer.getReader(); + final int[] closeCount = new int[1]; + final IndexReader.ReaderFinishedListener listener = new IndexReader.ReaderFinishedListener() { + public void finished(IndexReader reader) { + closeCount[0]++; + } + }; + + reader.addReaderFinishedListener(listener); + + reader.close(); + + // Just the top reader + assertEquals(1, closeCount[0]); + writer.close(); + + // Now also the subs + assertEquals(3, closeCount[0]); + + IndexReader reader2 = IndexReader.open(dir); + reader2.addReaderFinishedListener(listener); + + closeCount[0] = 0; + reader2.close(); + assertEquals(3, closeCount[0]); + dir.close(); + } } From bb391fd3edc3729fdc8561d2f88ae74a8221ff56 Mon Sep 17 00:00:00 2001 From: Uwe Schindler Date: Tue, 25 Jan 2011 22:56:18 +0000 Subject: [PATCH 012/185] LUCENE-1846: DateTools now uses the US locale everywhere, so DateTools.round() is safe also in strange locales git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063501 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/CHANGES.txt | 3 +++ lucene/src/java/org/apache/lucene/document/DateTools.java | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index f40b16255a9..d131a02b8c6 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -698,6 +698,9 @@ Bug fixes * LUCENE-2693: RAM used by IndexWriter was slightly incorrectly computed. (Jason Rutherglen via Shai Erera) +* LUCENE-1846: DateTools now uses the US locale everywhere, so DateTools.round() + is safe also in strange locales. 
(Uwe Schindler) + New features * LUCENE-2128: Parallelized fetching document frequencies during weight diff --git a/lucene/src/java/org/apache/lucene/document/DateTools.java b/lucene/src/java/org/apache/lucene/document/DateTools.java index ddac753b688..0e5199c6247 100644 --- a/lucene/src/java/org/apache/lucene/document/DateTools.java +++ b/lucene/src/java/org/apache/lucene/document/DateTools.java @@ -69,7 +69,7 @@ public class DateTools { MILLISECOND_FORMAT.setTimeZone(GMT); } - final Calendar calInstance = Calendar.getInstance(GMT); + final Calendar calInstance = Calendar.getInstance(GMT, Locale.US); } private static final ThreadLocal FORMATS = new ThreadLocal() { From c403d3e75624c4f37d364d97355d959c146137b4 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Tue, 25 Jan 2011 23:15:01 +0000 Subject: [PATCH 013/185] LUCENE-2887: sharpen jdocs for IndexReader.undeleteAll git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063513 13f79535-47bb-0310-9956-ffa450edef68 --- .../java/org/apache/lucene/index/IndexReader.java | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/lucene/src/java/org/apache/lucene/index/IndexReader.java b/lucene/src/java/org/apache/lucene/index/IndexReader.java index 29d7869f214..7688884bb19 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexReader.java +++ b/lucene/src/java/org/apache/lucene/index/IndexReader.java @@ -1227,11 +1227,13 @@ public abstract class IndexReader implements Cloneable,Closeable { /** Undeletes all documents currently marked as deleted in * this index. * - *

NOTE: this is only a best-effort process. For - * example, if all documents in a given segment were - * deleted, Lucene now drops that segment from the index, - * which means its documents will not be recovered by this - * method. + *

NOTE: this method can only recover documents marked + * for deletion but not yet removed from the index; when + * and how Lucene removes deleted documents is an + * implementation detail, subject to change from release + * to release. However, you can use {@link + * #numDeletedDocs} on the current IndexReader instance to + * see how many documents will be un-deleted. * * @throws StaleReaderException if the index has changed * since this reader was opened From e76ad0990d2b389aee62246f12e7eeb69e90fa1e Mon Sep 17 00:00:00 2001 From: Shai Erera Date: Wed, 26 Jan 2011 09:10:06 +0000 Subject: [PATCH 014/185] LUCENE-929: contrib/benchmark build doesn't handle checking if content is properly extracted (trunk) git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063647 13f79535-47bb-0310-9956-ffa450edef68 --- modules/benchmark/CHANGES.txt | 5 + modules/benchmark/build.xml | 1 - .../benchmark/utils/ExtractReuters.java | 237 ++++++++---------- 3 files changed, 110 insertions(+), 133 deletions(-) diff --git a/modules/benchmark/CHANGES.txt b/modules/benchmark/CHANGES.txt index 82c005c3ea9..3811723e38b 100644 --- a/modules/benchmark/CHANGES.txt +++ b/modules/benchmark/CHANGES.txt @@ -2,6 +2,11 @@ Lucene Benchmark Contrib Change Log The Benchmark contrib package contains code for benchmarking Lucene in a variety of ways. +01/26/2011 + LUCENE-929: ExtractReuters first extracts to a tmp dir and then renames. That + way, if a previous extract attempt failed, "ant extract-reuters" will still + extract the files. (Shai Erera, Doron Cohen, Grant Ingersoll) + 01/24/2011 LUCENE-2885: Add WaitForMerges task (calls IndexWriter.waitForMerges()). 
(Mike McCandless) diff --git a/modules/benchmark/build.xml b/modules/benchmark/build.xml index f2abf4a3ef7..10d1510fd1d 100644 --- a/modules/benchmark/build.xml +++ b/modules/benchmark/build.xml @@ -87,7 +87,6 @@ - diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/utils/ExtractReuters.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/utils/ExtractReuters.java index 3e4104b5b85..395d640fc72 100644 --- a/modules/benchmark/src/java/org/apache/lucene/benchmark/utils/ExtractReuters.java +++ b/modules/benchmark/src/java/org/apache/lucene/benchmark/utils/ExtractReuters.java @@ -29,146 +29,119 @@ import java.util.regex.Pattern; /** * Split the Reuters SGML documents into Simple Text files containing: Title, Date, Dateline, Body */ -public class ExtractReuters -{ - private File reutersDir; - private File outputDir; - private static final String LINE_SEPARATOR = System.getProperty("line.separator"); - - public ExtractReuters(File reutersDir, File outputDir) - { - this.reutersDir = reutersDir; - this.outputDir = outputDir; - System.out.println("Deleting all files in " + outputDir); - File [] files = outputDir.listFiles(); - for (int i = 0; i < files.length; i++) - { - files[i].delete(); - } +public class ExtractReuters { + private File reutersDir; + private File outputDir; + private static final String LINE_SEPARATOR = System.getProperty("line.separator"); + public ExtractReuters(File reutersDir, File outputDir) { + this.reutersDir = reutersDir; + this.outputDir = outputDir; + System.out.println("Deleting all files in " + outputDir); + for (File f : outputDir.listFiles()) { + f.delete(); } + } - public void extract() - { - File [] sgmFiles = reutersDir.listFiles(new FileFilter() - { - public boolean accept(File file) - { - return file.getName().endsWith(".sgm"); - } - }); - if (sgmFiles != null && sgmFiles.length > 0) - { - for (int i = 0; i < sgmFiles.length; i++) - { - File sgmFile = sgmFiles[i]; - extractFile(sgmFile); + public void extract() 
{ + File[] sgmFiles = reutersDir.listFiles(new FileFilter() { + public boolean accept(File file) { + return file.getName().endsWith(".sgm"); + } + }); + if (sgmFiles != null && sgmFiles.length > 0) { + for (File sgmFile : sgmFiles) { + extractFile(sgmFile); + } + } else { + System.err.println("No .sgm files in " + reutersDir); + } + } + + Pattern EXTRACTION_PATTERN = Pattern + .compile("(.*?)|(.*?)|(.*?)"); + + private static String[] META_CHARS = { "&", "<", ">", "\"", "'" }; + + private static String[] META_CHARS_SERIALIZATIONS = { "&", "<", + ">", """, "'" }; + + /** + * Override if you wish to change what is extracted + * + * @param sgmFile + */ + protected void extractFile(File sgmFile) { + try { + BufferedReader reader = new BufferedReader(new FileReader(sgmFile)); + + StringBuilder buffer = new StringBuilder(1024); + StringBuilder outBuffer = new StringBuilder(1024); + + String line = null; + int docNumber = 0; + while ((line = reader.readLine()) != null) { + // when we see a closing reuters tag, flush the file + + if (line.indexOf("(.*?)|(.*?)|(.*?)"); - - private static String[] META_CHARS - = {"&", "<", ">", "\"", "'"}; - - private static String[] META_CHARS_SERIALIZATIONS - = {"&", "<", ">", """, "'"}; - - /** - * Override if you wish to change what is extracted - * - * @param sgmFile - */ - protected void extractFile(File sgmFile) - { - try - { - BufferedReader reader = new BufferedReader(new FileReader(sgmFile)); - - StringBuilder buffer = new StringBuilder(1024); - StringBuilder outBuffer = new StringBuilder(1024); - - String line = null; - int docNumber = 0; - while ((line = reader.readLine()) != null) - { - //when we see a closing reuters tag, flush the file - - if (line.indexOf(" org.apache.lucene.benchmark.utils.ExtractReuters "); - } + private static void printUsage() { + System.err.println("Usage: java -cp <...> org.apache.lucene.benchmark.utils.ExtractReuters "); + } + } From 20621a4e7257fafee7d80a98641484574063c1c6 Mon Sep 17 00:00:00 2001 
From: Robert Muir Date: Wed, 26 Jan 2011 13:00:41 +0000 Subject: [PATCH 015/185] SOLR-1826: Add tests for highlighting with termOffsets=true and overlapping tokens git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063702 13f79535-47bb-0310-9956-ffa450edef68 --- solr/CHANGES.txt | 3 +++ solr/src/test-files/solr/conf/schema.xml | 1 + .../solr/highlight/HighlighterTest.java | 26 +++++++++++++++++++ 3 files changed, 30 insertions(+) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index 1235a429653..683a722329b 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -733,6 +733,9 @@ Other Changes * SOLR-2213: Upgrade to jQuery 1.4.3 (Erick Erickson via ryan) +* SOLR-1826: Add unit tests for highlighting with termOffsets=true + and overlapping tokens. (Stefan Oestreicher via rmuir) + Build ---------------------- diff --git a/solr/src/test-files/solr/conf/schema.xml b/solr/src/test-files/solr/conf/schema.xml index 05fa45e7196..6590e20cc95 100644 --- a/solr/src/test-files/solr/conf/schema.xml +++ b/solr/src/test-files/solr/conf/schema.xml @@ -483,6 +483,7 @@ + diff --git a/solr/src/test/org/apache/solr/highlight/HighlighterTest.java b/solr/src/test/org/apache/solr/highlight/HighlighterTest.java index dba81809474..46050f9e39b 100755 --- a/solr/src/test/org/apache/solr/highlight/HighlighterTest.java +++ b/solr/src/test/org/apache/solr/highlight/HighlighterTest.java @@ -769,4 +769,30 @@ public class HighlighterTest extends SolrTestCaseJ4 { ); } + + public void testSubwordWildcardHighlight() { + assertU(adoc("subword", "lorem PowerShot.com ipsum", "id", "1")); + assertU(commit()); + assertQ("subword wildcard highlighting", + req("q", "subword:pow*", "hl", "true", "hl.fl", "subword"), + "//lst[@name='highlighting']/lst[@name='1']" + + "/arr[@name='subword']/str='lorem PowerShot.com ipsum'"); + } + + public void testSubwordWildcardHighlightWithTermOffsets() { + assertU(adoc("subword_offsets", "lorem PowerShot.com ipsum", "id", "1")); + assertU(commit()); + 
assertQ("subword wildcard highlighting", + req("q", "subword_offsets:pow*", "hl", "true", "hl.fl", "subword_offsets"), + "//lst[@name='highlighting']/lst[@name='1']" + + "/arr[@name='subword_offsets']/str='lorem PowerShot.com ipsum'"); + } + public void testSubwordWildcardHighlightWithTermOffsets2() { + assertU(adoc("subword_offsets", "lorem PowerShot ipsum", "id", "1")); + assertU(commit()); + assertQ("subword wildcard highlighting", + req("q", "subword_offsets:pow*", "hl", "true", "hl.fl", "subword_offsets"), + "//lst[@name='highlighting']/lst[@name='1']" + + "/arr[@name='subword_offsets']/str='lorem PowerShot ipsum'"); + } } From 9e51a873f6cde6e0ed03d443647ea163ee75e4ef Mon Sep 17 00:00:00 2001 From: Uwe Schindler Date: Wed, 26 Jan 2011 15:47:28 +0000 Subject: [PATCH 016/185] LUCENE-2889: Remove @lucene.experimental from Numeric* git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063762 13f79535-47bb-0310-9956-ffa450edef68 --- .../src/java/org/apache/lucene/analysis/NumericTokenStream.java | 2 -- lucene/src/java/org/apache/lucene/document/NumericField.java | 2 -- .../src/java/org/apache/lucene/search/NumericRangeFilter.java | 2 -- lucene/src/java/org/apache/lucene/search/NumericRangeQuery.java | 2 -- lucene/src/java/org/apache/lucene/util/NumericUtils.java | 2 -- 5 files changed, 10 deletions(-) diff --git a/lucene/src/java/org/apache/lucene/analysis/NumericTokenStream.java b/lucene/src/java/org/apache/lucene/analysis/NumericTokenStream.java index 018903c0892..502c3a214ac 100644 --- a/lucene/src/java/org/apache/lucene/analysis/NumericTokenStream.java +++ b/lucene/src/java/org/apache/lucene/analysis/NumericTokenStream.java @@ -84,8 +84,6 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; * href="../search/NumericRangeQuery.html#precisionStepDesc">precisionStep * parameter as well as how numeric fields work under the hood.

* - * @lucene.experimental - * * @since 2.9 */ public final class NumericTokenStream extends TokenStream { diff --git a/lucene/src/java/org/apache/lucene/document/NumericField.java b/lucene/src/java/org/apache/lucene/document/NumericField.java index b1ccf52c8bc..4d008e0169f 100644 --- a/lucene/src/java/org/apache/lucene/document/NumericField.java +++ b/lucene/src/java/org/apache/lucene/document/NumericField.java @@ -134,8 +134,6 @@ import org.apache.lucene.search.FieldCache; // javadocs * values are returned as {@link String}s (according to * toString(value) of the used data type). * - * @lucene.experimental - * * @since 2.9 */ public final class NumericField extends AbstractField { diff --git a/lucene/src/java/org/apache/lucene/search/NumericRangeFilter.java b/lucene/src/java/org/apache/lucene/search/NumericRangeFilter.java index 69ba4ace02e..f3a2dc1a66c 100644 --- a/lucene/src/java/org/apache/lucene/search/NumericRangeFilter.java +++ b/lucene/src/java/org/apache/lucene/search/NumericRangeFilter.java @@ -39,8 +39,6 @@ import org.apache.lucene.util.NumericUtils; // for javadocs * See {@link NumericRangeQuery} for details on how Lucene * indexes and searches numeric valued fields. * - * @lucene.experimental - * * @since 2.9 **/ public final class NumericRangeFilter extends MultiTermQueryWrapperFilter> { diff --git a/lucene/src/java/org/apache/lucene/search/NumericRangeQuery.java b/lucene/src/java/org/apache/lucene/search/NumericRangeQuery.java index 2cba90ad0bd..1daa453383c 100644 --- a/lucene/src/java/org/apache/lucene/search/NumericRangeQuery.java +++ b/lucene/src/java/org/apache/lucene/search/NumericRangeQuery.java @@ -76,8 +76,6 @@ import org.apache.lucene.index.TermsEnum; * BooleanQuery rewrite methods without changing * BooleanQuery's default max clause count. * - * @lucene.experimental - * *

How it works

* *

See the publication about panFMP, diff --git a/lucene/src/java/org/apache/lucene/util/NumericUtils.java b/lucene/src/java/org/apache/lucene/util/NumericUtils.java index 0a08f95e60d..232461ddcf5 100644 --- a/lucene/src/java/org/apache/lucene/util/NumericUtils.java +++ b/lucene/src/java/org/apache/lucene/util/NumericUtils.java @@ -22,8 +22,6 @@ import org.apache.lucene.document.NumericField; import org.apache.lucene.search.NumericRangeFilter; import org.apache.lucene.search.NumericRangeQuery; // for javadocs -// TODO: Remove the commented out methods before release! - /** * This is a helper class to generate prefix-encoded representations for numerical values * and supplies converters to represent float/double values as sortable integers/longs. From c40a12dd46b4e87751cdc09e6a8b25cdbea0104e Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Wed, 26 Jan 2011 19:18:48 +0000 Subject: [PATCH 017/185] jdocs: note that calling close(false) can hit MergeAbortedExc's in optimize, addIndexes(IndexReader[]), expungeDeletes git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063837 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/lucene/index/IndexWriter.java | 20 +++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/lucene/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/src/java/org/apache/lucene/index/IndexWriter.java index b7573b5b4e4..eb3dce99f00 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexWriter.java +++ b/lucene/src/java/org/apache/lucene/index/IndexWriter.java @@ -1520,6 +1520,11 @@ public class IndexWriter implements Closeable { * you should immediately close the writer. See above for details.

* + *

NOTE: if you call {@link #close(boolean)} + * with false, which aborts all running merges, + * then any thread still running this method might hit a + * {@link MergePolicy.MergeAbortedException}. + * * @throws CorruptIndexException if the index is corrupt * @throws IOException if there is a low-level IO error * @see MergePolicy#findMergesForOptimize @@ -1669,6 +1674,11 @@ public class IndexWriter implements Closeable { *

NOTE: if this method hits an OutOfMemoryError * you should immediately close the writer. See above for details.

+ * + *

NOTE: if you call {@link #close(boolean)} + * with false, which aborts all running merges, + * then any thread still running this method might hit a + * {@link MergePolicy.MergeAbortedException}. */ public void expungeDeletes(boolean doWait) throws CorruptIndexException, IOException { @@ -1939,8 +1949,9 @@ public class IndexWriter implements Closeable { * *

NOTE: this method will forcefully abort all merges * in progress. If other threads are running {@link - * #optimize()} or any of the addIndexes methods, they - * will receive {@link MergePolicy.MergeAbortedException}s. + * #optimize()}, {@link #addIndexes(IndexReader[])} or + * {@link #expungeDeletes} methods, they may receive + * {@link MergePolicy.MergeAbortedException}s. */ public synchronized void deleteAll() throws IOException { try { @@ -2220,6 +2231,11 @@ public class IndexWriter implements Closeable { * you should immediately close the writer. See above for details.

* + *

NOTE: if you call {@link #close(boolean)} + * with false, which aborts all running merges, + * then any thread still running this method might hit a + * {@link MergePolicy.MergeAbortedException}. + * * @throws CorruptIndexException if the index is corrupt * @throws IOException if there is a low-level IO error */ From 3da5bd87288c88b5cae6d1984863d92c59eea864 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Wed, 26 Jan 2011 19:25:50 +0000 Subject: [PATCH 018/185] remove dead code git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063842 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/src/java/org/apache/lucene/index/IndexWriter.java | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lucene/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/src/java/org/apache/lucene/index/IndexWriter.java index eb3dce99f00..d4fd1a0f630 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexWriter.java +++ b/lucene/src/java/org/apache/lucene/index/IndexWriter.java @@ -215,7 +215,6 @@ public class IndexWriter implements Closeable { private long lastCommitChangeCount; // last changeCount that was committed private SegmentInfos rollbackSegmentInfos; // segmentInfos we will fallback to if the commit fails - private HashMap rollbackSegments; volatile SegmentInfos pendingCommit; // set when a commit is pending (after prepareCommit() & before commit()) volatile long pendingCommitChangeCount; @@ -860,10 +859,6 @@ public class IndexWriter implements Closeable { private synchronized void setRollbackSegmentInfos(SegmentInfos infos) { rollbackSegmentInfos = (SegmentInfos) infos.clone(); - rollbackSegments = new HashMap(); - final int size = rollbackSegmentInfos.size(); - for(int i=0;i Date: Wed, 26 Jan 2011 20:55:49 +0000 Subject: [PATCH 019/185] LUCENE-2609: distribute the core tests, as they can be useful downstream to people git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063868 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/build.xml | 6 ++++-- 
lucene/common-build.xml | 4 ++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/lucene/build.xml b/lucene/build.xml index 31b3b2e3011..7827695aa9d 100644 --- a/lucene/build.xml +++ b/lucene/build.xml @@ -48,7 +48,7 @@ excludes="contrib/db/*/lib/,contrib/*/ext-libs/,src/site/build/" /> - + @@ -401,6 +401,8 @@ classifier="sources"/> + diff --git a/lucene/common-build.xml b/lucene/common-build.xml index be42d5bd648..fe93b40b62b 100644 --- a/lucene/common-build.xml +++ b/lucene/common-build.xml @@ -308,6 +308,10 @@ + + + + ################################################################## From 326ab7d5774b57a001db2f64f4df9d0ae6a84a5a Mon Sep 17 00:00:00 2001 From: Yonik Seeley Date: Wed, 26 Jan 2011 20:57:05 +0000 Subject: [PATCH 020/185] SOLR-1711: fix hang when queue is full but there are no runners git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063869 13f79535-47bb-0310-9956-ffa450edef68 --- solr/CHANGES.txt | 3 +- .../solrj/impl/StreamingUpdateSolrServer.java | 30 ++++++++++++------- 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index 683a722329b..a4308254de6 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -516,7 +516,8 @@ Bug Fixes * SOLR-1711: SolrJ - StreamingUpdateSolrServer had a race condition that could halt the streaming of documents. The original patch to fix this (never officially released) introduced another hanging bug due to - connections not being released. (Attila Babo, Erik Hetzner via yonik) + connections not being released. 
+ (Attila Babo, Erik Hetzner, Johannes Tuchscherer via yonik) * SOLR-1748, SOLR-1747, SOLR-1746, SOLR-1745, SOLR-1744: Streams and Readers retrieved from ContentStreams are not closed in various places, resulting diff --git a/solr/src/solrj/org/apache/solr/client/solrj/impl/StreamingUpdateSolrServer.java b/solr/src/solrj/org/apache/solr/client/solrj/impl/StreamingUpdateSolrServer.java index 4460dfb2ce0..c47f4a09957 100644 --- a/solr/src/solrj/org/apache/solr/client/solrj/impl/StreamingUpdateSolrServer.java +++ b/solr/src/solrj/org/apache/solr/client/solrj/impl/StreamingUpdateSolrServer.java @@ -173,12 +173,20 @@ public class StreamingUpdateSolrServer extends CommonsHttpSolrServer } catch (Throwable e) { handleError( e ); - } + } finally { - // remove it from the list of running things... + + // remove it from the list of running things unless we are the last runner and the queue is full... + // in which case, the next queue.put() would block and there would be no runners to handle it. synchronized (runners) { - runners.remove( this ); + if (runners.size() == 1 && queue.remainingCapacity() == 0) { + // keep this runner alive + scheduler.execute(this); + } else { + runners.remove( this ); + } } + log.info( "finished: {}" , this ); runnerLock.unlock(); } @@ -208,7 +216,7 @@ public class StreamingUpdateSolrServer extends CommonsHttpSolrServer return super.request( request ); } } - + try { CountDownLatch tmpLock = lock; if( tmpLock != null ) { @@ -216,18 +224,18 @@ public class StreamingUpdateSolrServer extends CommonsHttpSolrServer } queue.put( req ); - - synchronized( runners ) { - if( runners.isEmpty() - || (queue.remainingCapacity() < queue.size() - && runners.size() < threadCount) ) - { + + synchronized( runners ) { + if( runners.isEmpty() + || (queue.remainingCapacity() < queue.size() + && runners.size() < threadCount) ) + { Runner r = new Runner(); scheduler.execute( r ); runners.add( r ); } } - } + } catch (InterruptedException e) { log.error( "interrupted", e 
); throw new IOException( e.getLocalizedMessage() ); From 471c0ced8288326cf7b4d5cd0468d309b25bccf3 Mon Sep 17 00:00:00 2001 From: Yonik Seeley Date: Wed, 26 Jan 2011 21:39:42 +0000 Subject: [PATCH 021/185] SOLR-2327: error handling - force queryResultWindowSize to a min of 1 git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063877 13f79535-47bb-0310-9956-ffa450edef68 --- solr/src/java/org/apache/solr/core/SolrConfig.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/solr/src/java/org/apache/solr/core/SolrConfig.java b/solr/src/java/org/apache/solr/core/SolrConfig.java index f8421945762..e0cee0ccb08 100644 --- a/solr/src/java/org/apache/solr/core/SolrConfig.java +++ b/solr/src/java/org/apache/solr/core/SolrConfig.java @@ -141,7 +141,7 @@ public class SolrConfig extends Config { filtOptThreshold = getFloat("query/boolTofilterOptimizer/@threshold",.05f); useFilterForSortedQuery = getBool("query/useFilterForSortedQuery", false); - queryResultWindowSize = getInt("query/queryResultWindowSize", 1); + queryResultWindowSize = Math.max(1, getInt("query/queryResultWindowSize", 1)); queryResultMaxDocsCached = getInt("query/queryResultMaxDocsCached", Integer.MAX_VALUE); enableLazyFieldLoading = getBool("query/enableLazyFieldLoading", false); From e54599568d77c1f66e09ea8a416c7ecedd56d074 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Wed, 26 Jan 2011 21:55:37 +0000 Subject: [PATCH 022/185] LUCENE-2010: don't assert no unref'd files in TIR.testDiskFull; fix rollback on exc during commit to put back any pruned segs git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063882 13f79535-47bb-0310-9956-ffa450edef68 --- .../java/org/apache/lucene/index/DirectoryReader.java | 10 +++++++++- .../test/org/apache/lucene/index/TestIndexReader.java | 9 --------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/lucene/src/java/org/apache/lucene/index/DirectoryReader.java 
b/lucene/src/java/org/apache/lucene/index/DirectoryReader.java index aa372be4b66..0009a5f9322 100644 --- a/lucene/src/java/org/apache/lucene/index/DirectoryReader.java +++ b/lucene/src/java/org/apache/lucene/index/DirectoryReader.java @@ -715,12 +715,16 @@ class DirectoryReader extends IndexReader implements Cloneable { // case we have to roll back: startCommit(); + final SegmentInfos rollbackSegmentInfos = new SegmentInfos(); + rollbackSegmentInfos.addAll(segmentInfos); + boolean success = false; try { for (int i = 0; i < subReaders.length; i++) subReaders[i].commit(); - // Remove segments that contain only 100% deleted docs: + // Remove segments that contain only 100% deleted + // docs: segmentInfos.pruneDeletedSegments(); // Sync all files we just wrote @@ -742,6 +746,10 @@ class DirectoryReader extends IndexReader implements Cloneable { // partially written .del files, etc, are // removed): deleter.refresh(); + + // Restore all SegmentInfos (in case we pruned some) + segmentInfos.clear(); + segmentInfos.addAll(rollbackSegmentInfos); } } diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexReader.java b/lucene/src/test/org/apache/lucene/index/TestIndexReader.java index 01b73877385..1256cbaff98 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexReader.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexReader.java @@ -996,15 +996,6 @@ public class TestIndexReader extends LuceneTestCase } } - // Whether we succeeded or failed, check that all - // un-referenced files were in fact deleted (ie, - // we did not create garbage). 
Just create a - // new IndexFileDeleter, have it delete - // unreferenced files, then verify that in fact - // no files were deleted: - IndexWriter.unlock(dir); - TestIndexWriter.assertNoUnreferencedFiles(dir, "reader.close() failed to delete unreferenced files"); - // Finally, verify index is not corrupt, and, if // we succeeded, we see all docs changed, and if // we failed, we see either all docs or no docs From 63097d1bd8a7051f9b5b2f450eaab6e162e2b337 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Wed, 26 Jan 2011 22:17:57 +0000 Subject: [PATCH 023/185] LUCENE-2474: cutover to MapBackedSet(ConcurrentHashMap) instead of Collections.syncSet(HashSet) git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063897 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/lucene/index/DirectoryReader.java | 4 +- .../lucene/index/FilterIndexReader.java | 6 +- .../org/apache/lucene/index/IndexReader.java | 6 +- .../org/apache/lucene/index/IndexWriter.java | 5 +- .../org/apache/lucene/index/MultiReader.java | 6 +- .../apache/lucene/index/ParallelReader.java | 4 +- .../org/apache/lucene/util/MapBackedSet.java | 73 +++++++++++++++++++ 7 files changed, 89 insertions(+), 15 deletions(-) create mode 100644 lucene/src/java/org/apache/lucene/util/MapBackedSet.java diff --git a/lucene/src/java/org/apache/lucene/index/DirectoryReader.java b/lucene/src/java/org/apache/lucene/index/DirectoryReader.java index 0009a5f9322..06c4d4009be 100644 --- a/lucene/src/java/org/apache/lucene/index/DirectoryReader.java +++ b/lucene/src/java/org/apache/lucene/index/DirectoryReader.java @@ -27,6 +27,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import org.apache.lucene.document.Document; import org.apache.lucene.document.FieldSelector; @@ -36,6 +37,7 @@ import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.index.codecs.CodecProvider; import 
org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.MapBackedSet; /** * An IndexReader which reads indexes with multiple segments. @@ -104,7 +106,7 @@ class DirectoryReader extends IndexReader implements Cloneable { } else { this.codecs = codecs; } - readerFinishedListeners = Collections.synchronizedSet(new HashSet()); + readerFinishedListeners = new MapBackedSet(new ConcurrentHashMap()); // To reduce the chance of hitting FileNotFound // (and having to retry), we open segments in diff --git a/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java b/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java index 6dc2f48227e..d922a48da7e 100644 --- a/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java +++ b/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java @@ -23,13 +23,13 @@ import org.apache.lucene.index.IndexReader.ReaderContext; import org.apache.lucene.store.Directory; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.MapBackedSet; import java.io.IOException; import java.util.Collection; import java.util.Map; import java.util.Comparator; -import java.util.HashSet; -import java.util.Collections; +import java.util.concurrent.ConcurrentHashMap; /** A FilterIndexReader contains another IndexReader, which it * uses as its basic source of data, possibly transforming the data along the @@ -287,7 +287,7 @@ public class FilterIndexReader extends IndexReader { public FilterIndexReader(IndexReader in) { super(); this.in = in; - readerFinishedListeners = Collections.synchronizedSet(new HashSet()); + readerFinishedListeners = new MapBackedSet(new ConcurrentHashMap()); } @Override diff --git a/lucene/src/java/org/apache/lucene/index/IndexReader.java b/lucene/src/java/org/apache/lucene/index/IndexReader.java index 7688884bb19..684c14e628b 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexReader.java +++ 
b/lucene/src/java/org/apache/lucene/index/IndexReader.java @@ -34,7 +34,6 @@ import java.io.IOException; import java.io.Closeable; import java.util.Collection; import java.util.List; -import java.util.HashSet; import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; @@ -128,10 +127,7 @@ public abstract class IndexReader implements Cloneable,Closeable { // Defensive (should never be null -- all impls must set // this): if (readerFinishedListeners != null) { - - // Clone the set so that we don't have to sync on - // readerFinishedListeners while invoking them: - for(ReaderFinishedListener listener : new HashSet(readerFinishedListeners)) { + for(ReaderFinishedListener listener : readerFinishedListeners) { listener.finished(this); } } diff --git a/lucene/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/src/java/org/apache/lucene/index/IndexWriter.java index d4fd1a0f630..321daa0aa2b 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexWriter.java +++ b/lucene/src/java/org/apache/lucene/index/IndexWriter.java @@ -30,8 +30,8 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.Collections; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.ConcurrentHashMap; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.document.Document; @@ -48,6 +48,7 @@ import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.Bits; import org.apache.lucene.util.Constants; import org.apache.lucene.util.ThreadInterruptedException; +import org.apache.lucene.util.MapBackedSet; /** An IndexWriter creates and maintains an index. 
@@ -366,7 +367,7 @@ public class IndexWriter implements Closeable { } // Used for all SegmentReaders we open - private final Collection readerFinishedListeners = Collections.synchronizedSet(new HashSet()); + private final Collection readerFinishedListeners = new MapBackedSet(new ConcurrentHashMap()); Collection getReaderFinishedListeners() throws IOException { return readerFinishedListeners; diff --git a/lucene/src/java/org/apache/lucene/index/MultiReader.java b/lucene/src/java/org/apache/lucene/index/MultiReader.java index 1e95cb272d9..0d3a082567b 100644 --- a/lucene/src/java/org/apache/lucene/index/MultiReader.java +++ b/lucene/src/java/org/apache/lucene/index/MultiReader.java @@ -20,14 +20,14 @@ package org.apache.lucene.index; import java.io.IOException; import java.util.Collection; import java.util.Map; -import java.util.HashSet; -import java.util.Collections; +import java.util.concurrent.ConcurrentHashMap; import org.apache.lucene.document.Document; import org.apache.lucene.document.FieldSelector; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.ReaderUtil; +import org.apache.lucene.util.MapBackedSet; /** An IndexReader which reads multiple indexes, appending * their content. 
*/ @@ -83,7 +83,7 @@ public class MultiReader extends IndexReader implements Cloneable { } } starts[subReaders.length] = maxDoc; - readerFinishedListeners = Collections.synchronizedSet(new HashSet()); + readerFinishedListeners = new MapBackedSet(new ConcurrentHashMap()); return ReaderUtil.buildReaderContext(this); } diff --git a/lucene/src/java/org/apache/lucene/index/ParallelReader.java b/lucene/src/java/org/apache/lucene/index/ParallelReader.java index 8b789e02058..004066c4daa 100644 --- a/lucene/src/java/org/apache/lucene/index/ParallelReader.java +++ b/lucene/src/java/org/apache/lucene/index/ParallelReader.java @@ -23,9 +23,11 @@ import org.apache.lucene.document.FieldSelectorResult; import org.apache.lucene.document.Fieldable; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.MapBackedSet; import java.io.IOException; import java.util.*; +import java.util.concurrent.ConcurrentHashMap; /** An IndexReader which reads multiple, parallel indexes. Each index added @@ -72,7 +74,7 @@ public class ParallelReader extends IndexReader { public ParallelReader(boolean closeSubReaders) throws IOException { super(); this.incRefReaders = !closeSubReaders; - readerFinishedListeners = Collections.synchronizedSet(new HashSet()); + readerFinishedListeners = new MapBackedSet(new ConcurrentHashMap()); } /** {@inheritDoc} */ diff --git a/lucene/src/java/org/apache/lucene/util/MapBackedSet.java b/lucene/src/java/org/apache/lucene/util/MapBackedSet.java new file mode 100644 index 00000000000..7b0c42c8ae3 --- /dev/null +++ b/lucene/src/java/org/apache/lucene/util/MapBackedSet.java @@ -0,0 +1,73 @@ +package org.apache.lucene.util; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.Serializable; +import java.util.AbstractSet; +import java.util.Iterator; +import java.util.Map; + +/** + * A Set implementation that wraps an actual Map based + * implementation. + * + * @lucene.internal + */ +public class MapBackedSet extends AbstractSet implements Serializable { + + private static final long serialVersionUID = -6761513279741915432L; + + private final Map map; + + /** + * Creates a new instance which wraps the specified {@code map}. 
+ */ + public MapBackedSet(Map map) { + this.map = map; + } + + @Override + public int size() { + return map.size(); + } + + @Override + public boolean contains(Object o) { + return map.containsKey(o); + } + + @Override + public boolean add(E o) { + return map.put(o, Boolean.TRUE) == null; + } + + @Override + public boolean remove(Object o) { + return map.remove(o) != null; + } + + @Override + public void clear() { + map.clear(); + } + + @Override + public Iterator iterator() { + return map.keySet().iterator(); + } +} From add8aecd99b03e88ceabd4bb5579cce652c4bc45 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Wed, 26 Jan 2011 22:42:08 +0000 Subject: [PATCH 024/185] LUCENE-2474: make MBS final git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063908 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/src/java/org/apache/lucene/util/MapBackedSet.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lucene/src/java/org/apache/lucene/util/MapBackedSet.java b/lucene/src/java/org/apache/lucene/util/MapBackedSet.java index 7b0c42c8ae3..9db05ec86ba 100644 --- a/lucene/src/java/org/apache/lucene/util/MapBackedSet.java +++ b/lucene/src/java/org/apache/lucene/util/MapBackedSet.java @@ -28,7 +28,7 @@ import java.util.Map; * * @lucene.internal */ -public class MapBackedSet extends AbstractSet implements Serializable { +public final class MapBackedSet extends AbstractSet implements Serializable { private static final long serialVersionUID = -6761513279741915432L; From 51dc4159e6c4ed708cbcf8d18a543b57beb0037f Mon Sep 17 00:00:00 2001 From: Yonik Seeley Date: Wed, 26 Jan 2011 23:40:08 +0000 Subject: [PATCH 025/185] SOLR-1283: fix numRead counter that caused mark invalid exceptions git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063920 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/lucene/analysis/charfilter/HTMLStripCharFilter.java | 1 + .../lucene/analysis/charfilter/HTMLStripCharFilterTest.java | 2 +- 2 files changed, 2 
insertions(+), 1 deletion(-) diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java index 4ab01ab0d32..87591992e1f 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java @@ -101,6 +101,7 @@ public class HTMLStripCharFilter extends BaseCharFilter { if (len>0) { return pushed.charAt(len-1); } + numRead++; int ch = input.read(); push(ch); return ch; diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java index 604f9668d53..f1af45ab350 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java @@ -169,7 +169,7 @@ public class HTMLStripCharFilterTest extends LuceneTestCase { public void testBufferOverflow() throws Exception { StringBuilder testBuilder = new StringBuilder(HTMLStripCharFilter.DEFAULT_READ_AHEAD + 50); - testBuilder.append("ah "); + testBuilder.append("ah ??????"); appendChars(testBuilder, HTMLStripCharFilter.DEFAULT_READ_AHEAD + 500); processBuffer(testBuilder.toString(), "Failed on pseudo proc. 
instr.");//processing instructions From 5a9c5aae0b54235ecac85818d2e6dfa06c088df4 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Thu, 27 Jan 2011 00:42:58 +0000 Subject: [PATCH 026/185] LUCENE-2680: deletes were being double-applied git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1063936 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/src/java/org/apache/lucene/index/IndexWriter.java | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/lucene/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/src/java/org/apache/lucene/index/IndexWriter.java index 321daa0aa2b..613d47058f5 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexWriter.java +++ b/lucene/src/java/org/apache/lucene/index/IndexWriter.java @@ -2895,10 +2895,6 @@ public class IndexWriter implements Closeable { final synchronized void mergeInit(MergePolicy.OneMerge merge) throws IOException { boolean success = false; try { - // Lock order: IW -> BD - if (bufferedDeletes.applyDeletes(readerPool, segmentInfos, merge.segments)) { - checkpoint(); - } _mergeInit(merge); success = true; } finally { @@ -2929,6 +2925,11 @@ public class IndexWriter implements Closeable { if (merge.isAborted()) return; + // Lock order: IW -> BD + if (bufferedDeletes.applyDeletes(readerPool, segmentInfos, merge.segments)) { + checkpoint(); + } + // Bind a new segment name here so even with // ConcurrentMergePolicy we keep deterministic segment // names. From ecea5e669a150a3d0171f75f53e0e8ad1a8dbb84 Mon Sep 17 00:00:00 2001 From: Doron Cohen Date: Thu, 27 Jan 2011 09:26:04 +0000 Subject: [PATCH 027/185] LUCENE-914: Scorer.skipTo(current) remains on current for some scorers - javadoc fix. 
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064051 13f79535-47bb-0310-9956-ffa450edef68 --- .../src/java/org/apache/lucene/search/DocIdSetIterator.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lucene/src/java/org/apache/lucene/search/DocIdSetIterator.java b/lucene/src/java/org/apache/lucene/search/DocIdSetIterator.java index f10d04c0d48..39a73345f9b 100644 --- a/lucene/src/java/org/apache/lucene/search/DocIdSetIterator.java +++ b/lucene/src/java/org/apache/lucene/search/DocIdSetIterator.java @@ -78,10 +78,10 @@ public abstract class DocIdSetIterator { * * Some implementations are considerably more efficient than that. *

- * NOTE: certain implementations may return a different value (each - * time) if called several times in a row with the same target. + * NOTE: when target ≤ current implementations may opt + * not to advance beyond their current {@link #docID()}. *

- * NOTE: this method may be called with {@value #NO_MORE_DOCS} for + * NOTE: this method may be called with {@link #NO_MORE_DOCS} for * efficiency by some Scorers. If your implementation cannot efficiently * determine that it should exhaust, it is recommended that you check for that * value in each call to this method. From b24a26b251f0498d56312f7e0d62edc5678ae929 Mon Sep 17 00:00:00 2001 From: Shai Erera Date: Thu, 27 Jan 2011 10:10:36 +0000 Subject: [PATCH 028/185] LUCENE-2609: Generate jar containing test classes (trunk) git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064068 13f79535-47bb-0310-9956-ffa450edef68 --- dev-tools/testjar/testfiles | 24 ++++++++++++++++++++++++ lucene/build.xml | 12 ++++++++++++ lucene/common-build.xml | 4 ---- 3 files changed, 36 insertions(+), 4 deletions(-) create mode 100755 dev-tools/testjar/testfiles diff --git a/dev-tools/testjar/testfiles b/dev-tools/testjar/testfiles new file mode 100755 index 00000000000..84d8bfb2eab --- /dev/null +++ b/dev-tools/testjar/testfiles @@ -0,0 +1,24 @@ +core.test.files=\ + org/apache/lucene/util/_TestUtil.java,\ + org/apache/lucene/util/LineFileDocs.java,\ + org/apache/lucene/util/LuceneJUnitDividingSelector.java,\ + org/apache/lucene/util/LuceneJUnitResultFormatter.java,\ + org/apache/lucene/util/LuceneTestCase.java,\ + org/apache/lucene/util/automaton/AutomatonTestUtil.java,\ + org/apache/lucene/search/QueryUtils.java,\ + org/apache/lucene/analysis/BaseTokenStreamTestCase.java,\ + org/apache/lucene/analysis/MockAnalyzer.java,\ + org/apache/lucene/analysis/MockPayloadAnalyzer.java,\ + org/apache/lucene/analysis/MockTokenFilter.java,\ + org/apache/lucene/analysis/MockTokenizer.java,\ + org/apache/lucene/index/MockIndexInput.java,\ + org/apache/lucene/index/RandomIndexWriter.java,\ + org/apache/lucene/index/DocHelper.java,\ + org/apache/lucene/codecs/preflexrw/PreFlexFieldsWriter.java,\ + org/apache/lucene/codecs/preflexrw/PreFlexRWCodec.java,\ + 
org/apache/lucene/codecs/preflexrw/TermInfosWriter.java,\ + org/apache/lucene/codecs/mockrandom/MockRandomCodec.java,\ + org/apache/lucene/store/_TestHelper.java,\ + org/apache/lucene/store/MockDirectoryWrapper.java,\ + org/apache/lucene/store/MockIndexInputWrapper.java,\ + org/apache/lucene/store/MockIndexOutputWrapper.java,\ diff --git a/lucene/build.xml b/lucene/build.xml index 7827695aa9d..4205d3c756d 100644 --- a/lucene/build.xml +++ b/lucene/build.xml @@ -618,4 +618,16 @@ + + + + + + + + + + + diff --git a/lucene/common-build.xml b/lucene/common-build.xml index fe93b40b62b..be42d5bd648 100644 --- a/lucene/common-build.xml +++ b/lucene/common-build.xml @@ -308,10 +308,6 @@ - - - - ################################################################## From ad24f6a01fef7178dca09c7862acd134ea147a05 Mon Sep 17 00:00:00 2001 From: Shai Erera Date: Thu, 27 Jan 2011 10:28:23 +0000 Subject: [PATCH 029/185] LUCENE-1469: make isValid protected and not static git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064072 13f79535-47bb-0310-9956-ffa450edef68 --- .../src/java/org/apache/lucene/wordnet/SynonymMap.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynonymMap.java b/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynonymMap.java index 455c8118c5a..ee7eabd9cae 100644 --- a/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynonymMap.java +++ b/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynonymMap.java @@ -161,7 +161,7 @@ public class SynonymMap { return word.toLowerCase(); } - private static boolean isValid(String str) { + protected boolean isValid(String str) { for (int i=str.length(); --i >= 0; ) { if (!Character.isLetter(str.charAt(i))) return false; } @@ -395,4 +395,4 @@ public class SynonymMap { } } -} \ No newline at end of file +} From 4aa8a1f179d347b040d4b3fb541672254da88cf5 Mon Sep 17 00:00:00 2001 From: Shai Erera Date: Thu, 27 Jan 
2011 11:10:48 +0000 Subject: [PATCH 030/185] remove FilterManager git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064078 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/lucene/search/FilterManager.java | 203 ------------------ 1 file changed, 203 deletions(-) delete mode 100644 lucene/src/java/org/apache/lucene/search/FilterManager.java diff --git a/lucene/src/java/org/apache/lucene/search/FilterManager.java b/lucene/src/java/org/apache/lucene/search/FilterManager.java deleted file mode 100644 index 608f243890b..00000000000 --- a/lucene/src/java/org/apache/lucene/search/FilterManager.java +++ /dev/null @@ -1,203 +0,0 @@ -package org.apache.lucene.search; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.util.Comparator; -import java.util.Date; -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; -import java.util.TreeSet; - -import org.apache.lucene.util.ThreadInterruptedException; - -/** - * Filter caching singleton. It can be used - * to save filters locally for reuse. - * This class makes it possible to cache Filters even when using RMI, as it - * keeps the cache on the searcher side of the RMI connection. 
- * - * Also could be used as a persistent storage for any filter as long as the - * filter provides a proper hashCode(), as that is used as the key in the cache. - * - * The cache is periodically cleaned up from a separate thread to ensure the - * cache doesn't exceed the maximum size. - */ -public class FilterManager { - - protected static FilterManager manager; - - /** The default maximum number of Filters in the cache */ - protected static final int DEFAULT_CACHE_CLEAN_SIZE = 100; - /** The default frequency of cache cleanup */ - protected static final long DEFAULT_CACHE_SLEEP_TIME = 1000 * 60 * 10; - - /** The cache itself */ - protected Map cache; - /** Maximum allowed cache size */ - protected int cacheCleanSize; - /** Cache cleaning frequency */ - protected long cleanSleepTime; - /** Cache cleaner that runs in a separate thread */ - protected FilterCleaner filterCleaner; - - public synchronized static FilterManager getInstance() { - if (manager == null) { - manager = new FilterManager(); - } - return manager; - } - - /** - * Sets up the FilterManager singleton. - */ - protected FilterManager() { - cache = new HashMap(); - cacheCleanSize = DEFAULT_CACHE_CLEAN_SIZE; // Let the cache get to 100 items - cleanSleepTime = DEFAULT_CACHE_SLEEP_TIME; // 10 minutes between cleanings - - filterCleaner = new FilterCleaner(); - Thread fcThread = new Thread(filterCleaner); - // set to be a Daemon so it doesn't have to be stopped - fcThread.setDaemon(true); - fcThread.start(); - } - - /** - * Sets the max size that cache should reach before it is cleaned up - * @param cacheCleanSize maximum allowed cache size - */ - public void setCacheSize(int cacheCleanSize) { - this.cacheCleanSize = cacheCleanSize; - } - - /** - * Sets the cache cleaning frequency in milliseconds. 
- * @param cleanSleepTime cleaning frequency in milliseconds - */ - public void setCleanThreadSleepTime(long cleanSleepTime) { - this.cleanSleepTime = cleanSleepTime; - } - - /** - * Returns the cached version of the filter. Allows the caller to pass up - * a small filter but this will keep a persistent version around and allow - * the caching filter to do its job. - * - * @param filter The input filter - * @return The cached version of the filter - */ - public Filter getFilter(Filter filter) { - synchronized(cache) { - FilterItem fi = null; - fi = cache.get(Integer.valueOf(filter.hashCode())); - if (fi != null) { - fi.timestamp = new Date().getTime(); - return fi.filter; - } - cache.put(Integer.valueOf(filter.hashCode()), new FilterItem(filter)); - return filter; - } - } - - /** - * Holds the filter and the last time the filter was used, to make LRU-based - * cache cleaning possible. - * TODO: Clean this up when we switch to Java 1.5 - */ - protected class FilterItem { - public Filter filter; - public long timestamp; - - public FilterItem (Filter filter) { - this.filter = filter; - this.timestamp = new Date().getTime(); - } - } - - - /** - * Keeps the cache from getting too big. - * If we were using Java 1.5, we could use LinkedHashMap and we would not need this thread - * to clean out the cache. - * - * The SortedSet sortedFilterItems is used only to sort the items from the cache, - * so when it's time to clean up we have the TreeSet sort the FilterItems by - * timestamp. - * - * Removes 1.5 * the numbers of items to make the cache smaller. - * For example: - * If cache clean size is 10, and the cache is at 15, we would remove (15 - 10) * 1.5 = 7.5 round up to 8. - * This way we clean the cache a bit more, and avoid having the cache cleaner having to do it frequently. 
- */ - protected class FilterCleaner implements Runnable { - - private boolean running = true; - private TreeSet> sortedFilterItems; - - public FilterCleaner() { - sortedFilterItems = new TreeSet>(new Comparator>() { - public int compare(Map.Entry a, Map.Entry b) { - FilterItem fia = a.getValue(); - FilterItem fib = b.getValue(); - if ( fia.timestamp == fib.timestamp ) { - return 0; - } - // smaller timestamp first - if ( fia.timestamp < fib.timestamp ) { - return -1; - } - // larger timestamp last - return 1; - - } - }); - } - - public void run () { - while (running) { - - // sort items from oldest to newest - // we delete the oldest filters - if (cache.size() > cacheCleanSize) { - // empty the temporary set - sortedFilterItems.clear(); - synchronized (cache) { - sortedFilterItems.addAll(cache.entrySet()); - Iterator> it = sortedFilterItems.iterator(); - int numToDelete = (int) ((cache.size() - cacheCleanSize) * 1.5); - int counter = 0; - // loop over the set and delete all of the cache entries not used in a while - while (it.hasNext() && counter++ < numToDelete) { - Map.Entry entry = it.next(); - cache.remove(entry.getKey()); - } - } - // empty the set so we don't tie up the memory - sortedFilterItems.clear(); - } - // take a nap - try { - Thread.sleep(cleanSleepTime); - } catch (InterruptedException ie) { - throw new ThreadInterruptedException(ie); - } - } - } - } -} From 4c62240087b896ec5dde2383300c4a3d396d7693 Mon Sep 17 00:00:00 2001 From: Shai Erera Date: Thu, 27 Jan 2011 20:07:43 +0000 Subject: [PATCH 031/185] LUCENE-2891: merge to trunk git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064285 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/CHANGES.txt | 3 ++ .../org/apache/lucene/index/IndexReader.java | 5 ++- .../lucene/index/IndexWriterConfig.java | 9 +++-- .../lucene/index/TestIndexWriterConfig.java | 17 +++++++++ .../lucene/index/TestIndexWriterReader.java | 35 +++++++++++++++++++ 5 files changed, 65 insertions(+), 4 deletions(-) diff 
--git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index d131a02b8c6..e859ecd042c 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -701,6 +701,9 @@ Bug fixes * LUCENE-1846: DateTools now uses the US locale everywhere, so DateTools.round() is safe also in strange locales. (Uwe Schindler) +* LUCENE-2891: IndexWriterConfig did not accept -1 in setReaderTermIndexDivisor, + which can be used to prevent loading the terms index into memory. (Shai Erera) + New features * LUCENE-2128: Parallelized fetching document frequencies during weight diff --git a/lucene/src/java/org/apache/lucene/index/IndexReader.java b/lucene/src/java/org/apache/lucene/index/IndexReader.java index 684c14e628b..c73c514edf4 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexReader.java +++ b/lucene/src/java/org/apache/lucene/index/IndexReader.java @@ -415,7 +415,10 @@ public abstract class IndexReader implements Cloneable,Closeable { * memory. By setting this to a value > 1 you can reduce * memory usage, at the expense of higher latency when * loading a TermInfo. The default value is 1. Set this - * to -1 to skip loading the terms index entirely. + * to -1 to skip loading the terms index entirely. This is only useful in + * advanced situations when you will only .next() through all terms; + * attempts to seek will hit an exception. 
+ * * @throws CorruptIndexException if the index is corrupt * @throws IOException if there is a low-level IO error */ diff --git a/lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java b/lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java index 18daa12e06c..812306cf4e8 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java +++ b/lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java @@ -552,10 +552,13 @@ public final class IndexWriterConfig implements Cloneable { /** Sets the termsIndexDivisor passed to any readers that * IndexWriter opens, for example when applying deletes * or creating a near-real-time reader in {@link - * IndexWriter#getReader}. */ + * IndexWriter#getReader}. If you pass -1, the terms index + * won't be loaded by the readers. This is only useful in + * advanced situations when you will only .next() through + * all terms; attempts to seek will hit an exception. */ public IndexWriterConfig setReaderTermsIndexDivisor(int divisor) { - if (divisor <= 0) { - throw new IllegalArgumentException("divisor must be >= 1 (got " + divisor + ")"); + if (divisor <= 0 && divisor != -1) { + throw new IllegalArgumentException("divisor must be >= 1, or -1 (got " + divisor + ")"); } readerTermsIndexDivisor = divisor; return this; diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterConfig.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterConfig.java index 1e12d8531fd..c8c203bef01 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterConfig.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterConfig.java @@ -222,6 +222,23 @@ public class TestIndexWriterConfig extends LuceneTestCase { // this is expected } + // Test setReaderTermsIndexDivisor + try { + conf.setReaderTermsIndexDivisor(0); + fail("should not have succeeded to set termsIndexDivisor to 0"); + } catch (IllegalArgumentException e) { + // this is expected + } + + // Setting to -1 is ok + 
conf.setReaderTermsIndexDivisor(-1); + try { + conf.setReaderTermsIndexDivisor(-2); + fail("should not have succeeded to set termsIndexDivisor to < -1"); + } catch (IllegalArgumentException e) { + // this is expected + } + assertEquals(IndexWriterConfig.DEFAULT_MAX_THREAD_STATES, conf.getMaxThreadStates()); conf.setMaxThreadStates(5); assertEquals(5, conf.getMaxThreadStates()); diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java index d0883d32825..09c7e1972b6 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.io.PrintStream; import java.util.ArrayList; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Random; import java.util.concurrent.atomic.AtomicBoolean; @@ -30,6 +31,7 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.Field.Index; import org.apache.lucene.document.Field.Store; import org.apache.lucene.document.Field.TermVector; +import org.apache.lucene.index.codecs.CodecProvider; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -990,4 +992,37 @@ public class TestIndexWriterReader extends LuceneTestCase { dir.close(); assertTrue(didWarm.get()); } + + public void testNoTermsIndex() throws Exception { + // Some Codecs don't honor the ReaderTermsIndexDiviso, so skip the test if + // they're picked. + HashSet illegalCodecs = new HashSet(); + illegalCodecs.add("PreFlex"); + illegalCodecs.add("MockRandom"); + illegalCodecs.add("SimpleText"); + + IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, + new MockAnalyzer()).setReaderTermsIndexDivisor(-1); + + // Don't proceed if picked Codec is in the list of illegal ones. 
+ if (illegalCodecs.contains(conf.getCodecProvider().getFieldCodec("f"))) return; + + Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, conf); + Document doc = new Document(); + doc.add(new Field("f", "val", Store.NO, Index.ANALYZED)); + w.addDocument(doc); + IndexReader r = IndexReader.open(w).getSequentialSubReaders()[0]; + try { + r.termDocsEnum(null, "f", new BytesRef("val")); + fail("should have failed to seek since terms index was not loaded. Codec used " + conf.getCodecProvider().getFieldCodec("f")); + } catch (IllegalStateException e) { + // expected - we didn't load the term index + } finally { + r.close(); + w.close(); + dir.close(); + } + } + } From 946dc5c68adb747ee067daebdfd28a51e07a1e82 Mon Sep 17 00:00:00 2001 From: Yonik Seeley Date: Thu, 27 Jan 2011 22:00:14 +0000 Subject: [PATCH 032/185] SOLR-2263: Add ability for RawResponseWriter to stream binary files git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064330 13f79535-47bb-0310-9956-ffa450edef68 --- solr/CHANGES.txt | 3 +++ .../solr/response/RawResponseWriter.java | 23 ++++++++++++++++++- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index a4308254de6..d25f1ab1342 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -419,6 +419,9 @@ New Features * SOLR-2129: Added a Solr module for dynamic metadata extraction/indexing with Apache UIMA. See contrib/uima/README.txt for more information. (Tommaso Teofili via rmuir) +* SOLR-1283: Add ability for RawResponseWriter to stream binary files as well as + text files. 
(Eric Pugh via yonik) + Optimizations ---------------------- diff --git a/solr/src/java/org/apache/solr/response/RawResponseWriter.java b/solr/src/java/org/apache/solr/response/RawResponseWriter.java index 45e40b9bfd6..e34691c192d 100644 --- a/solr/src/java/org/apache/solr/response/RawResponseWriter.java +++ b/solr/src/java/org/apache/solr/response/RawResponseWriter.java @@ -18,6 +18,7 @@ package org.apache.solr.response; import java.io.IOException; +import java.io.OutputStream; import java.io.Reader; import java.io.Writer; @@ -44,7 +45,7 @@ import org.apache.solr.request.SolrQueryRequest; * @version $Id$ * @since solr 1.3 */ -public class RawResponseWriter implements QueryResponseWriter +public class RawResponseWriter implements BinaryQueryResponseWriter { /** * The key that should be used to add a ContentStream to the @@ -93,4 +94,24 @@ public class RawResponseWriter implements QueryResponseWriter getBaseWriter( request ).write( writer, request, response ); } } + +public void write(OutputStream out, SolrQueryRequest request, + SolrQueryResponse response) throws IOException { + Object obj = response.getValues().get( CONTENT ); + if( obj != null && (obj instanceof ContentStream ) ) { + // copy the contents to the writer... + ContentStream content = (ContentStream)obj; + java.io.InputStream in = content.getStream(); + try { + IOUtils.copy( in, out ); + } finally { + in.close(); + } + } + else { + //getBaseWriter( request ).write( writer, request, response ); + throw new IOException("did not find a CONTENT object"); + } + +} } From 261a161c293a6c48d695b304dff325655db68efe Mon Sep 17 00:00:00 2001 From: Koji Sekiguchi Date: Fri, 28 Jan 2011 00:19:47 +0000 Subject: [PATCH 033/185] SOLR-2263: correct the ticket number. 
SOLR-1283 -> SOLR-2263 git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064379 13f79535-47bb-0310-9956-ffa450edef68 --- solr/CHANGES.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index d25f1ab1342..7285ed6b69e 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -419,7 +419,7 @@ New Features * SOLR-2129: Added a Solr module for dynamic metadata extraction/indexing with Apache UIMA. See contrib/uima/README.txt for more information. (Tommaso Teofili via rmuir) -* SOLR-1283: Add ability for RawResponseWriter to stream binary files as well as +* SOLR-2263: Add ability for RawResponseWriter to stream binary files as well as text files. (Eric Pugh via yonik) Optimizations From e70311f3860158dc6d12c7a5e03714cc830d1e6a Mon Sep 17 00:00:00 2001 From: "Chris M. Hostetter" Date: Fri, 28 Jan 2011 00:34:40 +0000 Subject: [PATCH 034/185] SOLR-2085: Improve SolrJ behavior when FacetComponent comes before QueryComponent git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064386 13f79535-47bb-0310-9956-ffa450edef68 --- solr/CHANGES.txt | 4 ++++ .../org/apache/solr/client/solrj/response/QueryResponse.java | 4 +++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index 7285ed6b69e..f30713d253a 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -166,6 +166,10 @@ Bug Fixes * SOLR-2320: Fixed ReplicationHandler detail reporting for masters (hossman) +* SOLR-2085: Improve SolrJ behavior when FacetComponent comes before + QueryComponent (Tomas Salfischberger via hossman) + + Other Changes ---------------------- diff --git a/solr/src/solrj/org/apache/solr/client/solrj/response/QueryResponse.java b/solr/src/solrj/org/apache/solr/client/solrj/response/QueryResponse.java index c80334070af..f1259d1ccbc 100644 --- a/solr/src/solrj/org/apache/solr/client/solrj/response/QueryResponse.java +++ 
b/solr/src/solrj/org/apache/solr/client/solrj/response/QueryResponse.java @@ -105,7 +105,8 @@ public class QueryResponse extends SolrResponseBase } else if( "facet_counts".equals( n ) ) { _facetInfo = (NamedList) res.getVal( i ); - extractFacetInfo( _facetInfo ); + // extractFacetInfo inspects _results, so defer calling it + // in case it hasn't been populated yet. } else if( "debug".equals( n ) ) { _debugInfo = (NamedList) res.getVal( i ); @@ -128,6 +129,7 @@ public class QueryResponse extends SolrResponseBase extractTermsInfo( _termsInfo ); } } + if(_facetInfo != null) extractFacetInfo( _facetInfo ); } private void extractSpellCheckInfo(NamedList> spellInfo) { From f13449ce484ea3af99aa9de09633d8e9cf1e852f Mon Sep 17 00:00:00 2001 From: "Chris M. Hostetter" Date: Fri, 28 Jan 2011 01:02:28 +0000 Subject: [PATCH 035/185] SOLR-1940: Fix SolrDispatchFilter behavior when Content-Type is unknown git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064395 13f79535-47bb-0310-9956-ffa450edef68 --- solr/CHANGES.txt | 3 +++ .../src/org/apache/solr/servlet/SolrDispatchFilter.java | 4 +++- solr/src/webapp/web/admin/index.jsp | 4 ++-- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index f30713d253a..d1275cf0f39 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -169,6 +169,9 @@ Bug Fixes * SOLR-2085: Improve SolrJ behavior when FacetComponent comes before QueryComponent (Tomas Salfischberger via hossman) +* SOLR-1940: Fix SolrDispatchFilter behavior when Content-Type is + unknown (Lance Norskog and hossman) + Other Changes ---------------------- diff --git a/solr/src/webapp/src/org/apache/solr/servlet/SolrDispatchFilter.java b/solr/src/webapp/src/org/apache/solr/servlet/SolrDispatchFilter.java index 93bdddc1720..0dcc8373d95 100644 --- a/solr/src/webapp/src/org/apache/solr/servlet/SolrDispatchFilter.java +++ b/solr/src/webapp/src/org/apache/solr/servlet/SolrDispatchFilter.java @@ -315,7 +315,9 @@ public class 
SolrDispatchFilter implements Filter sendError((HttpServletResponse) response, solrRsp.getException()); } else { // Now write it out - response.setContentType(responseWriter.getContentType(solrReq, solrRsp)); + final String ct = responseWriter.getContentType(solrReq, solrRsp); + // don't call setContentType on null + if (null != ct) response.setContentType(ct); if (Method.HEAD != reqMethod) { if (responseWriter instanceof BinaryQueryResponseWriter) { BinaryQueryResponseWriter binWriter = (BinaryQueryResponseWriter) responseWriter; diff --git a/solr/src/webapp/web/admin/index.jsp b/solr/src/webapp/web/admin/index.jsp index b38c6884b82..a34a2b0c876 100644 --- a/solr/src/webapp/web/admin/index.jsp +++ b/solr/src/webapp/web/admin/index.jsp @@ -39,10 +39,10 @@ <% if (null != core.getSchemaResource()) { %> - [Schema] + [Schema] <% } if (null != core.getConfigResource()) { %> - [Config] + [Config] <% } %> [Analysis] [Schema Browser] <%if(replicationhandler){%>[Replication]<%}%> From 4f1fe2b66db4a284152ba5e4e7740cff266a8f25 Mon Sep 17 00:00:00 2001 From: Shai Erera Date: Fri, 28 Jan 2011 05:25:35 +0000 Subject: [PATCH 036/185] LUCENE-2895: MockRandomCodec loads termsIndex even if termsIndexDivisor is set to -1 git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064463 13f79535-47bb-0310-9956-ffa450edef68 --- .../test/org/apache/lucene/index/TestIndexWriterReader.java | 5 +---- .../lucene/index/codecs/mockrandom/MockRandomCodec.java | 6 +++++- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java index 09c7e1972b6..6758e89b5d2 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java @@ -31,7 +31,6 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.Field.Index; import 
org.apache.lucene.document.Field.Store; import org.apache.lucene.document.Field.TermVector; -import org.apache.lucene.index.codecs.CodecProvider; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -994,16 +993,14 @@ public class TestIndexWriterReader extends LuceneTestCase { } public void testNoTermsIndex() throws Exception { - // Some Codecs don't honor the ReaderTermsIndexDiviso, so skip the test if + // Some Codecs don't honor the ReaderTermsIndexDivisor, so skip the test if // they're picked. HashSet illegalCodecs = new HashSet(); illegalCodecs.add("PreFlex"); - illegalCodecs.add("MockRandom"); illegalCodecs.add("SimpleText"); IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setReaderTermsIndexDivisor(-1); - // Don't proceed if picked Codec is in the list of illegal ones. if (illegalCodecs.contains(conf.getCodecProvider().getFieldCodec("f"))) return; diff --git a/lucene/src/test/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java b/lucene/src/test/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java index d5554512be7..d00854ec9f1 100644 --- a/lucene/src/test/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java +++ b/lucene/src/test/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java @@ -236,7 +236,11 @@ public class MockRandomCodec extends Codec { try { if (random.nextBoolean()) { - state.termsIndexDivisor = _TestUtil.nextInt(random, 1, 10); + // if termsIndexDivisor is set to -1, we should not touch it. It means a + // test explicitly instructed not to load the terms index. 
+ if (state.termsIndexDivisor != -1) { + state.termsIndexDivisor = _TestUtil.nextInt(random, 1, 10); + } if (LuceneTestCase.VERBOSE) { System.out.println("MockRandomCodec: fixed-gap terms index (divisor=" + state.termsIndexDivisor + ")"); } From a7a9be923ec56c2918294da427f7f467c9a82d7e Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 28 Jan 2011 15:25:33 +0000 Subject: [PATCH 037/185] Only create a Filter list if there is a non-empty fq parameter git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064730 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/solr/handler/component/QueryComponent.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/solr/src/java/org/apache/solr/handler/component/QueryComponent.java b/solr/src/java/org/apache/solr/handler/component/QueryComponent.java index f98d65493ca..10049937917 100644 --- a/solr/src/java/org/apache/solr/handler/component/QueryComponent.java +++ b/solr/src/java/org/apache/solr/handler/component/QueryComponent.java @@ -107,7 +107,6 @@ public class QueryComponent extends SearchComponent List filters = rb.getFilters(); if (filters==null) { filters = new ArrayList(fqs.length); - rb.setFilters( filters ); } for (String fq : fqs) { if (fq != null && fq.trim().length()!=0) { @@ -115,6 +114,12 @@ public class QueryComponent extends SearchComponent filters.add(fqp.getQuery()); } } + // only set the filters if they are not empty otherwise + // fq=&someotherParam= will trigger all docs filter for every request + // if filter cache is disabled + if (!filters.isEmpty()) { + rb.setFilters( filters ); + } } } catch (ParseException e) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e); From 92874ddaa629e3fdbb4cae4b4f414246676df393 Mon Sep 17 00:00:00 2001 From: Koji Sekiguchi Date: Fri, 28 Jan 2011 15:37:43 +0000 Subject: [PATCH 038/185] SOLR-860: Add debug output for MoreLikeThis git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064735 
13f79535-47bb-0310-9956-ffa450edef68 --- solr/CHANGES.txt | 2 + .../solr/handler/MoreLikeThisHandler.java | 57 ++++++++++++------- .../component/MoreLikeThisComponent.java | 57 +++++++++++++++++-- .../solr/handler/MoreLikeThisHandlerTest.java | 14 ++++- 4 files changed, 102 insertions(+), 28 deletions(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index d1275cf0f39..8a3ad22e8e1 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -429,6 +429,8 @@ New Features * SOLR-2263: Add ability for RawResponseWriter to stream binary files as well as text files. (Eric Pugh via yonik) +* SOLR-860: Add debug output for MoreLikeThis. (koji) + Optimizations ---------------------- diff --git a/solr/src/java/org/apache/solr/handler/MoreLikeThisHandler.java b/solr/src/java/org/apache/solr/handler/MoreLikeThisHandler.java index e367d8922f4..fcd41e24dd9 100644 --- a/solr/src/java/org/apache/solr/handler/MoreLikeThisHandler.java +++ b/solr/src/java/org/apache/solr/handler/MoreLikeThisHandler.java @@ -232,7 +232,7 @@ public class MoreLikeThisHandler extends RequestHandlerBase // Copied from StandardRequestHandler... perhaps it should be added to doStandardDebug? 
if (dbg == true) { try { - NamedList dbgInfo = SolrPluginUtils.doStandardDebug(req, q, mlt.mltquery, mltDocs.docList, dbgQuery, dbgResults); + NamedList dbgInfo = SolrPluginUtils.doStandardDebug(req, q, mlt.getRawMLTQuery(), mltDocs.docList, dbgQuery, dbgResults); if (null != dbgInfo) { if (null != filters) { dbgInfo.add("filter_queries",req.getParams().getParams(CommonParams.FQ)); @@ -279,8 +279,6 @@ public class MoreLikeThisHandler extends RequestHandlerBase final boolean needDocSet; Map boostFields; - Query mltquery; // expose this for debugging - public MoreLikeThisHelper( SolrParams params, SolrIndexSearcher searcher ) { this.searcher = searcher; @@ -310,9 +308,26 @@ public class MoreLikeThisHandler extends RequestHandlerBase boostFields = SolrPluginUtils.parseFieldBoosts(params.getParams(MoreLikeThisParams.QF)); } - private void setBoosts(Query mltquery) { + private Query rawMLTQuery; + private Query boostedMLTQuery; + private BooleanQuery realMLTQuery; + + public Query getRawMLTQuery(){ + return rawMLTQuery; + } + + public Query getBoostedMLTQuery(){ + return boostedMLTQuery; + } + + public Query getRealMLTQuery(){ + return realMLTQuery; + } + + private Query getBoostedQuery(Query mltquery) { + BooleanQuery boostedQuery = (BooleanQuery)mltquery.clone(); if (boostFields.size() > 0) { - List clauses = ((BooleanQuery)mltquery).clauses(); + List clauses = boostedQuery.clauses(); for( Object o : clauses ) { TermQuery q = (TermQuery)((BooleanClause)o).getQuery(); Float b = this.boostFields.get(q.getTerm().field()); @@ -321,49 +336,51 @@ public class MoreLikeThisHandler extends RequestHandlerBase } } } + return boostedQuery; } public DocListAndSet getMoreLikeThis( int id, int start, int rows, List filters, List terms, int flags ) throws IOException { Document doc = reader.document(id); - mltquery = mlt.like(id); - setBoosts(mltquery); + rawMLTQuery = mlt.like(id); + boostedMLTQuery = getBoostedQuery( rawMLTQuery ); if( terms != null ) { - 
fillInterestingTermsFromMLTQuery( mltquery, terms ); + fillInterestingTermsFromMLTQuery( rawMLTQuery, terms ); } // exclude current document from results - BooleanQuery mltQuery = new BooleanQuery(); - mltQuery.add(mltquery, BooleanClause.Occur.MUST); - mltQuery.add( + realMLTQuery = new BooleanQuery(); + realMLTQuery.add(boostedMLTQuery, BooleanClause.Occur.MUST); + realMLTQuery.add( new TermQuery(new Term(uniqueKeyField.getName(), uniqueKeyField.getType().storedToIndexed(doc.getFieldable(uniqueKeyField.getName())))), BooleanClause.Occur.MUST_NOT); DocListAndSet results = new DocListAndSet(); if (this.needDocSet) { - results = searcher.getDocListAndSet(mltQuery, filters, null, start, rows, flags); + results = searcher.getDocListAndSet(realMLTQuery, filters, null, start, rows, flags); } else { - results.docList = searcher.getDocList(mltQuery, filters, null, start, rows, flags); + results.docList = searcher.getDocList(realMLTQuery, filters, null, start, rows, flags); } return results; } public DocListAndSet getMoreLikeThis( Reader reader, int start, int rows, List filters, List terms, int flags ) throws IOException { - mltquery = mlt.like(reader); - setBoosts(mltquery); + rawMLTQuery = mlt.like(reader); + boostedMLTQuery = getBoostedQuery( rawMLTQuery ); if( terms != null ) { - fillInterestingTermsFromMLTQuery( mltquery, terms ); + fillInterestingTermsFromMLTQuery( boostedMLTQuery, terms ); } DocListAndSet results = new DocListAndSet(); if (this.needDocSet) { - results = searcher.getDocListAndSet(mltquery, filters, null, start, rows, flags); + results = searcher.getDocListAndSet( boostedMLTQuery, filters, null, start, rows, flags); } else { - results.docList = searcher.getDocList(mltquery, filters, null, start, rows, flags); + results.docList = searcher.getDocList( boostedMLTQuery, filters, null, start, rows, flags); } return results; } - + + @Deprecated public NamedList getMoreLikeThese( DocList docs, int rows, int flags ) throws IOException { IndexSchema schema = 
searcher.getSchema(); @@ -382,7 +399,7 @@ public class MoreLikeThisHandler extends RequestHandlerBase private void fillInterestingTermsFromMLTQuery( Query query, List terms ) { - List clauses = ((BooleanQuery)mltquery).clauses(); + List clauses = ((BooleanQuery)query).clauses(); for( Object o : clauses ) { TermQuery q = (TermQuery)((BooleanClause)o).getQuery(); InterestingTerm it = new InterestingTerm(); diff --git a/solr/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java b/solr/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java index 61c97d1faf4..8851ff7761d 100644 --- a/solr/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java +++ b/solr/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java @@ -23,8 +23,12 @@ import java.net.URL; import org.apache.solr.common.params.MoreLikeThisParams; import org.apache.solr.common.params.SolrParams; import org.apache.solr.common.util.NamedList; +import org.apache.solr.common.util.SimpleOrderedMap; import org.apache.solr.handler.MoreLikeThisHandler; +import org.apache.solr.schema.IndexSchema; +import org.apache.solr.search.DocIterator; import org.apache.solr.search.DocList; +import org.apache.solr.search.DocListAndSet; import org.apache.solr.search.SolrIndexSearcher; /** @@ -50,18 +54,59 @@ public class MoreLikeThisComponent extends SearchComponent if( p.getBool( MoreLikeThisParams.MLT, false ) ) { SolrIndexSearcher searcher = rb.req.getSearcher(); - MoreLikeThisHandler.MoreLikeThisHelper mlt - = new MoreLikeThisHandler.MoreLikeThisHelper( p, searcher ); - - int mltcount = p.getInt( MoreLikeThisParams.DOC_COUNT, 5 ); - NamedList sim = mlt.getMoreLikeThese( - rb.getResults().docList, mltcount, rb.getFieldFlags() ); + NamedList sim = getMoreLikeThese( rb, searcher, + rb.getResults().docList, rb.getFieldFlags() ); // TODO ???? add this directly to the response? 
rb.rsp.add( "moreLikeThis", sim ); } } + NamedList getMoreLikeThese( ResponseBuilder rb, SolrIndexSearcher searcher, + DocList docs, int flags ) throws IOException { + SolrParams p = rb.req.getParams(); + IndexSchema schema = searcher.getSchema(); + MoreLikeThisHandler.MoreLikeThisHelper mltHelper + = new MoreLikeThisHandler.MoreLikeThisHelper( p, searcher ); + NamedList mlt = new SimpleOrderedMap(); + DocIterator iterator = docs.iterator(); + + SimpleOrderedMap dbg = null; + if( rb.isDebug() ){ + dbg = new SimpleOrderedMap(); + } + + while( iterator.hasNext() ) { + int id = iterator.nextDoc(); + int rows = p.getInt( MoreLikeThisParams.DOC_COUNT, 5 ); + DocListAndSet sim = mltHelper.getMoreLikeThis( id, 0, rows, null, null, flags ); + String name = schema.printableUniqueKey( searcher.doc( id ) ); + mlt.add(name, sim.docList); + + if( dbg != null ){ + SimpleOrderedMap docDbg = new SimpleOrderedMap(); + docDbg.add( "rawMLTQuery", mltHelper.getRawMLTQuery().toString() ); + docDbg.add( "boostedMLTQuery", mltHelper.getBoostedMLTQuery().toString() ); + docDbg.add( "realMLTQuery", mltHelper.getRealMLTQuery().toString() ); + SimpleOrderedMap explains = new SimpleOrderedMap(); + DocIterator mltIte = sim.docList.iterator(); + while( mltIte.hasNext() ){ + int mltid = mltIte.nextDoc(); + String key = schema.printableUniqueKey( searcher.doc( mltid ) ); + explains.add( key, searcher.explain( mltHelper.getRealMLTQuery(), mltid ) ); + } + docDbg.add( "explain", explains ); + dbg.add( name, docDbg ); + } + } + + // add debug information + if( dbg != null ){ + rb.addDebugInfo( "moreLikeThis", dbg ); + } + return mlt; + } + ///////////////////////////////////////////// /// SolrInfoMBean //////////////////////////////////////////// diff --git a/solr/src/test/org/apache/solr/handler/MoreLikeThisHandlerTest.java b/solr/src/test/org/apache/solr/handler/MoreLikeThisHandlerTest.java index 63b1edde582..6dbae21f244 100644 --- 
a/solr/src/test/org/apache/solr/handler/MoreLikeThisHandlerTest.java +++ b/solr/src/test/org/apache/solr/handler/MoreLikeThisHandlerTest.java @@ -94,7 +94,17 @@ public class MoreLikeThisHandlerTest extends SolrTestCaseJ4 { assertQ("morelike this - harrison ford",mltreq ,"//result/doc[1]/int[@name='id'][.='45']"); + // test MoreLikeThis debug + params.set(CommonParams.DEBUG_QUERY, "true"); + assertQ("morelike this - harrison ford",mltreq + ,"//lst[@name='debug']/lst[@name='moreLikeThis']/lst[@name='44']/str[@name='rawMLTQuery']" + ,"//lst[@name='debug']/lst[@name='moreLikeThis']/lst[@name='44']/str[@name='boostedMLTQuery']" + ,"//lst[@name='debug']/lst[@name='moreLikeThis']/lst[@name='44']/str[@name='realMLTQuery']" + ,"//lst[@name='debug']/lst[@name='moreLikeThis']/lst[@name='44']/lst[@name='explain']/str[@name='45']" + ); + // test that qparser plugins work + params.remove(CommonParams.DEBUG_QUERY); params.set(CommonParams.Q, "{!field f=id}44"); assertQ(mltreq ,"//result/doc[1]/int[@name='id'][.='45']"); @@ -112,9 +122,9 @@ public class MoreLikeThisHandlerTest extends SolrTestCaseJ4 { assertQ(mltreq ,"//result/doc[1]/int[@name='id'][.='45']"); - // test that debugging works + // test that debugging works (test for MoreLikeThis*Handler*) params.set(CommonParams.QT, "/mlt"); - params.set("debugQuery", "true"); + params.set(CommonParams.DEBUG_QUERY, "true"); assertQ(mltreq ,"//result/doc[1]/int[@name='id'][.='45']" ,"//lst[@name='debug']/lst[@name='explain']" From da24882340857b782a4d44939730bfbe53f4a3a3 Mon Sep 17 00:00:00 2001 From: Yonik Seeley Date: Fri, 28 Jan 2011 15:45:55 +0000 Subject: [PATCH 039/185] docs: move changes entry to 3.1 git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064738 13f79535-47bb-0310-9956-ffa450edef68 --- solr/CHANGES.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index 8a3ad22e8e1..815b140524b 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -106,8 
+106,6 @@ New Features Adding a parameter NOW= to the request will override the current time. (Peter Sturge, yonik) -* SOLR-2325: Allow tagging and exlcusion of main query for faceting. (yonik) - Optimizations ---------------------- @@ -426,6 +424,8 @@ New Features * SOLR-2129: Added a Solr module for dynamic metadata extraction/indexing with Apache UIMA. See contrib/uima/README.txt for more information. (Tommaso Teofili via rmuir) +* SOLR-2325: Allow tagging and exlcusion of main query for faceting. (yonik) + * SOLR-2263: Add ability for RawResponseWriter to stream binary files as well as text files. (Eric Pugh via yonik) From 99a60c33c5ef7e90fe9580bc7f555536f4025713 Mon Sep 17 00:00:00 2001 From: Yonik Seeley Date: Fri, 28 Jan 2011 17:07:50 +0000 Subject: [PATCH 040/185] SOLR-2265: update jetty to 6.1.26 (missed start.jar) git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064781 13f79535-47bb-0310-9956-ffa450edef68 --- solr/example/start.jar | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/solr/example/start.jar b/solr/example/start.jar index 2bd8f2d6eb5..b2fca2178f2 100755 --- a/solr/example/start.jar +++ b/solr/example/start.jar @@ -1,2 +1,2 @@ -AnyObjectId[2a4a9a163d79f9214d9b1d9c0dbb611f741d8f16] was removed in git history. +AnyObjectId[d3a94bcfae630a90d4103437bd3c2da0d37d98c9] was removed in git history. Apache SVN contains full history. 
\ No newline at end of file From 0d9559e1b4f72c7a2c2d91350f0797bf8e1a0955 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Fri, 28 Jan 2011 19:55:24 +0000 Subject: [PATCH 041/185] LUCENE-1866: enable rat-sources for all lucene/contrib/modules/solr src and tests git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064844 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/build.xml | 16 ---------------- lucene/common-build.xml | 16 ++++++++++++++++ solr/build.xml | 2 ++ 3 files changed, 18 insertions(+), 16 deletions(-) diff --git a/lucene/build.xml b/lucene/build.xml index 4205d3c756d..3fe5b815403 100644 --- a/lucene/build.xml +++ b/lucene/build.xml @@ -602,22 +602,6 @@ - - - - - - - - - - - - - - - diff --git a/lucene/common-build.xml b/lucene/common-build.xml index be42d5bd648..86e31dd6382 100644 --- a/lucene/common-build.xml +++ b/lucene/common-build.xml @@ -638,6 +638,22 @@ + + + + + + + + + + + + + + + diff --git a/solr/build.xml b/solr/build.xml index a29cb50ab62..c3ef9682c18 100644 --- a/solr/build.xml +++ b/solr/build.xml @@ -963,6 +963,8 @@ + + From 06c4c204c80e3932aca22f4e7bba794d73d73cab Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Fri, 28 Jan 2011 23:12:22 +0000 Subject: [PATCH 042/185] LUCENE-2895: MockRandomCodec loads termsIndex even if termsIndexDivisor is set to -1 git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064926 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/src/test/org/apache/lucene/index/TestSegmentInfo.java | 1 + .../lucene/index/codecs/mockrandom/MockRandomCodec.java | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/lucene/src/test/org/apache/lucene/index/TestSegmentInfo.java b/lucene/src/test/org/apache/lucene/index/TestSegmentInfo.java index 58b7cb8d9a6..dbd9ddbf1d0 100644 --- a/lucene/src/test/org/apache/lucene/index/TestSegmentInfo.java +++ b/lucene/src/test/org/apache/lucene/index/TestSegmentInfo.java @@ -31,6 +31,7 @@ public class TestSegmentInfo extends LuceneTestCase { 
Directory dir = newDirectory(); IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()); IndexWriter writer = new IndexWriter(dir, conf); + writer.setInfoStream(VERBOSE ? System.out : null); Document doc = new Document(); doc.add(new Field("a", "value", Store.YES, Index.ANALYZED)); writer.addDocument(doc); diff --git a/lucene/src/test/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java b/lucene/src/test/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java index d00854ec9f1..745c619cb87 100644 --- a/lucene/src/test/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java +++ b/lucene/src/test/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java @@ -260,7 +260,9 @@ public class MockRandomCodec extends Codec { if (LuceneTestCase.VERBOSE) { System.out.println("MockRandomCodec: variable-gap terms index (divisor=" + state.termsIndexDivisor + ")"); } - state.termsIndexDivisor = _TestUtil.nextInt(random, 1, 10); + if (state.termsIndexDivisor != -1) { + state.termsIndexDivisor = _TestUtil.nextInt(random, 1, 10); + } indexReader = new VariableGapTermsIndexReader(state.dir, state.fieldInfos, state.segmentInfo.name, From f12b4ab2ff281070cece87158cf04a467cedf599 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Fri, 28 Jan 2011 23:12:48 +0000 Subject: [PATCH 043/185] fix typo git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064927 13f79535-47bb-0310-9956-ffa450edef68 --- .../src/test/org/apache/lucene/index/TestDocsAndPositions.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java b/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java index 654e33dfb52..a63e63db589 100644 --- a/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java +++ b/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java @@ -111,7 +111,7 @@ public class TestDocsAndPositions extends LuceneTestCase { * 
random. All positions for that number are saved up front and compared to * the enums positions. */ - public void testRandomPositons() throws IOException { + public void testRandomPositions() throws IOException { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer( From 5d8790eb85220b082ea5b288be51392c2613ecdc Mon Sep 17 00:00:00 2001 From: Yonik Seeley Date: Sat, 29 Jan 2011 02:24:53 +0000 Subject: [PATCH 044/185] SOLR-1983 SOLR-2156: set replication flags and cleanup git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1064942 13f79535-47bb-0310-9956-ffa450edef68 --- solr/CHANGES.txt | 5 +++++ solr/src/java/org/apache/solr/handler/SnapPuller.java | 9 ++++++--- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index 815b140524b..5022a1ed447 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -653,6 +653,11 @@ Bug Fixes * SOLR-2261: fix velocity template layout.vm that referred to an older version of jquery. (Eric Pugh via rmuir) +* SOLR-1983: snappuller fails when modifiedConfFiles is not empty and + full copy of index is needed. 
(Alexander Kanarsky via yonik) + +* SOLR-2156: SnapPuller fails to clean Old Index Directories on Full Copy + (Jayendra Patil via yonik) Other Changes ---------------------- diff --git a/solr/src/java/org/apache/solr/handler/SnapPuller.java b/solr/src/java/org/apache/solr/handler/SnapPuller.java index 88ac16671cf..b93d34f389b 100644 --- a/solr/src/java/org/apache/solr/handler/SnapPuller.java +++ b/solr/src/java/org/apache/solr/handler/SnapPuller.java @@ -300,15 +300,17 @@ public class SnapPuller { isFullCopyNeeded = true; successfulInstall = false; boolean deleteTmpIdxDir = true; + File indexDir = null ; try { - File indexDir = new File(core.getIndexDir()); + indexDir = new File(core.getIndexDir()); downloadIndexFiles(isFullCopyNeeded, tmpIndexDir, latestVersion); LOG.info("Total time taken for download : " + ((System.currentTimeMillis() - replicationStartTime) / 1000) + " secs"); Collection> modifiedConfFiles = getModifiedConfFiles(confFilesToDownload); if (!modifiedConfFiles.isEmpty()) { downloadConfFiles(confFilesToDownload, latestVersion); if (isFullCopyNeeded) { - modifyIndexProps(tmpIndexDir.getName()); + successfulInstall = modifyIndexProps(tmpIndexDir.getName()); + deleteTmpIdxDir = false; } else { successfulInstall = copyIndexFiles(tmpIndexDir, indexDir); } @@ -339,7 +341,8 @@ public class SnapPuller { } catch (Exception e) { throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Index fetch failed : ", e); } finally { - if(deleteTmpIdxDir) delTree(tmpIndexDir); + if (deleteTmpIdxDir) delTree(tmpIndexDir); + else delTree(indexDir); } return successfulInstall; } finally { From cbf8d878f7efa76a063a11a00db9f6fea9ec874c Mon Sep 17 00:00:00 2001 From: Koji Sekiguchi Date: Sat, 29 Jan 2011 14:30:45 +0000 Subject: [PATCH 045/185] SOLR-792: fix comparison for mincount git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065020 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/solr/handler/component/PivotFacetHelper.java | 4 ++-- 1 file 
changed, 2 insertions(+), 2 deletions(-) diff --git a/solr/src/java/org/apache/solr/handler/component/PivotFacetHelper.java b/solr/src/java/org/apache/solr/handler/component/PivotFacetHelper.java index b47be4fe6a4..c00add58464 100644 --- a/solr/src/java/org/apache/solr/handler/component/PivotFacetHelper.java +++ b/solr/src/java/org/apache/solr/handler/component/PivotFacetHelper.java @@ -102,7 +102,7 @@ public class PivotFacetHelper List> values = new ArrayList>( superFacets.size() ); for (Map.Entry kv : superFacets) { // Only sub-facet if parent facet has positive count - still may not be any values for the sub-field though - if (kv.getValue() > minMatch ) { + if (kv.getValue() >= minMatch ) { // don't reuse the same BytesRef each time since we will be constructing Term // objects that will most likely be cached. BytesRef termval = new BytesRef(); @@ -122,7 +122,7 @@ public class PivotFacetHelper SimpleFacets sf = getFacetImplementation(rb.req, subset, rb.req.getParams()); NamedList nl = sf.getTermCounts(subField); - if (nl.size() > minMatch ) { + if (nl.size() >= minMatch ) { pivot.add( "pivot", doPivots( nl, subField, nextField, fnames, rb, subset, minMatch ) ); values.add( pivot ); // only add response if there are some counts } From 5342d8676da8f794efbcf93b5001b107514f1363 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Sat, 29 Jan 2011 16:22:59 +0000 Subject: [PATCH 046/185] LUCENE-2898: fix CMS throttling to be independent of number of incoming producer threads; some defensive concurrency fixes for SegmentInfo git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065059 13f79535-47bb-0310-9956-ffa450edef68 --- .../index/ConcurrentMergeScheduler.java | 48 +++++++++---------- .../org/apache/lucene/index/SegmentInfo.java | 27 +++++++---- 2 files changed, 41 insertions(+), 34 deletions(-) diff --git a/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java b/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java index 
55d682d593c..1927235cdce 100644 --- a/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java +++ b/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java @@ -308,10 +308,31 @@ public class ConcurrentMergeScheduler extends MergeScheduler { // pending merges, until it's empty: while (true) { + synchronized(this) { + long startStallTime = 0; + while (mergeThreadCount() >= 1+maxMergeCount) { + startStallTime = System.currentTimeMillis(); + if (verbose()) { + message(" too many merges; stalling..."); + } + try { + wait(); + } catch (InterruptedException ie) { + throw new ThreadInterruptedException(ie); + } + } + + if (verbose()) { + if (startStallTime != 0) { + message(" stalled for " + (System.currentTimeMillis()-startStallTime) + " msec"); + } + } + } + + // TODO: we could be careful about which merges to do in // the BG (eg maybe the "biggest" ones) vs FG, which // merges to do first (the easiest ones?), etc. - MergePolicy.OneMerge merge = writer.getNextMerge(); if (merge == null) { if (verbose()) @@ -326,32 +347,11 @@ public class ConcurrentMergeScheduler extends MergeScheduler { boolean success = false; try { synchronized(this) { - final MergeThread merger; - long startStallTime = 0; - while (mergeThreadCount() >= maxMergeCount) { - startStallTime = System.currentTimeMillis(); - if (verbose()) { - message(" too many merges; stalling..."); - } - try { - wait(); - } catch (InterruptedException ie) { - throw new ThreadInterruptedException(ie); - } - } - - if (verbose()) { - if (startStallTime != 0) { - message(" stalled for " + (System.currentTimeMillis()-startStallTime) + " msec"); - } - message(" consider merge " + merge.segString(dir)); - } - - assert mergeThreadCount() < maxMergeCount; + message(" consider merge " + merge.segString(dir)); // OK to spawn a new merge thread to handle this // merge: - merger = getMergeThread(writer, merge); + final MergeThread merger = getMergeThread(writer, merge); mergeThreads.add(merger); if 
(verbose()) { message(" launch new thread [" + merger.getName() + "]"); diff --git a/lucene/src/java/org/apache/lucene/index/SegmentInfo.java b/lucene/src/java/org/apache/lucene/index/SegmentInfo.java index a3dfaac25a7..e668fb9a279 100644 --- a/lucene/src/java/org/apache/lucene/index/SegmentInfo.java +++ b/lucene/src/java/org/apache/lucene/index/SegmentInfo.java @@ -66,11 +66,11 @@ public final class SegmentInfo { private boolean isCompoundFile; - private List files; // cached list of files that this segment uses + private volatile List files; // cached list of files that this segment uses // in the Directory - private long sizeInBytesNoStore = -1; // total byte size of all but the store files (computed on demand) - private long sizeInBytesWithStore = -1; // total byte size of all of our files (computed on demand) + private volatile long sizeInBytesNoStore = -1; // total byte size of all but the store files (computed on demand) + private volatile long sizeInBytesWithStore = -1; // total byte size of all of our files (computed on demand) private int docStoreOffset; // if this segment shares stored fields & vectors, this // offset is where in that file this segment's docs begin @@ -241,24 +241,31 @@ public final class SegmentInfo { */ public long sizeInBytes(boolean includeDocStores) throws IOException { if (includeDocStores) { - if (sizeInBytesWithStore != -1) return sizeInBytesWithStore; - sizeInBytesWithStore = 0; + if (sizeInBytesWithStore != -1) { + return sizeInBytesWithStore; + } + long sum = 0; for (final String fileName : files()) { - // We don't count bytes used by a shared doc store against this segment + // We don't count bytes used by a shared doc store + // against this segment if (docStoreOffset == -1 || !IndexFileNames.isDocStoreFile(fileName)) { - sizeInBytesWithStore += dir.fileLength(fileName); + sum += dir.fileLength(fileName); } } + sizeInBytesWithStore = sum; return sizeInBytesWithStore; } else { - if (sizeInBytesNoStore != -1) return 
sizeInBytesNoStore; - sizeInBytesNoStore = 0; + if (sizeInBytesNoStore != -1) { + return sizeInBytesNoStore; + } + long sum = 0; for (final String fileName : files()) { if (IndexFileNames.isDocStoreFile(fileName)) { continue; } - sizeInBytesNoStore += dir.fileLength(fileName); + sum += dir.fileLength(fileName); } + sizeInBytesNoStore = sum; return sizeInBytesNoStore; } } From d4c9a814527ec5c58d62c42a65b23b39e508a505 Mon Sep 17 00:00:00 2001 From: Yonik Seeley Date: Sat, 29 Jan 2011 16:50:51 +0000 Subject: [PATCH 047/185] SOLR-792: fix test to match fix to mincount git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065067 13f79535-47bb-0310-9956-ffa450edef68 --- .../test/org/apache/solr/client/solrj/SolrExampleTests.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/solr/src/test/org/apache/solr/client/solrj/SolrExampleTests.java b/solr/src/test/org/apache/solr/client/solrj/SolrExampleTests.java index f79a622f06e..216470e44cc 100644 --- a/solr/src/test/org/apache/solr/client/solrj/SolrExampleTests.java +++ b/solr/src/test/org/apache/solr/client/solrj/SolrExampleTests.java @@ -642,12 +642,12 @@ abstract public class SolrExampleTests extends SolrJettyTestBase pivot = pivots.getVal( 2 ); assertEquals( "features,cat,inStock", pivots.getName( 2 ) ); assertEquals( 2, pivot.size() ); - PivotField p = pivot.get( 1 ).getPivot().get(0); + PivotField p = pivot.get( 1 ).getPivot().get(0); // get(1) should be features=AAAA, then get(0) should be cat=a assertEquals( "cat", p.getField() ); assertEquals( "a", p.getValue() ); counts = p.getPivot(); // p.write(System.out, 5 ); - assertEquals( 1, counts.size() ); + assertEquals( 2, counts.size() ); // 2 trues and 1 false under features=AAAA,cat=a assertEquals( "inStock", counts.get(0).getField() ); assertEquals( Boolean.TRUE, counts.get(0).getValue() ); assertEquals( 2, counts.get(0).getCount() ); From 5dcacafcb4be2672e2c3240a107080007ce3c6dc Mon Sep 17 00:00:00 2001 From: Ryan McKinley Date: Sat, 
29 Jan 2011 17:10:09 +0000 Subject: [PATCH 048/185] - add comments to pivot test - change capitalization so that testing looks the same as the input git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065074 13f79535-47bb-0310-9956-ffa450edef68 --- .../solr/client/solrj/SolrExampleTests.java | 58 ++++++++++++++----- 1 file changed, 45 insertions(+), 13 deletions(-) diff --git a/solr/src/test/org/apache/solr/client/solrj/SolrExampleTests.java b/solr/src/test/org/apache/solr/client/solrj/SolrExampleTests.java index 216470e44cc..071f74e0255 100644 --- a/solr/src/test/org/apache/solr/client/solrj/SolrExampleTests.java +++ b/solr/src/test/org/apache/solr/client/solrj/SolrExampleTests.java @@ -576,17 +576,17 @@ abstract public class SolrExampleTests extends SolrJettyTestBase int id = 1; ArrayList docs = new ArrayList(); - docs.add( makeTestDoc( "id", id++, "features", "AAA", "cat", "a", "inStock", true ) ); - docs.add( makeTestDoc( "id", id++, "features", "AAA", "cat", "a", "inStock", false ) ); - docs.add( makeTestDoc( "id", id++, "features", "AAA", "cat", "a", "inStock", true ) ); - docs.add( makeTestDoc( "id", id++, "features", "AAA", "cat", "b", "inStock", false ) ); - docs.add( makeTestDoc( "id", id++, "features", "AAA", "cat", "b", "inStock", true ) ); - docs.add( makeTestDoc( "id", id++, "features", "BBB", "cat", "a", "inStock", false ) ); - docs.add( makeTestDoc( "id", id++, "features", "BBB", "cat", "a", "inStock", true ) ); - docs.add( makeTestDoc( "id", id++, "features", "BBB", "cat", "b", "inStock", false ) ); - docs.add( makeTestDoc( "id", id++, "features", "BBB", "cat", "b", "inStock", true ) ); - docs.add( makeTestDoc( "id", id++, "features", "BBB", "cat", "b", "inStock", false ) ); - docs.add( makeTestDoc( "id", id++, "features", "BBB", "cat", "b", "inStock", true ) ); + docs.add( makeTestDoc( "id", id++, "features", "aaa", "cat", "a", "inStock", true ) ); + docs.add( makeTestDoc( "id", id++, "features", "aaa", "cat", "a", "inStock", false ) ); 
+ docs.add( makeTestDoc( "id", id++, "features", "aaa", "cat", "a", "inStock", true ) ); + docs.add( makeTestDoc( "id", id++, "features", "aaa", "cat", "b", "inStock", false ) ); + docs.add( makeTestDoc( "id", id++, "features", "aaa", "cat", "b", "inStock", true ) ); + docs.add( makeTestDoc( "id", id++, "features", "bbb", "cat", "a", "inStock", false ) ); + docs.add( makeTestDoc( "id", id++, "features", "bbb", "cat", "a", "inStock", true ) ); + docs.add( makeTestDoc( "id", id++, "features", "bbb", "cat", "b", "inStock", false ) ); + docs.add( makeTestDoc( "id", id++, "features", "bbb", "cat", "b", "inStock", true ) ); + docs.add( makeTestDoc( "id", id++, "features", "bbb", "cat", "b", "inStock", false ) ); + docs.add( makeTestDoc( "id", id++, "features", "bbb", "cat", "b", "inStock", true ) ); docs.add( makeTestDoc( "id", id++ ) ); // something not matching server.add( docs ); server.commit(); @@ -610,7 +610,14 @@ abstract public class SolrExampleTests extends SolrJettyTestBase // System.out.println(); // } - // Now make sure they have reasonable stuff + // PIVOT: features,cat + // features=bbb (6) + // cat=b (4) + // cat=a (2) + // features=aaa (5) + // cat=a (3) + // cat=b (2) + List pivot = pivots.getVal( 0 ); assertEquals( "features,cat", pivots.getName( 0 ) ); assertEquals( 2, pivot.size() ); @@ -627,6 +634,15 @@ abstract public class SolrExampleTests extends SolrJettyTestBase assertEquals( "a", counts.get(1).getValue() ); assertEquals( 2, counts.get(1).getCount() ); + + // PIVOT: cat,features + // cat=b (6) + // features=bbb (4) + // features=aaa (2) + // cat=a (5) + // features=aaa (3) + // features=bbb (2) + ff = pivot.get( 1 ); assertEquals( "features", ff.getField() ); assertEquals( "aaa", ff.getValue() ); @@ -638,7 +654,23 @@ abstract public class SolrExampleTests extends SolrJettyTestBase assertEquals( "b", counts.get(1).getValue() ); assertEquals( 2, counts.get(1).getCount() ); - // 3 deep + // Three deep: + // PIVOT: features,cat,inStock + // 
features=bbb (6) + // cat=b (4) + // inStock=false (2) + // inStock=true (2) + // cat=a (2) + // inStock=false (1) + // inStock=true (1) + // features=aaa (5) + // cat=a (3) + // inStock=true (2) + // inStock=false (1) + // cat=b (2) + // inStock=false (1) + // inStock=true (1) + pivot = pivots.getVal( 2 ); assertEquals( "features,cat,inStock", pivots.getName( 2 ) ); assertEquals( 2, pivot.size() ); From 5f7f97021c7da069ab90dcfe95c06e2f89893e66 Mon Sep 17 00:00:00 2001 From: Ryan McKinley Date: Sat, 29 Jan 2011 19:43:20 +0000 Subject: [PATCH 049/185] LUCENE-2671 -- deprecate FieldTypes that will be removed in 5.x git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065093 13f79535-47bb-0310-9956-ffa450edef68 --- solr/src/java/org/apache/solr/schema/SortableDoubleField.java | 2 ++ solr/src/java/org/apache/solr/schema/SortableFloatField.java | 2 ++ solr/src/java/org/apache/solr/schema/SortableIntField.java | 2 ++ solr/src/java/org/apache/solr/schema/SortableLongField.java | 2 ++ 4 files changed, 8 insertions(+) diff --git a/solr/src/java/org/apache/solr/schema/SortableDoubleField.java b/solr/src/java/org/apache/solr/schema/SortableDoubleField.java index 411e9b5f6fc..b12858b45c2 100644 --- a/solr/src/java/org/apache/solr/schema/SortableDoubleField.java +++ b/solr/src/java/org/apache/solr/schema/SortableDoubleField.java @@ -37,6 +37,8 @@ import java.util.Map; import java.io.IOException; /** * @version $Id$ + * + * @deprecated use {@link DoubleField} or {@link TrieDoubleField} - will be removed in 5.x */ public class SortableDoubleField extends FieldType { protected void init(IndexSchema schema, Map args) { diff --git a/solr/src/java/org/apache/solr/schema/SortableFloatField.java b/solr/src/java/org/apache/solr/schema/SortableFloatField.java index e56ffd70c2a..b495227b1f6 100644 --- a/solr/src/java/org/apache/solr/schema/SortableFloatField.java +++ b/solr/src/java/org/apache/solr/schema/SortableFloatField.java @@ -37,6 +37,8 @@ import java.util.Map; import 
java.io.IOException; /** * @version $Id$ + * + * @deprecated use {@link FloatField} or {@link TrieFloatField} - will be removed in 5.x */ public class SortableFloatField extends FieldType { protected void init(IndexSchema schema, Map args) { diff --git a/solr/src/java/org/apache/solr/schema/SortableIntField.java b/solr/src/java/org/apache/solr/schema/SortableIntField.java index b6db1cff194..421e4bc45fc 100644 --- a/solr/src/java/org/apache/solr/schema/SortableIntField.java +++ b/solr/src/java/org/apache/solr/schema/SortableIntField.java @@ -37,6 +37,8 @@ import java.util.Map; import java.io.IOException; /** * @version $Id$ + * + * @deprecated use {@link IntField} or {@link TrieIntField} - will be removed in 5.x */ public class SortableIntField extends FieldType { protected void init(IndexSchema schema, Map args) { diff --git a/solr/src/java/org/apache/solr/schema/SortableLongField.java b/solr/src/java/org/apache/solr/schema/SortableLongField.java index 3be76b9b1c1..d23fff2bb26 100644 --- a/solr/src/java/org/apache/solr/schema/SortableLongField.java +++ b/solr/src/java/org/apache/solr/schema/SortableLongField.java @@ -37,6 +37,8 @@ import java.util.Map; import java.io.IOException; /** * @version $Id$ + * + * @deprecated use {@link LongField} or {@link TrieLongtField} - will be removed in 5.x */ public class SortableLongField extends FieldType { protected void init(IndexSchema schema, Map args) { From 295c8f84c73327cf93fe705224c13e11aa852480 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Sat, 29 Jan 2011 19:48:56 +0000 Subject: [PATCH 050/185] LUCENE-1076: allow non-contiguous merges; improve handling of buffered deletes git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065095 13f79535-47bb-0310-9956-ffa450edef68 --- .../store/instantiated/TestIndicesEquals.java | 4 +- .../lucene/index/TestFieldNormModifier.java | 2 +- .../index/TestMultiPassIndexSplitter.java | 2 +- .../lucene/misc/TestLengthNormModifier.java | 2 +- 
.../lucene/search/DuplicateFilterTest.java | 9 +- .../lucene/search/FuzzyLikeThisQueryTest.java | 2 +- .../apache/lucene/index/BufferedDeletes.java | 550 +++++------------- .../lucene/index/BufferedDeletesStream.java | 440 ++++++++++++++ .../apache/lucene/index/DocumentsWriter.java | 30 +- .../org/apache/lucene/index/IndexWriter.java | 128 ++-- .../apache/lucene/index/LogMergePolicy.java | 84 ++- .../apache/lucene/index/SegmentDeletes.java | 191 ------ .../org/apache/lucene/index/SegmentInfo.java | 13 +- .../lucene/TestMergeSchedulerExternal.java | 4 +- .../test/org/apache/lucene/TestSearch.java | 14 +- .../lucene/TestSearchForDuplicates.java | 19 +- .../lucene/index/MockRandomMergePolicy.java | 93 +++ .../apache/lucene/index/TestAddIndexes.java | 1 + .../apache/lucene/index/TestAtomicUpdate.java | 1 + .../index/TestConcurrentMergeScheduler.java | 7 + .../lucene/index/TestDeletionPolicy.java | 128 ++-- .../lucene/index/TestDocsAndPositions.java | 8 +- .../apache/lucene/index/TestFieldsReader.java | 4 +- .../apache/lucene/index/TestIndexReader.java | 10 +- .../index/TestIndexReaderCloneNorms.java | 4 +- .../lucene/index/TestIndexReaderReopen.java | 2 +- .../apache/lucene/index/TestIndexWriter.java | 48 +- .../lucene/index/TestIndexWriterDelete.java | 2 +- .../index/TestIndexWriterExceptions.java | 7 +- .../index/TestIndexWriterMergePolicy.java | 2 +- .../lucene/index/TestIndexWriterMerging.java | 4 +- .../index/TestIndexWriterOnDiskFull.java | 10 +- .../lucene/index/TestIndexWriterReader.java | 4 +- .../org/apache/lucene/index/TestLazyBug.java | 2 +- .../lucene/index/TestMaxTermFrequency.java | 2 +- .../lucene/index/TestMultiLevelSkipList.java | 2 +- .../apache/lucene/index/TestNRTThreads.java | 34 +- .../org/apache/lucene/index/TestNorms.java | 4 +- .../apache/lucene/index/TestOmitNorms.java | 5 +- .../org/apache/lucene/index/TestOmitTf.java | 7 +- .../org/apache/lucene/index/TestPayloads.java | 3 +- .../index/TestPerFieldCodecSupport.java | 6 +- 
.../lucene/index/TestPerSegmentDeletes.java | 5 +- .../apache/lucene/index/TestSegmentInfo.java | 2 +- .../lucene/index/TestSegmentTermDocs.java | 2 +- .../lucene/index/TestStressIndexing2.java | 6 +- .../lucene/search/BaseTestRangeFilter.java | 6 +- .../apache/lucene/search/TestBoolean2.java | 2 +- .../search/TestDisjunctionMaxQuery.java | 2 +- .../apache/lucene/search/TestDocBoost.java | 7 +- .../lucene/search/TestExplanations.java | 2 +- .../apache/lucene/search/TestFieldCache.java | 2 +- .../lucene/search/TestFilteredQuery.java | 10 +- .../lucene/search/TestFilteredSearch.java | 5 +- .../apache/lucene/search/TestFuzzyQuery2.java | 2 +- .../lucene/search/TestMatchAllDocsQuery.java | 2 +- .../search/TestMultiThreadTermVectors.java | 2 +- .../search/TestNumericRangeQuery32.java | 3 +- .../search/TestNumericRangeQuery64.java | 3 +- .../apache/lucene/search/TestPhraseQuery.java | 2 +- .../org/apache/lucene/search/TestSort.java | 2 +- .../lucene/search/TestSpanQueryFilter.java | 5 +- .../lucene/search/TestSubScorerFreqs.java | 14 +- .../apache/lucene/search/TestTermScorer.java | 9 +- .../apache/lucene/search/TestTermVectors.java | 4 +- .../search/cache/TestEntryCreators.java | 5 +- .../search/function/FunctionTestSetup.java | 2 +- .../search/payloads/TestPayloadTermQuery.java | 2 +- .../lucene/search/spans/TestBasics.java | 2 +- .../spans/TestFieldMaskingSpanQuery.java | 3 +- .../search/spans/TestNearSpansOrdered.java | 2 +- .../apache/lucene/search/spans/TestSpans.java | 2 +- .../search/spans/TestSpansAdvanced.java | 5 +- .../search/spans/TestSpansAdvanced2.java | 2 +- .../apache/lucene/store/TestMultiMMap.java | 3 +- .../apache/lucene/util/LuceneTestCase.java | 19 +- 76 files changed, 1170 insertions(+), 870 deletions(-) create mode 100644 lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java delete mode 100644 lucene/src/java/org/apache/lucene/index/SegmentDeletes.java create mode 100644 
lucene/src/test/org/apache/lucene/index/MockRandomMergePolicy.java diff --git a/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java b/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java index ae091b5ec6e..7a5398c4ed0 100644 --- a/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java +++ b/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java @@ -65,7 +65,7 @@ public class TestIndicesEquals extends LuceneTestCase { // create dir data IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer())); + TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy())); for (int i = 0; i < 20; i++) { Document document = new Document(); @@ -91,7 +91,7 @@ public class TestIndicesEquals extends LuceneTestCase { // create dir data IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer())); + TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy())); indexWriter.setInfoStream(VERBOSE ? 
System.out : null); if (VERBOSE) { System.out.println("TEST: make test index"); diff --git a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java index 8c83d449341..48bb42dfcf5 100644 --- a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java +++ b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java @@ -54,7 +54,7 @@ public class TestFieldNormModifier extends LuceneTestCase { super.setUp(); store = newDirectory(); IndexWriter writer = new IndexWriter(store, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer())); + TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy())); for (int i = 0; i < NUM_DOCS; i++) { Document d = new Document(); diff --git a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java index f861063942d..158b24ff58b 100644 --- a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java +++ b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java @@ -32,7 +32,7 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); dir = newDirectory(); - IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); + IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy())); Document doc; for (int i = 0; i < NUM_DOCS; i++) { doc = new Document(); diff --git a/lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java b/lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java index a856dd9fa58..7dfa6a311d5 100644 --- 
a/lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java +++ b/lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java @@ -59,7 +59,7 @@ public class TestLengthNormModifier extends LuceneTestCase { super.setUp(); store = newDirectory(); IndexWriter writer = new IndexWriter(store, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer())); + TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy())); for (int i = 0; i < NUM_DOCS; i++) { Document d = new Document(); diff --git a/lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java b/lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java index a040d303fb8..2a3df020714 100644 --- a/lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java +++ b/lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java @@ -20,16 +20,17 @@ package org.apache.lucene.search; import java.io.IOException; import java.util.HashSet; +import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.MultiFields; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; -import org.apache.lucene.index.DocsEnum; -import org.apache.lucene.index.MultiFields; import org.apache.lucene.store.Directory; -import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.LuceneTestCase; public class DuplicateFilterTest extends LuceneTestCase { private static final String KEY_FIELD = "url"; @@ -42,7 +43,7 @@ public class DuplicateFilterTest extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, 
directory); + RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy())); //Add series of docs with filterable fields : url, text and dates flags addDoc(writer, "http://lucene.apache.org", "lucene 1.4.3 available", "20040101"); diff --git a/lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java b/lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java index 5f2bec5b04c..587a5710b9a 100644 --- a/lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java +++ b/lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java @@ -40,7 +40,7 @@ public class FuzzyLikeThisQueryTest extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy())); //Add series of docs with misspelt names addDoc(writer, "jonathon smythe","1"); diff --git a/lucene/src/java/org/apache/lucene/index/BufferedDeletes.java b/lucene/src/java/org/apache/lucene/index/BufferedDeletes.java index 0be1dd2ba30..ed955b90d2d 100644 --- a/lucene/src/java/org/apache/lucene/index/BufferedDeletes.java +++ b/lucene/src/java/org/apache/lucene/index/BufferedDeletes.java @@ -17,440 +17,166 @@ package org.apache.lucene.index; * limitations under the License. 
*/ -import java.io.IOException; -import java.io.PrintStream; +import java.util.ArrayList; import java.util.HashMap; -import java.util.Date; -import java.util.Map.Entry; +import java.util.List; import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; +import java.util.SortedMap; +import java.util.TreeMap; import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicInteger; -import org.apache.lucene.index.IndexReader.AtomicReaderContext; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; -import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.Weight; +import org.apache.lucene.util.RamUsageEstimator; -/** Holds a {@link SegmentDeletes} for each segment in the - * index. */ +/** Holds buffered deletes, by docID, term or query for a + * single segment. This is used to hold buffered pending + * deletes against the to-be-flushed segment as well as + * per-segment deletes for each segment in the index. */ + +// NOTE: we are sync'd by BufferedDeletes, ie, all access to +// instances of this class is via sync'd methods on +// BufferedDeletes class BufferedDeletes { - // Deletes for all flushed/merged segments: - private final Map deletesMap = new HashMap(); + /* Rough logic: HashMap has an array[Entry] w/ varying + load factor (say 2 * POINTER). Entry is object w/ Term + key, Integer val, int hash, Entry next + (OBJ_HEADER + 3*POINTER + INT). Term is object w/ + String field and String text (OBJ_HEADER + 2*POINTER). + We don't count Term's field since it's interned. + Term's text is String (OBJ_HEADER + 4*INT + POINTER + + OBJ_HEADER + string.length*CHAR). Integer is + OBJ_HEADER + INT. 
*/ + final static int BYTES_PER_DEL_TERM = 8*RamUsageEstimator.NUM_BYTES_OBJECT_REF + 5*RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 6*RamUsageEstimator.NUM_BYTES_INT; - // used only by assert - private Term lastDeleteTerm; - - private PrintStream infoStream; - private final AtomicLong bytesUsed = new AtomicLong(); - private final AtomicInteger numTerms = new AtomicInteger(); - private final int messageID; + /* Rough logic: del docIDs are List. Say list + allocates ~2X size (2*POINTER). Integer is OBJ_HEADER + + int */ + final static int BYTES_PER_DEL_DOCID = 2*RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + RamUsageEstimator.NUM_BYTES_INT; - public BufferedDeletes(int messageID) { - this.messageID = messageID; - } + /* Rough logic: HashMap has an array[Entry] w/ varying + load factor (say 2 * POINTER). Entry is object w/ + Query key, Integer val, int hash, Entry next + (OBJ_HEADER + 3*POINTER + INT). Query we often + undercount (say 24 bytes). Integer is OBJ_HEADER + INT. */ + final static int BYTES_PER_DEL_QUERY = 5*RamUsageEstimator.NUM_BYTES_OBJECT_REF + 2*RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 2*RamUsageEstimator.NUM_BYTES_INT + 24; - private synchronized void message(String message) { - if (infoStream != null) { - infoStream.println("BD " + messageID + " [" + new Date() + "; " + Thread.currentThread().getName() + "]: BD " + message); - } - } - - public synchronized void setInfoStream(PrintStream infoStream) { - this.infoStream = infoStream; - } + // TODO: many of the deletes stored here will map to + // Integer.MAX_VALUE; we could be more efficient for this + // case ie use a SortedSet not a SortedMap. But: Java's + // SortedSet impls are simply backed by a Map so we won't + // save anything unless we do something custom... 
+ final AtomicInteger numTermDeletes = new AtomicInteger(); + final SortedMap terms = new TreeMap(); + final Map queries = new HashMap(); + final List docIDs = new ArrayList(); - public synchronized void pushDeletes(SegmentDeletes newDeletes, SegmentInfo info) { - pushDeletes(newDeletes, info, false); - } + public static final Integer MAX_INT = Integer.valueOf(Integer.MAX_VALUE); - // Moves all pending deletes onto the provided segment, - // then clears the pending deletes - public synchronized void pushDeletes(SegmentDeletes newDeletes, SegmentInfo info, boolean noLimit) { - assert newDeletes.any(); - numTerms.addAndGet(newDeletes.numTermDeletes.get()); + final AtomicLong bytesUsed = new AtomicLong(); - if (!noLimit) { - assert !deletesMap.containsKey(info); - assert info != null; - deletesMap.put(info, newDeletes); - bytesUsed.addAndGet(newDeletes.bytesUsed.get()); + private final static boolean VERBOSE_DELETES = false; + + long gen; + + @Override + public String toString() { + if (VERBOSE_DELETES) { + return "gen=" + gen + " numTerms=" + numTermDeletes + ", terms=" + terms + + ", queries=" + queries + ", docIDs=" + docIDs + ", bytesUsed=" + + bytesUsed; } else { - final SegmentDeletes deletes = getDeletes(info); - bytesUsed.addAndGet(-deletes.bytesUsed.get()); - deletes.update(newDeletes, noLimit); - bytesUsed.addAndGet(deletes.bytesUsed.get()); - } - if (infoStream != null) { - message("push deletes seg=" + info + " dels=" + getDeletes(info)); + String s = "gen=" + gen; + if (numTermDeletes.get() != 0) { + s += " " + numTermDeletes.get() + " deleted terms (unique count=" + terms.size() + ")"; + } + if (queries.size() != 0) { + s += " " + queries.size() + " deleted queries"; + } + if (docIDs.size() != 0) { + s += " " + docIDs.size() + " deleted docIDs"; + } + if (bytesUsed.get() != 0) { + s += " bytesUsed=" + bytesUsed.get(); + } + + return s; } - assert checkDeleteStats(); } - public synchronized void clear() { - deletesMap.clear(); - numTerms.set(0); + void 
update(BufferedDeletes in) { + numTermDeletes.addAndGet(in.numTermDeletes.get()); + for (Map.Entry ent : in.terms.entrySet()) { + final Term term = ent.getKey(); + if (!terms.containsKey(term)) { + // only incr bytesUsed if this term wasn't already buffered: + bytesUsed.addAndGet(BYTES_PER_DEL_TERM); + } + terms.put(term, MAX_INT); + } + + for (Map.Entry ent : in.queries.entrySet()) { + final Query query = ent.getKey(); + if (!queries.containsKey(query)) { + // only incr bytesUsed if this query wasn't already buffered: + bytesUsed.addAndGet(BYTES_PER_DEL_QUERY); + } + queries.put(query, MAX_INT); + } + + // docIDs never move across segments and the docIDs + // should already be cleared + } + + public void addQuery(Query query, int docIDUpto) { + Integer current = queries.put(query, docIDUpto); + // increment bytes used only if the query wasn't added so far. + if (current == null) { + bytesUsed.addAndGet(BYTES_PER_DEL_QUERY); + } + } + + public void addDocID(int docID) { + docIDs.add(Integer.valueOf(docID)); + bytesUsed.addAndGet(BYTES_PER_DEL_DOCID); + } + + public void addTerm(Term term, int docIDUpto) { + Integer current = terms.get(term); + if (current != null && docIDUpto < current) { + // Only record the new number if it's greater than the + // current one. This is important because if multiple + // threads are replacing the same doc at nearly the + // same time, it's possible that one thread that got a + // higher docID is scheduled before the other + // threads. If we blindly replace than we can + // incorrectly get both docs indexed. 
+ return; + } + + terms.put(term, Integer.valueOf(docIDUpto)); + numTermDeletes.incrementAndGet(); + if (current == null) { + bytesUsed.addAndGet(BYTES_PER_DEL_TERM + term.bytes.length); + } + } + + void clear() { + terms.clear(); + queries.clear(); + docIDs.clear(); + numTermDeletes.set(0); bytesUsed.set(0); } - - synchronized boolean any() { - return bytesUsed.get() != 0; - } - - public int numTerms() { - return numTerms.get(); - } - - public long bytesUsed() { - return bytesUsed.get(); - } - - // IW calls this on finishing a merge. While the merge - // was running, it's possible new deletes were pushed onto - // our last (and only our last) segment. In this case we - // must carry forward those deletes onto the merged - // segment. - synchronized void commitMerge(MergePolicy.OneMerge merge) { - assert checkDeleteStats(); - if (infoStream != null) { - message("commitMerge merge.info=" + merge.info + " merge.segments=" + merge.segments); - } - final SegmentInfo lastInfo = merge.segments.lastElement(); - final SegmentDeletes lastDeletes = deletesMap.get(lastInfo); - if (lastDeletes != null) { - deletesMap.remove(lastInfo); - assert !deletesMap.containsKey(merge.info); - deletesMap.put(merge.info, lastDeletes); - // don't need to update numTerms/bytesUsed since we - // are just moving the deletes from one info to - // another - if (infoStream != null) { - message("commitMerge done: new deletions=" + lastDeletes); - } - } else if (infoStream != null) { - message("commitMerge done: no new deletions"); - } - assert !anyDeletes(merge.segments.range(0, merge.segments.size()-1)); - assert checkDeleteStats(); - } - - synchronized void clear(SegmentDeletes deletes) { - deletes.clear(); + + void clearDocIDs() { + bytesUsed.addAndGet(-docIDs.size()*BYTES_PER_DEL_DOCID); + docIDs.clear(); } - public synchronized boolean applyDeletes(IndexWriter.ReaderPool readerPool, SegmentInfos segmentInfos, SegmentInfos applyInfos) throws IOException { - if (!any()) { - return false; - } - 
final long t0 = System.currentTimeMillis(); - - if (infoStream != null) { - message("applyDeletes: applyInfos=" + applyInfos + "; index=" + segmentInfos); - } - - assert checkDeleteStats(); - - assert applyInfos.size() > 0; - - boolean any = false; - - final SegmentInfo lastApplyInfo = applyInfos.lastElement(); - final int lastIdx = segmentInfos.indexOf(lastApplyInfo); - - final SegmentInfo firstInfo = applyInfos.firstElement(); - final int firstIdx = segmentInfos.indexOf(firstInfo); - - // applyInfos must be a slice of segmentInfos - assert lastIdx - firstIdx + 1 == applyInfos.size(); - - // iterate over all segment infos backwards - // coalesceing deletes along the way - // when we're at or below the last of the - // segments to apply to, start applying the deletes - // we traverse up to the first apply infos - SegmentDeletes coalescedDeletes = null; - boolean hasDeletes = false; - for (int segIdx=segmentInfos.size()-1; segIdx >= firstIdx; segIdx--) { - final SegmentInfo info = segmentInfos.info(segIdx); - final SegmentDeletes deletes = deletesMap.get(info); - assert deletes == null || deletes.any(); - - if (deletes == null && coalescedDeletes == null) { - continue; - } - - if (infoStream != null) { - message("applyDeletes: seg=" + info + " segment's deletes=[" + (deletes == null ? "null" : deletes) + "]; coalesced deletes=[" + (coalescedDeletes == null ? 
"null" : coalescedDeletes) + "]"); - } - - hasDeletes |= deletes != null; - - if (segIdx <= lastIdx && hasDeletes) { - - final long delCountInc = applyDeletes(readerPool, info, coalescedDeletes, deletes); - - if (delCountInc != 0) { - any = true; - } - if (infoStream != null) { - message("deletes touched " + delCountInc + " docIDs"); - } - - if (deletes != null) { - // we've applied doc ids, and they're only applied - // on the current segment - bytesUsed.addAndGet(-deletes.docIDs.size() * SegmentDeletes.BYTES_PER_DEL_DOCID); - deletes.clearDocIDs(); - } - } - - // now coalesce at the max limit - if (deletes != null) { - if (coalescedDeletes == null) { - coalescedDeletes = new SegmentDeletes(); - } - // TODO: we could make this single pass (coalesce as - // we apply the deletes - coalescedDeletes.update(deletes, true); - } - } - - // move all deletes to segment just before our merge. - if (firstIdx > 0) { - - SegmentDeletes mergedDeletes = null; - // TODO: we could also make this single pass - for (SegmentInfo info : applyInfos) { - final SegmentDeletes deletes = deletesMap.get(info); - if (deletes != null) { - assert deletes.any(); - if (mergedDeletes == null) { - mergedDeletes = getDeletes(segmentInfos.info(firstIdx-1)); - numTerms.addAndGet(-mergedDeletes.numTermDeletes.get()); - assert numTerms.get() >= 0; - bytesUsed.addAndGet(-mergedDeletes.bytesUsed.get()); - assert bytesUsed.get() >= 0; - } - - mergedDeletes.update(deletes, true); - } - } - - if (mergedDeletes != null) { - numTerms.addAndGet(mergedDeletes.numTermDeletes.get()); - bytesUsed.addAndGet(mergedDeletes.bytesUsed.get()); - } - - if (infoStream != null) { - if (mergedDeletes != null) { - message("applyDeletes: merge all deletes into seg=" + segmentInfos.info(firstIdx-1) + ": " + mergedDeletes); - } else { - message("applyDeletes: no deletes to merge"); - } - } - } else { - // We drop the deletes in this case, because we've - // applied them to segment infos starting w/ the first - // segment. 
There are no prior segments so there's no - // reason to keep them around. When the applyInfos == - // segmentInfos this means all deletes have been - // removed: - } - remove(applyInfos); - - assert checkDeleteStats(); - assert applyInfos != segmentInfos || !any(); - - if (infoStream != null) { - message("applyDeletes took " + (System.currentTimeMillis()-t0) + " msec"); - } - return any; - } - - private synchronized long applyDeletes(IndexWriter.ReaderPool readerPool, - SegmentInfo info, - SegmentDeletes coalescedDeletes, - SegmentDeletes segmentDeletes) throws IOException { - assert readerPool.infoIsLive(info); - - assert coalescedDeletes == null || coalescedDeletes.docIDs.size() == 0; - - long delCount = 0; - - // Lock order: IW -> BD -> RP - SegmentReader reader = readerPool.get(info, false); - try { - if (coalescedDeletes != null) { - delCount += applyDeletes(coalescedDeletes, reader); - } - if (segmentDeletes != null) { - delCount += applyDeletes(segmentDeletes, reader); - } - } finally { - readerPool.release(reader); - } - return delCount; - } - - private synchronized long applyDeletes(SegmentDeletes deletes, SegmentReader reader) throws IOException { - - long delCount = 0; - - assert checkDeleteTerm(null); - - if (deletes.terms.size() > 0) { - Fields fields = reader.fields(); - if (fields == null) { - // This reader has no postings - return 0; - } - - TermsEnum termsEnum = null; - - String currentField = null; - DocsEnum docs = null; - - for (Entry entry: deletes.terms.entrySet()) { - Term term = entry.getKey(); - // Since we visit terms sorted, we gain performance - // by re-using the same TermsEnum and seeking only - // forwards - if (term.field() != currentField) { - assert currentField == null || currentField.compareTo(term.field()) < 0; - currentField = term.field(); - Terms terms = fields.terms(currentField); - if (terms != null) { - termsEnum = terms.iterator(); - } else { - termsEnum = null; - } - } - - if (termsEnum == null) { - continue; - } - 
assert checkDeleteTerm(term); - - if (termsEnum.seek(term.bytes(), false) == TermsEnum.SeekStatus.FOUND) { - DocsEnum docsEnum = termsEnum.docs(reader.getDeletedDocs(), docs); - - if (docsEnum != null) { - docs = docsEnum; - final int limit = entry.getValue(); - while (true) { - final int docID = docs.nextDoc(); - if (docID == DocsEnum.NO_MORE_DOCS || docID >= limit) { - break; - } - reader.deleteDocument(docID); - // TODO: we could/should change - // reader.deleteDocument to return boolean - // true if it did in fact delete, because here - // we could be deleting an already-deleted doc - // which makes this an upper bound: - delCount++; - } - } - } - } - } - - // Delete by docID - for (Integer docIdInt : deletes.docIDs) { - int docID = docIdInt.intValue(); - reader.deleteDocument(docID); - delCount++; - } - - // Delete by query - if (deletes.queries.size() > 0) { - IndexSearcher searcher = new IndexSearcher(reader); - assert searcher.getTopReaderContext().isAtomic; - final AtomicReaderContext readerContext = (AtomicReaderContext) searcher.getTopReaderContext(); - try { - for (Entry entry : deletes.queries.entrySet()) { - Query query = entry.getKey(); - int limit = entry.getValue().intValue(); - Weight weight = query.weight(searcher); - Scorer scorer = weight.scorer(readerContext, Weight.ScorerContext.def()); - if (scorer != null) { - while(true) { - int doc = scorer.nextDoc(); - if (doc >= limit) - break; - - reader.deleteDocument(doc); - // TODO: we could/should change - // reader.deleteDocument to return boolean - // true if it did in fact delete, because here - // we could be deleting an already-deleted doc - // which makes this an upper bound: - delCount++; - } - } - } - } finally { - searcher.close(); - } - } - - return delCount; - } - - public synchronized SegmentDeletes getDeletes(SegmentInfo info) { - SegmentDeletes deletes = deletesMap.get(info); - if (deletes == null) { - deletes = new SegmentDeletes(); - deletesMap.put(info, deletes); - } - return 
deletes; - } - - public synchronized void remove(SegmentInfos infos) { - assert infos.size() > 0; - for (SegmentInfo info : infos) { - SegmentDeletes deletes = deletesMap.get(info); - if (deletes != null) { - bytesUsed.addAndGet(-deletes.bytesUsed.get()); - assert bytesUsed.get() >= 0: "bytesUsed=" + bytesUsed; - numTerms.addAndGet(-deletes.numTermDeletes.get()); - assert numTerms.get() >= 0: "numTerms=" + numTerms; - deletesMap.remove(info); - } - } - } - - // used only by assert - private boolean anyDeletes(SegmentInfos infos) { - for(SegmentInfo info : infos) { - if (deletesMap.containsKey(info)) { - return true; - } - } - return false; - } - - // used only by assert - private boolean checkDeleteTerm(Term term) { - if (term != null) { - assert lastDeleteTerm == null || term.compareTo(lastDeleteTerm) > 0: "lastTerm=" + lastDeleteTerm + " vs term=" + term; - } - lastDeleteTerm = term; - return true; - } - - // only for assert - private boolean checkDeleteStats() { - int numTerms2 = 0; - long bytesUsed2 = 0; - for(SegmentDeletes deletes : deletesMap.values()) { - numTerms2 += deletes.numTermDeletes.get(); - bytesUsed2 += deletes.bytesUsed.get(); - } - assert numTerms2 == numTerms.get(): "numTerms2=" + numTerms2 + " vs " + numTerms.get(); - assert bytesUsed2 == bytesUsed.get(): "bytesUsed2=" + bytesUsed2 + " vs " + bytesUsed; - return true; + boolean any() { + return terms.size() > 0 || docIDs.size() > 0 || queries.size() > 0; } } diff --git a/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java b/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java new file mode 100644 index 00000000000..b9a0184a0c1 --- /dev/null +++ b/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java @@ -0,0 +1,440 @@ +package org.apache.lucene.index; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.IOException; +import java.io.PrintStream; +import java.util.List; +import java.util.ArrayList; +import java.util.Date; +import java.util.Map.Entry; +import java.util.Comparator; +import java.util.Collections; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.lucene.index.IndexReader.AtomicReaderContext; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; + +/* Tracks the stream of {@link BuffereDeletes}. + * When DocumensWriter flushes, its buffered + * deletes are appended to this stream. We later + * apply these deletes (resolve them to the actual + * docIDs, per segment) when a merge is started + * (only to the to-be-merged segments). We + * also apply to all segments when NRT reader is pulled, + * commit/close is called, or when too many deletes are + * buffered and must be flushed (by RAM usage or by count). + * + * Each packet is assigned a generation, and each flushed or + * merged segment is also assigned a generation, so we can + * track which BufferedDeletes packets to apply to any given + * segment. 
*/ + +class BufferedDeletesStream { + + // TODO: maybe linked list? + private final List deletes = new ArrayList(); + + // Starts at 1 so that SegmentInfos that have never had + // deletes applied (whose bufferedDelGen defaults to 0) + // will be correct: + private long nextGen = 1; + + // used only by assert + private Term lastDeleteTerm; + + private PrintStream infoStream; + private final AtomicLong bytesUsed = new AtomicLong(); + private final AtomicInteger numTerms = new AtomicInteger(); + private final int messageID; + + public BufferedDeletesStream(int messageID) { + this.messageID = messageID; + } + + private synchronized void message(String message) { + if (infoStream != null) { + infoStream.println("BD " + messageID + " [" + new Date() + "; " + Thread.currentThread().getName() + "]: " + message); + } + } + + public synchronized void setInfoStream(PrintStream infoStream) { + this.infoStream = infoStream; + } + + // Appends a new packet of buffered deletes to the stream, + // setting its generation: + public synchronized void push(BufferedDeletes packet) { + assert packet.any(); + assert checkDeleteStats(); + packet.gen = nextGen++; + deletes.add(packet); + numTerms.addAndGet(packet.numTermDeletes.get()); + bytesUsed.addAndGet(packet.bytesUsed.get()); + if (infoStream != null) { + message("push deletes " + packet + " delGen=" + packet.gen + " packetCount=" + deletes.size()); + } + assert checkDeleteStats(); + } + + public synchronized void clear() { + deletes.clear(); + nextGen = 1; + numTerms.set(0); + bytesUsed.set(0); + } + + public boolean any() { + return bytesUsed.get() != 0; + } + + public int numTerms() { + return numTerms.get(); + } + + public long bytesUsed() { + return bytesUsed.get(); + } + + public static class ApplyDeletesResult { + // True if any actual deletes took place: + public final boolean anyDeletes; + + // Current gen, for the merged segment: + public final long gen; + + ApplyDeletesResult(boolean anyDeletes, long gen) { + 
this.anyDeletes = anyDeletes; + this.gen = gen; + } + } + + // Sorts SegmentInfos from smallest to biggest bufferedDelGen: + private static final Comparator sortByDelGen = new Comparator() { + @Override + public int compare(SegmentInfo si1, SegmentInfo si2) { + final long cmp = si1.getBufferedDeletesGen() - si2.getBufferedDeletesGen(); + if (cmp > 0) { + return 1; + } else if (cmp < 0) { + return -1; + } else { + return 0; + } + } + + @Override + public boolean equals(Object other) { + return sortByDelGen == other; + } + }; + + /** Resolves the buffered deleted Term/Query/docIDs, into + * actual deleted docIDs in the deletedDocs BitVector for + * each SegmentReader. */ + public synchronized ApplyDeletesResult applyDeletes(IndexWriter.ReaderPool readerPool, SegmentInfos infos) throws IOException { + final long t0 = System.currentTimeMillis(); + + if (infos.size() == 0) { + return new ApplyDeletesResult(false, nextGen++); + } + + assert checkDeleteStats(); + + if (!any()) { + message("applyDeletes: no deletes; skipping"); + return new ApplyDeletesResult(false, nextGen++); + } + + if (infoStream != null) { + message("applyDeletes: infos=" + infos + " packetCount=" + deletes.size()); + } + + SegmentInfos infos2 = new SegmentInfos(); + infos2.addAll(infos); + Collections.sort(infos2, sortByDelGen); + + BufferedDeletes coalescedDeletes = null; + boolean anyNewDeletes = false; + + int infosIDX = infos2.size()-1; + int delIDX = deletes.size()-1; + + while (infosIDX >= 0) { + //System.out.println("BD: cycle delIDX=" + delIDX + " infoIDX=" + infosIDX); + + final BufferedDeletes packet = delIDX >= 0 ? 
deletes.get(delIDX) : null; + final SegmentInfo info = infos2.get(infosIDX); + final long segGen = info.getBufferedDeletesGen(); + + if (packet != null && segGen < packet.gen) { + //System.out.println(" coalesce"); + if (coalescedDeletes == null) { + coalescedDeletes = new BufferedDeletes(); + } + coalescedDeletes.update(packet); + delIDX--; + } else if (packet != null && segGen == packet.gen) { + //System.out.println(" eq"); + + // Lock order: IW -> BD -> RP + assert readerPool.infoIsLive(info); + SegmentReader reader = readerPool.get(info, false); + int delCount = 0; + try { + if (coalescedDeletes != null) { + delCount += applyDeletes(coalescedDeletes, reader); + } + delCount += applyDeletes(packet, reader); + } finally { + readerPool.release(reader); + } + anyNewDeletes |= delCount > 0; + + // We've applied doc ids, and they're only applied + // on the current segment + bytesUsed.addAndGet(-packet.docIDs.size() * BufferedDeletes.BYTES_PER_DEL_DOCID); + packet.clearDocIDs(); + + if (infoStream != null) { + message("seg=" + info + " segGen=" + segGen + " segDeletes=[" + packet + "]; coalesced deletes=[" + (coalescedDeletes == null ? "null" : coalescedDeletes) + "] delCount=" + delCount); + } + + if (coalescedDeletes == null) { + coalescedDeletes = new BufferedDeletes(); + } + coalescedDeletes.update(packet); + delIDX--; + infosIDX--; + info.setBufferedDeletesGen(nextGen); + + } else { + //System.out.println(" gt"); + + if (coalescedDeletes != null) { + // Lock order: IW -> BD -> RP + assert readerPool.infoIsLive(info); + SegmentReader reader = readerPool.get(info, false); + int delCount = 0; + try { + delCount += applyDeletes(coalescedDeletes, reader); + } finally { + readerPool.release(reader); + } + anyNewDeletes |= delCount > 0; + + if (infoStream != null) { + message("seg=" + info + " segGen=" + segGen + " coalesced deletes=[" + (coalescedDeletes == null ? 
"null" : coalescedDeletes) + "] delCount=" + delCount); + } + } + info.setBufferedDeletesGen(nextGen); + + infosIDX--; + } + } + + assert checkDeleteStats(); + if (infoStream != null) { + message("applyDeletes took " + (System.currentTimeMillis()-t0) + " msec"); + } + // assert infos != segmentInfos || !any() : "infos=" + infos + " segmentInfos=" + segmentInfos + " any=" + any; + + return new ApplyDeletesResult(anyNewDeletes, nextGen++); + } + + public synchronized long getNextGen() { + return nextGen++; + } + + // Lock order IW -> BD + /* Removes any BufferedDeletes that we no longer need to + * store because all segments in the index have had the + * deletes applied. */ + public synchronized void prune(SegmentInfos segmentInfos) { + assert checkDeleteStats(); + long minGen = Long.MAX_VALUE; + for(SegmentInfo info : segmentInfos) { + minGen = Math.min(info.getBufferedDeletesGen(), minGen); + } + + if (infoStream != null) { + message("prune sis=" + segmentInfos + " minGen=" + minGen + " packetCount=" + deletes.size()); + } + + final int limit = deletes.size(); + for(int delIDX=0;delIDX= minGen) { + prune(delIDX); + assert checkDeleteStats(); + return; + } + } + + // All deletes pruned + prune(limit); + assert !any(); + assert checkDeleteStats(); + } + + private synchronized void prune(int count) { + if (count > 0) { + if (infoStream != null) { + message("pruneDeletes: prune " + count + " packets; " + (deletes.size() - count) + " packets remain"); + } + for(int delIDX=0;delIDX= 0; + bytesUsed.addAndGet(-packet.bytesUsed.get()); + assert bytesUsed.get() >= 0; + } + deletes.subList(0, count).clear(); + } + } + + private synchronized long applyDeletes(BufferedDeletes deletes, SegmentReader reader) throws IOException { + + long delCount = 0; + + assert checkDeleteTerm(null); + + if (deletes.terms.size() > 0) { + Fields fields = reader.fields(); + if (fields == null) { + // This reader has no postings + return 0; + } + + TermsEnum termsEnum = null; + + String 
currentField = null; + DocsEnum docs = null; + + for (Entry entry: deletes.terms.entrySet()) { + Term term = entry.getKey(); + // Since we visit terms sorted, we gain performance + // by re-using the same TermsEnum and seeking only + // forwards + if (term.field() != currentField) { + assert currentField == null || currentField.compareTo(term.field()) < 0; + currentField = term.field(); + Terms terms = fields.terms(currentField); + if (terms != null) { + termsEnum = terms.iterator(); + } else { + termsEnum = null; + } + } + + if (termsEnum == null) { + continue; + } + assert checkDeleteTerm(term); + + if (termsEnum.seek(term.bytes(), false) == TermsEnum.SeekStatus.FOUND) { + DocsEnum docsEnum = termsEnum.docs(reader.getDeletedDocs(), docs); + + if (docsEnum != null) { + docs = docsEnum; + final int limit = entry.getValue(); + while (true) { + final int docID = docs.nextDoc(); + if (docID == DocsEnum.NO_MORE_DOCS || docID >= limit) { + break; + } + reader.deleteDocument(docID); + // TODO: we could/should change + // reader.deleteDocument to return boolean + // true if it did in fact delete, because here + // we could be deleting an already-deleted doc + // which makes this an upper bound: + delCount++; + } + } + } + } + } + + // Delete by docID + for (Integer docIdInt : deletes.docIDs) { + int docID = docIdInt.intValue(); + reader.deleteDocument(docID); + delCount++; + } + + // Delete by query + if (deletes.queries.size() > 0) { + IndexSearcher searcher = new IndexSearcher(reader); + assert searcher.getTopReaderContext().isAtomic; + final AtomicReaderContext readerContext = (AtomicReaderContext) searcher.getTopReaderContext(); + try { + for (Entry entry : deletes.queries.entrySet()) { + Query query = entry.getKey(); + int limit = entry.getValue().intValue(); + Weight weight = query.weight(searcher); + Scorer scorer = weight.scorer(readerContext, Weight.ScorerContext.def()); + if (scorer != null) { + while(true) { + int doc = scorer.nextDoc(); + if (doc >= limit) + 
break; + + reader.deleteDocument(doc); + // TODO: we could/should change + // reader.deleteDocument to return boolean + // true if it did in fact delete, because here + // we could be deleting an already-deleted doc + // which makes this an upper bound: + delCount++; + } + } + } + } finally { + searcher.close(); + } + } + + return delCount; + } + + // used only by assert + private boolean checkDeleteTerm(Term term) { + if (term != null) { + assert lastDeleteTerm == null || term.compareTo(lastDeleteTerm) > 0: "lastTerm=" + lastDeleteTerm + " vs term=" + term; + } + lastDeleteTerm = term; + return true; + } + + // only for assert + private boolean checkDeleteStats() { + int numTerms2 = 0; + long bytesUsed2 = 0; + for(BufferedDeletes packet : deletes) { + numTerms2 += packet.numTermDeletes.get(); + bytesUsed2 += packet.bytesUsed.get(); + } + assert numTerms2 == numTerms.get(): "numTerms2=" + numTerms2 + " vs " + numTerms.get(); + assert bytesUsed2 == bytesUsed.get(): "bytesUsed2=" + bytesUsed2 + " vs " + bytesUsed; + return true; + } +} diff --git a/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java b/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java index 25cd8cac737..d3c6caee9fe 100644 --- a/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java +++ b/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java @@ -134,7 +134,7 @@ final class DocumentsWriter { private final int maxThreadStates; // Deletes for our still-in-RAM (to be flushed next) segment - private SegmentDeletes pendingDeletes = new SegmentDeletes(); + private BufferedDeletes pendingDeletes = new BufferedDeletes(); static class DocState { DocumentsWriter docWriter; @@ -278,16 +278,16 @@ final class DocumentsWriter { private boolean closed; private final FieldInfos fieldInfos; - private final BufferedDeletes bufferedDeletes; + private final BufferedDeletesStream bufferedDeletesStream; private final IndexWriter.FlushControl flushControl; - DocumentsWriter(Directory 
directory, IndexWriter writer, IndexingChain indexingChain, int maxThreadStates, FieldInfos fieldInfos, BufferedDeletes bufferedDeletes) throws IOException { + DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain, int maxThreadStates, FieldInfos fieldInfos, BufferedDeletesStream bufferedDeletesStream) throws IOException { this.directory = directory; this.writer = writer; this.similarityProvider = writer.getConfig().getSimilarityProvider(); this.maxThreadStates = maxThreadStates; this.fieldInfos = fieldInfos; - this.bufferedDeletes = bufferedDeletes; + this.bufferedDeletesStream = bufferedDeletesStream; flushControl = writer.flushControl; consumer = indexingChain.getChain(this); @@ -501,23 +501,24 @@ final class DocumentsWriter { } // for testing - public SegmentDeletes getPendingDeletes() { + public BufferedDeletes getPendingDeletes() { return pendingDeletes; } private void pushDeletes(SegmentInfo newSegment, SegmentInfos segmentInfos) { // Lock order: DW -> BD if (pendingDeletes.any()) { - if (newSegment != null) { + if (segmentInfos.size() > 0 || newSegment != null) { if (infoStream != null) { - message("flush: push buffered deletes to newSegment"); + message("flush: push buffered deletes"); } - bufferedDeletes.pushDeletes(pendingDeletes, newSegment); - } else if (segmentInfos.size() > 0) { + bufferedDeletesStream.push(pendingDeletes); if (infoStream != null) { - message("flush: push buffered deletes to previously flushed segment " + segmentInfos.lastElement()); + message("flush: delGen=" + pendingDeletes.gen); + } + if (newSegment != null) { + newSegment.setBufferedDeletesGen(pendingDeletes.gen); } - bufferedDeletes.pushDeletes(pendingDeletes, segmentInfos.lastElement(), true); } else { if (infoStream != null) { message("flush: drop buffered deletes: no segments"); @@ -526,7 +527,9 @@ final class DocumentsWriter { // there are no segments, the deletions cannot // affect anything. 
} - pendingDeletes = new SegmentDeletes(); + pendingDeletes = new BufferedDeletes(); + } else if (newSegment != null) { + newSegment.setBufferedDeletesGen(bufferedDeletesStream.getNextGen()); } } @@ -639,7 +642,6 @@ final class DocumentsWriter { // Lock order: IW -> DW -> BD pushDeletes(newSegment, segmentInfos); - if (infoStream != null) { message("flush time " + (System.currentTimeMillis()-startTime) + " msec"); } @@ -964,7 +966,7 @@ final class DocumentsWriter { final boolean doBalance; final long deletesRAMUsed; - deletesRAMUsed = bufferedDeletes.bytesUsed(); + deletesRAMUsed = bufferedDeletesStream.bytesUsed(); synchronized(this) { if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH || bufferIsFull) { diff --git a/lucene/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/src/java/org/apache/lucene/index/IndexWriter.java index 613d47058f5..20f7b35bbf8 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexWriter.java +++ b/lucene/src/java/org/apache/lucene/index/IndexWriter.java @@ -251,7 +251,7 @@ public class IndexWriter implements Closeable { private final AtomicInteger flushDeletesCount = new AtomicInteger(); final ReaderPool readerPool = new ReaderPool(); - final BufferedDeletes bufferedDeletes; + final BufferedDeletesStream bufferedDeletesStream; // This is a "write once" variable (like the organic dye // on a DVD-R that may or may not be heated by a laser and @@ -707,8 +707,8 @@ public class IndexWriter implements Closeable { mergedSegmentWarmer = conf.getMergedSegmentWarmer(); codecs = conf.getCodecProvider(); - bufferedDeletes = new BufferedDeletes(messageID); - bufferedDeletes.setInfoStream(infoStream); + bufferedDeletesStream = new BufferedDeletesStream(messageID); + bufferedDeletesStream.setInfoStream(infoStream); poolReaders = conf.getReaderPooling(); OpenMode mode = conf.getOpenMode(); @@ -773,7 +773,7 @@ public class IndexWriter implements Closeable { setRollbackSegmentInfos(segmentInfos); - docWriter = new 
DocumentsWriter(directory, this, conf.getIndexingChain(), conf.getMaxThreadStates(), getCurrentFieldInfos(), bufferedDeletes); + docWriter = new DocumentsWriter(directory, this, conf.getIndexingChain(), conf.getMaxThreadStates(), getCurrentFieldInfos(), bufferedDeletesStream); docWriter.setInfoStream(infoStream); // Default deleter (for backwards compatibility) is @@ -921,7 +921,7 @@ public class IndexWriter implements Closeable { this.infoStream = infoStream; docWriter.setInfoStream(infoStream); deleter.setInfoStream(infoStream); - bufferedDeletes.setInfoStream(infoStream); + bufferedDeletesStream.setInfoStream(infoStream); if (infoStream != null) messageState(); } @@ -1167,7 +1167,7 @@ public class IndexWriter implements Closeable { public synchronized boolean hasDeletions() throws IOException { ensureOpen(); - if (bufferedDeletes.any()) { + if (bufferedDeletesStream.any()) { return true; } if (docWriter.anyDeletions()) { @@ -1882,7 +1882,7 @@ public class IndexWriter implements Closeable { mergePolicy.close(); mergeScheduler.close(); - bufferedDeletes.clear(); + bufferedDeletesStream.clear(); synchronized(this) { @@ -2525,10 +2525,10 @@ public class IndexWriter implements Closeable { // tiny segments: if (flushControl.getFlushDeletes() || (config.getRAMBufferSizeMB() != IndexWriterConfig.DISABLE_AUTO_FLUSH && - bufferedDeletes.bytesUsed() > (1024*1024*config.getRAMBufferSizeMB()/2))) { + bufferedDeletesStream.bytesUsed() > (1024*1024*config.getRAMBufferSizeMB()/2))) { applyAllDeletes = true; if (infoStream != null) { - message("force apply deletes bytesUsed=" + bufferedDeletes.bytesUsed() + " vs ramBuffer=" + (1024*1024*config.getRAMBufferSizeMB())); + message("force apply deletes bytesUsed=" + bufferedDeletesStream.bytesUsed() + " vs ramBuffer=" + (1024*1024*config.getRAMBufferSizeMB())); } } } @@ -2538,12 +2538,15 @@ public class IndexWriter implements Closeable { message("apply all deletes during flush"); } flushDeletesCount.incrementAndGet(); - if 
(bufferedDeletes.applyDeletes(readerPool, segmentInfos, segmentInfos)) { + final BufferedDeletesStream.ApplyDeletesResult result = bufferedDeletesStream.applyDeletes(readerPool, segmentInfos); + if (result.anyDeletes) { checkpoint(); } + bufferedDeletesStream.prune(segmentInfos); + assert !bufferedDeletesStream.any(); flushControl.clearDeletes(); } else if (infoStream != null) { - message("don't apply deletes now delTermCount=" + bufferedDeletes.numTerms() + " bytesUsed=" + bufferedDeletes.bytesUsed()); + message("don't apply deletes now delTermCount=" + bufferedDeletesStream.numTerms() + " bytesUsed=" + bufferedDeletesStream.bytesUsed()); } doAfterFlush(); @@ -2569,7 +2572,7 @@ public class IndexWriter implements Closeable { */ public final long ramSizeInBytes() { ensureOpen(); - return docWriter.bytesUsed() + bufferedDeletes.bytesUsed(); + return docWriter.bytesUsed() + bufferedDeletesStream.bytesUsed(); } /** Expert: Return the number of documents currently @@ -2579,28 +2582,12 @@ public class IndexWriter implements Closeable { return docWriter.getNumDocs(); } - private int ensureContiguousMerge(MergePolicy.OneMerge merge) { - - int first = segmentInfos.indexOf(merge.segments.info(0)); - if (first == -1) - throw new MergePolicy.MergeException("could not find segment " + merge.segments.info(0).name + " in current index " + segString(), directory); - - final int numSegments = segmentInfos.size(); - - final int numSegmentsToMerge = merge.segments.size(); - for(int i=0;i= numSegments || !segmentInfos.info(first+i).equals(info)) { - if (segmentInfos.indexOf(info) == -1) - throw new MergePolicy.MergeException("MergePolicy selected a segment (" + info.name + ") that is not in the current index " + segString(), directory); - else - throw new MergePolicy.MergeException("MergePolicy selected non-contiguous segments to merge (" + merge.segString(directory) + " vs " + segString() + "), which IndexWriter (currently) cannot handle", - directory); + private void 
ensureValidMerge(MergePolicy.OneMerge merge) { + for(SegmentInfo info : merge.segments) { + if (segmentInfos.indexOf(info) == -1) { + throw new MergePolicy.MergeException("MergePolicy selected a segment (" + info.name + ") that is not in the current index " + segString(), directory); } } - - return first; } /** Carefully merges deletes for the segments we just @@ -2625,9 +2612,11 @@ public class IndexWriter implements Closeable { // started merging: int docUpto = 0; int delCount = 0; + long minGen = Long.MAX_VALUE; for(int i=0; i < sourceSegments.size(); i++) { SegmentInfo info = sourceSegments.info(i); + minGen = Math.min(info.getBufferedDeletesGen(), minGen); int docCount = info.docCount; SegmentReader previousReader = merge.readersClone[i]; final Bits prevDelDocs = previousReader.getDeletedDocs(); @@ -2678,9 +2667,17 @@ public class IndexWriter implements Closeable { assert mergedReader.numDeletedDocs() == delCount; mergedReader.hasChanges = delCount > 0; + + // If new deletes were applied while we were merging + // (which happens if eg commit() or getReader() is + // called during our merge), then it better be the case + // that the delGen has increased for all our merged + // segments: + assert !mergedReader.hasChanges || minGen > mergedReader.getSegmentInfo().getBufferedDeletesGen(); + + mergedReader.getSegmentInfo().setBufferedDeletesGen(minGen); } - /* FIXME if we want to support non-contiguous segment merges */ synchronized private boolean commitMerge(MergePolicy.OneMerge merge, SegmentReader mergedReader) throws IOException { assert testPoint("startCommitMerge"); @@ -2706,7 +2703,7 @@ public class IndexWriter implements Closeable { return false; } - final int start = ensureContiguousMerge(merge); + ensureValidMerge(merge); commitMergedDeletes(merge, mergedReader); @@ -2716,10 +2713,32 @@ public class IndexWriter implements Closeable { // format as well: setMergeDocStoreIsCompoundFile(merge); - segmentInfos.subList(start, start + 
merge.segments.size()).clear(); assert !segmentInfos.contains(merge.info); - segmentInfos.add(start, merge.info); - + + final Set mergedAway = new HashSet(merge.segments); + int segIdx = 0; + int newSegIdx = 0; + boolean inserted = false; + final int curSegCount = segmentInfos.size(); + while(segIdx < curSegCount) { + final SegmentInfo info = segmentInfos.info(segIdx++); + if (mergedAway.contains(info)) { + if (!inserted) { + segmentInfos.set(segIdx-1, merge.info); + inserted = true; + newSegIdx++; + } + } else { + segmentInfos.set(newSegIdx++, info); + } + } + assert newSegIdx == curSegCount - merge.segments.size() + 1; + segmentInfos.subList(newSegIdx, segmentInfos.size()).clear(); + + if (infoStream != null) { + message("after commit: " + segString()); + } + closeMergeReaders(merge, false); // Must note the change to segmentInfos so any commits @@ -2731,16 +2750,12 @@ public class IndexWriter implements Closeable { // disk, updating SegmentInfo, etc.: readerPool.clear(merge.segments); - // remove pending deletes of the segments - // that were merged, moving them onto the segment just - // before the merged segment - // Lock order: IW -> BD - bufferedDeletes.commitMerge(merge); - if (merge.optimize) { // cascade the optimize: segmentsToOptimize.add(merge.info); } + + return true; } @@ -2868,7 +2883,7 @@ public class IndexWriter implements Closeable { } } - ensureContiguousMerge(merge); + ensureValidMerge(merge); pendingMerges.add(merge); @@ -2918,6 +2933,9 @@ public class IndexWriter implements Closeable { throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot merge"); } + // TODO: is there any perf benefit to sorting + // merged segments? eg biggest to smallest? 
+ if (merge.info != null) // mergeInit already done return; @@ -2925,16 +2943,22 @@ public class IndexWriter implements Closeable { if (merge.isAborted()) return; - // Lock order: IW -> BD - if (bufferedDeletes.applyDeletes(readerPool, segmentInfos, merge.segments)) { - checkpoint(); - } - // Bind a new segment name here so even with // ConcurrentMergePolicy we keep deterministic segment // names. merge.info = new SegmentInfo(newSegmentName(), 0, directory, false, false, null, false); + // Lock order: IW -> BD + final BufferedDeletesStream.ApplyDeletesResult result = bufferedDeletesStream.applyDeletes(readerPool, merge.segments); + if (result.anyDeletes) { + checkpoint(); + } + + merge.info.setBufferedDeletesGen(result.gen); + + // Lock order: IW -> BD + bufferedDeletesStream.prune(segmentInfos); + Map details = new HashMap(); details.put("optimize", Boolean.toString(merge.optimize)); details.put("mergeFactor", Integer.toString(merge.segments.size())); @@ -3498,7 +3522,7 @@ public class IndexWriter implements Closeable { } synchronized boolean nrtIsCurrent(SegmentInfos infos) { - return infos.version == segmentInfos.version && !docWriter.anyChanges() && !bufferedDeletes.any(); + return infos.version == segmentInfos.version && !docWriter.anyChanges() && !bufferedDeletesStream.any(); } synchronized boolean isClosed() { @@ -3665,7 +3689,7 @@ public class IndexWriter implements Closeable { final double ramBufferSizeMB = config.getRAMBufferSizeMB(); if (ramBufferSizeMB != IndexWriterConfig.DISABLE_AUTO_FLUSH) { final long limit = (long) (ramBufferSizeMB*1024*1024); - long used = bufferedDeletes.bytesUsed() + docWriter.bytesUsed(); + long used = bufferedDeletesStream.bytesUsed() + docWriter.bytesUsed(); if (used >= limit) { // DocumentsWriter may be able to free up some @@ -3673,7 +3697,7 @@ public class IndexWriter implements Closeable { // Lock order: FC -> DW docWriter.balanceRAM(); - used = bufferedDeletes.bytesUsed() + docWriter.bytesUsed(); + used = 
bufferedDeletesStream.bytesUsed() + docWriter.bytesUsed(); if (used >= limit) { return setFlushPending("ram full: " + reason, false); } diff --git a/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java b/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java index 1c84bb01d5d..9dd6278f78f 100644 --- a/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java +++ b/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java @@ -19,6 +19,8 @@ package org.apache.lucene.index; import java.io.IOException; import java.util.Set; +import java.util.Arrays; +import java.util.Comparator; /**

This class implements a {@link MergePolicy} that tries * to merge segments into levels of exponentially @@ -67,6 +69,7 @@ public abstract class LogMergePolicy extends MergePolicy { // out there wrote his own LMP ... protected long maxMergeSizeForOptimize = Long.MAX_VALUE; protected int maxMergeDocs = DEFAULT_MAX_MERGE_DOCS; + protected boolean requireContiguousMerge = false; protected double noCFSRatio = DEFAULT_NO_CFS_RATIO; @@ -105,6 +108,21 @@ public abstract class LogMergePolicy extends MergePolicy { writer.get().message("LMP: " + message); } + /** If true, merges must be in-order slice of the + * segments. If false, then the merge policy is free to + * pick any segments. The default is false, which is + * in general more efficient than true since it gives the + * merge policy more freedom to pick closely sized + * segments. */ + public void setRequireContiguousMerge(boolean v) { + requireContiguousMerge = v; + } + + /** See {@link #setRequireContiguousMerge}. */ + public boolean getRequireContiguousMerge() { + return requireContiguousMerge; + } + /**

Returns the number of segments that are merged at * once and also controls the total number of segments * allowed to accumulate in the index.

*/ @@ -356,6 +374,8 @@ public abstract class LogMergePolicy extends MergePolicy { } return null; } + + // TODO: handle non-contiguous merge case differently? // Find the newest (rightmost) segment that needs to // be optimized (other segments may have been flushed @@ -454,6 +474,37 @@ public abstract class LogMergePolicy extends MergePolicy { return spec; } + private static class SegmentInfoAndLevel implements Comparable { + SegmentInfo info; + float level; + int index; + + public SegmentInfoAndLevel(SegmentInfo info, float level, int index) { + this.info = info; + this.level = level; + this.index = index; + } + + // Sorts largest to smallest + public int compareTo(Object o) { + SegmentInfoAndLevel other = (SegmentInfoAndLevel) o; + if (level < other.level) + return 1; + else if (level > other.level) + return -1; + else + return 0; + } + } + + private static class SortByIndex implements Comparator { + public int compare(SegmentInfoAndLevel o1, SegmentInfoAndLevel o2) { + return o1.index - o2.index; + } + } + + private static final SortByIndex sortByIndex = new SortByIndex(); + /** Checks if any merges are now necessary and returns a * {@link MergePolicy.MergeSpecification} if so. A merge * is necessary when there are more than {@link @@ -470,7 +521,7 @@ public abstract class LogMergePolicy extends MergePolicy { // Compute levels, which is just log (base mergeFactor) // of the size of each segment - float[] levels = new float[numSegments]; + SegmentInfoAndLevel[] levels = new SegmentInfoAndLevel[numSegments]; final float norm = (float) Math.log(mergeFactor); for(int i=0;i. Say list - allocates ~2X size (2*POINTER). Integer is OBJ_HEADER - + int */ - final static int BYTES_PER_DEL_DOCID = 2*RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + RamUsageEstimator.NUM_BYTES_INT; - - /* Rough logic: HashMap has an array[Entry] w/ varying - load factor (say 2 * POINTER). 
Entry is object w/ - Query key, Integer val, int hash, Entry next - (OBJ_HEADER + 3*POINTER + INT). Query we often - undercount (say 24 bytes). Integer is OBJ_HEADER + INT. */ - final static int BYTES_PER_DEL_QUERY = 5*RamUsageEstimator.NUM_BYTES_OBJECT_REF + 2*RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 2*RamUsageEstimator.NUM_BYTES_INT + 24; - - // TODO: many of the deletes stored here will map to - // Integer.MAX_VALUE; we could be more efficient for this - // case ie use a SortedSet not a SortedMap. But: Java's - // SortedSet impls are simply backed by a Map so we won't - // save anything unless we do something custom... - final AtomicInteger numTermDeletes = new AtomicInteger(); - final SortedMap terms = new TreeMap(); - final Map queries = new HashMap(); - final List docIDs = new ArrayList(); - - public static final Integer MAX_INT = Integer.valueOf(Integer.MAX_VALUE); - - final AtomicLong bytesUsed = new AtomicLong(); - - private final static boolean VERBOSE_DELETES = false; - - @Override - public String toString() { - if (VERBOSE_DELETES) { - return "SegmentDeletes [numTerms=" + numTermDeletes + ", terms=" + terms - + ", queries=" + queries + ", docIDs=" + docIDs + ", bytesUsed=" - + bytesUsed + "]"; - } else { - String s = ""; - if (numTermDeletes.get() != 0) { - s += " " + numTermDeletes.get() + " deleted terms (unique count=" + terms.size() + ")"; - } - if (queries.size() != 0) { - s += " " + queries.size() + " deleted queries"; - } - if (docIDs.size() != 0) { - s += " " + docIDs.size() + " deleted docIDs"; - } - if (bytesUsed.get() != 0) { - s += " bytesUsed=" + bytesUsed.get(); - } - - return s; - } - } - - void update(SegmentDeletes in, boolean noLimit) { - numTermDeletes.addAndGet(in.numTermDeletes.get()); - for (Map.Entry ent : in.terms.entrySet()) { - final Term term = ent.getKey(); - if (!terms.containsKey(term)) { - // only incr bytesUsed if this term wasn't already buffered: - bytesUsed.addAndGet(BYTES_PER_DEL_TERM); - } - final Integer limit; - 
if (noLimit) { - limit = MAX_INT; - } else { - limit = ent.getValue(); - } - terms.put(term, limit); - } - - for (Map.Entry ent : in.queries.entrySet()) { - final Query query = ent.getKey(); - if (!queries.containsKey(query)) { - // only incr bytesUsed if this query wasn't already buffered: - bytesUsed.addAndGet(BYTES_PER_DEL_QUERY); - } - final Integer limit; - if (noLimit) { - limit = MAX_INT; - } else { - limit = ent.getValue(); - } - queries.put(query, limit); - } - - // docIDs never move across segments and the docIDs - // should already be cleared - } - - public void addQuery(Query query, int docIDUpto) { - Integer current = queries.put(query, docIDUpto); - // increment bytes used only if the query wasn't added so far. - if (current == null) { - bytesUsed.addAndGet(BYTES_PER_DEL_QUERY); - } - } - - public void addDocID(int docID) { - docIDs.add(Integer.valueOf(docID)); - bytesUsed.addAndGet(BYTES_PER_DEL_DOCID); - } - - public void addTerm(Term term, int docIDUpto) { - Integer current = terms.get(term); - if (current != null && docIDUpto < current) { - // Only record the new number if it's greater than the - // current one. This is important because if multiple - // threads are replacing the same doc at nearly the - // same time, it's possible that one thread that got a - // higher docID is scheduled before the other - // threads. If we blindly replace than we can get - // double-doc in the segment. 
- return; - } - - terms.put(term, Integer.valueOf(docIDUpto)); - numTermDeletes.incrementAndGet(); - if (current == null) { - bytesUsed.addAndGet(BYTES_PER_DEL_TERM + term.bytes.length); - } - } - - void clear() { - terms.clear(); - queries.clear(); - docIDs.clear(); - numTermDeletes.set(0); - bytesUsed.set(0); - } - - void clearDocIDs() { - bytesUsed.addAndGet(-docIDs.size()*BYTES_PER_DEL_DOCID); - docIDs.clear(); - } - - boolean any() { - return terms.size() > 0 || docIDs.size() > 0 || queries.size() > 0; - } -} diff --git a/lucene/src/java/org/apache/lucene/index/SegmentInfo.java b/lucene/src/java/org/apache/lucene/index/SegmentInfo.java index e668fb9a279..47d0b54795d 100644 --- a/lucene/src/java/org/apache/lucene/index/SegmentInfo.java +++ b/lucene/src/java/org/apache/lucene/index/SegmentInfo.java @@ -94,6 +94,10 @@ public final class SegmentInfo { // specific versions afterwards ("3.0", "3.1" etc.). // see Constants.LUCENE_MAIN_VERSION. private String version; + + // NOTE: only used in-RAM by IW to track buffered deletes; + // this is never written to/read from the Directory + private long bufferedDeletesGen; public SegmentInfo(String name, int docCount, Directory dir, boolean isCompoundFile, boolean hasProx, SegmentCodecs segmentCodecs, boolean hasVectors) { @@ -679,5 +683,12 @@ public final class SegmentInfo { public String getVersion() { return version; } - + + long getBufferedDeletesGen() { + return bufferedDeletesGen; + } + + void setBufferedDeletesGen(long v) { + bufferedDeletesGen = v; + } } diff --git a/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java b/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java index 3bda6232951..cd6ebb358f0 100644 --- a/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java +++ b/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java @@ -91,8 +91,8 @@ public class TestMergeSchedulerExternal extends LuceneTestCase { IndexWriter writer = new IndexWriter(dir, 
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergeScheduler(new MyMergeScheduler()) - .setMaxBufferedDocs(2).setRAMBufferSizeMB( - IndexWriterConfig.DISABLE_AUTO_FLUSH)); + .setMaxBufferedDocs(2).setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH) + .setMergePolicy(newLogMergePolicy())); LogMergePolicy logMP = (LogMergePolicy) writer.getConfig().getMergePolicy(); logMP.setMergeFactor(10); for(int i=0;i<20;i++) diff --git a/lucene/src/test/org/apache/lucene/TestSearch.java b/lucene/src/test/org/apache/lucene/TestSearch.java index 7878e3a9f0e..619a60485a6 100644 --- a/lucene/src/test/org/apache/lucene/TestSearch.java +++ b/lucene/src/test/org/apache/lucene/TestSearch.java @@ -74,8 +74,11 @@ public class TestSearch extends LuceneTestCase { Directory directory = newDirectory(); Analyzer analyzer = new MockAnalyzer(); IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer); - LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy(); - lmp.setUseCompoundFile(useCompoundFile); + MergePolicy mp = conf.getMergePolicy(); + if (mp instanceof LogMergePolicy) { + ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile); + } + IndexWriter writer = new IndexWriter(directory, conf); String[] docs = { @@ -90,6 +93,7 @@ public class TestSearch extends LuceneTestCase { for (int j = 0; j < docs.length; j++) { Document d = new Document(); d.add(newField("contents", docs[j], Field.Store.YES, Field.Index.ANALYZED)); + d.add(newField("id", ""+j, Field.Index.NOT_ANALYZED_NO_NORMS)); writer.addDocument(d); } writer.close(); @@ -106,6 +110,10 @@ public class TestSearch extends LuceneTestCase { }; ScoreDoc[] hits = null; + Sort sort = new Sort(new SortField[] { + SortField.FIELD_SCORE, + new SortField("id", SortField.INT)}); + QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "contents", analyzer); parser.setPhraseSlop(4); for (int j = 0; j < queries.length; j++) { @@ -115,7 +123,7 @@ public class TestSearch extends LuceneTestCase 
{ System.out.println("TEST: query=" + query); } - hits = searcher.search(query, null, 1000).scoreDocs; + hits = searcher.search(query, null, 1000, sort).scoreDocs; out.println(hits.length + " total results"); for (int i = 0 ; i < hits.length && i < 10; i++) { diff --git a/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java b/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java index 08229ca220b..aec32f66285 100644 --- a/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java +++ b/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java @@ -80,8 +80,10 @@ public class TestSearchForDuplicates extends LuceneTestCase { Directory directory = newDirectory(); Analyzer analyzer = new MockAnalyzer(); IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer); - LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy(); - lmp.setUseCompoundFile(useCompoundFiles); + final MergePolicy mp = conf.getMergePolicy(); + if (mp instanceof LogMergePolicy) { + ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFiles); + } IndexWriter writer = new IndexWriter(directory, conf); if (VERBOSE) { System.out.println("TEST: now build index"); @@ -93,9 +95,6 @@ public class TestSearchForDuplicates extends LuceneTestCase { for (int j = 0; j < MAX_DOCS; j++) { Document d = new Document(); d.add(newField(PRIORITY_FIELD, HIGH_PRIORITY, Field.Store.YES, Field.Index.ANALYZED)); - - // NOTE: this ID_FIELD produces no tokens since - // MockAnalyzer discards numbers d.add(newField(ID_FIELD, Integer.toString(j), Field.Store.YES, Field.Index.ANALYZED)); writer.addDocument(d); } @@ -112,7 +111,11 @@ public class TestSearchForDuplicates extends LuceneTestCase { System.out.println("TEST: search query=" + query); } - ScoreDoc[] hits = searcher.search(query, null, MAX_DOCS).scoreDocs; + final Sort sort = new Sort(new SortField[] { + SortField.FIELD_SCORE, + new SortField(ID_FIELD, SortField.INT)}); + + ScoreDoc[] hits = searcher.search(query, null, 
MAX_DOCS, sort).scoreDocs; printHits(out, hits, searcher); checkHits(hits, MAX_DOCS, searcher); @@ -127,7 +130,7 @@ public class TestSearchForDuplicates extends LuceneTestCase { query = parser.parse(HIGH_PRIORITY + " OR " + MED_PRIORITY); out.println("Query: " + query.toString(PRIORITY_FIELD)); - hits = searcher.search(query, null, MAX_DOCS).scoreDocs; + hits = searcher.search(query, null, MAX_DOCS, sort).scoreDocs; printHits(out, hits, searcher); checkHits(hits, MAX_DOCS, searcher); @@ -149,7 +152,7 @@ public class TestSearchForDuplicates extends LuceneTestCase { private void checkHits(ScoreDoc[] hits, int expectedCount, IndexSearcher searcher) throws IOException { assertEquals("total results", expectedCount, hits.length); for (int i = 0 ; i < hits.length; i++) { - if ( i < 10 || (i > 94 && i < 105) ) { + if (i < 10 || (i > 94 && i < 105) ) { Document d = searcher.doc(hits[i].doc); assertEquals("check " + i, String.valueOf(i), d.get(ID_FIELD)); } diff --git a/lucene/src/test/org/apache/lucene/index/MockRandomMergePolicy.java b/lucene/src/test/org/apache/lucene/index/MockRandomMergePolicy.java new file mode 100644 index 00000000000..7630dc7d220 --- /dev/null +++ b/lucene/src/test/org/apache/lucene/index/MockRandomMergePolicy.java @@ -0,0 +1,93 @@ +package org.apache.lucene.index; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.IOException; +import java.util.Collections; +import java.util.Random; +import java.util.Set; + +import org.apache.lucene.util._TestUtil; + +public class MockRandomMergePolicy extends MergePolicy { + private final Random random; + + public MockRandomMergePolicy(Random random) { + // fork a private random, since we are called + // unpredictably from threads: + this.random = new Random(random.nextLong()); + } + + @Override + public MergeSpecification findMerges(SegmentInfos segmentInfos) { + MergeSpecification mergeSpec = null; + //System.out.println("MRMP: findMerges sis=" + segmentInfos); + + if (segmentInfos.size() > 1 && random.nextInt(5) == 3) { + + SegmentInfos segmentInfos2 = new SegmentInfos(); + segmentInfos2.addAll(segmentInfos); + Collections.shuffle(segmentInfos2, random); + + // TODO: sometimes make more than 1 merge? 
+ mergeSpec = new MergeSpecification(); + final int segsToMerge = _TestUtil.nextInt(random, 1, segmentInfos.size()); + mergeSpec.add(new OneMerge(segmentInfos2.range(0, segsToMerge))); + } + + return mergeSpec; + } + + @Override + public MergeSpecification findMergesForOptimize( + SegmentInfos segmentInfos, int maxSegmentCount, Set segmentsToOptimize) + throws CorruptIndexException, IOException { + + //System.out.println("MRMP: findMergesForOptimize sis=" + segmentInfos); + MergeSpecification mergeSpec = null; + if (segmentInfos.size() > 1 || (segmentInfos.size() == 1 && segmentInfos.info(0).hasDeletions())) { + mergeSpec = new MergeSpecification(); + SegmentInfos segmentInfos2 = new SegmentInfos(); + segmentInfos2.addAll(segmentInfos); + Collections.shuffle(segmentInfos2, random); + int upto = 0; + while(upto < segmentInfos.size()) { + int inc = _TestUtil.nextInt(random, 1, segmentInfos.size()-upto); + mergeSpec.add(new OneMerge(segmentInfos2.range(upto, upto+inc))); + upto += inc; + } + } + return mergeSpec; + } + + @Override + public MergeSpecification findMergesToExpungeDeletes( + SegmentInfos segmentInfos) + throws CorruptIndexException, IOException { + return findMerges(segmentInfos); + } + + @Override + public void close() { + } + + @Override + public boolean useCompoundFile(SegmentInfos infos, SegmentInfo mergedInfo) throws IOException { + return random.nextBoolean(); + } +} diff --git a/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java b/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java index 52d5b7d7d46..69063eb5b20 100755 --- a/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java +++ b/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java @@ -452,6 +452,7 @@ public class TestAddIndexes extends LuceneTestCase { setMaxBufferedDocs(100). setMergePolicy(newLogMergePolicy(10)) ); + writer.setInfoStream(VERBOSE ? 
System.out : null); writer.addIndexes(aux); assertEquals(30, writer.maxDoc()); assertEquals(3, writer.getSegmentCount()); diff --git a/lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java b/lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java index b5f44752178..95da21de23e 100644 --- a/lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java +++ b/lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java @@ -131,6 +131,7 @@ public class TestAtomicUpdate extends LuceneTestCase { .setMaxBufferedDocs(7); ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(3); IndexWriter writer = new MockIndexWriter(directory, conf); + writer.setInfoStream(VERBOSE ? System.out : null); // Establish a base index of 100 docs: for(int i=0;i<100;i++) { diff --git a/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java b/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java index 9c7c3bf0846..5e50c968b51 100644 --- a/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java +++ b/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java @@ -132,11 +132,15 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase { IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()) .setMergePolicy(mp)); + writer.setInfoStream(VERBOSE ? 
System.out : null); Document doc = new Document(); Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); doc.add(idField); for(int i=0;i<10;i++) { + if (VERBOSE) { + System.out.println("\nTEST: cycle"); + } for(int j=0;j<100;j++) { idField.setValue(Integer.toString(i*100+j)); writer.addDocument(doc); @@ -144,6 +148,9 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase { int delID = i; while(delID < 100*(1+i)) { + if (VERBOSE) { + System.out.println("TEST: del " + delID); + } writer.deleteDocuments(new Term("id", ""+delID)); delID += 10; } diff --git a/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java b/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java index 55592662b0b..6d90baa7ff5 100644 --- a/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java +++ b/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java @@ -119,6 +119,9 @@ public class TestDeletionPolicy extends LuceneTestCase { } public void onInit(List commits) throws IOException { + if (VERBOSE) { + System.out.println("TEST: onInit"); + } verifyCommitOrder(commits); numOnInit++; // do no deletions on init @@ -126,6 +129,9 @@ public class TestDeletionPolicy extends LuceneTestCase { } public void onCommit(List commits) throws IOException { + if (VERBOSE) { + System.out.println("TEST: onCommit"); + } verifyCommitOrder(commits); doDeletes(commits, true); } @@ -200,8 +206,10 @@ public class TestDeletionPolicy extends LuceneTestCase { IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()) .setIndexDeletionPolicy(policy); - LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy(); - lmp.setUseCompoundFile(true); + MergePolicy mp = conf.getMergePolicy(); + if (mp instanceof LogMergePolicy) { + ((LogMergePolicy) mp).setUseCompoundFile(true); + } IndexWriter writer = new IndexWriter(dir, conf); writer.close(); @@ -215,8 +223,10 @@ public class TestDeletionPolicy extends LuceneTestCase { conf = 
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode( OpenMode.APPEND).setIndexDeletionPolicy(policy); - lmp = (LogMergePolicy) conf.getMergePolicy(); - lmp.setUseCompoundFile(true); + mp = conf.getMergePolicy(); + if (mp instanceof LogMergePolicy) { + ((LogMergePolicy) mp).setUseCompoundFile(true); + } writer = new IndexWriter(dir, conf); for(int j=0;j<17;j++) { addDoc(writer); @@ -280,6 +290,10 @@ public class TestDeletionPolicy extends LuceneTestCase { public void testKeepAllDeletionPolicy() throws IOException { for(int pass=0;pass<2;pass++) { + if (VERBOSE) { + System.out.println("TEST: cycle pass=" + pass); + } + boolean useCompoundFile = (pass % 2) != 0; // Never deletes a commit @@ -292,34 +306,48 @@ public class TestDeletionPolicy extends LuceneTestCase { TEST_VERSION_CURRENT, new MockAnalyzer()) .setIndexDeletionPolicy(policy).setMaxBufferedDocs(10) .setMergeScheduler(new SerialMergeScheduler()); - LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy(); - lmp.setUseCompoundFile(useCompoundFile); - lmp.setMergeFactor(10); + MergePolicy mp = conf.getMergePolicy(); + if (mp instanceof LogMergePolicy) { + ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile); + } IndexWriter writer = new IndexWriter(dir, conf); for(int i=0;i<107;i++) { addDoc(writer); } writer.close(); - conf = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer()).setOpenMode( - OpenMode.APPEND).setIndexDeletionPolicy(policy); - lmp = (LogMergePolicy) conf.getMergePolicy(); - lmp.setUseCompoundFile(useCompoundFile); - writer = new IndexWriter(dir, conf); - writer.optimize(); - writer.close(); - - assertEquals(1, policy.numOnInit); + final boolean isOptimized; + { + IndexReader r = IndexReader.open(dir); + isOptimized = r.isOptimized(); + r.close(); + } + if (!isOptimized) { + conf = newIndexWriterConfig(TEST_VERSION_CURRENT, + new MockAnalyzer()).setOpenMode( + OpenMode.APPEND).setIndexDeletionPolicy(policy); + mp = conf.getMergePolicy(); + if (mp 
instanceof LogMergePolicy) { + ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile); + } + if (VERBOSE) { + System.out.println("TEST: open writer for optimize"); + } + writer = new IndexWriter(dir, conf); + writer.setInfoStream(VERBOSE ? System.out : null); + writer.optimize(); + writer.close(); + } + assertEquals(isOptimized ? 0:1, policy.numOnInit); // If we are not auto committing then there should // be exactly 2 commits (one per close above): - assertEquals(2, policy.numOnCommit); + assertEquals(1 + (isOptimized ? 0:1), policy.numOnCommit); // Test listCommits Collection commits = IndexReader.listCommits(dir); // 2 from closing writer - assertEquals(2, commits.size()); + assertEquals(1 + (isOptimized ? 0:1), commits.size()); // Make sure we can open a reader on each commit: for (final IndexCommit commit : commits) { @@ -480,8 +508,10 @@ public class TestDeletionPolicy extends LuceneTestCase { TEST_VERSION_CURRENT, new MockAnalyzer()) .setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy) .setMaxBufferedDocs(10); - LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy(); - lmp.setUseCompoundFile(useCompoundFile); + MergePolicy mp = conf.getMergePolicy(); + if (mp instanceof LogMergePolicy) { + ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile); + } IndexWriter writer = new IndexWriter(dir, conf); for(int i=0;i<107;i++) { addDoc(writer); @@ -490,8 +520,10 @@ public class TestDeletionPolicy extends LuceneTestCase { conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()) .setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy); - lmp = (LogMergePolicy) conf.getMergePolicy(); - lmp.setUseCompoundFile(useCompoundFile); + mp = conf.getMergePolicy(); + if (mp instanceof LogMergePolicy) { + ((LogMergePolicy) mp).setUseCompoundFile(true); + } writer = new IndexWriter(dir, conf); writer.optimize(); writer.close(); @@ -529,8 +561,10 @@ public class TestDeletionPolicy extends LuceneTestCase { TEST_VERSION_CURRENT, new MockAnalyzer()) 
.setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy) .setMaxBufferedDocs(10); - LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy(); - lmp.setUseCompoundFile(useCompoundFile); + MergePolicy mp = conf.getMergePolicy(); + if (mp instanceof LogMergePolicy) { + ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile); + } IndexWriter writer = new IndexWriter(dir, conf); for(int i=0;i<17;i++) { addDoc(writer); @@ -586,24 +620,34 @@ public class TestDeletionPolicy extends LuceneTestCase { IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()) .setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy); - LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy(); - lmp.setUseCompoundFile(useCompoundFile); + MergePolicy mp = conf.getMergePolicy(); + if (mp instanceof LogMergePolicy) { + ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile); + } IndexWriter writer = new IndexWriter(dir, conf); writer.close(); Term searchTerm = new Term("content", "aaa"); Query query = new TermQuery(searchTerm); for(int i=0;i 5 + extraFileCount); - + assertTrue("flush should have occurred and files should have been created", dir.listAll().length > 5 + extraFileCount); + // After rollback, IW should remove all files writer.rollback(); assertEquals("no files should exist in the directory after rollback", 0, dir.listAll().length); @@ -2846,7 +2844,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testNoUnwantedTVFiles() throws Exception { Directory dir = newDirectory(); - IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.01)); + IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.01).setMergePolicy(newLogMergePolicy())); ((LogMergePolicy) indexWriter.getConfig().getMergePolicy()).setUseCompoundFile(false); String 
BIG="alskjhlaksjghlaksjfhalksvjepgjioefgjnsdfjgefgjhelkgjhqewlrkhgwlekgrhwelkgjhwelkgrhwlkejg"; diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java index c546b2a3cb3..3763e54035c 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java @@ -684,7 +684,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { MockDirectoryWrapper dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)).setMaxBufferedDeleteTerms(2).setReaderPooling(false)); + TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)).setMaxBufferedDeleteTerms(2).setReaderPooling(false).setMergePolicy(newLogMergePolicy())); modifier.setInfoStream(VERBOSE ? System.out : null); LogMergePolicy lmp = (LogMergePolicy) modifier.getConfig().getMergePolicy(); diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java index da56333555e..101812330e1 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java @@ -288,6 +288,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { public void testExceptionDocumentsWriterInit() throws IOException { Directory dir = newDirectory(); MockIndexWriter2 w = new MockIndexWriter2(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())); + w.setInfoStream(VERBOSE ? 
System.out : null); Document doc = new Document(); doc.add(newField("field", "a field", Field.Store.YES, Field.Index.ANALYZED)); @@ -359,7 +360,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { public void testExceptionOnMergeInit() throws IOException { Directory dir = newDirectory(); IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()) - .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()); + .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()).setMergePolicy(newLogMergePolicy()); ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2); MockIndexWriter3 w = new MockIndexWriter3(dir, conf); w.doFail = true; @@ -527,7 +528,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { System.out.println("TEST: cycle i=" + i); } MockDirectoryWrapper dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer)); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setMergePolicy(newLogMergePolicy())); writer.setInfoStream(VERBOSE ? 
System.out : null); // don't allow a sudden merge to clean up the deleted @@ -844,7 +845,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { public void testOptimizeExceptions() throws IOException { Directory startDir = newDirectory(); - IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2); + IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy()); ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(100); IndexWriter w = new IndexWriter(startDir, conf); for(int i=0;i<27;i++) diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java index ecb44b9e0ae..0e50c7815e5 100755 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java @@ -104,7 +104,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase { dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()). setMaxBufferedDocs(10). - setMergePolicy(newLogMergePolicy()) + setMergePolicy(newInOrderLogMergePolicy()) ); for (int i = 0; i < 250; i++) { diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java index 6278b52c42c..57c5e26040d 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java @@ -58,7 +58,7 @@ public class TestIndexWriterMerging extends LuceneTestCase IndexWriter writer = new IndexWriter( merged, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()). - setMergePolicy(newLogMergePolicy(2)) + setMergePolicy(newInOrderLogMergePolicy(2)) ); writer.setInfoStream(VERBOSE ? 
System.out : null); writer.addIndexes(indexA, indexB); @@ -102,7 +102,7 @@ public class TestIndexWriterMerging extends LuceneTestCase newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()). setOpenMode(OpenMode.CREATE). setMaxBufferedDocs(2). - setMergePolicy(newLogMergePolicy(2)) + setMergePolicy(newInOrderLogMergePolicy(2)) ); for (int i = start; i < (start + numDocs); i++) diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java index 5016f5245d4..27f29a49a8e 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java @@ -232,7 +232,7 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase { // Make a new dir that will enforce disk usage: MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir)); - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND)); + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND).setMergePolicy(newLogMergePolicy())); IOException err = null; writer.setInfoStream(VERBOSE ? 
System.out : null); @@ -401,10 +401,10 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase { // required is at most 2X total input size of // indices so let's make sure: assertTrue("max free Directory space required exceeded 1X the total input index sizes during " + methodName + - ": max temp usage = " + (dir.getMaxUsedSizeInBytes()-startDiskUsage) + " bytes; " + - "starting disk usage = " + startDiskUsage + " bytes; " + - "input index disk usage = " + inputDiskUsage + " bytes", - (dir.getMaxUsedSizeInBytes()-startDiskUsage) < 2*(startDiskUsage + inputDiskUsage)); + ": max temp usage = " + (dir.getMaxUsedSizeInBytes()-startDiskUsage) + " bytes vs limit=" + (2*(startDiskUsage + inputDiskUsage)) + + "; starting disk usage = " + startDiskUsage + " bytes; " + + "input index disk usage = " + inputDiskUsage + " bytes", + (dir.getMaxUsedSizeInBytes()-startDiskUsage) < 2*(startDiskUsage + inputDiskUsage)); } // Make sure we don't hit disk full during close below: diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java index 6758e89b5d2..60f5e49fbb0 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java @@ -366,7 +366,7 @@ public class TestIndexWriterReader extends LuceneTestCase { int numDirs = 3; Directory mainDir = newDirectory(); - IndexWriter mainWriter = new IndexWriter(mainDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())); + IndexWriter mainWriter = new IndexWriter(mainDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newLogMergePolicy())); _TestUtil.reduceOpenFiles(mainWriter); mainWriter.setInfoStream(infoStream); @@ -900,7 +900,7 @@ public class TestIndexWriterReader extends LuceneTestCase { public void testExpungeDeletes() throws Throwable { Directory dir = newDirectory(); - final IndexWriter w = new 
IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())); + final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newLogMergePolicy())); Document doc = new Document(); doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED)); Field id = newField("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED); diff --git a/lucene/src/test/org/apache/lucene/index/TestLazyBug.java b/lucene/src/test/org/apache/lucene/index/TestLazyBug.java index 58681ab847f..13b668417c9 100755 --- a/lucene/src/test/org/apache/lucene/index/TestLazyBug.java +++ b/lucene/src/test/org/apache/lucene/index/TestLazyBug.java @@ -63,7 +63,7 @@ public class TestLazyBug extends LuceneTestCase { Directory dir = newDirectory(); try { IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer())); + TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newLogMergePolicy())); LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy(); lmp.setUseCompoundFile(false); diff --git a/lucene/src/test/org/apache/lucene/index/TestMaxTermFrequency.java b/lucene/src/test/org/apache/lucene/index/TestMaxTermFrequency.java index f3b7f2b3ca8..fe1f29be001 100644 --- a/lucene/src/test/org/apache/lucene/index/TestMaxTermFrequency.java +++ b/lucene/src/test/org/apache/lucene/index/TestMaxTermFrequency.java @@ -45,7 +45,7 @@ public class TestMaxTermFrequency extends LuceneTestCase { super.setUp(); dir = newDirectory(); IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(MockTokenizer.SIMPLE, true)); + new MockAnalyzer(MockTokenizer.SIMPLE, true)).setMergePolicy(newInOrderLogMergePolicy()); config.setSimilarityProvider(new TestSimilarity()); RandomIndexWriter writer = new RandomIndexWriter(random, dir, config); Document doc = new Document(); diff --git a/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java 
b/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java index 10dbc4f9fb0..425e790784d 100644 --- a/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java +++ b/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java @@ -69,7 +69,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase { public void testSimpleSkip() throws IOException { Directory dir = new CountingRAMDirectory(new RAMDirectory()); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new PayloadAnalyzer()).setCodecProvider(_TestUtil.alwaysCodec("Standard"))); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new PayloadAnalyzer()).setCodecProvider(_TestUtil.alwaysCodec("Standard")).setMergePolicy(newInOrderLogMergePolicy())); Term term = new Term("test", "a"); for (int i = 0; i < 5000; i++) { Document d1 = new Document(); diff --git a/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java b/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java index e070fbdc187..f88bb18a286 100644 --- a/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java +++ b/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java @@ -23,6 +23,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Set; +import java.util.HashSet; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.Executors; @@ -94,7 +95,7 @@ public class TestNRTThreads extends LuceneTestCase { } } }); - + final IndexWriter writer = new IndexWriter(dir, conf); if (VERBOSE) { writer.setInfoStream(System.out); @@ -105,10 +106,12 @@ public class TestNRTThreads extends LuceneTestCase { ((ConcurrentMergeScheduler) ms).setMaxThreadCount(1); ((ConcurrentMergeScheduler) ms).setMaxMergeCount(1); } + /* LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy(); if (lmp.getMergeFactor() > 5) { 
lmp.setMergeFactor(5); } + */ final int NUM_INDEX_THREADS = 2; final int NUM_SEARCH_THREADS = 3; @@ -118,7 +121,7 @@ public class TestNRTThreads extends LuceneTestCase { final AtomicInteger addCount = new AtomicInteger(); final AtomicInteger delCount = new AtomicInteger(); - final List delIDs = Collections.synchronizedList(new ArrayList()); + final Set delIDs = Collections.synchronizedSet(new HashSet()); final long stopTime = System.currentTimeMillis() + RUN_TIME_SEC*1000; Thread[] threads = new Thread[NUM_INDEX_THREADS]; @@ -142,20 +145,20 @@ public class TestNRTThreads extends LuceneTestCase { } if (random.nextBoolean()) { if (VERBOSE) { - //System.out.println(Thread.currentThread().getName() + ": add doc id:" + doc.get("id")); + System.out.println(Thread.currentThread().getName() + ": add doc id:" + doc.get("id")); } writer.addDocument(doc); } else { // we use update but it never replaces a // prior doc if (VERBOSE) { - //System.out.println(Thread.currentThread().getName() + ": update doc id:" + doc.get("id")); + System.out.println(Thread.currentThread().getName() + ": update doc id:" + doc.get("id")); } writer.updateDocument(new Term("id", doc.get("id")), doc); } if (random.nextInt(5) == 3) { if (VERBOSE) { - //System.out.println(Thread.currentThread().getName() + ": buffer del id:" + doc.get("id")); + System.out.println(Thread.currentThread().getName() + ": buffer del id:" + doc.get("id")); } toDeleteIDs.add(doc.get("id")); } @@ -164,6 +167,9 @@ public class TestNRTThreads extends LuceneTestCase { System.out.println(Thread.currentThread().getName() + ": apply " + toDeleteIDs.size() + " deletes"); } for(String id : toDeleteIDs) { + if (VERBOSE) { + System.out.println(Thread.currentThread().getName() + ": del term=id:" + id); + } writer.deleteDocuments(new Term("id", id)); } final int count = delCount.addAndGet(toDeleteIDs.size()); @@ -347,12 +353,28 @@ public class TestNRTThreads extends LuceneTestCase { final IndexReader r2 = writer.getReader(); final 
IndexSearcher s = new IndexSearcher(r2); + boolean doFail = false; for(String id : delIDs) { final TopDocs hits = s.search(new TermQuery(new Term("id", id)), 1); if (hits.totalHits != 0) { - fail("doc id=" + id + " is supposed to be deleted, but got docID=" + hits.scoreDocs[0].doc); + System.out.println("doc id=" + id + " is supposed to be deleted, but got docID=" + hits.scoreDocs[0].doc); + doFail = true; } } + + final int endID = Integer.parseInt(docs.nextDoc().get("id")); + for(int id=0;id docs = new HashMap(); IndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE).setRAMBufferSizeMB( - 0.1).setMaxBufferedDocs(maxBufferedDocs)); + 0.1).setMaxBufferedDocs(maxBufferedDocs).setMergePolicy(newLogMergePolicy())); w.setInfoStream(VERBOSE ? System.out : null); w.commit(); LogMergePolicy lmp = (LogMergePolicy) w.getConfig().getMergePolicy(); @@ -206,7 +206,7 @@ public class TestStressIndexing2 extends LuceneTestCase { IndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE) .setRAMBufferSizeMB(0.1).setMaxBufferedDocs(maxBufferedDocs).setMaxThreadStates(maxThreadStates) - .setReaderPooling(doReaderPooling)); + .setReaderPooling(doReaderPooling).setMergePolicy(newLogMergePolicy())); w.setInfoStream(VERBOSE ? 
System.out : null); LogMergePolicy lmp = (LogMergePolicy) w.getConfig().getMergePolicy(); lmp.setUseCompoundFile(false); @@ -248,7 +248,7 @@ public class TestStressIndexing2 extends LuceneTestCase { public static void indexSerial(Random random, Map docs, Directory dir) throws IOException { - IndexWriter w = new IndexWriter(dir, LuceneTestCase.newIndexWriterConfig(random, TEST_VERSION_CURRENT, new MockAnalyzer())); + IndexWriter w = new IndexWriter(dir, LuceneTestCase.newIndexWriterConfig(random, TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newLogMergePolicy())); // index all docs in a single thread Iterator iter = docs.values().iterator(); diff --git a/lucene/src/test/org/apache/lucene/search/BaseTestRangeFilter.java b/lucene/src/test/org/apache/lucene/search/BaseTestRangeFilter.java index 332ba958698..6d21b13185c 100644 --- a/lucene/src/test/org/apache/lucene/search/BaseTestRangeFilter.java +++ b/lucene/src/test/org/apache/lucene/search/BaseTestRangeFilter.java @@ -124,14 +124,14 @@ public class BaseTestRangeFilter extends LuceneTestCase { RandomIndexWriter writer = new RandomIndexWriter(random, index.index, newIndexWriterConfig(random, TEST_VERSION_CURRENT, new MockAnalyzer()) - .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000))); + .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)).setMergePolicy(newLogMergePolicy())); + _TestUtil.reduceOpenFiles(writer.w); + while(true) { int minCount = 0; int maxCount = 0; - _TestUtil.reduceOpenFiles(writer.w); - for (int d = minId; d <= maxId; d++) { idField.setValue(pad(d)); int r = index.allowNegativeRandomInts ? 
random.nextInt() : random diff --git a/lucene/src/test/org/apache/lucene/search/TestBoolean2.java b/lucene/src/test/org/apache/lucene/search/TestBoolean2.java index b4dfdbb6b6b..090eda2d18c 100644 --- a/lucene/src/test/org/apache/lucene/search/TestBoolean2.java +++ b/lucene/src/test/org/apache/lucene/search/TestBoolean2.java @@ -54,7 +54,7 @@ public class TestBoolean2 extends LuceneTestCase { @BeforeClass public static void beforeClass() throws Exception { directory = newDirectory(); - RandomIndexWriter writer= new RandomIndexWriter(random, directory); + RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy())); for (int i = 0; i < docFields.length; i++) { Document doc = new Document(); doc.add(newField(field, docFields[i], Field.Store.NO, Field.Index.ANALYZED)); diff --git a/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java b/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java index e2462e9207c..1d2f8a6f2de 100644 --- a/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java @@ -85,7 +85,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { index = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random, index, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()) - .setSimilarityProvider(sim)); + .setSimilarityProvider(sim).setMergePolicy(newInOrderLogMergePolicy())); // hed is the most important field, dek is secondary diff --git a/lucene/src/test/org/apache/lucene/search/TestDocBoost.java b/lucene/src/test/org/apache/lucene/search/TestDocBoost.java index c222d632bbd..f970477bda6 100644 --- a/lucene/src/test/org/apache/lucene/search/TestDocBoost.java +++ b/lucene/src/test/org/apache/lucene/search/TestDocBoost.java @@ -19,13 +19,14 @@ package org.apache.lucene.search; import 
java.io.IOException; -import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.*; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.AtomicReaderContext; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; +import org.apache.lucene.util.LuceneTestCase; /** Document boost unit test. * @@ -36,7 +37,7 @@ public class TestDocBoost extends LuceneTestCase { public void testDocBoost() throws Exception { Directory store = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, store); + RandomIndexWriter writer = new RandomIndexWriter(random, store, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy())); Fieldable f1 = newField("field", "word", Field.Store.YES, Field.Index.ANALYZED); Fieldable f2 = newField("field", "word", Field.Store.YES, Field.Index.ANALYZED); diff --git a/lucene/src/test/org/apache/lucene/search/TestExplanations.java b/lucene/src/test/org/apache/lucene/search/TestExplanations.java index 2960a4e943b..5e712eac75f 100644 --- a/lucene/src/test/org/apache/lucene/search/TestExplanations.java +++ b/lucene/src/test/org/apache/lucene/search/TestExplanations.java @@ -68,7 +68,7 @@ public class TestExplanations extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); directory = newDirectory(); - RandomIndexWriter writer= new RandomIndexWriter(random, directory); + RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy())); for (int i = 0; i < docFields.length; i++) { Document doc = new Document(); doc.add(newField(KEY, ""+i, Field.Store.NO, Field.Index.NOT_ANALYZED)); diff --git 
a/lucene/src/test/org/apache/lucene/search/TestFieldCache.java b/lucene/src/test/org/apache/lucene/search/TestFieldCache.java index b69efe6ac6a..93a440b2f7d 100644 --- a/lucene/src/test/org/apache/lucene/search/TestFieldCache.java +++ b/lucene/src/test/org/apache/lucene/search/TestFieldCache.java @@ -41,7 +41,7 @@ public class TestFieldCache extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); directory = newDirectory(); - RandomIndexWriter writer= new RandomIndexWriter(random, directory); + RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy())); long theLong = Long.MAX_VALUE; double theDouble = Double.MAX_VALUE; byte theByte = Byte.MAX_VALUE; diff --git a/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java b/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java index bca34a1f594..da3be2fb26c 100644 --- a/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java @@ -17,17 +17,19 @@ package org.apache.lucene.search; * limitations under the License. */ +import java.util.BitSet; + +import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.AtomicReaderContext; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.store.Directory; -import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.DocIdBitSet; -import java.util.BitSet; +import org.apache.lucene.util.LuceneTestCase; /** * FilteredQuery JUnit tests. 
@@ -49,7 +51,7 @@ public class TestFilteredQuery extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter (random, directory); + RandomIndexWriter writer = new RandomIndexWriter (random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy())); Document doc = new Document(); doc.add (newField("field", "one two three four five", Field.Store.YES, Field.Index.ANALYZED)); diff --git a/lucene/src/test/org/apache/lucene/search/TestFilteredSearch.java b/lucene/src/test/org/apache/lucene/search/TestFilteredSearch.java index 6070ad8e7cb..bada9039631 100644 --- a/lucene/src/test/org/apache/lucene/search/TestFilteredSearch.java +++ b/lucene/src/test/org/apache/lucene/search/TestFilteredSearch.java @@ -24,7 +24,6 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.CorruptIndexException; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.Term; @@ -47,14 +46,14 @@ public class TestFilteredSearch extends LuceneTestCase { Directory directory = newDirectory(); int[] filterBits = {1, 36}; SimpleDocIdSetFilter filter = new SimpleDocIdSetFilter(filterBits); - IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())); + IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy())); searchFiltered(writer, directory, filter, enforceSingleSegment); // run the test on more than one segment enforceSingleSegment = false; // reset - it is stateful filter.reset(); writer.close(); - writer = new IndexWriter(directory, newIndexWriterConfig( 
TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10)); + writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10).setMergePolicy(newInOrderLogMergePolicy())); // we index 60 docs - this will create 6 segments searchFiltered(writer, directory, filter, enforceSingleSegment); writer.close(); diff --git a/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java b/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java index 314089b6ee9..85f40abfe37 100644 --- a/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java +++ b/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java @@ -79,7 +79,7 @@ public class TestFuzzyQuery2 extends LuceneTestCase { int terms = (int) Math.pow(2, bits); Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, new MockAnalyzer(MockTokenizer.KEYWORD, false)); + RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.KEYWORD, false)).setMergePolicy(newInOrderLogMergePolicy())); Document doc = new Document(); Field field = newField("field", "", Field.Store.NO, Field.Index.ANALYZED); diff --git a/lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java b/lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java index 394f387cb41..4f7356271ef 100644 --- a/lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java @@ -40,7 +40,7 @@ public class TestMatchAllDocsQuery extends LuceneTestCase { public void testQuery() throws Exception { Directory dir = newDirectory(); IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(2)); + TEST_VERSION_CURRENT, 
analyzer).setMaxBufferedDocs(2).setMergePolicy(newInOrderLogMergePolicy())); addDoc("one", iw, 1f); addDoc("two", iw, 20f); addDoc("three four", iw, 300f); diff --git a/lucene/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java b/lucene/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java index 8c8cc1a76c3..7657d25dc78 100644 --- a/lucene/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java +++ b/lucene/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java @@ -38,7 +38,7 @@ public class TestMultiThreadTermVectors extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); directory = newDirectory(); - IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())); + IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy())); //writer.setUseCompoundFile(false); //writer.infoStream = System.out; for (int i = 0; i < numDocs; i++) { diff --git a/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java b/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java index 18b1ded0a24..e143d730e34 100644 --- a/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java +++ b/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java @@ -55,7 +55,8 @@ public class TestNumericRangeQuery32 extends LuceneTestCase { directory = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()) - .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000))); + .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)) + .setMergePolicy(newInOrderLogMergePolicy())); NumericField field8 = new NumericField("field8", 8, Field.Store.YES, true), diff --git a/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java 
b/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java index 27aebfce451..d3873fc6c12 100644 --- a/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java +++ b/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java @@ -52,7 +52,8 @@ public class TestNumericRangeQuery64 extends LuceneTestCase { directory = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()) - .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000))); + .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)) + .setMergePolicy(newInOrderLogMergePolicy())); NumericField field8 = new NumericField("field8", 8, Field.Store.YES, true), diff --git a/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java b/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java index a9e9c78a320..71621a3f80d 100644 --- a/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java @@ -598,7 +598,7 @@ public class TestPhraseQuery extends LuceneTestCase { Directory dir = newDirectory(); Analyzer analyzer = new MockAnalyzer(); - RandomIndexWriter w = new RandomIndexWriter(random, dir, analyzer); + RandomIndexWriter w = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).setMergePolicy(newInOrderLogMergePolicy())); List> docs = new ArrayList>(); Document d = new Document(); Field f = newField("f", "", Field.Store.NO, Field.Index.ANALYZED); diff --git a/lucene/src/test/org/apache/lucene/search/TestSort.java b/lucene/src/test/org/apache/lucene/search/TestSort.java index 9a5db0d1053..4428101c7d2 100644 --- a/lucene/src/test/org/apache/lucene/search/TestSort.java +++ b/lucene/src/test/org/apache/lucene/search/TestSort.java @@ -121,7 +121,7 @@ public class TestSort extends LuceneTestCase implements Serializable { throws IOException { Directory indexStore = newDirectory(); 
dirs.add(indexStore); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore); + RandomIndexWriter writer = new RandomIndexWriter(random, indexStore, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy())); for (int i=0; i Date: Sat, 29 Jan 2011 19:51:30 +0000 Subject: [PATCH 051/185] LUCENE-1076: add CHANGES git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065096 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/CHANGES.txt | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index e859ecd042c..1a29524de6e 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -150,6 +150,12 @@ Changes in Runtime Behavior * LUCENE-2720: IndexWriter throws IndexFormatTooOldException on open, rather than later when e.g. a merge starts. (Shai Erera, Mike McCandless, Uwe Schindler) +* LUCENE-1076: The default merge policy is now able to merge + non-contiguous segments, which means docIDs no longer necessarily + say "in order". If this is a problem then you can use either of the + LogMergePolicy impls, and call setRequireContiguousMerge(true). 
+ (Mike McCandless) + API Changes * LUCENE-2302, LUCENE-1458, LUCENE-2111, LUCENE-2514: Terms are no longer From 185ad0c6310d2d5001840a177a2f8337aa2eeed1 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Sat, 29 Jan 2011 20:33:09 +0000 Subject: [PATCH 052/185] Java 1.5 can't @Override an interface git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065102 13f79535-47bb-0310-9956-ffa450edef68 --- .../src/java/org/apache/lucene/index/BufferedDeletesStream.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java b/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java index b9a0184a0c1..555c78b67c2 100644 --- a/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java +++ b/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java @@ -130,7 +130,7 @@ class BufferedDeletesStream { // Sorts SegmentInfos from smallest to biggest bufferedDelGen: private static final Comparator sortByDelGen = new Comparator() { - @Override + // @Override -- not until Java 1.6 public int compare(SegmentInfo si1, SegmentInfo si2) { final long cmp = si1.getBufferedDeletesGen() - si2.getBufferedDeletesGen(); if (cmp > 0) { From de55bd4de125d5d8b5d89c1b7b3ec2224f93f8ca Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Sun, 30 Jan 2011 13:03:21 +0000 Subject: [PATCH 053/185] LUCENE-2896: in advance(), don't skip when target doc delta is very small git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065261 13f79535-47bb-0310-9956-ffa450edef68 --- .../standard/StandardPostingsReader.java | 20 ++++++------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReader.java b/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReader.java index 707bb43dec1..0e53a99d536 100644 --- 
a/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReader.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReader.java @@ -375,13 +375,10 @@ public class StandardPostingsReader extends PostingsReaderBase { @Override public int advance(int target) throws IOException { - // TODO: jump right to next() if target is < X away - // from where we are now? - - if (limit >= skipInterval) { + if ((target - skipInterval) >= doc && limit >= skipInterval) { // There are enough docs in the posting to have - // skip data + // skip data, and it isn't too close. if (skipper == null) { // This is the first time this enum has ever been used for skipping -- do lazy init @@ -528,13 +525,10 @@ public class StandardPostingsReader extends PostingsReaderBase { //System.out.println("StandardR.D&PE advance target=" + target); - // TODO: jump right to next() if target is < X away - // from where we are now? - - if (limit >= skipInterval) { + if ((target - skipInterval) >= doc && limit >= skipInterval) { // There are enough docs in the posting to have - // skip data + // skip data, and it isn't too close if (skipper == null) { // This is the first time this enum has ever been used for skipping -- do lazy init @@ -724,13 +718,11 @@ public class StandardPostingsReader extends PostingsReaderBase { public int advance(int target) throws IOException { //System.out.println("StandardR.D&PE advance seg=" + segment + " target=" + target + " this=" + this); - // TODO: jump right to next() if target is < X away - // from where we are now? 
- if (limit >= skipInterval) { + if ((target - skipInterval) >= doc && limit >= skipInterval) { // There are enough docs in the posting to have - // skip data + // skip data, and it isn't too close if (skipper == null) { // This is the first time this enum has ever been used for skipping -- do lazy init From 5629a2b96b37f87c90463438033df1ddf90f3518 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Sun, 30 Jan 2011 13:28:41 +0000 Subject: [PATCH 054/185] add missing license headers where there are none, but the JIRA box was checked git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065265 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/lucene/store/NativePosixUtil.cpp | 17 +++++++++++++++++ .../org/apache/lucene/search/regex/package.html | 17 +++++++++++++++++ .../core/builders/TestQueryTreeBuilder.java | 17 +++++++++++++++++ .../spatial/geometry/TestDistanceUnits.java | 17 +++++++++++++++++ .../projections/SinusoidalProjectorTest.java | 17 +++++++++++++++++ .../org/apache/lucene/util/packed/package.html | 16 ++++++++++++++++ .../index/TestSnapshotDeletionPolicy.java | 17 +++++++++++++++++ .../lucene/analysis/el/TestGreekStemmer.java | 17 +++++++++++++++++ 8 files changed, 135 insertions(+) diff --git a/lucene/contrib/misc/src/java/org/apache/lucene/store/NativePosixUtil.cpp b/lucene/contrib/misc/src/java/org/apache/lucene/store/NativePosixUtil.cpp index 7ccf7e7b445..fa05142f877 100644 --- a/lucene/contrib/misc/src/java/org/apache/lucene/store/NativePosixUtil.cpp +++ b/lucene/contrib/misc/src/java/org/apache/lucene/store/NativePosixUtil.cpp @@ -1,3 +1,20 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + #include #include // posix_fadvise, constants for open #include // strerror diff --git a/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/package.html b/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/package.html index c963307fecb..7b54ddb557e 100644 --- a/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/package.html +++ b/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/package.html @@ -1,3 +1,20 @@ + + Regular expression Query. diff --git a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/core/builders/TestQueryTreeBuilder.java b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/core/builders/TestQueryTreeBuilder.java index f456d298b67..88ad9a21b16 100644 --- a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/core/builders/TestQueryTreeBuilder.java +++ b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/core/builders/TestQueryTreeBuilder.java @@ -1,5 +1,22 @@ package org.apache.lucene.queryParser.core.builders; +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + import junit.framework.Assert; import org.apache.lucene.queryParser.core.QueryNodeException; diff --git a/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/geometry/TestDistanceUnits.java b/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/geometry/TestDistanceUnits.java index 509e7009799..f1758859cf9 100644 --- a/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/geometry/TestDistanceUnits.java +++ b/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/geometry/TestDistanceUnits.java @@ -1,5 +1,22 @@ package org.apache.lucene.spatial.geometry; +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + import org.apache.lucene.util.LuceneTestCase; import org.junit.Test; diff --git a/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/projections/SinusoidalProjectorTest.java b/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/projections/SinusoidalProjectorTest.java index c10858cdb62..d764200b6c2 100644 --- a/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/projections/SinusoidalProjectorTest.java +++ b/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/projections/SinusoidalProjectorTest.java @@ -1,5 +1,22 @@ package org.apache.lucene.spatial.tier.projections; +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + import org.apache.lucene.util.LuceneTestCase; import org.junit.Test; diff --git a/lucene/src/java/org/apache/lucene/util/packed/package.html b/lucene/src/java/org/apache/lucene/util/packed/package.html index b98aa234276..d1d0e298ea1 100644 --- a/lucene/src/java/org/apache/lucene/util/packed/package.html +++ b/lucene/src/java/org/apache/lucene/util/packed/package.html @@ -1,4 +1,20 @@ + diff --git a/lucene/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java b/lucene/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java index e2899a66393..a4f138c40fa 100644 --- a/lucene/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java +++ b/lucene/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java @@ -1,5 +1,22 @@ package org.apache.lucene.index; +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + import java.util.Collection; import java.util.Map; import java.util.Random; diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/el/TestGreekStemmer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/el/TestGreekStemmer.java index 1b95c29b31a..8b0192e1555 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/el/TestGreekStemmer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/el/TestGreekStemmer.java @@ -1,5 +1,22 @@ package org.apache.lucene.analysis.el; +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.BaseTokenStreamTestCase; From 7c24712e89cbbb65375e7c5d3a7ee11eac8759f7 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Sun, 30 Jan 2011 13:52:06 +0000 Subject: [PATCH 055/185] LUCENE-1866: add missing dirs to rat report git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065272 13f79535-47bb-0310-9956-ffa450edef68 --- solr/build.xml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/solr/build.xml b/solr/build.xml index c3ef9682c18..9214ee4d6ce 100644 --- a/solr/build.xml +++ b/solr/build.xml @@ -956,6 +956,8 @@ description="runs the tasks over src/java excluding the license directory"> + + From 24cfce7c1aad29f5caa1858c91caafa71d4846c4 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Sun, 30 Jan 2011 14:17:46 +0000 Subject: [PATCH 056/185] add missing license headers where there are none, but the JIRA box was checked (solr) git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065286 13f79535-47bb-0310-9956-ffa450edef68 --- .../common/params/QueryElevationParams.java | 17 +++++++++++++++++ .../handler/admin/SolrInfoMBeanHandler.java | 17 +++++++++++++++++ .../solr/search/function/TFValueSource.java | 17 +++++++++++++++++ .../distance/StringDistanceFunction.java | 17 +++++++++++++++++ .../solr/client/solrj/impl/CloudSolrServer.java | 17 +++++++++++++++++ .../solr/BaseDistributedSearchTestCase.java | 17 +++++++++++++++++ .../test/org/apache/solr/TestPluginEnable.java | 17 +++++++++++++++++ .../solr/client/solrj/SolrJettyTestBase.java | 17 +++++++++++++++++ .../TestLegacyMergeSchedulerPolicyConfig.java | 17 +++++++++++++++++ .../org/apache/solr/core/TestPropInject.java | 17 +++++++++++++++++ .../apache/solr/core/TestXIncludeConfig.java | 17 +++++++++++++++++ .../DistributedSpellCheckComponentTest.java | 17 +++++++++++++++++ .../DistributedTermsComponentTest.java | 17 +++++++++++++++++ .../org/apache/solr/search/TestLRUCache.java | 17 +++++++++++++++++ 14 files 
changed, 238 insertions(+) diff --git a/solr/src/common/org/apache/solr/common/params/QueryElevationParams.java b/solr/src/common/org/apache/solr/common/params/QueryElevationParams.java index 517eec4d7b8..0c15b7b2acf 100644 --- a/solr/src/common/org/apache/solr/common/params/QueryElevationParams.java +++ b/solr/src/common/org/apache/solr/common/params/QueryElevationParams.java @@ -1,5 +1,22 @@ package org.apache.solr.common.params; +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + /** * Parameters used with the QueryElevationComponent diff --git a/solr/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java b/solr/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java index aa8d94c22d9..c5fef3c14e7 100644 --- a/solr/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java +++ b/solr/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java @@ -1,5 +1,22 @@ package org.apache.solr.handler.admin; +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + import org.apache.solr.handler.RequestHandlerBase; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.core.SolrInfoMBean; diff --git a/solr/src/java/org/apache/solr/search/function/TFValueSource.java b/solr/src/java/org/apache/solr/search/function/TFValueSource.java index c37a4949c18..b2a776e5a87 100755 --- a/solr/src/java/org/apache/solr/search/function/TFValueSource.java +++ b/solr/src/java/org/apache/solr/search/function/TFValueSource.java @@ -1,5 +1,22 @@ package org.apache.solr.search.function; +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + import org.apache.lucene.index.*; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.search.DocIdSetIterator; diff --git a/solr/src/java/org/apache/solr/search/function/distance/StringDistanceFunction.java b/solr/src/java/org/apache/solr/search/function/distance/StringDistanceFunction.java index f67639c9d28..222ef314b7a 100644 --- a/solr/src/java/org/apache/solr/search/function/distance/StringDistanceFunction.java +++ b/solr/src/java/org/apache/solr/search/function/distance/StringDistanceFunction.java @@ -1,5 +1,22 @@ package org.apache.solr.search.function.distance; +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.search.spell.StringDistance; import org.apache.solr.search.function.DocValues; diff --git a/solr/src/solrj/org/apache/solr/client/solrj/impl/CloudSolrServer.java b/solr/src/solrj/org/apache/solr/client/solrj/impl/CloudSolrServer.java index 60955330996..1268c402589 100644 --- a/solr/src/solrj/org/apache/solr/client/solrj/impl/CloudSolrServer.java +++ b/solr/src/solrj/org/apache/solr/client/solrj/impl/CloudSolrServer.java @@ -1,5 +1,22 @@ package org.apache.solr.client.solrj.impl; +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + import java.io.IOException; import java.net.MalformedURLException; import java.util.ArrayList; diff --git a/solr/src/test/org/apache/solr/BaseDistributedSearchTestCase.java b/solr/src/test/org/apache/solr/BaseDistributedSearchTestCase.java index 33839c86c3d..1dc858ad0ef 100644 --- a/solr/src/test/org/apache/solr/BaseDistributedSearchTestCase.java +++ b/solr/src/test/org/apache/solr/BaseDistributedSearchTestCase.java @@ -1,5 +1,22 @@ package org.apache.solr; +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + import java.io.File; import java.io.IOException; import java.util.ArrayList; diff --git a/solr/src/test/org/apache/solr/TestPluginEnable.java b/solr/src/test/org/apache/solr/TestPluginEnable.java index 443c462869a..b390ddf37bb 100644 --- a/solr/src/test/org/apache/solr/TestPluginEnable.java +++ b/solr/src/test/org/apache/solr/TestPluginEnable.java @@ -1,5 +1,22 @@ package org.apache.solr; +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + import org.apache.solr.client.solrj.SolrServerException; import org.junit.BeforeClass; import org.junit.Test; diff --git a/solr/src/test/org/apache/solr/client/solrj/SolrJettyTestBase.java b/solr/src/test/org/apache/solr/client/solrj/SolrJettyTestBase.java index 145317efeae..ad8a70aa299 100755 --- a/solr/src/test/org/apache/solr/client/solrj/SolrJettyTestBase.java +++ b/solr/src/test/org/apache/solr/client/solrj/SolrJettyTestBase.java @@ -1,5 +1,22 @@ package org.apache.solr.client.solrj; +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + import java.io.File; import java.io.IOException; diff --git a/solr/src/test/org/apache/solr/core/TestLegacyMergeSchedulerPolicyConfig.java b/solr/src/test/org/apache/solr/core/TestLegacyMergeSchedulerPolicyConfig.java index f0bd861aaa2..e89815cecce 100644 --- a/solr/src/test/org/apache/solr/core/TestLegacyMergeSchedulerPolicyConfig.java +++ b/solr/src/test/org/apache/solr/core/TestLegacyMergeSchedulerPolicyConfig.java @@ -1,5 +1,22 @@ package org.apache.solr.core; +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + import java.io.IOException; import org.apache.lucene.index.IndexWriter; diff --git a/solr/src/test/org/apache/solr/core/TestPropInject.java b/solr/src/test/org/apache/solr/core/TestPropInject.java index c84e13fe877..858388a7ac0 100644 --- a/solr/src/test/org/apache/solr/core/TestPropInject.java +++ b/solr/src/test/org/apache/solr/core/TestPropInject.java @@ -1,5 +1,22 @@ package org.apache.solr.core; +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + import java.io.IOException; import org.apache.lucene.index.ConcurrentMergeScheduler; diff --git a/solr/src/test/org/apache/solr/core/TestXIncludeConfig.java b/solr/src/test/org/apache/solr/core/TestXIncludeConfig.java index 95b03bfb327..905685abeb8 100644 --- a/solr/src/test/org/apache/solr/core/TestXIncludeConfig.java +++ b/solr/src/test/org/apache/solr/core/TestXIncludeConfig.java @@ -1,5 +1,22 @@ package org.apache.solr.core; +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + import java.io.File; import org.apache.commons.io.FileUtils; diff --git a/solr/src/test/org/apache/solr/handler/component/DistributedSpellCheckComponentTest.java b/solr/src/test/org/apache/solr/handler/component/DistributedSpellCheckComponentTest.java index 75a968d4f7f..ed0edbb97b6 100644 --- a/solr/src/test/org/apache/solr/handler/component/DistributedSpellCheckComponentTest.java +++ b/solr/src/test/org/apache/solr/handler/component/DistributedSpellCheckComponentTest.java @@ -1,5 +1,22 @@ package org.apache.solr.handler.component; +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + import org.apache.solr.BaseDistributedSearchTestCase; import org.apache.solr.client.solrj.SolrServer; import org.apache.solr.common.params.ModifiableSolrParams; diff --git a/solr/src/test/org/apache/solr/handler/component/DistributedTermsComponentTest.java b/solr/src/test/org/apache/solr/handler/component/DistributedTermsComponentTest.java index ac3b7094c1e..bcf91c268d3 100644 --- a/solr/src/test/org/apache/solr/handler/component/DistributedTermsComponentTest.java +++ b/solr/src/test/org/apache/solr/handler/component/DistributedTermsComponentTest.java @@ -1,5 +1,22 @@ package org.apache.solr.handler.component; +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + import org.apache.solr.BaseDistributedSearchTestCase; /** diff --git a/solr/src/test/org/apache/solr/search/TestLRUCache.java b/solr/src/test/org/apache/solr/search/TestLRUCache.java index 7439704f075..7ff5b762085 100644 --- a/solr/src/test/org/apache/solr/search/TestLRUCache.java +++ b/solr/src/test/org/apache/solr/search/TestLRUCache.java @@ -1,5 +1,22 @@ package org.apache.solr.search; +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + import java.io.IOException; import java.io.Serializable; import java.util.HashMap; From 6569aa5da376c73ebff0fc93fdf7f073e152c374 Mon Sep 17 00:00:00 2001 From: Yonik Seeley Date: Sun, 30 Jan 2011 15:03:01 +0000 Subject: [PATCH 057/185] add ASL git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065302 13f79535-47bb-0310-9956-ffa450edef68 --- .../byTask/feeds/LongToEnglishQueryMaker.java | 17 +++++++++++++++++ .../apache/solr/core/RefCntRamDirectory.java | 17 +++++++++++++++++ .../request/PerSegmentSingleValuedFaceting.java | 17 +++++++++++++++++ .../solr/analysis/TestMultiWordSynonyms.java | 17 +++++++++++++++++ .../velocity/VelocityResponseWriterTest.java | 17 +++++++++++++++++ 5 files changed, 85 insertions(+) diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishQueryMaker.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishQueryMaker.java index 6abe9fcccd9..fdee2882518 100644 --- a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishQueryMaker.java +++ b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishQueryMaker.java @@ -1,3 +1,20 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.lucene.benchmark.byTask.feeds; import org.apache.lucene.analysis.Analyzer; diff --git a/solr/src/java/org/apache/solr/core/RefCntRamDirectory.java b/solr/src/java/org/apache/solr/core/RefCntRamDirectory.java index e3eaaf3d9e5..e9659814374 100644 --- a/solr/src/java/org/apache/solr/core/RefCntRamDirectory.java +++ b/solr/src/java/org/apache/solr/core/RefCntRamDirectory.java @@ -1,3 +1,20 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.solr.core; import java.io.IOException; diff --git a/solr/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java b/solr/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java index 30f99a6f0a6..56015fde2cf 100755 --- a/solr/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java +++ b/solr/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java @@ -1,3 +1,20 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.solr.request; import org.apache.lucene.index.IndexReader.AtomicReaderContext; diff --git a/solr/src/test/org/apache/solr/analysis/TestMultiWordSynonyms.java b/solr/src/test/org/apache/solr/analysis/TestMultiWordSynonyms.java index e4f71c57249..f0dd0782567 100644 --- a/solr/src/test/org/apache/solr/analysis/TestMultiWordSynonyms.java +++ b/solr/src/test/org/apache/solr/analysis/TestMultiWordSynonyms.java @@ -1,3 +1,20 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package org.apache.solr.analysis; import org.apache.lucene.analysis.core.WhitespaceTokenizer; diff --git a/solr/src/test/org/apache/solr/velocity/VelocityResponseWriterTest.java b/solr/src/test/org/apache/solr/velocity/VelocityResponseWriterTest.java index 911ca19b1ad..f8c61e5a8e1 100644 --- a/solr/src/test/org/apache/solr/velocity/VelocityResponseWriterTest.java +++ b/solr/src/test/org/apache/solr/velocity/VelocityResponseWriterTest.java @@ -1,3 +1,20 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package org.apache.solr.velocity; import org.apache.solr.response.SolrQueryResponse; From d1a5ca1460643c62a4761c5d3139679fbb5bc80d Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Sun, 30 Jan 2011 15:10:15 +0000 Subject: [PATCH 058/185] add missing @Override and @Deprecated annotations git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065304 13f79535-47bb-0310-9956-ffa450edef68 --- .../InstantiatedDocsAndPositionsEnum.java | 3 + .../apache/lucene/store/WindowsDirectory.java | 5 + .../precedence/TestPrecedenceQueryParser.java | 4 + .../index/FreqProxTermsWriterPerField.java | 1 + .../org/apache/lucene/index/MultiReader.java | 1 + .../lucene/index/PerFieldCodecWrapper.java | 1 + .../index/TermVectorsTermsWriterPerField.java | 1 + .../lucene/index/codecs/BlockTermsReader.java | 1 + .../codecs/preflex/SegmentTermPositions.java | 6 ++ .../index/codecs/preflex/TermInfosReader.java | 3 + .../index/codecs/sep/IntIndexInput.java | 1 + .../codecs/sep/SepPostingsReaderImpl.java | 1 + .../simpletext/SimpleTextFieldsReader.java | 1 + .../standard/StandardPostingsReader.java | 9 ++ .../apache/lucene/search/FuzzyTermsEnum.java | 1 + .../lucene/search/TotalHitCountCollector.java | 4 + .../lucene/search/cache/EntryCreator.java | 1 + .../search/function/MultiValueSource.java | 1 + .../search/payloads/PayloadNearQuery.java | 1 + .../spans/SpanNearPayloadCheckQuery.java | 1 + .../search/spans/SpanPayloadCheckQuery.java | 1 + .../java/org/apache/lucene/util/BytesRef.java | 1 + .../lucene/util/DoubleBarrelLRUCache.java | 1 + .../java/org/apache/lucene/util/IntsRef.java | 1 + .../org/apache/lucene/util/ReaderUtil.java | 1 + .../util/automaton/BasicOperations.java | 3 + .../lucene/util/automaton/SortedIntSet.java | 6 ++ .../lucene/util/automaton/UTF32ToUTF8.java | 1 + .../util/automaton/fst/PairOutputs.java | 1 + .../apache/lucene/util/packed/Packed32.java | 1 + .../apache/lucene/util/packed/Packed64.java | 1 + .../lucene/util/packed/PackedWriter.java | 1 + 
.../org/apache/lucene/TestAssertions.java | 11 +++ .../org/apache/lucene/TestExternalCodecs.java | 2 + .../TestCharTermAttributeImpl.java | 1 + .../lucene/index/TestDocsAndPositions.java | 1 + .../lucene/index/TestFilterIndexReader.java | 4 + .../lucene/index/TestIndexWriterReader.java | 1 + .../lucene/index/TestMultiLevelSkipList.java | 2 + .../lucene/index/TestPerSegmentDeletes.java | 1 + .../lucene/search/TestAutomatonQuery.java | 2 + .../search/TestAutomatonQueryUnicode.java | 2 + .../apache/lucene/search/TestRegexpQuery.java | 2 + .../search/cache/TestEntryCreators.java | 1 + .../util/LuceneJUnitDividingSelector.java | 3 + .../apache/lucene/util/TestBytesRefHash.java | 1 + .../lucene/util/TestDoubleBarrelLRUCache.java | 6 ++ .../util/TestRecyclingByteBlockAllocator.java | 1 + .../lucene/util/automaton/fst/TestFSTs.java | 2 + .../charfilter/HTMLStripCharFilter.java | 3 + .../commongrams/CommonGramsFilter.java | 1 + .../commongrams/CommonGramsQueryFilter.java | 2 + .../lucene/analysis/fa/PersianCharFilter.java | 1 + .../pattern/PatternReplaceCharFilter.java | 2 + .../lucene/analysis/synonym/SynonymMap.java | 1 + .../TestRemoveDuplicatesTokenFilter.java | 1 + .../miscellaneous/TestTrimFilter.java | 1 + .../TestWordDelimiterFilter.java | 3 + .../analysis/synonym/TestSynonymFilter.java | 1 + .../carrot2/CarrotClusteringEngine.java | 2 + .../carrot2/LuceneLanguageModelFactory.java | 3 +- .../MockDocumentClusteringEngine.java | 2 + .../dataimport/MailEntityProcessor.java | 2 + .../dataimport/TikaEntityProcessor.java | 1 + .../dataimport/TestMailEntityProcessor.java | 4 + .../BinContentStreamDataSource.java | 3 + .../handler/dataimport/BinFileDataSource.java | 3 + .../handler/dataimport/BinURLDataSource.java | 3 + .../dataimport/CachedSqlEntityProcessor.java | 3 + .../handler/dataimport/ClobTransformer.java | 1 + .../dataimport/ContentStreamDataSource.java | 3 + .../solr/handler/dataimport/ContextImpl.java | 21 ++++ .../solr/handler/dataimport/DataConfig.java | 1 + 
.../solr/handler/dataimport/DataImporter.java | 1 + .../dataimport/DateFormatTransformer.java | 1 + .../solr/handler/dataimport/DebugLogger.java | 5 + .../solr/handler/dataimport/DocBuilder.java | 1 + .../dataimport/EntityProcessorBase.java | 6 ++ .../dataimport/EntityProcessorWrapper.java | 8 ++ .../solr/handler/dataimport/EvaluatorBag.java | 5 + .../dataimport/FieldReaderDataSource.java | 3 + .../dataimport/FieldStreamDataSource.java | 3 + .../handler/dataimport/FileDataSource.java | 3 + .../dataimport/FileListEntityProcessor.java | 2 + .../handler/dataimport/JdbcDataSource.java | 4 + .../dataimport/LineEntityProcessor.java | 2 + .../handler/dataimport/LogTransformer.java | 1 + .../handler/dataimport/MockDataSource.java | 3 + .../dataimport/NumberFormatTransformer.java | 1 + .../dataimport/PlainTextEntityProcessor.java | 2 + .../handler/dataimport/RegexTransformer.java | 1 + .../handler/dataimport/ScriptTransformer.java | 1 + .../dataimport/SqlEntityProcessor.java | 5 + .../dataimport/TemplateTransformer.java | 1 + .../handler/dataimport/URLDataSource.java | 3 + .../dataimport/VariableResolverImpl.java | 2 + .../dataimport/XPathEntityProcessor.java | 3 + .../AbstractDataImportHandlerTestCase.java | 27 ++++++ .../TestCachedSqlEntityProcessor.java | 2 + .../TestContentStreamDataSource.java | 2 + .../handler/dataimport/TestDocBuilder.java | 6 ++ .../handler/dataimport/TestDocBuilder2.java | 2 + .../dataimport/TestEntityProcessorBase.java | 2 + .../handler/dataimport/TestErrorHandling.java | 4 + .../handler/dataimport/TestEvaluatorBag.java | 1 + .../dataimport/TestJdbcDataSource.java | 2 + .../dataimport/TestLineEntityProcessor.java | 3 + .../TestPlainTextEntityProcessor.java | 3 + .../dataimport/TestSqlEntityProcessor.java | 5 + .../dataimport/TestSqlEntityProcessor2.java | 1 + .../dataimport/TestVariableResolver.java | 1 + .../dataimport/TestXPathEntityProcessor.java | 3 + .../extraction/ExtractingDocumentLoader.java | 1 + 
.../extraction/ExtractingRequestHandler.java | 1 + .../handler/ExtractingRequestHandlerTest.java | 1 + .../processor/UIMAUpdateRequestProcessor.java | 1 + .../UIMAUpdateRequestProcessorFactory.java | 1 + .../UIMAUpdateRequestProcessorTest.java | 1 + .../org/apache/solr/common/SolrDocument.java | 2 + .../apache/solr/common/SolrDocumentList.java | 1 + .../apache/solr/common/cloud/CloudState.java | 1 + .../apache/solr/common/cloud/ZkNodeProps.java | 1 + .../solr/common/params/FacetParams.java | 3 + .../solr/common/util/ConcurrentLRUCache.java | 7 ++ .../solr/common/util/FastOutputStream.java | 1 + .../apache/solr/common/util/NamedList.java | 3 + .../solr/common/util/RegexFileFilter.java | 1 + .../ArabicLetterTokenizerFactory.java | 1 + .../solr/analysis/ChineseFilterFactory.java | 1 + .../analysis/ChineseTokenizerFactory.java | 1 + ...tionaryCompoundWordTokenFilterFactory.java | 1 + ...enationCompoundWordTokenFilterFactory.java | 1 + .../analysis/IndonesianStemFilterFactory.java | 1 + .../NumericPayloadTokenFilterFactory.java | 1 + .../PatternReplaceCharFilterFactory.java | 1 + .../solr/analysis/PositionFilterFactory.java | 1 + .../solr/analysis/ShingleFilterFactory.java | 1 + .../apache/solr/analysis/SolrAnalyzer.java | 1 + .../apache/solr/analysis/TokenizerChain.java | 1 + .../org/apache/solr/cloud/SolrZkServer.java | 1 + .../solr/cloud/ZkSolrResourceLoader.java | 3 + .../solr/core/AbstractSolrEventListener.java | 1 + .../apache/solr/core/RAMDirectoryFactory.java | 1 + .../apache/solr/core/RefCntRamDirectory.java | 1 + .../solr/core/RunExecutableListener.java | 3 + .../java/org/apache/solr/core/SolrCore.java | 2 + .../solr/core/StandardDirectoryFactory.java | 1 + .../solr/core/StandardIndexReaderFactory.java | 1 + .../handler/AnalysisRequestHandlerBase.java | 1 + .../handler/BinaryUpdateRequestHandler.java | 6 ++ .../solr/handler/CSVRequestHandler.java | 7 ++ .../handler/ContentStreamHandlerBase.java | 1 + .../DocumentAnalysisRequestHandler.java | 1 + 
.../handler/FieldAnalysisRequestHandler.java | 1 + .../handler/JsonUpdateRequestHandler.java | 1 + .../solr/handler/ReplicationHandler.java | 6 ++ .../org/apache/solr/handler/SnapPuller.java | 1 + .../org/apache/solr/handler/SnapShooter.java | 1 + .../org/apache/solr/handler/XMLLoader.java | 1 + .../solr/handler/XmlUpdateRequestHandler.java | 1 + .../handler/admin/SolrInfoMBeanHandler.java | 5 + .../handler/component/DebugComponent.java | 1 + .../handler/component/FacetComponent.java | 1 + .../handler/component/HighlightComponent.java | 1 + .../component/QueryElevationComponent.java | 7 ++ .../solr/handler/component/ShardDoc.java | 4 + .../solr/handler/component/ShardRequest.java | 1 + .../solr/handler/component/ShardResponse.java | 1 + .../component/TermVectorComponent.java | 8 ++ .../handler/component/TermsComponent.java | 5 + .../highlight/DefaultSolrHighlighter.java | 3 + .../apache/solr/highlight/GapFragmenter.java | 2 + .../solr/highlight/RegexFragmenter.java | 1 + .../solr/request/ServletSolrParams.java | 1 + .../org/apache/solr/request/SimpleFacets.java | 14 +++ .../solr/request/SolrQueryRequestBase.java | 1 + .../apache/solr/request/UnInvertedField.java | 4 + .../solr/response/JSONResponseWriter.java | 14 +++ .../response/PHPSerializedResponseWriter.java | 1 + .../org/apache/solr/response/PageTool.java | 1 + .../solr/response/RubyResponseWriter.java | 2 + .../response/SolrParamResourceLoader.java | 4 + .../response/SolrVelocityResourceLoader.java | 4 + .../org/apache/solr/response/XMLWriter.java | 2 + .../solr/schema/AbstractSubTypeFieldType.java | 1 + .../org/apache/solr/schema/BCDIntField.java | 6 ++ .../org/apache/solr/schema/BinaryField.java | 4 + .../org/apache/solr/schema/BoolField.java | 9 ++ .../org/apache/solr/schema/ByteField.java | 3 + .../org/apache/solr/schema/DateField.java | 21 ++++ .../org/apache/solr/schema/DoubleField.java | 3 + .../apache/solr/schema/ExternalFileField.java | 4 + .../org/apache/solr/schema/FieldType.java | 2 + 
.../org/apache/solr/schema/FloatField.java | 3 + .../org/apache/solr/schema/IndexSchema.java | 4 + .../java/org/apache/solr/schema/IntField.java | 3 + .../org/apache/solr/schema/LongField.java | 2 + .../apache/solr/schema/RandomSortField.java | 7 ++ .../org/apache/solr/schema/ShortField.java | 2 + .../solr/schema/SortableDoubleField.java | 18 ++++ .../solr/schema/SortableFloatField.java | 18 ++++ .../apache/solr/schema/SortableIntField.java | 18 ++++ .../apache/solr/schema/SortableLongField.java | 18 ++++ .../java/org/apache/solr/schema/StrField.java | 4 + .../apache/solr/schema/StrFieldSource.java | 13 +++ .../org/apache/solr/schema/TextField.java | 3 + .../org/apache/solr/schema/TrieField.java | 3 + .../org/apache/solr/search/BitDocSet.java | 3 + .../solr/search/BoostQParserPlugin.java | 5 + .../org/apache/solr/search/DisMaxQParser.java | 2 + .../solr/search/DisMaxQParserPlugin.java | 1 + .../java/org/apache/solr/search/DocSet.java | 2 + .../solr/search/DocSetHitCollector.java | 8 ++ .../search/ExtendedDismaxQParserPlugin.java | 8 ++ .../org/apache/solr/search/FastLRUCache.java | 1 + .../solr/search/FieldQParserPlugin.java | 2 + .../apache/solr/search/FunctionQParser.java | 1 + .../solr/search/FunctionQParserPlugin.java | 1 + .../search/FunctionRangeQParserPlugin.java | 2 + .../java/org/apache/solr/search/LRUCache.java | 2 + .../solr/search/LuceneQParserPlugin.java | 4 + .../solr/search/LuceneQueryOptimizer.java | 1 + .../MissingStringLastComparatorSource.java | 1 + .../org/apache/solr/search/MutableValue.java | 1 + .../solr/search/NestedQParserPlugin.java | 5 + .../solr/search/OldLuceneQParserPlugin.java | 1 + .../solr/search/PrefixQParserPlugin.java | 2 + .../org/apache/solr/search/QueryParsing.java | 1 + .../apache/solr/search/QueryResultKey.java | 2 + .../apache/solr/search/RawQParserPlugin.java | 2 + .../org/apache/solr/search/SolrCacheBase.java | 1 + .../solr/search/SolrConstantScoreQuery.java | 3 + .../apache/solr/search/SolrIndexSearcher.java | 24 ++++- 
.../apache/solr/search/SortedIntDocSet.java | 1 + .../solr/search/SpatialBoxQParserPlugin.java | 1 + .../apache/solr/search/TermQParserPlugin.java | 2 + .../apache/solr/search/ValueSourceParser.java | 96 +++++++++++++++++++ .../solr/search/function/BoostedQuery.java | 8 ++ .../solr/search/function/ByteFieldSource.java | 8 ++ .../search/function/ConstValueSource.java | 10 ++ .../search/function/DivFloatFunction.java | 2 + .../search/function/DocFreqValueSource.java | 14 +++ .../function/DoubleConstValueSource.java | 10 ++ .../search/function/DoubleFieldSource.java | 8 ++ .../search/function/DualFloatFunction.java | 10 ++ .../search/function/FieldCacheSource.java | 3 + .../solr/search/function/FileFloatSource.java | 14 +++ .../search/function/FloatFieldSource.java | 8 ++ .../solr/search/function/IntFieldSource.java | 8 ++ .../function/JoinDocFreqValueSource.java | 10 ++ .../search/function/LinearFloatFunction.java | 10 ++ .../solr/search/function/LongFieldSource.java | 8 ++ .../search/function/MaxFloatFunction.java | 10 ++ .../search/function/MultiFloatFunction.java | 10 ++ .../function/NumericFieldCacheSource.java | 2 + .../solr/search/function/OrdFieldSource.java | 12 +++ .../search/function/PowFloatFunction.java | 2 + .../search/function/ProductFloatFunction.java | 2 + .../search/function/QueryValueSource.java | 9 ++ .../function/RangeMapFloatFunction.java | 10 ++ .../function/ReciprocalFloatFunction.java | 10 ++ .../function/ReverseOrdFieldSource.java | 12 +++ .../search/function/ScaleFloatFunction.java | 10 ++ .../search/function/ShortFieldSource.java | 8 ++ .../search/function/SimpleFloatFunction.java | 6 ++ .../solr/search/function/SingleFunction.java | 3 + .../search/function/StringIndexDocValues.java | 1 + .../search/function/SumFloatFunction.java | 1 + .../solr/search/function/ValueSource.java | 10 ++ .../function/ValueSourceRangeFilter.java | 7 +- .../search/function/VectorValueSource.java | 4 + .../distance/GeohashHaversineFunction.java | 5 + 
.../distance/HaversineConstFunction.java | 7 ++ .../function/distance/HaversineFunction.java | 6 ++ .../distance/SquaredEuclideanFunction.java | 2 + .../distance/StringDistanceFunction.java | 5 + .../distance/VectorDistanceFunction.java | 5 + .../spelling/AbstractLuceneSpellChecker.java | 2 + .../solr/spelling/FileBasedSpellChecker.java | 2 + .../solr/spelling/IndexBasedSpellChecker.java | 2 + .../solr/spelling/SpellingQueryConverter.java | 1 + .../apache/solr/spelling/suggest/Lookup.java | 1 + .../solr/update/CommitUpdateCommand.java | 1 + .../solr/update/DeleteUpdateCommand.java | 1 + .../solr/update/DirectUpdateHandler2.java | 9 ++ .../apache/solr/update/SolrIndexWriter.java | 2 + .../org/apache/solr/update/UpdateCommand.java | 1 + .../update/processor/Lookup3Signature.java | 2 + .../solr/update/processor/MD5Signature.java | 3 + .../processor/TextProfileSignature.java | 3 + .../org/apache/solr/util/BoundedTreeSet.java | 2 + .../org/apache/solr/util/SolrPluginUtils.java | 1 + .../solrj/impl/BinaryRequestWriter.java | 5 + .../solrj/impl/BinaryResponseParser.java | 4 + .../client/solrj/impl/LBHttpSolrServer.java | 3 + .../impl/StreamingBinaryResponseParser.java | 2 + .../request/JavaBinUpdateRequestCodec.java | 3 + .../client/solrj/response/FieldStatsInfo.java | 1 + .../solr/BaseDistributedSearchTestCase.java | 6 ++ .../apache/solr/BasicFunctionalityTest.java | 5 + .../test/org/apache/solr/SolrTestCaseJ4.java | 3 + solr/src/test/org/apache/solr/TestTrie.java | 1 + ...estRemoveDuplicatesTokenFilterFactory.java | 1 + .../TestReversedWildcardFilterFactory.java | 1 + .../client/solrj/SolrExampleTestBase.java | 1 + .../client/solrj/TestLBHttpSolrServer.java | 1 + .../solrj/embedded/TestSolrProperties.java | 2 + .../cloud/AbstractDistributedZkTestCase.java | 1 + .../apache/solr/cloud/AbstractZkTestCase.java | 1 + .../solr/cloud/CloudStateUpdateTest.java | 1 + .../apache/solr/cloud/ZkControllerTest.java | 1 + .../apache/solr/cloud/ZkSolrClientTest.java | 1 + 
.../solr/core/AlternateDirectoryTest.java | 2 + .../solr/core/DummyValueSourceParser.java | 4 + .../solr/core/IndexReaderFactoryTest.java | 2 + .../MockQuerySenderListenerReqHandler.java | 7 ++ .../solr/core/TestArbitraryIndexDir.java | 2 + .../org/apache/solr/core/TestBadConfig.java | 3 + .../apache/solr/core/TestJmxIntegration.java | 2 + .../apache/solr/core/TestJmxMonitoredMap.java | 2 + .../org/apache/solr/core/TestPropInject.java | 2 + .../solr/core/TestSolrDeletionPolicy1.java | 1 + .../apache/solr/core/TestXIncludeConfig.java | 2 + .../apache/solr/handler/JsonLoaderTest.java | 5 + .../apache/solr/handler/TestCSVLoader.java | 2 + .../solr/handler/TestReplicationHandler.java | 2 + .../component/SpellCheckComponentTest.java | 2 + .../request/TestBinaryResponseWriter.java | 2 + .../apache/solr/request/TestWriterPerf.java | 4 + .../solr/schema/CustomSimilarityFactory.java | 1 + .../org/apache/solr/schema/DateFieldTest.java | 1 + .../apache/solr/schema/TestBinaryField.java | 1 + .../apache/solr/search/FooQParserPlugin.java | 2 + .../solr/search/TestExtendedDismaxParser.java | 4 + .../apache/solr/search/TestFastLRUCache.java | 1 + .../apache/solr/search/TestIndexSearcher.java | 1 + .../apache/solr/search/TestQueryTypes.java | 4 + .../apache/solr/search/TestQueryUtils.java | 4 + .../apache/solr/search/TestRangeQuery.java | 1 + .../apache/solr/search/TestSearchPerf.java | 4 + .../test/org/apache/solr/search/TestSort.java | 3 + .../search/function/NvlValueSourceParser.java | 8 +- .../search/function/SortByFunctionTest.java | 2 + .../apache/solr/servlet/CacheHeaderTest.java | 3 + .../servlet/DirectSolrConnectionTest.java | 2 + .../solr/servlet/NoCacheHeaderTest.java | 6 ++ .../spelling/IndexBasedSpellCheckerTest.java | 1 + .../SpellPossibilityIteratorTest.java | 3 +- .../apache/solr/update/AutoCommitTest.java | 2 + .../DirectUpdateHandlerOptimizeTest.java | 2 + .../solr/update/DirectUpdateHandlerTest.java | 1 + .../solr/update/TestIndexingPerformance.java | 2 + 
.../SignatureUpdateProcessorFactoryTest.java | 2 + .../solr/util/AbstractSolrTestCase.java | 3 + .../org/apache/solr/util/TestNumberUtils.java | 16 ++++ .../velocity/VelocityResponseWriterTest.java | 2 + .../solr/servlet/LogLevelSelection.java | 5 + 357 files changed, 1322 insertions(+), 9 deletions(-) diff --git a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocsAndPositionsEnum.java b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocsAndPositionsEnum.java index 816e454673e..e4eea034bd1 100644 --- a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocsAndPositionsEnum.java +++ b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocsAndPositionsEnum.java @@ -82,14 +82,17 @@ public class InstantiatedDocsAndPositionsEnum extends DocsAndPositionsEnum { return currentDoc.getTermPositions().length; } + @Override public int nextPosition() { return currentDoc.getTermPositions()[++posUpto]; } + @Override public boolean hasPayload() { return currentDoc.getPayloads()[posUpto] != null; } + @Override public BytesRef getPayload() { payload.bytes = currentDoc.getPayloads()[posUpto]; payload.length = payload.bytes.length; diff --git a/lucene/contrib/misc/src/java/org/apache/lucene/store/WindowsDirectory.java b/lucene/contrib/misc/src/java/org/apache/lucene/store/WindowsDirectory.java index 870ebfade1e..f1c3f74a117 100644 --- a/lucene/contrib/misc/src/java/org/apache/lucene/store/WindowsDirectory.java +++ b/lucene/contrib/misc/src/java/org/apache/lucene/store/WindowsDirectory.java @@ -64,6 +64,7 @@ public class WindowsDirectory extends FSDirectory { super(path, null); } + @Override public IndexInput openInput(String name, int bufferSize) throws IOException { ensureOpen(); return new WindowsIndexInput(new File(getDirectory(), name), Math.max(bufferSize, DEFAULT_BUFFERSIZE)); @@ -82,14 +83,17 @@ public class WindowsDirectory 
extends FSDirectory { isOpen = true; } + @Override protected void readInternal(byte[] b, int offset, int length) throws IOException { if (WindowsDirectory.read(fd, b, offset, length, getFilePointer()) != length) throw new IOException("Read past EOF"); } + @Override protected void seekInternal(long pos) throws IOException { } + @Override public synchronized void close() throws IOException { // NOTE: we synchronize and track "isOpen" because Lucene sometimes closes IIs twice! if (!isClone && isOpen) { @@ -98,6 +102,7 @@ public class WindowsDirectory extends FSDirectory { } } + @Override public long length() { return length; } diff --git a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java index 5d044b9ad4c..5cba05b3111 100644 --- a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java +++ b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java @@ -84,6 +84,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); + @Override public boolean incrementToken() throws IOException { if (inPhrase) { inPhrase = false; @@ -108,6 +109,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { public static final class QPTestAnalyzer extends Analyzer { /** Filters MockTokenizer with StopFilter. 
*/ + @Override public final TokenStream tokenStream(String fieldName, Reader reader) { return new QPTestFilter(new MockTokenizer(reader, MockTokenizer.SIMPLE, true)); } @@ -115,6 +117,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { private int originalMaxClauses; + @Override public void setUp() throws Exception { super.setUp(); originalMaxClauses = BooleanQuery.getMaxClauseCount(); @@ -627,6 +630,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { } + @Override public void tearDown() { BooleanQuery.setMaxClauseCount(originalMaxClauses); } diff --git a/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java b/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java index f47d2a71bc0..b504f1557b9 100644 --- a/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java +++ b/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java @@ -194,6 +194,7 @@ final class FreqProxTermsWriterPerField extends TermsHashConsumerPerField implem return new FreqProxPostingsArray(size); } + @Override void copyTo(ParallelPostingsArray toArray, int numToCopy) { assert toArray instanceof FreqProxPostingsArray; FreqProxPostingsArray to = (FreqProxPostingsArray) toArray; diff --git a/lucene/src/java/org/apache/lucene/index/MultiReader.java b/lucene/src/java/org/apache/lucene/index/MultiReader.java index 0d3a082567b..c2682e40231 100644 --- a/lucene/src/java/org/apache/lucene/index/MultiReader.java +++ b/lucene/src/java/org/apache/lucene/index/MultiReader.java @@ -383,6 +383,7 @@ public class MultiReader extends IndexReader implements Cloneable { return subReaders; } + @Override public ReaderContext getTopReaderContext() { return topLevelContext; } diff --git a/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java b/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java index f7d4a1885d4..51c92321f54 100644 --- 
a/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java +++ b/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java @@ -204,6 +204,7 @@ final class PerFieldCodecWrapper extends Codec { } } + @Override public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { return new FieldsReader(state.dir, state.fieldInfos, state.segmentInfo, diff --git a/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java b/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java index 4938538d054..2b4e35e09cd 100644 --- a/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java +++ b/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java @@ -281,6 +281,7 @@ final class TermVectorsTermsWriterPerField extends TermsHashConsumerPerField { int[] lastOffsets; // Last offset we saw int[] lastPositions; // Last position where this term occurred + @Override ParallelPostingsArray newInstance(int size) { return new TermVectorsPostingsArray(size); } diff --git a/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsReader.java b/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsReader.java index d4a6ac1bce7..e25364c33a3 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsReader.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsReader.java @@ -646,6 +646,7 @@ public class BlockTermsReader extends FieldsProducer { return SeekStatus.FOUND; } + @Override public long ord() { if (!doOrd) { throw new UnsupportedOperationException(); diff --git a/lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermPositions.java b/lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermPositions.java index f50d226741c..c642f6b1aaa 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermPositions.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermPositions.java @@ -58,6 +58,7 @@ 
extends SegmentTermDocs { this.proxStreamOrig = proxStream; // the proxStream will be cloned lazily when nextPosition() is called for the first time } + @Override final void seek(TermInfo ti, Term term) throws IOException { super.seek(ti, term); if (ti != null) @@ -69,6 +70,7 @@ extends SegmentTermDocs { needToLoadPayload = false; } + @Override public final void close() throws IOException { super.close(); if (proxStream != null) proxStream.close(); @@ -100,11 +102,13 @@ extends SegmentTermDocs { return delta; } + @Override protected final void skippingDoc() throws IOException { // we remember to skip a document lazily lazySkipProxCount += freq; } + @Override public final boolean next() throws IOException { // we remember to skip the remaining positions of the current // document lazily @@ -118,12 +122,14 @@ extends SegmentTermDocs { return false; } + @Override public final int read(final int[] docs, final int[] freqs) { throw new UnsupportedOperationException("TermPositions does not support processing multiple documents in one call. Use TermDocs instead."); } /** Called by super.skipTo(). 
*/ + @Override protected void skipProx(long proxPointer, int payloadLength) throws IOException { // we save the pointer, we might have to skip there lazily lazySkipPointer = proxPointer; diff --git a/lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfosReader.java b/lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfosReader.java index adf0535390d..8205e73b972 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfosReader.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfosReader.java @@ -67,15 +67,18 @@ public final class TermInfosReader { this.term = t; } + @Override public boolean equals(Object other) { CloneableTerm t = (CloneableTerm) other; return this.term.equals(t.term); } + @Override public int hashCode() { return term.hashCode(); } + @Override public Object clone() { return new CloneableTerm(term); } diff --git a/lucene/src/java/org/apache/lucene/index/codecs/sep/IntIndexInput.java b/lucene/src/java/org/apache/lucene/index/codecs/sep/IntIndexInput.java index 741272a329a..631476df0ba 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/sep/IntIndexInput.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/sep/IntIndexInput.java @@ -48,6 +48,7 @@ public abstract class IntIndexInput implements Closeable { public abstract void set(Index other); + @Override public abstract Object clone(); } diff --git a/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsReaderImpl.java b/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsReaderImpl.java index 08e2781732c..b693db361c9 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsReaderImpl.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsReaderImpl.java @@ -160,6 +160,7 @@ public class SepPostingsReaderImpl extends PostingsReaderBase { return other; } + @Override public void copyFrom(TermState _other) { super.copyFrom(_other); SepTermState other = (SepTermState) _other; diff 
--git a/lucene/src/java/org/apache/lucene/index/codecs/simpletext/SimpleTextFieldsReader.java b/lucene/src/java/org/apache/lucene/index/codecs/simpletext/SimpleTextFieldsReader.java index e40fba2f44e..ea74a6b6627 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/simpletext/SimpleTextFieldsReader.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/simpletext/SimpleTextFieldsReader.java @@ -129,6 +129,7 @@ class SimpleTextFieldsReader extends FieldsProducer { fstEnum = new BytesRefFSTEnum>>(fst); } + @Override public SeekStatus seek(BytesRef text, boolean useCache /* ignored */) throws IOException { //System.out.println("seek to text=" + text.utf8ToString()); diff --git a/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReader.java b/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReader.java index 0e53a99d536..0c9dd4f5c86 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReader.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReader.java @@ -99,12 +99,14 @@ public class StandardPostingsReader extends PostingsReaderBase { ByteArrayDataInput bytesReader; byte[] bytes; + @Override public Object clone() { StandardTermState other = new StandardTermState(); other.copyFrom(this); return other; } + @Override public void copyFrom(TermState _other) { super.copyFrom(_other); StandardTermState other = (StandardTermState) _other; @@ -118,6 +120,7 @@ public class StandardPostingsReader extends PostingsReaderBase { // (rare!), they will be re-read from disk. 
} + @Override public String toString() { return super.toString() + " freqFP=" + freqOffset + " proxFP=" + proxOffset + " skipOffset=" + skipOffset; } @@ -569,6 +572,7 @@ public class StandardPostingsReader extends PostingsReaderBase { return doc; } + @Override public int nextPosition() throws IOException { if (lazyProxPointer != -1) { @@ -597,10 +601,12 @@ public class StandardPostingsReader extends PostingsReaderBase { /** Returns the payload at this position, or null if no * payload was indexed. */ + @Override public BytesRef getPayload() throws IOException { throw new IOException("No payloads exist for this field!"); } + @Override public boolean hasPayload() { return false; } @@ -765,6 +771,7 @@ public class StandardPostingsReader extends PostingsReaderBase { return doc; } + @Override public int nextPosition() throws IOException { if (lazyProxPointer != -1) { @@ -825,6 +832,7 @@ public class StandardPostingsReader extends PostingsReaderBase { /** Returns the payload at this position, or null if no * payload was indexed. 
*/ + @Override public BytesRef getPayload() throws IOException { assert lazyProxPointer == -1; assert posPendingCount < freq; @@ -842,6 +850,7 @@ public class StandardPostingsReader extends PostingsReaderBase { return payload; } + @Override public boolean hasPayload() { return payloadPending && payloadLength > 0; } diff --git a/lucene/src/java/org/apache/lucene/search/FuzzyTermsEnum.java b/lucene/src/java/org/apache/lucene/search/FuzzyTermsEnum.java index 40795000c82..ee5e274ffaf 100644 --- a/lucene/src/java/org/apache/lucene/search/FuzzyTermsEnum.java +++ b/lucene/src/java/org/apache/lucene/search/FuzzyTermsEnum.java @@ -261,6 +261,7 @@ public final class FuzzyTermsEnum extends TermsEnum { return actualEnum.docsAndPositions(skipDocs, reuse); } + @Override public void seek(BytesRef term, TermState state) throws IOException { actualEnum.seek(term, state); } diff --git a/lucene/src/java/org/apache/lucene/search/TotalHitCountCollector.java b/lucene/src/java/org/apache/lucene/search/TotalHitCountCollector.java index 533d69c65d3..b154091e27d 100644 --- a/lucene/src/java/org/apache/lucene/search/TotalHitCountCollector.java +++ b/lucene/src/java/org/apache/lucene/search/TotalHitCountCollector.java @@ -31,16 +31,20 @@ public class TotalHitCountCollector extends Collector { return totalHits; } + @Override public void setScorer(Scorer scorer) { } + @Override public void collect(int doc) { totalHits++; } + @Override public void setNextReader(AtomicReaderContext context) { } + @Override public boolean acceptsDocsOutOfOrder() { return true; } diff --git a/lucene/src/java/org/apache/lucene/search/cache/EntryCreator.java b/lucene/src/java/org/apache/lucene/search/cache/EntryCreator.java index 0e0daff40cd..362cc83a71e 100644 --- a/lucene/src/java/org/apache/lucene/search/cache/EntryCreator.java +++ b/lucene/src/java/org/apache/lucene/search/cache/EntryCreator.java @@ -58,6 +58,7 @@ public abstract class EntryCreator implements Serializable // This can be removed 
//------------------------------------------------------------------------ + @Override public boolean equals(Object obj) { if( obj instanceof EntryCreator ) { return getCacheKey().equals( ((EntryCreator)obj).getCacheKey() ); diff --git a/lucene/src/java/org/apache/lucene/search/function/MultiValueSource.java b/lucene/src/java/org/apache/lucene/search/function/MultiValueSource.java index 7dbccb25a69..b3ec7681ad1 100644 --- a/lucene/src/java/org/apache/lucene/search/function/MultiValueSource.java +++ b/lucene/src/java/org/apache/lucene/search/function/MultiValueSource.java @@ -52,6 +52,7 @@ public final class MultiValueSource extends ValueSource { return other.getValues(context); } + @Override public DocValues getValues(ReaderContext context) throws IOException { if (context.isAtomic) { return getValues((AtomicReaderContext) context); diff --git a/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java b/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java index 19a771213ba..35356f30f7d 100644 --- a/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java +++ b/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java @@ -219,6 +219,7 @@ public class PayloadNearQuery extends SpanNearQuery { return true; } + @Override public float score() throws IOException { return super.score() diff --git a/lucene/src/java/org/apache/lucene/search/spans/SpanNearPayloadCheckQuery.java b/lucene/src/java/org/apache/lucene/search/spans/SpanNearPayloadCheckQuery.java index 5fce135941b..2b17f627327 100644 --- a/lucene/src/java/org/apache/lucene/search/spans/SpanNearPayloadCheckQuery.java +++ b/lucene/src/java/org/apache/lucene/search/spans/SpanNearPayloadCheckQuery.java @@ -72,6 +72,7 @@ public class SpanNearPayloadCheckQuery extends SpanPositionCheckQuery { return AcceptStatus.NO; } + @Override public String toString(String field) { StringBuilder buffer = new StringBuilder(); buffer.append("spanPayCheck("); diff --git 
a/lucene/src/java/org/apache/lucene/search/spans/SpanPayloadCheckQuery.java b/lucene/src/java/org/apache/lucene/search/spans/SpanPayloadCheckQuery.java index 69dbc306f19..086dad2f929 100644 --- a/lucene/src/java/org/apache/lucene/search/spans/SpanPayloadCheckQuery.java +++ b/lucene/src/java/org/apache/lucene/search/spans/SpanPayloadCheckQuery.java @@ -74,6 +74,7 @@ public class SpanPayloadCheckQuery extends SpanPositionCheckQuery{ return AcceptStatus.YES; } + @Override public String toString(String field) { StringBuilder buffer = new StringBuilder(); buffer.append("spanPayCheck("); diff --git a/lucene/src/java/org/apache/lucene/util/BytesRef.java b/lucene/src/java/org/apache/lucene/util/BytesRef.java index 1ec291a5c65..a90b6fb682d 100644 --- a/lucene/src/java/org/apache/lucene/util/BytesRef.java +++ b/lucene/src/java/org/apache/lucene/util/BytesRef.java @@ -209,6 +209,7 @@ public final class BytesRef implements Comparable, Externalizable { } /** Returns hex encoded bytes, eg [0x6c 0x75 0x63 0x65 0x6e 0x65] */ + @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append('['); diff --git a/lucene/src/java/org/apache/lucene/util/DoubleBarrelLRUCache.java b/lucene/src/java/org/apache/lucene/util/DoubleBarrelLRUCache.java index a0dd7c19f08..cdb958ef252 100644 --- a/lucene/src/java/org/apache/lucene/util/DoubleBarrelLRUCache.java +++ b/lucene/src/java/org/apache/lucene/util/DoubleBarrelLRUCache.java @@ -45,6 +45,7 @@ import java.util.Map; final public class DoubleBarrelLRUCache { public static abstract class CloneableKey { + @Override abstract public Object clone(); } diff --git a/lucene/src/java/org/apache/lucene/util/IntsRef.java b/lucene/src/java/org/apache/lucene/util/IntsRef.java index 1f284b5ea51..ee1bd2ed69c 100644 --- a/lucene/src/java/org/apache/lucene/util/IntsRef.java +++ b/lucene/src/java/org/apache/lucene/util/IntsRef.java @@ -123,6 +123,7 @@ public final class IntsRef implements Comparable { } } + @Override public String 
toString() { StringBuilder sb = new StringBuilder(); sb.append('['); diff --git a/lucene/src/java/org/apache/lucene/util/ReaderUtil.java b/lucene/src/java/org/apache/lucene/util/ReaderUtil.java index 772b5ebf751..8d772880d0b 100644 --- a/lucene/src/java/org/apache/lucene/util/ReaderUtil.java +++ b/lucene/src/java/org/apache/lucene/util/ReaderUtil.java @@ -47,6 +47,7 @@ public final class ReaderUtil { this.readerIndex = readerIndex; } + @Override public String toString() { return "slice start=" + start + " length=" + length + " readerIndex=" + readerIndex; } diff --git a/lucene/src/java/org/apache/lucene/util/automaton/BasicOperations.java b/lucene/src/java/org/apache/lucene/util/automaton/BasicOperations.java index ce1c19001b2..e7e9b301482 100644 --- a/lucene/src/java/org/apache/lucene/util/automaton/BasicOperations.java +++ b/lucene/src/java/org/apache/lucene/util/automaton/BasicOperations.java @@ -483,10 +483,12 @@ final public class BasicOperations { starts.count = 0; } + @Override public boolean equals(Object other) { return ((PointTransitions) other).point == point; } + @Override public int hashCode() { return point; } @@ -563,6 +565,7 @@ final public class BasicOperations { find(1+t.max).ends.add(t); } + @Override public String toString() { StringBuilder s = new StringBuilder(); for(int i=0;i extends Outputs> { } } + @Override public int hashCode() { return output1.hashCode() + output2.hashCode(); } diff --git a/lucene/src/java/org/apache/lucene/util/packed/Packed32.java b/lucene/src/java/org/apache/lucene/util/packed/Packed32.java index c8bb011c209..ff22ad7ef48 100644 --- a/lucene/src/java/org/apache/lucene/util/packed/Packed32.java +++ b/lucene/src/java/org/apache/lucene/util/packed/Packed32.java @@ -214,6 +214,7 @@ class Packed32 extends PackedInts.ReaderImpl implements PackedInts.Mutable { Arrays.fill(blocks, 0); } + @Override public String toString() { return "Packed32(bitsPerValue=" + bitsPerValue + ", maxPos=" + maxPos + ", elements.length=" + 
blocks.length + ")"; diff --git a/lucene/src/java/org/apache/lucene/util/packed/Packed64.java b/lucene/src/java/org/apache/lucene/util/packed/Packed64.java index b3826676503..62e77934ef3 100644 --- a/lucene/src/java/org/apache/lucene/util/packed/Packed64.java +++ b/lucene/src/java/org/apache/lucene/util/packed/Packed64.java @@ -199,6 +199,7 @@ class Packed64 extends PackedInts.ReaderImpl implements PackedInts.Mutable { | ((value << shifts[base + 2]) & writeMasks[base+2]); } + @Override public String toString() { return "Packed64(bitsPerValue=" + bitsPerValue + ", size=" + size() + ", maxPos=" + maxPos diff --git a/lucene/src/java/org/apache/lucene/util/packed/PackedWriter.java b/lucene/src/java/org/apache/lucene/util/packed/PackedWriter.java index 0cf054991ba..b2c86dd799f 100644 --- a/lucene/src/java/org/apache/lucene/util/packed/PackedWriter.java +++ b/lucene/src/java/org/apache/lucene/util/packed/PackedWriter.java @@ -106,6 +106,7 @@ class PackedWriter extends PackedInts.Writer { } } + @Override public String toString() { return "PackedWriter(written " + written + "/" + valueCount + " with " + bitsPerValue + " bits/value)"; diff --git a/lucene/src/test/org/apache/lucene/TestAssertions.java b/lucene/src/test/org/apache/lucene/TestAssertions.java index 373fd3db271..ce51fd34484 100644 --- a/lucene/src/test/org/apache/lucene/TestAssertions.java +++ b/lucene/src/test/org/apache/lucene/TestAssertions.java @@ -35,34 +35,45 @@ public class TestAssertions extends LuceneTestCase { } static class TestAnalyzer1 extends Analyzer { + @Override public final TokenStream tokenStream(String s, Reader r) { return null; } + @Override public final TokenStream reusableTokenStream(String s, Reader r) { return null; } } static final class TestAnalyzer2 extends Analyzer { + @Override public TokenStream tokenStream(String s, Reader r) { return null; } + @Override public TokenStream reusableTokenStream(String s, Reader r) { return null; } } static class TestAnalyzer3 extends Analyzer { + 
@Override public TokenStream tokenStream(String s, Reader r) { return null; } + @Override public TokenStream reusableTokenStream(String s, Reader r) { return null; } } static class TestAnalyzer4 extends Analyzer { + @Override public final TokenStream tokenStream(String s, Reader r) { return null; } + @Override public TokenStream reusableTokenStream(String s, Reader r) { return null; } } static class TestTokenStream1 extends TokenStream { + @Override public final boolean incrementToken() { return false; } } static final class TestTokenStream2 extends TokenStream { + @Override public boolean incrementToken() { return false; } } static class TestTokenStream3 extends TokenStream { + @Override public boolean incrementToken() { return false; } } diff --git a/lucene/src/test/org/apache/lucene/TestExternalCodecs.java b/lucene/src/test/org/apache/lucene/TestExternalCodecs.java index 4252f752488..e7ff3954542 100644 --- a/lucene/src/test/org/apache/lucene/TestExternalCodecs.java +++ b/lucene/src/test/org/apache/lucene/TestExternalCodecs.java @@ -63,6 +63,7 @@ public class TestExternalCodecs extends LuceneTestCase { return t2.length-t1.length; } + @Override public boolean equals(Object other) { return this == other; } @@ -344,6 +345,7 @@ public class TestExternalCodecs extends LuceneTestCase { return ramField.termToDocs.get(current).totalTermFreq; } + @Override public DocsEnum docs(Bits skipDocs, DocsEnum reuse) { return new RAMDocsEnum(ramField.termToDocs.get(current), skipDocs); } diff --git a/lucene/src/test/org/apache/lucene/analysis/tokenattributes/TestCharTermAttributeImpl.java b/lucene/src/test/org/apache/lucene/analysis/tokenattributes/TestCharTermAttributeImpl.java index bac72b5951d..1d2ab4371b7 100644 --- a/lucene/src/test/org/apache/lucene/analysis/tokenattributes/TestCharTermAttributeImpl.java +++ b/lucene/src/test/org/apache/lucene/analysis/tokenattributes/TestCharTermAttributeImpl.java @@ -227,6 +227,7 @@ public class TestCharTermAttributeImpl extends 
LuceneTestCase { public char charAt(int i) { return longTestString.charAt(i); } public int length() { return longTestString.length(); } public CharSequence subSequence(int start, int end) { return longTestString.subSequence(start, end); } + @Override public String toString() { return longTestString; } }); assertEquals("4567890123456"+longTestString, t.toString()); diff --git a/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java b/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java index 7929a4519b5..a3f02482b93 100644 --- a/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java +++ b/lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java @@ -36,6 +36,7 @@ public class TestDocsAndPositions extends LuceneTestCase { private String fieldName; private boolean usePayload; + @Override public void setUp() throws Exception { super.setUp(); fieldName = "field" + random.nextInt(); diff --git a/lucene/src/test/org/apache/lucene/index/TestFilterIndexReader.java b/lucene/src/test/org/apache/lucene/index/TestFilterIndexReader.java index 5f4dfd36e80..c17dc38b9aa 100644 --- a/lucene/src/test/org/apache/lucene/index/TestFilterIndexReader.java +++ b/lucene/src/test/org/apache/lucene/index/TestFilterIndexReader.java @@ -38,9 +38,11 @@ public class TestFilterIndexReader extends LuceneTestCase { TestFields(Fields in) { super(in); } + @Override public FieldsEnum iterator() throws IOException { return new TestFieldsEnum(super.iterator()); } + @Override public Terms terms(String field) throws IOException { return new TestTerms(super.terms(field)); } @@ -51,6 +53,7 @@ public class TestFilterIndexReader extends LuceneTestCase { super(in); } + @Override public TermsEnum iterator() throws IOException { return new TestTermsEnum(super.iterator()); } @@ -61,6 +64,7 @@ public class TestFilterIndexReader extends LuceneTestCase { super(in); } + @Override public TermsEnum terms() throws IOException { return new TestTermsEnum(super.terms()); } diff 
--git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java index 60f5e49fbb0..8607d8fed1b 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java @@ -971,6 +971,7 @@ public class TestIndexWriterReader extends LuceneTestCase { setMaxBufferedDocs(2). setReaderPooling(true). setMergedSegmentWarmer(new IndexWriter.IndexReaderWarmer() { + @Override public void warm(IndexReader r) throws IOException { IndexSearcher s = new IndexSearcher(r); TopDocs hits = s.search(new TermQuery(new Term("foo", "bar")), 10); diff --git a/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java b/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java index 425e790784d..92c3689612a 100644 --- a/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java +++ b/lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java @@ -53,6 +53,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase { super(random, delegate); } + @Override public IndexInput openInput(String fileName) throws IOException { IndexInput in = super.openInput(fileName); if (fileName.endsWith(".frq")) @@ -61,6 +62,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase { } } + @Override @Before public void setUp() throws Exception { super.setUp(); diff --git a/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java b/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java index a18e2eaee48..c7312b45ab8 100644 --- a/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java +++ b/lucene/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java @@ -257,6 +257,7 @@ public class TestPerSegmentDeletes extends LuceneTestCase { @Override public void close() {} + @Override public MergeSpecification findMerges(SegmentInfos segmentInfos) throws CorruptIndexException, 
IOException { MergeSpecification ms = new MergeSpecification(); diff --git a/lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java b/lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java index 58e4e4d5b03..f5809dd1931 100644 --- a/lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java @@ -40,6 +40,7 @@ public class TestAutomatonQuery extends LuceneTestCase { private final String FN = "field"; + @Override public void setUp() throws Exception { super.setUp(); directory = newDirectory(); @@ -65,6 +66,7 @@ public class TestAutomatonQuery extends LuceneTestCase { writer.close(); } + @Override public void tearDown() throws Exception { searcher.close(); reader.close(); diff --git a/lucene/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java b/lucene/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java index 29bec6c066a..b764dc0fc4f 100644 --- a/lucene/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java +++ b/lucene/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java @@ -41,6 +41,7 @@ public class TestAutomatonQueryUnicode extends LuceneTestCase { private final String FN = "field"; + @Override public void setUp() throws Exception { super.setUp(); directory = newDirectory(); @@ -85,6 +86,7 @@ public class TestAutomatonQueryUnicode extends LuceneTestCase { writer.close(); } + @Override public void tearDown() throws Exception { searcher.close(); reader.close(); diff --git a/lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java b/lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java index 254246bf210..65552c46f1f 100644 --- a/lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java @@ -42,6 +42,7 @@ public class TestRegexpQuery extends LuceneTestCase { private Directory directory; private final String FN = "field"; + @Override public void 
setUp() throws Exception { super.setUp(); directory = newDirectory(); @@ -56,6 +57,7 @@ public class TestRegexpQuery extends LuceneTestCase { searcher = new IndexSearcher(reader); } + @Override public void tearDown() throws Exception { searcher.close(); reader.close(); diff --git a/lucene/src/test/org/apache/lucene/search/cache/TestEntryCreators.java b/lucene/src/test/org/apache/lucene/search/cache/TestEntryCreators.java index 4485adb82dc..fad6f63c3f2 100644 --- a/lucene/src/test/org/apache/lucene/search/cache/TestEntryCreators.java +++ b/lucene/src/test/org/apache/lucene/search/cache/TestEntryCreators.java @@ -54,6 +54,7 @@ public class TestEntryCreators extends LuceneTestCase { this.parser = parser; values = new Number[NUM_DOCS]; } + @Override public String toString() { return field; diff --git a/lucene/src/test/org/apache/lucene/util/LuceneJUnitDividingSelector.java b/lucene/src/test/org/apache/lucene/util/LuceneJUnitDividingSelector.java index cf27a7267fc..5a9509c5a82 100644 --- a/lucene/src/test/org/apache/lucene/util/LuceneJUnitDividingSelector.java +++ b/lucene/src/test/org/apache/lucene/util/LuceneJUnitDividingSelector.java @@ -30,6 +30,7 @@ public class LuceneJUnitDividingSelector extends BaseExtendSelector { /** Current part to accept. 
*/ private int part; + @Override public void setParameters(Parameter[] pParameters) { super.setParameters(pParameters); for (int j = 0; j < pParameters.length; j++) { @@ -46,6 +47,7 @@ public class LuceneJUnitDividingSelector extends BaseExtendSelector { } } + @Override public void verifySettings() { super.verifySettings(); if (divisor <= 0 || part <= 0) { @@ -56,6 +58,7 @@ public class LuceneJUnitDividingSelector extends BaseExtendSelector { } } + @Override public boolean isSelected(File dir, String name, File path) { counter = counter % divisor + 1; return counter == part; diff --git a/lucene/src/test/org/apache/lucene/util/TestBytesRefHash.java b/lucene/src/test/org/apache/lucene/util/TestBytesRefHash.java index 553898a8a8c..2c82aea6ff3 100644 --- a/lucene/src/test/org/apache/lucene/util/TestBytesRefHash.java +++ b/lucene/src/test/org/apache/lucene/util/TestBytesRefHash.java @@ -40,6 +40,7 @@ public class TestBytesRefHash extends LuceneTestCase { /** */ + @Override @Before public void setUp() throws Exception { super.setUp(); diff --git a/lucene/src/test/org/apache/lucene/util/TestDoubleBarrelLRUCache.java b/lucene/src/test/org/apache/lucene/util/TestDoubleBarrelLRUCache.java index 03a935a6fef..952c218de3f 100644 --- a/lucene/src/test/org/apache/lucene/util/TestDoubleBarrelLRUCache.java +++ b/lucene/src/test/org/apache/lucene/util/TestDoubleBarrelLRUCache.java @@ -145,14 +145,17 @@ public class TestDoubleBarrelLRUCache extends LuceneTestCase { this.value = value; } + @Override public boolean equals(Object other) { return this.value.equals(((CloneableObject) other).value); } + @Override public int hashCode() { return value.hashCode(); } + @Override public Object clone() { return new CloneableObject(value); } @@ -165,14 +168,17 @@ public class TestDoubleBarrelLRUCache extends LuceneTestCase { this.value = value; } + @Override public boolean equals(Object other) { return this.value.equals(((CloneableInteger) other).value); } + @Override public int hashCode() { 
return value.hashCode(); } + @Override public Object clone() { return new CloneableInteger(value); } diff --git a/lucene/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java b/lucene/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java index beac79df7c9..b6c1c4d4f9c 100644 --- a/lucene/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java +++ b/lucene/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java @@ -31,6 +31,7 @@ public class TestRecyclingByteBlockAllocator extends LuceneTestCase { /** */ + @Override @Before public void setUp() throws Exception { super.setUp(); diff --git a/lucene/src/test/org/apache/lucene/util/automaton/fst/TestFSTs.java b/lucene/src/test/org/apache/lucene/util/automaton/fst/TestFSTs.java index 4922a1e0d64..f7d54753271 100644 --- a/lucene/src/test/org/apache/lucene/util/automaton/fst/TestFSTs.java +++ b/lucene/src/test/org/apache/lucene/util/automaton/fst/TestFSTs.java @@ -59,11 +59,13 @@ public class TestFSTs extends LuceneTestCase { private MockDirectoryWrapper dir; + @Override public void setUp() throws IOException { dir = newDirectory(); dir.setPreventDoubleWrite(false); } + @Override public void tearDown() throws IOException { dir.close(); } diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java index 87591992e1f..ff3f20fb8a1 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java @@ -673,6 +673,7 @@ public class HTMLStripCharFilter extends BaseCharFilter { + @Override public int read() throws IOException { // TODO: Do we ever want to preserve CDATA sections? // where do we have to worry about them? 
@@ -741,6 +742,7 @@ public class HTMLStripCharFilter extends BaseCharFilter { } + @Override public int read(char cbuf[], int off, int len) throws IOException { int i=0; for (i=0; ioutput:"the-rain", "rain-in" ,"in-spain", "falls", "mainly" * */ + @Override public boolean incrementToken() throws IOException { while (input.incrementToken()) { State current = captureState(); diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianCharFilter.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianCharFilter.java index c1ed38acfa5..962f839d45c 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianCharFilter.java +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianCharFilter.java @@ -32,6 +32,7 @@ public class PersianCharFilter extends CharFilter { super(in); } + @Override public int read(char[] cbuf, int off, int len) throws IOException { final int charsRead = super.read(cbuf, off, len); if (charsRead > 0) { diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceCharFilter.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceCharFilter.java index 0ccbb85b074..77f5c95475f 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceCharFilter.java +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceCharFilter.java @@ -113,6 +113,7 @@ public class PatternReplaceCharFilter extends BaseCharFilter { } } + @Override public int read() throws IOException { while( prepareReplaceBlock() ){ return replaceBlockBuffer.charAt( replaceBlockBufferOffset++ ); @@ -120,6 +121,7 @@ public class PatternReplaceCharFilter extends BaseCharFilter { return -1; } + @Override public int read(char[] cbuf, int off, int len) throws IOException { char[] tmp = new char[len]; int l = input.read(tmp, 0, len); diff --git 
a/modules/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java index ea8ba38c996..a74b3f8e9e7 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java @@ -78,6 +78,7 @@ public class SynonymMap { } + @Override public String toString() { StringBuilder sb = new StringBuilder("<"); if (synonyms!=null) { diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java index 946f9787c4c..9f3a28ad638 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java @@ -47,6 +47,7 @@ public class TestRemoveDuplicatesTokenFilter extends BaseTokenStreamTestCase { CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class); + @Override public boolean incrementToken() { if (toks.hasNext()) { clearAttributes(); diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java index 4e65f9b11a4..256cbacd1ca 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java @@ -87,6 +87,7 @@ public class TestTrimFilter extends BaseTokenStreamTestCase { this(tokens.toArray(new 
Token[tokens.size()])); } + @Override public boolean incrementToken() throws IOException { if (index >= tokens.length) return false; diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java index c784130d439..3d081184e58 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java @@ -213,6 +213,7 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase { /* analyzer that uses whitespace + wdf */ Analyzer a = new Analyzer() { + @Override public TokenStream tokenStream(String field, Reader reader) { return new WordDelimiterFilter( new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), @@ -239,6 +240,7 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase { /* analyzer that will consume tokens with large position increments */ Analyzer a2 = new Analyzer() { + @Override public TokenStream tokenStream(String field, Reader reader) { return new WordDelimiterFilter( new LargePosIncTokenFilter( @@ -271,6 +273,7 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase { new int[] { 1, 11, 1 }); Analyzer a3 = new Analyzer() { + @Override public TokenStream tokenStream(String field, Reader reader) { StopFilter filter = new StopFilter(TEST_VERSION_CURRENT, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), StandardAnalyzer.STOP_WORDS_SET); diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymFilter.java index 2c68e047abf..29c26d6ff2b 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymFilter.java +++ 
b/modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymFilter.java @@ -395,6 +395,7 @@ public class TestSynonymFilter extends BaseTokenStreamTestCase { this(tokens.toArray(new Token[tokens.size()])); } + @Override public boolean incrementToken() throws IOException { if (index >= tokens.length) return false; diff --git a/solr/contrib/clustering/src/main/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java b/solr/contrib/clustering/src/main/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java index 126ea09c744..b0cb1981d89 100644 --- a/solr/contrib/clustering/src/main/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java +++ b/solr/contrib/clustering/src/main/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java @@ -77,6 +77,7 @@ public class CarrotClusteringEngine extends SearchClusteringEngine { private String idFieldName; + @Override @Deprecated public Object cluster(Query query, DocList docList, SolrQueryRequest sreq) { SolrIndexSearcher searcher = sreq.getSearcher(); @@ -90,6 +91,7 @@ public class CarrotClusteringEngine extends SearchClusteringEngine { } } + @Override public Object cluster(Query query, SolrDocumentList solrDocList, Map docIds, SolrQueryRequest sreq) { try { diff --git a/solr/contrib/clustering/src/main/java/org/apache/solr/handler/clustering/carrot2/LuceneLanguageModelFactory.java b/solr/contrib/clustering/src/main/java/org/apache/solr/handler/clustering/carrot2/LuceneLanguageModelFactory.java index 8efd3ab1be6..857fccf48f3 100644 --- a/solr/contrib/clustering/src/main/java/org/apache/solr/handler/clustering/carrot2/LuceneLanguageModelFactory.java +++ b/solr/contrib/clustering/src/main/java/org/apache/solr/handler/clustering/carrot2/LuceneLanguageModelFactory.java @@ -69,7 +69,8 @@ public class LuceneLanguageModelFactory extends DefaultLanguageModelFactory { /** * Provide an {@link IStemmer} implementation for a given language. 
*/ - protected IStemmer createStemmer(LanguageCode language) { + @Override + protected IStemmer createStemmer(LanguageCode language) { switch (language) { case ARABIC: return ArabicStemmerFactory.createStemmer(); diff --git a/solr/contrib/clustering/src/test/java/org/apache/solr/handler/clustering/MockDocumentClusteringEngine.java b/solr/contrib/clustering/src/test/java/org/apache/solr/handler/clustering/MockDocumentClusteringEngine.java index 90f0ab73e5a..77b3fcfba06 100644 --- a/solr/contrib/clustering/src/test/java/org/apache/solr/handler/clustering/MockDocumentClusteringEngine.java +++ b/solr/contrib/clustering/src/test/java/org/apache/solr/handler/clustering/MockDocumentClusteringEngine.java @@ -25,11 +25,13 @@ import org.apache.solr.search.DocSet; * **/ public class MockDocumentClusteringEngine extends DocumentClusteringEngine { + @Override public NamedList cluster(DocSet docs, SolrParams solrParams) { NamedList result = new NamedList(); return result; } + @Override public NamedList cluster(SolrParams solrParams) { NamedList result = new NamedList(); return result; diff --git a/solr/contrib/dataimporthandler/src/extras/main/java/org/apache/solr/handler/dataimport/MailEntityProcessor.java b/solr/contrib/dataimporthandler/src/extras/main/java/org/apache/solr/handler/dataimport/MailEntityProcessor.java index 5b9374c744a..0231d1ec7e3 100644 --- a/solr/contrib/dataimporthandler/src/extras/main/java/org/apache/solr/handler/dataimport/MailEntityProcessor.java +++ b/solr/contrib/dataimporthandler/src/extras/main/java/org/apache/solr/handler/dataimport/MailEntityProcessor.java @@ -51,6 +51,7 @@ public class MailEntityProcessor extends EntityProcessorBase { public SearchTerm getCustomSearch(Folder folder); } + @Override public void init(Context context) { super.init(context); // set attributes using XXX getXXXFromContext(attribute, defualtValue); @@ -95,6 +96,7 @@ public class MailEntityProcessor extends EntityProcessorBase { logConfig(); } + @Override public Map 
nextRow() { Message mail; Map row = null; diff --git a/solr/contrib/dataimporthandler/src/extras/main/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java b/solr/contrib/dataimporthandler/src/extras/main/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java index 4e4ad41afbe..b7b31ab5abb 100644 --- a/solr/contrib/dataimporthandler/src/extras/main/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java +++ b/solr/contrib/dataimporthandler/src/extras/main/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java @@ -92,6 +92,7 @@ public class TikaEntityProcessor extends EntityProcessorBase { done = false; } + @Override public Map nextRow() { if(done) return null; Map row = new HashMap(); diff --git a/solr/contrib/dataimporthandler/src/extras/test/java/org/apache/solr/handler/dataimport/TestMailEntityProcessor.java b/solr/contrib/dataimporthandler/src/extras/test/java/org/apache/solr/handler/dataimport/TestMailEntityProcessor.java index 8a1f1083d4e..2ac19b32192 100644 --- a/solr/contrib/dataimporthandler/src/extras/test/java/org/apache/solr/handler/dataimport/TestMailEntityProcessor.java +++ b/solr/contrib/dataimporthandler/src/extras/test/java/org/apache/solr/handler/dataimport/TestMailEntityProcessor.java @@ -191,18 +191,22 @@ public class TestMailEntityProcessor extends AbstractDataImportHandlerTestCase { super(null, ".", null); } + @Override public boolean upload(SolrInputDocument doc) { return docs.add(doc); } + @Override public void log(int event, String name, Object row) { // Do nothing } + @Override public void doDeleteAll() { deleteAllCalled = Boolean.TRUE; } + @Override public void commit(boolean b) { commitCalled = Boolean.TRUE; } diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinContentStreamDataSource.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinContentStreamDataSource.java index 221d8eacbc7..1187f65e92d 100644 --- 
a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinContentStreamDataSource.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinContentStreamDataSource.java @@ -39,10 +39,12 @@ public class BinContentStreamDataSource extends DataSource { private InputStream in; + @Override public void init(Context context, Properties initProps) { this.context = (ContextImpl) context; } + @Override public InputStream getData(String query) { contentStream = context.getDocBuilder().requestParameters.contentStream; if (contentStream == null) @@ -55,6 +57,7 @@ public class BinContentStreamDataSource extends DataSource { } } + @Override public void close() { if (contentStream != null) { try { diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinFileDataSource.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinFileDataSource.java index 4d4cdebc62b..e473ab8da32 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinFileDataSource.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinFileDataSource.java @@ -43,10 +43,12 @@ import java.util.Properties; public class BinFileDataSource extends DataSource{ protected String basePath; + @Override public void init(Context context, Properties initProps) { basePath = initProps.getProperty(FileDataSource.BASE_PATH); } + @Override public InputStream getData(String query) { File f = FileDataSource.getFile(basePath,query); try { @@ -57,6 +59,7 @@ public class BinFileDataSource extends DataSource{ } } + @Override public void close() { } diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinURLDataSource.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinURLDataSource.java index 9d4d879c2ce..045d6fa3bd0 100644 --- 
a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinURLDataSource.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/BinURLDataSource.java @@ -49,6 +49,7 @@ public class BinURLDataSource extends DataSource{ public BinURLDataSource() { } + @Override public void init(Context context, Properties initProps) { this.context = context; this.initProps = initProps; @@ -72,6 +73,7 @@ public class BinURLDataSource extends DataSource{ } } + @Override public InputStream getData(String query) { URL url = null; try { @@ -89,6 +91,7 @@ public class BinURLDataSource extends DataSource{ } } + @Override public void close() { } private String getInitPropWithReplacements(String propertyName) { diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/CachedSqlEntityProcessor.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/CachedSqlEntityProcessor.java index 69b7b2b6f18..864e772288e 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/CachedSqlEntityProcessor.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/CachedSqlEntityProcessor.java @@ -38,6 +38,7 @@ import java.util.Map; public class CachedSqlEntityProcessor extends SqlEntityProcessor { private boolean isFirst; + @Override @SuppressWarnings("unchecked") public void init(Context context) { super.init(context); @@ -45,6 +46,7 @@ public class CachedSqlEntityProcessor extends SqlEntityProcessor { isFirst = true; } + @Override public Map nextRow() { if (dataSourceRowCache != null) return getFromRowCacheTransformed(); @@ -60,6 +62,7 @@ public class CachedSqlEntityProcessor extends SqlEntityProcessor { } + @Override protected List> getAllNonCachedRows() { List> rows = new ArrayList>(); String q = getQuery(); diff --git 
a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ClobTransformer.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ClobTransformer.java index 5ebd3baa5dd..ae970d25fdb 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ClobTransformer.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ClobTransformer.java @@ -37,6 +37,7 @@ import java.util.Map; * @since solr 1.4 */ public class ClobTransformer extends Transformer { + @Override public Object transformRow(Map aRow, Context context) { for (Map map : context.getAllEntityFields()) { if (!TRUE.equals(map.get(CLOB))) continue; diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ContentStreamDataSource.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ContentStreamDataSource.java index 3b55fd6cf5e..58ed19ed595 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ContentStreamDataSource.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ContentStreamDataSource.java @@ -39,10 +39,12 @@ public class ContentStreamDataSource extends DataSource { private ContentStream contentStream; private Reader reader; + @Override public void init(Context context, Properties initProps) { this.context = (ContextImpl) context; } + @Override public Reader getData(String query) { contentStream = context.getDocBuilder().requestParameters.contentStream; if (contentStream == null) @@ -55,6 +57,7 @@ public class ContentStreamDataSource extends DataSource { } } + @Override public void close() { if (contentStream != null) { try { diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ContextImpl.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ContextImpl.java index 
6dfa48276c9..bd726835e2d 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ContextImpl.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ContextImpl.java @@ -71,22 +71,27 @@ public class ContextImpl extends Context { parent = parentContext; } + @Override public String getEntityAttribute(String name) { return entity == null ? null : entity.allAttributes.get(name); } + @Override public String getResolvedEntityAttribute(String name) { return entity == null ? null : resolver.replaceTokens(entity.allAttributes.get(name)); } + @Override public List> getAllEntityFields() { return entity == null ? Collections.EMPTY_LIST : entity.allFieldsList; } + @Override public VariableResolver getVariableResolver() { return resolver; } + @Override public DataSource getDataSource() { if (ds != null) return ds; if(entity == null) return null; @@ -101,26 +106,32 @@ public class ContextImpl extends Context { return entity.dataSrc; } + @Override public DataSource getDataSource(String name) { return dataImporter.getDataSourceInstance(entity, name, this); } + @Override public boolean isRootEntity() { return entity.isDocRoot; } + @Override public String currentProcess() { return currProcess; } + @Override public Map getRequestParameters() { return requestParams; } + @Override public EntityProcessor getEntityProcessor() { return entity == null ? 
null : entity.processor; } + @Override public void setSessionAttribute(String name, Object val, String scope) { if(name == null) return; if (Context.SCOPE_ENTITY.equals(scope)) { @@ -148,6 +159,7 @@ public class ContextImpl extends Context { else entitySession.put(name, val); } + @Override public Object getSessionAttribute(String name, String scope) { if (Context.SCOPE_ENTITY.equals(scope)) { if (entitySession == null) @@ -166,6 +178,7 @@ public class ContextImpl extends Context { return null; } + @Override public Context getParentContext() { return parent; } @@ -187,15 +200,18 @@ public class ContextImpl extends Context { } + @Override public SolrCore getSolrCore() { return dataImporter == null ? null : dataImporter.getCore(); } + @Override public Map getStats() { return docBuilder != null ? docBuilder.importStatistics.getStatsSnapshot() : Collections.emptyMap(); } + @Override public String getScript() { if(dataImporter != null) { DataConfig.Script script = dataImporter.getConfig().script; @@ -204,6 +220,7 @@ public class ContextImpl extends Context { return null; } + @Override public String getScriptLanguage() { if (dataImporter != null) { DataConfig.Script script = dataImporter.getConfig().script; @@ -212,12 +229,14 @@ public class ContextImpl extends Context { return null; } + @Override public void deleteDoc(String id) { if(docBuilder != null){ docBuilder.writer.deleteDoc(id); } } + @Override public void deleteDocByQuery(String query) { if(docBuilder != null){ docBuilder.writer.deleteByQuery(query); @@ -227,10 +246,12 @@ public class ContextImpl extends Context { DocBuilder getDocBuilder(){ return docBuilder; } + @Override public Object resolve(String var) { return resolver.resolve(var); } + @Override public String replaceTokens(String template) { return resolver.replaceTokens(template); } diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DataConfig.java 
b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DataConfig.java index f548ff648a9..f0a9e412427 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DataConfig.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DataConfig.java @@ -214,6 +214,7 @@ public class DataConfig { public Map allAttributes = new HashMap() { + @Override public String put(String key, String value) { if (super.containsKey(key)) return super.get(key); diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DataImporter.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DataImporter.java index 6d9206d43ed..45f8fcf1598 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DataImporter.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DataImporter.java @@ -423,6 +423,7 @@ public class DataImporter { } static final ThreadLocal QUERY_COUNT = new ThreadLocal() { + @Override protected AtomicLong initialValue() { return new AtomicLong(); } diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DateFormatTransformer.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DateFormatTransformer.java index e2c6e221095..95c601e8911 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DateFormatTransformer.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DateFormatTransformer.java @@ -45,6 +45,7 @@ public class DateFormatTransformer extends Transformer { private static final Logger LOG = LoggerFactory .getLogger(DateFormatTransformer.class); + @Override @SuppressWarnings("unchecked") public Object transformRow(Map aRow, Context context) { diff --git 
a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DebugLogger.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DebugLogger.java index 77c1ea7669e..8ee0126dc25 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DebugLogger.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DebugLogger.java @@ -60,6 +60,7 @@ class DebugLogger { output = new NamedList(); debugStack = new Stack() { + @Override public DebugInfo pop() { if (size() == 1) throw new DataImportHandlerException( @@ -169,14 +170,17 @@ class DebugLogger { DataSource wrapDs(final DataSource ds) { return new DataSource() { + @Override public void init(Context context, Properties initProps) { ds.init(context, initProps); } + @Override public void close() { ds.close(); } + @Override public Object getData(String query) { writer.log(SolrWriter.ENTITY_META, "query", query); long start = System.currentTimeMillis(); @@ -203,6 +207,7 @@ class DebugLogger { Transformer wrapTransformer(final Transformer t) { return new Transformer() { + @Override public Object transformRow(Map row, Context context) { writer.log(SolrWriter.PRE_TRANSFORMER_ROW, null, row); String tName = getTransformerName(t); diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DocBuilder.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DocBuilder.java index e94ba8947f8..42bf6da9499 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DocBuilder.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DocBuilder.java @@ -139,6 +139,7 @@ public class DocBuilder { document = dataImporter.getConfig().document; final AtomicLong startTime = new AtomicLong(System.currentTimeMillis()); statusMessages.put(TIME_ELAPSED, new Object() { + @Override public String 
toString() { return getTimeElapsedSince(startTime.get()); } diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorBase.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorBase.java index c2294bd6549..5d761194440 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorBase.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorBase.java @@ -49,6 +49,7 @@ public class EntityProcessorBase extends EntityProcessor { protected String onError = ABORT; + @Override public void init(Context context) { rowIterator = null; this.context = context; @@ -86,14 +87,17 @@ public class EntityProcessorBase extends EntityProcessor { } } + @Override public Map nextModifiedRowKey() { return null; } + @Override public Map nextDeletedRowKey() { return null; } + @Override public Map nextModifiedParentRowKey() { return null; } @@ -105,11 +109,13 @@ public class EntityProcessorBase extends EntityProcessor { * @return a row where the key is the name of the field and value can be any Object or a Collection of objects. 
Return * null to signal end of rows */ + @Override public Map nextRow() { return null;// do not do anything } + @Override public void destroy() { /*no op*/ } diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java index c46ddcf9e6d..432e64ac767 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java @@ -54,6 +54,7 @@ public class EntityProcessorWrapper extends EntityProcessor { this.docBuilder = docBuilder; } + @Override public void init(Context context) { rowcache = null; this.context = context; @@ -79,6 +80,7 @@ public class EntityProcessorWrapper extends EntityProcessor { String[] transArr = transClasses.split(","); transformers = new ArrayList() { + @Override public boolean add(Transformer transformer) { if (docBuilder != null && docBuilder.verboseDebug) { transformer = docBuilder.writer.getDebugLogger().wrapTransformer(transformer); @@ -135,6 +137,7 @@ public class EntityProcessorWrapper extends EntityProcessor { o = clazz.newInstance(); } + @Override public Object transformRow(Map aRow, Context context) { try { return meth.invoke(o, aRow); @@ -223,6 +226,7 @@ public class EntityProcessorWrapper extends EntityProcessor { && Boolean.parseBoolean(oMap.get("$stopTransform").toString()); } + @Override public Map nextRow() { if (rowcache != null) { return getFromRowCache(); @@ -252,6 +256,7 @@ public class EntityProcessorWrapper extends EntityProcessor { } } + @Override public Map nextModifiedRowKey() { Map row = delegate.nextModifiedRowKey(); row = applyTransformer(row); @@ -259,6 +264,7 @@ public class EntityProcessorWrapper extends EntityProcessor { return row; } + @Override public Map nextDeletedRowKey() { Map row 
= delegate.nextDeletedRowKey(); row = applyTransformer(row); @@ -266,10 +272,12 @@ public class EntityProcessorWrapper extends EntityProcessor { return row; } + @Override public Map nextModifiedParentRowKey() { return delegate.nextModifiedParentRowKey(); } + @Override public void destroy() { delegate.destroy(); } diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EvaluatorBag.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EvaluatorBag.java index 9c4321a97d7..24e728d82c0 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EvaluatorBag.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EvaluatorBag.java @@ -66,6 +66,7 @@ public class EvaluatorBag { */ public static Evaluator getSqlEscapingEvaluator() { return new Evaluator() { + @Override public String evaluate(String expression, Context context) { List l = parseParams(expression, context.getVariableResolver()); if (l.size() != 1) { @@ -90,6 +91,7 @@ public class EvaluatorBag { */ public static Evaluator getSolrQueryEscapingEvaluator() { return new Evaluator() { + @Override public String evaluate(String expression, Context context) { List l = parseParams(expression, context.getVariableResolver()); if (l.size() != 1) { @@ -109,6 +111,7 @@ public class EvaluatorBag { */ public static Evaluator getUrlEvaluator() { return new Evaluator() { + @Override public String evaluate(String expression, Context context) { List l = parseParams(expression, context.getVariableResolver()); if (l.size() != 1) { @@ -138,6 +141,7 @@ public class EvaluatorBag { */ public static Evaluator getDateFormatEvaluator() { return new Evaluator() { + @Override public String evaluate(String expression, Context context) { List l = parseParams(expression, context.getVariableResolver()); if (l.size() != 2) { @@ -288,6 +292,7 @@ public class EvaluatorBag { } + @Override public String toString() 
{ Object o = vr.resolve(varName); return o == null ? null : o.toString(); diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FieldReaderDataSource.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FieldReaderDataSource.java index 8b2ae93c12c..b9d9ec74ab9 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FieldReaderDataSource.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FieldReaderDataSource.java @@ -52,6 +52,7 @@ public class FieldReaderDataSource extends DataSource { private String encoding; private EntityProcessorWrapper entityProcessor; + @Override public void init(Context context, Properties initProps) { dataField = context.getEntityAttribute("dataField"); encoding = context.getEntityAttribute("encoding"); @@ -59,6 +60,7 @@ public class FieldReaderDataSource extends DataSource { /*no op*/ } + @Override public Reader getData(String query) { Object o = entityProcessor.getVariableResolver().resolve(dataField); if (o == null) { @@ -111,6 +113,7 @@ public class FieldReaderDataSource extends DataSource { } } + @Override public void close() { } diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FieldStreamDataSource.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FieldStreamDataSource.java index f92f7cb4b15..132367cc0fa 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FieldStreamDataSource.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FieldStreamDataSource.java @@ -52,12 +52,14 @@ public class FieldStreamDataSource extends DataSource { protected String dataField; private EntityProcessorWrapper wrapper; + @Override public void init(Context context, Properties initProps) { dataField = context.getEntityAttribute("dataField"); wrapper = 
(EntityProcessorWrapper) context.getEntityProcessor(); /*no op*/ } + @Override public InputStream getData(String query) { Object o = wrapper.getVariableResolver().resolve(dataField); if (o == null) { @@ -90,6 +92,7 @@ public class FieldStreamDataSource extends DataSource { } + @Override public void close() { } } diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FileDataSource.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FileDataSource.java index 64353ef3fa4..2f5a5aa1e2e 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FileDataSource.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FileDataSource.java @@ -59,6 +59,7 @@ public class FileDataSource extends DataSource { private static final Logger LOG = LoggerFactory.getLogger(FileDataSource.class); + @Override public void init(Context context, Properties initProps) { basePath = initProps.getProperty(BASE_PATH); if (initProps.get(URLDataSource.ENCODING) != null) @@ -79,6 +80,7 @@ public class FileDataSource extends DataSource { * returned Reader *

*/ + @Override public Reader getData(String query) { File f = getFile(basePath,query); try { @@ -130,6 +132,7 @@ public class FileDataSource extends DataSource { } } + @Override public void close() { } diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FileListEntityProcessor.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FileListEntityProcessor.java index 72924176731..7549af7dfbd 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FileListEntityProcessor.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/FileListEntityProcessor.java @@ -106,6 +106,7 @@ public class FileListEntityProcessor extends EntityProcessorBase { private Pattern fileNamePattern, excludesPattern; + @Override public void init(Context context) { super.init(context); fileName = context.getEntityAttribute(FILE_NAME); @@ -195,6 +196,7 @@ public class FileListEntityProcessor extends EntityProcessorBase { return Long.parseLong(sizeStr); } + @Override public Map nextRow() { if (rowIterator != null) return getNext(); diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/JdbcDataSource.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/JdbcDataSource.java index cb38e480bf0..f48ca2cda86 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/JdbcDataSource.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/JdbcDataSource.java @@ -54,6 +54,7 @@ public class JdbcDataSource extends private int maxRows = 0; + @Override public void init(Context context, Properties initProps) { Object o = initProps.get(CONVERT_TYPE); if (o != null) @@ -204,6 +205,7 @@ public class JdbcDataSource extends } } + @Override public Iterator> getData(String query) { ResultSetIterator r = new ResultSetIterator(query); return 
r.getIterator(); @@ -370,6 +372,7 @@ public class JdbcDataSource extends } } + @Override protected void finalize() throws Throwable { try { if(!isClosed){ @@ -383,6 +386,7 @@ public class JdbcDataSource extends private boolean isClosed = false; + @Override public void close() { try { closeConnection(); diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/LineEntityProcessor.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/LineEntityProcessor.java index 0a8b201ab42..30e366316f7 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/LineEntityProcessor.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/LineEntityProcessor.java @@ -64,6 +64,7 @@ public class LineEntityProcessor extends EntityProcessorBase { /** * Parses each of the entity attributes. */ + @Override public void init(Context context) { super.init(context); String s; @@ -97,6 +98,7 @@ public class LineEntityProcessor extends EntityProcessorBase { * from the url. However transformers can be used to create as * many other fields as required. 
*/ + @Override public Map nextRow() { if (reader == null) { reader = new BufferedReader((Reader) context.getDataSource().getData(url)); diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/LogTransformer.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/LogTransformer.java index d9d2f115d24..5a603a74049 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/LogTransformer.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/LogTransformer.java @@ -35,6 +35,7 @@ import java.util.Map; public class LogTransformer extends Transformer { Logger LOG = LoggerFactory.getLogger(LogTransformer.class); + @Override public Object transformRow(Map row, Context ctx) { String expr = ctx.getEntityAttribute(LOG_TEMPLATE); String level = ctx.replaceTokens(ctx.getEntityAttribute(LOG_LEVEL)); diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/MockDataSource.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/MockDataSource.java index 7b747d72a56..6fd7213b5a7 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/MockDataSource.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/MockDataSource.java @@ -45,13 +45,16 @@ public class MockDataSource extends cache.clear(); } + @Override public void init(Context context, Properties initProps) { } + @Override public Iterator> getData(String query) { return cache.get(query); } + @Override public void close() { cache.clear(); diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/NumberFormatTransformer.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/NumberFormatTransformer.java index d38ab75fcb0..36efecf5320 100644 --- 
a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/NumberFormatTransformer.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/NumberFormatTransformer.java @@ -49,6 +49,7 @@ public class NumberFormatTransformer extends Transformer { private static final Pattern localeRegex = Pattern.compile("^([a-z]{2})-([A-Z]{2})$"); + @Override @SuppressWarnings("unchecked") public Object transformRow(Map row, Context context) { for (Map fld : context.getAllEntityFields()) { diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/PlainTextEntityProcessor.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/PlainTextEntityProcessor.java index 79a981875bc..2d32eee0122 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/PlainTextEntityProcessor.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/PlainTextEntityProcessor.java @@ -40,11 +40,13 @@ public class PlainTextEntityProcessor extends EntityProcessorBase { private static final Logger LOG = LoggerFactory.getLogger(PlainTextEntityProcessor.class); private boolean ended = false; + @Override public void init(Context context) { super.init(context); ended = false; } + @Override public Map nextRow() { if (ended) return null; DataSource ds = context.getDataSource(); diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/RegexTransformer.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/RegexTransformer.java index e5910093e73..429bb0cf2f3 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/RegexTransformer.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/RegexTransformer.java @@ -43,6 +43,7 @@ import java.util.regex.Pattern; public class RegexTransformer extends 
Transformer { private static final Logger LOG = LoggerFactory.getLogger(RegexTransformer.class); + @Override @SuppressWarnings("unchecked") public Map transformRow(Map row, Context ctx) { diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ScriptTransformer.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ScriptTransformer.java index ba06f49b91d..547fc66cf2a 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ScriptTransformer.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/ScriptTransformer.java @@ -47,6 +47,7 @@ public class ScriptTransformer extends Transformer { private String functionName; + @Override public Object transformRow(Map row, Context context) { try { if (engine == null) diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/SqlEntityProcessor.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/SqlEntityProcessor.java index 925a9569bf2..1748998720b 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/SqlEntityProcessor.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/SqlEntityProcessor.java @@ -46,6 +46,7 @@ public class SqlEntityProcessor extends EntityProcessorBase { protected DataSource>> dataSource; + @Override @SuppressWarnings("unchecked") public void init(Context context) { super.init(context); @@ -65,6 +66,7 @@ public class SqlEntityProcessor extends EntityProcessorBase { } } + @Override public Map nextRow() { if (rowIterator == null) { String q = getQuery(); @@ -73,6 +75,7 @@ public class SqlEntityProcessor extends EntityProcessorBase { return getNext(); } + @Override public Map nextModifiedRowKey() { if (rowIterator == null) { String deltaQuery = context.getEntityAttribute(DELTA_QUERY); @@ -83,6 +86,7 @@ public class 
SqlEntityProcessor extends EntityProcessorBase { return getNext(); } + @Override public Map nextDeletedRowKey() { if (rowIterator == null) { String deletedPkQuery = context.getEntityAttribute(DEL_PK_QUERY); @@ -93,6 +97,7 @@ public class SqlEntityProcessor extends EntityProcessorBase { return getNext(); } + @Override public Map nextModifiedParentRowKey() { if (rowIterator == null) { String parentDeltaQuery = context.getEntityAttribute(PARENT_DELTA_QUERY); diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/TemplateTransformer.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/TemplateTransformer.java index 8c5527983ff..6fd0665c700 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/TemplateTransformer.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/TemplateTransformer.java @@ -51,6 +51,7 @@ public class TemplateTransformer extends Transformer { private static final Logger LOG = LoggerFactory.getLogger(TemplateTransformer.class); private Map> templateVsVars = new HashMap>(); + @Override @SuppressWarnings("unchecked") public Object transformRow(Map row, Context context) { diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/URLDataSource.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/URLDataSource.java index 274c120b270..234fb56f264 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/URLDataSource.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/URLDataSource.java @@ -56,6 +56,7 @@ public class URLDataSource extends DataSource { public URLDataSource() { } + @Override public void init(Context context, Properties initProps) { this.context = context; this.initProps = initProps; @@ -81,6 +82,7 @@ public class URLDataSource extends DataSource { } } + 
@Override public Reader getData(String query) { URL url = null; try { @@ -114,6 +116,7 @@ public class URLDataSource extends DataSource { } } + @Override public void close() { } diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/VariableResolverImpl.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/VariableResolverImpl.java index b0675cc4627..8d39dd13bde 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/VariableResolverImpl.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/VariableResolverImpl.java @@ -91,10 +91,12 @@ public class VariableResolverImpl extends VariableResolver { container.remove(name); } + @Override public String replaceTokens(String template) { return templateString.replaceTokens(template, this); } + @Override @SuppressWarnings("unchecked") public Object resolve(String name) { if (name == null) diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/XPathEntityProcessor.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/XPathEntityProcessor.java index 26196788dfe..e995fab3442 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/XPathEntityProcessor.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/XPathEntityProcessor.java @@ -80,6 +80,7 @@ public class XPathEntityProcessor extends EntityProcessorBase { protected Thread publisherThread; + @Override @SuppressWarnings("unchecked") public void init(Context context) { super.init(context); @@ -171,6 +172,7 @@ public class XPathEntityProcessor extends EntityProcessorBase { } + @Override public Map nextRow() { Map result; @@ -398,6 +400,7 @@ public class XPathEntityProcessor extends EntityProcessorBase { final AtomicBoolean isEnd = new AtomicBoolean(false); final AtomicBoolean throwExp = new 
AtomicBoolean(true); publisherThread = new Thread() { + @Override public void run() { try { xpathReader.streamRecords(data, new XPathRecordReader.Handler() { diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/AbstractDataImportHandlerTestCase.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/AbstractDataImportHandlerTestCase.java index 07f78d0b412..8c3791f224a 100644 --- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/AbstractDataImportHandlerTestCase.java +++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/AbstractDataImportHandlerTestCase.java @@ -197,89 +197,110 @@ public abstract class AbstractDataImportHandlerTestCase extends this.root = root; } + @Override public String getEntityAttribute(String name) { return entityAttrs == null ? delegate.getEntityAttribute(name) : entityAttrs.get(name); } + @Override public String getResolvedEntityAttribute(String name) { return entityAttrs == null ? delegate.getResolvedEntityAttribute(name) : delegate.getVariableResolver().replaceTokens(entityAttrs.get(name)); } + @Override public List> getAllEntityFields() { return entityFields == null ? 
delegate.getAllEntityFields() : entityFields; } + @Override public VariableResolver getVariableResolver() { return delegate.getVariableResolver(); } + @Override public DataSource getDataSource() { return delegate.getDataSource(); } + @Override public boolean isRootEntity() { return root; } + @Override public String currentProcess() { return delegate.currentProcess(); } + @Override public Map getRequestParameters() { return delegate.getRequestParameters(); } + @Override public EntityProcessor getEntityProcessor() { return null; } + @Override public void setSessionAttribute(String name, Object val, String scope) { delegate.setSessionAttribute(name, val, scope); } + @Override public Object getSessionAttribute(String name, String scope) { return delegate.getSessionAttribute(name, scope); } + @Override public Context getParentContext() { return delegate.getParentContext(); } + @Override public DataSource getDataSource(String name) { return delegate.getDataSource(name); } + @Override public SolrCore getSolrCore() { return delegate.getSolrCore(); } + @Override public Map getStats() { return delegate.getStats(); } + @Override public String getScript() { return script == null ? delegate.getScript() : script; } + @Override public String getScriptLanguage() { return scriptlang == null ? 
delegate.getScriptLanguage() : scriptlang; } + @Override public void deleteDoc(String id) { } + @Override public void deleteDocByQuery(String query) { } + @Override public Object resolve(String var) { return delegate.resolve(var); } + @Override public String replaceTokens(String template) { return delegate.replaceTokens(template); } @@ -318,31 +339,37 @@ public abstract class AbstractDataImportHandlerTestCase extends reset(); } + @Override public void finish() throws IOException { finishCalled = true; super.finish(); } + @Override public void processAdd(AddUpdateCommand cmd) throws IOException { processAddCalled = true; super.processAdd(cmd); } + @Override public void processCommit(CommitUpdateCommand cmd) throws IOException { processCommitCalled = true; super.processCommit(cmd); } + @Override public void processDelete(DeleteUpdateCommand cmd) throws IOException { processDeleteCalled = true; super.processDelete(cmd); } + @Override public void processMergeIndexes(MergeIndexesCommand cmd) throws IOException { mergeIndexesCalled = true; super.processMergeIndexes(cmd); } + @Override public void processRollback(RollbackUpdateCommand cmd) throws IOException { rollbackCalled = true; super.processRollback(cmd); diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestCachedSqlEntityProcessor.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestCachedSqlEntityProcessor.java index ceda6edd3ec..0037d6796ca 100644 --- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestCachedSqlEntityProcessor.java +++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestCachedSqlEntityProcessor.java @@ -158,6 +158,7 @@ public class TestCachedSqlEntityProcessor extends AbstractDataImportHandlerTestC public static class DoubleTransformer extends Transformer { + @Override public Object transformRow(Map row, Context context) { List> rows = new 
ArrayList>(); rows.add(row); @@ -169,6 +170,7 @@ public class TestCachedSqlEntityProcessor extends AbstractDataImportHandlerTestC public static class UppercaseTransformer extends Transformer { + @Override public Object transformRow(Map row, Context context) { for (Map.Entry entry : row.entrySet()) { Object val = entry.getValue(); diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestContentStreamDataSource.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestContentStreamDataSource.java index c49be006377..692272d4e04 100644 --- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestContentStreamDataSource.java +++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestContentStreamDataSource.java @@ -43,6 +43,7 @@ public class TestContentStreamDataSource extends AbstractDataImportHandlerTestCa SolrInstance instance = null; JettySolrRunner jetty; + @Override @Before public void setUp() throws Exception { super.setUp(); @@ -51,6 +52,7 @@ public class TestContentStreamDataSource extends AbstractDataImportHandlerTestCa jetty = createJetty(instance); } + @Override @After public void tearDown() throws Exception { jetty.stop(); diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestDocBuilder.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestDocBuilder.java index e9947e52e76..a16b7017ab4 100644 --- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestDocBuilder.java +++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestDocBuilder.java @@ -33,6 +33,7 @@ import java.util.*; */ public class TestDocBuilder extends AbstractDataImportHandlerTestCase { + @Override @After public void tearDown() throws Exception { MockDataSource.clearCache(); @@ -200,22 +201,27 @@ public class TestDocBuilder 
extends AbstractDataImportHandlerTestCase { super(null, ".",null); } + @Override public boolean upload(SolrInputDocument doc) { return docs.add(doc); } + @Override public void log(int event, String name, Object row) { // Do nothing } + @Override public void doDeleteAll() { deleteAllCalled = Boolean.TRUE; } + @Override public void commit(boolean b) { commitCalled = Boolean.TRUE; } + @Override public void finish() { finishCalled = Boolean.TRUE; } diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestDocBuilder2.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestDocBuilder2.java index f361eb20a43..4632318fa17 100644 --- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestDocBuilder2.java +++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestDocBuilder2.java @@ -252,6 +252,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase { } public static class MockTransformer extends Transformer { + @Override public Object transformRow(Map row, Context context) { assertTrue("Context gave incorrect data source", context.getDataSource("mockDs") instanceof MockDataSource2); return row; @@ -259,6 +260,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase { } public static class AddDynamicFieldTransformer extends Transformer { + @Override public Object transformRow(Map row, Context context) { // Add a dynamic field row.put("dynamic_s", "test"); diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestEntityProcessorBase.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestEntityProcessorBase.java index 2b7d3578e96..42b29610666 100644 --- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestEntityProcessorBase.java +++ 
b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestEntityProcessorBase.java @@ -57,6 +57,7 @@ public class TestEntityProcessorBase extends AbstractDataImportHandlerTestCase { static class T1 extends Transformer { + @Override public Object transformRow(Map aRow, Context context) { aRow.put("T1", "T1 called"); return aRow; @@ -66,6 +67,7 @@ public class TestEntityProcessorBase extends AbstractDataImportHandlerTestCase { static class T2 extends Transformer { + @Override public Object transformRow(Map aRow, Context context) { aRow.put("T2", "T2 called"); return aRow; diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestErrorHandling.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestErrorHandling.java index 0f703815bae..b8e285dffe1 100644 --- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestErrorHandling.java +++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestErrorHandling.java @@ -78,19 +78,23 @@ public class TestErrorHandling extends AbstractDataImportHandlerTestCase { public static class StringDataSource extends DataSource { public static String xml = ""; + @Override public void init(Context context, Properties initProps) { } + @Override public Reader getData(String query) { return new StringReader(xml); } + @Override public void close() { } } public static class ExceptionTransformer extends Transformer { + @Override public Object transformRow(Map row, Context context) { throw new RuntimeException("Test exception"); } diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestEvaluatorBag.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestEvaluatorBag.java index 18b30a36d7b..41ac1dc5d15 100644 --- 
a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestEvaluatorBag.java +++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestEvaluatorBag.java @@ -39,6 +39,7 @@ public class TestEvaluatorBag extends AbstractDataImportHandlerTestCase { Map urlTests; + @Override @Before public void setUp() throws Exception { super.setUp(); diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestJdbcDataSource.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestJdbcDataSource.java index 68cc9ccc9ff..ac6626462ee 100644 --- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestJdbcDataSource.java +++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestJdbcDataSource.java @@ -57,6 +57,7 @@ public class TestJdbcDataSource extends AbstractDataImportHandlerTestCase { String sysProp = System.getProperty("java.naming.factory.initial"); + @Override @Before public void setUp() throws Exception { super.setUp(); @@ -69,6 +70,7 @@ public class TestJdbcDataSource extends AbstractDataImportHandlerTestCase { connection = mockControl.createMock(Connection.class); } + @Override @After public void tearDown() throws Exception { if (sysProp == null) { diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestLineEntityProcessor.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestLineEntityProcessor.java index c24fced0bbf..91f8d034cda 100644 --- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestLineEntityProcessor.java +++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestLineEntityProcessor.java @@ -207,12 +207,15 @@ public class TestLineEntityProcessor extends AbstractDataImportHandlerTestCase { private DataSource getDataSource(final String xml) 
{ return new DataSource() { + @Override public void init(Context context, Properties initProps) { } + @Override public void close() { } + @Override public Reader getData(String query) { return new StringReader(xml); } diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestPlainTextEntityProcessor.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestPlainTextEntityProcessor.java index 48a0b1b4214..e0a5b8bf39c 100644 --- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestPlainTextEntityProcessor.java +++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestPlainTextEntityProcessor.java @@ -42,15 +42,18 @@ public class TestPlainTextEntityProcessor extends AbstractDataImportHandlerTestC public static class DS extends DataSource { static String s = "hello world"; + @Override public void init(Context context, Properties initProps) { } + @Override public Object getData(String query) { return new StringReader(s); } + @Override public void close() { } diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestSqlEntityProcessor.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestSqlEntityProcessor.java index 7fc50fa11cd..0fbfb846eae 100644 --- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestSqlEntityProcessor.java +++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestSqlEntityProcessor.java @@ -135,19 +135,23 @@ public class TestSqlEntityProcessor extends AbstractDataImportHandlerTestCase { private static DataSource>> getDs( final List> rows) { return new DataSource>>() { + @Override public Iterator> getData(String query) { return rows.iterator(); } + @Override public void init(Context context, Properties initProps) { } + @Override public void close() { } }; } public static class 
T extends Transformer { + @Override public Object transformRow(Map aRow, Context context) { aRow.put("T", "Class T"); return aRow; @@ -162,6 +166,7 @@ public class TestSqlEntityProcessor extends AbstractDataImportHandlerTestCase { } public static class T2 extends Transformer { + @Override public Object transformRow(Map aRow, Context context) { Integer count = local.get(); local.set(count + 1); diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestSqlEntityProcessor2.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestSqlEntityProcessor2.java index 966818b4ef5..6c0627e3e07 100644 --- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestSqlEntityProcessor2.java +++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestSqlEntityProcessor2.java @@ -214,6 +214,7 @@ public class TestSqlEntityProcessor2 extends AbstractDataImportHandlerTestCase { } static class DateFormatValidatingEvaluator extends Evaluator { + @Override public String evaluate(String expression, Context context) { List l = EvaluatorBag.parseParams(expression, context.getVariableResolver()); Object o = l.get(0); diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestVariableResolver.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestVariableResolver.java index 2befb0bdaa0..55c4b91ee34 100644 --- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestVariableResolver.java +++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestVariableResolver.java @@ -169,6 +169,7 @@ public class TestVariableResolver extends AbstractDataImportHandlerTestCase { } public static class E extends Evaluator{ + @Override public String evaluate(String expression, Context context) { return "Hello World"; } diff --git 
a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestXPathEntityProcessor.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestXPathEntityProcessor.java index 29672e5b258..6f6fd817fa6 100644 --- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestXPathEntityProcessor.java +++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestXPathEntityProcessor.java @@ -236,12 +236,15 @@ public class TestXPathEntityProcessor extends AbstractDataImportHandlerTestCase private DataSource getDataSource(final String xml) { return new DataSource() { + @Override public void init(Context context, Properties initProps) { } + @Override public void close() { } + @Override public Reader getData(String query) { return new StringReader(xml); } diff --git a/solr/contrib/extraction/src/main/java/org/apache/solr/handler/extraction/ExtractingDocumentLoader.java b/solr/contrib/extraction/src/main/java/org/apache/solr/handler/extraction/ExtractingDocumentLoader.java index a08967dfcc6..e7d03b9bb7c 100644 --- a/solr/contrib/extraction/src/main/java/org/apache/solr/handler/extraction/ExtractingDocumentLoader.java +++ b/solr/contrib/extraction/src/main/java/org/apache/solr/handler/extraction/ExtractingDocumentLoader.java @@ -120,6 +120,7 @@ public class ExtractingDocumentLoader extends ContentStreamLoader { * @param stream * @throws java.io.IOException */ + @Override public void load(SolrQueryRequest req, SolrQueryResponse rsp, ContentStream stream) throws IOException { errHeader = "ExtractingDocumentLoader: " + stream.getSourceInfo(); Parser parser = null; diff --git a/solr/contrib/extraction/src/main/java/org/apache/solr/handler/extraction/ExtractingRequestHandler.java b/solr/contrib/extraction/src/main/java/org/apache/solr/handler/extraction/ExtractingRequestHandler.java index 943f0d849dc..d77a81491fd 100644 --- 
a/solr/contrib/extraction/src/main/java/org/apache/solr/handler/extraction/ExtractingRequestHandler.java +++ b/solr/contrib/extraction/src/main/java/org/apache/solr/handler/extraction/ExtractingRequestHandler.java @@ -111,6 +111,7 @@ public class ExtractingRequestHandler extends ContentStreamHandlerBase implement } + @Override protected ContentStreamLoader newLoader(SolrQueryRequest req, UpdateRequestProcessor processor) { return new ExtractingDocumentLoader(req, processor, config, factory); } diff --git a/solr/contrib/extraction/src/test/java/org/apache/solr/handler/ExtractingRequestHandlerTest.java b/solr/contrib/extraction/src/test/java/org/apache/solr/handler/ExtractingRequestHandlerTest.java index 1eac099acf0..441f6d3ce1a 100644 --- a/solr/contrib/extraction/src/test/java/org/apache/solr/handler/ExtractingRequestHandlerTest.java +++ b/solr/contrib/extraction/src/test/java/org/apache/solr/handler/ExtractingRequestHandlerTest.java @@ -46,6 +46,7 @@ public class ExtractingRequestHandlerTest extends SolrTestCaseJ4 { initCore("solrconfig.xml", "schema.xml", "solr-extraction"); } + @Override @Before public void setUp() throws Exception { super.setUp(); diff --git a/solr/contrib/uima/src/main/java/org/apache/solr/uima/processor/UIMAUpdateRequestProcessor.java b/solr/contrib/uima/src/main/java/org/apache/solr/uima/processor/UIMAUpdateRequestProcessor.java index f042f2a3531..4f7e004666c 100644 --- a/solr/contrib/uima/src/main/java/org/apache/solr/uima/processor/UIMAUpdateRequestProcessor.java +++ b/solr/contrib/uima/src/main/java/org/apache/solr/uima/processor/UIMAUpdateRequestProcessor.java @@ -56,6 +56,7 @@ public class UIMAUpdateRequestProcessor extends UpdateRequestProcessor { solrUIMAConfiguration.getAePath(), solrUIMAConfiguration.getRuntimeParameters()); } + @Override public void processAdd(AddUpdateCommand cmd) throws IOException { try { /* get Solr document */ diff --git 
a/solr/contrib/uima/src/main/java/org/apache/solr/uima/processor/UIMAUpdateRequestProcessorFactory.java b/solr/contrib/uima/src/main/java/org/apache/solr/uima/processor/UIMAUpdateRequestProcessorFactory.java index 5078f9d3a19..b8167572195 100644 --- a/solr/contrib/uima/src/main/java/org/apache/solr/uima/processor/UIMAUpdateRequestProcessorFactory.java +++ b/solr/contrib/uima/src/main/java/org/apache/solr/uima/processor/UIMAUpdateRequestProcessorFactory.java @@ -29,6 +29,7 @@ import org.apache.solr.update.processor.UpdateRequestProcessorFactory; */ public class UIMAUpdateRequestProcessorFactory extends UpdateRequestProcessorFactory { + @Override public UpdateRequestProcessor getInstance(SolrQueryRequest req, SolrQueryResponse rsp, UpdateRequestProcessor next) { return new UIMAUpdateRequestProcessor(next, req.getCore()); diff --git a/solr/contrib/uima/src/test/java/org/apache/solr/uima/processor/UIMAUpdateRequestProcessorTest.java b/solr/contrib/uima/src/test/java/org/apache/solr/uima/processor/UIMAUpdateRequestProcessorTest.java index 3d2058bae6b..b0499538d6e 100644 --- a/solr/contrib/uima/src/test/java/org/apache/solr/uima/processor/UIMAUpdateRequestProcessorTest.java +++ b/solr/contrib/uima/src/test/java/org/apache/solr/uima/processor/UIMAUpdateRequestProcessorTest.java @@ -50,6 +50,7 @@ public class UIMAUpdateRequestProcessorTest extends SolrTestCaseJ4 { initCore("solrconfig.xml", "schema.xml", "solr-uima"); } + @Override @Before public void setUp() throws Exception { super.setUp(); diff --git a/solr/src/common/org/apache/solr/common/SolrDocument.java b/solr/src/common/org/apache/solr/common/SolrDocument.java index 48d66878ca5..813326e40f9 100644 --- a/solr/src/common/org/apache/solr/common/SolrDocument.java +++ b/solr/src/common/org/apache/solr/common/SolrDocument.java @@ -220,6 +220,7 @@ public class SolrDocument implements Map, Iterable> values() {throw new UnsupportedOperationException();} public Collection put(String key, Collection value) {throw new 
UnsupportedOperationException();} public Collection remove(Object key) {throw new UnsupportedOperationException();} + @Override public String toString() {return _fields.toString();} }; } @@ -248,6 +249,7 @@ public class SolrDocument implements Map, Iterable values() {throw new UnsupportedOperationException();} public Collection put(String key, Object value) {throw new UnsupportedOperationException();} public Collection remove(Object key) {throw new UnsupportedOperationException();} + @Override public String toString() {return _fields.toString();} }; } diff --git a/solr/src/common/org/apache/solr/common/SolrDocumentList.java b/solr/src/common/org/apache/solr/common/SolrDocumentList.java index 273cd80751a..9aca8d7780c 100644 --- a/solr/src/common/org/apache/solr/common/SolrDocumentList.java +++ b/solr/src/common/org/apache/solr/common/SolrDocumentList.java @@ -57,6 +57,7 @@ public class SolrDocumentList extends ArrayList this.start = start; } + @Override public String toString() { return "{numFound="+numFound +",start="+start diff --git a/solr/src/common/org/apache/solr/common/cloud/CloudState.java b/solr/src/common/org/apache/solr/common/cloud/CloudState.java index 23ec5fbad04..7b140e767f5 100644 --- a/solr/src/common/org/apache/solr/common/cloud/CloudState.java +++ b/solr/src/common/org/apache/solr/common/cloud/CloudState.java @@ -140,6 +140,7 @@ public class CloudState { return liveNodesSet; } + @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("live nodes:" + liveNodes); diff --git a/solr/src/common/org/apache/solr/common/cloud/ZkNodeProps.java b/solr/src/common/org/apache/solr/common/cloud/ZkNodeProps.java index 1819ee49440..973571d814c 100644 --- a/solr/src/common/org/apache/solr/common/cloud/ZkNodeProps.java +++ b/solr/src/common/org/apache/solr/common/cloud/ZkNodeProps.java @@ -46,6 +46,7 @@ public class ZkNodeProps extends HashMap { return sb.toString().getBytes("UTF-8"); } + @Override public String toString() { 
StringBuilder sb = new StringBuilder(); Set> entries = entrySet(); diff --git a/solr/src/common/org/apache/solr/common/params/FacetParams.java b/solr/src/common/org/apache/solr/common/params/FacetParams.java index d7a20cd11ea..e742638fdcb 100644 --- a/solr/src/common/org/apache/solr/common/params/FacetParams.java +++ b/solr/src/common/org/apache/solr/common/params/FacetParams.java @@ -270,6 +270,7 @@ public interface FacetParams { */ public enum FacetRangeOther { BEFORE, AFTER, BETWEEN, ALL, NONE; + @Override public String toString() { return super.toString().toLowerCase(); } public static FacetRangeOther get(String label) { try { @@ -288,6 +289,7 @@ public interface FacetParams { @Deprecated public enum FacetDateOther { BEFORE, AFTER, BETWEEN, ALL, NONE; + @Override public String toString() { return super.toString().toLowerCase(); } public static FacetDateOther get(String label) { try { @@ -321,6 +323,7 @@ public interface FacetParams { */ public enum FacetRangeInclude { ALL, LOWER, UPPER, EDGE, OUTER; + @Override public String toString() { return super.toString().toLowerCase(Locale.ENGLISH); } public static FacetRangeInclude get(String label) { try { diff --git a/solr/src/common/org/apache/solr/common/util/ConcurrentLRUCache.java b/solr/src/common/org/apache/solr/common/util/ConcurrentLRUCache.java index a3ca8be6264..33d76241d82 100644 --- a/solr/src/common/org/apache/solr/common/util/ConcurrentLRUCache.java +++ b/solr/src/common/org/apache/solr/common/util/ConcurrentLRUCache.java @@ -133,6 +133,7 @@ public class ConcurrentLRUCache { if (currentSize > upperWaterMark && !isCleaning) { if (newThreadForCleanup) { new Thread() { + @Override public void run() { markAndSweep(); } @@ -362,6 +363,7 @@ public class ConcurrentLRUCache { return Collections.unmodifiableCollection(Arrays.asList(heap)); } + @Override protected boolean lessThan(CacheEntry a, CacheEntry b) { // reverse the parameter order so that the queue keeps the oldest items return b.lastAccessedCopy < 
a.lastAccessedCopy; @@ -491,14 +493,17 @@ public class ConcurrentLRUCache { return this.lastAccessedCopy < that.lastAccessedCopy ? 1 : -1; } + @Override public int hashCode() { return value.hashCode(); } + @Override public boolean equals(Object obj) { return value.equals(obj); } + @Override public String toString() { return "key: " + key + " value: " + value + " lastAccessed:" + lastAccessed; } @@ -579,6 +584,7 @@ public class ConcurrentLRUCache { cache = new WeakReference(c); } + @Override public void run() { while (true) { synchronized (this) { @@ -608,6 +614,7 @@ public class ConcurrentLRUCache { } } + @Override protected void finalize() throws Throwable { try { if(!isDestroyed){ diff --git a/solr/src/common/org/apache/solr/common/util/FastOutputStream.java b/solr/src/common/org/apache/solr/common/util/FastOutputStream.java index e4b498e3b1d..f7efef9ef84 100755 --- a/solr/src/common/org/apache/solr/common/util/FastOutputStream.java +++ b/solr/src/common/org/apache/solr/common/util/FastOutputStream.java @@ -50,6 +50,7 @@ public class FastOutputStream extends OutputStream implements DataOutput { write((byte)b); } + @Override public void write(byte b[]) throws IOException { write(b,0,b.length); } diff --git a/solr/src/common/org/apache/solr/common/util/NamedList.java b/solr/src/common/org/apache/solr/common/util/NamedList.java index 517b53d3b80..579a7f29008 100644 --- a/solr/src/common/org/apache/solr/common/util/NamedList.java +++ b/solr/src/common/org/apache/solr/common/util/NamedList.java @@ -246,6 +246,7 @@ public class NamedList implements Cloneable, Serializable, Iterable implements Cloneable, Serializable, Iterableshallow copy of the named list. 
*/ + @Override public NamedList clone() { ArrayList newList = new ArrayList(nvPairs.size()); newList.addAll(nvPairs); @@ -355,6 +357,7 @@ public class NamedList implements Cloneable, Serializable, Iterable args) { super.init(args); assureMatchVersion(); diff --git a/solr/src/java/org/apache/solr/analysis/ChineseFilterFactory.java b/solr/src/java/org/apache/solr/analysis/ChineseFilterFactory.java index 6ffb67971b9..9d75f025f6d 100644 --- a/solr/src/java/org/apache/solr/analysis/ChineseFilterFactory.java +++ b/solr/src/java/org/apache/solr/analysis/ChineseFilterFactory.java @@ -29,6 +29,7 @@ import org.apache.lucene.analysis.cn.ChineseFilter; */ @Deprecated public class ChineseFilterFactory extends BaseTokenFilterFactory { + @Override public void init(Map args) { super.init(args); warnDeprecated("Use StopFilterFactory instead."); diff --git a/solr/src/java/org/apache/solr/analysis/ChineseTokenizerFactory.java b/solr/src/java/org/apache/solr/analysis/ChineseTokenizerFactory.java index 978882e2a65..ef4d53688ed 100644 --- a/solr/src/java/org/apache/solr/analysis/ChineseTokenizerFactory.java +++ b/solr/src/java/org/apache/solr/analysis/ChineseTokenizerFactory.java @@ -30,6 +30,7 @@ import org.apache.lucene.analysis.cn.ChineseTokenizer; */ @Deprecated public class ChineseTokenizerFactory extends BaseTokenizerFactory { + @Override public void init(Map args) { super.init(args); warnDeprecated("Use StandardTokenizerFactory instead."); diff --git a/solr/src/java/org/apache/solr/analysis/DictionaryCompoundWordTokenFilterFactory.java b/solr/src/java/org/apache/solr/analysis/DictionaryCompoundWordTokenFilterFactory.java index 1cbe411dcba..63e650e9d7d 100644 --- a/solr/src/java/org/apache/solr/analysis/DictionaryCompoundWordTokenFilterFactory.java +++ b/solr/src/java/org/apache/solr/analysis/DictionaryCompoundWordTokenFilterFactory.java @@ -36,6 +36,7 @@ public class DictionaryCompoundWordTokenFilterFactory extends BaseTokenFilterFac private int minSubwordSize; private int 
maxSubwordSize; private boolean onlyLongestMatch; + @Override public void init(Map args) { super.init(args); assureMatchVersion(); diff --git a/solr/src/java/org/apache/solr/analysis/HyphenationCompoundWordTokenFilterFactory.java b/solr/src/java/org/apache/solr/analysis/HyphenationCompoundWordTokenFilterFactory.java index 339f1666849..a90cef5d5da 100644 --- a/solr/src/java/org/apache/solr/analysis/HyphenationCompoundWordTokenFilterFactory.java +++ b/solr/src/java/org/apache/solr/analysis/HyphenationCompoundWordTokenFilterFactory.java @@ -61,6 +61,7 @@ public class HyphenationCompoundWordTokenFilterFactory extends BaseTokenFilterFa private int maxSubwordSize; private boolean onlyLongestMatch; + @Override public void init(Map args) { super.init(args); assureMatchVersion(); diff --git a/solr/src/java/org/apache/solr/analysis/IndonesianStemFilterFactory.java b/solr/src/java/org/apache/solr/analysis/IndonesianStemFilterFactory.java index 06144903872..d99af63a32e 100644 --- a/solr/src/java/org/apache/solr/analysis/IndonesianStemFilterFactory.java +++ b/solr/src/java/org/apache/solr/analysis/IndonesianStemFilterFactory.java @@ -26,6 +26,7 @@ import org.apache.lucene.analysis.id.IndonesianStemFilter; public class IndonesianStemFilterFactory extends BaseTokenFilterFactory { private boolean stemDerivational = true; + @Override public void init(Map args) { super.init(args); stemDerivational = getBoolean("stemDerivational", true); diff --git a/solr/src/java/org/apache/solr/analysis/NumericPayloadTokenFilterFactory.java b/solr/src/java/org/apache/solr/analysis/NumericPayloadTokenFilterFactory.java index 69469eab54e..0181b96e636 100644 --- a/solr/src/java/org/apache/solr/analysis/NumericPayloadTokenFilterFactory.java +++ b/solr/src/java/org/apache/solr/analysis/NumericPayloadTokenFilterFactory.java @@ -27,6 +27,7 @@ import java.util.Map; public class NumericPayloadTokenFilterFactory extends BaseTokenFilterFactory { private float payload; private String typeMatch; + @Override 
public void init(Map args) { super.init(args); payload = Float.parseFloat(args.get("payload")); diff --git a/solr/src/java/org/apache/solr/analysis/PatternReplaceCharFilterFactory.java b/solr/src/java/org/apache/solr/analysis/PatternReplaceCharFilterFactory.java index bb45fa126a8..48014cb1a04 100644 --- a/solr/src/java/org/apache/solr/analysis/PatternReplaceCharFilterFactory.java +++ b/solr/src/java/org/apache/solr/analysis/PatternReplaceCharFilterFactory.java @@ -36,6 +36,7 @@ public class PatternReplaceCharFilterFactory extends BaseCharFilterFactory { private int maxBlockChars; private String blockDelimiters; + @Override public void init(Map args) { super.init( args ); try { diff --git a/solr/src/java/org/apache/solr/analysis/PositionFilterFactory.java b/solr/src/java/org/apache/solr/analysis/PositionFilterFactory.java index df6b8090539..ab1fb48c740 100644 --- a/solr/src/java/org/apache/solr/analysis/PositionFilterFactory.java +++ b/solr/src/java/org/apache/solr/analysis/PositionFilterFactory.java @@ -33,6 +33,7 @@ import java.util.Map; public class PositionFilterFactory extends BaseTokenFilterFactory { private int positionIncrement; + @Override public void init(Map args) { super.init(args); positionIncrement = getInt("positionIncrement", 0); diff --git a/solr/src/java/org/apache/solr/analysis/ShingleFilterFactory.java b/solr/src/java/org/apache/solr/analysis/ShingleFilterFactory.java index c158d33ae03..58f18ad99de 100644 --- a/solr/src/java/org/apache/solr/analysis/ShingleFilterFactory.java +++ b/solr/src/java/org/apache/solr/analysis/ShingleFilterFactory.java @@ -34,6 +34,7 @@ public class ShingleFilterFactory extends BaseTokenFilterFactory { private boolean outputUnigramsIfNoShingles; private String tokenSeparator; + @Override public void init(Map args) { super.init(args); maxShingleSize = getInt("maxShingleSize", diff --git a/solr/src/java/org/apache/solr/analysis/SolrAnalyzer.java b/solr/src/java/org/apache/solr/analysis/SolrAnalyzer.java index 
6244e0c82f6..b0b03cc2b3d 100644 --- a/solr/src/java/org/apache/solr/analysis/SolrAnalyzer.java +++ b/solr/src/java/org/apache/solr/analysis/SolrAnalyzer.java @@ -32,6 +32,7 @@ public abstract class SolrAnalyzer extends Analyzer { posIncGap=gap; } + @Override public int getPositionIncrementGap(String fieldName) { return posIncGap; } diff --git a/solr/src/java/org/apache/solr/analysis/TokenizerChain.java b/solr/src/java/org/apache/solr/analysis/TokenizerChain.java index 4c0d04af3a7..3fd48842077 100644 --- a/solr/src/java/org/apache/solr/analysis/TokenizerChain.java +++ b/solr/src/java/org/apache/solr/analysis/TokenizerChain.java @@ -73,6 +73,7 @@ public final class TokenizerChain extends SolrAnalyzer { return new TokenStreamInfo(tk,ts); } + @Override public String toString() { StringBuilder sb = new StringBuilder("TokenizerChain("); for (CharFilterFactory filter: charFilters) { diff --git a/solr/src/java/org/apache/solr/cloud/SolrZkServer.java b/solr/src/java/org/apache/solr/cloud/SolrZkServer.java index d96718324aa..625c005f11e 100644 --- a/solr/src/java/org/apache/solr/cloud/SolrZkServer.java +++ b/solr/src/java/org/apache/solr/cloud/SolrZkServer.java @@ -319,6 +319,7 @@ class SolrZkServerProps extends QuorumPeerConfig { * @throws java.io.IOException * @throws ConfigException */ + @Override public void parseProperties(Properties zkProp) throws IOException, ConfigException { for (Entry entry : zkProp.entrySet()) { diff --git a/solr/src/java/org/apache/solr/cloud/ZkSolrResourceLoader.java b/solr/src/java/org/apache/solr/cloud/ZkSolrResourceLoader.java index 32a2ce0df51..8a6f5957d52 100644 --- a/solr/src/java/org/apache/solr/cloud/ZkSolrResourceLoader.java +++ b/solr/src/java/org/apache/solr/cloud/ZkSolrResourceLoader.java @@ -68,6 +68,7 @@ public class ZkSolrResourceLoader extends SolrResourceLoader { * * @return the stream for the named resource */ + @Override public InputStream openResource(String resource) { InputStream is = null; String file = collectionZkPath + 
"/" + resource; @@ -93,12 +94,14 @@ public class ZkSolrResourceLoader extends SolrResourceLoader { return is; } + @Override public String getConfigDir() { throw new ZooKeeperException( ErrorCode.SERVER_ERROR, "ZkSolrResourceLoader does not support getConfigDir() - likely, what you are trying to do is not supported in ZooKeeper mode"); } + @Override public String[] listConfigDir() { List list; try { diff --git a/solr/src/java/org/apache/solr/core/AbstractSolrEventListener.java b/solr/src/java/org/apache/solr/core/AbstractSolrEventListener.java index 8f0680e8c94..bf593b35ddc 100644 --- a/solr/src/java/org/apache/solr/core/AbstractSolrEventListener.java +++ b/solr/src/java/org/apache/solr/core/AbstractSolrEventListener.java @@ -42,6 +42,7 @@ class AbstractSolrEventListener implements SolrEventListener { throw new UnsupportedOperationException(); } + @Override public String toString() { return getClass().getName() + args; } diff --git a/solr/src/java/org/apache/solr/core/RAMDirectoryFactory.java b/solr/src/java/org/apache/solr/core/RAMDirectoryFactory.java index 3a27ce80801..268eb40c0ab 100644 --- a/solr/src/java/org/apache/solr/core/RAMDirectoryFactory.java +++ b/solr/src/java/org/apache/solr/core/RAMDirectoryFactory.java @@ -45,6 +45,7 @@ public class RAMDirectoryFactory extends StandardDirectoryFactory { } } + @Override public boolean exists(String path) { synchronized (RAMDirectoryFactory.class) { RefCntRamDirectory directory = directories.get(path); diff --git a/solr/src/java/org/apache/solr/core/RefCntRamDirectory.java b/solr/src/java/org/apache/solr/core/RefCntRamDirectory.java index e9659814374..29e5f658c88 100644 --- a/solr/src/java/org/apache/solr/core/RefCntRamDirectory.java +++ b/solr/src/java/org/apache/solr/core/RefCntRamDirectory.java @@ -51,6 +51,7 @@ public class RefCntRamDirectory extends RAMDirectory { } } + @Override public final synchronized void close() { decRef(); } diff --git a/solr/src/java/org/apache/solr/core/RunExecutableListener.java 
b/solr/src/java/org/apache/solr/core/RunExecutableListener.java index 01ac6340d5e..62f554e4641 100644 --- a/solr/src/java/org/apache/solr/core/RunExecutableListener.java +++ b/solr/src/java/org/apache/solr/core/RunExecutableListener.java @@ -37,6 +37,7 @@ class RunExecutableListener extends AbstractSolrEventListener { protected String[] envp; protected boolean wait=true; + @Override public void init(NamedList args) { super.init(args); @@ -102,6 +103,7 @@ class RunExecutableListener extends AbstractSolrEventListener { } + @Override public void postCommit() { // anything generic need to be passed to the external program? // the directory of the index? the command that caused it to be @@ -109,6 +111,7 @@ class RunExecutableListener extends AbstractSolrEventListener { exec("postCommit"); } + @Override public void newSearcher(SolrIndexSearcher newSearcher, SolrIndexSearcher currentSearcher) { exec("newSearcher"); } diff --git a/solr/src/java/org/apache/solr/core/SolrCore.java b/solr/src/java/org/apache/solr/core/SolrCore.java index b1774338967..298a89d7aaa 100644 --- a/solr/src/java/org/apache/solr/core/SolrCore.java +++ b/solr/src/java/org/apache/solr/core/SolrCore.java @@ -696,6 +696,7 @@ public final class SolrCore implements SolrInfoMBean { return refCount.get() <= 0; } + @Override protected void finalize() throws Throwable { try { if (getOpenCount() != 0) { @@ -1192,6 +1193,7 @@ public final class SolrCore implements SolrInfoMBean { private RefCounted newHolder(SolrIndexSearcher newSearcher) { RefCounted holder = new RefCounted(newSearcher) { + @Override public void close() { try { synchronized(searcherLock) { diff --git a/solr/src/java/org/apache/solr/core/StandardDirectoryFactory.java b/solr/src/java/org/apache/solr/core/StandardDirectoryFactory.java index 69996b72147..0dfd144b1e7 100644 --- a/solr/src/java/org/apache/solr/core/StandardDirectoryFactory.java +++ b/solr/src/java/org/apache/solr/core/StandardDirectoryFactory.java @@ -28,6 +28,7 @@ import 
org.apache.lucene.store.FSDirectory; */ public class StandardDirectoryFactory extends DirectoryFactory { + @Override public Directory open(String path) throws IOException { return FSDirectory.open(new File(path)); } diff --git a/solr/src/java/org/apache/solr/core/StandardIndexReaderFactory.java b/solr/src/java/org/apache/solr/core/StandardIndexReaderFactory.java index 4c31d57b1e8..2695cb26b76 100644 --- a/solr/src/java/org/apache/solr/core/StandardIndexReaderFactory.java +++ b/solr/src/java/org/apache/solr/core/StandardIndexReaderFactory.java @@ -32,6 +32,7 @@ public class StandardIndexReaderFactory extends IndexReaderFactory { /* (non-Javadoc) * @see org.apache.solr.core.IndexReaderFactory#newReader(org.apache.lucene.store.Directory, boolean) */ + @Override public IndexReader newReader(Directory indexDir, boolean readOnly) throws IOException { return IndexReader.open(indexDir, null, readOnly, termInfosIndexDivisor); diff --git a/solr/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java b/solr/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java index 48425034b1c..0066b8ca298 100644 --- a/solr/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java +++ b/solr/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java @@ -56,6 +56,7 @@ public abstract class AnalysisRequestHandlerBase extends RequestHandlerBase { public static final Set EMPTY_BYTES_SET = Collections.emptySet(); + @Override public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception { rsp.add("analysis", doAnalysis(req)); } diff --git a/solr/src/java/org/apache/solr/handler/BinaryUpdateRequestHandler.java b/solr/src/java/org/apache/solr/handler/BinaryUpdateRequestHandler.java index e098a5ffb2b..c9f92857b1c 100644 --- a/solr/src/java/org/apache/solr/handler/BinaryUpdateRequestHandler.java +++ b/solr/src/java/org/apache/solr/handler/BinaryUpdateRequestHandler.java @@ -46,8 +46,10 @@ import java.util.List; public class 
BinaryUpdateRequestHandler extends ContentStreamHandlerBase { + @Override protected ContentStreamLoader newLoader(SolrQueryRequest req, final UpdateRequestProcessor processor) { return new ContentStreamLoader() { + @Override public void load(SolrQueryRequest req, SolrQueryResponse rsp, ContentStream stream) throws Exception { InputStream is = null; try { @@ -111,18 +113,22 @@ public class BinaryUpdateRequestHandler extends ContentStreamHandlerBase { } } + @Override public String getDescription() { return "Add/Update multiple documents with javabin format"; } + @Override public String getSourceId() { return "$Id$"; } + @Override public String getSource() { return "$URL$"; } + @Override public String getVersion() { return "$Revision$"; } diff --git a/solr/src/java/org/apache/solr/handler/CSVRequestHandler.java b/solr/src/java/org/apache/solr/handler/CSVRequestHandler.java index 4a67d9ab909..6ba5ead5f89 100755 --- a/solr/src/java/org/apache/solr/handler/CSVRequestHandler.java +++ b/solr/src/java/org/apache/solr/handler/CSVRequestHandler.java @@ -42,6 +42,7 @@ import java.io.*; public class CSVRequestHandler extends ContentStreamHandlerBase { + @Override protected ContentStreamLoader newLoader(SolrQueryRequest req, UpdateRequestProcessor processor) { return new SingleThreadedCSVLoader(req, processor); } @@ -118,6 +119,7 @@ abstract class CSVLoader extends ContentStreamLoader { /** add zero length fields */ private class FieldAdderEmpty extends CSVLoader.FieldAdder { + @Override void add(SolrInputDocument doc, int line, int column, String val) { doc.addField(fields[column].getName(),val,1.0f); } @@ -127,6 +129,7 @@ abstract class CSVLoader extends ContentStreamLoader { private class FieldTrimmer extends CSVLoader.FieldAdder { private final CSVLoader.FieldAdder base; FieldTrimmer(CSVLoader.FieldAdder base) { this.base=base; } + @Override void add(SolrInputDocument doc, int line, int column, String val) { base.add(doc, line, column, val.trim()); } @@ -145,6 +148,7 @@ 
abstract class CSVLoader extends ContentStreamLoader { this.to=to; this.base=base; } + @Override void add(SolrInputDocument doc, int line, int column, String val) { if (from.equals(val)) val=to; base.add(doc,line,column,val); @@ -162,6 +166,7 @@ abstract class CSVLoader extends ContentStreamLoader { this.base = base; } + @Override void add(SolrInputDocument doc, int line, int column, String val) { CSVParser parser = new CSVParser(new StringReader(val), strategy); try { @@ -327,6 +332,7 @@ abstract class CSVLoader extends ContentStreamLoader { } /** load the CSV input */ + @Override public void load(SolrQueryRequest req, SolrQueryResponse rsp, ContentStream stream) throws IOException { errHeader = "CSVLoader: input=" + stream.getSourceInfo(); Reader reader = null; @@ -403,6 +409,7 @@ class SingleThreadedCSVLoader extends CSVLoader { super(req, processor); } + @Override void addDoc(int line, String[] vals) throws IOException { templateAdd.indexedId = null; SolrInputDocument doc = new SolrInputDocument(); diff --git a/solr/src/java/org/apache/solr/handler/ContentStreamHandlerBase.java b/solr/src/java/org/apache/solr/handler/ContentStreamHandlerBase.java index f47dfa6fb21..842d00d53af 100644 --- a/solr/src/java/org/apache/solr/handler/ContentStreamHandlerBase.java +++ b/solr/src/java/org/apache/solr/handler/ContentStreamHandlerBase.java @@ -32,6 +32,7 @@ import org.apache.solr.update.processor.UpdateRequestProcessorChain; **/ public abstract class ContentStreamHandlerBase extends RequestHandlerBase { + @Override public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception { SolrParams params = req.getParams(); UpdateRequestProcessorChain processorChain = diff --git a/solr/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java b/solr/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java index 195c9a3a7d1..92e1b098265 100644 --- a/solr/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java +++ 
b/solr/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java @@ -105,6 +105,7 @@ public class DocumentAnalysisRequestHandler extends AnalysisRequestHandlerBase { /** * {@inheritDoc} */ + @Override protected NamedList doAnalysis(SolrQueryRequest req) throws Exception { DocumentAnalysisRequest analysisRequest = resolveAnalysisRequest(req); return handleAnalysisRequest(analysisRequest, req.getSchema()); diff --git a/solr/src/java/org/apache/solr/handler/FieldAnalysisRequestHandler.java b/solr/src/java/org/apache/solr/handler/FieldAnalysisRequestHandler.java index 580735b81c3..b30ae6135ab 100644 --- a/solr/src/java/org/apache/solr/handler/FieldAnalysisRequestHandler.java +++ b/solr/src/java/org/apache/solr/handler/FieldAnalysisRequestHandler.java @@ -94,6 +94,7 @@ public class FieldAnalysisRequestHandler extends AnalysisRequestHandlerBase { /** * {@inheritDoc} */ + @Override protected NamedList doAnalysis(SolrQueryRequest req) throws Exception { FieldAnalysisRequest analysisRequest = resolveAnalysisRequest(req); IndexSchema indexSchema = req.getCore().getSchema(); diff --git a/solr/src/java/org/apache/solr/handler/JsonUpdateRequestHandler.java b/solr/src/java/org/apache/solr/handler/JsonUpdateRequestHandler.java index 12d00420abb..9f36c37d785 100644 --- a/solr/src/java/org/apache/solr/handler/JsonUpdateRequestHandler.java +++ b/solr/src/java/org/apache/solr/handler/JsonUpdateRequestHandler.java @@ -35,6 +35,7 @@ public class JsonUpdateRequestHandler extends ContentStreamHandlerBase { super.init(args); } + @Override protected ContentStreamLoader newLoader(SolrQueryRequest req, UpdateRequestProcessor processor) { return new JsonLoader(processor); } diff --git a/solr/src/java/org/apache/solr/handler/ReplicationHandler.java b/solr/src/java/org/apache/solr/handler/ReplicationHandler.java index 188175a28a2..30822960a83 100644 --- a/solr/src/java/org/apache/solr/handler/ReplicationHandler.java +++ b/solr/src/java/org/apache/solr/handler/ReplicationHandler.java 
@@ -98,6 +98,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw private AtomicBoolean replicationEnabled = new AtomicBoolean(true); + @Override public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception { rsp.setHttpCaching(false); final SolrParams solrParams = req.getParams(); @@ -143,6 +144,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw } final SolrParams paramsCopy = new ModifiableSolrParams(solrParams); new Thread() { + @Override public void run() { doFetch(paramsCopy); } @@ -447,18 +449,22 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw return fileMeta; } + @Override public String getDescription() { return "ReplicationHandler provides replication of index and configuration files from Master to Slaves"; } + @Override public String getSourceId() { return "$Id$"; } + @Override public String getSource() { return "$URL$"; } + @Override public String getVersion() { return "$Revision$"; } diff --git a/solr/src/java/org/apache/solr/handler/SnapPuller.java b/solr/src/java/org/apache/solr/handler/SnapPuller.java index b93d34f389b..20ce25e485d 100644 --- a/solr/src/java/org/apache/solr/handler/SnapPuller.java +++ b/solr/src/java/org/apache/solr/handler/SnapPuller.java @@ -508,6 +508,7 @@ public class SnapPuller { private void reloadCore() { new Thread() { + @Override public void run() { try { solrCore.getCoreDescriptor().getCoreContainer().reload(solrCore.getName()); diff --git a/solr/src/java/org/apache/solr/handler/SnapShooter.java b/solr/src/java/org/apache/solr/handler/SnapShooter.java index c1992636c2e..b534d04a97e 100644 --- a/solr/src/java/org/apache/solr/handler/SnapShooter.java +++ b/solr/src/java/org/apache/solr/handler/SnapShooter.java @@ -64,6 +64,7 @@ public class SnapShooter { replicationHandler.core.getDeletionPolicy().saveCommitPoint(indexCommit.getVersion()); new Thread() { + @Override public void run() { 
createSnapshot(indexCommit, replicationHandler); } diff --git a/solr/src/java/org/apache/solr/handler/XMLLoader.java b/solr/src/java/org/apache/solr/handler/XMLLoader.java index 47c370aaa0f..b87c54a9ecd 100644 --- a/solr/src/java/org/apache/solr/handler/XMLLoader.java +++ b/solr/src/java/org/apache/solr/handler/XMLLoader.java @@ -54,6 +54,7 @@ class XMLLoader extends ContentStreamLoader { this.inputFactory = inputFactory; } + @Override public void load(SolrQueryRequest req, SolrQueryResponse rsp, ContentStream stream) throws Exception { errHeader = "XMLLoader: " + stream.getSourceInfo(); Reader reader = null; diff --git a/solr/src/java/org/apache/solr/handler/XmlUpdateRequestHandler.java b/solr/src/java/org/apache/solr/handler/XmlUpdateRequestHandler.java index 7dbfc45cf04..aa26f6cef71 100644 --- a/solr/src/java/org/apache/solr/handler/XmlUpdateRequestHandler.java +++ b/solr/src/java/org/apache/solr/handler/XmlUpdateRequestHandler.java @@ -80,6 +80,7 @@ public class XmlUpdateRequestHandler extends ContentStreamHandlerBase { } } + @Override protected ContentStreamLoader newLoader(SolrQueryRequest req, UpdateRequestProcessor processor) { return new XMLLoader(processor, inputFactory); } diff --git a/solr/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java b/solr/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java index c5fef3c14e7..51d100f308d 100644 --- a/solr/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java +++ b/solr/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java @@ -49,6 +49,7 @@ public class SolrInfoMBeanHandler extends RequestHandlerBase { } + @Override public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception { SolrCore core = req.getCore(); @@ -94,18 +95,22 @@ public class SolrInfoMBeanHandler extends RequestHandlerBase { rsp.setHttpCaching(false); // never cache, no matter what init config looks like } + @Override public String getDescription() { return "Get Info 
(and statistics) about all registered SolrInfoMBeans"; } + @Override public String getSourceId() { return "$Id$"; } + @Override public String getSource() { return "$URL$"; } + @Override public String getVersion() { return "$Revision$"; } diff --git a/solr/src/java/org/apache/solr/handler/component/DebugComponent.java b/solr/src/java/org/apache/solr/handler/component/DebugComponent.java index d105c054150..a6717981786 100644 --- a/solr/src/java/org/apache/solr/handler/component/DebugComponent.java +++ b/solr/src/java/org/apache/solr/handler/component/DebugComponent.java @@ -86,6 +86,7 @@ public class DebugComponent extends SearchComponent } + @Override public void modifyRequest(ResponseBuilder rb, SearchComponent who, ShardRequest sreq) { if (!rb.isDebug()) return; diff --git a/solr/src/java/org/apache/solr/handler/component/FacetComponent.java b/solr/src/java/org/apache/solr/handler/component/FacetComponent.java index 5b0098680e9..0bbeaff45f2 100644 --- a/solr/src/java/org/apache/solr/handler/component/FacetComponent.java +++ b/solr/src/java/org/apache/solr/handler/component/FacetComponent.java @@ -726,6 +726,7 @@ public class FacetComponent extends SearchComponent public long count; public int termNum; // term number starting at 0 (used in bit arrays) + @Override public String toString() { return "{term="+name+",termNum="+termNum+",count="+count+"}"; } diff --git a/solr/src/java/org/apache/solr/handler/component/HighlightComponent.java b/solr/src/java/org/apache/solr/handler/component/HighlightComponent.java index 10070d795f7..137f30b52c3 100644 --- a/solr/src/java/org/apache/solr/handler/component/HighlightComponent.java +++ b/solr/src/java/org/apache/solr/handler/component/HighlightComponent.java @@ -131,6 +131,7 @@ public class HighlightComponent extends SearchComponent implements PluginInfoIni } } + @Override public void modifyRequest(ResponseBuilder rb, SearchComponent who, ShardRequest sreq) { if (!rb.doHighlights) return; diff --git 
a/solr/src/java/org/apache/solr/handler/component/QueryElevationComponent.java b/solr/src/java/org/apache/solr/handler/component/QueryElevationComponent.java index 8b4af7d715d..2539bea60ce 100644 --- a/solr/src/java/org/apache/solr/handler/component/QueryElevationComponent.java +++ b/solr/src/java/org/apache/solr/handler/component/QueryElevationComponent.java @@ -474,6 +474,7 @@ class ElevationComparatorSource extends FieldComparatorSource { this.priority = boosts; } + @Override public FieldComparator newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException { return new FieldComparator() { @@ -482,10 +483,12 @@ class ElevationComparatorSource extends FieldComparatorSource { int bottomVal; private final BytesRef tempBR = new BytesRef(); + @Override public int compare(int slot1, int slot2) { return values[slot2] - values[slot1]; // values will be small enough that there is no overflow concern } + @Override public void setBottom(int slot) { bottomVal = values[slot]; } @@ -496,19 +499,23 @@ class ElevationComparatorSource extends FieldComparatorSource { return prio == null ? 
0 : prio.intValue(); } + @Override public int compareBottom(int doc) throws IOException { return docVal(doc) - bottomVal; } + @Override public void copy(int slot, int doc) throws IOException { values[slot] = docVal(doc); } + @Override public FieldComparator setNextReader(AtomicReaderContext context) throws IOException { idIndex = FieldCache.DEFAULT.getTermsIndex(context.reader, fieldname); return this; } + @Override public Comparable value(int slot) { return values[slot]; } diff --git a/solr/src/java/org/apache/solr/handler/component/ShardDoc.java b/solr/src/java/org/apache/solr/handler/component/ShardDoc.java index 469d8adfb6f..5b17b1b5ec8 100755 --- a/solr/src/java/org/apache/solr/handler/component/ShardDoc.java +++ b/solr/src/java/org/apache/solr/handler/component/ShardDoc.java @@ -56,6 +56,7 @@ public class ShardDoc { int positionInResponse; // the ordinal position in the merged response arraylist + @Override public String toString(){ return "id="+id +" ,score="+score @@ -226,6 +227,7 @@ class ShardFieldSortedHitQueue extends PriorityQueue { // the negative sign on the final compareTo(). 
Comparator comparatorNatural(String fieldName) { return new ShardComparator(fieldName) { + @Override public final int compare(final Object o1, final Object o2) { ShardDoc sd1 = (ShardDoc) o1; ShardDoc sd2 = (ShardDoc) o2; @@ -247,6 +249,7 @@ class ShardFieldSortedHitQueue extends PriorityQueue { Locale locale) { final Collator collator = Collator.getInstance(locale); return new ShardComparator(fieldName) { + @Override public final int compare(final Object o1, final Object o2) { ShardDoc sd1 = (ShardDoc) o1; ShardDoc sd2 = (ShardDoc) o2; @@ -266,6 +269,7 @@ class ShardFieldSortedHitQueue extends PriorityQueue { Comparator comparatorMissingStringLast(final String fieldName) { return new ShardComparator(fieldName) { + @Override public final int compare(final Object o1, final Object o2) { ShardDoc sd1 = (ShardDoc) o1; ShardDoc sd2 = (ShardDoc) o2; diff --git a/solr/src/java/org/apache/solr/handler/component/ShardRequest.java b/solr/src/java/org/apache/solr/handler/component/ShardRequest.java index d8828cb24ac..528447f2723 100755 --- a/solr/src/java/org/apache/solr/handler/component/ShardRequest.java +++ b/solr/src/java/org/apache/solr/handler/component/ShardRequest.java @@ -56,6 +56,7 @@ public class ShardRequest { // this would work well if we ever transitioned to using internal ids and // didn't require a uniqueId + @Override public String toString() { return "ShardRequest:{params=" + params + ", purpose=" + Integer.toHexString(purpose) diff --git a/solr/src/java/org/apache/solr/handler/component/ShardResponse.java b/solr/src/java/org/apache/solr/handler/component/ShardResponse.java index 87f89571c44..8edf03e4984 100755 --- a/solr/src/java/org/apache/solr/handler/component/ShardResponse.java +++ b/solr/src/java/org/apache/solr/handler/component/ShardResponse.java @@ -27,6 +27,7 @@ public final class ShardResponse { private Throwable exception; private SolrResponse rsp; + @Override public String toString() { return 
"ShardResponse:{shard="+shard+",shardAddress="+shardAddress +"\n\trequest=" + req diff --git a/solr/src/java/org/apache/solr/handler/component/TermVectorComponent.java b/solr/src/java/org/apache/solr/handler/component/TermVectorComponent.java index 1fea0ac618b..8e3e8c8ef3c 100644 --- a/solr/src/java/org/apache/solr/handler/component/TermVectorComponent.java +++ b/solr/src/java/org/apache/solr/handler/component/TermVectorComponent.java @@ -71,6 +71,7 @@ public class TermVectorComponent extends SearchComponent implements SolrCoreAwar public static final String TERM_VECTORS = "termVectors"; + @Override public void process(ResponseBuilder rb) throws IOException { SolrParams params = rb.req.getParams(); if (!params.getBool(COMPONENT_NAME, false)) { @@ -288,6 +289,7 @@ public class TermVectorComponent extends SearchComponent implements SolrCoreAwar this.reader = reader; } + @Override public void map(BytesRef term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions) { NamedList termInfo = new NamedList(); fieldNL.add(term.utf8ToString(), termInfo); @@ -336,6 +338,7 @@ public class TermVectorComponent extends SearchComponent implements SolrCoreAwar return result; } + @Override public void setExpectations(String field, int numTerms, boolean storeOffsets, boolean storePositions) { if (fieldOptions.docFreq == true && reader != null) { @@ -358,6 +361,7 @@ public class TermVectorComponent extends SearchComponent implements SolrCoreAwar } } + @Override public void prepare(ResponseBuilder rb) throws IOException { } @@ -374,18 +378,22 @@ public class TermVectorComponent extends SearchComponent implements SolrCoreAwar } + @Override public String getVersion() { return "$Revision$"; } + @Override public String getSourceId() { return "$Id$"; } + @Override public String getSource() { return "$URL$"; } + @Override public String getDescription() { return "A Component for working with Term Vectors"; } diff --git 
a/solr/src/java/org/apache/solr/handler/component/TermsComponent.java b/solr/src/java/org/apache/solr/handler/component/TermsComponent.java index 9fc48b352bb..93929eb2fab 100644 --- a/solr/src/java/org/apache/solr/handler/component/TermsComponent.java +++ b/solr/src/java/org/apache/solr/handler/component/TermsComponent.java @@ -68,6 +68,7 @@ public class TermsComponent extends SearchComponent { } } + @Override public void process(ResponseBuilder rb) throws IOException { SolrParams params = rb.req.getParams(); if (!params.getBool(TermsParams.TERMS, false)) return; @@ -463,18 +464,22 @@ public class TermsComponent extends SearchComponent { } } + @Override public String getVersion() { return "$Revision$"; } + @Override public String getSourceId() { return "$Id$"; } + @Override public String getSource() { return "$URL$"; } + @Override public String getDescription() { return "A Component for working with Term Enumerators"; } diff --git a/solr/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java b/solr/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java index 41604fab4b1..7776ac94871 100644 --- a/solr/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java +++ b/solr/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java @@ -120,6 +120,7 @@ public class DefaultSolrHighlighter extends SolrHighlighter implements PluginInf } //just for back-compat with the deprecated method private boolean initialized = false; + @Override @Deprecated public void initalize( SolrConfig config) { if (initialized) return; @@ -333,6 +334,7 @@ public class DefaultSolrHighlighter extends SolrHighlighter implements PluginInf * @return NamedList containing a NamedList for each document, which in * turns contains sets (field, summary) pairs. 
*/ + @Override @SuppressWarnings("unchecked") public NamedList doHighlighting(DocList docs, Query query, SolrQueryRequest req, String[] defaultFields) throws IOException { SolrParams params = req.getParams(); @@ -639,6 +641,7 @@ class TermOffsetsTokenStream { this.length = length; } + @Override public boolean incrementToken() throws IOException { while( true ){ if( bufferedToken == null ) { diff --git a/solr/src/java/org/apache/solr/highlight/GapFragmenter.java b/solr/src/java/org/apache/solr/highlight/GapFragmenter.java index b8d4f0ded43..765316b864b 100644 --- a/solr/src/java/org/apache/solr/highlight/GapFragmenter.java +++ b/solr/src/java/org/apache/solr/highlight/GapFragmenter.java @@ -92,6 +92,7 @@ class LuceneGapFragmenter extends SimpleFragmenter { /* (non-Javadoc) * @see org.apache.lucene.search.highlight.TextFragmenter#start(java.lang.String) */ + @Override public void start(String originalText, TokenStream tokenStream) { offsetAtt = tokenStream.getAttribute(OffsetAttribute.class); posIncAtt = tokenStream.getAttribute(PositionIncrementAttribute.class); @@ -101,6 +102,7 @@ class LuceneGapFragmenter extends SimpleFragmenter { /* (non-Javadoc) * @see org.apache.lucene.search.highlight.TextFragmenter#isNewFragment(org.apache.lucene.analysis.Token) */ + @Override public boolean isNewFragment() { int endOffset = offsetAtt.endOffset(); boolean isNewFrag = diff --git a/solr/src/java/org/apache/solr/highlight/RegexFragmenter.java b/solr/src/java/org/apache/solr/highlight/RegexFragmenter.java index ec18e33efbd..a958d66a9ab 100644 --- a/solr/src/java/org/apache/solr/highlight/RegexFragmenter.java +++ b/solr/src/java/org/apache/solr/highlight/RegexFragmenter.java @@ -54,6 +54,7 @@ public class RegexFragmenter extends HighlightingPluginBase implements SolrFragm protected String defaultPatternRaw; protected Pattern defaultPattern; + @Override public void init(NamedList args) { super.init(args); defaultPatternRaw = LuceneRegexFragmenter.DEFAULT_PATTERN_RAW; diff --git 
a/solr/src/java/org/apache/solr/request/ServletSolrParams.java b/solr/src/java/org/apache/solr/request/ServletSolrParams.java index 33b1cfa669a..49922c81332 100644 --- a/solr/src/java/org/apache/solr/request/ServletSolrParams.java +++ b/solr/src/java/org/apache/solr/request/ServletSolrParams.java @@ -29,6 +29,7 @@ public class ServletSolrParams extends MultiMapSolrParams { super(req.getParameterMap()); } + @Override public String get(String name) { String[] arr = map.get(name); if (arr==null) return null; diff --git a/solr/src/java/org/apache/solr/request/SimpleFacets.java b/solr/src/java/org/apache/solr/request/SimpleFacets.java index 2a959a3e87a..3026f2dd5a0 100644 --- a/solr/src/java/org/apache/solr/request/SimpleFacets.java +++ b/solr/src/java/org/apache/solr/request/SimpleFacets.java @@ -1177,9 +1177,11 @@ public class SimpleFacets { } public K key; public V val; + @Override public int hashCode() { return key.hashCode() ^ val.hashCode(); } + @Override public boolean equals(Object o) { if (! 
(o instanceof CountPair)) return false; CountPair that = (CountPair) o; @@ -1290,9 +1292,11 @@ public class SimpleFacets { extends RangeEndpointCalculator { public FloatRangeEndpointCalculator(final SchemaField f) { super(f); } + @Override protected Float parseVal(String rawval) { return Float.valueOf(rawval); } + @Override public Float parseAndAddGap(Float value, String gap) { return new Float(value.floatValue() + Float.valueOf(gap).floatValue()); } @@ -1301,9 +1305,11 @@ public class SimpleFacets { extends RangeEndpointCalculator { public DoubleRangeEndpointCalculator(final SchemaField f) { super(f); } + @Override protected Double parseVal(String rawval) { return Double.valueOf(rawval); } + @Override public Double parseAndAddGap(Double value, String gap) { return new Double(value.floatValue() + Double.valueOf(gap).floatValue()); } @@ -1312,9 +1318,11 @@ public class SimpleFacets { extends RangeEndpointCalculator { public IntegerRangeEndpointCalculator(final SchemaField f) { super(f); } + @Override protected Integer parseVal(String rawval) { return Integer.valueOf(rawval); } + @Override public Integer parseAndAddGap(Integer value, String gap) { return new Integer(value.intValue() + Integer.valueOf(gap).intValue()); } @@ -1323,9 +1331,11 @@ public class SimpleFacets { extends RangeEndpointCalculator { public LongRangeEndpointCalculator(final SchemaField f) { super(f); } + @Override protected Long parseVal(String rawval) { return Long.valueOf(rawval); } + @Override public Long parseAndAddGap(Long value, String gap) { return new Long(value.intValue() + Long.valueOf(gap).intValue()); } @@ -1342,15 +1352,19 @@ public class SimpleFacets { ("SchemaField must use filed type extending DateField"); } } + @Override public String formatValue(Date val) { return ((DateField)field.getType()).toExternal(val); } + @Override protected Date parseVal(String rawval) { return ((DateField)field.getType()).parseMath(now, rawval); } + @Override protected Object parseGap(final String 
rawval) { return rawval; } + @Override public Date parseAndAddGap(Date value, String gap) throws java.text.ParseException { final DateMathParser dmp = new DateMathParser(DateField.UTC, Locale.US); dmp.setNow(value); diff --git a/solr/src/java/org/apache/solr/request/SolrQueryRequestBase.java b/solr/src/java/org/apache/solr/request/SolrQueryRequestBase.java index d598966aeea..699cbe7c74d 100644 --- a/solr/src/java/org/apache/solr/request/SolrQueryRequestBase.java +++ b/solr/src/java/org/apache/solr/request/SolrQueryRequestBase.java @@ -127,6 +127,7 @@ public abstract class SolrQueryRequestBase implements SolrQueryRequest { return origParams.toString(); } + @Override public String toString() { return this.getClass().getSimpleName() + '{' + params + '}'; } diff --git a/solr/src/java/org/apache/solr/request/UnInvertedField.java b/solr/src/java/org/apache/solr/request/UnInvertedField.java index 9014d1e1e92..ede2328b2f2 100755 --- a/solr/src/java/org/apache/solr/request/UnInvertedField.java +++ b/solr/src/java/org/apache/solr/request/UnInvertedField.java @@ -882,6 +882,7 @@ public class UnInvertedField { return te.skipTo(termNum); } + @Override public String toString() { return "{field=" + field + ",memSize="+memSize() @@ -1158,6 +1159,7 @@ class TermIndex { ArrayList lst; PagedBytes bytes; + @Override protected BytesRef setTerm() throws IOException { BytesRef br = super.setTerm(); if (br != null && (pos & intervalMask)==0) { @@ -1173,10 +1175,12 @@ class TermIndex { return br; } + @Override public BytesRef skipTo(int termNumber) throws IOException { throw new UnsupportedOperationException(); } + @Override public void close() throws IOException { nTerms=pos; super.close(); diff --git a/solr/src/java/org/apache/solr/response/JSONResponseWriter.java b/solr/src/java/org/apache/solr/response/JSONResponseWriter.java index e30477920bd..48e373f5e7e 100644 --- a/solr/src/java/org/apache/solr/response/JSONResponseWriter.java +++ 
b/solr/src/java/org/apache/solr/response/JSONResponseWriter.java @@ -286,6 +286,7 @@ class JSONWriter extends TextResponseWriter { } + @Override public void writeNamedList(String name, NamedList val) throws IOException { if (val instanceof SimpleOrderedMap) { writeNamedListAsMapWithDups(name,val); @@ -391,6 +392,7 @@ class JSONWriter extends TextResponseWriter { writeMapCloser(); } + @Override public void writeSolrDocument(String name, SolrDocument doc, Set returnFields, Map pseudoFields) throws IOException { writeMapOpener(-1); // no trivial way to determine map size // TODO: could easily figure out size for SolrDocument if needed... @@ -437,6 +439,7 @@ class JSONWriter extends TextResponseWriter { // if a Doc can ever contain another doc, this optimization would have to go. private final HashMap scoreMap = new HashMap(1); + @Override public void writeDoc(String name, Document doc, Set returnFields, float score, boolean includeScore) throws IOException { Map other = null; if (includeScore) { @@ -446,6 +449,7 @@ class JSONWriter extends TextResponseWriter { writeDoc(name, doc.getFields(), returnFields, other); } + @Override public void writeDocList(String name, DocList ids, Set fields, Map otherFields) throws IOException { boolean includeScore=false; if (fields!=null) { @@ -595,6 +599,7 @@ class JSONWriter extends TextResponseWriter { writer.write(']'); } + @Override public void writeStr(String name, String val, boolean needsEscaping) throws IOException { // it might be more efficient to use a stringbuilder or write substrings // if writing chars to the stream is slow. 
@@ -647,6 +652,7 @@ class JSONWriter extends TextResponseWriter { } + @Override public void writeMap(String name, Map val, boolean excludeOuter, boolean isFirstVal) throws IOException { if (!excludeOuter) { writeMapOpener(val.size()); @@ -678,6 +684,7 @@ class JSONWriter extends TextResponseWriter { } } + @Override public void writeArray(String name, Iterator val) throws IOException { writeArrayOpener(-1); // no trivial way to determine array size incLevel(); @@ -697,30 +704,37 @@ class JSONWriter extends TextResponseWriter { // // Primitive types // + @Override public void writeNull(String name) throws IOException { writer.write("null"); } + @Override public void writeInt(String name, String val) throws IOException { writer.write(val); } + @Override public void writeLong(String name, String val) throws IOException { writer.write(val); } + @Override public void writeBool(String name, String val) throws IOException { writer.write(val); } + @Override public void writeFloat(String name, String val) throws IOException { writer.write(val); } + @Override public void writeDouble(String name, String val) throws IOException { writer.write(val); } + @Override public void writeDate(String name, String val) throws IOException { writeStr(name, val, false); } diff --git a/solr/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java b/solr/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java index 60d84f7b575..0149882cb05 100755 --- a/solr/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java +++ b/solr/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java @@ -92,6 +92,7 @@ class PHPSerializedWriter extends JSONWriter { doIndent = false; } + @Override public void writeResponse() throws IOException { Boolean omitHeader = req.getParams().getBool(CommonParams.OMIT_HEADER); if(omitHeader != null && omitHeader) rsp.getValues().remove("responseHeader"); diff --git a/solr/src/java/org/apache/solr/response/PageTool.java 
b/solr/src/java/org/apache/solr/response/PageTool.java index e6ad24794a1..6a664d63871 100644 --- a/solr/src/java/org/apache/solr/response/PageTool.java +++ b/solr/src/java/org/apache/solr/response/PageTool.java @@ -73,6 +73,7 @@ public class PageTool { return current_page_number; } + @Override public String toString() { return "Found " + results_found + " Page " + current_page_number + " of " + page_count + diff --git a/solr/src/java/org/apache/solr/response/RubyResponseWriter.java b/solr/src/java/org/apache/solr/response/RubyResponseWriter.java index 7ee0b5bd268..585b248d3e7 100644 --- a/solr/src/java/org/apache/solr/response/RubyResponseWriter.java +++ b/solr/src/java/org/apache/solr/response/RubyResponseWriter.java @@ -45,7 +45,9 @@ public class RubyResponseWriter implements QueryResponseWriter { class RubyWriter extends NaNFloatWriter { + @Override protected String getNaN() { return "(0.0/0.0)"; } + @Override protected String getInf() { return "(1.0/0.0)"; } public RubyWriter(Writer writer, SolrQueryRequest req, SolrQueryResponse rsp) { diff --git a/solr/src/java/org/apache/solr/response/SolrParamResourceLoader.java b/solr/src/java/org/apache/solr/response/SolrParamResourceLoader.java index c9381cf7af8..9784fe01efa 100644 --- a/solr/src/java/org/apache/solr/response/SolrParamResourceLoader.java +++ b/solr/src/java/org/apache/solr/response/SolrParamResourceLoader.java @@ -49,18 +49,22 @@ public class SolrParamResourceLoader extends ResourceLoader { } } + @Override public void init(ExtendedProperties extendedProperties) { } + @Override public InputStream getResourceStream(String s) throws ResourceNotFoundException { String template = templates.get(s); return template == null ? 
null : new ByteArrayInputStream(template.getBytes()); } + @Override public boolean isSourceModified(Resource resource) { return false; } + @Override public long getLastModified(Resource resource) { return 0; } diff --git a/solr/src/java/org/apache/solr/response/SolrVelocityResourceLoader.java b/solr/src/java/org/apache/solr/response/SolrVelocityResourceLoader.java index c42dc10a960..67feca0a4a1 100644 --- a/solr/src/java/org/apache/solr/response/SolrVelocityResourceLoader.java +++ b/solr/src/java/org/apache/solr/response/SolrVelocityResourceLoader.java @@ -33,17 +33,21 @@ public class SolrVelocityResourceLoader extends ResourceLoader { this.loader = loader; } + @Override public void init(ExtendedProperties extendedProperties) { } + @Override public InputStream getResourceStream(String template_name) throws ResourceNotFoundException { return loader.openResource(template_name); } + @Override public boolean isSourceModified(Resource resource) { return false; } + @Override public long getLastModified(Resource resource) { return 0; } diff --git a/solr/src/java/org/apache/solr/response/XMLWriter.java b/solr/src/java/org/apache/solr/response/XMLWriter.java index 7f6465fb49d..062ab413b3f 100644 --- a/solr/src/java/org/apache/solr/response/XMLWriter.java +++ b/solr/src/java/org/apache/solr/response/XMLWriter.java @@ -436,6 +436,7 @@ public final class XMLWriter extends TextResponseWriter { } + @Override public void writeVal(String name, Object val) throws IOException { // if there get to be enough types, perhaps hashing on the type @@ -494,6 +495,7 @@ public final class XMLWriter extends TextResponseWriter { // Generic compound types // + @Override public void writeNamedList(String name, NamedList val) throws IOException { int sz = val.size(); startTag("lst", name, sz<=0); diff --git a/solr/src/java/org/apache/solr/schema/AbstractSubTypeFieldType.java b/solr/src/java/org/apache/solr/schema/AbstractSubTypeFieldType.java index 913163b2d4c..8ebe95da7f7 100644 --- 
a/solr/src/java/org/apache/solr/schema/AbstractSubTypeFieldType.java +++ b/solr/src/java/org/apache/solr/schema/AbstractSubTypeFieldType.java @@ -103,6 +103,7 @@ public abstract class AbstractSubTypeFieldType extends FieldType implements Sche /** * Throws UnsupportedOperationException() */ + @Override public Query getFieldQuery(QParser parser, SchemaField field, String externalVal) { throw new UnsupportedOperationException(); } diff --git a/solr/src/java/org/apache/solr/schema/BCDIntField.java b/solr/src/java/org/apache/solr/schema/BCDIntField.java index 6a2ef7a0ea9..534a7dcf1b0 100644 --- a/solr/src/java/org/apache/solr/schema/BCDIntField.java +++ b/solr/src/java/org/apache/solr/schema/BCDIntField.java @@ -30,9 +30,11 @@ import java.io.IOException; * @version $Id$ */ public class BCDIntField extends FieldType { + @Override protected void init(IndexSchema schema, Map args) { } + @Override public SortField getSortField(SchemaField field,boolean reverse) { return getStringSort(field,reverse); } @@ -42,11 +44,13 @@ public class BCDIntField extends FieldType { throw new UnsupportedOperationException("ValueSource not implemented"); } + @Override public String toInternal(String val) { // TODO? make sure each character is a digit? 
return BCDUtils.base10toBase10kSortableInt(val); } + @Override public String toExternal(Fieldable f) { return indexedToReadable(f.stringValue()); } @@ -57,10 +61,12 @@ public class BCDIntField extends FieldType { return Integer.valueOf( toExternal(f) ); } + @Override public String indexedToReadable(String indexedForm) { return BCDUtils.base10kSortableIntToBase10(indexedForm); } + @Override public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { writer.writeInt(name,toExternal(f)); } diff --git a/solr/src/java/org/apache/solr/schema/BinaryField.java b/solr/src/java/org/apache/solr/schema/BinaryField.java index e78ead4d9f1..c9c22461d4d 100644 --- a/solr/src/java/org/apache/solr/schema/BinaryField.java +++ b/solr/src/java/org/apache/solr/schema/BinaryField.java @@ -33,19 +33,23 @@ public class BinaryField extends FieldType { return Base64.byteArrayToBase64(buf.array(), buf.position(), buf.limit()-buf.position()); } + @Override public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { writer.writeStr(name, toBase64String(toObject(f)), false); } + @Override public SortField getSortField(SchemaField field, boolean top) { throw new RuntimeException("Cannot sort on a Binary field"); } + @Override public String toExternal(Fieldable f) { return toBase64String(toObject(f)); } + @Override public ByteBuffer toObject(Fieldable f) { return ByteBuffer.wrap(f.getBinaryValue(), f.getBinaryOffset(), f.getBinaryLength() ) ; } diff --git a/solr/src/java/org/apache/solr/schema/BoolField.java b/solr/src/java/org/apache/solr/schema/BoolField.java index 907849a15ff..0694e4c4c13 100644 --- a/solr/src/java/org/apache/solr/schema/BoolField.java +++ b/solr/src/java/org/apache/solr/schema/BoolField.java @@ -37,9 +37,11 @@ import java.io.IOException; * @version $Id$ */ public class BoolField extends FieldType { + @Override protected void init(IndexSchema schema, Map args) { } + @Override public SortField 
getSortField(SchemaField field,boolean reverse) { return getStringSort(field,reverse); } @@ -58,6 +60,7 @@ public class BoolField extends FieldType { // handle single valued non-text fields (int,bool,etc) if needed. protected final static Analyzer boolAnalyzer = new SolrAnalyzer() { + @Override public TokenStreamInfo getStream(String fieldName, Reader reader) { Tokenizer tokenizer = new Tokenizer(reader) { final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); @@ -88,19 +91,23 @@ public class BoolField extends FieldType { }; + @Override public Analyzer getAnalyzer() { return boolAnalyzer; } + @Override public Analyzer getQueryAnalyzer() { return boolAnalyzer; } + @Override public String toInternal(String val) { char ch = (val!=null && val.length()>0) ? val.charAt(0) : 0; return (ch=='1' || ch=='t' || ch=='T') ? "T" : "F"; } + @Override public String toExternal(Fieldable f) { return indexedToReadable(f.stringValue()); } @@ -115,6 +122,7 @@ public class BoolField extends FieldType { return term.bytes[0] == 'T'; } + @Override public String indexedToReadable(String indexedForm) { char ch = indexedForm.charAt(0); return ch=='T' ? 
"true" : "false"; @@ -129,6 +137,7 @@ public class BoolField extends FieldType { } } + @Override public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { writer.writeBool(name, f.stringValue().charAt(0) =='T'); } diff --git a/solr/src/java/org/apache/solr/schema/ByteField.java b/solr/src/java/org/apache/solr/schema/ByteField.java index ce84ec3b966..204bce94836 100644 --- a/solr/src/java/org/apache/solr/schema/ByteField.java +++ b/solr/src/java/org/apache/solr/schema/ByteField.java @@ -33,11 +33,13 @@ import java.util.Map; * @version $Id$ */ public class ByteField extends FieldType { + @Override protected void init(IndexSchema schema, Map args) { restrictProps(SORT_MISSING_FIRST | SORT_MISSING_LAST); } ///////////////////////////////////////////////////////////// + @Override public SortField getSortField(SchemaField field, boolean reverse) { return new SortField(field.name, SortField.BYTE, reverse); } @@ -47,6 +49,7 @@ public class ByteField extends FieldType { return new ByteFieldSource( new ByteValuesCreator( field.name, null, CachedArrayCreator.CACHE_VALUES_AND_BITS ) ); } + @Override public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { String s = f.stringValue(); diff --git a/solr/src/java/org/apache/solr/schema/DateField.java b/solr/src/java/org/apache/solr/schema/DateField.java index d60584a5f3a..400501308f8 100644 --- a/solr/src/java/org/apache/solr/schema/DateField.java +++ b/solr/src/java/org/apache/solr/schema/DateField.java @@ -125,12 +125,14 @@ public class DateField extends FieldType { // The easiest fix is to simply remove the 'Z' for the internal // format. 
+ @Override protected void init(IndexSchema schema, Map args) { } protected static String NOW = "NOW"; protected static char Z = 'Z'; + @Override public String toInternal(String val) { return toInternal(parseMath(null, val)); } @@ -183,6 +185,7 @@ public class DateField extends FieldType { return formatDate(val); } + @Override public String indexedToReadable(String indexedForm) { return indexedForm + Z; } @@ -193,6 +196,7 @@ public class DateField extends FieldType { out.write(Z); } + @Override public String toExternal(Fieldable f) { return indexedToReadable(f.stringValue()); } @@ -211,6 +215,7 @@ public class DateField extends FieldType { } } + @Override public SortField getSortField(SchemaField field,boolean reverse) { return getStringSort(field,reverse); } @@ -219,6 +224,7 @@ public class DateField extends FieldType { return new OrdFieldSource(field.name); } + @Override public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { writer.writeDate(name, toExternal(f)); } @@ -336,6 +342,7 @@ public class DateField extends FieldType { this.setTimeZone(CANONICAL_TZ); } + @Override public Date parse(String i, ParsePosition p) { /* delegate to SimpleDateFormat for easy stuff */ Date d = super.parse(i, p); @@ -357,6 +364,7 @@ public class DateField extends FieldType { return d; } + @Override public StringBuffer format(Date d, StringBuffer toAppendTo, FieldPosition pos) { /* delegate to SimpleDateFormat for easy stuff */ @@ -375,6 +383,7 @@ public class DateField extends FieldType { return toAppendTo; } + @Override public Object clone() { ISO8601CanonicalDateFormat c = (ISO8601CanonicalDateFormat) super.clone(); @@ -391,6 +400,7 @@ public class DateField extends FieldType { super(); proto = d; } + @Override protected DateFormat initialValue() { return (DateFormat) proto.clone(); } @@ -423,34 +433,42 @@ class DateFieldSource extends FieldCacheSource { this.ft = ft; } + @Override public String description() { return "date(" + field + ')'; } 
+ @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { return new StringIndexDocValues(this, readerContext, field) { + @Override protected String toTerm(String readableValue) { // needed for frange queries to work properly return ft.toInternal(readableValue); } + @Override public float floatVal(int doc) { return (float)intVal(doc); } + @Override public int intVal(int doc) { int ord=termsIndex.getOrd(doc); return ord; } + @Override public long longVal(int doc) { return (long)intVal(doc); } + @Override public double doubleVal(int doc) { return (double)intVal(doc); } + @Override public String strVal(int doc) { int ord=termsIndex.getOrd(doc); if (ord == 0) { @@ -463,18 +481,21 @@ class DateFieldSource extends FieldCacheSource { } } + @Override public String toString(int doc) { return description() + '=' + intVal(doc); } }; } + @Override public boolean equals(Object o) { return o instanceof DateFieldSource && super.equals(o); } private static int hcode = DateFieldSource.class.hashCode(); + @Override public int hashCode() { return hcode + super.hashCode(); }; diff --git a/solr/src/java/org/apache/solr/schema/DoubleField.java b/solr/src/java/org/apache/solr/schema/DoubleField.java index 79da6845ccd..62e34e7ab82 100644 --- a/solr/src/java/org/apache/solr/schema/DoubleField.java +++ b/solr/src/java/org/apache/solr/schema/DoubleField.java @@ -33,11 +33,13 @@ import java.util.Map; * @version $Id$ */ public class DoubleField extends FieldType { + @Override protected void init(IndexSchema schema, Map args) { restrictProps(SORT_MISSING_FIRST | SORT_MISSING_LAST); } ///////////////////////////////////////////////////////////// + @Override public SortField getSortField(SchemaField field, boolean reverse) { return new SortField(field.name, SortField.DOUBLE, reverse); } @@ -48,6 +50,7 @@ public class DoubleField extends FieldType { return new DoubleFieldSource( new DoubleValuesCreator( field.name, null, 
CachedArrayCreator.CACHE_VALUES_AND_BITS ) ); } + @Override public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { String s = f.stringValue(); diff --git a/solr/src/java/org/apache/solr/schema/ExternalFileField.java b/solr/src/java/org/apache/solr/schema/ExternalFileField.java index f468202c7ea..2eb539426b8 100755 --- a/solr/src/java/org/apache/solr/schema/ExternalFileField.java +++ b/solr/src/java/org/apache/solr/schema/ExternalFileField.java @@ -59,6 +59,7 @@ public class ExternalFileField extends FieldType { private IndexSchema schema; private float defVal; + @Override protected void init(IndexSchema schema, Map args) { restrictProps(SORT_MISSING_FIRST | SORT_MISSING_LAST); String ftypeS = getArg("valType", args); @@ -74,14 +75,17 @@ public class ExternalFileField extends FieldType { this.schema = schema; } + @Override public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { throw new UnsupportedOperationException(); } + @Override public SortField getSortField(SchemaField field,boolean reverse) { throw new UnsupportedOperationException(); } + @Override public ValueSource getValueSource(SchemaField field, QParser parser) { // default key field to unique key SchemaField keyField = keyFieldName==null ? 
schema.getUniqueKeyField() : schema.getField(keyFieldName); diff --git a/solr/src/java/org/apache/solr/schema/FieldType.java b/solr/src/java/org/apache/solr/schema/FieldType.java index a92c0f42578..712c22519f7 100644 --- a/solr/src/java/org/apache/solr/schema/FieldType.java +++ b/solr/src/java/org/apache/solr/schema/FieldType.java @@ -185,6 +185,7 @@ public abstract class FieldType extends FieldProperties { this.typeName = typeName; } + @Override public String toString() { return typeName + "{class=" + this.getClass().getName() // + propertiesToString(properties) @@ -390,6 +391,7 @@ public abstract class FieldType extends FieldProperties { this.maxChars=maxChars; } + @Override public TokenStreamInfo getStream(String fieldName, Reader reader) { Tokenizer ts = new Tokenizer(reader) { final char[] cbuf = new char[maxChars]; diff --git a/solr/src/java/org/apache/solr/schema/FloatField.java b/solr/src/java/org/apache/solr/schema/FloatField.java index 9cd2cad66b0..2df5ec9c345 100644 --- a/solr/src/java/org/apache/solr/schema/FloatField.java +++ b/solr/src/java/org/apache/solr/schema/FloatField.java @@ -32,10 +32,12 @@ import java.io.IOException; * @version $Id$ */ public class FloatField extends FieldType { + @Override protected void init(IndexSchema schema, Map args) { restrictProps(SORT_MISSING_FIRST | SORT_MISSING_LAST); } + @Override public SortField getSortField(SchemaField field,boolean reverse) { return new SortField(field.name,SortField.FLOAT, reverse); } @@ -45,6 +47,7 @@ public class FloatField extends FieldType { return new FloatFieldSource( new FloatValuesCreator( field.name, null, CachedArrayCreator.CACHE_VALUES_AND_BITS ) ); } + @Override public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { String s = f.stringValue(); diff --git a/solr/src/java/org/apache/solr/schema/IndexSchema.java b/solr/src/java/org/apache/solr/schema/IndexSchema.java index df47bbf4de0..b5727233e48 100644 --- 
a/solr/src/java/org/apache/solr/schema/IndexSchema.java +++ b/solr/src/java/org/apache/solr/schema/IndexSchema.java @@ -316,6 +316,7 @@ public final class IndexSchema { return analyzer!=null ? analyzer : getDynamicFieldType(fieldName).getAnalyzer(); } + @Override public TokenStream tokenStream(String fieldName, Reader reader) { return getAnalyzer(fieldName).tokenStream(fieldName,reader); @@ -497,6 +498,7 @@ public final class IndexSchema { Node node = (Node) xpath.evaluate("/schema/similarity", document, XPathConstants.NODE); if (node==null) { similarityFactory = new SimilarityFactory() { + @Override public SimilarityProvider getSimilarityProvider() { return IndexSearcher.getDefaultSimilarityProvider(); } @@ -512,6 +514,7 @@ public final class IndexSchema { } else { // just like always, assume it's a SimilarityProvider and get a ClassCastException - reasonable error handling similarityFactory = new SimilarityFactory() { + @Override public SimilarityProvider getSimilarityProvider() { return (SimilarityProvider) obj; } @@ -946,6 +949,7 @@ public final class IndexSchema { return new SchemaField(prototype, name); } + @Override public String toString() { return prototype.toString(); } diff --git a/solr/src/java/org/apache/solr/schema/IntField.java b/solr/src/java/org/apache/solr/schema/IntField.java index e8ccab2b667..5d8182d52ae 100644 --- a/solr/src/java/org/apache/solr/schema/IntField.java +++ b/solr/src/java/org/apache/solr/schema/IntField.java @@ -32,10 +32,12 @@ import java.io.IOException; * @version $Id$ */ public class IntField extends FieldType { + @Override protected void init(IndexSchema schema, Map args) { restrictProps(SORT_MISSING_FIRST | SORT_MISSING_LAST); } + @Override public SortField getSortField(SchemaField field,boolean reverse) { return new SortField(field.name,SortField.INT, reverse); } @@ -45,6 +47,7 @@ public class IntField extends FieldType { return new IntFieldSource(new IntValuesCreator( field.name, null, 
CachedArrayCreator.CACHE_VALUES_AND_BITS ) ); } + @Override public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { String s = f.stringValue(); diff --git a/solr/src/java/org/apache/solr/schema/LongField.java b/solr/src/java/org/apache/solr/schema/LongField.java index c716552f5af..f1189b6adb7 100644 --- a/solr/src/java/org/apache/solr/schema/LongField.java +++ b/solr/src/java/org/apache/solr/schema/LongField.java @@ -32,12 +32,14 @@ import java.util.Map; * @version $Id$ */ public class LongField extends FieldType { + @Override protected void init(IndexSchema schema, Map args) { restrictProps(SORT_MISSING_FIRST | SORT_MISSING_LAST); } ///////////////////////////////////////////////////////////// + @Override public SortField getSortField(SchemaField field,boolean reverse) { return new SortField(field.name,SortField.LONG, reverse); diff --git a/solr/src/java/org/apache/solr/schema/RandomSortField.java b/solr/src/java/org/apache/solr/schema/RandomSortField.java index fd69557e3b1..f21047177c8 100644 --- a/solr/src/java/org/apache/solr/schema/RandomSortField.java +++ b/solr/src/java/org/apache/solr/schema/RandomSortField.java @@ -100,33 +100,40 @@ public class RandomSortField extends FieldType { private static FieldComparatorSource randomComparatorSource = new FieldComparatorSource() { + @Override public FieldComparator newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException { return new FieldComparator() { int seed; private final int[] values = new int[numHits]; int bottomVal; + @Override public int compare(int slot1, int slot2) { return values[slot1] - values[slot2]; // values will be positive... no overflow possible. 
} + @Override public void setBottom(int slot) { bottomVal = values[slot]; } + @Override public int compareBottom(int doc) throws IOException { return bottomVal - hash(doc+seed); } + @Override public void copy(int slot, int doc) throws IOException { values[slot] = hash(doc+seed); } + @Override public FieldComparator setNextReader(AtomicReaderContext context) throws IOException { seed = getSeed(fieldname, context); return this; } + @Override public Comparable value(int slot) { return values[slot]; } diff --git a/solr/src/java/org/apache/solr/schema/ShortField.java b/solr/src/java/org/apache/solr/schema/ShortField.java index 94c77450a74..2db6861ccc0 100644 --- a/solr/src/java/org/apache/solr/schema/ShortField.java +++ b/solr/src/java/org/apache/solr/schema/ShortField.java @@ -35,12 +35,14 @@ import java.util.Map; * **/ public class ShortField extends FieldType { + @Override protected void init(IndexSchema schema, Map args) { restrictProps(SORT_MISSING_FIRST | SORT_MISSING_LAST); } ///////////////////////////////////////////////////////////// + @Override public SortField getSortField(SchemaField field, boolean reverse) { return new SortField(field.name, SortField.SHORT, reverse); diff --git a/solr/src/java/org/apache/solr/schema/SortableDoubleField.java b/solr/src/java/org/apache/solr/schema/SortableDoubleField.java index b12858b45c2..bb57454b19c 100644 --- a/solr/src/java/org/apache/solr/schema/SortableDoubleField.java +++ b/solr/src/java/org/apache/solr/schema/SortableDoubleField.java @@ -40,10 +40,13 @@ import java.io.IOException; * * @deprecated use {@link DoubleField} or {@link TrieDoubleField} - will be removed in 5.x */ +@Deprecated public class SortableDoubleField extends FieldType { + @Override protected void init(IndexSchema schema, Map args) { } + @Override public SortField getSortField(SchemaField field,boolean reverse) { return getStringSort(field,reverse); } @@ -53,10 +56,12 @@ public class SortableDoubleField extends FieldType { return new 
SortableDoubleFieldSource(field.name); } + @Override public String toInternal(String val) { return NumberUtils.double2sortableStr(val); } + @Override public String toExternal(Fieldable f) { return indexedToReadable(f.stringValue()); } @@ -66,6 +71,7 @@ public class SortableDoubleField extends FieldType { return NumberUtils.SortableStr2double(f.stringValue()); } + @Override public String indexedToReadable(String indexedForm) { return NumberUtils.SortableStr2doubleStr(indexedForm); } @@ -76,6 +82,7 @@ public class SortableDoubleField extends FieldType { out.write( indexedToReadable(ByteUtils.UTF8toUTF16(input)) ); } + @Override public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { String sval = f.stringValue(); writer.writeDouble(name, NumberUtils.SortableStr2double(sval)); @@ -97,41 +104,50 @@ class SortableDoubleFieldSource extends FieldCacheSource { this.defVal = defVal; } + @Override public String description() { return "sdouble(" + field + ')'; } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { final double def = defVal; return new StringIndexDocValues(this, readerContext, field) { private final BytesRef spare = new BytesRef(); + @Override protected String toTerm(String readableValue) { return NumberUtils.double2sortableStr(readableValue); } + @Override public float floatVal(int doc) { return (float)doubleVal(doc); } + @Override public int intVal(int doc) { return (int)doubleVal(doc); } + @Override public long longVal(int doc) { return (long)doubleVal(doc); } + @Override public double doubleVal(int doc) { int ord=termsIndex.getOrd(doc); return ord==0 ? 
def : NumberUtils.SortableStr2double(termsIndex.lookup(ord, spare)); } + @Override public String strVal(int doc) { return Double.toString(doubleVal(doc)); } + @Override public String toString(int doc) { return description() + '=' + doubleVal(doc); } @@ -162,6 +178,7 @@ class SortableDoubleFieldSource extends FieldCacheSource { }; } + @Override public boolean equals(Object o) { return o instanceof SortableDoubleFieldSource && super.equals(o) @@ -169,6 +186,7 @@ class SortableDoubleFieldSource extends FieldCacheSource { } private static int hcode = SortableDoubleFieldSource.class.hashCode(); + @Override public int hashCode() { long bits = Double.doubleToLongBits(defVal); int ibits = (int)(bits ^ (bits>>>32)); // mix upper bits into lower. diff --git a/solr/src/java/org/apache/solr/schema/SortableFloatField.java b/solr/src/java/org/apache/solr/schema/SortableFloatField.java index b495227b1f6..5b10892c9d2 100644 --- a/solr/src/java/org/apache/solr/schema/SortableFloatField.java +++ b/solr/src/java/org/apache/solr/schema/SortableFloatField.java @@ -40,10 +40,13 @@ import java.io.IOException; * * @deprecated use {@link FloatField} or {@link TrieFloatField} - will be removed in 5.x */ +@Deprecated public class SortableFloatField extends FieldType { + @Override protected void init(IndexSchema schema, Map args) { } + @Override public SortField getSortField(SchemaField field,boolean reverse) { return getStringSort(field,reverse); } @@ -53,10 +56,12 @@ public class SortableFloatField extends FieldType { return new SortableFloatFieldSource(field.name); } + @Override public String toInternal(String val) { return NumberUtils.float2sortableStr(val); } + @Override public String toExternal(Fieldable f) { return indexedToReadable(f.stringValue()); } @@ -66,6 +71,7 @@ public class SortableFloatField extends FieldType { return NumberUtils.SortableStr2float(f.stringValue()); } + @Override public String indexedToReadable(String indexedForm) { return 
NumberUtils.SortableStr2floatStr(indexedForm); } @@ -76,6 +82,7 @@ public class SortableFloatField extends FieldType { out.write( indexedToReadable(ByteUtils.UTF8toUTF16(input)) ); } + @Override public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { String sval = f.stringValue(); writer.writeFloat(name, NumberUtils.SortableStr2float(sval)); @@ -97,41 +104,50 @@ class SortableFloatFieldSource extends FieldCacheSource { this.defVal = defVal; } + @Override public String description() { return "sfloat(" + field + ')'; } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { final float def = defVal; return new StringIndexDocValues(this, readerContext, field) { private final BytesRef spare = new BytesRef(); + @Override protected String toTerm(String readableValue) { return NumberUtils.float2sortableStr(readableValue); } + @Override public float floatVal(int doc) { int ord=termsIndex.getOrd(doc); return ord==0 ? 
def : NumberUtils.SortableStr2float(termsIndex.lookup(ord, spare)); } + @Override public int intVal(int doc) { return (int)floatVal(doc); } + @Override public long longVal(int doc) { return (long)floatVal(doc); } + @Override public double doubleVal(int doc) { return (double)floatVal(doc); } + @Override public String strVal(int doc) { return Float.toString(floatVal(doc)); } + @Override public String toString(int doc) { return description() + '=' + floatVal(doc); } @@ -162,6 +178,7 @@ class SortableFloatFieldSource extends FieldCacheSource { }; } + @Override public boolean equals(Object o) { return o instanceof SortableFloatFieldSource && super.equals(o) @@ -169,6 +186,7 @@ class SortableFloatFieldSource extends FieldCacheSource { } private static int hcode = SortableFloatFieldSource.class.hashCode(); + @Override public int hashCode() { return hcode + super.hashCode() + Float.floatToIntBits(defVal); }; diff --git a/solr/src/java/org/apache/solr/schema/SortableIntField.java b/solr/src/java/org/apache/solr/schema/SortableIntField.java index 421e4bc45fc..14bb248b972 100644 --- a/solr/src/java/org/apache/solr/schema/SortableIntField.java +++ b/solr/src/java/org/apache/solr/schema/SortableIntField.java @@ -40,10 +40,13 @@ import java.io.IOException; * * @deprecated use {@link IntField} or {@link TrieIntField} - will be removed in 5.x */ +@Deprecated public class SortableIntField extends FieldType { + @Override protected void init(IndexSchema schema, Map args) { } + @Override public SortField getSortField(SchemaField field,boolean reverse) { return getStringSort(field,reverse); } @@ -53,6 +56,7 @@ public class SortableIntField extends FieldType { return new SortableIntFieldSource(field.name); } + @Override public String toInternal(String val) { // special case single digits? years?, etc // stringCache? 
general stringCache on a @@ -60,10 +64,12 @@ public class SortableIntField extends FieldType { return NumberUtils.int2sortableStr(val); } + @Override public String toExternal(Fieldable f) { return indexedToReadable(f.stringValue()); } + @Override public String indexedToReadable(String indexedForm) { return NumberUtils.SortableStr2int(indexedForm); } @@ -79,6 +85,7 @@ public class SortableIntField extends FieldType { return NumberUtils.SortableStr2int(f.stringValue(), 0, 3); } + @Override public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { String sval = f.stringValue(); writer.writeInt(name, NumberUtils.SortableStr2int(sval,0,sval.length())); @@ -99,41 +106,50 @@ class SortableIntFieldSource extends FieldCacheSource { this.defVal = defVal; } + @Override public String description() { return "sint(" + field + ')'; } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { final int def = defVal; return new StringIndexDocValues(this, readerContext, field) { private final BytesRef spare = new BytesRef(); + @Override protected String toTerm(String readableValue) { return NumberUtils.int2sortableStr(readableValue); } + @Override public float floatVal(int doc) { return (float)intVal(doc); } + @Override public int intVal(int doc) { int ord=termsIndex.getOrd(doc); return ord==0 ? 
def : NumberUtils.SortableStr2int(termsIndex.lookup(ord, spare),0,3); } + @Override public long longVal(int doc) { return (long)intVal(doc); } + @Override public double doubleVal(int doc) { return (double)intVal(doc); } + @Override public String strVal(int doc) { return Integer.toString(intVal(doc)); } + @Override public String toString(int doc) { return description() + '=' + intVal(doc); } @@ -165,6 +181,7 @@ class SortableIntFieldSource extends FieldCacheSource { }; } + @Override public boolean equals(Object o) { return o instanceof SortableIntFieldSource && super.equals(o) @@ -172,6 +189,7 @@ class SortableIntFieldSource extends FieldCacheSource { } private static int hcode = SortableIntFieldSource.class.hashCode(); + @Override public int hashCode() { return hcode + super.hashCode() + defVal; }; diff --git a/solr/src/java/org/apache/solr/schema/SortableLongField.java b/solr/src/java/org/apache/solr/schema/SortableLongField.java index d23fff2bb26..f68d5e85fd3 100644 --- a/solr/src/java/org/apache/solr/schema/SortableLongField.java +++ b/solr/src/java/org/apache/solr/schema/SortableLongField.java @@ -40,10 +40,13 @@ import java.io.IOException; * * @deprecated use {@link LongField} or {@link TrieLongtField} - will be removed in 5.x */ +@Deprecated public class SortableLongField extends FieldType { + @Override protected void init(IndexSchema schema, Map args) { } + @Override public SortField getSortField(SchemaField field,boolean reverse) { return getStringSort(field,reverse); } @@ -53,10 +56,12 @@ public class SortableLongField extends FieldType { return new SortableLongFieldSource(field.name); } + @Override public String toInternal(String val) { return NumberUtils.long2sortableStr(val); } + @Override public String indexedToReadable(String indexedForm) { return NumberUtils.SortableStr2long(indexedForm); } @@ -67,6 +72,7 @@ public class SortableLongField extends FieldType { out.write( indexedToReadable(ByteUtils.UTF8toUTF16(input)) ); } + @Override public String 
toExternal(Fieldable f) { return indexedToReadable(f.stringValue()); } @@ -76,6 +82,7 @@ public class SortableLongField extends FieldType { return NumberUtils.SortableStr2long(f.stringValue(),0,5); } + @Override public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { String sval = f.stringValue(); writer.writeLong(name, NumberUtils.SortableStr2long(sval,0,sval.length())); @@ -98,41 +105,50 @@ class SortableLongFieldSource extends FieldCacheSource { this.defVal = defVal; } + @Override public String description() { return "slong(" + field + ')'; } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { final long def = defVal; return new StringIndexDocValues(this, readerContext, field) { private final BytesRef spare = new BytesRef(); + @Override protected String toTerm(String readableValue) { return NumberUtils.long2sortableStr(readableValue); } + @Override public float floatVal(int doc) { return (float)longVal(doc); } + @Override public int intVal(int doc) { return (int)longVal(doc); } + @Override public long longVal(int doc) { int ord=termsIndex.getOrd(doc); return ord==0 ? 
def : NumberUtils.SortableStr2long(termsIndex.lookup(ord, spare),0,5); } + @Override public double doubleVal(int doc) { return (double)longVal(doc); } + @Override public String strVal(int doc) { return Long.toString(longVal(doc)); } + @Override public String toString(int doc) { return description() + '=' + longVal(doc); } @@ -163,6 +179,7 @@ class SortableLongFieldSource extends FieldCacheSource { }; } + @Override public boolean equals(Object o) { return o instanceof SortableLongFieldSource && super.equals(o) @@ -170,6 +187,7 @@ class SortableLongFieldSource extends FieldCacheSource { } private static int hcode = SortableLongFieldSource.class.hashCode(); + @Override public int hashCode() { return hcode + super.hashCode() + (int)defVal; }; diff --git a/solr/src/java/org/apache/solr/schema/StrField.java b/solr/src/java/org/apache/solr/schema/StrField.java index 3bfc5a5a0d9..7e34ab228b5 100644 --- a/solr/src/java/org/apache/solr/schema/StrField.java +++ b/solr/src/java/org/apache/solr/schema/StrField.java @@ -31,18 +31,22 @@ import java.io.IOException; * @version $Id$ */ public class StrField extends FieldType { + @Override protected void init(IndexSchema schema, Map args) { super.init(schema, args); } + @Override public SortField getSortField(SchemaField field,boolean reverse) { return getStringSort(field,reverse); } + @Override public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { writer.writeStr(name, f.stringValue(), true); } + @Override public ValueSource getValueSource(SchemaField field, QParser parser) { return new StrFieldSource(field.getName()); } diff --git a/solr/src/java/org/apache/solr/schema/StrFieldSource.java b/solr/src/java/org/apache/solr/schema/StrFieldSource.java index 36dcfcefcee..c1f78473cec 100755 --- a/solr/src/java/org/apache/solr/schema/StrFieldSource.java +++ b/solr/src/java/org/apache/solr/schema/StrFieldSource.java @@ -32,41 +32,51 @@ public class StrFieldSource extends FieldCacheSource { super(field); 
} + @Override public String description() { return "str(" + field + ')'; } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { return new StringIndexDocValues(this, readerContext, field) { + @Override protected String toTerm(String readableValue) { return readableValue; } + @Override public float floatVal(int doc) { return (float)intVal(doc); } + @Override public int intVal(int doc) { int ord=termsIndex.getOrd(doc); return ord; } + @Override public long longVal(int doc) { return (long)intVal(doc); } + @Override public double doubleVal(int doc) { return (double)intVal(doc); } + @Override public int ordVal(int doc) { return termsIndex.getOrd(doc); } + @Override public int numOrd() { return termsIndex.numOrd(); } + @Override public String strVal(int doc) { int ord=termsIndex.getOrd(doc); if (ord == 0) { @@ -76,18 +86,21 @@ public class StrFieldSource extends FieldCacheSource { } } + @Override public String toString(int doc) { return description() + '=' + strVal(doc); } }; } + @Override public boolean equals(Object o) { return o instanceof StrFieldSource && super.equals(o); } private static int hcode = SortableFloatFieldSource.class.hashCode(); + @Override public int hashCode() { return hcode + super.hashCode(); }; diff --git a/solr/src/java/org/apache/solr/schema/TextField.java b/solr/src/java/org/apache/solr/schema/TextField.java index 47f6cbdb26b..d43cf54efa6 100644 --- a/solr/src/java/org/apache/solr/schema/TextField.java +++ b/solr/src/java/org/apache/solr/schema/TextField.java @@ -49,6 +49,7 @@ import java.io.StringReader; public class TextField extends FieldType { protected boolean autoGeneratePhraseQueries = true; + @Override protected void init(IndexSchema schema, Map args) { properties |= TOKENIZED; if (schema.getVersion()> 1.1f) properties &= ~OMIT_TF_POSITIONS; @@ -62,10 +63,12 @@ public class TextField extends FieldType { return autoGeneratePhraseQueries; } + @Override public SortField 
getSortField(SchemaField field, boolean reverse) { return getStringSort(field, reverse); } + @Override public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { writer.writeStr(name, f.stringValue(), true); } diff --git a/solr/src/java/org/apache/solr/schema/TrieField.java b/solr/src/java/org/apache/solr/schema/TrieField.java index 314bdb38443..6f6d01c7023 100644 --- a/solr/src/java/org/apache/solr/schema/TrieField.java +++ b/solr/src/java/org/apache/solr/schema/TrieField.java @@ -121,6 +121,7 @@ public class TrieField extends FieldType { } } + @Override public SortField getSortField(SchemaField field, boolean top) { int flags = CachedArrayCreator.CACHE_VALUES_AND_BITS; Object missingValue = null; @@ -194,6 +195,7 @@ public class TrieField extends FieldType { } + @Override public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { byte[] arr = f.getBinaryValue(); if (arr==null) { @@ -586,6 +588,7 @@ class TrieDateFieldSource extends LongFieldSource { super(creator); } + @Override public String description() { return "date(" + field + ')'; } diff --git a/solr/src/java/org/apache/solr/search/BitDocSet.java b/solr/src/java/org/apache/solr/search/BitDocSet.java index 5bdd490a148..617986cb8f9 100644 --- a/solr/src/java/org/apache/solr/search/BitDocSet.java +++ b/solr/src/java/org/apache/solr/search/BitDocSet.java @@ -113,15 +113,18 @@ public class BitDocSet extends DocSetBase { * * @return the internal OpenBitSet that should not be modified. 
*/ + @Override public OpenBitSet getBits() { return bits; } + @Override public void add(int doc) { bits.set(doc); size=-1; // invalidate size } + @Override public void addUnique(int doc) { bits.set(doc); size=-1; // invalidate size diff --git a/solr/src/java/org/apache/solr/search/BoostQParserPlugin.java b/solr/src/java/org/apache/solr/search/BoostQParserPlugin.java index a87ce664943..334f1eed83c 100755 --- a/solr/src/java/org/apache/solr/search/BoostQParserPlugin.java +++ b/solr/src/java/org/apache/solr/search/BoostQParserPlugin.java @@ -43,12 +43,14 @@ public class BoostQParserPlugin extends QParserPlugin { public void init(NamedList args) { } + @Override public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) { return new QParser(qstr, localParams, params, req) { QParser baseParser; ValueSource vs; String b; + @Override public Query parse() throws ParseException { b = localParams.get(BOOSTFUNC); baseParser = subQuery(localParams.get(QueryParsing.V), null); @@ -65,14 +67,17 @@ public class BoostQParserPlugin extends QParserPlugin { } + @Override public String[] getDefaultHighlightFields() { return baseParser.getDefaultHighlightFields(); } + @Override public Query getHighlightQuery() throws ParseException { return baseParser.getHighlightQuery(); } + @Override public void addDebugInfo(NamedList debugInfo) { // encapsulate base debug info in a sub-list? baseParser.addDebugInfo(debugInfo); diff --git a/solr/src/java/org/apache/solr/search/DisMaxQParser.java b/solr/src/java/org/apache/solr/search/DisMaxQParser.java index 99659743445..f8398a1a63f 100644 --- a/solr/src/java/org/apache/solr/search/DisMaxQParser.java +++ b/solr/src/java/org/apache/solr/search/DisMaxQParser.java @@ -79,6 +79,7 @@ public class DisMaxQParser extends QParser { protected QParser altQParser; + @Override public Query parse() throws ParseException { SolrParams solrParams = localParams == null ? 
params : new DefaultSolrParams(localParams, params); queryFields = SolrPluginUtils.parseFieldBoosts(solrParams.getParams(DisMaxParams.QF)); @@ -244,6 +245,7 @@ public class DisMaxQParser extends QParser { return parsedUserQuery == null ? altUserQuery : parsedUserQuery; } + @Override public void addDebugInfo(NamedList debugInfo) { super.addDebugInfo(debugInfo); debugInfo.add("altquerystring", altUserQuery); diff --git a/solr/src/java/org/apache/solr/search/DisMaxQParserPlugin.java b/solr/src/java/org/apache/solr/search/DisMaxQParserPlugin.java index f6ae213dfc2..ca90008579e 100755 --- a/solr/src/java/org/apache/solr/search/DisMaxQParserPlugin.java +++ b/solr/src/java/org/apache/solr/search/DisMaxQParserPlugin.java @@ -115,6 +115,7 @@ public class DisMaxQParserPlugin extends QParserPlugin { public void init(NamedList args) { } + @Override public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) { return new DisMaxQParser(qstr, localParams, params, req); } diff --git a/solr/src/java/org/apache/solr/search/DocSet.java b/solr/src/java/org/apache/solr/search/DocSet.java index e4482ac0c73..d188389ecd5 100644 --- a/solr/src/java/org/apache/solr/search/DocSet.java +++ b/solr/src/java/org/apache/solr/search/DocSet.java @@ -152,6 +152,7 @@ public interface DocSet /* extends Collection */ { abstract class DocSetBase implements DocSet { // Not implemented efficiently... for testing purposes only + @Override public boolean equals(Object obj) { if (!(obj instanceof DocSet)) return false; DocSet other = (DocSet)obj; @@ -260,6 +261,7 @@ abstract class DocSetBase implements DocSet { final int max = base + maxDoc; // one past the max doc in this segment. 
return new DocIdSet() { + @Override public DocIdSetIterator iterator() throws IOException { return new DocIdSetIterator() { int pos=base-1; diff --git a/solr/src/java/org/apache/solr/search/DocSetHitCollector.java b/solr/src/java/org/apache/solr/search/DocSetHitCollector.java index c0067a74380..3caafa99727 100644 --- a/solr/src/java/org/apache/solr/search/DocSetHitCollector.java +++ b/solr/src/java/org/apache/solr/search/DocSetHitCollector.java @@ -45,6 +45,7 @@ class DocSetCollector extends Collector { this.maxDoc = maxDoc; this.scratch = new int[smallSetSize]; } + @Override public void collect(int doc) throws IOException { doc += base; // optimistically collect the first docs in an array @@ -77,13 +78,16 @@ class DocSetCollector extends Collector { } } + @Override public void setScorer(Scorer scorer) throws IOException { } + @Override public void setNextReader(AtomicReaderContext context) throws IOException { this.base = context.docBase; } + @Override public boolean acceptsDocsOutOfOrder() { return false; } @@ -97,6 +101,7 @@ class DocSetDelegateCollector extends DocSetCollector { this.collector = collector; } + @Override public void collect(int doc) throws IOException { collector.collect(doc); @@ -120,6 +125,7 @@ class DocSetDelegateCollector extends DocSetCollector { pos++; } + @Override public DocSet getDocSet() { if (pos<=scratch.length) { // assumes docs were collected in sorted order! 
@@ -131,10 +137,12 @@ class DocSetDelegateCollector extends DocSetCollector { } } + @Override public void setScorer(Scorer scorer) throws IOException { collector.setScorer(scorer); } + @Override public void setNextReader(AtomicReaderContext context) throws IOException { collector.setNextReader(context); this.base = context.docBase; diff --git a/solr/src/java/org/apache/solr/search/ExtendedDismaxQParserPlugin.java b/solr/src/java/org/apache/solr/search/ExtendedDismaxQParserPlugin.java index 3e418520c42..daeab8f73ae 100755 --- a/solr/src/java/org/apache/solr/search/ExtendedDismaxQParserPlugin.java +++ b/solr/src/java/org/apache/solr/search/ExtendedDismaxQParserPlugin.java @@ -55,6 +55,7 @@ public class ExtendedDismaxQParserPlugin extends QParserPlugin { public void init(NamedList args) { } + @Override public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) { return new ExtendedDismaxQParser(qstr, localParams, params, req); } @@ -96,6 +97,7 @@ class ExtendedDismaxQParser extends QParser { private QParser altQParser; + @Override public Query parse() throws ParseException { SolrParams localParams = getLocalParams(); SolrParams params = getParams(); @@ -483,6 +485,7 @@ class ExtendedDismaxQParser extends QParser { return parsedUserQuery == null ? 
altUserQuery : parsedUserQuery; } + @Override public void addDebugInfo(NamedList debugInfo) { super.addDebugInfo(debugInfo); debugInfo.add("altquerystring", altUserQuery); @@ -820,6 +823,7 @@ class ExtendedDismaxQParser extends QParser { analyzer.removeStopFilter = remove; } + @Override protected Query getBooleanQuery(List clauses, boolean disableCoord) throws ParseException { Query q = super.getBooleanQuery(clauses, disableCoord); if (q != null) { @@ -834,6 +838,7 @@ class ExtendedDismaxQParser extends QParser { //////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////// + @Override protected void addClause(List clauses, int conj, int mods, Query q) { //System.out.println("addClause:clauses="+clauses+" conj="+conj+" mods="+mods+" q="+q); super.addClause(clauses, conj, mods, q); @@ -1075,6 +1080,7 @@ final class ExtendedAnalyzer extends Analyzer { this.queryAnalyzer = parser.getReq().getSchema().getQueryAnalyzer(); } + @Override public TokenStream tokenStream(String fieldName, Reader reader) { if (!removeStopFilter) { return queryAnalyzer.tokenStream(fieldName, reader); @@ -1138,10 +1144,12 @@ final class ExtendedAnalyzer extends Analyzer { return newa.tokenStream(fieldName, reader); } + @Override public int getPositionIncrementGap(String fieldName) { return queryAnalyzer.getPositionIncrementGap(fieldName); } + @Override public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException { if (!removeStopFilter) { return queryAnalyzer.reusableTokenStream(fieldName, reader); diff --git a/solr/src/java/org/apache/solr/search/FastLRUCache.java b/solr/src/java/org/apache/solr/search/FastLRUCache.java index a65dbbce9bb..594f37f2b56 100644 --- a/solr/src/java/org/apache/solr/search/FastLRUCache.java +++ b/solr/src/java/org/apache/solr/search/FastLRUCache.java @@ -274,6 +274,7 @@ public class FastLRUCache extends SolrCacheBase implements SolrCache { return 
lst; } + @Override public String toString() { return name + getStatistics().toString(); } diff --git a/solr/src/java/org/apache/solr/search/FieldQParserPlugin.java b/solr/src/java/org/apache/solr/search/FieldQParserPlugin.java index f9dded8d97b..499086f3ba5 100644 --- a/solr/src/java/org/apache/solr/search/FieldQParserPlugin.java +++ b/solr/src/java/org/apache/solr/search/FieldQParserPlugin.java @@ -38,8 +38,10 @@ public class FieldQParserPlugin extends QParserPlugin { public void init(NamedList args) { } + @Override public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) { return new QParser(qstr, localParams, params, req) { + @Override public Query parse() throws ParseException { String field = localParams.get(QueryParsing.F); String queryText = localParams.get(QueryParsing.V); diff --git a/solr/src/java/org/apache/solr/search/FunctionQParser.java b/solr/src/java/org/apache/solr/search/FunctionQParser.java index c5e710acd54..dbd2d0e061f 100755 --- a/solr/src/java/org/apache/solr/search/FunctionQParser.java +++ b/solr/src/java/org/apache/solr/search/FunctionQParser.java @@ -56,6 +56,7 @@ public class FunctionQParser extends QParser { return parseMultipleSources; } + @Override public Query parse() throws ParseException { sp = new QueryParsing.StrParser(getString()); diff --git a/solr/src/java/org/apache/solr/search/FunctionQParserPlugin.java b/solr/src/java/org/apache/solr/search/FunctionQParserPlugin.java index 164e1ccedf0..80cd846e74f 100644 --- a/solr/src/java/org/apache/solr/search/FunctionQParserPlugin.java +++ b/solr/src/java/org/apache/solr/search/FunctionQParserPlugin.java @@ -31,6 +31,7 @@ public class FunctionQParserPlugin extends QParserPlugin { public void init(NamedList args) { } + @Override public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) { return new FunctionQParser(qstr, localParams, params, req); } diff --git 
a/solr/src/java/org/apache/solr/search/FunctionRangeQParserPlugin.java b/solr/src/java/org/apache/solr/search/FunctionRangeQParserPlugin.java index 1a71af49610..2283e494ffd 100755 --- a/solr/src/java/org/apache/solr/search/FunctionRangeQParserPlugin.java +++ b/solr/src/java/org/apache/solr/search/FunctionRangeQParserPlugin.java @@ -39,11 +39,13 @@ public class FunctionRangeQParserPlugin extends QParserPlugin { public void init(NamedList args) { } + @Override public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) { return new QParser(qstr, localParams, params, req) { ValueSource vs; String funcStr; + @Override public Query parse() throws ParseException { funcStr = localParams.get(QueryParsing.V, null); Query funcQ = subQuery(funcStr, FunctionQParserPlugin.NAME).parse(); diff --git a/solr/src/java/org/apache/solr/search/LRUCache.java b/solr/src/java/org/apache/solr/search/LRUCache.java index ea222a4dc61..00f8379f7be 100644 --- a/solr/src/java/org/apache/solr/search/LRUCache.java +++ b/solr/src/java/org/apache/solr/search/LRUCache.java @@ -77,6 +77,7 @@ public class LRUCache extends SolrCacheBase implements SolrCache { description += ')'; map = new LinkedHashMap(initialSize, 0.75f, true) { + @Override protected boolean removeEldestEntry(Map.Entry eldest) { if (size() > limit) { // increment evictions regardless of state. 
@@ -277,6 +278,7 @@ public class LRUCache extends SolrCacheBase implements SolrCache { return lst; } + @Override public String toString() { return name + getStatistics().toString(); } diff --git a/solr/src/java/org/apache/solr/search/LuceneQParserPlugin.java b/solr/src/java/org/apache/solr/search/LuceneQParserPlugin.java index 7f8d40a5bb9..b9d61e9ad3a 100755 --- a/solr/src/java/org/apache/solr/search/LuceneQParserPlugin.java +++ b/solr/src/java/org/apache/solr/search/LuceneQParserPlugin.java @@ -41,6 +41,7 @@ public class LuceneQParserPlugin extends QParserPlugin { public void init(NamedList args) { } + @Override public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) { return new LuceneQParser(qstr, localParams, params, req); } @@ -55,6 +56,7 @@ class LuceneQParser extends QParser { } + @Override public Query parse() throws ParseException { String qstr = getString(); if (qstr == null) return null; @@ -73,6 +75,7 @@ class LuceneQParser extends QParser { } + @Override public String[] getDefaultHighlightFields() { return lparser == null ? 
new String[]{} : new String[]{lparser.getField()}; } @@ -87,6 +90,7 @@ class OldLuceneQParser extends LuceneQParser { super(qstr, localParams, params, req); } + @Override public Query parse() throws ParseException { // handle legacy "query;sort" syntax if (getLocalParams() == null) { diff --git a/solr/src/java/org/apache/solr/search/LuceneQueryOptimizer.java b/solr/src/java/org/apache/solr/search/LuceneQueryOptimizer.java index 65812a5bf41..c641239c8f7 100644 --- a/solr/src/java/org/apache/solr/search/LuceneQueryOptimizer.java +++ b/solr/src/java/org/apache/solr/search/LuceneQueryOptimizer.java @@ -46,6 +46,7 @@ class LuceneQueryOptimizer { */ public LuceneQueryOptimizer(final int cacheSize, float threshold) { this.cache = new LinkedHashMap(cacheSize, 0.75f, true) { + @Override protected boolean removeEldestEntry(Map.Entry eldest) { return size() > cacheSize; // limit size of cache } diff --git a/solr/src/java/org/apache/solr/search/MissingStringLastComparatorSource.java b/solr/src/java/org/apache/solr/search/MissingStringLastComparatorSource.java index b8da084eca2..4248750f744 100644 --- a/solr/src/java/org/apache/solr/search/MissingStringLastComparatorSource.java +++ b/solr/src/java/org/apache/solr/search/MissingStringLastComparatorSource.java @@ -47,6 +47,7 @@ public class MissingStringLastComparatorSource extends FieldComparatorSource { this.missingValueProxy=missingValueProxy; } + @Override public FieldComparator newComparator(String fieldname, int numHits, int sortPos, boolean reversed) throws IOException { return new TermOrdValComparator_SML(numHits, fieldname, sortPos, reversed, missingValueProxy); } diff --git a/solr/src/java/org/apache/solr/search/MutableValue.java b/solr/src/java/org/apache/solr/search/MutableValue.java index 678430832c3..28fbbb99adc 100755 --- a/solr/src/java/org/apache/solr/search/MutableValue.java +++ b/solr/src/java/org/apache/solr/search/MutableValue.java @@ -50,6 +50,7 @@ public abstract class MutableValue implements Comparable { 
return (c1 == c2) && this.equalsSameType(other); } + @Override public abstract int hashCode(); @Override diff --git a/solr/src/java/org/apache/solr/search/NestedQParserPlugin.java b/solr/src/java/org/apache/solr/search/NestedQParserPlugin.java index ee51c2e172e..4032c6f850a 100755 --- a/solr/src/java/org/apache/solr/search/NestedQParserPlugin.java +++ b/solr/src/java/org/apache/solr/search/NestedQParserPlugin.java @@ -38,25 +38,30 @@ public class NestedQParserPlugin extends QParserPlugin { public void init(NamedList args) { } + @Override public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) { return new QParser(qstr, localParams, params, req) { QParser baseParser; ValueSource vs; String b; + @Override public Query parse() throws ParseException { baseParser = subQuery(localParams.get(QueryParsing.V), null); return baseParser.getQuery(); } + @Override public String[] getDefaultHighlightFields() { return baseParser.getDefaultHighlightFields(); } + @Override public Query getHighlightQuery() throws ParseException { return baseParser.getHighlightQuery(); } + @Override public void addDebugInfo(NamedList debugInfo) { // encapsulate base debug info in a sub-list? 
baseParser.addDebugInfo(debugInfo); diff --git a/solr/src/java/org/apache/solr/search/OldLuceneQParserPlugin.java b/solr/src/java/org/apache/solr/search/OldLuceneQParserPlugin.java index a9c33ccb9ff..1a5e4eb050a 100755 --- a/solr/src/java/org/apache/solr/search/OldLuceneQParserPlugin.java +++ b/solr/src/java/org/apache/solr/search/OldLuceneQParserPlugin.java @@ -31,6 +31,7 @@ public class OldLuceneQParserPlugin extends QParserPlugin { public void init(NamedList args) { } + @Override public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) { return new OldLuceneQParser(qstr, localParams, params, req); } diff --git a/solr/src/java/org/apache/solr/search/PrefixQParserPlugin.java b/solr/src/java/org/apache/solr/search/PrefixQParserPlugin.java index 82b33584139..8e1858cfc67 100755 --- a/solr/src/java/org/apache/solr/search/PrefixQParserPlugin.java +++ b/solr/src/java/org/apache/solr/search/PrefixQParserPlugin.java @@ -36,8 +36,10 @@ public class PrefixQParserPlugin extends QParserPlugin { public void init(NamedList args) { } + @Override public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) { return new QParser(qstr, localParams, params, req) { + @Override public Query parse() throws ParseException { return new PrefixQuery(new Term(localParams.get(QueryParsing.F), localParams.get(QueryParsing.V))); } diff --git a/solr/src/java/org/apache/solr/search/QueryParsing.java b/solr/src/java/org/apache/solr/search/QueryParsing.java index 624f627797d..1ffbdefb828 100644 --- a/solr/src/java/org/apache/solr/search/QueryParsing.java +++ b/solr/src/java/org/apache/solr/search/QueryParsing.java @@ -828,6 +828,7 @@ public class QueryParsing { return pos < end ? 
val.charAt(pos) : 0; } + @Override public String toString() { return "'" + val + "'" + ", pos=" + pos; } diff --git a/solr/src/java/org/apache/solr/search/QueryResultKey.java b/solr/src/java/org/apache/solr/search/QueryResultKey.java index 2f191432f41..7c1e080d7a4 100644 --- a/solr/src/java/org/apache/solr/search/QueryResultKey.java +++ b/solr/src/java/org/apache/solr/search/QueryResultKey.java @@ -56,10 +56,12 @@ public final class QueryResultKey { hc = h; } + @Override public int hashCode() { return hc; } + @Override public boolean equals(Object o) { if (o==this) return true; if (!(o instanceof QueryResultKey)) return false; diff --git a/solr/src/java/org/apache/solr/search/RawQParserPlugin.java b/solr/src/java/org/apache/solr/search/RawQParserPlugin.java index 70d4d5d8861..cbbea8978a1 100644 --- a/solr/src/java/org/apache/solr/search/RawQParserPlugin.java +++ b/solr/src/java/org/apache/solr/search/RawQParserPlugin.java @@ -40,8 +40,10 @@ public class RawQParserPlugin extends QParserPlugin { public void init(NamedList args) { } + @Override public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) { return new QParser(qstr, localParams, params, req) { + @Override public Query parse() throws ParseException { return new TermQuery(new Term(localParams.get(QueryParsing.F), localParams.get(QueryParsing.V))); } diff --git a/solr/src/java/org/apache/solr/search/SolrCacheBase.java b/solr/src/java/org/apache/solr/search/SolrCacheBase.java index 23a7267df4b..7b639b7dda9 100644 --- a/solr/src/java/org/apache/solr/search/SolrCacheBase.java +++ b/solr/src/java/org/apache/solr/search/SolrCacheBase.java @@ -55,6 +55,7 @@ public abstract class SolrCacheBase { throw new RuntimeException("Can't parse autoWarm value: " + configValue, e); } } + @Override public String toString() { return strVal; } diff --git a/solr/src/java/org/apache/solr/search/SolrConstantScoreQuery.java 
b/solr/src/java/org/apache/solr/search/SolrConstantScoreQuery.java index 653b9c93beb..357ee668d4d 100755 --- a/solr/src/java/org/apache/solr/search/SolrConstantScoreQuery.java +++ b/solr/src/java/org/apache/solr/search/SolrConstantScoreQuery.java @@ -41,14 +41,17 @@ public class SolrConstantScoreQuery extends ConstantScoreQuery { } /** Returns the encapsulated filter */ + @Override public Filter getFilter() { return filter; } + @Override public Query rewrite(IndexReader reader) throws IOException { return this; } + @Override public void extractTerms(Set terms) { // OK to not add any terms when used for MultiSearcher, // but may not be OK for highlighting diff --git a/solr/src/java/org/apache/solr/search/SolrIndexSearcher.java b/solr/src/java/org/apache/solr/search/SolrIndexSearcher.java index fbc4cedac08..06eaa74139c 100644 --- a/solr/src/java/org/apache/solr/search/SolrIndexSearcher.java +++ b/solr/src/java/org/apache/solr/search/SolrIndexSearcher.java @@ -184,6 +184,7 @@ public class SolrIndexSearcher extends IndexSearcher implements SolrInfoMBean { } + @Override public String toString() { return name; } @@ -207,6 +208,7 @@ public class SolrIndexSearcher extends IndexSearcher implements SolrInfoMBean { * * In particular, the underlying reader and any cache's in use are closed. */ + @Override public void close() throws IOException { if (cachingEnabled) { StringBuilder sb = new StringBuilder(); @@ -390,6 +392,7 @@ public class SolrIndexSearcher extends IndexSearcher implements SolrInfoMBean { /** * Retrieve the {@link Document} instance corresponding to the document id. */ + @Override public Document doc(int i) throws IOException { return doc(i, (Set)null); } @@ -398,6 +401,7 @@ public class SolrIndexSearcher extends IndexSearcher implements SolrInfoMBean { * This method does not currently use the Solr document cache. 
* * @see IndexReader#document(int, FieldSelector) */ + @Override public Document doc(int n, FieldSelector fieldSelector) throws IOException { return getIndexReader().document(n, fieldSelector); } @@ -1129,13 +1133,17 @@ public class SolrIndexSearcher extends IndexSearcher implements SolrInfoMBean { if (!needScores) { collector = new Collector () { + @Override public void setScorer(Scorer scorer) throws IOException { } + @Override public void collect(int doc) throws IOException { numHits[0]++; } + @Override public void setNextReader(AtomicReaderContext context) throws IOException { } + @Override public boolean acceptsDocsOutOfOrder() { return true; } @@ -1143,16 +1151,20 @@ public class SolrIndexSearcher extends IndexSearcher implements SolrInfoMBean { } else { collector = new Collector() { Scorer scorer; + @Override public void setScorer(Scorer scorer) throws IOException { this.scorer = scorer; } + @Override public void collect(int doc) throws IOException { numHits[0]++; float score = scorer.score(); if (score > topscore[0]) topscore[0]=score; } + @Override public void setNextReader(AtomicReaderContext context) throws IOException { } + @Override public boolean acceptsDocsOutOfOrder() { return true; } @@ -1250,16 +1262,20 @@ public class SolrIndexSearcher extends IndexSearcher implements SolrInfoMBean { } else { collector = setCollector = new DocSetDelegateCollector(smallSetSize, maxDoc, new Collector() { Scorer scorer; - public void setScorer(Scorer scorer) throws IOException { + @Override + public void setScorer(Scorer scorer) throws IOException { this.scorer = scorer; } - public void collect(int doc) throws IOException { + @Override + public void collect(int doc) throws IOException { float score = scorer.score(); if (score > topscore[0]) topscore[0]=score; } - public void setNextReader(AtomicReaderContext context) throws IOException { + @Override + public void setNextReader(AtomicReaderContext context) throws IOException { } - public boolean 
acceptsDocsOutOfOrder() { + @Override + public boolean acceptsDocsOutOfOrder() { return false; } }); diff --git a/solr/src/java/org/apache/solr/search/SortedIntDocSet.java b/solr/src/java/org/apache/solr/search/SortedIntDocSet.java index 07a62d3e041..295a794bde9 100755 --- a/solr/src/java/org/apache/solr/search/SortedIntDocSet.java +++ b/solr/src/java/org/apache/solr/search/SortedIntDocSet.java @@ -587,6 +587,7 @@ public class SortedIntDocSet extends DocSetBase { return new DocIdSet() { + @Override public DocIdSetIterator iterator() throws IOException { return new DocIdSetIterator() { int idx = startIdx; diff --git a/solr/src/java/org/apache/solr/search/SpatialBoxQParserPlugin.java b/solr/src/java/org/apache/solr/search/SpatialBoxQParserPlugin.java index 3dd099fb5ff..37065c5a2a3 100755 --- a/solr/src/java/org/apache/solr/search/SpatialBoxQParserPlugin.java +++ b/solr/src/java/org/apache/solr/search/SpatialBoxQParserPlugin.java @@ -31,6 +31,7 @@ public class SpatialBoxQParserPlugin extends SpatialFilterQParserPlugin { return new SpatialFilterQParser(qstr, localParams, params, req, true); } + @Override public void init(NamedList args) { } diff --git a/solr/src/java/org/apache/solr/search/TermQParserPlugin.java b/solr/src/java/org/apache/solr/search/TermQParserPlugin.java index 5499a71dfe0..39ce3d537fa 100644 --- a/solr/src/java/org/apache/solr/search/TermQParserPlugin.java +++ b/solr/src/java/org/apache/solr/search/TermQParserPlugin.java @@ -47,8 +47,10 @@ public class TermQParserPlugin extends QParserPlugin { public void init(NamedList args) { } + @Override public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) { return new QParser(qstr, localParams, params, req) { + @Override public Query parse() throws ParseException { String fname = localParams.get(QueryParsing.F); FieldType ft = req.getSchema().getFieldTypeNoEx(fname); diff --git a/solr/src/java/org/apache/solr/search/ValueSourceParser.java 
b/solr/src/java/org/apache/solr/search/ValueSourceParser.java index b7668608f61..867ec18d067 100755 --- a/solr/src/java/org/apache/solr/search/ValueSourceParser.java +++ b/solr/src/java/org/apache/solr/search/ValueSourceParser.java @@ -81,23 +81,27 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { static { addParser("ord", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { String field = fp.parseId(); return new OrdFieldSource(field); } }); addParser("literal", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { return new LiteralValueSource(fp.getString()); } }); addParser("rord", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { String field = fp.parseId(); return new ReverseOrdFieldSource(field); } }); addParser("top", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { // top(vs) is now a no-op ValueSource source = fp.parseValueSource(); @@ -105,6 +109,7 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { } }); addParser("linear", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { ValueSource source = fp.parseValueSource(); float slope = fp.parseFloat(); @@ -113,6 +118,7 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { } }); addParser("max", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { ValueSource source = fp.parseValueSource(); float val = fp.parseFloat(); @@ -120,6 +126,7 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { } }); addParser("recip", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { ValueSource source = fp.parseValueSource(); 
float m = fp.parseFloat(); @@ -129,6 +136,7 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { } }); addParser("scale", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { ValueSource source = fp.parseValueSource(); float min = fp.parseFloat(); @@ -137,6 +145,7 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { } }); addParser("div", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { ValueSource a = fp.parseValueSource(); ValueSource b = fp.parseValueSource(); @@ -144,6 +153,7 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { } }); addParser("map", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { ValueSource source = fp.parseValueSource(); float min = fp.parseFloat(); @@ -155,13 +165,16 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { }); addParser("abs", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { ValueSource source = fp.parseValueSource(); return new SimpleFloatFunction(source) { + @Override protected String name() { return "abs"; } + @Override protected float func(int doc, DocValues vals) { return Math.abs(vals.floatVal(doc)); } @@ -169,6 +182,7 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { } }); addParser("sum", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { List sources = fp.parseValueSourceList(); return new SumFloatFunction(sources.toArray(new ValueSource[sources.size()])); @@ -177,6 +191,7 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { alias("sum","add"); addParser("product", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws 
ParseException { List sources = fp.parseValueSourceList(); return new ProductFloatFunction(sources.toArray(new ValueSource[sources.size()])); @@ -185,14 +200,17 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { alias("product","mul"); addParser("sub", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { ValueSource a = fp.parseValueSource(); ValueSource b = fp.parseValueSource(); return new DualFloatFunction(a, b) { + @Override protected String name() { return "sub"; } + @Override protected float func(int doc, DocValues aVals, DocValues bVals) { return aVals.floatVal(doc) - bVals.floatVal(doc); } @@ -200,12 +218,14 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { } }); addParser("vector", new ValueSourceParser(){ + @Override public ValueSource parse(FunctionQParser fp) throws ParseException{ return new VectorValueSource(fp.parseValueSourceList()); } }); addParser("query", new ValueSourceParser() { // boost(query($q),rating) + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { Query q = fp.parseNestedQuery(); float defVal = 0.0f; @@ -216,6 +236,7 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { } }); addParser("boost", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { Query q = fp.parseNestedQuery(); ValueSource vs = fp.parseValueSource(); @@ -224,6 +245,7 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { } }); addParser("joindf", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { String f0 = fp.parseArg(); String qf = fp.parseArg(); @@ -234,6 +256,7 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { addParser("geodist", HaversineConstFunction.parser); addParser("hsin", new ValueSourceParser() { + @Override 
public ValueSource parse(FunctionQParser fp) throws ParseException { double radius = fp.parseDouble(); @@ -274,6 +297,7 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { }); addParser("ghhsin", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { double radius = fp.parseDouble(); @@ -285,6 +309,7 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { }); addParser("geohash", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { ValueSource lat = fp.parseValueSource(); @@ -294,6 +319,7 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { } }); addParser("strdist", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { ValueSource str1 = fp.parseValueSource(); @@ -319,117 +345,140 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { }); addParser(new DoubleParser("rad") { + @Override public double func(int doc, DocValues vals) { return vals.doubleVal(doc) * DistanceUtils.DEGREES_TO_RADIANS; } }); addParser(new DoubleParser("deg") { + @Override public double func(int doc, DocValues vals) { return vals.doubleVal(doc) * DistanceUtils.RADIANS_TO_DEGREES; } }); addParser(new DoubleParser("sqrt") { + @Override public double func(int doc, DocValues vals) { return Math.sqrt(vals.doubleVal(doc)); } }); addParser(new DoubleParser("cbrt") { + @Override public double func(int doc, DocValues vals) { return Math.cbrt(vals.doubleVal(doc)); } }); addParser(new DoubleParser("log") { + @Override public double func(int doc, DocValues vals) { return Math.log10(vals.doubleVal(doc)); } }); addParser(new DoubleParser("ln") { + @Override public double func(int doc, DocValues vals) { return Math.log(vals.doubleVal(doc)); } }); addParser(new DoubleParser("exp") { + @Override public double func(int doc, DocValues vals) 
{ return Math.exp(vals.doubleVal(doc)); } }); addParser(new DoubleParser("sin") { + @Override public double func(int doc, DocValues vals) { return Math.sin(vals.doubleVal(doc)); } }); addParser(new DoubleParser("cos") { + @Override public double func(int doc, DocValues vals) { return Math.cos(vals.doubleVal(doc)); } }); addParser(new DoubleParser("tan") { + @Override public double func(int doc, DocValues vals) { return Math.tan(vals.doubleVal(doc)); } }); addParser(new DoubleParser("asin") { + @Override public double func(int doc, DocValues vals) { return Math.asin(vals.doubleVal(doc)); } }); addParser(new DoubleParser("acos") { + @Override public double func(int doc, DocValues vals) { return Math.acos(vals.doubleVal(doc)); } }); addParser(new DoubleParser("atan") { + @Override public double func(int doc, DocValues vals) { return Math.atan(vals.doubleVal(doc)); } }); addParser(new DoubleParser("sinh") { + @Override public double func(int doc, DocValues vals) { return Math.sinh(vals.doubleVal(doc)); } }); addParser(new DoubleParser("cosh") { + @Override public double func(int doc, DocValues vals) { return Math.cosh(vals.doubleVal(doc)); } }); addParser(new DoubleParser("tanh") { + @Override public double func(int doc, DocValues vals) { return Math.tanh(vals.doubleVal(doc)); } }); addParser(new DoubleParser("ceil") { + @Override public double func(int doc, DocValues vals) { return Math.ceil(vals.doubleVal(doc)); } }); addParser(new DoubleParser("floor") { + @Override public double func(int doc, DocValues vals) { return Math.floor(vals.doubleVal(doc)); } }); addParser(new DoubleParser("rint") { + @Override public double func(int doc, DocValues vals) { return Math.rint(vals.doubleVal(doc)); } }); addParser(new Double2Parser("pow") { + @Override public double func(int doc, DocValues a, DocValues b) { return Math.pow(a.doubleVal(doc), b.doubleVal(doc)); } }); addParser(new Double2Parser("hypot") { + @Override public double func(int doc, DocValues a, DocValues b) { return 
Math.hypot(a.doubleVal(doc), b.doubleVal(doc)); } }); addParser(new Double2Parser("atan2") { + @Override public double func(int doc, DocValues a, DocValues b) { return Math.atan2(a.doubleVal(doc), b.doubleVal(doc)); } }); addParser("sqedist", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { List sources = fp.parseValueSourceList(); MVResult mvr = getMultiValueSources(sources); @@ -439,6 +488,7 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { }); addParser("dist", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { float power = fp.parseFloat(); List sources = fp.parseValueSourceList(); @@ -450,11 +500,13 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { addParser("pi", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { return new DoubleConstValueSource(Math.PI); } }); addParser("e", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { return new DoubleConstValueSource(Math.E); } @@ -462,6 +514,7 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { addParser("docfreq", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { TInfo tinfo = parseTerm(fp); return new DocFreqValueSource(tinfo.field, tinfo.val, tinfo.indexedField, tinfo.indexedBytes); @@ -469,6 +522,7 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { }); addParser("idf", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { TInfo tinfo = parseTerm(fp); return new IDFValueSource(tinfo.field, tinfo.val, tinfo.indexedField, tinfo.indexedBytes); @@ -476,6 +530,7 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { }); 
addParser("termfreq", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { TInfo tinfo = parseTerm(fp); return new TermFreqValueSource(tinfo.field, tinfo.val, tinfo.indexedField, tinfo.indexedBytes); @@ -483,6 +538,7 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { }); addParser("tf", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { TInfo tinfo = parseTerm(fp); return new TFValueSource(tinfo.field, tinfo.val, tinfo.indexedField, tinfo.indexedBytes); @@ -490,6 +546,7 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { }); addParser("norm", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { String field = fp.parseArg(); return new NormValueSource(field); @@ -497,12 +554,14 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { }); addParser("maxdoc", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { return new MaxDocValueSource(); } }); addParser("numdocs", new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { return new NumDocsValueSource(); } @@ -599,6 +658,7 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { class DateValueSourceParser extends ValueSourceParser { DateField df = new TrieDateField(); + @Override public void init(NamedList args) { } @@ -619,6 +679,7 @@ class DateValueSourceParser extends ValueSourceParser { return f.getType().getValueSource(f, fp); } + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { String first = fp.parseArg(); String second = fp.parseArg(); @@ -655,10 +716,12 @@ class DateValueSourceParser extends ValueSourceParser { // "dv" if (d1 != null && v2 != null) return new DualFloatFunction(new 
LongConstValueSource(ms1), v2) { + @Override protected String name() { return "ms"; } + @Override protected float func(int doc, DocValues aVals, DocValues bVals) { return ms1 - bVals.longVal(doc); } @@ -667,10 +730,12 @@ class DateValueSourceParser extends ValueSourceParser { // "vd" if (v1 != null && d2 != null) return new DualFloatFunction(v1, new LongConstValueSource(ms2)) { + @Override protected String name() { return "ms"; } + @Override protected float func(int doc, DocValues aVals, DocValues bVals) { return aVals.longVal(doc) - ms2; } @@ -679,10 +744,12 @@ class DateValueSourceParser extends ValueSourceParser { // "vv" if (v1 != null && v2 != null) return new DualFloatFunction(v1, v2) { + @Override protected String name() { return "ms"; } + @Override protected float func(int doc, DocValues aVals, DocValues bVals) { return aVals.longVal(doc) - bVals.longVal(doc); } @@ -706,42 +773,52 @@ class LongConstValueSource extends ConstNumberSource { this.fv = constant; } + @Override public String description() { return "const(" + constant + ")"; } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { return new DocValues() { + @Override public float floatVal(int doc) { return fv; } + @Override public int intVal(int doc) { return (int) constant; } + @Override public long longVal(int doc) { return constant; } + @Override public double doubleVal(int doc) { return dv; } + @Override public String strVal(int doc) { return Long.toString(constant); } + @Override public String toString(int doc) { return description(); } }; } + @Override public int hashCode() { return (int) constant + (int) (constant >>> 32); } + @Override public boolean equals(Object o) { if (LongConstValueSource.class != o.getClass()) return false; LongConstValueSource other = (LongConstValueSource) o; @@ -793,6 +870,7 @@ abstract class DoubleParser extends NamedParser { public abstract double func(int doc, DocValues vals); + @Override public ValueSource 
parse(FunctionQParser fp) throws ParseException { return new Function(fp.parseValueSource()); } @@ -802,6 +880,7 @@ abstract class DoubleParser extends NamedParser { super(source); } + @Override public String name() { return DoubleParser.this.name(); } @@ -810,21 +889,27 @@ abstract class DoubleParser extends NamedParser { public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { final DocValues vals = source.getValues(context, readerContext); return new DocValues() { + @Override public float floatVal(int doc) { return (float)doubleVal(doc); } + @Override public int intVal(int doc) { return (int)doubleVal(doc); } + @Override public long longVal(int doc) { return (long)doubleVal(doc); } + @Override public double doubleVal(int doc) { return func(doc, vals); } + @Override public String strVal(int doc) { return Double.toString(doubleVal(doc)); } + @Override public String toString(int doc) { return name() + '(' + vals.toString(doc) + ')'; } @@ -841,6 +926,7 @@ abstract class Double2Parser extends NamedParser { public abstract double func(int doc, DocValues a, DocValues b); + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { return new Function(fp.parseValueSource(), fp.parseValueSource()); } @@ -858,29 +944,37 @@ abstract class Double2Parser extends NamedParser { this.b = b; } + @Override public String description() { return name() + "(" + a.description() + "," + b.description() + ")"; } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { final DocValues aVals = a.getValues(context, readerContext); final DocValues bVals = b.getValues(context, readerContext); return new DocValues() { + @Override public float floatVal(int doc) { return (float)doubleVal(doc); } + @Override public int intVal(int doc) { return (int)doubleVal(doc); } + @Override public long longVal(int doc) { return (long)doubleVal(doc); } + @Override public double doubleVal(int doc) { 
return func(doc, aVals, bVals); } + @Override public String strVal(int doc) { return Double.toString(doubleVal(doc)); } + @Override public String toString(int doc) { return name() + '(' + aVals.toString(doc) + ',' + bVals.toString(doc) + ')'; } @@ -891,6 +985,7 @@ abstract class Double2Parser extends NamedParser { public void createWeight(Map context, IndexSearcher searcher) throws IOException { } + @Override public int hashCode() { int h = a.hashCode(); h ^= (h << 13) | (h >>> 20); @@ -900,6 +995,7 @@ abstract class Double2Parser extends NamedParser { return h; } + @Override public boolean equals(Object o) { if (this.getClass() != o.getClass()) return false; Function other = (Function)o; diff --git a/solr/src/java/org/apache/solr/search/function/BoostedQuery.java b/solr/src/java/org/apache/solr/search/function/BoostedQuery.java index 1283c600b60..c065aef4318 100755 --- a/solr/src/java/org/apache/solr/search/function/BoostedQuery.java +++ b/solr/src/java/org/apache/solr/search/function/BoostedQuery.java @@ -41,6 +41,7 @@ public class BoostedQuery extends Query { public Query getQuery() { return q; } public ValueSource getValueSource() { return boostVal; } + @Override public Query rewrite(IndexReader reader) throws IOException { Query newQ = q.rewrite(reader); if (newQ == q) return this; @@ -49,10 +50,12 @@ public class BoostedQuery extends Query { return bq; } + @Override public void extractTerms(Set terms) { q.extractTerms(terms); } + @Override public Weight createWeight(IndexSearcher searcher) throws IOException { return new BoostedQuery.BoostedWeight(searcher); } @@ -69,10 +72,12 @@ public class BoostedQuery extends Query { boostVal.createWeight(fcontext,searcher); } + @Override public Query getQuery() { return BoostedQuery.this; } + @Override public float getValue() { return getBoost(); } @@ -173,6 +178,7 @@ public class BoostedQuery extends Query { } + @Override public String toString(String field) { StringBuilder sb = new StringBuilder(); 
sb.append("boost(").append(q.toString(field)).append(',').append(boostVal).append(')'); @@ -180,6 +186,7 @@ public class BoostedQuery extends Query { return sb.toString(); } + @Override public boolean equals(Object o) { if (getClass() != o.getClass()) return false; BoostedQuery other = (BoostedQuery)o; @@ -188,6 +195,7 @@ public class BoostedQuery extends Query { && this.boostVal.equals(other.boostVal); } + @Override public int hashCode() { int h = q.hashCode(); h ^= (h << 17) | (h >>> 16); diff --git a/solr/src/java/org/apache/solr/search/function/ByteFieldSource.java b/solr/src/java/org/apache/solr/search/function/ByteFieldSource.java index 2dd5bb77ddf..36cdddeb716 100644 --- a/solr/src/java/org/apache/solr/search/function/ByteFieldSource.java +++ b/solr/src/java/org/apache/solr/search/function/ByteFieldSource.java @@ -37,10 +37,12 @@ public class ByteFieldSource extends NumericFieldCacheSource { super(creator); } + @Override public String description() { return "byte(" + field + ')'; } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { final ByteValues vals = cache.getBytes(readerContext.reader, field, creator); final byte[] arr = vals.values; @@ -56,26 +58,32 @@ public class ByteFieldSource extends NumericFieldCacheSource { return (short) arr[doc]; } + @Override public float floatVal(int doc) { return (float) arr[doc]; } + @Override public int intVal(int doc) { return (int) arr[doc]; } + @Override public long longVal(int doc) { return (long) arr[doc]; } + @Override public double doubleVal(int doc) { return (double) arr[doc]; } + @Override public String strVal(int doc) { return Byte.toString(arr[doc]); } + @Override public String toString(int doc) { return description() + '=' + byteVal(doc); } diff --git a/solr/src/java/org/apache/solr/search/function/ConstValueSource.java b/solr/src/java/org/apache/solr/search/function/ConstValueSource.java index 846591dafb2..d829fc9ddd8 100755 --- 
a/solr/src/java/org/apache/solr/search/function/ConstValueSource.java +++ b/solr/src/java/org/apache/solr/search/function/ConstValueSource.java @@ -34,37 +34,47 @@ public class ConstValueSource extends ConstNumberSource { this.dv = constant; } + @Override public String description() { return "const(" + constant + ")"; } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { return new DocValues() { + @Override public float floatVal(int doc) { return constant; } + @Override public int intVal(int doc) { return (int)constant; } + @Override public long longVal(int doc) { return (long)constant; } + @Override public double doubleVal(int doc) { return dv; } + @Override public String strVal(int doc) { return Float.toString(constant); } + @Override public String toString(int doc) { return description(); } }; } + @Override public int hashCode() { return Float.floatToIntBits(constant) * 31; } + @Override public boolean equals(Object o) { if (!(o instanceof ConstValueSource)) return false; ConstValueSource other = (ConstValueSource)o; diff --git a/solr/src/java/org/apache/solr/search/function/DivFloatFunction.java b/solr/src/java/org/apache/solr/search/function/DivFloatFunction.java index a66c7c61e1d..ad072b57fd8 100755 --- a/solr/src/java/org/apache/solr/search/function/DivFloatFunction.java +++ b/solr/src/java/org/apache/solr/search/function/DivFloatFunction.java @@ -28,10 +28,12 @@ public class DivFloatFunction extends DualFloatFunction { super(a,b); } + @Override protected String name() { return "div"; } + @Override protected float func(int doc, DocValues aVals, DocValues bVals) { return aVals.floatVal(doc) / bVals.floatVal(doc); } diff --git a/solr/src/java/org/apache/solr/search/function/DocFreqValueSource.java b/solr/src/java/org/apache/solr/search/function/DocFreqValueSource.java index 641f2a9aaa8..fff0219ffc0 100755 --- a/solr/src/java/org/apache/solr/search/function/DocFreqValueSource.java +++ 
b/solr/src/java/org/apache/solr/search/function/DocFreqValueSource.java @@ -45,21 +45,27 @@ class ConstIntDocValues extends DocValues { this.parent = parent; } + @Override public float floatVal(int doc) { return fval; } + @Override public int intVal(int doc) { return ival; } + @Override public long longVal(int doc) { return lval; } + @Override public double doubleVal(int doc) { return dval; } + @Override public String strVal(int doc) { return sval; } + @Override public String toString(int doc) { return parent.description() + '=' + sval; } @@ -82,21 +88,27 @@ class ConstDoubleDocValues extends DocValues { this.parent = parent; } + @Override public float floatVal(int doc) { return fval; } + @Override public int intVal(int doc) { return ival; } + @Override public long longVal(int doc) { return lval; } + @Override public double doubleVal(int doc) { return dval; } + @Override public String strVal(int doc) { return sval; } + @Override public String toString(int doc) { return parent.description() + '=' + sval; } @@ -250,10 +262,12 @@ public class DocFreqValueSource extends ValueSource { context.put("searcher",searcher); } + @Override public int hashCode() { return getClass().hashCode() + indexedField.hashCode()*29 + indexedBytes.hashCode(); } + @Override public boolean equals(Object o) { if (this.getClass() != o.getClass()) return false; DocFreqValueSource other = (DocFreqValueSource)o; diff --git a/solr/src/java/org/apache/solr/search/function/DoubleConstValueSource.java b/solr/src/java/org/apache/solr/search/function/DoubleConstValueSource.java index 9df2d685f2d..7c8632dacca 100755 --- a/solr/src/java/org/apache/solr/search/function/DoubleConstValueSource.java +++ b/solr/src/java/org/apache/solr/search/function/DoubleConstValueSource.java @@ -33,43 +33,53 @@ public class DoubleConstValueSource extends ConstNumberSource { this.lv = (long)constant; } + @Override public String description() { return "const(" + constant + ")"; } + @Override public DocValues getValues(Map 
context, AtomicReaderContext readerContext) throws IOException { return new DocValues() { + @Override public float floatVal(int doc) { return fv; } + @Override public int intVal(int doc) { return (int) lv; } + @Override public long longVal(int doc) { return lv; } + @Override public double doubleVal(int doc) { return constant; } + @Override public String strVal(int doc) { return Double.toString(constant); } + @Override public String toString(int doc) { return description(); } }; } + @Override public int hashCode() { long bits = Double.doubleToRawLongBits(constant); return (int)(bits ^ (bits >>> 32)); } + @Override public boolean equals(Object o) { if (!(o instanceof DoubleConstValueSource)) return false; DoubleConstValueSource other = (DoubleConstValueSource) o; diff --git a/solr/src/java/org/apache/solr/search/function/DoubleFieldSource.java b/solr/src/java/org/apache/solr/search/function/DoubleFieldSource.java index 02017aee0a6..a7bfe94dfd5 100644 --- a/solr/src/java/org/apache/solr/search/function/DoubleFieldSource.java +++ b/solr/src/java/org/apache/solr/search/function/DoubleFieldSource.java @@ -42,36 +42,44 @@ public class DoubleFieldSource extends NumericFieldCacheSource { super(creator); } + @Override public String description() { return "double(" + field + ')'; } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { final DoubleValues vals = cache.getDoubles(readerContext.reader, field, creator); final double[] arr = vals.values; final Bits valid = vals.valid; return new DocValues() { + @Override public float floatVal(int doc) { return (float) arr[doc]; } + @Override public int intVal(int doc) { return (int) arr[doc]; } + @Override public long longVal(int doc) { return (long) arr[doc]; } + @Override public double doubleVal(int doc) { return arr[doc]; } + @Override public String strVal(int doc) { return Double.toString(arr[doc]); } + @Override public String toString(int doc) { return description() + '=' + 
doubleVal(doc); } diff --git a/solr/src/java/org/apache/solr/search/function/DualFloatFunction.java b/solr/src/java/org/apache/solr/search/function/DualFloatFunction.java index 9eaec662091..b7a6d689960 100755 --- a/solr/src/java/org/apache/solr/search/function/DualFloatFunction.java +++ b/solr/src/java/org/apache/solr/search/function/DualFloatFunction.java @@ -39,29 +39,37 @@ public abstract class DualFloatFunction extends ValueSource { protected abstract String name(); protected abstract float func(int doc, DocValues aVals, DocValues bVals); + @Override public String description() { return name() + "(" + a.description() + "," + b.description() + ")"; } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { final DocValues aVals = a.getValues(context, readerContext); final DocValues bVals = b.getValues(context, readerContext); return new DocValues() { + @Override public float floatVal(int doc) { return func(doc, aVals, bVals); } + @Override public int intVal(int doc) { return (int)floatVal(doc); } + @Override public long longVal(int doc) { return (long)floatVal(doc); } + @Override public double doubleVal(int doc) { return floatVal(doc); } + @Override public String strVal(int doc) { return Float.toString(floatVal(doc)); } + @Override public String toString(int doc) { return name() + '(' + aVals.toString(doc) + ',' + bVals.toString(doc) + ')'; } @@ -74,6 +82,7 @@ public abstract class DualFloatFunction extends ValueSource { b.createWeight(context,searcher); } + @Override public int hashCode() { int h = a.hashCode(); h ^= (h << 13) | (h >>> 20); @@ -83,6 +92,7 @@ public abstract class DualFloatFunction extends ValueSource { return h; } + @Override public boolean equals(Object o) { if (this.getClass() != o.getClass()) return false; DualFloatFunction other = (DualFloatFunction)o; diff --git a/solr/src/java/org/apache/solr/search/function/FieldCacheSource.java 
b/solr/src/java/org/apache/solr/search/function/FieldCacheSource.java index f8f3531b73a..df80cecae03 100644 --- a/solr/src/java/org/apache/solr/search/function/FieldCacheSource.java +++ b/solr/src/java/org/apache/solr/search/function/FieldCacheSource.java @@ -37,10 +37,12 @@ public abstract class FieldCacheSource extends ValueSource { return cache; } + @Override public String description() { return field; } + @Override public boolean equals(Object o) { if (!(o instanceof FieldCacheSource)) return false; FieldCacheSource other = (FieldCacheSource)o; @@ -48,6 +50,7 @@ public abstract class FieldCacheSource extends ValueSource { && this.cache == other.cache; } + @Override public int hashCode() { return cache.hashCode() + field.hashCode(); }; diff --git a/solr/src/java/org/apache/solr/search/function/FileFloatSource.java b/solr/src/java/org/apache/solr/search/function/FileFloatSource.java index 817062c770d..7294ff8c744 100755 --- a/solr/src/java/org/apache/solr/search/function/FileFloatSource.java +++ b/solr/src/java/org/apache/solr/search/function/FileFloatSource.java @@ -53,10 +53,12 @@ public class FileFloatSource extends ValueSource { this.dataDir = parser.getReq().getCore().getDataDir(); } + @Override public String description() { return "float(" + field + ')'; } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { int offset = 0; ReaderContext topLevelContext = ReaderUtil.getTopLevelContext(readerContext); @@ -64,32 +66,39 @@ public class FileFloatSource extends ValueSource { final float[] arr = getCachedFloats(topLevelContext.reader); return new DocValues() { + @Override public float floatVal(int doc) { return arr[doc + off]; } + @Override public int intVal(int doc) { return (int)arr[doc + off]; } + @Override public long longVal(int doc) { return (long)arr[doc + off]; } + @Override public double doubleVal(int doc) { return (double)arr[doc + off]; } + @Override public String strVal(int doc) { return 
Float.toString(arr[doc + off]); } + @Override public String toString(int doc) { return description() + '=' + floatVal(doc); } }; } + @Override public boolean equals(Object o) { if (o.getClass() != FileFloatSource.class) return false; FileFloatSource other = (FileFloatSource)o; @@ -99,10 +108,12 @@ public class FileFloatSource extends ValueSource { && this.dataDir.equals(other.dataDir); } + @Override public int hashCode() { return FileFloatSource.class.hashCode() + field.getName().hashCode(); }; + @Override public String toString() { return "FileFloatSource(field="+field.getName()+",keyField="+keyField.getName() + ",defVal="+defVal+",dataDir="+dataDir+")"; @@ -114,6 +125,7 @@ public class FileFloatSource extends ValueSource { } static Cache floatCache = new Cache() { + @Override protected Object createValue(IndexReader reader, Object key) { return getFloats(((Entry)key).ffs, reader); } @@ -173,12 +185,14 @@ public class FileFloatSource extends ValueSource { this.ffs = ffs; } + @Override public boolean equals(Object o) { if (!(o instanceof Entry)) return false; Entry other = (Entry)o; return ffs.equals(other.ffs); } + @Override public int hashCode() { return ffs.hashCode(); } diff --git a/solr/src/java/org/apache/solr/search/function/FloatFieldSource.java b/solr/src/java/org/apache/solr/search/function/FloatFieldSource.java index a985b49fe7c..ed53627f1a3 100644 --- a/solr/src/java/org/apache/solr/search/function/FloatFieldSource.java +++ b/solr/src/java/org/apache/solr/search/function/FloatFieldSource.java @@ -41,36 +41,44 @@ public class FloatFieldSource extends NumericFieldCacheSource { super(creator); } + @Override public String description() { return "float(" + field + ')'; } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { final FloatValues vals = cache.getFloats(readerContext.reader, field, creator); final float[] arr = vals.values; final Bits valid = vals.valid; return new DocValues() { + @Override 
public float floatVal(int doc) { return arr[doc]; } + @Override public int intVal(int doc) { return (int)arr[doc]; } + @Override public long longVal(int doc) { return (long)arr[doc]; } + @Override public double doubleVal(int doc) { return (double)arr[doc]; } + @Override public String strVal(int doc) { return Float.toString(arr[doc]); } + @Override public String toString(int doc) { return description() + '=' + floatVal(doc); } diff --git a/solr/src/java/org/apache/solr/search/function/IntFieldSource.java b/solr/src/java/org/apache/solr/search/function/IntFieldSource.java index 0cee5e769cf..7da8ffb573c 100644 --- a/solr/src/java/org/apache/solr/search/function/IntFieldSource.java +++ b/solr/src/java/org/apache/solr/search/function/IntFieldSource.java @@ -41,11 +41,13 @@ public class IntFieldSource extends NumericFieldCacheSource { super(creator); } + @Override public String description() { return "int(" + field + ')'; } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { final IntValues vals = cache.getInts(readerContext.reader, field, creator); final int[] arr = vals.values; @@ -54,26 +56,32 @@ public class IntFieldSource extends NumericFieldCacheSource { return new DocValues() { final MutableValueInt val = new MutableValueInt(); + @Override public float floatVal(int doc) { return (float)arr[doc]; } + @Override public int intVal(int doc) { return arr[doc]; } + @Override public long longVal(int doc) { return (long)arr[doc]; } + @Override public double doubleVal(int doc) { return (double)arr[doc]; } + @Override public String strVal(int doc) { return Float.toString(arr[doc]); } + @Override public String toString(int doc) { return description() + '=' + intVal(doc); } diff --git a/solr/src/java/org/apache/solr/search/function/JoinDocFreqValueSource.java b/solr/src/java/org/apache/solr/search/function/JoinDocFreqValueSource.java index dae45789fca..a7d5f1f6bb0 100644 --- 
a/solr/src/java/org/apache/solr/search/function/JoinDocFreqValueSource.java +++ b/solr/src/java/org/apache/solr/search/function/JoinDocFreqValueSource.java @@ -43,10 +43,12 @@ public class JoinDocFreqValueSource extends FieldCacheSource { this.qfield = qfield; } + @Override public String description() { return NAME + "(" + field +":("+qfield+"))"; } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { final DocTerms terms = cache.getTerms(readerContext.reader, field, true ); @@ -55,6 +57,7 @@ public class JoinDocFreqValueSource extends FieldCacheSource { return new DocValues() { BytesRef ref = new BytesRef(); + @Override public int intVal(int doc) { try { @@ -68,28 +71,34 @@ public class JoinDocFreqValueSource extends FieldCacheSource { } } + @Override public float floatVal(int doc) { return (float)intVal(doc); } + @Override public long longVal(int doc) { return (long)intVal(doc); } + @Override public double doubleVal(int doc) { return (double)intVal(doc); } + @Override public String strVal(int doc) { return intVal(doc) + ""; } + @Override public String toString(int doc) { return description() + '=' + intVal(doc); } }; } + @Override public boolean equals(Object o) { if (o.getClass() != JoinDocFreqValueSource.class) return false; JoinDocFreqValueSource other = (JoinDocFreqValueSource)o; @@ -97,6 +106,7 @@ public class JoinDocFreqValueSource extends FieldCacheSource { return super.equals(other); } + @Override public int hashCode() { return qfield.hashCode() + super.hashCode(); }; diff --git a/solr/src/java/org/apache/solr/search/function/LinearFloatFunction.java b/solr/src/java/org/apache/solr/search/function/LinearFloatFunction.java index 7ceb07e2bf9..4fb7a5b5609 100644 --- a/solr/src/java/org/apache/solr/search/function/LinearFloatFunction.java +++ b/solr/src/java/org/apache/solr/search/function/LinearFloatFunction.java @@ -42,28 +42,36 @@ public class LinearFloatFunction extends ValueSource { this.intercept = 
intercept; } + @Override public String description() { return slope + "*float(" + source.description() + ")+" + intercept; } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { final DocValues vals = source.getValues(context, readerContext); return new DocValues() { + @Override public float floatVal(int doc) { return vals.floatVal(doc) * slope + intercept; } + @Override public int intVal(int doc) { return (int)floatVal(doc); } + @Override public long longVal(int doc) { return (long)floatVal(doc); } + @Override public double doubleVal(int doc) { return (double)floatVal(doc); } + @Override public String strVal(int doc) { return Float.toString(floatVal(doc)); } + @Override public String toString(int doc) { return slope + "*float(" + vals.toString(doc) + ")+" + intercept; } @@ -75,6 +83,7 @@ public class LinearFloatFunction extends ValueSource { source.createWeight(context, searcher); } + @Override public int hashCode() { int h = Float.floatToIntBits(slope); h = (h >>> 2) | (h << 30); @@ -83,6 +92,7 @@ public class LinearFloatFunction extends ValueSource { return h + source.hashCode(); } + @Override public boolean equals(Object o) { if (LinearFloatFunction.class != o.getClass()) return false; LinearFloatFunction other = (LinearFloatFunction)o; diff --git a/solr/src/java/org/apache/solr/search/function/LongFieldSource.java b/solr/src/java/org/apache/solr/search/function/LongFieldSource.java index dfa92b7b2c1..ffc58a6cdd1 100644 --- a/solr/src/java/org/apache/solr/search/function/LongFieldSource.java +++ b/solr/src/java/org/apache/solr/search/function/LongFieldSource.java @@ -43,6 +43,7 @@ public class LongFieldSource extends NumericFieldCacheSource { super(creator); } + @Override public String description() { return "long(" + field + ')'; } @@ -51,32 +52,39 @@ public class LongFieldSource extends NumericFieldCacheSource { return Long.parseLong(extVal); } + @Override public DocValues getValues(Map context, 
AtomicReaderContext readerContext) throws IOException { final LongValues vals = cache.getLongs(readerContext.reader, field, creator); final long[] arr = vals.values; final Bits valid = vals.valid; return new DocValues() { + @Override public float floatVal(int doc) { return (float) arr[doc]; } + @Override public int intVal(int doc) { return (int) arr[doc]; } + @Override public long longVal(int doc) { return arr[doc]; } + @Override public double doubleVal(int doc) { return arr[doc]; } + @Override public String strVal(int doc) { return Long.toString(arr[doc]); } + @Override public String toString(int doc) { return description() + '=' + longVal(doc); } diff --git a/solr/src/java/org/apache/solr/search/function/MaxFloatFunction.java b/solr/src/java/org/apache/solr/search/function/MaxFloatFunction.java index 37af4d94809..f7678f3b53a 100644 --- a/solr/src/java/org/apache/solr/search/function/MaxFloatFunction.java +++ b/solr/src/java/org/apache/solr/search/function/MaxFloatFunction.java @@ -41,29 +41,37 @@ public class MaxFloatFunction extends ValueSource { this.fval = fval; } + @Override public String description() { return "max(" + source.description() + "," + fval + ")"; } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { final DocValues vals = source.getValues(context, readerContext); return new DocValues() { + @Override public float floatVal(int doc) { float v = vals.floatVal(doc); return v < fval ? 
fval : v; } + @Override public int intVal(int doc) { return (int)floatVal(doc); } + @Override public long longVal(int doc) { return (long)floatVal(doc); } + @Override public double doubleVal(int doc) { return (double)floatVal(doc); } + @Override public String strVal(int doc) { return Float.toString(floatVal(doc)); } + @Override public String toString(int doc) { return "max(" + vals.toString(doc) + "," + fval + ")"; } @@ -75,12 +83,14 @@ public class MaxFloatFunction extends ValueSource { source.createWeight(context, searcher); } + @Override public int hashCode() { int h = Float.floatToIntBits(fval); h = (h >>> 2) | (h << 30); return h + source.hashCode(); } + @Override public boolean equals(Object o) { if (MaxFloatFunction.class != o.getClass()) return false; MaxFloatFunction other = (MaxFloatFunction)o; diff --git a/solr/src/java/org/apache/solr/search/function/MultiFloatFunction.java b/solr/src/java/org/apache/solr/search/function/MultiFloatFunction.java index f66bdfe7502..4e28799800e 100644 --- a/solr/src/java/org/apache/solr/search/function/MultiFloatFunction.java +++ b/solr/src/java/org/apache/solr/search/function/MultiFloatFunction.java @@ -38,6 +38,7 @@ public abstract class MultiFloatFunction extends ValueSource { abstract protected String name(); abstract protected float func(int doc, DocValues[] valsArr); + @Override public String description() { StringBuilder sb = new StringBuilder(); sb.append(name()).append('('); @@ -54,6 +55,7 @@ public abstract class MultiFloatFunction extends ValueSource { return sb.toString(); } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { final DocValues[] valsArr = new DocValues[sources.length]; for (int i=0; i extends Fie this.creator = creator; } + @Override public final boolean equals(Object o) { if (o.getClass() != this.getClass()) return false; NumericFieldCacheSource other = (NumericFieldCacheSource) o; @@ -40,6 +41,7 @@ public abstract class 
NumericFieldCacheSource extends Fie this.creator.getClass() == other.creator.getClass(); } + @Override public final int hashCode() { int h = creator == null ? this.getClass().hashCode() : creator.getClass().hashCode(); h += super.hashCode(); diff --git a/solr/src/java/org/apache/solr/search/function/OrdFieldSource.java b/solr/src/java/org/apache/solr/search/function/OrdFieldSource.java index 7c249571c48..09435accaa2 100644 --- a/solr/src/java/org/apache/solr/search/function/OrdFieldSource.java +++ b/solr/src/java/org/apache/solr/search/function/OrdFieldSource.java @@ -52,11 +52,13 @@ public class OrdFieldSource extends ValueSource { this.field = field; } + @Override public String description() { return "ord(" + field + ')'; } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { final int off = readerContext.docBase; final IndexReader topReader = ReaderUtil.getTopLevelContext(readerContext).reader; @@ -66,35 +68,43 @@ public class OrdFieldSource extends ValueSource { return readableValue; } + @Override public float floatVal(int doc) { return (float)sindex.getOrd(doc+off); } + @Override public int intVal(int doc) { return sindex.getOrd(doc+off); } + @Override public long longVal(int doc) { return (long)sindex.getOrd(doc+off); } + @Override public double doubleVal(int doc) { return (double)sindex.getOrd(doc+off); } + @Override public int ordVal(int doc) { return sindex.getOrd(doc+off); } + @Override public int numOrd() { return sindex.numOrd(); } + @Override public String strVal(int doc) { // the string value of the ordinal, not the string itself return Integer.toString(sindex.getOrd(doc+off)); } + @Override public String toString(int doc) { return description() + '=' + intVal(doc); } @@ -119,11 +129,13 @@ public class OrdFieldSource extends ValueSource { }; } + @Override public boolean equals(Object o) { return o.getClass() == OrdFieldSource.class && this.field.equals(((OrdFieldSource)o).field); } private static 
final int hcode = OrdFieldSource.class.hashCode(); + @Override public int hashCode() { return hcode + field.hashCode(); }; diff --git a/solr/src/java/org/apache/solr/search/function/PowFloatFunction.java b/solr/src/java/org/apache/solr/search/function/PowFloatFunction.java index c085e11b641..f5f7f68d930 100755 --- a/solr/src/java/org/apache/solr/search/function/PowFloatFunction.java +++ b/solr/src/java/org/apache/solr/search/function/PowFloatFunction.java @@ -28,10 +28,12 @@ public class PowFloatFunction extends DualFloatFunction { super(a,b); } + @Override protected String name() { return "pow"; } + @Override protected float func(int doc, DocValues aVals, DocValues bVals) { return (float)Math.pow(aVals.floatVal(doc), bVals.floatVal(doc)); } diff --git a/solr/src/java/org/apache/solr/search/function/ProductFloatFunction.java b/solr/src/java/org/apache/solr/search/function/ProductFloatFunction.java index 99f668e7e97..fa74cfc50ef 100755 --- a/solr/src/java/org/apache/solr/search/function/ProductFloatFunction.java +++ b/solr/src/java/org/apache/solr/search/function/ProductFloatFunction.java @@ -25,10 +25,12 @@ public class ProductFloatFunction extends MultiFloatFunction { super(sources); } + @Override protected String name() { return "product"; } + @Override protected float func(int doc, DocValues[] valsArr) { float val = 1.0f; for (DocValues vals : valsArr) { diff --git a/solr/src/java/org/apache/solr/search/function/QueryValueSource.java b/solr/src/java/org/apache/solr/search/function/QueryValueSource.java index 8c52d01f029..b7d13efb351 100755 --- a/solr/src/java/org/apache/solr/search/function/QueryValueSource.java +++ b/solr/src/java/org/apache/solr/search/function/QueryValueSource.java @@ -42,6 +42,7 @@ public class QueryValueSource extends ValueSource { public Query getQuery() { return q; } public float getDefaultValue() { return defVal; } + @Override public String description() { return "query(" + q + ",def=" + defVal + ")"; } @@ -51,10 +52,12 @@ public class 
QueryValueSource extends ValueSource { return new QueryDocValues(readerContext, q, defVal, fcontext); } + @Override public int hashCode() { return q.hashCode() * 29; } + @Override public boolean equals(Object o) { if (QueryValueSource.class != o.getClass()) return false; QueryValueSource other = (QueryValueSource)o; @@ -109,6 +112,7 @@ class QueryDocValues extends DocValues { weight = w; } + @Override public float floatVal(int doc) { try { if (doc < lastDocRequested) { @@ -135,18 +139,23 @@ class QueryDocValues extends DocValues { } } + @Override public int intVal(int doc) { return (int)floatVal(doc); } + @Override public long longVal(int doc) { return (long)floatVal(doc); } + @Override public double doubleVal(int doc) { return (double)floatVal(doc); } + @Override public String strVal(int doc) { return Float.toString(floatVal(doc)); } + @Override public String toString(int doc) { return "query(" + q + ",def=" + defVal + ")=" + floatVal(doc); } diff --git a/solr/src/java/org/apache/solr/search/function/RangeMapFloatFunction.java b/solr/src/java/org/apache/solr/search/function/RangeMapFloatFunction.java index 83bedeab8af..1812f0cfccd 100755 --- a/solr/src/java/org/apache/solr/search/function/RangeMapFloatFunction.java +++ b/solr/src/java/org/apache/solr/search/function/RangeMapFloatFunction.java @@ -46,29 +46,37 @@ public class RangeMapFloatFunction extends ValueSource { this.defaultVal = def; } + @Override public String description() { return "map(" + source.description() + "," + min + "," + max + "," + target + ")"; } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { final DocValues vals = source.getValues(context, readerContext); return new DocValues() { + @Override public float floatVal(int doc) { float val = vals.floatVal(doc); return (val>=min && val<=max) ? target : (defaultVal == null ? 
val : defaultVal); } + @Override public int intVal(int doc) { return (int)floatVal(doc); } + @Override public long longVal(int doc) { return (long)floatVal(doc); } + @Override public double doubleVal(int doc) { return (double)floatVal(doc); } + @Override public String strVal(int doc) { return Float.toString(floatVal(doc)); } + @Override public String toString(int doc) { return "map(" + vals.toString(doc) + ",min=" + min + ",max=" + max + ",target=" + target + ")"; } @@ -80,6 +88,7 @@ public class RangeMapFloatFunction extends ValueSource { source.createWeight(context, searcher); } + @Override public int hashCode() { int h = source.hashCode(); h ^= (h << 10) | (h >>> 23); @@ -93,6 +102,7 @@ public class RangeMapFloatFunction extends ValueSource { return h; } + @Override public boolean equals(Object o) { if (RangeMapFloatFunction.class != o.getClass()) return false; RangeMapFloatFunction other = (RangeMapFloatFunction)o; diff --git a/solr/src/java/org/apache/solr/search/function/ReciprocalFloatFunction.java b/solr/src/java/org/apache/solr/search/function/ReciprocalFloatFunction.java index 99627759db8..3b672a2245f 100644 --- a/solr/src/java/org/apache/solr/search/function/ReciprocalFloatFunction.java +++ b/solr/src/java/org/apache/solr/search/function/ReciprocalFloatFunction.java @@ -57,24 +57,31 @@ public class ReciprocalFloatFunction extends ValueSource { this.b=b; } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { final DocValues vals = source.getValues(context, readerContext); return new DocValues() { + @Override public float floatVal(int doc) { return a/(m*vals.floatVal(doc) + b); } + @Override public int intVal(int doc) { return (int)floatVal(doc); } + @Override public long longVal(int doc) { return (long)floatVal(doc); } + @Override public double doubleVal(int doc) { return (double)floatVal(doc); } + @Override public String strVal(int doc) { return Float.toString(floatVal(doc)); } + @Override public 
String toString(int doc) { return Float.toString(a) + "/(" + m + "*float(" + vals.toString(doc) + ')' @@ -88,18 +95,21 @@ public class ReciprocalFloatFunction extends ValueSource { source.createWeight(context, searcher); } + @Override public String description() { return Float.toString(a) + "/(" + m + "*float(" + source.description() + ")" + "+" + b + ')'; } + @Override public int hashCode() { int h = Float.floatToIntBits(a) + Float.floatToIntBits(m); h ^= (h << 13) | (h >>> 20); return h + (Float.floatToIntBits(b)) + source.hashCode(); } + @Override public boolean equals(Object o) { if (ReciprocalFloatFunction.class != o.getClass()) return false; ReciprocalFloatFunction other = (ReciprocalFloatFunction)o; diff --git a/solr/src/java/org/apache/solr/search/function/ReverseOrdFieldSource.java b/solr/src/java/org/apache/solr/search/function/ReverseOrdFieldSource.java index 639bdbba72b..e08e6a819f5 100644 --- a/solr/src/java/org/apache/solr/search/function/ReverseOrdFieldSource.java +++ b/solr/src/java/org/apache/solr/search/function/ReverseOrdFieldSource.java @@ -53,10 +53,12 @@ public class ReverseOrdFieldSource extends ValueSource { this.field = field; } + @Override public String description() { return "rord("+field+')'; } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { final IndexReader topReader = ReaderUtil.getTopLevelContext(readerContext).reader; final int off = readerContext.docBase; @@ -65,41 +67,50 @@ public class ReverseOrdFieldSource extends ValueSource { final int end = sindex.numOrd(); return new DocValues() { + @Override public float floatVal(int doc) { return (float)(end - sindex.getOrd(doc+off)); } + @Override public int intVal(int doc) { return (end - sindex.getOrd(doc+off)); } + @Override public long longVal(int doc) { return (long)(end - sindex.getOrd(doc+off)); } + @Override public int ordVal(int doc) { return (end - sindex.getOrd(doc+off)); } + @Override public int numOrd() { return end; 
} + @Override public double doubleVal(int doc) { return (double)(end - sindex.getOrd(doc+off)); } + @Override public String strVal(int doc) { // the string value of the ordinal, not the string itself return Integer.toString((end - sindex.getOrd(doc+off))); } + @Override public String toString(int doc) { return description() + '=' + strVal(doc); } }; } + @Override public boolean equals(Object o) { if (o.getClass() != ReverseOrdFieldSource.class) return false; ReverseOrdFieldSource other = (ReverseOrdFieldSource)o; @@ -107,6 +118,7 @@ public class ReverseOrdFieldSource extends ValueSource { } private static final int hcode = ReverseOrdFieldSource.class.hashCode(); + @Override public int hashCode() { return hcode + field.hashCode(); }; diff --git a/solr/src/java/org/apache/solr/search/function/ScaleFloatFunction.java b/solr/src/java/org/apache/solr/search/function/ScaleFloatFunction.java index 40e3192325c..d277adfb327 100755 --- a/solr/src/java/org/apache/solr/search/function/ScaleFloatFunction.java +++ b/solr/src/java/org/apache/solr/search/function/ScaleFloatFunction.java @@ -46,6 +46,7 @@ public class ScaleFloatFunction extends ValueSource { this.max = max; } + @Override public String description() { return "scale(" + source.description() + "," + min + "," + max + ")"; } @@ -93,6 +94,7 @@ public class ScaleFloatFunction extends ValueSource { return scaleInfo; } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { ScaleInfo scaleInfo = (ScaleInfo)context.get(source); @@ -107,21 +109,27 @@ public class ScaleFloatFunction extends ValueSource { final DocValues vals = source.getValues(context, readerContext); return new DocValues() { + @Override public float floatVal(int doc) { return (vals.floatVal(doc) - minSource) * scale + min; } + @Override public int intVal(int doc) { return (int)floatVal(doc); } + @Override public long longVal(int doc) { return (long)floatVal(doc); } + @Override public double doubleVal(int 
doc) { return (double)floatVal(doc); } + @Override public String strVal(int doc) { return Float.toString(floatVal(doc)); } + @Override public String toString(int doc) { return "scale(" + vals.toString(doc) + ",toMin=" + min + ",toMax=" + max + ",fromMin=" + minSource @@ -136,6 +144,7 @@ public class ScaleFloatFunction extends ValueSource { source.createWeight(context, searcher); } + @Override public int hashCode() { int h = Float.floatToIntBits(min); h = h*29; @@ -145,6 +154,7 @@ public class ScaleFloatFunction extends ValueSource { return h; } + @Override public boolean equals(Object o) { if (ScaleFloatFunction.class != o.getClass()) return false; ScaleFloatFunction other = (ScaleFloatFunction)o; diff --git a/solr/src/java/org/apache/solr/search/function/ShortFieldSource.java b/solr/src/java/org/apache/solr/search/function/ShortFieldSource.java index fb4e41b5562..701339c0163 100644 --- a/solr/src/java/org/apache/solr/search/function/ShortFieldSource.java +++ b/solr/src/java/org/apache/solr/search/function/ShortFieldSource.java @@ -35,10 +35,12 @@ public class ShortFieldSource extends NumericFieldCacheSource { } + @Override public String description() { return "short(" + field + ')'; } + @Override public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { final ShortValues vals = cache.getShorts(readerContext.reader, field, creator); final short[] arr = vals.values; @@ -54,26 +56,32 @@ public class ShortFieldSource extends NumericFieldCacheSource { return arr[doc]; } + @Override public float floatVal(int doc) { return (float) arr[doc]; } + @Override public int intVal(int doc) { return (int) arr[doc]; } + @Override public long longVal(int doc) { return (long) arr[doc]; } + @Override public double doubleVal(int doc) { return (double) arr[doc]; } + @Override public String strVal(int doc) { return Short.toString(arr[doc]); } + @Override public String toString(int doc) { return description() + '=' + shortVal(doc); } diff --git 
a/solr/src/java/org/apache/solr/search/function/SimpleFloatFunction.java b/solr/src/java/org/apache/solr/search/function/SimpleFloatFunction.java index e4079ac2dbd..355e161c4ad 100755 --- a/solr/src/java/org/apache/solr/search/function/SimpleFloatFunction.java +++ b/solr/src/java/org/apache/solr/search/function/SimpleFloatFunction.java @@ -35,21 +35,27 @@ import java.util.Map; public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { final DocValues vals = source.getValues(context, readerContext); return new DocValues() { + @Override public float floatVal(int doc) { return func(doc, vals); } + @Override public int intVal(int doc) { return (int)floatVal(doc); } + @Override public long longVal(int doc) { return (long)floatVal(doc); } + @Override public double doubleVal(int doc) { return (double)floatVal(doc); } + @Override public String strVal(int doc) { return Float.toString(floatVal(doc)); } + @Override public String toString(int doc) { return name() + '(' + vals.toString(doc) + ')'; } diff --git a/solr/src/java/org/apache/solr/search/function/SingleFunction.java b/solr/src/java/org/apache/solr/search/function/SingleFunction.java index e040dc5b250..05bba175858 100755 --- a/solr/src/java/org/apache/solr/search/function/SingleFunction.java +++ b/solr/src/java/org/apache/solr/search/function/SingleFunction.java @@ -33,14 +33,17 @@ import java.util.Map; protected abstract String name(); + @Override public String description() { return name() + '(' + source.description() + ')'; } + @Override public int hashCode() { return source.hashCode() + name().hashCode(); } + @Override public boolean equals(Object o) { if (this.getClass() != o.getClass()) return false; SingleFunction other = (SingleFunction)o; diff --git a/solr/src/java/org/apache/solr/search/function/StringIndexDocValues.java b/solr/src/java/org/apache/solr/search/function/StringIndexDocValues.java index f533a7319fc..55891ca36fd 100755 --- 
a/solr/src/java/org/apache/solr/search/function/StringIndexDocValues.java +++ b/solr/src/java/org/apache/solr/search/function/StringIndexDocValues.java @@ -89,6 +89,7 @@ public abstract class StringIndexDocValues extends DocValues { }; } + @Override public String toString(int doc) { return vs.description() + '=' + strVal(doc); } diff --git a/solr/src/java/org/apache/solr/search/function/SumFloatFunction.java b/solr/src/java/org/apache/solr/search/function/SumFloatFunction.java index a0a6c17a68a..bd838c845ef 100755 --- a/solr/src/java/org/apache/solr/search/function/SumFloatFunction.java +++ b/solr/src/java/org/apache/solr/search/function/SumFloatFunction.java @@ -30,6 +30,7 @@ public class SumFloatFunction extends MultiFloatFunction { return "sum"; } + @Override protected float func(int doc, DocValues[] valsArr) { float val = 0.0f; for (DocValues vals : valsArr) { diff --git a/solr/src/java/org/apache/solr/search/function/ValueSource.java b/solr/src/java/org/apache/solr/search/function/ValueSource.java index 7674b803089..ffcdac97a7d 100644 --- a/solr/src/java/org/apache/solr/search/function/ValueSource.java +++ b/solr/src/java/org/apache/solr/search/function/ValueSource.java @@ -49,8 +49,10 @@ public abstract class ValueSource implements Serializable { */ public abstract DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException; + @Override public abstract boolean equals(Object o); + @Override public abstract int hashCode(); /** @@ -58,6 +60,7 @@ public abstract class ValueSource implements Serializable { */ public abstract String description(); + @Override public String toString() { return description(); } @@ -127,6 +130,7 @@ public abstract class ValueSource implements Serializable { this.context = context; } + @Override public FieldComparator newComparator(String fieldname, int numHits, int sortPos, boolean reversed) throws IOException { return new ValueSourceComparator(context, numHits); @@ -149,6 +153,7 @@ public abstract class 
ValueSource implements Serializable { values = new double[numHits]; } + @Override public int compare(int slot1, int slot2) { final double v1 = values[slot1]; final double v2 = values[slot2]; @@ -162,6 +167,7 @@ public abstract class ValueSource implements Serializable { } + @Override public int compareBottom(int doc) { final double v2 = docVals.doubleVal(doc); if (bottom > v2) { @@ -173,19 +179,23 @@ public abstract class ValueSource implements Serializable { } } + @Override public void copy(int slot, int doc) { values[slot] = docVals.doubleVal(doc); } + @Override public FieldComparator setNextReader(AtomicReaderContext context) throws IOException { docVals = getValues(fcontext, context); return this; } + @Override public void setBottom(final int bottom) { this.bottom = values[bottom]; } + @Override public Comparable value(int slot) { return values[slot]; } diff --git a/solr/src/java/org/apache/solr/search/function/ValueSourceRangeFilter.java b/solr/src/java/org/apache/solr/search/function/ValueSourceRangeFilter.java index 151b3878111..d17d7d1db28 100755 --- a/solr/src/java/org/apache/solr/search/function/ValueSourceRangeFilter.java +++ b/solr/src/java/org/apache/solr/search/function/ValueSourceRangeFilter.java @@ -49,9 +49,11 @@ public class ValueSourceRangeFilter extends SolrFilter { this.includeUpper = upperVal != null && includeUpper; } + @Override public DocIdSet getDocIdSet(final Map context, final AtomicReaderContext readerContext) throws IOException { return new DocIdSet() { - public DocIdSetIterator iterator() throws IOException { + @Override + public DocIdSetIterator iterator() throws IOException { return valueSource.getValues(context, readerContext).getRangeScorer(readerContext.reader, lowerVal, upperVal, includeLower, includeUpper); } }; @@ -62,6 +64,7 @@ public class ValueSourceRangeFilter extends SolrFilter { valueSource.createWeight(context, searcher); } + @Override public String toString() { StringBuilder sb = new StringBuilder(); 
sb.append("frange("); @@ -75,6 +78,7 @@ public class ValueSourceRangeFilter extends SolrFilter { return sb.toString(); } + @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof ValueSourceRangeFilter)) return false; @@ -89,6 +93,7 @@ public class ValueSourceRangeFilter extends SolrFilter { return true; } + @Override public int hashCode() { int h = valueSource.hashCode(); h += lowerVal != null ? lowerVal.hashCode() : 0x572353db; diff --git a/solr/src/java/org/apache/solr/search/function/VectorValueSource.java b/solr/src/java/org/apache/solr/search/function/VectorValueSource.java index 523e54670a3..4c6aa8a347f 100644 --- a/solr/src/java/org/apache/solr/search/function/VectorValueSource.java +++ b/solr/src/java/org/apache/solr/search/function/VectorValueSource.java @@ -44,6 +44,7 @@ public class VectorValueSource extends MultiValueSource { return sources; } + @Override public int dimension() { return sources.size(); } @@ -97,6 +98,7 @@ public class VectorValueSource extends MultiValueSource { vals[0] = x.strVal(doc); vals[1] = y.strVal(doc); } + @Override public String toString(int doc) { return name() + "(" + x.toString(doc) + "," + y.toString(doc) + ")"; } @@ -178,12 +180,14 @@ public class VectorValueSource extends MultiValueSource { }; } + @Override public void createWeight(Map context, IndexSearcher searcher) throws IOException { for (ValueSource source : sources) source.createWeight(context, searcher); } + @Override public String description() { StringBuilder sb = new StringBuilder(); sb.append(name()).append('('); diff --git a/solr/src/java/org/apache/solr/search/function/distance/GeohashHaversineFunction.java b/solr/src/java/org/apache/solr/search/function/distance/GeohashHaversineFunction.java index 728b528e578..6a94d2f24f9 100644 --- a/solr/src/java/org/apache/solr/search/function/distance/GeohashHaversineFunction.java +++ b/solr/src/java/org/apache/solr/search/function/distance/GeohashHaversineFunction.java @@ -59,22 
+59,27 @@ public class GeohashHaversineFunction extends ValueSource { final DocValues gh2DV = geoHash2.getValues(context, readerContext); return new DocValues() { + @Override public float floatVal(int doc) { return (float) doubleVal(doc); } + @Override public int intVal(int doc) { return (int) doubleVal(doc); } + @Override public long longVal(int doc) { return (long) doubleVal(doc); } + @Override public double doubleVal(int doc) { return distance(doc, gh1DV, gh2DV); } + @Override public String strVal(int doc) { return Double.toString(doubleVal(doc)); } diff --git a/solr/src/java/org/apache/solr/search/function/distance/HaversineConstFunction.java b/solr/src/java/org/apache/solr/search/function/distance/HaversineConstFunction.java index 8ee45f57064..197407e970f 100755 --- a/solr/src/java/org/apache/solr/search/function/distance/HaversineConstFunction.java +++ b/solr/src/java/org/apache/solr/search/function/distance/HaversineConstFunction.java @@ -39,6 +39,7 @@ import java.util.Map; public class HaversineConstFunction extends ValueSource { public static ValueSourceParser parser = new ValueSourceParser() { + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { // TODO: dispatch through SpatialQueriable in the future? 
@@ -198,18 +199,22 @@ public class HaversineConstFunction extends ValueSource { final double latCenterRad_cos = this.latCenterRad_cos; return new DocValues() { + @Override public float floatVal(int doc) { return (float) doubleVal(doc); } + @Override public int intVal(int doc) { return (int) doubleVal(doc); } + @Override public long longVal(int doc) { return (long) doubleVal(doc); } + @Override public double doubleVal(int doc) { double latRad = latVals.doubleVal(doc) * DistanceUtils.DEGREES_TO_RADIANS; double lonRad = lonVals.doubleVal(doc) * DistanceUtils.DEGREES_TO_RADIANS; @@ -222,6 +227,7 @@ public class HaversineConstFunction extends ValueSource { return (EARTH_MEAN_DIAMETER * Math.atan2(Math.sqrt(h), Math.sqrt(1 - h))); } + @Override public String strVal(int doc) { return Double.toString(doubleVal(doc)); } @@ -260,6 +266,7 @@ public class HaversineConstFunction extends ValueSource { return result; } + @Override public String description() { return name() + '(' + p2 + ',' + latCenter + ',' + lonCenter + ')'; } diff --git a/solr/src/java/org/apache/solr/search/function/distance/HaversineFunction.java b/solr/src/java/org/apache/solr/search/function/distance/HaversineFunction.java index f54c3aa9ba4..734543e0198 100644 --- a/solr/src/java/org/apache/solr/search/function/distance/HaversineFunction.java +++ b/solr/src/java/org/apache/solr/search/function/distance/HaversineFunction.java @@ -100,22 +100,27 @@ public class HaversineFunction extends ValueSource { final DocValues vals2 = p2.getValues(context, readerContext); return new DocValues() { + @Override public float floatVal(int doc) { return (float) doubleVal(doc); } + @Override public int intVal(int doc) { return (int) doubleVal(doc); } + @Override public long longVal(int doc) { return (long) doubleVal(doc); } + @Override public double doubleVal(int doc) { return distance(doc, vals1, vals2); } + @Override public String strVal(int doc) { return Double.toString(doubleVal(doc)); } @@ -159,6 +164,7 @@ public class 
HaversineFunction extends ValueSource { return result; } + @Override public String description() { StringBuilder sb = new StringBuilder(); sb.append(name()).append('('); diff --git a/solr/src/java/org/apache/solr/search/function/distance/SquaredEuclideanFunction.java b/solr/src/java/org/apache/solr/search/function/distance/SquaredEuclideanFunction.java index 729e7de9e3c..37401b18b4e 100644 --- a/solr/src/java/org/apache/solr/search/function/distance/SquaredEuclideanFunction.java +++ b/solr/src/java/org/apache/solr/search/function/distance/SquaredEuclideanFunction.java @@ -33,6 +33,7 @@ public class SquaredEuclideanFunction extends VectorDistanceFunction { } + @Override protected String name() { return name; @@ -41,6 +42,7 @@ public class SquaredEuclideanFunction extends VectorDistanceFunction { /** * @param doc The doc to score */ + @Override protected double distance(int doc, DocValues dv1, DocValues dv2) { double[] vals1 = new double[source1.dimension()]; diff --git a/solr/src/java/org/apache/solr/search/function/distance/StringDistanceFunction.java b/solr/src/java/org/apache/solr/search/function/distance/StringDistanceFunction.java index 222ef314b7a..43d4552fc83 100644 --- a/solr/src/java/org/apache/solr/search/function/distance/StringDistanceFunction.java +++ b/solr/src/java/org/apache/solr/search/function/distance/StringDistanceFunction.java @@ -53,18 +53,22 @@ public class StringDistanceFunction extends ValueSource { final DocValues str2DV = str2.getValues(context, readerContext); return new DocValues() { + @Override public float floatVal(int doc) { return dist.getDistance(str1DV.strVal(doc), str2DV.strVal(doc)); } + @Override public int intVal(int doc) { return (int) doubleVal(doc); } + @Override public long longVal(int doc) { return (long) doubleVal(doc); } + @Override public double doubleVal(int doc) { return (double) floatVal(doc); } @@ -81,6 +85,7 @@ public class StringDistanceFunction extends ValueSource { }; } + @Override public String description() { 
StringBuilder sb = new StringBuilder(); sb.append("strdist").append('('); diff --git a/solr/src/java/org/apache/solr/search/function/distance/VectorDistanceFunction.java b/solr/src/java/org/apache/solr/search/function/distance/VectorDistanceFunction.java index 3b86177bc04..5a7071927d9 100644 --- a/solr/src/java/org/apache/solr/search/function/distance/VectorDistanceFunction.java +++ b/solr/src/java/org/apache/solr/search/function/distance/VectorDistanceFunction.java @@ -96,22 +96,27 @@ public class VectorDistanceFunction extends ValueSource { return (short) doubleVal(doc); } + @Override public float floatVal(int doc) { return (float) doubleVal(doc); } + @Override public int intVal(int doc) { return (int) doubleVal(doc); } + @Override public long longVal(int doc) { return (long) doubleVal(doc); } + @Override public double doubleVal(int doc) { return distance(doc, vals1, vals2); } + @Override public String strVal(int doc) { return Double.toString(doubleVal(doc)); } diff --git a/solr/src/java/org/apache/solr/spelling/AbstractLuceneSpellChecker.java b/solr/src/java/org/apache/solr/spelling/AbstractLuceneSpellChecker.java index 5400046f161..2a53e0f58f1 100644 --- a/solr/src/java/org/apache/solr/spelling/AbstractLuceneSpellChecker.java +++ b/solr/src/java/org/apache/solr/spelling/AbstractLuceneSpellChecker.java @@ -91,6 +91,7 @@ public abstract class AbstractLuceneSpellChecker extends SolrSpellChecker { protected StringDistance sd; + @Override public String init(NamedList config, SolrCore core) { super.init(config, core); indexDir = (String) config.get(INDEX_DIR); @@ -213,6 +214,7 @@ public abstract class AbstractLuceneSpellChecker extends SolrSpellChecker { return reader; } + @Override public void reload(SolrCore core, SolrIndexSearcher searcher) throws IOException { spellChecker.setSpellIndex(index); diff --git a/solr/src/java/org/apache/solr/spelling/FileBasedSpellChecker.java b/solr/src/java/org/apache/solr/spelling/FileBasedSpellChecker.java index 
fa0939d9456..6d2a4f3fa5f 100644 --- a/solr/src/java/org/apache/solr/spelling/FileBasedSpellChecker.java +++ b/solr/src/java/org/apache/solr/spelling/FileBasedSpellChecker.java @@ -50,12 +50,14 @@ public class FileBasedSpellChecker extends AbstractLuceneSpellChecker { private String characterEncoding; public static final String WORD_FIELD_NAME = "word"; + @Override public String init(NamedList config, SolrCore core) { super.init(config, core); characterEncoding = (String) config.get(SOURCE_FILE_CHAR_ENCODING); return name; } + @Override public void build(SolrCore core, SolrIndexSearcher searcher) { try { loadExternalFileDictionary(core); diff --git a/solr/src/java/org/apache/solr/spelling/IndexBasedSpellChecker.java b/solr/src/java/org/apache/solr/spelling/IndexBasedSpellChecker.java index bac6c8cbb47..c81180210f2 100644 --- a/solr/src/java/org/apache/solr/spelling/IndexBasedSpellChecker.java +++ b/solr/src/java/org/apache/solr/spelling/IndexBasedSpellChecker.java @@ -49,6 +49,7 @@ public class IndexBasedSpellChecker extends AbstractLuceneSpellChecker { protected float threshold; protected IndexReader reader; + @Override public String init(NamedList config, SolrCore core) { super.init(config, core); threshold = config.get(THRESHOLD_TOKEN_FREQUENCY) == null ? 
0.0f @@ -68,6 +69,7 @@ public class IndexBasedSpellChecker extends AbstractLuceneSpellChecker { } } + @Override public void build(SolrCore core, SolrIndexSearcher searcher) { IndexReader reader = null; try { diff --git a/solr/src/java/org/apache/solr/spelling/SpellingQueryConverter.java b/solr/src/java/org/apache/solr/spelling/SpellingQueryConverter.java index 0825781bdb0..688f4a64cce 100644 --- a/solr/src/java/org/apache/solr/spelling/SpellingQueryConverter.java +++ b/solr/src/java/org/apache/solr/spelling/SpellingQueryConverter.java @@ -91,6 +91,7 @@ public class SpellingQueryConverter extends QueryConverter { * @param original the original query string * @return a Collection of Lucene Tokens */ + @Override public Collection convert(String original) { if (original == null) { // this can happen with q.alt = and no query return Collections.emptyList(); diff --git a/solr/src/java/org/apache/solr/spelling/suggest/Lookup.java b/solr/src/java/org/apache/solr/spelling/suggest/Lookup.java index e61c600c037..a697fa5df00 100644 --- a/solr/src/java/org/apache/solr/spelling/suggest/Lookup.java +++ b/solr/src/java/org/apache/solr/spelling/suggest/Lookup.java @@ -25,6 +25,7 @@ public abstract class Lookup { this.value = value; } + @Override public String toString() { return key + "/" + value; } diff --git a/solr/src/java/org/apache/solr/update/CommitUpdateCommand.java b/solr/src/java/org/apache/solr/update/CommitUpdateCommand.java index 8d33e0280b1..a6bdb39cd67 100644 --- a/solr/src/java/org/apache/solr/update/CommitUpdateCommand.java +++ b/solr/src/java/org/apache/solr/update/CommitUpdateCommand.java @@ -39,6 +39,7 @@ public class CommitUpdateCommand extends UpdateCommand { super("commit", req); this.optimize=optimize; } + @Override public String toString() { return "commit(optimize="+optimize +",waitFlush="+waitFlush diff --git a/solr/src/java/org/apache/solr/update/DeleteUpdateCommand.java b/solr/src/java/org/apache/solr/update/DeleteUpdateCommand.java index 
ff2ad4764be..79b865a62ea 100644 --- a/solr/src/java/org/apache/solr/update/DeleteUpdateCommand.java +++ b/solr/src/java/org/apache/solr/update/DeleteUpdateCommand.java @@ -30,6 +30,7 @@ public class DeleteUpdateCommand extends UpdateCommand { super("delete", req); } + @Override public String toString() { StringBuilder sb = new StringBuilder(commandName); sb.append(':'); diff --git a/solr/src/java/org/apache/solr/update/DirectUpdateHandler2.java b/solr/src/java/org/apache/solr/update/DirectUpdateHandler2.java index b096b383a76..55f9ac77fc0 100644 --- a/solr/src/java/org/apache/solr/update/DirectUpdateHandler2.java +++ b/solr/src/java/org/apache/solr/update/DirectUpdateHandler2.java @@ -134,6 +134,7 @@ public class DirectUpdateHandler2 extends UpdateHandler { } } + @Override public int addDoc(AddUpdateCommand cmd) throws IOException { addCommands.incrementAndGet(); addCommandsCumulative.incrementAndGet(); @@ -202,6 +203,7 @@ public class DirectUpdateHandler2 extends UpdateHandler { // could return the number of docs deleted, but is that always possible to know??? + @Override public void delete(DeleteUpdateCommand cmd) throws IOException { deleteByIdCommands.incrementAndGet(); deleteByIdCommandsCumulative.incrementAndGet(); @@ -221,6 +223,7 @@ public class DirectUpdateHandler2 extends UpdateHandler { // why not return number of docs deleted? // Depending on implementation, we may not be able to immediately determine the num... 
+ @Override public void deleteByQuery(DeleteUpdateCommand cmd) throws IOException { deleteByQueryCommands.incrementAndGet(); deleteByQueryCommandsCumulative.incrementAndGet(); @@ -263,6 +266,7 @@ public class DirectUpdateHandler2 extends UpdateHandler { } } + @Override public int mergeIndexes(MergeIndexesCommand cmd) throws IOException { mergeIndexesCommands.incrementAndGet(); int rc = -1; @@ -300,6 +304,7 @@ public class DirectUpdateHandler2 extends UpdateHandler { } } + @Override public void commit(CommitUpdateCommand cmd) throws IOException { if (cmd.optimize) { @@ -369,6 +374,7 @@ public class DirectUpdateHandler2 extends UpdateHandler { /** * @since Solr 1.4 */ + @Override public void rollback(RollbackUpdateCommand cmd) throws IOException { rollbackCommands.incrementAndGet(); @@ -402,6 +408,7 @@ public class DirectUpdateHandler2 extends UpdateHandler { } + @Override public void close() throws IOException { log.info("closing " + this); iwCommit.lock(); @@ -547,6 +554,7 @@ public class DirectUpdateHandler2 extends UpdateHandler { // to facilitate testing: blocks if called during commit public synchronized int getCommitCount() { return autoCommitCount; } + @Override public String toString() { if(timeUpperBound > 0 || docsUpperBound > 0) { return @@ -619,6 +627,7 @@ public class DirectUpdateHandler2 extends UpdateHandler { return lst; } + @Override public String toString() { return "DirectUpdateHandler2" + getStatistics(); } diff --git a/solr/src/java/org/apache/solr/update/SolrIndexWriter.java b/solr/src/java/org/apache/solr/update/SolrIndexWriter.java index 6073a6a42d7..476104b8bb6 100644 --- a/solr/src/java/org/apache/solr/update/SolrIndexWriter.java +++ b/solr/src/java/org/apache/solr/update/SolrIndexWriter.java @@ -132,6 +132,7 @@ public class SolrIndexWriter extends IndexWriter { * **** */ private volatile boolean isClosed = false; + @Override public void close() throws IOException { log.debug("Closing Writer " + name); try { @@ -178,6 +179,7 @@ public class 
SolrIndexWriter extends IndexWriter { // We might ideally want to override print(String) as well, but // looking through the code that writes to infoStream, it appears // that all the classes except CheckIndex just use println. + @Override public void println(String x) { print(dateFormat.format(new Date()) + " "); super.println(x); diff --git a/solr/src/java/org/apache/solr/update/UpdateCommand.java b/solr/src/java/org/apache/solr/update/UpdateCommand.java index 83e7ca99bf9..0e8f8bc1e07 100644 --- a/solr/src/java/org/apache/solr/update/UpdateCommand.java +++ b/solr/src/java/org/apache/solr/update/UpdateCommand.java @@ -33,6 +33,7 @@ import org.apache.solr.request.SolrQueryRequest; this.commandName = commandName; } + @Override public String toString() { return commandName; } diff --git a/solr/src/java/org/apache/solr/update/processor/Lookup3Signature.java b/solr/src/java/org/apache/solr/update/processor/Lookup3Signature.java index 1960ffac2b2..d2e5353ef9b 100755 --- a/solr/src/java/org/apache/solr/update/processor/Lookup3Signature.java +++ b/solr/src/java/org/apache/solr/update/processor/Lookup3Signature.java @@ -25,10 +25,12 @@ public class Lookup3Signature extends Signature { public Lookup3Signature() { } + @Override public void add(String content) { hash = Hash.lookup3ycs64(content,0,content.length(),hash); } + @Override public byte[] getSignature() { return new byte[]{(byte)(hash>>56),(byte)(hash>>48),(byte)(hash>>40),(byte)(hash>>32),(byte)(hash>>24),(byte)(hash>>16),(byte)(hash>>8),(byte)(hash>>0)}; } diff --git a/solr/src/java/org/apache/solr/update/processor/MD5Signature.java b/solr/src/java/org/apache/solr/update/processor/MD5Signature.java index c7dec90dfc9..4ad6f37d110 100755 --- a/solr/src/java/org/apache/solr/update/processor/MD5Signature.java +++ b/solr/src/java/org/apache/solr/update/processor/MD5Signature.java @@ -26,6 +26,7 @@ import org.slf4j.LoggerFactory; public class MD5Signature extends Signature { protected final static Logger log = 
LoggerFactory.getLogger(MD5Signature.class); private static ThreadLocal DIGESTER_FACTORY = new ThreadLocal() { + @Override protected MessageDigest initialValue() { try { return MessageDigest.getInstance("MD5"); @@ -41,6 +42,7 @@ public class MD5Signature extends Signature { digester.reset(); } + @Override public void add(String content) { try { digester.update(content.getBytes("UTF-8")); @@ -51,6 +53,7 @@ public class MD5Signature extends Signature { } } + @Override public byte[] getSignature() { return digester.digest(); } diff --git a/solr/src/java/org/apache/solr/update/processor/TextProfileSignature.java b/solr/src/java/org/apache/solr/update/processor/TextProfileSignature.java index c953a2db2b5..e94a7332bc8 100755 --- a/solr/src/java/org/apache/solr/update/processor/TextProfileSignature.java +++ b/solr/src/java/org/apache/solr/update/processor/TextProfileSignature.java @@ -51,12 +51,14 @@ public class TextProfileSignature extends MD5Signature { private float quantRate; private float minTokenLen; + @Override public void init(SolrParams params) { quantRate = params.getFloat("quantRate", 0.01f); minTokenLen = params.getInt("minTokenLen", 2); } + @Override public byte[] getSignature() { return super.getSignature(); } @@ -144,6 +146,7 @@ public class TextProfileSignature extends MD5Signature { this.val = val; } + @Override public String toString() { return val + " " + cnt; } diff --git a/solr/src/java/org/apache/solr/util/BoundedTreeSet.java b/solr/src/java/org/apache/solr/util/BoundedTreeSet.java index dda5fdab14d..4abe7b44145 100644 --- a/solr/src/java/org/apache/solr/util/BoundedTreeSet.java +++ b/solr/src/java/org/apache/solr/util/BoundedTreeSet.java @@ -55,11 +55,13 @@ public class BoundedTreeSet extends TreeSet { remove(last()); } } + @Override public boolean add(E item) { boolean out = super.add(item); adjust(); return out; } + @Override public boolean addAll(Collection c) { boolean out = super.addAll(c); adjust(); diff --git 
a/solr/src/java/org/apache/solr/util/SolrPluginUtils.java b/solr/src/java/org/apache/solr/util/SolrPluginUtils.java index f98b4308b92..6cdf1d6c3fb 100644 --- a/solr/src/java/org/apache/solr/util/SolrPluginUtils.java +++ b/solr/src/java/org/apache/solr/util/SolrPluginUtils.java @@ -703,6 +703,7 @@ public class SolrPluginUtils { * DisjunctionMaxQuery. (so yes: aliases which point at other * aliases should work) */ + @Override protected Query getFieldQuery(String field, String queryText, boolean quoted) throws ParseException { diff --git a/solr/src/solrj/org/apache/solr/client/solrj/impl/BinaryRequestWriter.java b/solr/src/solrj/org/apache/solr/client/solrj/impl/BinaryRequestWriter.java index a2fbddc77d1..37bf449a76c 100644 --- a/solr/src/solrj/org/apache/solr/client/solrj/impl/BinaryRequestWriter.java +++ b/solr/src/solrj/org/apache/solr/client/solrj/impl/BinaryRequestWriter.java @@ -36,6 +36,7 @@ import java.util.List; */ public class BinaryRequestWriter extends RequestWriter { + @Override public Collection getContentStreams(SolrRequest req) throws IOException { if (req instanceof UpdateRequest) { UpdateRequest updateRequest = (UpdateRequest) req; @@ -55,10 +56,12 @@ public class BinaryRequestWriter extends RequestWriter { } + @Override public String getUpdateContentType() { return "application/octet-stream"; } + @Override public ContentStream getContentStream(final UpdateRequest request) throws IOException { final BAOS baos = new BAOS(); new JavaBinUpdateRequestCodec().marshal(request, baos); @@ -91,6 +94,7 @@ public class BinaryRequestWriter extends RequestWriter { } + @Override public void write(SolrRequest request, OutputStream os) throws IOException { if (request instanceof UpdateRequest) { UpdateRequest updateRequest = (UpdateRequest) request; @@ -106,6 +110,7 @@ public class BinaryRequestWriter extends RequestWriter { } } + @Override public String getPath(SolrRequest req) { if (req instanceof UpdateRequest) { return "/update/javabin"; diff --git 
a/solr/src/solrj/org/apache/solr/client/solrj/impl/BinaryResponseParser.java b/solr/src/solrj/org/apache/solr/client/solrj/impl/BinaryResponseParser.java index 4b29ce8b52b..13794601adb 100755 --- a/solr/src/solrj/org/apache/solr/client/solrj/impl/BinaryResponseParser.java +++ b/solr/src/solrj/org/apache/solr/client/solrj/impl/BinaryResponseParser.java @@ -30,10 +30,12 @@ import java.io.Reader; * @since solr 1.3 */ public class BinaryResponseParser extends ResponseParser { + @Override public String getWriterType() { return "javabin"; } + @Override public NamedList processResponse(InputStream body, String encoding) { try { return (NamedList) new JavaBinCodec().unmarshal(body); @@ -44,10 +46,12 @@ public class BinaryResponseParser extends ResponseParser { } + @Override public String getVersion() { return "2"; } + @Override public NamedList processResponse(Reader reader) { throw new RuntimeException("Cannot handle character stream"); } diff --git a/solr/src/solrj/org/apache/solr/client/solrj/impl/LBHttpSolrServer.java b/solr/src/solrj/org/apache/solr/client/solrj/impl/LBHttpSolrServer.java index b13b708e0bf..a12a9397e9b 100644 --- a/solr/src/solrj/org/apache/solr/client/solrj/impl/LBHttpSolrServer.java +++ b/solr/src/solrj/org/apache/solr/client/solrj/impl/LBHttpSolrServer.java @@ -105,6 +105,7 @@ public class LBHttpSolrServer extends SolrServer { this.solrServer = solrServer; } + @Override public String toString() { return solrServer.getBaseURL(); } @@ -376,6 +377,7 @@ public class LBHttpSolrServer extends SolrServer { * @throws SolrServerException * @throws IOException */ + @Override public NamedList request(final SolrRequest request) throws SolrServerException, IOException { Exception ex = null; @@ -535,6 +537,7 @@ public class LBHttpSolrServer extends SolrServer { return httpClient; } + @Override protected void finalize() throws Throwable { try { if(this.aliveCheckExecutor!=null) diff --git 
a/solr/src/solrj/org/apache/solr/client/solrj/impl/StreamingBinaryResponseParser.java b/solr/src/solrj/org/apache/solr/client/solrj/impl/StreamingBinaryResponseParser.java index bcbf378552f..b0b90864296 100644 --- a/solr/src/solrj/org/apache/solr/client/solrj/impl/StreamingBinaryResponseParser.java +++ b/solr/src/solrj/org/apache/solr/client/solrj/impl/StreamingBinaryResponseParser.java @@ -48,12 +48,14 @@ public class StreamingBinaryResponseParser extends BinaryResponseParser { try { JavaBinCodec codec = new JavaBinCodec() { + @Override public SolrDocument readSolrDocument(FastInputStream dis) throws IOException { SolrDocument doc = super.readSolrDocument(dis); callback.streamSolrDocument( doc ); return null; } + @Override public SolrDocumentList readSolrDocumentList(FastInputStream dis) throws IOException { SolrDocumentList solrDocs = new SolrDocumentList(); List list = (List) readVal(dis); diff --git a/solr/src/solrj/org/apache/solr/client/solrj/request/JavaBinUpdateRequestCodec.java b/solr/src/solrj/org/apache/solr/client/solrj/request/JavaBinUpdateRequestCodec.java index a45412584f8..0f5842e632e 100644 --- a/solr/src/solrj/org/apache/solr/client/solrj/request/JavaBinUpdateRequestCodec.java +++ b/solr/src/solrj/org/apache/solr/client/solrj/request/JavaBinUpdateRequestCodec.java @@ -67,6 +67,7 @@ public class JavaBinUpdateRequestCodec { nl.add("delByQ", updateRequest.getDeleteQuery()); nl.add("docs", docIter); new JavaBinCodec(){ + @Override public void writeMap(Map val) throws IOException { if (val instanceof SolrInputDocument) { writeVal(solrInputDocumentToList((SolrInputDocument) val)); @@ -101,6 +102,7 @@ public class JavaBinUpdateRequestCodec { // is ever refactored, this will not work. 
private boolean seenOuterMostDocIterator = false; + @Override public NamedList readNamedList(FastInputStream dis) throws IOException { int sz = readSize(dis); NamedList nl = new NamedList(); @@ -115,6 +117,7 @@ public class JavaBinUpdateRequestCodec { return nl; } + @Override public List readIterator(FastInputStream fis) throws IOException { // default behavior for reading any regular Iterator in the stream diff --git a/solr/src/solrj/org/apache/solr/client/solrj/response/FieldStatsInfo.java b/solr/src/solrj/org/apache/solr/client/solrj/response/FieldStatsInfo.java index f2064dd5b3c..d0154c29698 100644 --- a/solr/src/solrj/org/apache/solr/client/solrj/response/FieldStatsInfo.java +++ b/solr/src/solrj/org/apache/solr/client/solrj/response/FieldStatsInfo.java @@ -94,6 +94,7 @@ public class FieldStatsInfo implements Serializable { } } + @Override public String toString() { StringBuilder sb = new StringBuilder(); diff --git a/solr/src/test/org/apache/solr/BaseDistributedSearchTestCase.java b/solr/src/test/org/apache/solr/BaseDistributedSearchTestCase.java index 1dc858ad0ef..1934bf7cd31 100644 --- a/solr/src/test/org/apache/solr/BaseDistributedSearchTestCase.java +++ b/solr/src/test/org/apache/solr/BaseDistributedSearchTestCase.java @@ -98,24 +98,28 @@ public abstract class BaseDistributedSearchTestCase extends SolrTestCaseJ4 { public static Logger log = LoggerFactory.getLogger(BaseDistributedSearchTestCase.class); public static RandVal rint = new RandVal() { + @Override public Object val() { return r.nextInt(); } }; public static RandVal rlong = new RandVal() { + @Override public Object val() { return r.nextLong(); } }; public static RandVal rfloat = new RandVal() { + @Override public Object val() { return r.nextFloat(); } }; public static RandVal rdouble = new RandVal() { + @Override public Object val() { return r.nextDouble(); } @@ -337,6 +341,7 @@ public abstract class BaseDistributedSearchTestCase extends SolrTestCaseJ4 { Thread[] threads = new Thread[nThreads]; 
for (int i = 0; i < threads.length; i++) { threads[i] = new Thread() { + @Override public void run() { for (int j = 0; j < stress; j++) { int which = r.nextInt(clients.size()); @@ -623,6 +628,7 @@ public abstract class BaseDistributedSearchTestCase extends SolrTestCaseJ4 { public static class RandDate extends RandVal { public static TrieDateField df = new TrieDateField(); + @Override public Object val() { long v = r.nextLong(); Date d = new Date(v); diff --git a/solr/src/test/org/apache/solr/BasicFunctionalityTest.java b/solr/src/test/org/apache/solr/BasicFunctionalityTest.java index 7662ca0eff1..3847825c7a5 100644 --- a/solr/src/test/org/apache/solr/BasicFunctionalityTest.java +++ b/solr/src/test/org/apache/solr/BasicFunctionalityTest.java @@ -221,10 +221,15 @@ public class BasicFunctionalityTest extends SolrTestCaseJ4 { public void testRequestHandlerBaseException() { final String tmp = "BOO! ignore_exception"; SolrRequestHandler handler = new RequestHandlerBase() { + @Override public String getDescription() { return tmp; } + @Override public String getSourceId() { return tmp; } + @Override public String getSource() { return tmp; } + @Override public String getVersion() { return tmp; } + @Override public void handleRequestBody ( SolrQueryRequest req, SolrQueryResponse rsp ) { throw new RuntimeException(tmp); diff --git a/solr/src/test/org/apache/solr/SolrTestCaseJ4.java b/solr/src/test/org/apache/solr/SolrTestCaseJ4.java index 9eb858da5a6..9efe83751eb 100755 --- a/solr/src/test/org/apache/solr/SolrTestCaseJ4.java +++ b/solr/src/test/org/apache/solr/SolrTestCaseJ4.java @@ -602,6 +602,7 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase { /** Neccessary to make method signatures un-ambiguous */ public static class XmlDoc { public String xml; + @Override public String toString() { return xml; } } @@ -727,6 +728,7 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase { public int order; // the order this document was added to the index + @Override 
public String toString() { return "Doc("+order+"):"+fields.toString(); } @@ -780,6 +782,7 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase { public static class Fld { public FldType ftype; public List vals; + @Override public String toString() { return ftype.fname + "=" + (vals.size()==1 ? vals.get(0).toString() : vals.toString()); } diff --git a/solr/src/test/org/apache/solr/TestTrie.java b/solr/src/test/org/apache/solr/TestTrie.java index 9a818099609..861c349b5fa 100644 --- a/solr/src/test/org/apache/solr/TestTrie.java +++ b/solr/src/test/org/apache/solr/TestTrie.java @@ -41,6 +41,7 @@ public class TestTrie extends SolrTestCaseJ4 { initCore("solrconfig.xml","schema-trie.xml"); } + @Override @After public void tearDown() throws Exception { clearIndex(); diff --git a/solr/src/test/org/apache/solr/analysis/TestRemoveDuplicatesTokenFilterFactory.java b/solr/src/test/org/apache/solr/analysis/TestRemoveDuplicatesTokenFilterFactory.java index 6eb8a17f2c0..7795fd640f4 100644 --- a/solr/src/test/org/apache/solr/analysis/TestRemoveDuplicatesTokenFilterFactory.java +++ b/solr/src/test/org/apache/solr/analysis/TestRemoveDuplicatesTokenFilterFactory.java @@ -48,6 +48,7 @@ public class TestRemoveDuplicatesTokenFilterFactory extends BaseTokenTestCase { CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class); + @Override public boolean incrementToken() { if (toks.hasNext()) { clearAttributes(); diff --git a/solr/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java b/solr/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java index 5673ea8676e..184f241cff0 100644 --- a/solr/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java +++ b/solr/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java @@ -52,6 +52,7 @@ public class 
TestReversedWildcardFilterFactory extends SolrTestCaseJ4 { initCore("solrconfig.xml","schema-reversed.xml"); } + @Override @Before public void setUp() throws Exception { super.setUp(); diff --git a/solr/src/test/org/apache/solr/client/solrj/SolrExampleTestBase.java b/solr/src/test/org/apache/solr/client/solrj/SolrExampleTestBase.java index d9725aafd93..60fc20123e3 100644 --- a/solr/src/test/org/apache/solr/client/solrj/SolrExampleTestBase.java +++ b/solr/src/test/org/apache/solr/client/solrj/SolrExampleTestBase.java @@ -30,6 +30,7 @@ import org.apache.solr.util.AbstractSolrTestCase; */ abstract public class SolrExampleTestBase extends AbstractSolrTestCase { + @Override public String getSolrHome() { return "../../../example/solr/"; } @Override public String getSchemaFile() { return getSolrHome()+"conf/schema.xml"; } diff --git a/solr/src/test/org/apache/solr/client/solrj/TestLBHttpSolrServer.java b/solr/src/test/org/apache/solr/client/solrj/TestLBHttpSolrServer.java index 8886a9445db..75f4e341860 100644 --- a/solr/src/test/org/apache/solr/client/solrj/TestLBHttpSolrServer.java +++ b/solr/src/test/org/apache/solr/client/solrj/TestLBHttpSolrServer.java @@ -48,6 +48,7 @@ public class TestLBHttpSolrServer extends LuceneTestCase { SolrInstance[] solr = new SolrInstance[3]; HttpClient httpClient; + @Override public void setUp() throws Exception { super.setUp(); httpClient = new HttpClient(new MultiThreadedHttpConnectionManager()); diff --git a/solr/src/test/org/apache/solr/client/solrj/embedded/TestSolrProperties.java b/solr/src/test/org/apache/solr/client/solrj/embedded/TestSolrProperties.java index 997f1cdcb92..937f8778481 100644 --- a/solr/src/test/org/apache/solr/client/solrj/embedded/TestSolrProperties.java +++ b/solr/src/test/org/apache/solr/client/solrj/embedded/TestSolrProperties.java @@ -66,6 +66,7 @@ public class TestSolrProperties extends LuceneTestCase { return "solr.xml"; } + @Override @Before public void setUp() throws Exception { super.setUp(); @@ -77,6 
+78,7 @@ public class TestSolrProperties extends LuceneTestCase { cores = new CoreContainer(home.getAbsolutePath(), solrXml); } + @Override @After public void tearDown() throws Exception { if (cores != null) diff --git a/solr/src/test/org/apache/solr/cloud/AbstractDistributedZkTestCase.java b/solr/src/test/org/apache/solr/cloud/AbstractDistributedZkTestCase.java index 053b07732ca..3fe298359ca 100644 --- a/solr/src/test/org/apache/solr/cloud/AbstractDistributedZkTestCase.java +++ b/solr/src/test/org/apache/solr/cloud/AbstractDistributedZkTestCase.java @@ -51,6 +51,7 @@ public abstract class AbstractDistributedZkTestCase extends BaseDistributedSearc System.setProperty("solr.test.sys.prop2", "proptwo"); } + @Override protected void createServers(int numShards) throws Exception { System.setProperty("collection", "control_collection"); controlJetty = createJetty(testDir, testDir + "/control/data", "control_shard"); diff --git a/solr/src/test/org/apache/solr/cloud/AbstractZkTestCase.java b/solr/src/test/org/apache/solr/cloud/AbstractZkTestCase.java index 31721ed2782..72eb68a523b 100644 --- a/solr/src/test/org/apache/solr/cloud/AbstractZkTestCase.java +++ b/solr/src/test/org/apache/solr/cloud/AbstractZkTestCase.java @@ -99,6 +99,7 @@ public abstract class AbstractZkTestCase extends SolrTestCaseJ4 { + File.separator + "conf" + File.separator + name)); } + @Override public void tearDown() throws Exception { if (DEBUG) { printLayout(zkServer.getZkHost()); diff --git a/solr/src/test/org/apache/solr/cloud/CloudStateUpdateTest.java b/solr/src/test/org/apache/solr/cloud/CloudStateUpdateTest.java index 8d836ac6e97..3e60c0401e9 100644 --- a/solr/src/test/org/apache/solr/cloud/CloudStateUpdateTest.java +++ b/solr/src/test/org/apache/solr/cloud/CloudStateUpdateTest.java @@ -229,6 +229,7 @@ public class CloudStateUpdateTest extends SolrTestCaseJ4 { } + @Override public void tearDown() throws Exception { if (VERBOSE) { printLayout(zkServer.getZkHost()); diff --git 
a/solr/src/test/org/apache/solr/cloud/ZkControllerTest.java b/solr/src/test/org/apache/solr/cloud/ZkControllerTest.java index 4cbbd9e062d..f8eedd9cb5a 100644 --- a/solr/src/test/org/apache/solr/cloud/ZkControllerTest.java +++ b/solr/src/test/org/apache/solr/cloud/ZkControllerTest.java @@ -217,6 +217,7 @@ public class ZkControllerTest extends SolrTestCaseJ4 { .create(shardsPath + "/" + zkNodeName, bytes, CreateMode.PERSISTENT); } + @Override public void tearDown() throws Exception { SolrConfig.severeErrors.clear(); super.tearDown(); diff --git a/solr/src/test/org/apache/solr/cloud/ZkSolrClientTest.java b/solr/src/test/org/apache/solr/cloud/ZkSolrClientTest.java index c8a0bce9b1b..7aece955627 100644 --- a/solr/src/test/org/apache/solr/cloud/ZkSolrClientTest.java +++ b/solr/src/test/org/apache/solr/cloud/ZkSolrClientTest.java @@ -232,6 +232,7 @@ public class ZkSolrClientTest extends AbstractSolrTestCase { return null; } + @Override public void tearDown() throws Exception { SolrConfig.severeErrors.clear(); super.tearDown(); diff --git a/solr/src/test/org/apache/solr/core/AlternateDirectoryTest.java b/solr/src/test/org/apache/solr/core/AlternateDirectoryTest.java index 9f1d46a92b7..b036cb8c35d 100755 --- a/solr/src/test/org/apache/solr/core/AlternateDirectoryTest.java +++ b/solr/src/test/org/apache/solr/core/AlternateDirectoryTest.java @@ -48,6 +48,7 @@ public class AlternateDirectoryTest extends SolrTestCaseJ4 { public static volatile boolean openCalled = false; public static volatile Directory dir; + @Override public Directory open(String path) throws IOException { openCalled = true; // need to close the directory, or otherwise the test fails. 
@@ -63,6 +64,7 @@ public class AlternateDirectoryTest extends SolrTestCaseJ4 { static public class TestIndexReaderFactory extends IndexReaderFactory { static volatile boolean newReaderCalled = false; + @Override public IndexReader newReader(Directory indexDir, boolean readOnly) throws IOException { TestIndexReaderFactory.newReaderCalled = true; diff --git a/solr/src/test/org/apache/solr/core/DummyValueSourceParser.java b/solr/src/test/org/apache/solr/core/DummyValueSourceParser.java index 96f27c8689e..3e13f693450 100644 --- a/solr/src/test/org/apache/solr/core/DummyValueSourceParser.java +++ b/solr/src/test/org/apache/solr/core/DummyValueSourceParser.java @@ -32,17 +32,21 @@ import org.apache.solr.search.function.ValueSource; public class DummyValueSourceParser extends ValueSourceParser { private NamedList args; + @Override public void init(NamedList args) { this.args = args; } + @Override public ValueSource parse(FunctionQParser fp) throws ParseException { ValueSource source = fp.parseValueSource(); ValueSource result = new SimpleFloatFunction(source) { + @Override protected String name() { return "foo"; } + @Override protected float func(int doc, DocValues vals) { float result = 0; return result; diff --git a/solr/src/test/org/apache/solr/core/IndexReaderFactoryTest.java b/solr/src/test/org/apache/solr/core/IndexReaderFactoryTest.java index 870e21d5567..1fbec3fde3b 100644 --- a/solr/src/test/org/apache/solr/core/IndexReaderFactoryTest.java +++ b/solr/src/test/org/apache/solr/core/IndexReaderFactoryTest.java @@ -20,10 +20,12 @@ import org.apache.solr.util.AbstractSolrTestCase; public class IndexReaderFactoryTest extends AbstractSolrTestCase { + @Override public String getSchemaFile() { return "schema.xml"; } + @Override public String getSolrConfigFile() { return "solrconfig-termindex.xml"; } diff --git a/solr/src/test/org/apache/solr/core/MockQuerySenderListenerReqHandler.java b/solr/src/test/org/apache/solr/core/MockQuerySenderListenerReqHandler.java index 
d4a27c0ded8..4b351684c23 100644 --- a/solr/src/test/org/apache/solr/core/MockQuerySenderListenerReqHandler.java +++ b/solr/src/test/org/apache/solr/core/MockQuerySenderListenerReqHandler.java @@ -34,36 +34,43 @@ public class MockQuerySenderListenerReqHandler extends RequestHandlerBase { AtomicInteger initCounter = new AtomicInteger(0); + @Override public void init(NamedList args) { initCounter.incrementAndGet(); super.init(args); } + @Override public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception { this.req = req; this.rsp = rsp; } + @Override public String getDescription() { String result = null; return result; } + @Override public String getSourceId() { String result = null; return result; } + @Override public String getSource() { String result = null; return result; } + @Override public String getVersion() { String result = null; return result; } + @Override public NamedList getStatistics() { NamedList lst = super.getStatistics(); lst.add("initCount", initCounter.intValue()); diff --git a/solr/src/test/org/apache/solr/core/TestArbitraryIndexDir.java b/solr/src/test/org/apache/solr/core/TestArbitraryIndexDir.java index d19895c1994..59d1afba922 100644 --- a/solr/src/test/org/apache/solr/core/TestArbitraryIndexDir.java +++ b/solr/src/test/org/apache/solr/core/TestArbitraryIndexDir.java @@ -42,6 +42,7 @@ import org.xml.sax.SAXException; */ public class TestArbitraryIndexDir extends AbstractSolrTestCase{ + @Override public void setUp() throws Exception { super.setUp(); @@ -58,6 +59,7 @@ public class TestArbitraryIndexDir extends AbstractSolrTestCase{ ("standard",0,20,"version","2.2"); } + @Override public void tearDown() throws Exception { super.tearDown(); diff --git a/solr/src/test/org/apache/solr/core/TestBadConfig.java b/solr/src/test/org/apache/solr/core/TestBadConfig.java index 54a742da392..d7ceb8955f4 100644 --- a/solr/src/test/org/apache/solr/core/TestBadConfig.java +++ 
b/solr/src/test/org/apache/solr/core/TestBadConfig.java @@ -21,9 +21,12 @@ import org.apache.solr.util.AbstractSolrTestCase; public class TestBadConfig extends AbstractSolrTestCase { + @Override public String getSchemaFile() { return "schema.xml"; } + @Override public String getSolrConfigFile() { return "bad_solrconfig.xml"; } + @Override public void setUp() throws Exception { ignoreException("unset.sys.property"); try { diff --git a/solr/src/test/org/apache/solr/core/TestJmxIntegration.java b/solr/src/test/org/apache/solr/core/TestJmxIntegration.java index 46f42d11fec..3977c4a1e94 100644 --- a/solr/src/test/org/apache/solr/core/TestJmxIntegration.java +++ b/solr/src/test/org/apache/solr/core/TestJmxIntegration.java @@ -46,6 +46,7 @@ public class TestJmxIntegration extends AbstractSolrTestCase { return "solrconfig.xml"; } + @Override @Before public void setUp() throws Exception { // Make sure that at least one MBeanServer is available @@ -53,6 +54,7 @@ public class TestJmxIntegration extends AbstractSolrTestCase { super.setUp(); } + @Override @After public void tearDown() throws Exception { super.tearDown(); diff --git a/solr/src/test/org/apache/solr/core/TestJmxMonitoredMap.java b/solr/src/test/org/apache/solr/core/TestJmxMonitoredMap.java index 2356df8a6c3..ff91adeb41b 100644 --- a/solr/src/test/org/apache/solr/core/TestJmxMonitoredMap.java +++ b/solr/src/test/org/apache/solr/core/TestJmxMonitoredMap.java @@ -51,6 +51,7 @@ public class TestJmxMonitoredMap extends LuceneTestCase { private JmxMonitoredMap monitoredMap; + @Override @Before public void setUp() throws Exception { super.setUp(); @@ -84,6 +85,7 @@ public class TestJmxMonitoredMap extends LuceneTestCase { } } + @Override @After public void tearDown() throws Exception { try { diff --git a/solr/src/test/org/apache/solr/core/TestPropInject.java b/solr/src/test/org/apache/solr/core/TestPropInject.java index 858388a7ac0..345feacb8cb 100644 --- a/solr/src/test/org/apache/solr/core/TestPropInject.java +++ 
b/solr/src/test/org/apache/solr/core/TestPropInject.java @@ -26,10 +26,12 @@ import org.apache.solr.update.DirectUpdateHandler2; import org.apache.solr.util.AbstractSolrTestCase; public class TestPropInject extends AbstractSolrTestCase { + @Override public String getSchemaFile() { return "schema.xml"; } + @Override public String getSolrConfigFile() { return "solrconfig-propinject.xml"; } diff --git a/solr/src/test/org/apache/solr/core/TestSolrDeletionPolicy1.java b/solr/src/test/org/apache/solr/core/TestSolrDeletionPolicy1.java index 2d9cf6c03b1..29f2f5d8cf2 100644 --- a/solr/src/test/org/apache/solr/core/TestSolrDeletionPolicy1.java +++ b/solr/src/test/org/apache/solr/core/TestSolrDeletionPolicy1.java @@ -34,6 +34,7 @@ public class TestSolrDeletionPolicy1 extends SolrTestCaseJ4 { initCore("solrconfig-delpolicy1.xml","schema.xml"); } + @Override @Before public void setUp() throws Exception { super.setUp(); diff --git a/solr/src/test/org/apache/solr/core/TestXIncludeConfig.java b/solr/src/test/org/apache/solr/core/TestXIncludeConfig.java index 905685abeb8..fbe8d74c86a 100644 --- a/solr/src/test/org/apache/solr/core/TestXIncludeConfig.java +++ b/solr/src/test/org/apache/solr/core/TestXIncludeConfig.java @@ -33,11 +33,13 @@ import javax.xml.parsers.DocumentBuilderFactory; public class TestXIncludeConfig extends AbstractSolrTestCase { protected boolean supports; + @Override public String getSchemaFile() { return "schema.xml"; } //public String getSolrConfigFile() { return "solrconfig.xml"; } + @Override public String getSolrConfigFile() { return "solrconfig-xinclude.xml"; } diff --git a/solr/src/test/org/apache/solr/handler/JsonLoaderTest.java b/solr/src/test/org/apache/solr/handler/JsonLoaderTest.java index c9b280d88d4..e6635475356 100644 --- a/solr/src/test/org/apache/solr/handler/JsonLoaderTest.java +++ b/solr/src/test/org/apache/solr/handler/JsonLoaderTest.java @@ -146,23 +146,28 @@ class BufferingRequestProcessor extends UpdateRequestProcessor super(next); } + 
@Override public void processAdd(AddUpdateCommand cmd) throws IOException { addCommands.add( cmd ); } + @Override public void processDelete(DeleteUpdateCommand cmd) throws IOException { deleteCommands.add( cmd ); } + @Override public void processCommit(CommitUpdateCommand cmd) throws IOException { commitCommands.add( cmd ); } + @Override public void processRollback(RollbackUpdateCommand cmd) throws IOException { rollbackCommands.add( cmd ); } + @Override public void finish() throws IOException { // nothing? } diff --git a/solr/src/test/org/apache/solr/handler/TestCSVLoader.java b/solr/src/test/org/apache/solr/handler/TestCSVLoader.java index 1339a851c7b..37a1f9f9dd4 100755 --- a/solr/src/test/org/apache/solr/handler/TestCSVLoader.java +++ b/solr/src/test/org/apache/solr/handler/TestCSVLoader.java @@ -41,6 +41,7 @@ public class TestCSVLoader extends SolrTestCaseJ4 { String def_charset = "UTF-8"; File file = new File(filename); + @Override @Before public void setUp() throws Exception { // if you override setUp or tearDown, you better call @@ -49,6 +50,7 @@ public class TestCSVLoader extends SolrTestCaseJ4 { cleanup(); } + @Override @After public void tearDown() throws Exception { // if you override setUp or tearDown, you better call diff --git a/solr/src/test/org/apache/solr/handler/TestReplicationHandler.java b/solr/src/test/org/apache/solr/handler/TestReplicationHandler.java index b3f68ede013..da6013b5097 100644 --- a/solr/src/test/org/apache/solr/handler/TestReplicationHandler.java +++ b/solr/src/test/org/apache/solr/handler/TestReplicationHandler.java @@ -622,6 +622,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 { class BackupThread extends Thread { volatile String fail = null; + @Override public void run() { String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_BACKUP; URL url; @@ -646,6 +647,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 { volatile String 
fail = null; volatile String response = null; volatile boolean success = false; + @Override public void run() { String masterUrl = "http://localhost:" + masterJetty.getLocalPort() + "/solr/replication?command=" + ReplicationHandler.CMD_DETAILS; URL url; diff --git a/solr/src/test/org/apache/solr/handler/component/SpellCheckComponentTest.java b/solr/src/test/org/apache/solr/handler/component/SpellCheckComponentTest.java index 257aef389a6..30972f8b881 100644 --- a/solr/src/test/org/apache/solr/handler/component/SpellCheckComponentTest.java +++ b/solr/src/test/org/apache/solr/handler/component/SpellCheckComponentTest.java @@ -42,6 +42,7 @@ public class SpellCheckComponentTest extends SolrTestCaseJ4 { initCore("solrconfig-spellcheckcomponent.xml","schema.xml"); } + @Override public void setUp() throws Exception { super.setUp(); assertU(adoc("id", "0", "lowerfilt", "This is a title")); @@ -59,6 +60,7 @@ public class SpellCheckComponentTest extends SolrTestCaseJ4 { assertU((commit())); } + @Override public void tearDown() throws Exception { super.tearDown(); assertU(delQ("*:*")); diff --git a/solr/src/test/org/apache/solr/request/TestBinaryResponseWriter.java b/solr/src/test/org/apache/solr/request/TestBinaryResponseWriter.java index 2d6063d2e4f..703a7e121b5 100644 --- a/solr/src/test/org/apache/solr/request/TestBinaryResponseWriter.java +++ b/solr/src/test/org/apache/solr/request/TestBinaryResponseWriter.java @@ -38,10 +38,12 @@ import java.util.UUID; */ public class TestBinaryResponseWriter extends AbstractSolrTestCase { + @Override public String getSchemaFile() { return "schema12.xml"; } + @Override public String getSolrConfigFile() { return "solrconfig.xml"; } diff --git a/solr/src/test/org/apache/solr/request/TestWriterPerf.java b/solr/src/test/org/apache/solr/request/TestWriterPerf.java index e7da448650a..7bc0d774411 100755 --- a/solr/src/test/org/apache/solr/request/TestWriterPerf.java +++ b/solr/src/test/org/apache/solr/request/TestWriterPerf.java @@ -37,15 
+37,19 @@ public class TestWriterPerf extends AbstractSolrTestCase { public static final Logger log = LoggerFactory.getLogger(TestWriterPerf.class); + @Override public String getSchemaFile() { return "schema11.xml"; } + @Override public String getSolrConfigFile() { return "solrconfig-functionquery.xml"; } public String getCoreName() { return "basic"; } + @Override public void setUp() throws Exception { // if you override setUp or tearDown, you better call // the super classes version super.setUp(); } + @Override public void tearDown() throws Exception { // if you override setUp or tearDown, you better call // the super classes version diff --git a/solr/src/test/org/apache/solr/schema/CustomSimilarityFactory.java b/solr/src/test/org/apache/solr/schema/CustomSimilarityFactory.java index 88296e00b03..03fbaecaec2 100644 --- a/solr/src/test/org/apache/solr/schema/CustomSimilarityFactory.java +++ b/solr/src/test/org/apache/solr/schema/CustomSimilarityFactory.java @@ -19,6 +19,7 @@ package org.apache.solr.schema; import org.apache.lucene.search.SimilarityProvider; public class CustomSimilarityFactory extends SimilarityFactory { + @Override public SimilarityProvider getSimilarityProvider() { return new MockConfigurableSimilarity(params.get("echo")); } diff --git a/solr/src/test/org/apache/solr/schema/DateFieldTest.java b/solr/src/test/org/apache/solr/schema/DateFieldTest.java index 334067f6608..9168d4fe993 100644 --- a/solr/src/test/org/apache/solr/schema/DateFieldTest.java +++ b/solr/src/test/org/apache/solr/schema/DateFieldTest.java @@ -29,6 +29,7 @@ public class DateFieldTest extends LuceneTestCase { protected DateField f = null; protected DateMathParser p = new DateMathParser(UTC, Locale.US); + @Override public void setUp() throws Exception { super.setUp(); f = new DateField(); diff --git a/solr/src/test/org/apache/solr/schema/TestBinaryField.java b/solr/src/test/org/apache/solr/schema/TestBinaryField.java index 7fab9f0c188..72fb0f1225b 100644 --- 
a/solr/src/test/org/apache/solr/schema/TestBinaryField.java +++ b/solr/src/test/org/apache/solr/schema/TestBinaryField.java @@ -174,6 +174,7 @@ public class TestBinaryField extends LuceneTestCase { } + @Override public void tearDown() throws Exception { jetty.stop(); super.tearDown(); diff --git a/solr/src/test/org/apache/solr/search/FooQParserPlugin.java b/solr/src/test/org/apache/solr/search/FooQParserPlugin.java index 397e1c349ac..b58006fc2a3 100755 --- a/solr/src/test/org/apache/solr/search/FooQParserPlugin.java +++ b/solr/src/test/org/apache/solr/search/FooQParserPlugin.java @@ -27,6 +27,7 @@ import org.apache.lucene.index.Term; public class FooQParserPlugin extends QParserPlugin { + @Override public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) { return new FooQParser(qstr, localParams, params, req); } @@ -40,6 +41,7 @@ class FooQParser extends QParser { super(qstr, localParams, params, req); } + @Override public Query parse() throws ParseException { return new TermQuery(new Term(localParams.get(QueryParsing.F), localParams.get(QueryParsing.V))); } diff --git a/solr/src/test/org/apache/solr/search/TestExtendedDismaxParser.java b/solr/src/test/org/apache/solr/search/TestExtendedDismaxParser.java index 3ff940b75a7..1b890801a19 100755 --- a/solr/src/test/org/apache/solr/search/TestExtendedDismaxParser.java +++ b/solr/src/test/org/apache/solr/search/TestExtendedDismaxParser.java @@ -20,15 +20,19 @@ package org.apache.solr.search; import org.apache.solr.util.AbstractSolrTestCase; public class TestExtendedDismaxParser extends AbstractSolrTestCase { + @Override public String getSchemaFile() { return "schema12.xml"; } + @Override public String getSolrConfigFile() { return "solrconfig.xml"; } // public String getCoreName() { return "collection1"; } + @Override public void setUp() throws Exception { // if you override setUp or tearDown, you better call // the super classes version super.setUp(); } + @Override public 
void tearDown() throws Exception { // if you override setUp or tearDown, you better call // the super classes version diff --git a/solr/src/test/org/apache/solr/search/TestFastLRUCache.java b/solr/src/test/org/apache/solr/search/TestFastLRUCache.java index 0ff414ba79d..d313eb4d597 100644 --- a/solr/src/test/org/apache/solr/search/TestFastLRUCache.java +++ b/solr/src/test/org/apache/solr/search/TestFastLRUCache.java @@ -334,6 +334,7 @@ public class TestFastLRUCache extends LuceneTestCase { for (int i=0; i> suggestions = new LinkedHashMap>(); - @Before + @Override + @Before public void setUp() throws Exception { super.setUp(); suggestions.clear(); diff --git a/solr/src/test/org/apache/solr/update/AutoCommitTest.java b/solr/src/test/org/apache/solr/update/AutoCommitTest.java index e121423394c..cb0afdded27 100644 --- a/solr/src/test/org/apache/solr/update/AutoCommitTest.java +++ b/solr/src/test/org/apache/solr/update/AutoCommitTest.java @@ -78,7 +78,9 @@ class CommitListener implements SolrEventListener { public class AutoCommitTest extends AbstractSolrTestCase { + @Override public String getSchemaFile() { return "schema.xml"; } + @Override public String getSolrConfigFile() { return "solrconfig.xml"; } /** diff --git a/solr/src/test/org/apache/solr/update/DirectUpdateHandlerOptimizeTest.java b/solr/src/test/org/apache/solr/update/DirectUpdateHandlerOptimizeTest.java index 98f92553e7d..2f425545d65 100644 --- a/solr/src/test/org/apache/solr/update/DirectUpdateHandlerOptimizeTest.java +++ b/solr/src/test/org/apache/solr/update/DirectUpdateHandlerOptimizeTest.java @@ -32,10 +32,12 @@ import java.io.FileFilter; **/ public class DirectUpdateHandlerOptimizeTest extends AbstractSolrTestCase { + @Override public String getSchemaFile() { return "schema12.xml"; } + @Override public String getSolrConfigFile() { // return "solrconfig-duh-optimize.xml"; return "solrconfig.xml"; diff --git a/solr/src/test/org/apache/solr/update/DirectUpdateHandlerTest.java 
b/solr/src/test/org/apache/solr/update/DirectUpdateHandlerTest.java index 242ea064b3d..654d474d161 100644 --- a/solr/src/test/org/apache/solr/update/DirectUpdateHandlerTest.java +++ b/solr/src/test/org/apache/solr/update/DirectUpdateHandlerTest.java @@ -42,6 +42,7 @@ public class DirectUpdateHandlerTest extends SolrTestCaseJ4 { initCore("solrconfig.xml", "schema12.xml"); } + @Override @Before public void setUp() throws Exception { super.setUp(); diff --git a/solr/src/test/org/apache/solr/update/TestIndexingPerformance.java b/solr/src/test/org/apache/solr/update/TestIndexingPerformance.java index f513a94ca1d..51a55c4aaf1 100755 --- a/solr/src/test/org/apache/solr/update/TestIndexingPerformance.java +++ b/solr/src/test/org/apache/solr/update/TestIndexingPerformance.java @@ -41,7 +41,9 @@ public class TestIndexingPerformance extends AbstractSolrTestCase { public static final Logger log = LoggerFactory.getLogger(TestIndexingPerformance.class); + @Override public String getSchemaFile() { return "schema12.xml"; } + @Override public String getSolrConfigFile() { return "solrconfig_perf.xml"; } public void testIndexingPerf() throws IOException { diff --git a/solr/src/test/org/apache/solr/update/processor/SignatureUpdateProcessorFactoryTest.java b/solr/src/test/org/apache/solr/update/processor/SignatureUpdateProcessorFactoryTest.java index 25f1a639442..7bd622b6074 100755 --- a/solr/src/test/org/apache/solr/update/processor/SignatureUpdateProcessorFactoryTest.java +++ b/solr/src/test/org/apache/solr/update/processor/SignatureUpdateProcessorFactoryTest.java @@ -129,6 +129,7 @@ public class SignatureUpdateProcessorFactoryTest extends SolrTestCaseJ4 { for (int i = 0; i < threads.length; i++) { threads[i] = new Thread() { + @Override public void run() { for (int i = 0; i < 30; i++) { // h.update(adoc("id", Integer.toString(1+ i), "v_t", @@ -150,6 +151,7 @@ public class SignatureUpdateProcessorFactoryTest extends SolrTestCaseJ4 { for (int i = 0; i < threads2.length; i++) { 
threads2[i] = new Thread() { + @Override public void run() { for (int i = 0; i < 10; i++) { // h.update(adoc("id" , Integer.toString(1+ i + 10000), "v_t", diff --git a/solr/src/test/org/apache/solr/util/AbstractSolrTestCase.java b/solr/src/test/org/apache/solr/util/AbstractSolrTestCase.java index fec67be17b0..c9ecd2f3038 100644 --- a/solr/src/test/org/apache/solr/util/AbstractSolrTestCase.java +++ b/solr/src/test/org/apache/solr/util/AbstractSolrTestCase.java @@ -111,6 +111,7 @@ public abstract class AbstractSolrTestCase extends LuceneTestCase { public static Logger log = LoggerFactory.getLogger(AbstractSolrTestCase.class); private String factoryProp; + @Override public void setUp() throws Exception { super.setUp(); log.info("####SETUP_START " + getName()); @@ -170,6 +171,7 @@ public abstract class AbstractSolrTestCase extends LuceneTestCase { * to delete dataDir, unless the system property "solr.test.leavedatadir" * is set. */ + @Override public void tearDown() throws Exception { log.info("####TEARDOWN_START " + getName()); if (factoryProp == null) { @@ -403,6 +405,7 @@ public abstract class AbstractSolrTestCase extends LuceneTestCase { /** Neccessary to make method signatures un-ambiguous */ public static class Doc { public String xml; + @Override public String toString() { return xml; } } diff --git a/solr/src/test/org/apache/solr/util/TestNumberUtils.java b/solr/src/test/org/apache/solr/util/TestNumberUtils.java index 0e6c915697a..cc462944093 100644 --- a/solr/src/test/org/apache/solr/util/TestNumberUtils.java +++ b/solr/src/test/org/apache/solr/util/TestNumberUtils.java @@ -182,72 +182,88 @@ abstract class Converter { } class Int2Int extends Converter { + @Override public String toInternal(String val) { return Integer.toString(Integer.parseInt(val)); } + @Override public String toExternal(String val) { return Integer.toString(Integer.parseInt(val)); } } class SortInt extends Converter { + @Override public String toInternal(String val) { return 
NumberUtils.int2sortableStr(val); } + @Override public String toExternal(String val) { return NumberUtils.SortableStr2int(val); } } class SortLong extends Converter { + @Override public String toInternal(String val) { return NumberUtils.long2sortableStr(val); } + @Override public String toExternal(String val) { return NumberUtils.SortableStr2long(val); } } class Float2Float extends Converter { + @Override public String toInternal(String val) { return Float.toString(Float.parseFloat(val)); } + @Override public String toExternal(String val) { return Float.toString(Float.parseFloat(val)); } } class SortFloat extends Converter { + @Override public String toInternal(String val) { return NumberUtils.float2sortableStr(val); } + @Override public String toExternal(String val) { return NumberUtils.SortableStr2floatStr(val); } } class SortDouble extends Converter { + @Override public String toInternal(String val) { return NumberUtils.double2sortableStr(val); } + @Override public String toExternal(String val) { return NumberUtils.SortableStr2doubleStr(val); } } class Base100S extends Converter { + @Override public String toInternal(String val) { return BCDUtils.base10toBase100SortableInt(val); } + @Override public String toExternal(String val) { return BCDUtils.base100SortableIntToBase10(val); } } class Base10kS extends Converter { + @Override public String toInternal(String val) { return BCDUtils.base10toBase10kSortableInt(val); } + @Override public String toExternal(String val) { return BCDUtils.base10kSortableIntToBase10(val); } diff --git a/solr/src/test/org/apache/solr/velocity/VelocityResponseWriterTest.java b/solr/src/test/org/apache/solr/velocity/VelocityResponseWriterTest.java index f8c61e5a8e1..ae220a0a852 100644 --- a/solr/src/test/org/apache/solr/velocity/VelocityResponseWriterTest.java +++ b/solr/src/test/org/apache/solr/velocity/VelocityResponseWriterTest.java @@ -26,7 +26,9 @@ import java.io.StringWriter; import java.io.IOException; public class 
VelocityResponseWriterTest extends AbstractSolrTestCase { + @Override public String getSchemaFile() { return "schema.xml"; } + @Override public String getSolrConfigFile() { return "solrconfig.xml"; } diff --git a/solr/src/webapp/src/org/apache/solr/servlet/LogLevelSelection.java b/solr/src/webapp/src/org/apache/solr/servlet/LogLevelSelection.java index e272d5e973f..e8996e26346 100644 --- a/solr/src/webapp/src/org/apache/solr/servlet/LogLevelSelection.java +++ b/solr/src/webapp/src/org/apache/solr/servlet/LogLevelSelection.java @@ -36,6 +36,7 @@ import java.util.logging.Logger; * @since solr 1.3 */ public final class LogLevelSelection extends HttpServlet { + @Override public void init() throws ServletException { } @@ -43,6 +44,7 @@ public final class LogLevelSelection extends HttpServlet { * Processes an HTTP GET request and changes the logging level as * specified. */ + @Override public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException { @@ -147,6 +149,7 @@ public final class LogLevelSelection extends HttpServlet { } + @Override public void doPost(HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException { @@ -259,6 +262,7 @@ public final class LogLevelSelection extends HttpServlet { return name.compareTo(((LogWrapper) other).name); } + @Override public boolean equals(Object obj) { if (this == obj) return true; @@ -275,6 +279,7 @@ public final class LogLevelSelection extends HttpServlet { return true; } + @Override public int hashCode() { final int prime = 31; int result = 1; From 9af1a725691cbfe4593922e23af570f3fb18a46d Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Sun, 30 Jan 2011 17:29:55 +0000 Subject: [PATCH 059/185] add missing svn:eol-style git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065327 13f79535-47bb-0310-9956-ffa450edef68 --- .../lucene/queryParser/QueryParserBase.java | 2300 ++++++++--------- .../org/apache/solr/util/SentinelIntSet.java | 
268 +- .../cloud/AbstractDistributedZkTestCase.java | 186 +- .../apache/solr/cloud/AbstractZkTestCase.java | 294 +-- .../solr/cloud/BasicDistributedZkTest.java | 568 ++-- .../org/apache/solr/cloud/BasicZkTest.java | 276 +- .../solr/cloud/CloudStateUpdateTest.java | 510 ++-- .../apache/solr/cloud/ZkControllerTest.java | 450 ++-- .../apache/solr/cloud/ZkNodePropsTest.java | 98 +- .../apache/solr/cloud/ZkSolrClientTest.java | 482 ++-- .../org/apache/solr/cloud/ZkTestServer.java | 638 ++--- 11 files changed, 3035 insertions(+), 3035 deletions(-) diff --git a/lucene/src/java/org/apache/lucene/queryParser/QueryParserBase.java b/lucene/src/java/org/apache/lucene/queryParser/QueryParserBase.java index 078e2adfb63..41ad00987ed 100644 --- a/lucene/src/java/org/apache/lucene/queryParser/QueryParserBase.java +++ b/lucene/src/java/org/apache/lucene/queryParser/QueryParserBase.java @@ -1,1150 +1,1150 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.lucene.queryParser; - -import java.io.IOException; -import java.io.StringReader; -import java.text.Collator; -import java.text.DateFormat; -import java.util.*; - -import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.CachingTokenFilter; -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; -import org.apache.lucene.document.DateTools; -import org.apache.lucene.index.Term; -import org.apache.lucene.queryParser.QueryParser.Operator; -import org.apache.lucene.search.*; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.Version; - -/** This class is overridden by QueryParser in QueryParser.jj - * and acts to separate the majority of the Java code from the .jj grammar file. - */ -public abstract class QueryParserBase { - - /** Do not catch this exception in your code, it means you are using methods that you should no longer use. 
*/ - public static class MethodRemovedUseAnother extends Throwable {} - - static final int CONJ_NONE = 0; - static final int CONJ_AND = 1; - static final int CONJ_OR = 2; - - static final int MOD_NONE = 0; - static final int MOD_NOT = 10; - static final int MOD_REQ = 11; - - // make it possible to call setDefaultOperator() without accessing - // the nested class: - /** Alternative form of QueryParser.Operator.AND */ - public static final Operator AND_OPERATOR = Operator.AND; - /** Alternative form of QueryParser.Operator.OR */ - public static final Operator OR_OPERATOR = Operator.OR; - - /** The actual operator that parser uses to combine query terms */ - Operator operator = OR_OPERATOR; - - boolean lowercaseExpandedTerms = true; - MultiTermQuery.RewriteMethod multiTermRewriteMethod = MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT; - boolean allowLeadingWildcard = false; - boolean enablePositionIncrements = true; - - Analyzer analyzer; - String field; - int phraseSlop = 0; - float fuzzyMinSim = FuzzyQuery.defaultMinSimilarity; - int fuzzyPrefixLength = FuzzyQuery.defaultPrefixLength; - Locale locale = Locale.getDefault(); - - // the default date resolution - DateTools.Resolution dateResolution = null; - // maps field names to date resolutions - Map fieldToDateResolution = null; - - // The collator to use when determining range inclusion, - // for use when constructing RangeQuerys. - Collator rangeCollator = null; - - boolean autoGeneratePhraseQueries; - - // So the generated QueryParser(CharStream) won't error out - protected QueryParserBase() { - } - - /** Initializes a query parser. Called by the QueryParser constructor - * @param matchVersion Lucene version to match. See above. - * @param f the default field for query terms. - * @param a used to find terms in the query text. 
- */ - public void init(Version matchVersion, String f, Analyzer a) { - analyzer = a; - field = f; - if (matchVersion.onOrAfter(Version.LUCENE_31)) { - setAutoGeneratePhraseQueries(false); - } else { - setAutoGeneratePhraseQueries(true); - } - } - - // the generated parser will create these in QueryParser - public abstract void ReInit(CharStream stream); - public abstract Query TopLevelQuery(String field) throws ParseException; - - - /** Parses a query string, returning a {@link org.apache.lucene.search.Query}. - * @param query the query string to be parsed. - * @throws ParseException if the parsing fails - */ - public Query parse(String query) throws ParseException { - ReInit(new FastCharStream(new StringReader(query))); - try { - // TopLevelQuery is a Query followed by the end-of-input (EOF) - Query res = TopLevelQuery(field); - return res!=null ? res : newBooleanQuery(false); - } - catch (ParseException tme) { - // rethrow to include the original query: - ParseException e = new ParseException("Cannot parse '" +query+ "': " + tme.getMessage()); - e.initCause(tme); - throw e; - } - catch (TokenMgrError tme) { - ParseException e = new ParseException("Cannot parse '" +query+ "': " + tme.getMessage()); - e.initCause(tme); - throw e; - } - catch (BooleanQuery.TooManyClauses tmc) { - ParseException e = new ParseException("Cannot parse '" +query+ "': too many boolean clauses"); - e.initCause(tmc); - throw e; - } - } - - - /** - * @return Returns the analyzer. - */ - public Analyzer getAnalyzer() { - return analyzer; - } - - /** - * @return Returns the default field. - */ - public String getField() { - return field; - } - - /** - * @see #setAutoGeneratePhraseQueries(boolean) - */ - public final boolean getAutoGeneratePhraseQueries() { - return autoGeneratePhraseQueries; - } - - /** - * Set to true if phrase queries will be automatically generated - * when the analyzer returns more than one term from whitespace - * delimited text. 
- * NOTE: this behavior may not be suitable for all languages. - *

- * Set to false if phrase queries should only be generated when - * surrounded by double quotes. - */ - public final void setAutoGeneratePhraseQueries(boolean value) { - this.autoGeneratePhraseQueries = value; - } - - /** - * Get the minimal similarity for fuzzy queries. - */ - public float getFuzzyMinSim() { - return fuzzyMinSim; - } - - /** - * Set the minimum similarity for fuzzy queries. - * Default is 2f. - */ - public void setFuzzyMinSim(float fuzzyMinSim) { - this.fuzzyMinSim = fuzzyMinSim; - } - - /** - * Get the prefix length for fuzzy queries. - * @return Returns the fuzzyPrefixLength. - */ - public int getFuzzyPrefixLength() { - return fuzzyPrefixLength; - } - - /** - * Set the prefix length for fuzzy queries. Default is 0. - * @param fuzzyPrefixLength The fuzzyPrefixLength to set. - */ - public void setFuzzyPrefixLength(int fuzzyPrefixLength) { - this.fuzzyPrefixLength = fuzzyPrefixLength; - } - - /** - * Sets the default slop for phrases. If zero, then exact phrase matches - * are required. Default value is zero. - */ - public void setPhraseSlop(int phraseSlop) { - this.phraseSlop = phraseSlop; - } - - /** - * Gets the default slop for phrases. - */ - public int getPhraseSlop() { - return phraseSlop; - } - - - /** - * Set to true to allow leading wildcard characters. - *

- * When set, * or ? are allowed as - * the first character of a PrefixQuery and WildcardQuery. - * Note that this can produce very slow - * queries on big indexes. - *

- * Default: false. - */ - public void setAllowLeadingWildcard(boolean allowLeadingWildcard) { - this.allowLeadingWildcard = allowLeadingWildcard; - } - - /** - * @see #setAllowLeadingWildcard(boolean) - */ - public boolean getAllowLeadingWildcard() { - return allowLeadingWildcard; - } - - /** - * Set to true to enable position increments in result query. - *

- * When set, result phrase and multi-phrase queries will - * be aware of position increments. - * Useful when e.g. a StopFilter increases the position increment of - * the token that follows an omitted token. - *

- * Default: true. - */ - public void setEnablePositionIncrements(boolean enable) { - this.enablePositionIncrements = enable; - } - - /** - * @see #setEnablePositionIncrements(boolean) - */ - public boolean getEnablePositionIncrements() { - return enablePositionIncrements; - } - - /** - * Sets the boolean operator of the QueryParser. - * In default mode (OR_OPERATOR) terms without any modifiers - * are considered optional: for example capital of Hungary is equal to - * capital OR of OR Hungary.
- * In AND_OPERATOR mode terms are considered to be in conjunction: the - * above mentioned query is parsed as capital AND of AND Hungary - */ - public void setDefaultOperator(Operator op) { - this.operator = op; - } - - - /** - * Gets implicit operator setting, which will be either AND_OPERATOR - * or OR_OPERATOR. - */ - public Operator getDefaultOperator() { - return operator; - } - - - /** - * Whether terms of wildcard, prefix, fuzzy and range queries are to be automatically - * lower-cased or not. Default is true. - */ - public void setLowercaseExpandedTerms(boolean lowercaseExpandedTerms) { - this.lowercaseExpandedTerms = lowercaseExpandedTerms; - } - - - /** - * @see #setLowercaseExpandedTerms(boolean) - */ - public boolean getLowercaseExpandedTerms() { - return lowercaseExpandedTerms; - } - - /** - * By default QueryParser uses {@link org.apache.lucene.search.MultiTermQuery#CONSTANT_SCORE_AUTO_REWRITE_DEFAULT} - * when creating a PrefixQuery, WildcardQuery or RangeQuery. This implementation is generally preferable because it - * a) Runs faster b) Does not have the scarcity of terms unduly influence score - * c) avoids any "TooManyBooleanClauses" exception. - * However, if your application really needs to use the - * old-fashioned BooleanQuery expansion rewriting and the above - * points are not relevant then use this to change - * the rewrite method. - */ - public void setMultiTermRewriteMethod(MultiTermQuery.RewriteMethod method) { - multiTermRewriteMethod = method; - } - - - /** - * @see #setMultiTermRewriteMethod - */ - public MultiTermQuery.RewriteMethod getMultiTermRewriteMethod() { - return multiTermRewriteMethod; - } - - /** - * Set locale used by date range parsing. - */ - public void setLocale(Locale locale) { - this.locale = locale; - } - - /** - * Returns current locale, allowing access by subclasses. 
- */ - public Locale getLocale() { - return locale; - } - - /** - * Sets the default date resolution used by RangeQueries for fields for which no - * specific date resolutions has been set. Field specific resolutions can be set - * with {@link #setDateResolution(String, org.apache.lucene.document.DateTools.Resolution)}. - * - * @param dateResolution the default date resolution to set - */ - public void setDateResolution(DateTools.Resolution dateResolution) { - this.dateResolution = dateResolution; - } - - /** - * Sets the date resolution used by RangeQueries for a specific field. - * - * @param fieldName field for which the date resolution is to be set - * @param dateResolution date resolution to set - */ - public void setDateResolution(String fieldName, DateTools.Resolution dateResolution) { - if (fieldName == null) { - throw new IllegalArgumentException("Field cannot be null."); - } - - if (fieldToDateResolution == null) { - // lazily initialize HashMap - fieldToDateResolution = new HashMap(); - } - - fieldToDateResolution.put(fieldName, dateResolution); - } - - /** - * Returns the date resolution that is used by RangeQueries for the given field. - * Returns null, if no default or field specific date resolution has been set - * for the given field. - * - */ - public DateTools.Resolution getDateResolution(String fieldName) { - if (fieldName == null) { - throw new IllegalArgumentException("Field cannot be null."); - } - - if (fieldToDateResolution == null) { - // no field specific date resolutions set; return default date resolution instead - return this.dateResolution; - } - - DateTools.Resolution resolution = fieldToDateResolution.get(fieldName); - if (resolution == null) { - // no date resolutions set for the given field; return default date resolution instead - resolution = this.dateResolution; - } - - return resolution; - } - - /** - * Sets the collator used to determine index term inclusion in ranges - * for RangeQuerys. - *

- * WARNING: Setting the rangeCollator to a non-null - * collator using this method will cause every single index Term in the - * Field referenced by lowerTerm and/or upperTerm to be examined. - * Depending on the number of index Terms in this Field, the operation could - * be very slow. - * - * @param rc the collator to use when constructing RangeQuerys - */ - public void setRangeCollator(Collator rc) { - rangeCollator = rc; - } - - /** - * @return the collator used to determine index term inclusion in ranges - * for RangeQuerys. - */ - public Collator getRangeCollator() { - return rangeCollator; - } - - protected void addClause(List clauses, int conj, int mods, Query q) { - boolean required, prohibited; - - // If this term is introduced by AND, make the preceding term required, - // unless it's already prohibited - if (clauses.size() > 0 && conj == CONJ_AND) { - BooleanClause c = clauses.get(clauses.size()-1); - if (!c.isProhibited()) - c.setOccur(BooleanClause.Occur.MUST); - } - - if (clauses.size() > 0 && operator == AND_OPERATOR && conj == CONJ_OR) { - // If this term is introduced by OR, make the preceding term optional, - // unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b) - // notice if the input is a OR b, first term is parsed as required; without - // this modification a OR b would parsed as +a OR b - BooleanClause c = clauses.get(clauses.size()-1); - if (!c.isProhibited()) - c.setOccur(BooleanClause.Occur.SHOULD); - } - - // We might have been passed a null query; the term might have been - // filtered away by the analyzer. - if (q == null) - return; - - if (operator == OR_OPERATOR) { - // We set REQUIRED if we're introduced by AND or +; PROHIBITED if - // introduced by NOT or -; make sure not to set both. 
- prohibited = (mods == MOD_NOT); - required = (mods == MOD_REQ); - if (conj == CONJ_AND && !prohibited) { - required = true; - } - } else { - // We set PROHIBITED if we're introduced by NOT or -; We set REQUIRED - // if not PROHIBITED and not introduced by OR - prohibited = (mods == MOD_NOT); - required = (!prohibited && conj != CONJ_OR); - } - if (required && !prohibited) - clauses.add(newBooleanClause(q, BooleanClause.Occur.MUST)); - else if (!required && !prohibited) - clauses.add(newBooleanClause(q, BooleanClause.Occur.SHOULD)); - else if (!required && prohibited) - clauses.add(newBooleanClause(q, BooleanClause.Occur.MUST_NOT)); - else - throw new RuntimeException("Clause cannot be both required and prohibited"); - } - - /** - * @exception org.apache.lucene.queryParser.ParseException throw in overridden method to disallow - */ - protected Query getFieldQuery(String field, String queryText, boolean quoted) throws ParseException { - // Use the analyzer to get all the tokens, and then build a TermQuery, - // PhraseQuery, or nothing based on the term count - - TokenStream source; - try { - source = analyzer.reusableTokenStream(field, new StringReader(queryText)); - source.reset(); - } catch (IOException e) { - source = analyzer.tokenStream(field, new StringReader(queryText)); - } - CachingTokenFilter buffer = new CachingTokenFilter(source); - TermToBytesRefAttribute termAtt = null; - PositionIncrementAttribute posIncrAtt = null; - int numTokens = 0; - - boolean success = false; - try { - buffer.reset(); - success = true; - } catch (IOException e) { - // success==false if we hit an exception - } - if (success) { - if (buffer.hasAttribute(TermToBytesRefAttribute.class)) { - termAtt = buffer.getAttribute(TermToBytesRefAttribute.class); - } - if (buffer.hasAttribute(PositionIncrementAttribute.class)) { - posIncrAtt = buffer.getAttribute(PositionIncrementAttribute.class); - } - } - - int positionCount = 0; - boolean severalTokensAtSamePosition = false; - - boolean 
hasMoreTokens = false; - if (termAtt != null) { - try { - hasMoreTokens = buffer.incrementToken(); - while (hasMoreTokens) { - numTokens++; - int positionIncrement = (posIncrAtt != null) ? posIncrAtt.getPositionIncrement() : 1; - if (positionIncrement != 0) { - positionCount += positionIncrement; - } else { - severalTokensAtSamePosition = true; - } - hasMoreTokens = buffer.incrementToken(); - } - } catch (IOException e) { - // ignore - } - } - try { - // rewind the buffer stream - buffer.reset(); - - // close original stream - all tokens buffered - source.close(); - } - catch (IOException e) { - // ignore - } - - if (numTokens == 0) - return null; - else if (numTokens == 1) { - BytesRef term = new BytesRef(); - try { - boolean hasNext = buffer.incrementToken(); - assert hasNext == true; - termAtt.toBytesRef(term); - } catch (IOException e) { - // safe to ignore, because we know the number of tokens - } - return newTermQuery(new Term(field, term)); - } else { - if (severalTokensAtSamePosition || (!quoted && !autoGeneratePhraseQueries)) { - if (positionCount == 1 || (!quoted && !autoGeneratePhraseQueries)) { - // no phrase query: - BooleanQuery q = newBooleanQuery(positionCount == 1); - - BooleanClause.Occur occur = positionCount > 1 && operator == AND_OPERATOR ? 
- BooleanClause.Occur.MUST : BooleanClause.Occur.SHOULD; - - for (int i = 0; i < numTokens; i++) { - BytesRef term = new BytesRef(); - try { - boolean hasNext = buffer.incrementToken(); - assert hasNext == true; - termAtt.toBytesRef(term); - } catch (IOException e) { - // safe to ignore, because we know the number of tokens - } - - Query currentQuery = newTermQuery( - new Term(field, term)); - q.add(currentQuery, occur); - } - return q; - } - else { - // phrase query: - MultiPhraseQuery mpq = newMultiPhraseQuery(); - mpq.setSlop(phraseSlop); - List multiTerms = new ArrayList(); - int position = -1; - for (int i = 0; i < numTokens; i++) { - BytesRef term = new BytesRef(); - int positionIncrement = 1; - try { - boolean hasNext = buffer.incrementToken(); - assert hasNext == true; - termAtt.toBytesRef(term); - if (posIncrAtt != null) { - positionIncrement = posIncrAtt.getPositionIncrement(); - } - } catch (IOException e) { - // safe to ignore, because we know the number of tokens - } - - if (positionIncrement > 0 && multiTerms.size() > 0) { - if (enablePositionIncrements) { - mpq.add(multiTerms.toArray(new Term[0]),position); - } else { - mpq.add(multiTerms.toArray(new Term[0])); - } - multiTerms.clear(); - } - position += positionIncrement; - multiTerms.add(new Term(field, term)); - } - if (enablePositionIncrements) { - mpq.add(multiTerms.toArray(new Term[0]),position); - } else { - mpq.add(multiTerms.toArray(new Term[0])); - } - return mpq; - } - } - else { - PhraseQuery pq = newPhraseQuery(); - pq.setSlop(phraseSlop); - int position = -1; - - - for (int i = 0; i < numTokens; i++) { - BytesRef term = new BytesRef(); - int positionIncrement = 1; - - try { - boolean hasNext = buffer.incrementToken(); - assert hasNext == true; - termAtt.toBytesRef(term); - if (posIncrAtt != null) { - positionIncrement = posIncrAtt.getPositionIncrement(); - } - } catch (IOException e) { - // safe to ignore, because we know the number of tokens - } - - if (enablePositionIncrements) { - 
position += positionIncrement; - pq.add(new Term(field, term),position); - } else { - pq.add(new Term(field, term)); - } - } - return pq; - } - } - } - - - - /** - * Base implementation delegates to {@link #getFieldQuery(String,String,boolean)}. - * This method may be overridden, for example, to return - * a SpanNearQuery instead of a PhraseQuery. - * - * @exception org.apache.lucene.queryParser.ParseException throw in overridden method to disallow - */ - protected Query getFieldQuery(String field, String queryText, int slop) - throws ParseException { - Query query = getFieldQuery(field, queryText, true); - - if (query instanceof PhraseQuery) { - ((PhraseQuery) query).setSlop(slop); - } - if (query instanceof MultiPhraseQuery) { - ((MultiPhraseQuery) query).setSlop(slop); - } - - return query; - } - - /** - * - * @exception org.apache.lucene.queryParser.ParseException - */ - protected Query getRangeQuery(String field, - String part1, - String part2, - boolean startInclusive, - boolean endInclusive) throws ParseException - { - if (lowercaseExpandedTerms) { - part1 = part1==null ? null : part1.toLowerCase(); - part2 = part2==null ? 
null : part2.toLowerCase(); - } - - - DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT, locale); - df.setLenient(true); - DateTools.Resolution resolution = getDateResolution(field); - - try { - part1 = DateTools.dateToString(df.parse(part1), resolution); - } catch (Exception e) { } - - try { - Date d2 = df.parse(part2); - if (endInclusive) { - // The user can only specify the date, not the time, so make sure - // the time is set to the latest possible time of that date to really - // include all documents: - Calendar cal = Calendar.getInstance(locale); - cal.setTime(d2); - cal.set(Calendar.HOUR_OF_DAY, 23); - cal.set(Calendar.MINUTE, 59); - cal.set(Calendar.SECOND, 59); - cal.set(Calendar.MILLISECOND, 999); - d2 = cal.getTime(); - } - part2 = DateTools.dateToString(d2, resolution); - } catch (Exception e) { } - - return newRangeQuery(field, part1, part2, startInclusive, endInclusive); - } - - /** - * Builds a new BooleanQuery instance - * @param disableCoord disable coord - * @return new BooleanQuery instance - */ - protected BooleanQuery newBooleanQuery(boolean disableCoord) { - return new BooleanQuery(disableCoord); - } - - /** - * Builds a new BooleanClause instance - * @param q sub query - * @param occur how this clause should occur when matching documents - * @return new BooleanClause instance - */ - protected BooleanClause newBooleanClause(Query q, BooleanClause.Occur occur) { - return new BooleanClause(q, occur); - } - - /** - * Builds a new TermQuery instance - * @param term term - * @return new TermQuery instance - */ - protected Query newTermQuery(Term term){ - return new TermQuery(term); - } - - /** - * Builds a new PhraseQuery instance - * @return new PhraseQuery instance - */ - protected PhraseQuery newPhraseQuery(){ - return new PhraseQuery(); - } - - /** - * Builds a new MultiPhraseQuery instance - * @return new MultiPhraseQuery instance - */ - protected MultiPhraseQuery newMultiPhraseQuery(){ - return new MultiPhraseQuery(); - } - - /** - 
* Builds a new PrefixQuery instance - * @param prefix Prefix term - * @return new PrefixQuery instance - */ - protected Query newPrefixQuery(Term prefix){ - PrefixQuery query = new PrefixQuery(prefix); - query.setRewriteMethod(multiTermRewriteMethod); - return query; - } - - /** - * Builds a new RegexpQuery instance - * @param regexp Regexp term - * @return new RegexpQuery instance - */ - protected Query newRegexpQuery(Term regexp) { - RegexpQuery query = new RegexpQuery(regexp); - query.setRewriteMethod(multiTermRewriteMethod); - return query; - } - - /** - * Builds a new FuzzyQuery instance - * @param term Term - * @param minimumSimilarity minimum similarity - * @param prefixLength prefix length - * @return new FuzzyQuery Instance - */ - protected Query newFuzzyQuery(Term term, float minimumSimilarity, int prefixLength) { - // FuzzyQuery doesn't yet allow constant score rewrite - return new FuzzyQuery(term,minimumSimilarity,prefixLength); - } - - /** - * Builds a new TermRangeQuery instance - * @param field Field - * @param part1 min - * @param part2 max - * @param startInclusive true if the start of the range is inclusive - * @param endInclusive true if the end of the range is inclusive - * @return new TermRangeQuery instance - */ - protected Query newRangeQuery(String field, String part1, String part2, boolean startInclusive, boolean endInclusive) { - final TermRangeQuery query = new TermRangeQuery(field, part1, part2, startInclusive, endInclusive, rangeCollator); - query.setRewriteMethod(multiTermRewriteMethod); - return query; - } - - /** - * Builds a new MatchAllDocsQuery instance - * @return new MatchAllDocsQuery instance - */ - protected Query newMatchAllDocsQuery() { - return new MatchAllDocsQuery(); - } - - /** - * Builds a new WildcardQuery instance - * @param t wildcard term - * @return new WildcardQuery instance - */ - protected Query newWildcardQuery(Term t) { - WildcardQuery query = new WildcardQuery(t); - 
query.setRewriteMethod(multiTermRewriteMethod); - return query; - } - - /** - * Factory method for generating query, given a set of clauses. - * By default creates a boolean query composed of clauses passed in. - * - * Can be overridden by extending classes, to modify query being - * returned. - * - * @param clauses List that contains {@link org.apache.lucene.search.BooleanClause} instances - * to join. - * - * @return Resulting {@link org.apache.lucene.search.Query} object. - * @exception org.apache.lucene.queryParser.ParseException throw in overridden method to disallow - */ - protected Query getBooleanQuery(List clauses) throws ParseException { - return getBooleanQuery(clauses, false); - } - - /** - * Factory method for generating query, given a set of clauses. - * By default creates a boolean query composed of clauses passed in. - * - * Can be overridden by extending classes, to modify query being - * returned. - * - * @param clauses List that contains {@link org.apache.lucene.search.BooleanClause} instances - * to join. - * @param disableCoord true if coord scoring should be disabled. - * - * @return Resulting {@link org.apache.lucene.search.Query} object. - * @exception org.apache.lucene.queryParser.ParseException throw in overridden method to disallow - */ - protected Query getBooleanQuery(List clauses, boolean disableCoord) - throws ParseException - { - if (clauses.size()==0) { - return null; // all clause words were filtered away by the analyzer. - } - BooleanQuery query = newBooleanQuery(disableCoord); - for(final BooleanClause clause: clauses) { - query.add(clause); - } - return query; - } - - /** - * Factory method for generating a query. Called when parser - * parses an input term token that contains one or more wildcard - * characters (? and *), but is not a prefix term token (one - * that has just a single * character at the end) - *

- * Depending on settings, prefix term may be lower-cased - * automatically. It will not go through the default Analyzer, - * however, since normal Analyzers are unlikely to work properly - * with wildcard templates. - *

- * Can be overridden by extending classes, to provide custom handling for - * wildcard queries, which may be necessary due to missing analyzer calls. - * - * @param field Name of the field query will use. - * @param termStr Term token that contains one or more wild card - * characters (? or *), but is not simple prefix term - * - * @return Resulting {@link org.apache.lucene.search.Query} built for the term - * @exception org.apache.lucene.queryParser.ParseException throw in overridden method to disallow - */ - protected Query getWildcardQuery(String field, String termStr) throws ParseException - { - if ("*".equals(field)) { - if ("*".equals(termStr)) return newMatchAllDocsQuery(); - } - if (!allowLeadingWildcard && (termStr.startsWith("*") || termStr.startsWith("?"))) - throw new ParseException("'*' or '?' not allowed as first character in WildcardQuery"); - if (lowercaseExpandedTerms) { - termStr = termStr.toLowerCase(); - } - Term t = new Term(field, termStr); - return newWildcardQuery(t); - } - - /** - * Factory method for generating a query. Called when parser - * parses an input term token that contains a regular expression - * query. - *

- * Depending on settings, pattern term may be lower-cased - * automatically. It will not go through the default Analyzer, - * however, since normal Analyzers are unlikely to work properly - * with regular expression templates. - *

- * Can be overridden by extending classes, to provide custom handling for - * regular expression queries, which may be necessary due to missing analyzer - * calls. - * - * @param field Name of the field query will use. - * @param termStr Term token that contains a regular expression - * - * @return Resulting {@link org.apache.lucene.search.Query} built for the term - * @exception org.apache.lucene.queryParser.ParseException throw in overridden method to disallow - */ - protected Query getRegexpQuery(String field, String termStr) throws ParseException - { - if (lowercaseExpandedTerms) { - termStr = termStr.toLowerCase(); - } - Term t = new Term(field, termStr); - return newRegexpQuery(t); - } - - /** - * Factory method for generating a query (similar to - * {@link #getWildcardQuery}). Called when parser parses an input term - * token that uses prefix notation; that is, contains a single '*' wildcard - * character as its last character. Since this is a special case - * of generic wildcard term, and such a query can be optimized easily, - * this usually results in a different query object. - *

- * Depending on settings, a prefix term may be lower-cased - * automatically. It will not go through the default Analyzer, - * however, since normal Analyzers are unlikely to work properly - * with wildcard templates. - *

- * Can be overridden by extending classes, to provide custom handling for - * wild card queries, which may be necessary due to missing analyzer calls. - * - * @param field Name of the field query will use. - * @param termStr Term token to use for building term for the query - * (without trailing '*' character!) - * - * @return Resulting {@link org.apache.lucene.search.Query} built for the term - * @exception org.apache.lucene.queryParser.ParseException throw in overridden method to disallow - */ - protected Query getPrefixQuery(String field, String termStr) throws ParseException - { - if (!allowLeadingWildcard && termStr.startsWith("*")) - throw new ParseException("'*' not allowed as first character in PrefixQuery"); - if (lowercaseExpandedTerms) { - termStr = termStr.toLowerCase(); - } - Term t = new Term(field, termStr); - return newPrefixQuery(t); - } - - /** - * Factory method for generating a query (similar to - * {@link #getWildcardQuery}). Called when parser parses - * an input term token that has the fuzzy suffix (~) appended. - * - * @param field Name of the field query will use. 
- * @param termStr Term token to use for building term for the query - * - * @return Resulting {@link org.apache.lucene.search.Query} built for the term - * @exception org.apache.lucene.queryParser.ParseException throw in overridden method to disallow - */ - protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException - { - if (lowercaseExpandedTerms) { - termStr = termStr.toLowerCase(); - } - Term t = new Term(field, termStr); - return newFuzzyQuery(t, minSimilarity, fuzzyPrefixLength); - } - - - // extracted from the .jj grammar - Query handleBareTokenQuery(String qfield, Token term, Token fuzzySlop, boolean prefix, boolean wildcard, boolean fuzzy, boolean regexp) throws ParseException { - Query q; - - String termImage=discardEscapeChar(term.image); - if (wildcard) { - q = getWildcardQuery(qfield, term.image); - } else if (prefix) { - q = getPrefixQuery(qfield, - discardEscapeChar(term.image.substring - (0, term.image.length()-1))); - } else if (regexp) { - q = getRegexpQuery(qfield, term.image.substring(1, term.image.length()-1)); - } else if (fuzzy) { - float fms = fuzzyMinSim; - try { - fms = Float.valueOf(fuzzySlop.image.substring(1)).floatValue(); - } catch (Exception ignored) { } - if(fms < 0.0f){ - throw new ParseException("Minimum similarity for a FuzzyQuery has to be between 0.0f and 1.0f !"); - } else if (fms >= 1.0f && fms != (int) fms) { - throw new ParseException("Fractional edit distances are not allowed!"); - } - q = getFuzzyQuery(qfield, termImage, fms); - } else { - q = getFieldQuery(qfield, termImage, false); - } - return q; - } - - // extracted from the .jj grammar - Query handleQuotedTerm(String qfield, Token term, Token fuzzySlop) throws ParseException { - int s = phraseSlop; // default - if (fuzzySlop != null) { - try { - s = Float.valueOf(fuzzySlop.image.substring(1)).intValue(); - } - catch (Exception ignored) { } - } - return getFieldQuery(qfield, discardEscapeChar(term.image.substring(1, 
term.image.length()-1)), s); - } - - // extracted from the .jj grammar - Query handleBoost(Query q, Token boost) throws ParseException { - if (boost != null) { - float f = (float) 1.0; - try { - f = Float.valueOf(boost.image).floatValue(); - } - catch (Exception ignored) { - /* Should this be handled somehow? (defaults to "no boost", if - * boost number is invalid) - */ - } - - // avoid boosting null queries, such as those caused by stop words - if (q != null) { - q.setBoost(f); - } - } - return q; - } - - - - /** - * Returns a String where the escape char has been - * removed, or kept only once if there was a double escape. - * - * Supports escaped unicode characters, e. g. translates - * \\u0041 to A. - * - */ - String discardEscapeChar(String input) throws ParseException { - // Create char array to hold unescaped char sequence - char[] output = new char[input.length()]; - - // The length of the output can be less than the input - // due to discarded escape chars. This variable holds - // the actual length of the output - int length = 0; - - // We remember whether the last processed character was - // an escape character - boolean lastCharWasEscapeChar = false; - - // The multiplier the current unicode digit must be multiplied with. - // E. g. the first digit must be multiplied with 16^3, the second with 16^2... 
- int codePointMultiplier = 0; - - // Used to calculate the codepoint of the escaped unicode character - int codePoint = 0; - - for (int i = 0; i < input.length(); i++) { - char curChar = input.charAt(i); - if (codePointMultiplier > 0) { - codePoint += hexToInt(curChar) * codePointMultiplier; - codePointMultiplier >>>= 4; - if (codePointMultiplier == 0) { - output[length++] = (char)codePoint; - codePoint = 0; - } - } else if (lastCharWasEscapeChar) { - if (curChar == 'u') { - // found an escaped unicode character - codePointMultiplier = 16 * 16 * 16; - } else { - // this character was escaped - output[length] = curChar; - length++; - } - lastCharWasEscapeChar = false; - } else { - if (curChar == '\\') { - lastCharWasEscapeChar = true; - } else { - output[length] = curChar; - length++; - } - } - } - - if (codePointMultiplier > 0) { - throw new ParseException("Truncated unicode escape sequence."); - } - - if (lastCharWasEscapeChar) { - throw new ParseException("Term can not end with escape character."); - } - - return new String(output, 0, length); - } - - /** Returns the numeric value of the hexadecimal character */ - static final int hexToInt(char c) throws ParseException { - if ('0' <= c && c <= '9') { - return c - '0'; - } else if ('a' <= c && c <= 'f'){ - return c - 'a' + 10; - } else if ('A' <= c && c <= 'F') { - return c - 'A' + 10; - } else { - throw new ParseException("None-hex character in unicode escape sequence: " + c); - } - } - - /** - * Returns a String where those characters that QueryParser - * expects to be escaped are escaped by a preceding \. - */ - public static String escape(String s) { - StringBuilder sb = new StringBuilder(); - for (int i = 0; i < s.length(); i++) { - char c = s.charAt(i); - // These characters are part of the query syntax and must be escaped - if (c == '\\' || c == '+' || c == '-' || c == '!' 
|| c == '(' || c == ')' || c == ':' - || c == '^' || c == '[' || c == ']' || c == '\"' || c == '{' || c == '}' || c == '~' - || c == '*' || c == '?' || c == '|' || c == '&') { - sb.append('\\'); - } - sb.append(c); - } - return sb.toString(); - } - -} +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.lucene.queryParser; + +import java.io.IOException; +import java.io.StringReader; +import java.text.Collator; +import java.text.DateFormat; +import java.util.*; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.CachingTokenFilter; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; +import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; +import org.apache.lucene.document.DateTools; +import org.apache.lucene.index.Term; +import org.apache.lucene.queryParser.QueryParser.Operator; +import org.apache.lucene.search.*; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.Version; + +/** This class is overridden by QueryParser in QueryParser.jj + * and acts to separate the majority of the Java code from the .jj grammar file. 
+ */ +public abstract class QueryParserBase { + + /** Do not catch this exception in your code, it means you are using methods that you should no longer use. */ + public static class MethodRemovedUseAnother extends Throwable {} + + static final int CONJ_NONE = 0; + static final int CONJ_AND = 1; + static final int CONJ_OR = 2; + + static final int MOD_NONE = 0; + static final int MOD_NOT = 10; + static final int MOD_REQ = 11; + + // make it possible to call setDefaultOperator() without accessing + // the nested class: + /** Alternative form of QueryParser.Operator.AND */ + public static final Operator AND_OPERATOR = Operator.AND; + /** Alternative form of QueryParser.Operator.OR */ + public static final Operator OR_OPERATOR = Operator.OR; + + /** The actual operator that parser uses to combine query terms */ + Operator operator = OR_OPERATOR; + + boolean lowercaseExpandedTerms = true; + MultiTermQuery.RewriteMethod multiTermRewriteMethod = MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT; + boolean allowLeadingWildcard = false; + boolean enablePositionIncrements = true; + + Analyzer analyzer; + String field; + int phraseSlop = 0; + float fuzzyMinSim = FuzzyQuery.defaultMinSimilarity; + int fuzzyPrefixLength = FuzzyQuery.defaultPrefixLength; + Locale locale = Locale.getDefault(); + + // the default date resolution + DateTools.Resolution dateResolution = null; + // maps field names to date resolutions + Map fieldToDateResolution = null; + + // The collator to use when determining range inclusion, + // for use when constructing RangeQuerys. + Collator rangeCollator = null; + + boolean autoGeneratePhraseQueries; + + // So the generated QueryParser(CharStream) won't error out + protected QueryParserBase() { + } + + /** Initializes a query parser. Called by the QueryParser constructor + * @param matchVersion Lucene version to match. See above. + * @param f the default field for query terms. + * @param a used to find terms in the query text. 
+ */ + public void init(Version matchVersion, String f, Analyzer a) { + analyzer = a; + field = f; + if (matchVersion.onOrAfter(Version.LUCENE_31)) { + setAutoGeneratePhraseQueries(false); + } else { + setAutoGeneratePhraseQueries(true); + } + } + + // the generated parser will create these in QueryParser + public abstract void ReInit(CharStream stream); + public abstract Query TopLevelQuery(String field) throws ParseException; + + + /** Parses a query string, returning a {@link org.apache.lucene.search.Query}. + * @param query the query string to be parsed. + * @throws ParseException if the parsing fails + */ + public Query parse(String query) throws ParseException { + ReInit(new FastCharStream(new StringReader(query))); + try { + // TopLevelQuery is a Query followed by the end-of-input (EOF) + Query res = TopLevelQuery(field); + return res!=null ? res : newBooleanQuery(false); + } + catch (ParseException tme) { + // rethrow to include the original query: + ParseException e = new ParseException("Cannot parse '" +query+ "': " + tme.getMessage()); + e.initCause(tme); + throw e; + } + catch (TokenMgrError tme) { + ParseException e = new ParseException("Cannot parse '" +query+ "': " + tme.getMessage()); + e.initCause(tme); + throw e; + } + catch (BooleanQuery.TooManyClauses tmc) { + ParseException e = new ParseException("Cannot parse '" +query+ "': too many boolean clauses"); + e.initCause(tmc); + throw e; + } + } + + + /** + * @return Returns the analyzer. + */ + public Analyzer getAnalyzer() { + return analyzer; + } + + /** + * @return Returns the default field. + */ + public String getField() { + return field; + } + + /** + * @see #setAutoGeneratePhraseQueries(boolean) + */ + public final boolean getAutoGeneratePhraseQueries() { + return autoGeneratePhraseQueries; + } + + /** + * Set to true if phrase queries will be automatically generated + * when the analyzer returns more than one term from whitespace + * delimited text. 
+ * NOTE: this behavior may not be suitable for all languages. + *

+ * Set to false if phrase queries should only be generated when + * surrounded by double quotes. + */ + public final void setAutoGeneratePhraseQueries(boolean value) { + this.autoGeneratePhraseQueries = value; + } + + /** + * Get the minimal similarity for fuzzy queries. + */ + public float getFuzzyMinSim() { + return fuzzyMinSim; + } + + /** + * Set the minimum similarity for fuzzy queries. + * Default is 2f. + */ + public void setFuzzyMinSim(float fuzzyMinSim) { + this.fuzzyMinSim = fuzzyMinSim; + } + + /** + * Get the prefix length for fuzzy queries. + * @return Returns the fuzzyPrefixLength. + */ + public int getFuzzyPrefixLength() { + return fuzzyPrefixLength; + } + + /** + * Set the prefix length for fuzzy queries. Default is 0. + * @param fuzzyPrefixLength The fuzzyPrefixLength to set. + */ + public void setFuzzyPrefixLength(int fuzzyPrefixLength) { + this.fuzzyPrefixLength = fuzzyPrefixLength; + } + + /** + * Sets the default slop for phrases. If zero, then exact phrase matches + * are required. Default value is zero. + */ + public void setPhraseSlop(int phraseSlop) { + this.phraseSlop = phraseSlop; + } + + /** + * Gets the default slop for phrases. + */ + public int getPhraseSlop() { + return phraseSlop; + } + + + /** + * Set to true to allow leading wildcard characters. + *

+ * When set, * or ? are allowed as + * the first character of a PrefixQuery and WildcardQuery. + * Note that this can produce very slow + * queries on big indexes. + *

+ * Default: false. + */ + public void setAllowLeadingWildcard(boolean allowLeadingWildcard) { + this.allowLeadingWildcard = allowLeadingWildcard; + } + + /** + * @see #setAllowLeadingWildcard(boolean) + */ + public boolean getAllowLeadingWildcard() { + return allowLeadingWildcard; + } + + /** + * Set to true to enable position increments in result query. + *

+ * When set, result phrase and multi-phrase queries will + * be aware of position increments. + * Useful when e.g. a StopFilter increases the position increment of + * the token that follows an omitted token. + *

+ * Default: true. + */ + public void setEnablePositionIncrements(boolean enable) { + this.enablePositionIncrements = enable; + } + + /** + * @see #setEnablePositionIncrements(boolean) + */ + public boolean getEnablePositionIncrements() { + return enablePositionIncrements; + } + + /** + * Sets the boolean operator of the QueryParser. + * In default mode (OR_OPERATOR) terms without any modifiers + * are considered optional: for example capital of Hungary is equal to + * capital OR of OR Hungary.
+ * In AND_OPERATOR mode terms are considered to be in conjunction: the + * above mentioned query is parsed as capital AND of AND Hungary + */ + public void setDefaultOperator(Operator op) { + this.operator = op; + } + + + /** + * Gets implicit operator setting, which will be either AND_OPERATOR + * or OR_OPERATOR. + */ + public Operator getDefaultOperator() { + return operator; + } + + + /** + * Whether terms of wildcard, prefix, fuzzy and range queries are to be automatically + * lower-cased or not. Default is true. + */ + public void setLowercaseExpandedTerms(boolean lowercaseExpandedTerms) { + this.lowercaseExpandedTerms = lowercaseExpandedTerms; + } + + + /** + * @see #setLowercaseExpandedTerms(boolean) + */ + public boolean getLowercaseExpandedTerms() { + return lowercaseExpandedTerms; + } + + /** + * By default QueryParser uses {@link org.apache.lucene.search.MultiTermQuery#CONSTANT_SCORE_AUTO_REWRITE_DEFAULT} + * when creating a PrefixQuery, WildcardQuery or RangeQuery. This implementation is generally preferable because it + * a) Runs faster b) Does not have the scarcity of terms unduly influence score + * c) avoids any "TooManyBooleanClauses" exception. + * However, if your application really needs to use the + * old-fashioned BooleanQuery expansion rewriting and the above + * points are not relevant then use this to change + * the rewrite method. + */ + public void setMultiTermRewriteMethod(MultiTermQuery.RewriteMethod method) { + multiTermRewriteMethod = method; + } + + + /** + * @see #setMultiTermRewriteMethod + */ + public MultiTermQuery.RewriteMethod getMultiTermRewriteMethod() { + return multiTermRewriteMethod; + } + + /** + * Set locale used by date range parsing. + */ + public void setLocale(Locale locale) { + this.locale = locale; + } + + /** + * Returns current locale, allowing access by subclasses. 
+ */ + public Locale getLocale() { + return locale; + } + + /** + * Sets the default date resolution used by RangeQueries for fields for which no + * specific date resolutions has been set. Field specific resolutions can be set + * with {@link #setDateResolution(String, org.apache.lucene.document.DateTools.Resolution)}. + * + * @param dateResolution the default date resolution to set + */ + public void setDateResolution(DateTools.Resolution dateResolution) { + this.dateResolution = dateResolution; + } + + /** + * Sets the date resolution used by RangeQueries for a specific field. + * + * @param fieldName field for which the date resolution is to be set + * @param dateResolution date resolution to set + */ + public void setDateResolution(String fieldName, DateTools.Resolution dateResolution) { + if (fieldName == null) { + throw new IllegalArgumentException("Field cannot be null."); + } + + if (fieldToDateResolution == null) { + // lazily initialize HashMap + fieldToDateResolution = new HashMap(); + } + + fieldToDateResolution.put(fieldName, dateResolution); + } + + /** + * Returns the date resolution that is used by RangeQueries for the given field. + * Returns null, if no default or field specific date resolution has been set + * for the given field. + * + */ + public DateTools.Resolution getDateResolution(String fieldName) { + if (fieldName == null) { + throw new IllegalArgumentException("Field cannot be null."); + } + + if (fieldToDateResolution == null) { + // no field specific date resolutions set; return default date resolution instead + return this.dateResolution; + } + + DateTools.Resolution resolution = fieldToDateResolution.get(fieldName); + if (resolution == null) { + // no date resolutions set for the given field; return default date resolution instead + resolution = this.dateResolution; + } + + return resolution; + } + + /** + * Sets the collator used to determine index term inclusion in ranges + * for RangeQuerys. + *

+ * WARNING: Setting the rangeCollator to a non-null + * collator using this method will cause every single index Term in the + * Field referenced by lowerTerm and/or upperTerm to be examined. + * Depending on the number of index Terms in this Field, the operation could + * be very slow. + * + * @param rc the collator to use when constructing RangeQuerys + */ + public void setRangeCollator(Collator rc) { + rangeCollator = rc; + } + + /** + * @return the collator used to determine index term inclusion in ranges + * for RangeQuerys. + */ + public Collator getRangeCollator() { + return rangeCollator; + } + + protected void addClause(List clauses, int conj, int mods, Query q) { + boolean required, prohibited; + + // If this term is introduced by AND, make the preceding term required, + // unless it's already prohibited + if (clauses.size() > 0 && conj == CONJ_AND) { + BooleanClause c = clauses.get(clauses.size()-1); + if (!c.isProhibited()) + c.setOccur(BooleanClause.Occur.MUST); + } + + if (clauses.size() > 0 && operator == AND_OPERATOR && conj == CONJ_OR) { + // If this term is introduced by OR, make the preceding term optional, + // unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b) + // notice if the input is a OR b, first term is parsed as required; without + // this modification a OR b would parsed as +a OR b + BooleanClause c = clauses.get(clauses.size()-1); + if (!c.isProhibited()) + c.setOccur(BooleanClause.Occur.SHOULD); + } + + // We might have been passed a null query; the term might have been + // filtered away by the analyzer. + if (q == null) + return; + + if (operator == OR_OPERATOR) { + // We set REQUIRED if we're introduced by AND or +; PROHIBITED if + // introduced by NOT or -; make sure not to set both. 
+ prohibited = (mods == MOD_NOT); + required = (mods == MOD_REQ); + if (conj == CONJ_AND && !prohibited) { + required = true; + } + } else { + // We set PROHIBITED if we're introduced by NOT or -; We set REQUIRED + // if not PROHIBITED and not introduced by OR + prohibited = (mods == MOD_NOT); + required = (!prohibited && conj != CONJ_OR); + } + if (required && !prohibited) + clauses.add(newBooleanClause(q, BooleanClause.Occur.MUST)); + else if (!required && !prohibited) + clauses.add(newBooleanClause(q, BooleanClause.Occur.SHOULD)); + else if (!required && prohibited) + clauses.add(newBooleanClause(q, BooleanClause.Occur.MUST_NOT)); + else + throw new RuntimeException("Clause cannot be both required and prohibited"); + } + + /** + * @exception org.apache.lucene.queryParser.ParseException throw in overridden method to disallow + */ + protected Query getFieldQuery(String field, String queryText, boolean quoted) throws ParseException { + // Use the analyzer to get all the tokens, and then build a TermQuery, + // PhraseQuery, or nothing based on the term count + + TokenStream source; + try { + source = analyzer.reusableTokenStream(field, new StringReader(queryText)); + source.reset(); + } catch (IOException e) { + source = analyzer.tokenStream(field, new StringReader(queryText)); + } + CachingTokenFilter buffer = new CachingTokenFilter(source); + TermToBytesRefAttribute termAtt = null; + PositionIncrementAttribute posIncrAtt = null; + int numTokens = 0; + + boolean success = false; + try { + buffer.reset(); + success = true; + } catch (IOException e) { + // success==false if we hit an exception + } + if (success) { + if (buffer.hasAttribute(TermToBytesRefAttribute.class)) { + termAtt = buffer.getAttribute(TermToBytesRefAttribute.class); + } + if (buffer.hasAttribute(PositionIncrementAttribute.class)) { + posIncrAtt = buffer.getAttribute(PositionIncrementAttribute.class); + } + } + + int positionCount = 0; + boolean severalTokensAtSamePosition = false; + + boolean 
hasMoreTokens = false; + if (termAtt != null) { + try { + hasMoreTokens = buffer.incrementToken(); + while (hasMoreTokens) { + numTokens++; + int positionIncrement = (posIncrAtt != null) ? posIncrAtt.getPositionIncrement() : 1; + if (positionIncrement != 0) { + positionCount += positionIncrement; + } else { + severalTokensAtSamePosition = true; + } + hasMoreTokens = buffer.incrementToken(); + } + } catch (IOException e) { + // ignore + } + } + try { + // rewind the buffer stream + buffer.reset(); + + // close original stream - all tokens buffered + source.close(); + } + catch (IOException e) { + // ignore + } + + if (numTokens == 0) + return null; + else if (numTokens == 1) { + BytesRef term = new BytesRef(); + try { + boolean hasNext = buffer.incrementToken(); + assert hasNext == true; + termAtt.toBytesRef(term); + } catch (IOException e) { + // safe to ignore, because we know the number of tokens + } + return newTermQuery(new Term(field, term)); + } else { + if (severalTokensAtSamePosition || (!quoted && !autoGeneratePhraseQueries)) { + if (positionCount == 1 || (!quoted && !autoGeneratePhraseQueries)) { + // no phrase query: + BooleanQuery q = newBooleanQuery(positionCount == 1); + + BooleanClause.Occur occur = positionCount > 1 && operator == AND_OPERATOR ? 
+ BooleanClause.Occur.MUST : BooleanClause.Occur.SHOULD; + + for (int i = 0; i < numTokens; i++) { + BytesRef term = new BytesRef(); + try { + boolean hasNext = buffer.incrementToken(); + assert hasNext == true; + termAtt.toBytesRef(term); + } catch (IOException e) { + // safe to ignore, because we know the number of tokens + } + + Query currentQuery = newTermQuery( + new Term(field, term)); + q.add(currentQuery, occur); + } + return q; + } + else { + // phrase query: + MultiPhraseQuery mpq = newMultiPhraseQuery(); + mpq.setSlop(phraseSlop); + List multiTerms = new ArrayList(); + int position = -1; + for (int i = 0; i < numTokens; i++) { + BytesRef term = new BytesRef(); + int positionIncrement = 1; + try { + boolean hasNext = buffer.incrementToken(); + assert hasNext == true; + termAtt.toBytesRef(term); + if (posIncrAtt != null) { + positionIncrement = posIncrAtt.getPositionIncrement(); + } + } catch (IOException e) { + // safe to ignore, because we know the number of tokens + } + + if (positionIncrement > 0 && multiTerms.size() > 0) { + if (enablePositionIncrements) { + mpq.add(multiTerms.toArray(new Term[0]),position); + } else { + mpq.add(multiTerms.toArray(new Term[0])); + } + multiTerms.clear(); + } + position += positionIncrement; + multiTerms.add(new Term(field, term)); + } + if (enablePositionIncrements) { + mpq.add(multiTerms.toArray(new Term[0]),position); + } else { + mpq.add(multiTerms.toArray(new Term[0])); + } + return mpq; + } + } + else { + PhraseQuery pq = newPhraseQuery(); + pq.setSlop(phraseSlop); + int position = -1; + + + for (int i = 0; i < numTokens; i++) { + BytesRef term = new BytesRef(); + int positionIncrement = 1; + + try { + boolean hasNext = buffer.incrementToken(); + assert hasNext == true; + termAtt.toBytesRef(term); + if (posIncrAtt != null) { + positionIncrement = posIncrAtt.getPositionIncrement(); + } + } catch (IOException e) { + // safe to ignore, because we know the number of tokens + } + + if (enablePositionIncrements) { + 
position += positionIncrement; + pq.add(new Term(field, term),position); + } else { + pq.add(new Term(field, term)); + } + } + return pq; + } + } + } + + + + /** + * Base implementation delegates to {@link #getFieldQuery(String,String,boolean)}. + * This method may be overridden, for example, to return + * a SpanNearQuery instead of a PhraseQuery. + * + * @exception org.apache.lucene.queryParser.ParseException throw in overridden method to disallow + */ + protected Query getFieldQuery(String field, String queryText, int slop) + throws ParseException { + Query query = getFieldQuery(field, queryText, true); + + if (query instanceof PhraseQuery) { + ((PhraseQuery) query).setSlop(slop); + } + if (query instanceof MultiPhraseQuery) { + ((MultiPhraseQuery) query).setSlop(slop); + } + + return query; + } + + /** + * + * @exception org.apache.lucene.queryParser.ParseException + */ + protected Query getRangeQuery(String field, + String part1, + String part2, + boolean startInclusive, + boolean endInclusive) throws ParseException + { + if (lowercaseExpandedTerms) { + part1 = part1==null ? null : part1.toLowerCase(); + part2 = part2==null ? 
null : part2.toLowerCase(); + } + + + DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT, locale); + df.setLenient(true); + DateTools.Resolution resolution = getDateResolution(field); + + try { + part1 = DateTools.dateToString(df.parse(part1), resolution); + } catch (Exception e) { } + + try { + Date d2 = df.parse(part2); + if (endInclusive) { + // The user can only specify the date, not the time, so make sure + // the time is set to the latest possible time of that date to really + // include all documents: + Calendar cal = Calendar.getInstance(locale); + cal.setTime(d2); + cal.set(Calendar.HOUR_OF_DAY, 23); + cal.set(Calendar.MINUTE, 59); + cal.set(Calendar.SECOND, 59); + cal.set(Calendar.MILLISECOND, 999); + d2 = cal.getTime(); + } + part2 = DateTools.dateToString(d2, resolution); + } catch (Exception e) { } + + return newRangeQuery(field, part1, part2, startInclusive, endInclusive); + } + + /** + * Builds a new BooleanQuery instance + * @param disableCoord disable coord + * @return new BooleanQuery instance + */ + protected BooleanQuery newBooleanQuery(boolean disableCoord) { + return new BooleanQuery(disableCoord); + } + + /** + * Builds a new BooleanClause instance + * @param q sub query + * @param occur how this clause should occur when matching documents + * @return new BooleanClause instance + */ + protected BooleanClause newBooleanClause(Query q, BooleanClause.Occur occur) { + return new BooleanClause(q, occur); + } + + /** + * Builds a new TermQuery instance + * @param term term + * @return new TermQuery instance + */ + protected Query newTermQuery(Term term){ + return new TermQuery(term); + } + + /** + * Builds a new PhraseQuery instance + * @return new PhraseQuery instance + */ + protected PhraseQuery newPhraseQuery(){ + return new PhraseQuery(); + } + + /** + * Builds a new MultiPhraseQuery instance + * @return new MultiPhraseQuery instance + */ + protected MultiPhraseQuery newMultiPhraseQuery(){ + return new MultiPhraseQuery(); + } + + /** + 
* Builds a new PrefixQuery instance + * @param prefix Prefix term + * @return new PrefixQuery instance + */ + protected Query newPrefixQuery(Term prefix){ + PrefixQuery query = new PrefixQuery(prefix); + query.setRewriteMethod(multiTermRewriteMethod); + return query; + } + + /** + * Builds a new RegexpQuery instance + * @param regexp Regexp term + * @return new RegexpQuery instance + */ + protected Query newRegexpQuery(Term regexp) { + RegexpQuery query = new RegexpQuery(regexp); + query.setRewriteMethod(multiTermRewriteMethod); + return query; + } + + /** + * Builds a new FuzzyQuery instance + * @param term Term + * @param minimumSimilarity minimum similarity + * @param prefixLength prefix length + * @return new FuzzyQuery Instance + */ + protected Query newFuzzyQuery(Term term, float minimumSimilarity, int prefixLength) { + // FuzzyQuery doesn't yet allow constant score rewrite + return new FuzzyQuery(term,minimumSimilarity,prefixLength); + } + + /** + * Builds a new TermRangeQuery instance + * @param field Field + * @param part1 min + * @param part2 max + * @param startInclusive true if the start of the range is inclusive + * @param endInclusive true if the end of the range is inclusive + * @return new TermRangeQuery instance + */ + protected Query newRangeQuery(String field, String part1, String part2, boolean startInclusive, boolean endInclusive) { + final TermRangeQuery query = new TermRangeQuery(field, part1, part2, startInclusive, endInclusive, rangeCollator); + query.setRewriteMethod(multiTermRewriteMethod); + return query; + } + + /** + * Builds a new MatchAllDocsQuery instance + * @return new MatchAllDocsQuery instance + */ + protected Query newMatchAllDocsQuery() { + return new MatchAllDocsQuery(); + } + + /** + * Builds a new WildcardQuery instance + * @param t wildcard term + * @return new WildcardQuery instance + */ + protected Query newWildcardQuery(Term t) { + WildcardQuery query = new WildcardQuery(t); + 
query.setRewriteMethod(multiTermRewriteMethod); + return query; + } + + /** + * Factory method for generating query, given a set of clauses. + * By default creates a boolean query composed of clauses passed in. + * + * Can be overridden by extending classes, to modify query being + * returned. + * + * @param clauses List that contains {@link org.apache.lucene.search.BooleanClause} instances + * to join. + * + * @return Resulting {@link org.apache.lucene.search.Query} object. + * @exception org.apache.lucene.queryParser.ParseException throw in overridden method to disallow + */ + protected Query getBooleanQuery(List clauses) throws ParseException { + return getBooleanQuery(clauses, false); + } + + /** + * Factory method for generating query, given a set of clauses. + * By default creates a boolean query composed of clauses passed in. + * + * Can be overridden by extending classes, to modify query being + * returned. + * + * @param clauses List that contains {@link org.apache.lucene.search.BooleanClause} instances + * to join. + * @param disableCoord true if coord scoring should be disabled. + * + * @return Resulting {@link org.apache.lucene.search.Query} object. + * @exception org.apache.lucene.queryParser.ParseException throw in overridden method to disallow + */ + protected Query getBooleanQuery(List clauses, boolean disableCoord) + throws ParseException + { + if (clauses.size()==0) { + return null; // all clause words were filtered away by the analyzer. + } + BooleanQuery query = newBooleanQuery(disableCoord); + for(final BooleanClause clause: clauses) { + query.add(clause); + } + return query; + } + + /** + * Factory method for generating a query. Called when parser + * parses an input term token that contains one or more wildcard + * characters (? and *), but is not a prefix term token (one + * that has just a single * character at the end) + *

+ * Depending on settings, prefix term may be lower-cased + * automatically. It will not go through the default Analyzer, + * however, since normal Analyzers are unlikely to work properly + * with wildcard templates. + *

+ * Can be overridden by extending classes, to provide custom handling for + * wildcard queries, which may be necessary due to missing analyzer calls. + * + * @param field Name of the field query will use. + * @param termStr Term token that contains one or more wild card + * characters (? or *), but is not simple prefix term + * + * @return Resulting {@link org.apache.lucene.search.Query} built for the term + * @exception org.apache.lucene.queryParser.ParseException throw in overridden method to disallow + */ + protected Query getWildcardQuery(String field, String termStr) throws ParseException + { + if ("*".equals(field)) { + if ("*".equals(termStr)) return newMatchAllDocsQuery(); + } + if (!allowLeadingWildcard && (termStr.startsWith("*") || termStr.startsWith("?"))) + throw new ParseException("'*' or '?' not allowed as first character in WildcardQuery"); + if (lowercaseExpandedTerms) { + termStr = termStr.toLowerCase(); + } + Term t = new Term(field, termStr); + return newWildcardQuery(t); + } + + /** + * Factory method for generating a query. Called when parser + * parses an input term token that contains a regular expression + * query. + *

+ * Depending on settings, pattern term may be lower-cased + * automatically. It will not go through the default Analyzer, + * however, since normal Analyzers are unlikely to work properly + * with regular expression templates. + *

+ * Can be overridden by extending classes, to provide custom handling for + * regular expression queries, which may be necessary due to missing analyzer + * calls. + * + * @param field Name of the field query will use. + * @param termStr Term token that contains a regular expression + * + * @return Resulting {@link org.apache.lucene.search.Query} built for the term + * @exception org.apache.lucene.queryParser.ParseException throw in overridden method to disallow + */ + protected Query getRegexpQuery(String field, String termStr) throws ParseException + { + if (lowercaseExpandedTerms) { + termStr = termStr.toLowerCase(); + } + Term t = new Term(field, termStr); + return newRegexpQuery(t); + } + + /** + * Factory method for generating a query (similar to + * {@link #getWildcardQuery}). Called when parser parses an input term + * token that uses prefix notation; that is, contains a single '*' wildcard + * character as its last character. Since this is a special case + * of generic wildcard term, and such a query can be optimized easily, + * this usually results in a different query object. + *

+ * Depending on settings, a prefix term may be lower-cased + * automatically. It will not go through the default Analyzer, + * however, since normal Analyzers are unlikely to work properly + * with wildcard templates. + *

+ * Can be overridden by extending classes, to provide custom handling for + * wild card queries, which may be necessary due to missing analyzer calls. + * + * @param field Name of the field query will use. + * @param termStr Term token to use for building term for the query + * (without trailing '*' character!) + * + * @return Resulting {@link org.apache.lucene.search.Query} built for the term + * @exception org.apache.lucene.queryParser.ParseException throw in overridden method to disallow + */ + protected Query getPrefixQuery(String field, String termStr) throws ParseException + { + if (!allowLeadingWildcard && termStr.startsWith("*")) + throw new ParseException("'*' not allowed as first character in PrefixQuery"); + if (lowercaseExpandedTerms) { + termStr = termStr.toLowerCase(); + } + Term t = new Term(field, termStr); + return newPrefixQuery(t); + } + + /** + * Factory method for generating a query (similar to + * {@link #getWildcardQuery}). Called when parser parses + * an input term token that has the fuzzy suffix (~) appended. + * + * @param field Name of the field query will use. 
+ * @param termStr Term token to use for building term for the query + * + * @return Resulting {@link org.apache.lucene.search.Query} built for the term + * @exception org.apache.lucene.queryParser.ParseException throw in overridden method to disallow + */ + protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException + { + if (lowercaseExpandedTerms) { + termStr = termStr.toLowerCase(); + } + Term t = new Term(field, termStr); + return newFuzzyQuery(t, minSimilarity, fuzzyPrefixLength); + } + + + // extracted from the .jj grammar + Query handleBareTokenQuery(String qfield, Token term, Token fuzzySlop, boolean prefix, boolean wildcard, boolean fuzzy, boolean regexp) throws ParseException { + Query q; + + String termImage=discardEscapeChar(term.image); + if (wildcard) { + q = getWildcardQuery(qfield, term.image); + } else if (prefix) { + q = getPrefixQuery(qfield, + discardEscapeChar(term.image.substring + (0, term.image.length()-1))); + } else if (regexp) { + q = getRegexpQuery(qfield, term.image.substring(1, term.image.length()-1)); + } else if (fuzzy) { + float fms = fuzzyMinSim; + try { + fms = Float.valueOf(fuzzySlop.image.substring(1)).floatValue(); + } catch (Exception ignored) { } + if(fms < 0.0f){ + throw new ParseException("Minimum similarity for a FuzzyQuery has to be between 0.0f and 1.0f !"); + } else if (fms >= 1.0f && fms != (int) fms) { + throw new ParseException("Fractional edit distances are not allowed!"); + } + q = getFuzzyQuery(qfield, termImage, fms); + } else { + q = getFieldQuery(qfield, termImage, false); + } + return q; + } + + // extracted from the .jj grammar + Query handleQuotedTerm(String qfield, Token term, Token fuzzySlop) throws ParseException { + int s = phraseSlop; // default + if (fuzzySlop != null) { + try { + s = Float.valueOf(fuzzySlop.image.substring(1)).intValue(); + } + catch (Exception ignored) { } + } + return getFieldQuery(qfield, discardEscapeChar(term.image.substring(1, 
term.image.length()-1)), s); + } + + // extracted from the .jj grammar + Query handleBoost(Query q, Token boost) throws ParseException { + if (boost != null) { + float f = (float) 1.0; + try { + f = Float.valueOf(boost.image).floatValue(); + } + catch (Exception ignored) { + /* Should this be handled somehow? (defaults to "no boost", if + * boost number is invalid) + */ + } + + // avoid boosting null queries, such as those caused by stop words + if (q != null) { + q.setBoost(f); + } + } + return q; + } + + + + /** + * Returns a String where the escape char has been + * removed, or kept only once if there was a double escape. + * + * Supports escaped unicode characters, e. g. translates + * \\u0041 to A. + * + */ + String discardEscapeChar(String input) throws ParseException { + // Create char array to hold unescaped char sequence + char[] output = new char[input.length()]; + + // The length of the output can be less than the input + // due to discarded escape chars. This variable holds + // the actual length of the output + int length = 0; + + // We remember whether the last processed character was + // an escape character + boolean lastCharWasEscapeChar = false; + + // The multiplier the current unicode digit must be multiplied with. + // E. g. the first digit must be multiplied with 16^3, the second with 16^2... 
+ int codePointMultiplier = 0; + + // Used to calculate the codepoint of the escaped unicode character + int codePoint = 0; + + for (int i = 0; i < input.length(); i++) { + char curChar = input.charAt(i); + if (codePointMultiplier > 0) { + codePoint += hexToInt(curChar) * codePointMultiplier; + codePointMultiplier >>>= 4; + if (codePointMultiplier == 0) { + output[length++] = (char)codePoint; + codePoint = 0; + } + } else if (lastCharWasEscapeChar) { + if (curChar == 'u') { + // found an escaped unicode character + codePointMultiplier = 16 * 16 * 16; + } else { + // this character was escaped + output[length] = curChar; + length++; + } + lastCharWasEscapeChar = false; + } else { + if (curChar == '\\') { + lastCharWasEscapeChar = true; + } else { + output[length] = curChar; + length++; + } + } + } + + if (codePointMultiplier > 0) { + throw new ParseException("Truncated unicode escape sequence."); + } + + if (lastCharWasEscapeChar) { + throw new ParseException("Term can not end with escape character."); + } + + return new String(output, 0, length); + } + + /** Returns the numeric value of the hexadecimal character */ + static final int hexToInt(char c) throws ParseException { + if ('0' <= c && c <= '9') { + return c - '0'; + } else if ('a' <= c && c <= 'f'){ + return c - 'a' + 10; + } else if ('A' <= c && c <= 'F') { + return c - 'A' + 10; + } else { + throw new ParseException("None-hex character in unicode escape sequence: " + c); + } + } + + /** + * Returns a String where those characters that QueryParser + * expects to be escaped are escaped by a preceding \. + */ + public static String escape(String s) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < s.length(); i++) { + char c = s.charAt(i); + // These characters are part of the query syntax and must be escaped + if (c == '\\' || c == '+' || c == '-' || c == '!' 
|| c == '(' || c == ')' || c == ':' + || c == '^' || c == '[' || c == ']' || c == '\"' || c == '{' || c == '}' || c == '~' + || c == '*' || c == '?' || c == '|' || c == '&') { + sb.append('\\'); + } + sb.append(c); + } + return sb.toString(); + } + +} diff --git a/solr/src/java/org/apache/solr/util/SentinelIntSet.java b/solr/src/java/org/apache/solr/util/SentinelIntSet.java index fc4d120e7d5..2c53a723f39 100644 --- a/solr/src/java/org/apache/solr/util/SentinelIntSet.java +++ b/solr/src/java/org/apache/solr/util/SentinelIntSet.java @@ -1,134 +1,134 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.solr.util; - -import java.util.Arrays; - -/** A native int set where one value is reserved to mean "EMPTY" */ -public class SentinelIntSet { - public int[] keys; - public int count; - public final int emptyVal; - public int rehashCount; // the count at which a rehash should be done - - public SentinelIntSet(int size, int emptyVal) { - this.emptyVal = emptyVal; - int tsize = Math.max(org.apache.lucene.util.BitUtil.nextHighestPowerOfTwo(size), 1); - rehashCount = tsize - (tsize>>2); - if (size >= rehashCount) { // should be able to hold "size" w/o rehashing - tsize <<= 1; - rehashCount = tsize - (tsize>>2); - } - keys = new int[tsize]; - if (emptyVal != 0) - clear(); - } - - public void clear() { - Arrays.fill(keys, emptyVal); - count = 0; - } - - public int hash(int key) { - return key; - } - - public int size() { return count; } - - /** returns the slot for this key */ - public int getSlot(int key) { - assert key != emptyVal; - int h = hash(key); - int s = h & (keys.length-1); - if (keys[s] == key || keys[s]== emptyVal) return s; - - int increment = (h>>7)|1; - do { - s = (s + increment) & (keys.length-1); - } while (keys[s] != key && keys[s] != emptyVal); - return s; - } - - /** returns the slot for this key, or -slot-1 if not found */ - public int find(int key) { - assert key != emptyVal; - int h = hash(key); - int s = h & (keys.length-1); - if (keys[s] == key) return s; - if (keys[s] == emptyVal) return -s-1; - - int increment = (h>>7)|1; - for(;;) { - s = (s + increment) & (keys.length-1); - if (keys[s] == key) return s; - if (keys[s] == emptyVal) return -s-1; - } - } - - - public boolean exists(int key) { - return find(key) >= 0; - } - - - public int put(int key) { - int s = find(key); - if (s < 0) { - if (count >= rehashCount) { - rehash(); - s = getSlot(key); - } else { - s = -s-1; - } - count++; - keys[s] = key; - putKey(key, s); - } else { - overwriteKey(key, s); - } - return s; - } - - - protected void putKey(int key, int slot) 
{} - protected void overwriteKey(int key, int slot) {} - - protected void startRehash(int newSize) {} - protected void moveKey(int key, int oldSlot, int newSlot) {} - protected void endRehash() {} - - public void rehash() { - int newSize = keys.length << 1; - startRehash(newSize); - int[] oldKeys = keys; - keys = new int[newSize]; - if (emptyVal != 0) Arrays.fill(keys, emptyVal); - - for (int i=0; i>2); - - } - -} +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.solr.util; + +import java.util.Arrays; + +/** A native int set where one value is reserved to mean "EMPTY" */ +public class SentinelIntSet { + public int[] keys; + public int count; + public final int emptyVal; + public int rehashCount; // the count at which a rehash should be done + + public SentinelIntSet(int size, int emptyVal) { + this.emptyVal = emptyVal; + int tsize = Math.max(org.apache.lucene.util.BitUtil.nextHighestPowerOfTwo(size), 1); + rehashCount = tsize - (tsize>>2); + if (size >= rehashCount) { // should be able to hold "size" w/o rehashing + tsize <<= 1; + rehashCount = tsize - (tsize>>2); + } + keys = new int[tsize]; + if (emptyVal != 0) + clear(); + } + + public void clear() { + Arrays.fill(keys, emptyVal); + count = 0; + } + + public int hash(int key) { + return key; + } + + public int size() { return count; } + + /** returns the slot for this key */ + public int getSlot(int key) { + assert key != emptyVal; + int h = hash(key); + int s = h & (keys.length-1); + if (keys[s] == key || keys[s]== emptyVal) return s; + + int increment = (h>>7)|1; + do { + s = (s + increment) & (keys.length-1); + } while (keys[s] != key && keys[s] != emptyVal); + return s; + } + + /** returns the slot for this key, or -slot-1 if not found */ + public int find(int key) { + assert key != emptyVal; + int h = hash(key); + int s = h & (keys.length-1); + if (keys[s] == key) return s; + if (keys[s] == emptyVal) return -s-1; + + int increment = (h>>7)|1; + for(;;) { + s = (s + increment) & (keys.length-1); + if (keys[s] == key) return s; + if (keys[s] == emptyVal) return -s-1; + } + } + + + public boolean exists(int key) { + return find(key) >= 0; + } + + + public int put(int key) { + int s = find(key); + if (s < 0) { + if (count >= rehashCount) { + rehash(); + s = getSlot(key); + } else { + s = -s-1; + } + count++; + keys[s] = key; + putKey(key, s); + } else { + overwriteKey(key, s); + } + return s; + } + + + protected void putKey(int key, int slot) 
{} + protected void overwriteKey(int key, int slot) {} + + protected void startRehash(int newSize) {} + protected void moveKey(int key, int oldSlot, int newSlot) {} + protected void endRehash() {} + + public void rehash() { + int newSize = keys.length << 1; + startRehash(newSize); + int[] oldKeys = keys; + keys = new int[newSize]; + if (emptyVal != 0) Arrays.fill(keys, emptyVal); + + for (int i=0; i>2); + + } + +} diff --git a/solr/src/test/org/apache/solr/cloud/AbstractDistributedZkTestCase.java b/solr/src/test/org/apache/solr/cloud/AbstractDistributedZkTestCase.java index 3fe298359ca..e7ced578ee8 100644 --- a/solr/src/test/org/apache/solr/cloud/AbstractDistributedZkTestCase.java +++ b/solr/src/test/org/apache/solr/cloud/AbstractDistributedZkTestCase.java @@ -1,93 +1,93 @@ -package org.apache.solr.cloud; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -import java.io.File; - -import org.apache.solr.BaseDistributedSearchTestCase; -import org.apache.solr.client.solrj.embedded.JettySolrRunner; -import org.apache.solr.common.cloud.SolrZkClient; -import org.apache.solr.core.SolrConfig; -import org.junit.Before; - -public abstract class AbstractDistributedZkTestCase extends BaseDistributedSearchTestCase { - private static final boolean DEBUG = false; - protected ZkTestServer zkServer; - - @Before - @Override - public void setUp() throws Exception { - super.setUp(); - log.info("####SETUP_START " + getName()); - - ignoreException("java.nio.channels.ClosedChannelException"); - - String zkDir = testDir.getAbsolutePath() + File.separator - + "zookeeper/server1/data"; - zkServer = new ZkTestServer(zkDir); - zkServer.run(); - - System.setProperty("zkHost", zkServer.getZkAddress()); - - AbstractZkTestCase.buildZooKeeper(zkServer.getZkHost(), zkServer.getZkAddress(), "solrconfig.xml", "schema.xml"); - - // set some system properties for use by tests - System.setProperty("solr.test.sys.prop1", "propone"); - System.setProperty("solr.test.sys.prop2", "proptwo"); - } - - @Override - protected void createServers(int numShards) throws Exception { - System.setProperty("collection", "control_collection"); - controlJetty = createJetty(testDir, testDir + "/control/data", "control_shard"); - System.clearProperty("collection"); - controlClient = createNewSolrServer(controlJetty.getLocalPort()); - - StringBuilder sb = new StringBuilder(); - for (int i = 1; i <= numShards; i++) { - if (sb.length() > 0) sb.append(','); - JettySolrRunner j = createJetty(testDir, testDir + "/jetty" + i, "shard" + (i + 2)); - jettys.add(j); - clients.add(createNewSolrServer(j.getLocalPort())); - sb.append("localhost:").append(j.getLocalPort()).append(context); - } - - shards = sb.toString(); - } - - @Override - public void tearDown() throws Exception { - if (DEBUG) { - printLayout(); - } - zkServer.shutdown(); - System.clearProperty("zkHost"); - 
System.clearProperty("collection"); - System.clearProperty("solr.test.sys.prop1"); - System.clearProperty("solr.test.sys.prop2"); - super.tearDown(); - resetExceptionIgnores(); - SolrConfig.severeErrors.clear(); - } - - protected void printLayout() throws Exception { - SolrZkClient zkClient = new SolrZkClient(zkServer.getZkHost(), AbstractZkTestCase.TIMEOUT); - zkClient.printLayoutToStdOut(); - zkClient.close(); - } -} +package org.apache.solr.cloud; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import java.io.File; + +import org.apache.solr.BaseDistributedSearchTestCase; +import org.apache.solr.client.solrj.embedded.JettySolrRunner; +import org.apache.solr.common.cloud.SolrZkClient; +import org.apache.solr.core.SolrConfig; +import org.junit.Before; + +public abstract class AbstractDistributedZkTestCase extends BaseDistributedSearchTestCase { + private static final boolean DEBUG = false; + protected ZkTestServer zkServer; + + @Before + @Override + public void setUp() throws Exception { + super.setUp(); + log.info("####SETUP_START " + getName()); + + ignoreException("java.nio.channels.ClosedChannelException"); + + String zkDir = testDir.getAbsolutePath() + File.separator + + "zookeeper/server1/data"; + zkServer = new ZkTestServer(zkDir); + zkServer.run(); + + System.setProperty("zkHost", zkServer.getZkAddress()); + + AbstractZkTestCase.buildZooKeeper(zkServer.getZkHost(), zkServer.getZkAddress(), "solrconfig.xml", "schema.xml"); + + // set some system properties for use by tests + System.setProperty("solr.test.sys.prop1", "propone"); + System.setProperty("solr.test.sys.prop2", "proptwo"); + } + + @Override + protected void createServers(int numShards) throws Exception { + System.setProperty("collection", "control_collection"); + controlJetty = createJetty(testDir, testDir + "/control/data", "control_shard"); + System.clearProperty("collection"); + controlClient = createNewSolrServer(controlJetty.getLocalPort()); + + StringBuilder sb = new StringBuilder(); + for (int i = 1; i <= numShards; i++) { + if (sb.length() > 0) sb.append(','); + JettySolrRunner j = createJetty(testDir, testDir + "/jetty" + i, "shard" + (i + 2)); + jettys.add(j); + clients.add(createNewSolrServer(j.getLocalPort())); + sb.append("localhost:").append(j.getLocalPort()).append(context); + } + + shards = sb.toString(); + } + + @Override + public void tearDown() throws Exception { + if (DEBUG) { + printLayout(); + } + zkServer.shutdown(); + System.clearProperty("zkHost"); + 
System.clearProperty("collection"); + System.clearProperty("solr.test.sys.prop1"); + System.clearProperty("solr.test.sys.prop2"); + super.tearDown(); + resetExceptionIgnores(); + SolrConfig.severeErrors.clear(); + } + + protected void printLayout() throws Exception { + SolrZkClient zkClient = new SolrZkClient(zkServer.getZkHost(), AbstractZkTestCase.TIMEOUT); + zkClient.printLayoutToStdOut(); + zkClient.close(); + } +} diff --git a/solr/src/test/org/apache/solr/cloud/AbstractZkTestCase.java b/solr/src/test/org/apache/solr/cloud/AbstractZkTestCase.java index 72eb68a523b..f7660f05cf0 100644 --- a/solr/src/test/org/apache/solr/cloud/AbstractZkTestCase.java +++ b/solr/src/test/org/apache/solr/cloud/AbstractZkTestCase.java @@ -1,147 +1,147 @@ -package org.apache.solr.cloud; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -import java.io.File; -import java.io.IOException; -import java.util.List; - -import org.apache.solr.SolrTestCaseJ4; -import org.apache.solr.common.cloud.SolrZkClient; -import org.apache.solr.common.cloud.ZkNodeProps; -import org.apache.solr.core.SolrConfig; -import org.apache.zookeeper.CreateMode; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Base test class for ZooKeeper tests. - */ -public abstract class AbstractZkTestCase extends SolrTestCaseJ4 { - - static final int TIMEOUT = 10000; - - private static final boolean DEBUG = false; - - protected static Logger log = LoggerFactory - .getLogger(AbstractZkTestCase.class); - - protected static ZkTestServer zkServer; - - protected static String zkDir; - - - @BeforeClass - public static void azt_beforeClass() throws Exception { - createTempDir(); - zkDir = dataDir.getAbsolutePath() + File.separator - + "zookeeper/server1/data"; - zkServer = new ZkTestServer(zkDir); - zkServer.run(); - - System.setProperty("zkHost", zkServer.getZkAddress()); - System.setProperty("hostPort", "0000"); - - buildZooKeeper(zkServer.getZkHost(), zkServer.getZkAddress(), - "solrconfig.xml", "schema.xml"); - - initCore("solrconfig.xml", "schema.xml"); - } - - // static to share with distrib test - static void buildZooKeeper(String zkHost, String zkAddress, String config, - String schema) throws Exception { - SolrZkClient zkClient = new SolrZkClient(zkHost, AbstractZkTestCase.TIMEOUT); - zkClient.makePath("/solr"); - zkClient.close(); - - zkClient = new SolrZkClient(zkAddress, AbstractZkTestCase.TIMEOUT); - - ZkNodeProps props = new ZkNodeProps(); - props.put("configName", "conf1"); - zkClient.makePath("/collections/collection1", props.store(), CreateMode.PERSISTENT); - zkClient.makePath("/collections/collection1/shards", CreateMode.PERSISTENT); - - zkClient.makePath("/collections/control_collection", props.store(), CreateMode.PERSISTENT); - 
zkClient.makePath("/collections/control_collection/shards", CreateMode.PERSISTENT); - - putConfig(zkClient, config); - putConfig(zkClient, schema); - putConfig(zkClient, "stopwords.txt"); - putConfig(zkClient, "protwords.txt"); - putConfig(zkClient, "mapping-ISOLatin1Accent.txt"); - putConfig(zkClient, "old_synonyms.txt"); - putConfig(zkClient, "synonyms.txt"); - - zkClient.close(); - } - - private static void putConfig(SolrZkClient zkConnection, String name) - throws Exception { - zkConnection.setData("/configs/conf1/" + name, getFile("solr" - + File.separator + "conf" + File.separator + name)); - } - - @Override - public void tearDown() throws Exception { - if (DEBUG) { - printLayout(zkServer.getZkHost()); - } - - SolrConfig.severeErrors.clear(); - super.tearDown(); - } - - @AfterClass - public static void azt_afterClass() throws IOException { - zkServer.shutdown(); - System.clearProperty("zkHost"); - System.clearProperty("solr.test.sys.prop1"); - System.clearProperty("solr.test.sys.prop2"); - } - - protected void printLayout(String zkHost) throws Exception { - SolrZkClient zkClient = new SolrZkClient(zkHost, AbstractZkTestCase.TIMEOUT); - zkClient.printLayoutToStdOut(); - zkClient.close(); - } - - static void makeSolrZkNode(String zkHost) throws Exception { - SolrZkClient zkClient = new SolrZkClient(zkHost, TIMEOUT); - zkClient.makePath("/solr"); - zkClient.close(); - } - - static void tryCleanSolrZkNode(String zkHost) throws Exception { - tryCleanPath(zkHost, "/solr"); - } - - static void tryCleanPath(String zkHost, String path) throws Exception { - SolrZkClient zkClient = new SolrZkClient(zkHost, TIMEOUT); - if (zkClient.exists(path)) { - List children = zkClient.getChildren(path, null); - for (String string : children) { - tryCleanPath(zkHost, path+"/"+string); - } - zkClient.delete(path, -1); - } - zkClient.close(); - } -} +package org.apache.solr.cloud; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license 
agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.File; +import java.io.IOException; +import java.util.List; + +import org.apache.solr.SolrTestCaseJ4; +import org.apache.solr.common.cloud.SolrZkClient; +import org.apache.solr.common.cloud.ZkNodeProps; +import org.apache.solr.core.SolrConfig; +import org.apache.zookeeper.CreateMode; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Base test class for ZooKeeper tests. 
+ */ +public abstract class AbstractZkTestCase extends SolrTestCaseJ4 { + + static final int TIMEOUT = 10000; + + private static final boolean DEBUG = false; + + protected static Logger log = LoggerFactory + .getLogger(AbstractZkTestCase.class); + + protected static ZkTestServer zkServer; + + protected static String zkDir; + + + @BeforeClass + public static void azt_beforeClass() throws Exception { + createTempDir(); + zkDir = dataDir.getAbsolutePath() + File.separator + + "zookeeper/server1/data"; + zkServer = new ZkTestServer(zkDir); + zkServer.run(); + + System.setProperty("zkHost", zkServer.getZkAddress()); + System.setProperty("hostPort", "0000"); + + buildZooKeeper(zkServer.getZkHost(), zkServer.getZkAddress(), + "solrconfig.xml", "schema.xml"); + + initCore("solrconfig.xml", "schema.xml"); + } + + // static to share with distrib test + static void buildZooKeeper(String zkHost, String zkAddress, String config, + String schema) throws Exception { + SolrZkClient zkClient = new SolrZkClient(zkHost, AbstractZkTestCase.TIMEOUT); + zkClient.makePath("/solr"); + zkClient.close(); + + zkClient = new SolrZkClient(zkAddress, AbstractZkTestCase.TIMEOUT); + + ZkNodeProps props = new ZkNodeProps(); + props.put("configName", "conf1"); + zkClient.makePath("/collections/collection1", props.store(), CreateMode.PERSISTENT); + zkClient.makePath("/collections/collection1/shards", CreateMode.PERSISTENT); + + zkClient.makePath("/collections/control_collection", props.store(), CreateMode.PERSISTENT); + zkClient.makePath("/collections/control_collection/shards", CreateMode.PERSISTENT); + + putConfig(zkClient, config); + putConfig(zkClient, schema); + putConfig(zkClient, "stopwords.txt"); + putConfig(zkClient, "protwords.txt"); + putConfig(zkClient, "mapping-ISOLatin1Accent.txt"); + putConfig(zkClient, "old_synonyms.txt"); + putConfig(zkClient, "synonyms.txt"); + + zkClient.close(); + } + + private static void putConfig(SolrZkClient zkConnection, String name) + throws Exception { + 
zkConnection.setData("/configs/conf1/" + name, getFile("solr" + + File.separator + "conf" + File.separator + name)); + } + + @Override + public void tearDown() throws Exception { + if (DEBUG) { + printLayout(zkServer.getZkHost()); + } + + SolrConfig.severeErrors.clear(); + super.tearDown(); + } + + @AfterClass + public static void azt_afterClass() throws IOException { + zkServer.shutdown(); + System.clearProperty("zkHost"); + System.clearProperty("solr.test.sys.prop1"); + System.clearProperty("solr.test.sys.prop2"); + } + + protected void printLayout(String zkHost) throws Exception { + SolrZkClient zkClient = new SolrZkClient(zkHost, AbstractZkTestCase.TIMEOUT); + zkClient.printLayoutToStdOut(); + zkClient.close(); + } + + static void makeSolrZkNode(String zkHost) throws Exception { + SolrZkClient zkClient = new SolrZkClient(zkHost, TIMEOUT); + zkClient.makePath("/solr"); + zkClient.close(); + } + + static void tryCleanSolrZkNode(String zkHost) throws Exception { + tryCleanPath(zkHost, "/solr"); + } + + static void tryCleanPath(String zkHost, String path) throws Exception { + SolrZkClient zkClient = new SolrZkClient(zkHost, TIMEOUT); + if (zkClient.exists(path)) { + List children = zkClient.getChildren(path, null); + for (String string : children) { + tryCleanPath(zkHost, path+"/"+string); + } + zkClient.delete(path, -1); + } + zkClient.close(); + } +} diff --git a/solr/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java b/solr/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java index fbcd378cf0e..35d86e0ac5c 100644 --- a/solr/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java +++ b/solr/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java @@ -1,284 +1,284 @@ -package org.apache.solr.cloud; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.net.MalformedURLException; - -import org.apache.solr.SolrTestCaseJ4; -import org.apache.solr.client.solrj.SolrServerException; -import org.apache.solr.client.solrj.impl.CloudSolrServer; -import org.apache.solr.client.solrj.response.QueryResponse; -import org.apache.solr.common.params.CommonParams; -import org.apache.solr.common.params.ModifiableSolrParams; -import org.junit.BeforeClass; - -/** - * - */ -public class BasicDistributedZkTest extends AbstractDistributedZkTestCase { - - private static final String DEFAULT_COLLECTION = "collection1"; - private static final boolean DEBUG = false; - String t1="a_t"; - String i1="a_si"; - String nint = "n_i"; - String tint = "n_ti"; - String nfloat = "n_f"; - String tfloat = "n_tf"; - String ndouble = "n_d"; - String tdouble = "n_td"; - String nlong = "n_l"; - String tlong = "n_tl"; - String ndate = "n_dt"; - String tdate = "n_tdt"; - - String oddField="oddField_s"; - String missingField="ignore_exception__missing_but_valid_field_t"; - String invalidField="ignore_exception__invalid_field_not_in_schema"; - - public BasicDistributedZkTest() { - fixShardCount = true; - - System.setProperty("CLOUD_UPDATE_DELAY", "0"); - } - - - @BeforeClass - public static void beforeClass() throws Exception { - System.setProperty("solr.solr.home", SolrTestCaseJ4.TEST_HOME); - } - - @Override - protected void 
setDistributedParams(ModifiableSolrParams params) { - - if (r.nextBoolean()) { - // don't set shards, let that be figured out from the cloud state - params.set("distrib", "true"); - } else { - // use shard ids rather than physical locations - StringBuilder sb = new StringBuilder(); - for (int i = 0; i < shardCount; i++) { - if (i > 0) - sb.append(','); - sb.append("shard" + (i + 3)); - } - params.set("shards", sb.toString()); - params.set("distrib", "true"); - } - } - - @Override - public void doTest() throws Exception { - del("*:*"); - indexr(id,1, i1, 100, tlong, 100,t1,"now is the time for all good men" - ,"foo_f", 1.414f, "foo_b", "true", "foo_d", 1.414d); - indexr(id,2, i1, 50 , tlong, 50,t1,"to come to the aid of their country." - ); - indexr(id,3, i1, 2, tlong, 2,t1,"how now brown cow" - ); - indexr(id,4, i1, -100 ,tlong, 101,t1,"the quick fox jumped over the lazy dog" - ); - indexr(id,5, i1, 500, tlong, 500 ,t1,"the quick fox jumped way over the lazy dog" - ); - indexr(id,6, i1, -600, tlong, 600 ,t1,"humpty dumpy sat on a wall"); - indexr(id,7, i1, 123, tlong, 123 ,t1,"humpty dumpy had a great fall"); - indexr(id,8, i1, 876, tlong, 876,t1,"all the kings horses and all the kings men"); - indexr(id,9, i1, 7, tlong, 7,t1,"couldn't put humpty together again"); - indexr(id,10, i1, 4321, tlong, 4321,t1,"this too shall pass"); - indexr(id,11, i1, -987, tlong, 987,t1,"An eye for eye only ends up making the whole world blind."); - indexr(id,12, i1, 379, tlong, 379,t1,"Great works are performed, not by strength, but by perseverance."); - indexr(id,13, i1, 232, tlong, 232,t1,"no eggs on wall, lesson learned", oddField, "odd man out"); - - indexr(id, 14, "SubjectTerms_mfacet", new String[] {"mathematical models", "mathematical analysis"}); - indexr(id, 15, "SubjectTerms_mfacet", new String[] {"test 1", "test 2", "test3"}); - indexr(id, 16, "SubjectTerms_mfacet", new String[] {"test 1", "test 2", "test3"}); - String[] vals = new String[100]; - for (int i=0; i<100; i++) 
{ - vals[i] = "test " + i; - } - indexr(id, 17, "SubjectTerms_mfacet", vals); - - for (int i=100; i<150; i++) { - indexr(id, i); - } - - commit(); - - handle.clear(); - handle.put("QTime", SKIPVAL); - handle.put("timestamp", SKIPVAL); - - // random value sort - for (String f : fieldNames) { - query("q","*:*", "sort",f+" desc"); - query("q","*:*", "sort",f+" asc"); - } - - // these queries should be exactly ordered and scores should exactly match - query("q","*:*", "sort",i1+" desc"); - query("q","*:*", "sort",i1+" asc"); - query("q","*:*", "sort",i1+" desc", "fl","*,score"); - query("q","*:*", "sort",tlong+" asc", "fl","score"); // test legacy behavior - "score"=="*,score" - query("q","*:*", "sort",tlong+" desc"); - handle.put("maxScore", SKIPVAL); - query("q","{!func}"+i1);// does not expect maxScore. So if it comes ,ignore it. JavaBinCodec.writeSolrDocumentList() - //is agnostic of request params. - handle.remove("maxScore"); - query("q","{!func}"+i1, "fl","*,score"); // even scores should match exactly here - - handle.put("highlighting", UNORDERED); - handle.put("response", UNORDERED); - - handle.put("maxScore", SKIPVAL); - query("q","quick"); - query("q","all","fl","id","start","0"); - query("q","all","fl","foofoofoo","start","0"); // no fields in returned docs - query("q","all","fl","id","start","100"); - - handle.put("score", SKIPVAL); - query("q","quick","fl","*,score"); - query("q","all","fl","*,score","start","1"); - query("q","all","fl","*,score","start","100"); - - query("q","now their fox sat had put","fl","*,score", - "hl","true","hl.fl",t1); - - query("q","now their fox sat had put","fl","foofoofoo", - "hl","true","hl.fl",t1); - - query("q","matchesnothing","fl","*,score"); - - query("q","*:*", "rows",100, "facet","true", "facet.field",t1); - query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","count"); - query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","count", 
"facet.mincount",2); - query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","index"); - query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","index", "facet.mincount",2); - query("q","*:*", "rows",100, "facet","true", "facet.field",t1,"facet.limit",1); - query("q","*:*", "rows",100, "facet","true", "facet.query","quick", "facet.query","all", "facet.query","*:*"); - query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.offset",1); - query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.mincount",2); - - // test faceting multiple things at once - query("q","*:*", "rows",100, "facet","true", "facet.query","quick", "facet.query","all", "facet.query","*:*" - ,"facet.field",t1); - - // test filter tagging, facet exclusion, and naming (multi-select facet support) - query("q","*:*", "rows",100, "facet","true", "facet.query","{!key=myquick}quick", "facet.query","{!key=myall ex=a}all", "facet.query","*:*" - ,"facet.field","{!key=mykey ex=a}"+t1 - ,"facet.field","{!key=other ex=b}"+t1 - ,"facet.field","{!key=again ex=a,b}"+t1 - ,"facet.field",t1 - ,"fq","{!tag=a}id:[1 TO 7]", "fq","{!tag=b}id:[3 TO 9]" - ); - query("q", "*:*", "facet", "true", "facet.field", "{!ex=t1}SubjectTerms_mfacet", "fq", "{!tag=t1}SubjectTerms_mfacet:(test 1)", "facet.limit", "10", "facet.mincount", "1"); - - // test field that is valid in schema but missing in all shards - query("q","*:*", "rows",100, "facet","true", "facet.field",missingField, "facet.mincount",2); - // test field that is valid in schema and missing in some shards - query("q","*:*", "rows",100, "facet","true", "facet.field",oddField, "facet.mincount",2); - - query("q","*:*", "sort",i1+" desc", "stats", "true", "stats.field", i1); - - /*** TODO: the failure may come back in "exception" - try { - // test error produced for field that is invalid for schema - query("q","*:*", "rows",100, "facet","true", 
"facet.field",invalidField, "facet.mincount",2); - TestCase.fail("SolrServerException expected for invalid field that is not in schema"); - } catch (SolrServerException ex) { - // expected - } - ***/ - - // Try to get better coverage for refinement queries by turning off over requesting. - // This makes it much more likely that we may not get the top facet values and hence - // we turn of that checking. - handle.put("facet_fields", SKIPVAL); - query("q","*:*", "rows",0, "facet","true", "facet.field",t1,"facet.limit",5, "facet.shard.limit",5); - // check a complex key name - query("q","*:*", "rows",0, "facet","true", "facet.field","{!key='a b/c \\' \\} foo'}"+t1,"facet.limit",5, "facet.shard.limit",5); - handle.remove("facet_fields"); - - - // index the same document to two servers and make sure things - // don't blow up. - if (clients.size()>=2) { - index(id,100, i1, 107 ,t1,"oh no, a duplicate!"); - for (int i=0; i 0) + sb.append(','); + sb.append("shard" + (i + 3)); + } + params.set("shards", sb.toString()); + params.set("distrib", "true"); + } + } + + @Override + public void doTest() throws Exception { + del("*:*"); + indexr(id,1, i1, 100, tlong, 100,t1,"now is the time for all good men" + ,"foo_f", 1.414f, "foo_b", "true", "foo_d", 1.414d); + indexr(id,2, i1, 50 , tlong, 50,t1,"to come to the aid of their country." 
+ ); + indexr(id,3, i1, 2, tlong, 2,t1,"how now brown cow" + ); + indexr(id,4, i1, -100 ,tlong, 101,t1,"the quick fox jumped over the lazy dog" + ); + indexr(id,5, i1, 500, tlong, 500 ,t1,"the quick fox jumped way over the lazy dog" + ); + indexr(id,6, i1, -600, tlong, 600 ,t1,"humpty dumpy sat on a wall"); + indexr(id,7, i1, 123, tlong, 123 ,t1,"humpty dumpy had a great fall"); + indexr(id,8, i1, 876, tlong, 876,t1,"all the kings horses and all the kings men"); + indexr(id,9, i1, 7, tlong, 7,t1,"couldn't put humpty together again"); + indexr(id,10, i1, 4321, tlong, 4321,t1,"this too shall pass"); + indexr(id,11, i1, -987, tlong, 987,t1,"An eye for eye only ends up making the whole world blind."); + indexr(id,12, i1, 379, tlong, 379,t1,"Great works are performed, not by strength, but by perseverance."); + indexr(id,13, i1, 232, tlong, 232,t1,"no eggs on wall, lesson learned", oddField, "odd man out"); + + indexr(id, 14, "SubjectTerms_mfacet", new String[] {"mathematical models", "mathematical analysis"}); + indexr(id, 15, "SubjectTerms_mfacet", new String[] {"test 1", "test 2", "test3"}); + indexr(id, 16, "SubjectTerms_mfacet", new String[] {"test 1", "test 2", "test3"}); + String[] vals = new String[100]; + for (int i=0; i<100; i++) { + vals[i] = "test " + i; + } + indexr(id, 17, "SubjectTerms_mfacet", vals); + + for (int i=100; i<150; i++) { + indexr(id, i); + } + + commit(); + + handle.clear(); + handle.put("QTime", SKIPVAL); + handle.put("timestamp", SKIPVAL); + + // random value sort + for (String f : fieldNames) { + query("q","*:*", "sort",f+" desc"); + query("q","*:*", "sort",f+" asc"); + } + + // these queries should be exactly ordered and scores should exactly match + query("q","*:*", "sort",i1+" desc"); + query("q","*:*", "sort",i1+" asc"); + query("q","*:*", "sort",i1+" desc", "fl","*,score"); + query("q","*:*", "sort",tlong+" asc", "fl","score"); // test legacy behavior - "score"=="*,score" + query("q","*:*", "sort",tlong+" desc"); + 
handle.put("maxScore", SKIPVAL); + query("q","{!func}"+i1);// does not expect maxScore. So if it comes ,ignore it. JavaBinCodec.writeSolrDocumentList() + //is agnostic of request params. + handle.remove("maxScore"); + query("q","{!func}"+i1, "fl","*,score"); // even scores should match exactly here + + handle.put("highlighting", UNORDERED); + handle.put("response", UNORDERED); + + handle.put("maxScore", SKIPVAL); + query("q","quick"); + query("q","all","fl","id","start","0"); + query("q","all","fl","foofoofoo","start","0"); // no fields in returned docs + query("q","all","fl","id","start","100"); + + handle.put("score", SKIPVAL); + query("q","quick","fl","*,score"); + query("q","all","fl","*,score","start","1"); + query("q","all","fl","*,score","start","100"); + + query("q","now their fox sat had put","fl","*,score", + "hl","true","hl.fl",t1); + + query("q","now their fox sat had put","fl","foofoofoo", + "hl","true","hl.fl",t1); + + query("q","matchesnothing","fl","*,score"); + + query("q","*:*", "rows",100, "facet","true", "facet.field",t1); + query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","count"); + query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","count", "facet.mincount",2); + query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","index"); + query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.limit",-1, "facet.sort","index", "facet.mincount",2); + query("q","*:*", "rows",100, "facet","true", "facet.field",t1,"facet.limit",1); + query("q","*:*", "rows",100, "facet","true", "facet.query","quick", "facet.query","all", "facet.query","*:*"); + query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.offset",1); + query("q","*:*", "rows",100, "facet","true", "facet.field",t1, "facet.mincount",2); + + // test faceting multiple things at once + query("q","*:*", "rows",100, "facet","true", 
"facet.query","quick", "facet.query","all", "facet.query","*:*" + ,"facet.field",t1); + + // test filter tagging, facet exclusion, and naming (multi-select facet support) + query("q","*:*", "rows",100, "facet","true", "facet.query","{!key=myquick}quick", "facet.query","{!key=myall ex=a}all", "facet.query","*:*" + ,"facet.field","{!key=mykey ex=a}"+t1 + ,"facet.field","{!key=other ex=b}"+t1 + ,"facet.field","{!key=again ex=a,b}"+t1 + ,"facet.field",t1 + ,"fq","{!tag=a}id:[1 TO 7]", "fq","{!tag=b}id:[3 TO 9]" + ); + query("q", "*:*", "facet", "true", "facet.field", "{!ex=t1}SubjectTerms_mfacet", "fq", "{!tag=t1}SubjectTerms_mfacet:(test 1)", "facet.limit", "10", "facet.mincount", "1"); + + // test field that is valid in schema but missing in all shards + query("q","*:*", "rows",100, "facet","true", "facet.field",missingField, "facet.mincount",2); + // test field that is valid in schema and missing in some shards + query("q","*:*", "rows",100, "facet","true", "facet.field",oddField, "facet.mincount",2); + + query("q","*:*", "sort",i1+" desc", "stats", "true", "stats.field", i1); + + /*** TODO: the failure may come back in "exception" + try { + // test error produced for field that is invalid for schema + query("q","*:*", "rows",100, "facet","true", "facet.field",invalidField, "facet.mincount",2); + TestCase.fail("SolrServerException expected for invalid field that is not in schema"); + } catch (SolrServerException ex) { + // expected + } + ***/ + + // Try to get better coverage for refinement queries by turning off over requesting. + // This makes it much more likely that we may not get the top facet values and hence + // we turn of that checking. 
+ handle.put("facet_fields", SKIPVAL); + query("q","*:*", "rows",0, "facet","true", "facet.field",t1,"facet.limit",5, "facet.shard.limit",5); + // check a complex key name + query("q","*:*", "rows",0, "facet","true", "facet.field","{!key='a b/c \\' \\} foo'}"+t1,"facet.limit",5, "facet.shard.limit",5); + handle.remove("facet_fields"); + + + // index the same document to two servers and make sure things + // don't blow up. + if (clients.size()>=2) { + index(id,100, i1, 107 ,t1,"oh no, a duplicate!"); + for (int i=0; i slices = null; - for (int i = 75; i > 0; i--) { - cloudState2 = zkController2.getCloudState(); - slices = cloudState2.getSlices("testcore"); - - if (slices != null && slices.containsKey(host + ":1661_solr_testcore")) { - break; - } - Thread.sleep(500); - } - - assertNotNull(slices); - assertTrue(slices.containsKey(host + ":1661_solr_testcore")); - - Slice slice = slices.get(host + ":1661_solr_testcore"); - assertEquals(host + ":1661_solr_testcore", slice.getName()); - - Map shards = slice.getShards(); - - assertEquals(1, shards.size()); - - ZkNodeProps zkProps = shards.get(host + ":1661_solr_testcore"); - - assertNotNull(zkProps); - - assertEquals(host + ":1661_solr", zkProps.get("node_name")); - - assertEquals("http://" + host + ":1661/solr/testcore", zkProps.get("url")); - - Set liveNodes = cloudState2.getLiveNodes(); - assertNotNull(liveNodes); - assertEquals(3, liveNodes.size()); - - container3.shutdown(); - - // slight pause (15s timeout) for watch to trigger - for(int i = 0; i < (5 * 15); i++) { - if(zkController2.getCloudState().getLiveNodes().size() == 2) { - break; - } - Thread.sleep(200); - } - - assertEquals(2, zkController2.getCloudState().getLiveNodes().size()); - - // quickly kill / start client - - container2.getZkController().getZkClient().getSolrZooKeeper().getConnection() - .disconnect(); - container2.shutdown(); - - container2 = init2.initialize(); - - // pause for watch to trigger - for(int i = 0; i < 200; i++) { - if 
(container1.getZkController().getCloudState().liveNodesContain( - container2.getZkController().getNodeName())) { - break; - } - Thread.sleep(100); - } - - assertTrue(container1.getZkController().getCloudState().liveNodesContain( - container2.getZkController().getNodeName())); - - } - - @Override - public void tearDown() throws Exception { - if (VERBOSE) { - printLayout(zkServer.getZkHost()); - } - container1.shutdown(); - container2.shutdown(); - container3.shutdown(); - zkServer.shutdown(); - super.tearDown(); - System.clearProperty("zkClientTimeout"); - System.clearProperty("zkHost"); - System.clearProperty("hostPort"); - System.clearProperty("CLOUD_UPDATE_DELAY"); - SolrConfig.severeErrors.clear(); - } - - private void printLayout(String zkHost) throws Exception { - SolrZkClient zkClient = new SolrZkClient( - zkHost, AbstractZkTestCase.TIMEOUT); - zkClient.printLayoutToStdOut(); - zkClient.close(); - } -} +package org.apache.solr.cloud; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import java.io.File; +import java.util.Map; +import java.util.Set; + +import org.apache.solr.SolrTestCaseJ4; +import org.apache.solr.common.cloud.CloudState; +import org.apache.solr.common.cloud.Slice; +import org.apache.solr.common.cloud.SolrZkClient; +import org.apache.solr.common.cloud.ZkNodeProps; +import org.apache.solr.core.CoreContainer; +import org.apache.solr.core.CoreContainer.Initializer; +import org.apache.solr.core.CoreDescriptor; +import org.apache.solr.core.SolrConfig; +import org.apache.solr.core.SolrCore; +import org.apache.zookeeper.CreateMode; +import org.junit.BeforeClass; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * TODO: look at hostPort used below + */ +public class CloudStateUpdateTest extends SolrTestCaseJ4 { + protected static Logger log = LoggerFactory + .getLogger(AbstractZkTestCase.class); + + private static final boolean VERBOSE = false; + + protected ZkTestServer zkServer; + + protected String zkDir; + + private CoreContainer container1; + + private CoreContainer container2; + + private CoreContainer container3; + + private File dataDir1; + + private File dataDir2; + + private File dataDir3; + + private File dataDir4; + + private Initializer init2; + + @BeforeClass + public static void beforeClass() throws Exception { + initCore(); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + + System.setProperty("zkClientTimeout", "3000"); + + zkDir = dataDir.getAbsolutePath() + File.separator + + "zookeeper/server1/data"; + zkServer = new ZkTestServer(zkDir); + zkServer.run(); + System.setProperty("zkHost", zkServer.getZkAddress()); + AbstractZkTestCase.buildZooKeeper(zkServer.getZkHost(), zkServer + .getZkAddress(), "solrconfig.xml", "schema.xml"); + + log.info("####SETUP_START " + getName()); + dataDir1 = new File(dataDir + File.separator + "data1"); + dataDir1.mkdirs(); + + dataDir2 = new File(dataDir + File.separator + "data2"); + dataDir2.mkdirs(); + + 
dataDir3 = new File(dataDir + File.separator + "data3"); + dataDir3.mkdirs(); + + dataDir4 = new File(dataDir + File.separator + "data3"); + dataDir4.mkdirs(); + + // set some system properties for use by tests + System.setProperty("solr.test.sys.prop1", "propone"); + System.setProperty("solr.test.sys.prop2", "proptwo"); + + System.setProperty("hostPort", "1661"); + CoreContainer.Initializer init1 = new CoreContainer.Initializer() { + { + this.dataDir = CloudStateUpdateTest.this.dataDir1.getAbsolutePath(); + } + }; + + container1 = init1.initialize(); + System.clearProperty("hostPort"); + + System.setProperty("hostPort", "1662"); + init2 = new CoreContainer.Initializer() { + { + this.dataDir = CloudStateUpdateTest.this.dataDir2.getAbsolutePath(); + } + }; + + container2 = init2.initialize(); + System.clearProperty("hostPort"); + + System.setProperty("hostPort", "1663"); + CoreContainer.Initializer init3 = new CoreContainer.Initializer() { + { + this.dataDir = CloudStateUpdateTest.this.dataDir3.getAbsolutePath(); + } + }; + container3 = init3.initialize(); + System.clearProperty("hostPort"); + + log.info("####SETUP_END " + getName()); + + } + + @Test + public void testCoreRegistration() throws Exception { + System.setProperty("CLOUD_UPDATE_DELAY", "1"); + + ZkNodeProps props2 = new ZkNodeProps(); + props2.put("configName", "conf1"); + + SolrZkClient zkClient = new SolrZkClient(zkServer.getZkAddress(), AbstractZkTestCase.TIMEOUT); + zkClient.makePath("/collections/testcore", props2.store(), CreateMode.PERSISTENT); + zkClient.makePath("/collections/testcore/shards", CreateMode.PERSISTENT); + zkClient.close(); + + CoreDescriptor dcore = new CoreDescriptor(container1, "testcore", + "testcore"); + + dcore.setDataDir(dataDir4.getAbsolutePath()); + + SolrCore core = container1.create(dcore); + container1.register(core, false); + + ZkController zkController2 = container2.getZkController(); + + String host = zkController2.getHostName(); + + // slight pause - TODO: takes an 
oddly long amount of time to schedule tasks + // with almost no delay ... + CloudState cloudState2 = null; + Map slices = null; + for (int i = 75; i > 0; i--) { + cloudState2 = zkController2.getCloudState(); + slices = cloudState2.getSlices("testcore"); + + if (slices != null && slices.containsKey(host + ":1661_solr_testcore")) { + break; + } + Thread.sleep(500); + } + + assertNotNull(slices); + assertTrue(slices.containsKey(host + ":1661_solr_testcore")); + + Slice slice = slices.get(host + ":1661_solr_testcore"); + assertEquals(host + ":1661_solr_testcore", slice.getName()); + + Map shards = slice.getShards(); + + assertEquals(1, shards.size()); + + ZkNodeProps zkProps = shards.get(host + ":1661_solr_testcore"); + + assertNotNull(zkProps); + + assertEquals(host + ":1661_solr", zkProps.get("node_name")); + + assertEquals("http://" + host + ":1661/solr/testcore", zkProps.get("url")); + + Set liveNodes = cloudState2.getLiveNodes(); + assertNotNull(liveNodes); + assertEquals(3, liveNodes.size()); + + container3.shutdown(); + + // slight pause (15s timeout) for watch to trigger + for(int i = 0; i < (5 * 15); i++) { + if(zkController2.getCloudState().getLiveNodes().size() == 2) { + break; + } + Thread.sleep(200); + } + + assertEquals(2, zkController2.getCloudState().getLiveNodes().size()); + + // quickly kill / start client + + container2.getZkController().getZkClient().getSolrZooKeeper().getConnection() + .disconnect(); + container2.shutdown(); + + container2 = init2.initialize(); + + // pause for watch to trigger + for(int i = 0; i < 200; i++) { + if (container1.getZkController().getCloudState().liveNodesContain( + container2.getZkController().getNodeName())) { + break; + } + Thread.sleep(100); + } + + assertTrue(container1.getZkController().getCloudState().liveNodesContain( + container2.getZkController().getNodeName())); + + } + + @Override + public void tearDown() throws Exception { + if (VERBOSE) { + printLayout(zkServer.getZkHost()); + } + container1.shutdown(); 
+ container2.shutdown(); + container3.shutdown(); + zkServer.shutdown(); + super.tearDown(); + System.clearProperty("zkClientTimeout"); + System.clearProperty("zkHost"); + System.clearProperty("hostPort"); + System.clearProperty("CLOUD_UPDATE_DELAY"); + SolrConfig.severeErrors.clear(); + } + + private void printLayout(String zkHost) throws Exception { + SolrZkClient zkClient = new SolrZkClient( + zkHost, AbstractZkTestCase.TIMEOUT); + zkClient.printLayoutToStdOut(); + zkClient.close(); + } +} diff --git a/solr/src/test/org/apache/solr/cloud/ZkControllerTest.java b/solr/src/test/org/apache/solr/cloud/ZkControllerTest.java index f8eedd9cb5a..5b74b0926ac 100644 --- a/solr/src/test/org/apache/solr/cloud/ZkControllerTest.java +++ b/solr/src/test/org/apache/solr/cloud/ZkControllerTest.java @@ -1,225 +1,225 @@ -package org.apache.solr.cloud; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -import java.io.File; -import java.io.IOException; -import java.util.Map; - -import org.apache.solr.SolrTestCaseJ4; -import org.apache.solr.common.cloud.CloudState; -import org.apache.solr.common.cloud.Slice; -import org.apache.solr.common.cloud.SolrZkClient; -import org.apache.solr.common.cloud.ZkNodeProps; -import org.apache.solr.common.cloud.ZkStateReader; -import org.apache.solr.core.SolrConfig; -import org.apache.zookeeper.CreateMode; -import org.apache.zookeeper.KeeperException; - -import org.junit.BeforeClass; -import org.junit.Test; - -public class ZkControllerTest extends SolrTestCaseJ4 { - - private static final String TEST_NODE_NAME = "test_node_name"; - - private static final String URL3 = "http://localhost:3133/solr/core1"; - - private static final String URL2 = "http://localhost:3123/solr/core1"; - - private static final String SHARD3 = "localhost:3123_solr_core3"; - - private static final String SHARD2 = "localhost:3123_solr_core2"; - - private static final String SHARD1 = "localhost:3123_solr_core1"; - - private static final String COLLECTION_NAME = "collection1"; - - static final int TIMEOUT = 10000; - - private static final String URL1 = "http://localhost:3133/solr/core0"; - - private static final boolean DEBUG = false; - - @BeforeClass - public static void beforeClass() throws Exception { - initCore(); - } - - @Test - public void testReadShards() throws Exception { - String zkDir = dataDir.getAbsolutePath() + File.separator - + "zookeeper/server1/data"; - ZkTestServer server = null; - SolrZkClient zkClient = null; - ZkController zkController = null; - try { - server = new ZkTestServer(zkDir); - server.run(); - AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost()); - AbstractZkTestCase.makeSolrZkNode(server.getZkHost()); - - zkClient = new SolrZkClient(server.getZkAddress(), TIMEOUT); - String shardsPath = "/collections/collection1/shards/shardid1"; - zkClient.makePath(shardsPath); - - addShardToZk(zkClient, shardsPath, SHARD1, URL1); - 
addShardToZk(zkClient, shardsPath, SHARD2, URL2); - addShardToZk(zkClient, shardsPath, SHARD3, URL3); - - if (DEBUG) { - zkClient.printLayoutToStdOut(); - } - - zkController = new ZkController(server.getZkAddress(), - TIMEOUT, 1000, "localhost", "8983", "solr"); - - zkController.getZkStateReader().updateCloudState(true); - CloudState cloudInfo = zkController.getCloudState(); - Map slices = cloudInfo.getSlices("collection1"); - assertNotNull(slices); - - for (Slice slice : slices.values()) { - Map shards = slice.getShards(); - if (DEBUG) { - for (String shardName : shards.keySet()) { - ZkNodeProps props = shards.get(shardName); - System.out.println("shard:" + shardName); - System.out.println("props:" + props.toString()); - } - } - assertNotNull(shards.get(SHARD1)); - assertNotNull(shards.get(SHARD2)); - assertNotNull(shards.get(SHARD3)); - - ZkNodeProps props = shards.get(SHARD1); - assertEquals(URL1, props.get(ZkStateReader.URL_PROP)); - assertEquals(TEST_NODE_NAME, props.get(ZkStateReader.NODE_NAME)); - - props = shards.get(SHARD2); - assertEquals(URL2, props.get(ZkStateReader.URL_PROP)); - assertEquals(TEST_NODE_NAME, props.get(ZkStateReader.NODE_NAME)); - - props = shards.get(SHARD3); - assertEquals(URL3, props.get(ZkStateReader.URL_PROP)); - assertEquals(TEST_NODE_NAME, props.get(ZkStateReader.NODE_NAME)); - - } - - } finally { - if (zkClient != null) { - zkClient.close(); - } - if (zkController != null) { - zkController.close(); - } - if (server != null) { - server.shutdown(); - } - } - } - - @Test - public void testReadConfigName() throws Exception { - String zkDir = dataDir.getAbsolutePath() + File.separator - + "zookeeper/server1/data"; - - ZkTestServer server = new ZkTestServer(zkDir); - try { - server.run(); - - AbstractZkTestCase.makeSolrZkNode(server.getZkHost()); - - SolrZkClient zkClient = new SolrZkClient(server.getZkAddress(), TIMEOUT); - String actualConfigName = "firstConfig"; - - zkClient.makePath(ZkController.CONFIGS_ZKNODE + "/" + 
actualConfigName); - - ZkNodeProps props = new ZkNodeProps(); - props.put("configName", actualConfigName); - zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + COLLECTION_NAME , props.store(), CreateMode.PERSISTENT); - - if (DEBUG) { - zkClient.printLayoutToStdOut(); - } - zkClient.close(); - ZkController zkController = new ZkController(server.getZkAddress(), TIMEOUT, TIMEOUT, - "localhost", "8983", "/solr"); - try { - String configName = zkController.readConfigName(COLLECTION_NAME); - assertEquals(configName, actualConfigName); - } finally { - zkController.close(); - } - } finally { - - server.shutdown(); - } - - } - - @Test - public void testUploadToCloud() throws Exception { - String zkDir = dataDir.getAbsolutePath() + File.separator - + "zookeeper/server1/data"; - - ZkTestServer server = new ZkTestServer(zkDir); - ZkController zkController = null; - try { - server.run(); - - AbstractZkTestCase.makeSolrZkNode(server.getZkHost()); - - zkController = new ZkController(server.getZkAddress(), - TIMEOUT, 1000, "localhost", "8983", "/solr"); - - zkController.uploadToZK(getFile("solr/conf"), - ZkController.CONFIGS_ZKNODE + "/config1"); - - if (DEBUG) { - zkController.printLayoutToStdOut(); - } - - } finally { - if (zkController != null) { - zkController.close(); - } - server.shutdown(); - } - - } - - private void addShardToZk(SolrZkClient zkClient, String shardsPath, - String zkNodeName, String url) throws IOException, - KeeperException, InterruptedException { - - ZkNodeProps props = new ZkNodeProps(); - props.put(ZkStateReader.URL_PROP, url); - props.put(ZkStateReader.NODE_NAME, TEST_NODE_NAME); - byte[] bytes = props.store(); - - zkClient - .create(shardsPath + "/" + zkNodeName, bytes, CreateMode.PERSISTENT); - } - - @Override - public void tearDown() throws Exception { - SolrConfig.severeErrors.clear(); - super.tearDown(); - } -} +package org.apache.solr.cloud; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor 
license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +import java.io.File; +import java.io.IOException; +import java.util.Map; + +import org.apache.solr.SolrTestCaseJ4; +import org.apache.solr.common.cloud.CloudState; +import org.apache.solr.common.cloud.Slice; +import org.apache.solr.common.cloud.SolrZkClient; +import org.apache.solr.common.cloud.ZkNodeProps; +import org.apache.solr.common.cloud.ZkStateReader; +import org.apache.solr.core.SolrConfig; +import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.KeeperException; + +import org.junit.BeforeClass; +import org.junit.Test; + +public class ZkControllerTest extends SolrTestCaseJ4 { + + private static final String TEST_NODE_NAME = "test_node_name"; + + private static final String URL3 = "http://localhost:3133/solr/core1"; + + private static final String URL2 = "http://localhost:3123/solr/core1"; + + private static final String SHARD3 = "localhost:3123_solr_core3"; + + private static final String SHARD2 = "localhost:3123_solr_core2"; + + private static final String SHARD1 = "localhost:3123_solr_core1"; + + private static final String COLLECTION_NAME = "collection1"; + + static final int TIMEOUT = 10000; + + private static final String URL1 = "http://localhost:3133/solr/core0"; + + private static final boolean DEBUG = false; + + @BeforeClass + 
public static void beforeClass() throws Exception { + initCore(); + } + + @Test + public void testReadShards() throws Exception { + String zkDir = dataDir.getAbsolutePath() + File.separator + + "zookeeper/server1/data"; + ZkTestServer server = null; + SolrZkClient zkClient = null; + ZkController zkController = null; + try { + server = new ZkTestServer(zkDir); + server.run(); + AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost()); + AbstractZkTestCase.makeSolrZkNode(server.getZkHost()); + + zkClient = new SolrZkClient(server.getZkAddress(), TIMEOUT); + String shardsPath = "/collections/collection1/shards/shardid1"; + zkClient.makePath(shardsPath); + + addShardToZk(zkClient, shardsPath, SHARD1, URL1); + addShardToZk(zkClient, shardsPath, SHARD2, URL2); + addShardToZk(zkClient, shardsPath, SHARD3, URL3); + + if (DEBUG) { + zkClient.printLayoutToStdOut(); + } + + zkController = new ZkController(server.getZkAddress(), + TIMEOUT, 1000, "localhost", "8983", "solr"); + + zkController.getZkStateReader().updateCloudState(true); + CloudState cloudInfo = zkController.getCloudState(); + Map slices = cloudInfo.getSlices("collection1"); + assertNotNull(slices); + + for (Slice slice : slices.values()) { + Map shards = slice.getShards(); + if (DEBUG) { + for (String shardName : shards.keySet()) { + ZkNodeProps props = shards.get(shardName); + System.out.println("shard:" + shardName); + System.out.println("props:" + props.toString()); + } + } + assertNotNull(shards.get(SHARD1)); + assertNotNull(shards.get(SHARD2)); + assertNotNull(shards.get(SHARD3)); + + ZkNodeProps props = shards.get(SHARD1); + assertEquals(URL1, props.get(ZkStateReader.URL_PROP)); + assertEquals(TEST_NODE_NAME, props.get(ZkStateReader.NODE_NAME)); + + props = shards.get(SHARD2); + assertEquals(URL2, props.get(ZkStateReader.URL_PROP)); + assertEquals(TEST_NODE_NAME, props.get(ZkStateReader.NODE_NAME)); + + props = shards.get(SHARD3); + assertEquals(URL3, props.get(ZkStateReader.URL_PROP)); + 
assertEquals(TEST_NODE_NAME, props.get(ZkStateReader.NODE_NAME)); + + } + + } finally { + if (zkClient != null) { + zkClient.close(); + } + if (zkController != null) { + zkController.close(); + } + if (server != null) { + server.shutdown(); + } + } + } + + @Test + public void testReadConfigName() throws Exception { + String zkDir = dataDir.getAbsolutePath() + File.separator + + "zookeeper/server1/data"; + + ZkTestServer server = new ZkTestServer(zkDir); + try { + server.run(); + + AbstractZkTestCase.makeSolrZkNode(server.getZkHost()); + + SolrZkClient zkClient = new SolrZkClient(server.getZkAddress(), TIMEOUT); + String actualConfigName = "firstConfig"; + + zkClient.makePath(ZkController.CONFIGS_ZKNODE + "/" + actualConfigName); + + ZkNodeProps props = new ZkNodeProps(); + props.put("configName", actualConfigName); + zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/" + COLLECTION_NAME , props.store(), CreateMode.PERSISTENT); + + if (DEBUG) { + zkClient.printLayoutToStdOut(); + } + zkClient.close(); + ZkController zkController = new ZkController(server.getZkAddress(), TIMEOUT, TIMEOUT, + "localhost", "8983", "/solr"); + try { + String configName = zkController.readConfigName(COLLECTION_NAME); + assertEquals(configName, actualConfigName); + } finally { + zkController.close(); + } + } finally { + + server.shutdown(); + } + + } + + @Test + public void testUploadToCloud() throws Exception { + String zkDir = dataDir.getAbsolutePath() + File.separator + + "zookeeper/server1/data"; + + ZkTestServer server = new ZkTestServer(zkDir); + ZkController zkController = null; + try { + server.run(); + + AbstractZkTestCase.makeSolrZkNode(server.getZkHost()); + + zkController = new ZkController(server.getZkAddress(), + TIMEOUT, 1000, "localhost", "8983", "/solr"); + + zkController.uploadToZK(getFile("solr/conf"), + ZkController.CONFIGS_ZKNODE + "/config1"); + + if (DEBUG) { + zkController.printLayoutToStdOut(); + } + + } finally { + if (zkController != null) { + 
zkController.close(); + } + server.shutdown(); + } + + } + + private void addShardToZk(SolrZkClient zkClient, String shardsPath, + String zkNodeName, String url) throws IOException, + KeeperException, InterruptedException { + + ZkNodeProps props = new ZkNodeProps(); + props.put(ZkStateReader.URL_PROP, url); + props.put(ZkStateReader.NODE_NAME, TEST_NODE_NAME); + byte[] bytes = props.store(); + + zkClient + .create(shardsPath + "/" + zkNodeName, bytes, CreateMode.PERSISTENT); + } + + @Override + public void tearDown() throws Exception { + SolrConfig.severeErrors.clear(); + super.tearDown(); + } +} diff --git a/solr/src/test/org/apache/solr/cloud/ZkNodePropsTest.java b/solr/src/test/org/apache/solr/cloud/ZkNodePropsTest.java index e7516e2338f..3da482c4cf7 100644 --- a/solr/src/test/org/apache/solr/cloud/ZkNodePropsTest.java +++ b/solr/src/test/org/apache/solr/cloud/ZkNodePropsTest.java @@ -1,49 +1,49 @@ -package org.apache.solr.cloud; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -import java.io.IOException; - -import org.apache.solr.SolrTestCaseJ4; -import org.apache.solr.common.cloud.ZkNodeProps; -import org.junit.Test; - - -public class ZkNodePropsTest extends SolrTestCaseJ4 { - @Test - public void testBasic() throws IOException { - - ZkNodeProps props = new ZkNodeProps(); - props.put("prop1", "value1"); - props.put("prop2", "value2"); - props.put("prop3", "value3"); - props.put("prop4", "value4"); - props.put("prop5", "value5"); - props.put("prop6", "value6"); - byte[] bytes = props.store(); - - ZkNodeProps props2 = new ZkNodeProps(); - props2.load(bytes); - assertEquals("value1", props2.get("prop1")); - assertEquals("value2", props2.get("prop2")); - assertEquals("value3", props2.get("prop3")); - assertEquals("value4", props2.get("prop4")); - assertEquals("value5", props2.get("prop5")); - assertEquals("value6", props2.get("prop6")); - } -} +package org.apache.solr.cloud; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +import java.io.IOException; + +import org.apache.solr.SolrTestCaseJ4; +import org.apache.solr.common.cloud.ZkNodeProps; +import org.junit.Test; + + +public class ZkNodePropsTest extends SolrTestCaseJ4 { + @Test + public void testBasic() throws IOException { + + ZkNodeProps props = new ZkNodeProps(); + props.put("prop1", "value1"); + props.put("prop2", "value2"); + props.put("prop3", "value3"); + props.put("prop4", "value4"); + props.put("prop5", "value5"); + props.put("prop6", "value6"); + byte[] bytes = props.store(); + + ZkNodeProps props2 = new ZkNodeProps(); + props2.load(bytes); + assertEquals("value1", props2.get("prop1")); + assertEquals("value2", props2.get("prop2")); + assertEquals("value3", props2.get("prop3")); + assertEquals("value4", props2.get("prop4")); + assertEquals("value5", props2.get("prop5")); + assertEquals("value6", props2.get("prop6")); + } +} diff --git a/solr/src/test/org/apache/solr/cloud/ZkSolrClientTest.java b/solr/src/test/org/apache/solr/cloud/ZkSolrClientTest.java index 7aece955627..7358e1987b1 100644 --- a/solr/src/test/org/apache/solr/cloud/ZkSolrClientTest.java +++ b/solr/src/test/org/apache/solr/cloud/ZkSolrClientTest.java @@ -1,241 +1,241 @@ -package org.apache.solr.cloud; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -import java.io.File; -import java.util.concurrent.atomic.AtomicInteger; - -import junit.framework.TestCase; - -import org.apache.solr.common.cloud.SolrZkClient; -import org.apache.solr.core.SolrConfig; -import org.apache.solr.util.AbstractSolrTestCase; -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.WatchedEvent; -import org.apache.zookeeper.Watcher; - -public class ZkSolrClientTest extends AbstractSolrTestCase { - private static final boolean DEBUG = false; - - public void testConnect() throws Exception { - String zkDir = dataDir.getAbsolutePath() + File.separator - + "zookeeper/server1/data"; - ZkTestServer server = null; - - server = new ZkTestServer(zkDir); - server.run(); - - SolrZkClient zkClient = new SolrZkClient(server.getZkAddress(), 100); - - zkClient.close(); - server.shutdown(); - } - - public void testMakeRootNode() throws Exception { - String zkDir = dataDir.getAbsolutePath() + File.separator - + "zookeeper/server1/data"; - ZkTestServer server = null; - - server = new ZkTestServer(zkDir); - server.run(); - - AbstractZkTestCase.makeSolrZkNode(server.getZkHost()); - - SolrZkClient zkClient = new SolrZkClient(server.getZkHost(), - AbstractZkTestCase.TIMEOUT); - - assertTrue(zkClient.exists("/solr")); - - zkClient.close(); - server.shutdown(); - } - - public void testReconnect() throws Exception { - String zkDir = dataDir.getAbsolutePath() + File.separator - + "zookeeper/server1/data"; - ZkTestServer server = null; - SolrZkClient zkClient = null; - try { - server = new ZkTestServer(zkDir); - server.run(); - - AbstractZkTestCase.makeSolrZkNode(server.getZkHost()); - - zkClient = new SolrZkClient(server.getZkAddress(), AbstractZkTestCase.TIMEOUT); - String shardsPath = "/collections/collection1/shards"; - zkClient.makePath(shardsPath); - - zkClient.makePath("collections/collection1"); - int zkServerPort = 
server.getPort(); - // this tests disconnect state - server.shutdown(); - - Thread.sleep(80); - - - try { - zkClient.makePath("collections/collection2"); - TestCase.fail("Server should be down here"); - } catch (KeeperException.ConnectionLossException e) { - - } - - // bring server back up - server = new ZkTestServer(zkDir, zkServerPort); - server.run(); - - // TODO: can we do better? - // wait for reconnect - Thread.sleep(600); - - try { - zkClient.makePath("collections/collection3"); - } catch (KeeperException.ConnectionLossException e) { - Thread.sleep(5000); // try again in a bit - zkClient.makePath("collections/collection3"); - } - - if (DEBUG) { - zkClient.printLayoutToStdOut(); - } - - assertNotNull(zkClient.exists("/collections/collection3", null)); - assertNotNull(zkClient.exists("/collections/collection1", null)); - - // simulate session expiration - - // one option - long sessionId = zkClient.getSolrZooKeeper().getSessionId(); - server.expire(sessionId); - - // another option - //zkClient.getSolrZooKeeper().getConnection().disconnect(); - - // this tests expired state - - Thread.sleep(1000); // pause for reconnect - - for (int i = 0; i < 8; i++) { - try { - zkClient.makePath("collections/collection4"); - break; - } catch (KeeperException.SessionExpiredException e) { - - } catch (KeeperException.ConnectionLossException e) { - - } - Thread.sleep(1000 * i); - } - - if (DEBUG) { - zkClient.printLayoutToStdOut(); - } - - assertNotNull("Node does not exist, but it should", zkClient.exists("/collections/collection4", null)); - - } finally { - - if (zkClient != null) { - zkClient.close(); - } - if (server != null) { - server.shutdown(); - } - } - } - - public void testWatchChildren() throws Exception { - String zkDir = dataDir.getAbsolutePath() + File.separator - + "zookeeper/server1/data"; - - final AtomicInteger cnt = new AtomicInteger(); - ZkTestServer server = new ZkTestServer(zkDir); - server.run(); - Thread.sleep(400); - 
AbstractZkTestCase.makeSolrZkNode(server.getZkHost()); - final SolrZkClient zkClient = new SolrZkClient(server.getZkAddress(), AbstractZkTestCase.TIMEOUT); - try { - zkClient.makePath("/collections"); - - zkClient.getChildren("/collections", new Watcher() { - - public void process(WatchedEvent event) { - if (DEBUG) { - System.out.println("children changed"); - } - cnt.incrementAndGet(); - // remake watch - try { - zkClient.getChildren("/collections", this); - } catch (KeeperException e) { - throw new RuntimeException(e); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - } - }); - - zkClient.makePath("/collections/collection99/shards"); - - zkClient.makePath("collections/collection99/config=collection1"); - - zkClient.makePath("collections/collection99/config=collection3"); - - zkClient.makePath("/collections/collection97/shards"); - - if (DEBUG) { - zkClient.printLayoutToStdOut(); - } - - // pause for the watches to fire - Thread.sleep(700); - - if (cnt.intValue() < 2) { - Thread.sleep(4000); // wait a bit more - } - - assertEquals(2, cnt.intValue()); - - } finally { - - if (zkClient != null) { - zkClient.close(); - } - if (server != null) { - server.shutdown(); - } - } - } - - @Override - public String getSchemaFile() { - return null; - } - - @Override - public String getSolrConfigFile() { - return null; - } - - @Override - public void tearDown() throws Exception { - SolrConfig.severeErrors.clear(); - super.tearDown(); - } - -} +package org.apache.solr.cloud; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +import java.io.File; +import java.util.concurrent.atomic.AtomicInteger; + +import junit.framework.TestCase; + +import org.apache.solr.common.cloud.SolrZkClient; +import org.apache.solr.core.SolrConfig; +import org.apache.solr.util.AbstractSolrTestCase; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.WatchedEvent; +import org.apache.zookeeper.Watcher; + +public class ZkSolrClientTest extends AbstractSolrTestCase { + private static final boolean DEBUG = false; + + public void testConnect() throws Exception { + String zkDir = dataDir.getAbsolutePath() + File.separator + + "zookeeper/server1/data"; + ZkTestServer server = null; + + server = new ZkTestServer(zkDir); + server.run(); + + SolrZkClient zkClient = new SolrZkClient(server.getZkAddress(), 100); + + zkClient.close(); + server.shutdown(); + } + + public void testMakeRootNode() throws Exception { + String zkDir = dataDir.getAbsolutePath() + File.separator + + "zookeeper/server1/data"; + ZkTestServer server = null; + + server = new ZkTestServer(zkDir); + server.run(); + + AbstractZkTestCase.makeSolrZkNode(server.getZkHost()); + + SolrZkClient zkClient = new SolrZkClient(server.getZkHost(), + AbstractZkTestCase.TIMEOUT); + + assertTrue(zkClient.exists("/solr")); + + zkClient.close(); + server.shutdown(); + } + + public void testReconnect() throws Exception { + String zkDir = dataDir.getAbsolutePath() + File.separator + + "zookeeper/server1/data"; + ZkTestServer server = null; + SolrZkClient zkClient = null; + try { + server = new ZkTestServer(zkDir); + 
server.run(); + + AbstractZkTestCase.makeSolrZkNode(server.getZkHost()); + + zkClient = new SolrZkClient(server.getZkAddress(), AbstractZkTestCase.TIMEOUT); + String shardsPath = "/collections/collection1/shards"; + zkClient.makePath(shardsPath); + + zkClient.makePath("collections/collection1"); + int zkServerPort = server.getPort(); + // this tests disconnect state + server.shutdown(); + + Thread.sleep(80); + + + try { + zkClient.makePath("collections/collection2"); + TestCase.fail("Server should be down here"); + } catch (KeeperException.ConnectionLossException e) { + + } + + // bring server back up + server = new ZkTestServer(zkDir, zkServerPort); + server.run(); + + // TODO: can we do better? + // wait for reconnect + Thread.sleep(600); + + try { + zkClient.makePath("collections/collection3"); + } catch (KeeperException.ConnectionLossException e) { + Thread.sleep(5000); // try again in a bit + zkClient.makePath("collections/collection3"); + } + + if (DEBUG) { + zkClient.printLayoutToStdOut(); + } + + assertNotNull(zkClient.exists("/collections/collection3", null)); + assertNotNull(zkClient.exists("/collections/collection1", null)); + + // simulate session expiration + + // one option + long sessionId = zkClient.getSolrZooKeeper().getSessionId(); + server.expire(sessionId); + + // another option + //zkClient.getSolrZooKeeper().getConnection().disconnect(); + + // this tests expired state + + Thread.sleep(1000); // pause for reconnect + + for (int i = 0; i < 8; i++) { + try { + zkClient.makePath("collections/collection4"); + break; + } catch (KeeperException.SessionExpiredException e) { + + } catch (KeeperException.ConnectionLossException e) { + + } + Thread.sleep(1000 * i); + } + + if (DEBUG) { + zkClient.printLayoutToStdOut(); + } + + assertNotNull("Node does not exist, but it should", zkClient.exists("/collections/collection4", null)); + + } finally { + + if (zkClient != null) { + zkClient.close(); + } + if (server != null) { + server.shutdown(); + } + } + } + 
+ public void testWatchChildren() throws Exception { + String zkDir = dataDir.getAbsolutePath() + File.separator + + "zookeeper/server1/data"; + + final AtomicInteger cnt = new AtomicInteger(); + ZkTestServer server = new ZkTestServer(zkDir); + server.run(); + Thread.sleep(400); + AbstractZkTestCase.makeSolrZkNode(server.getZkHost()); + final SolrZkClient zkClient = new SolrZkClient(server.getZkAddress(), AbstractZkTestCase.TIMEOUT); + try { + zkClient.makePath("/collections"); + + zkClient.getChildren("/collections", new Watcher() { + + public void process(WatchedEvent event) { + if (DEBUG) { + System.out.println("children changed"); + } + cnt.incrementAndGet(); + // remake watch + try { + zkClient.getChildren("/collections", this); + } catch (KeeperException e) { + throw new RuntimeException(e); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + }); + + zkClient.makePath("/collections/collection99/shards"); + + zkClient.makePath("collections/collection99/config=collection1"); + + zkClient.makePath("collections/collection99/config=collection3"); + + zkClient.makePath("/collections/collection97/shards"); + + if (DEBUG) { + zkClient.printLayoutToStdOut(); + } + + // pause for the watches to fire + Thread.sleep(700); + + if (cnt.intValue() < 2) { + Thread.sleep(4000); // wait a bit more + } + + assertEquals(2, cnt.intValue()); + + } finally { + + if (zkClient != null) { + zkClient.close(); + } + if (server != null) { + server.shutdown(); + } + } + } + + @Override + public String getSchemaFile() { + return null; + } + + @Override + public String getSolrConfigFile() { + return null; + } + + @Override + public void tearDown() throws Exception { + SolrConfig.severeErrors.clear(); + super.tearDown(); + } + +} diff --git a/solr/src/test/org/apache/solr/cloud/ZkTestServer.java b/solr/src/test/org/apache/solr/cloud/ZkTestServer.java index ef1275823cb..d435910d790 100644 --- a/solr/src/test/org/apache/solr/cloud/ZkTestServer.java +++ 
b/solr/src/test/org/apache/solr/cloud/ZkTestServer.java @@ -1,319 +1,319 @@ -package org.apache.solr.cloud; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -import java.io.BufferedReader; -import java.io.File; -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.OutputStream; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.Socket; -import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.List; - -import javax.management.JMException; - -import org.apache.solr.SolrTestCaseJ4; -import org.apache.zookeeper.jmx.ManagedUtil; -import org.apache.zookeeper.server.NIOServerCnxn; -import org.apache.zookeeper.server.ServerConfig; -import org.apache.zookeeper.server.ZooKeeperServer; -import org.apache.zookeeper.server.SessionTracker.Session; -import org.apache.zookeeper.server.persistence.FileTxnSnapLog; -import org.apache.zookeeper.server.quorum.QuorumPeerConfig.ConfigException; - -public class ZkTestServer { - - protected final ZKServerMain zkServer = new ZKServerMain(); - - private String zkDir; - - private int clientPort; - - private Thread zooThread; - - class ZKServerMain { - - private NIOServerCnxn.Factory cnxnFactory; - private 
ZooKeeperServer zooKeeperServer; - - protected void initializeAndRun(String[] args) throws ConfigException, - IOException { - try { - ManagedUtil.registerLog4jMBeans(); - } catch (JMException e) { - - } - - ServerConfig config = new ServerConfig(); - if (args.length == 1) { - config.parse(args[0]); - } else { - config.parse(args); - } - - runFromConfig(config); - } - - /** - * Run from a ServerConfig. - * - * @param config ServerConfig to use. - * @throws IOException - */ - public void runFromConfig(ServerConfig config) throws IOException { - try { - // Note that this thread isn't going to be doing anything else, - // so rather than spawning another thread, we will just call - // run() in this thread. - // create a file logger url from the command line args - zooKeeperServer = new ZooKeeperServer(); - - FileTxnSnapLog ftxn = new FileTxnSnapLog(new File(config - .getDataLogDir()), new File(config.getDataDir())); - zooKeeperServer.setTxnLogFactory(ftxn); - zooKeeperServer.setTickTime(config.getTickTime()); - cnxnFactory = new NIOServerCnxn.Factory(config.getClientPortAddress(), config - .getMaxClientCnxns()); - cnxnFactory.startup(zooKeeperServer); - cnxnFactory.join(); - if (zooKeeperServer.isRunning()) { - zooKeeperServer.shutdown(); - } - } catch (InterruptedException e) { - } - } - - /** - * Shutdown the serving instance - * @throws IOException - */ - protected void shutdown() throws IOException { - zooKeeperServer.shutdown(); - zooKeeperServer.getZKDatabase().close(); - waitForServerDown(getZkHost() + ":" + getPort(), 5000); - cnxnFactory.shutdown(); - } - - public int getLocalPort() { - if (cnxnFactory == null) { - throw new IllegalStateException("A port has not yet been selected"); - } - int port = cnxnFactory.getLocalPort(); - if (port == 0) { - throw new IllegalStateException("A port has not yet been selected"); - } - return port; - } - } - - public ZkTestServer(String zkDir) { - this.zkDir = zkDir; - } - - public ZkTestServer(String zkDir, int port) { - 
this.zkDir = zkDir; - this.clientPort = port; - } - - public String getZkHost() { - return "127.0.0.1:" + zkServer.getLocalPort(); - } - - public String getZkAddress() { - return "127.0.0.1:" + zkServer.getLocalPort() + "/solr"; - } - - public int getPort() { - return zkServer.getLocalPort(); - } - - public void expire(final long sessionId) { - zkServer.zooKeeperServer.expire(new Session() { - @Override - public long getSessionId() { - return sessionId; - } - @Override - public int getTimeout() { - return 4000; - }}); - } - - public void run() throws InterruptedException { - // we don't call super.setUp - zooThread = new Thread() { - - @Override - public void run() { - ServerConfig config = new ServerConfig() { - - { - setClientPort(ZkTestServer.this.clientPort); - this.dataDir = zkDir; - this.dataLogDir = zkDir; - this.tickTime = 1500; - } - - public void setClientPort(int clientPort) { - if (clientPortAddress != null) { - try { - this.clientPortAddress = new InetSocketAddress( - InetAddress.getByName(clientPortAddress.getHostName()), clientPort); - } catch (UnknownHostException e) { - throw new RuntimeException(e); - } - } else { - this.clientPortAddress = new InetSocketAddress(clientPort); - } - } - }; - - try { - zkServer.runFromConfig(config); - } catch (Throwable e) { - throw new RuntimeException(e); - } - } - }; - - zooThread.setDaemon(true); - zooThread.start(); - - int cnt = 0; - int port = -1; - try { - port = getPort(); - } catch(IllegalStateException e) { - - } - while (port < 1) { - Thread.sleep(100); - try { - port = getPort(); - } catch(IllegalStateException e) { - - } - if (cnt == 40) { - throw new RuntimeException("Could not get the port for ZooKeeper server"); - } - cnt++; - } - } - - @SuppressWarnings("deprecation") - public void shutdown() throws IOException { - SolrTestCaseJ4.ignoreException("java.nio.channels.ClosedChannelException"); - // TODO: this can log an exception while trying to unregister a JMX MBean - try { - zkServer.shutdown(); - } 
finally { - SolrTestCaseJ4.resetExceptionIgnores(); - } - } - - - public static boolean waitForServerDown(String hp, long timeout) { - long start = System.currentTimeMillis(); - while (true) { - try { - HostPort hpobj = parseHostPortList(hp).get(0); - send4LetterWord(hpobj.host, hpobj.port, "stat"); - } catch (IOException e) { - return true; - } - - if (System.currentTimeMillis() > start + timeout) { - break; - } - try { - Thread.sleep(250); - } catch (InterruptedException e) { - // ignore - } - } - return false; - } - - public static class HostPort { - String host; - int port; - - HostPort(String host, int port) { - this.host = host; - this.port = port; - } - } - - /** - * Send the 4letterword - * @param host the destination host - * @param port the destination port - * @param cmd the 4letterword - * @return - * @throws IOException - */ - public static String send4LetterWord(String host, int port, String cmd) - throws IOException - { - - Socket sock = new Socket(host, port); - BufferedReader reader = null; - try { - OutputStream outstream = sock.getOutputStream(); - outstream.write(cmd.getBytes()); - outstream.flush(); - // this replicates NC - close the output stream before reading - sock.shutdownOutput(); - - reader = - new BufferedReader( - new InputStreamReader(sock.getInputStream())); - StringBuilder sb = new StringBuilder(); - String line; - while((line = reader.readLine()) != null) { - sb.append(line + "\n"); - } - return sb.toString(); - } finally { - sock.close(); - if (reader != null) { - reader.close(); - } - } - } - - public static List parseHostPortList(String hplist) { - ArrayList alist = new ArrayList(); - for (String hp : hplist.split(",")) { - int idx = hp.lastIndexOf(':'); - String host = hp.substring(0, idx); - int port; - try { - port = Integer.parseInt(hp.substring(idx + 1)); - } catch (RuntimeException e) { - throw new RuntimeException("Problem parsing " + hp + e.toString()); - } - alist.add(new HostPort(host, port)); - } - return alist; - } 
-} +package org.apache.solr.cloud; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +import java.io.BufferedReader; +import java.io.File; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.List; + +import javax.management.JMException; + +import org.apache.solr.SolrTestCaseJ4; +import org.apache.zookeeper.jmx.ManagedUtil; +import org.apache.zookeeper.server.NIOServerCnxn; +import org.apache.zookeeper.server.ServerConfig; +import org.apache.zookeeper.server.ZooKeeperServer; +import org.apache.zookeeper.server.SessionTracker.Session; +import org.apache.zookeeper.server.persistence.FileTxnSnapLog; +import org.apache.zookeeper.server.quorum.QuorumPeerConfig.ConfigException; + +public class ZkTestServer { + + protected final ZKServerMain zkServer = new ZKServerMain(); + + private String zkDir; + + private int clientPort; + + private Thread zooThread; + + class ZKServerMain { + + private NIOServerCnxn.Factory cnxnFactory; + private ZooKeeperServer zooKeeperServer; + + protected void 
initializeAndRun(String[] args) throws ConfigException, + IOException { + try { + ManagedUtil.registerLog4jMBeans(); + } catch (JMException e) { + + } + + ServerConfig config = new ServerConfig(); + if (args.length == 1) { + config.parse(args[0]); + } else { + config.parse(args); + } + + runFromConfig(config); + } + + /** + * Run from a ServerConfig. + * + * @param config ServerConfig to use. + * @throws IOException + */ + public void runFromConfig(ServerConfig config) throws IOException { + try { + // Note that this thread isn't going to be doing anything else, + // so rather than spawning another thread, we will just call + // run() in this thread. + // create a file logger url from the command line args + zooKeeperServer = new ZooKeeperServer(); + + FileTxnSnapLog ftxn = new FileTxnSnapLog(new File(config + .getDataLogDir()), new File(config.getDataDir())); + zooKeeperServer.setTxnLogFactory(ftxn); + zooKeeperServer.setTickTime(config.getTickTime()); + cnxnFactory = new NIOServerCnxn.Factory(config.getClientPortAddress(), config + .getMaxClientCnxns()); + cnxnFactory.startup(zooKeeperServer); + cnxnFactory.join(); + if (zooKeeperServer.isRunning()) { + zooKeeperServer.shutdown(); + } + } catch (InterruptedException e) { + } + } + + /** + * Shutdown the serving instance + * @throws IOException + */ + protected void shutdown() throws IOException { + zooKeeperServer.shutdown(); + zooKeeperServer.getZKDatabase().close(); + waitForServerDown(getZkHost() + ":" + getPort(), 5000); + cnxnFactory.shutdown(); + } + + public int getLocalPort() { + if (cnxnFactory == null) { + throw new IllegalStateException("A port has not yet been selected"); + } + int port = cnxnFactory.getLocalPort(); + if (port == 0) { + throw new IllegalStateException("A port has not yet been selected"); + } + return port; + } + } + + public ZkTestServer(String zkDir) { + this.zkDir = zkDir; + } + + public ZkTestServer(String zkDir, int port) { + this.zkDir = zkDir; + this.clientPort = port; + } + + 
public String getZkHost() { + return "127.0.0.1:" + zkServer.getLocalPort(); + } + + public String getZkAddress() { + return "127.0.0.1:" + zkServer.getLocalPort() + "/solr"; + } + + public int getPort() { + return zkServer.getLocalPort(); + } + + public void expire(final long sessionId) { + zkServer.zooKeeperServer.expire(new Session() { + @Override + public long getSessionId() { + return sessionId; + } + @Override + public int getTimeout() { + return 4000; + }}); + } + + public void run() throws InterruptedException { + // we don't call super.setUp + zooThread = new Thread() { + + @Override + public void run() { + ServerConfig config = new ServerConfig() { + + { + setClientPort(ZkTestServer.this.clientPort); + this.dataDir = zkDir; + this.dataLogDir = zkDir; + this.tickTime = 1500; + } + + public void setClientPort(int clientPort) { + if (clientPortAddress != null) { + try { + this.clientPortAddress = new InetSocketAddress( + InetAddress.getByName(clientPortAddress.getHostName()), clientPort); + } catch (UnknownHostException e) { + throw new RuntimeException(e); + } + } else { + this.clientPortAddress = new InetSocketAddress(clientPort); + } + } + }; + + try { + zkServer.runFromConfig(config); + } catch (Throwable e) { + throw new RuntimeException(e); + } + } + }; + + zooThread.setDaemon(true); + zooThread.start(); + + int cnt = 0; + int port = -1; + try { + port = getPort(); + } catch(IllegalStateException e) { + + } + while (port < 1) { + Thread.sleep(100); + try { + port = getPort(); + } catch(IllegalStateException e) { + + } + if (cnt == 40) { + throw new RuntimeException("Could not get the port for ZooKeeper server"); + } + cnt++; + } + } + + @SuppressWarnings("deprecation") + public void shutdown() throws IOException { + SolrTestCaseJ4.ignoreException("java.nio.channels.ClosedChannelException"); + // TODO: this can log an exception while trying to unregister a JMX MBean + try { + zkServer.shutdown(); + } finally { + SolrTestCaseJ4.resetExceptionIgnores(); + 
} + } + + + public static boolean waitForServerDown(String hp, long timeout) { + long start = System.currentTimeMillis(); + while (true) { + try { + HostPort hpobj = parseHostPortList(hp).get(0); + send4LetterWord(hpobj.host, hpobj.port, "stat"); + } catch (IOException e) { + return true; + } + + if (System.currentTimeMillis() > start + timeout) { + break; + } + try { + Thread.sleep(250); + } catch (InterruptedException e) { + // ignore + } + } + return false; + } + + public static class HostPort { + String host; + int port; + + HostPort(String host, int port) { + this.host = host; + this.port = port; + } + } + + /** + * Send the 4letterword + * @param host the destination host + * @param port the destination port + * @param cmd the 4letterword + * @return + * @throws IOException + */ + public static String send4LetterWord(String host, int port, String cmd) + throws IOException + { + + Socket sock = new Socket(host, port); + BufferedReader reader = null; + try { + OutputStream outstream = sock.getOutputStream(); + outstream.write(cmd.getBytes()); + outstream.flush(); + // this replicates NC - close the output stream before reading + sock.shutdownOutput(); + + reader = + new BufferedReader( + new InputStreamReader(sock.getInputStream())); + StringBuilder sb = new StringBuilder(); + String line; + while((line = reader.readLine()) != null) { + sb.append(line + "\n"); + } + return sb.toString(); + } finally { + sock.close(); + if (reader != null) { + reader.close(); + } + } + } + + public static List parseHostPortList(String hplist) { + ArrayList alist = new ArrayList(); + for (String hp : hplist.split(",")) { + int idx = hp.lastIndexOf(':'); + String host = hp.substring(0, idx); + int port; + try { + port = Integer.parseInt(hp.substring(idx + 1)); + } catch (RuntimeException e) { + throw new RuntimeException("Problem parsing " + hp + e.toString()); + } + alist.add(new HostPort(host, port)); + } + return alist; + } +} From 277dfa0e8812f8000102ed9b7bedac8b6b8e878b Mon 
Sep 17 00:00:00 2001 From: Michael McCandless Date: Sun, 30 Jan 2011 18:06:37 +0000 Subject: [PATCH 060/185] LUCENE-2900: allow explicit control over whether deletes must be applied when pulling NRT reader git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065337 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/CHANGES.txt | 7 ++++ .../queryParser/standard/TestQPHelper.java | 2 +- .../lucene/spatial/tier/TestDistance.java | 2 +- .../apache/lucene/index/DirectoryReader.java | 10 ++++-- .../org/apache/lucene/index/IndexReader.java | 36 ++++++++++++------- .../org/apache/lucene/index/IndexWriter.java | 16 +++++---- .../org/apache/lucene/TestExternalCodecs.java | 4 +-- .../lucene/index/TestIndexWriterReader.java | 2 +- .../apache/lucene/index/TestNRTThreads.java | 4 +-- .../lucene/queryParser/TestQueryParser.java | 2 +- .../lucene/search/TestCachingSpanFilter.java | 2 +- .../search/TestCachingWrapperFilter.java | 2 +- .../search/TestElevationComparator.java | 2 +- .../apache/lucene/search/TestFieldCache.java | 2 +- .../org/apache/lucene/search/TestSort.java | 2 +- .../search/function/TestValueSource.java | 2 +- .../lucene/search/payloads/PayloadHelper.java | 2 +- .../lucene/store/TestFileSwitchDirectory.java | 2 +- .../lucene/util/automaton/fst/TestFSTs.java | 2 +- .../byTask/tasks/NearRealtimeReaderTask.java | 2 +- 20 files changed, 66 insertions(+), 39 deletions(-) diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index 1a29524de6e..d666278b314 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -635,6 +635,13 @@ API Changes it should keep it itself. Fixed Scorers to pass their parent Weight, so that Scorer.visitSubScorers (LUCENE-2590) will work correctly. (Robert Muir, Doron Cohen) + +* LUCENE-2900: When opening a near-real-time (NRT) reader + (IndexReader.re/open(IndexWriter)) you can now specify whether + deletes should be applied. Applying deletes can be costly, and some + expert use cases can handle seeing deleted documents returned. 
The + deletes remain buffered so that the next time you open an NRT reader + and pass true, all deletes will be a applied. (Mike McCandless) Bug fixes diff --git a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java index 4883f6bef05..ff8a4678c67 100644 --- a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java +++ b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java @@ -1277,7 +1277,7 @@ public class TestQPHelper extends LuceneTestCase { Document doc = new Document(); doc.add(newField("field", "", Field.Store.NO, Field.Index.ANALYZED)); w.addDocument(doc); - IndexReader r = IndexReader.open(w); + IndexReader r = IndexReader.open(w, true); IndexSearcher s = new IndexSearcher(r); Query q = new StandardQueryParser(new CannedAnalyzer()).parse("\"a\"", "field"); diff --git a/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java b/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java index f63e6acbcf3..7aaa919a335 100644 --- a/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java +++ b/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java @@ -99,7 +99,7 @@ public class TestDistance extends LuceneTestCase { public void testLatLongFilterOnDeletedDocs() throws Exception { writer.deleteDocuments(new Term("name", "Potomac")); - IndexReader r = IndexReader.open(writer); + IndexReader r = IndexReader.open(writer, true); LatLongDistanceFilter f = new LatLongDistanceFilter(new QueryWrapperFilter(new MatchAllDocsQuery()), lat, lng, 1.0, latField, lngField); diff --git a/lucene/src/java/org/apache/lucene/index/DirectoryReader.java b/lucene/src/java/org/apache/lucene/index/DirectoryReader.java index 06c4d4009be..344a91159c7 100644 --- 
a/lucene/src/java/org/apache/lucene/index/DirectoryReader.java +++ b/lucene/src/java/org/apache/lucene/index/DirectoryReader.java @@ -70,6 +70,8 @@ class DirectoryReader extends IndexReader implements Cloneable { // opened on a past IndexCommit: private long maxIndexVersion; + private final boolean applyAllDeletes; + // static IndexReader open(final Directory directory, final IndexDeletionPolicy deletionPolicy, final IndexCommit commit, final boolean readOnly, // final int termInfosIndexDivisor) throws CorruptIndexException, IOException { // return open(directory, deletionPolicy, commit, readOnly, termInfosIndexDivisor, null); @@ -107,6 +109,7 @@ class DirectoryReader extends IndexReader implements Cloneable { this.codecs = codecs; } readerFinishedListeners = new MapBackedSet(new ConcurrentHashMap()); + applyAllDeletes = false; // To reduce the chance of hitting FileNotFound // (and having to retry), we open segments in @@ -138,9 +141,11 @@ class DirectoryReader extends IndexReader implements Cloneable { } // Used by near real-time search - DirectoryReader(IndexWriter writer, SegmentInfos infos, int termInfosIndexDivisor, CodecProvider codecs) throws IOException { + DirectoryReader(IndexWriter writer, SegmentInfos infos, int termInfosIndexDivisor, CodecProvider codecs, boolean applyAllDeletes) throws IOException { this.directory = writer.getDirectory(); this.readOnly = true; + this.applyAllDeletes = applyAllDeletes; // saved for reopen + segmentInfos = (SegmentInfos) infos.clone();// make sure we clone otherwise we share mutable state with IW this.termInfosIndexDivisor = termInfosIndexDivisor; if (codecs == null) { @@ -193,6 +198,7 @@ class DirectoryReader extends IndexReader implements Cloneable { this.segmentInfos = infos; this.termInfosIndexDivisor = termInfosIndexDivisor; this.readerFinishedListeners = readerFinishedListeners; + applyAllDeletes = false; if (codecs == null) { this.codecs = CodecProvider.getDefault(); @@ -401,7 +407,7 @@ class DirectoryReader 
extends IndexReader implements Cloneable { // TODO: right now we *always* make a new reader; in // the future we could have write make some effort to // detect that no changes have occurred - IndexReader reader = writer.getReader(); + IndexReader reader = writer.getReader(applyAllDeletes); reader.readerFinishedListeners = readerFinishedListeners; return reader; } diff --git a/lucene/src/java/org/apache/lucene/index/IndexReader.java b/lucene/src/java/org/apache/lucene/index/IndexReader.java index c73c514edf4..4aa108ec827 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexReader.java +++ b/lucene/src/java/org/apache/lucene/index/IndexReader.java @@ -295,24 +295,26 @@ public abstract class IndexReader implements Cloneable,Closeable { /** * Open a near real time IndexReader from the {@link org.apache.lucene.index.IndexWriter}. * - * * @param writer The IndexWriter to open from + * @param applyAllDeletes If true, all buffered deletes will + * be applied (made visible) in the returned reader. If + * false, the deletes are not applied but remain buffered + * (in IndexWriter) so that they will be applied in the + * future. Applying deletes can be costly, so if your app + * can tolerate deleted documents being returned you might + * gain some performance by passing false. * @return The new IndexReader * @throws CorruptIndexException * @throws IOException if there is a low-level IO error * - * @see #reopen(IndexWriter) + * @see #reopen(IndexWriter,boolean) * * @lucene.experimental */ - public static IndexReader open(final IndexWriter writer) throws CorruptIndexException, IOException { - return writer.getReader(); + public static IndexReader open(final IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException { + return writer.getReader(applyAllDeletes); } - - - - /** Expert: returns an IndexReader reading the index in the given * {@link IndexCommit}. 
You should pass readOnly=true, since it * gives much better concurrent performance, unless you @@ -617,18 +619,26 @@ public abstract class IndexReader implements Cloneable,Closeable { * if you attempt to reopen any of those readers, you'll * hit an {@link AlreadyClosedException}.

* - * @lucene.experimental - * * @return IndexReader that covers entire index plus all * changes made so far by this IndexWriter instance * + * @param writer The IndexWriter to open from + * @param applyAllDeletes If true, all buffered deletes will + * be applied (made visible) in the returned reader. If + * false, the deletes are not applied but remain buffered + * (in IndexWriter) so that they will be applied in the + * future. Applying deletes can be costly, so if your app + * can tolerate deleted documents being returned you might + * gain some performance by passing false. + * * @throws IOException + * + * @lucene.experimental */ - public IndexReader reopen(IndexWriter writer) throws CorruptIndexException, IOException { - return writer.getReader(); + public IndexReader reopen(IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException { + return writer.getReader(applyAllDeletes); } - /** * Efficiently clones the IndexReader (sharing most * internal state). diff --git a/lucene/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/src/java/org/apache/lucene/index/IndexWriter.java index 20f7b35bbf8..666ca8231d1 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexWriter.java +++ b/lucene/src/java/org/apache/lucene/index/IndexWriter.java @@ -274,6 +274,10 @@ public class IndexWriter implements Closeable { // for testing boolean anyNonBulkMerges; + IndexReader getReader() throws IOException { + return getReader(true); + } + /** * Expert: returns a readonly reader, covering all * committed as well as un-committed changes to the index. 
@@ -333,7 +337,7 @@ public class IndexWriter implements Closeable { * * @throws IOException */ - IndexReader getReader() throws IOException { + IndexReader getReader(boolean applyAllDeletes) throws IOException { ensureOpen(); final long tStart = System.currentTimeMillis(); @@ -352,8 +356,8 @@ public class IndexWriter implements Closeable { // just like we do when loading segments_N IndexReader r; synchronized(this) { - flush(false, true); - r = new DirectoryReader(this, segmentInfos, config.getReaderTermsIndexDivisor(), codecs); + flush(false, applyAllDeletes); + r = new DirectoryReader(this, segmentInfos, config.getReaderTermsIndexDivisor(), codecs, applyAllDeletes); if (infoStream != null) { message("return reader version=" + r.getVersion() + " reader=" + r); } @@ -2463,9 +2467,9 @@ public class IndexWriter implements Closeable { * to the Directory. * @param triggerMerge if true, we may merge segments (if * deletes or docs were flushed) if necessary - * @param flushDeletes whether pending deletes should also + * @param applyAllDeletes whether pending deletes should also */ - protected final void flush(boolean triggerMerge, boolean flushDeletes) throws CorruptIndexException, IOException { + protected final void flush(boolean triggerMerge, boolean applyAllDeletes) throws CorruptIndexException, IOException { // NOTE: this method cannot be sync'd because // maybeMerge() in turn calls mergeScheduler.merge which @@ -2476,7 +2480,7 @@ public class IndexWriter implements Closeable { // We can be called during close, when closing==true, so we must pass false to ensureOpen: ensureOpen(false); - if (doFlush(flushDeletes) && triggerMerge) { + if (doFlush(applyAllDeletes) && triggerMerge) { maybeMerge(); } } diff --git a/lucene/src/test/org/apache/lucene/TestExternalCodecs.java b/lucene/src/test/org/apache/lucene/TestExternalCodecs.java index e7ff3954542..0de0ff4038c 100644 --- a/lucene/src/test/org/apache/lucene/TestExternalCodecs.java +++ 
b/lucene/src/test/org/apache/lucene/TestExternalCodecs.java @@ -660,7 +660,7 @@ public class TestExternalCodecs extends LuceneTestCase { } w.deleteDocuments(new Term("id", "77")); - IndexReader r = IndexReader.open(w); + IndexReader r = IndexReader.open(w, true); IndexReader[] subs = r.getSequentialSubReaders(); // test each segment for(int i=0;i 1); diff --git a/lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java b/lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java index 07221a2c6b1..5349d18eeb6 100644 --- a/lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java +++ b/lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java @@ -126,7 +126,7 @@ public class PayloadHelper { doc.add(new Field(NO_PAYLOAD_FIELD, English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED)); writer.addDocument(doc); } - reader = IndexReader.open(writer); + reader = IndexReader.open(writer, true); writer.close(); IndexSearcher searcher = new IndexSearcher(reader); diff --git a/lucene/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java b/lucene/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java index a5ab76eb65c..606ee1e692b 100644 --- a/lucene/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java +++ b/lucene/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java @@ -49,7 +49,7 @@ public class TestFileSwitchDirectory extends LuceneTestCase { setMergePolicy(newLogMergePolicy(false)) ); TestIndexWriterReader.createIndexNoClose(true, "ram", writer); - IndexReader reader = IndexReader.open(writer); + IndexReader reader = IndexReader.open(writer, true); assertEquals(100, reader.maxDoc()); writer.commit(); // we should see only fdx,fdt files here diff --git a/lucene/src/test/org/apache/lucene/util/automaton/fst/TestFSTs.java b/lucene/src/test/org/apache/lucene/util/automaton/fst/TestFSTs.java index f7d54753271..39dc27be1e2 100644 --- 
a/lucene/src/test/org/apache/lucene/util/automaton/fst/TestFSTs.java +++ b/lucene/src/test/org/apache/lucene/util/automaton/fst/TestFSTs.java @@ -960,7 +960,7 @@ public class TestFSTs extends LuceneTestCase { writer.addDocument(doc); docCount++; } - IndexReader r = IndexReader.open(writer); + IndexReader r = IndexReader.open(writer, true); writer.close(); final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(random.nextBoolean()); Builder builder = new Builder(FST.INPUT_TYPE.BYTE1, 0, 0, true, outputs); diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NearRealtimeReaderTask.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NearRealtimeReaderTask.java index 398c72fdc2c..47ea3f428d9 100644 --- a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NearRealtimeReaderTask.java +++ b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NearRealtimeReaderTask.java @@ -59,7 +59,7 @@ public class NearRealtimeReaderTask extends PerfTask { } long t = System.currentTimeMillis(); - IndexReader r = IndexReader.open(w); + IndexReader r = IndexReader.open(w, true); runData.setIndexReader(r); // Transfer our reference to runData r.decRef(); From e7088279f754fe5809a78302eda2a0874a30dbf7 Mon Sep 17 00:00:00 2001 From: Uwe Schindler Date: Sun, 30 Jan 2011 18:30:34 +0000 Subject: [PATCH 061/185] LUCENE-1253: LengthFilter (and Solr's KeepWordTokenFilter) now require up front specification of enablePositionIncrement. Together with StopFilter they have a common base class (FilteringTokenFilter) that handles the position increments automatically. 
Implementors only need to override an accept() method that filters tokens git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065343 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/CHANGES.txt | 6 ++ .../lucene/analysis/core/StopFilter.java | 55 +---------- .../miscellaneous/KeepWordFilter.java | 14 ++- .../analysis/miscellaneous/LengthFilter.java | 25 ++--- .../analysis/util/FilteringTokenFilter.java | 96 +++++++++++++++++++ .../miscellaneous/TestKeepWordFilter.java | 20 +++- .../miscellaneous/TestLengthFilter.java | 25 +++-- .../solr/analysis/KeepWordFilterFactory.java | 29 ++++-- .../solr/analysis/LengthFilterFactory.java | 5 +- .../solr/analysis/LengthFilterTest.java | 12 ++- 10 files changed, 186 insertions(+), 101 deletions(-) create mode 100644 modules/analysis/common/src/java/org/apache/lucene/analysis/util/FilteringTokenFilter.java diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index d666278b314..79ded77817e 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -643,6 +643,12 @@ API Changes deletes remain buffered so that the next time you open an NRT reader and pass true, all deletes will be a applied. (Mike McCandless) +* LUCENE-1253: LengthFilter (and Solr's KeepWordTokenFilter) now + require up front specification of enablePositionIncrement. Together with + StopFilter they have a common base class (FilteringTokenFilter) that handles + the position increments automatically. Implementors only need to override an + accept() method that filters tokens. 
(Uwe Schindler, Robert Muir) + Bug fixes * LUCENE-2249: ParallelMultiSearcher should shut down thread pool on diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/core/StopFilter.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/core/StopFilter.java index 0aba57fd08e..45b847a833e 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/core/StopFilter.java +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/core/StopFilter.java @@ -22,10 +22,9 @@ import java.util.Arrays; import java.util.List; import java.util.Set; -import org.apache.lucene.analysis.TokenFilter; +import org.apache.lucene.analysis.util.FilteringTokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.util.CharArraySet; import org.apache.lucene.queryParser.QueryParser; import org.apache.lucene.util.Version; @@ -42,14 +41,10 @@ import org.apache.lucene.util.Version; * increments are preserved * */ -public final class StopFilter extends TokenFilter { +public final class StopFilter extends FilteringTokenFilter { private final CharArraySet stopWords; - private boolean enablePositionIncrements = true; - private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); - private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class); - /** * Construct a token stream filtering the given input. If @@ -75,7 +70,7 @@ public final class StopFilter extends TokenFilter { */ public StopFilter(Version matchVersion, TokenStream input, Set stopWords, boolean ignoreCase) { - super(input); + super(true, input); this.stopWords = stopWords instanceof CharArraySet ? 
(CharArraySet) stopWords : new CharArraySet(matchVersion, stopWords, ignoreCase); } @@ -157,48 +152,8 @@ public final class StopFilter extends TokenFilter { * Returns the next input Token whose term() is not a stop word. */ @Override - public final boolean incrementToken() throws IOException { - // return the first non-stop word found - int skippedPositions = 0; - while (input.incrementToken()) { - if (!stopWords.contains(termAtt.buffer(), 0, termAtt.length())) { - if (enablePositionIncrements) { - posIncrAtt.setPositionIncrement(posIncrAtt.getPositionIncrement() + skippedPositions); - } - return true; - } - skippedPositions += posIncrAtt.getPositionIncrement(); - } - // reached EOS -- return false - return false; + protected boolean accept() throws IOException { + return !stopWords.contains(termAtt.buffer(), 0, termAtt.length()); } - /** - * @see #setEnablePositionIncrements(boolean) - */ - public boolean getEnablePositionIncrements() { - return enablePositionIncrements; - } - - /** - * If true, this StopFilter will preserve - * positions of the incoming tokens (ie, accumulate and - * set position increments of the removed stop tokens). - * Generally, true is best as it does not - * lose information (positions of the original tokens) - * during indexing. - * - * Default is true. - * - *

When set, when a token is stopped - * (omitted), the position increment of the following - * token is incremented. - * - *

NOTE: be sure to also - * set {@link QueryParser#setEnablePositionIncrements} if - * you use QueryParser to create queries. - */ - public void setEnablePositionIncrements(boolean enable) { - this.enablePositionIncrements = enable; - } } diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/KeepWordFilter.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/KeepWordFilter.java index e488fe4dd46..935c96f5bb7 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/KeepWordFilter.java +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/KeepWordFilter.java @@ -21,6 +21,7 @@ import java.io.IOException; import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.util.FilteringTokenFilter; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.util.CharArraySet; @@ -30,22 +31,19 @@ import org.apache.lucene.analysis.util.CharArraySet; * * @since solr 1.3 */ -public final class KeepWordFilter extends TokenFilter { +public final class KeepWordFilter extends FilteringTokenFilter { private final CharArraySet words; private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); /** The words set passed to this constructor will be directly used by this filter * and should not be modified, */ - public KeepWordFilter(TokenStream in, CharArraySet words) { - super(in); + public KeepWordFilter(boolean enablePositionIncrements, TokenStream in, CharArraySet words) { + super(enablePositionIncrements, in); this.words = words; } @Override - public boolean incrementToken() throws IOException { - while (input.incrementToken()) { - if (words.contains(termAtt.buffer(), 0, termAtt.length())) return true; - } - return false; + public boolean accept() throws IOException { + return words.contains(termAtt.buffer(), 0, termAtt.length()); } } diff 
--git a/modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LengthFilter.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LengthFilter.java index bfccddbeab4..3f36f2f48e2 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LengthFilter.java +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LengthFilter.java @@ -21,6 +21,7 @@ import java.io.IOException; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.TokenFilter; +import org.apache.lucene.analysis.util.FilteringTokenFilter; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; /** @@ -29,7 +30,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; * Note: Length is calculated as the number of UTF-16 code units. *

*/ -public final class LengthFilter extends TokenFilter { +public final class LengthFilter extends FilteringTokenFilter { private final int min; private final int max; @@ -40,27 +41,15 @@ public final class LengthFilter extends TokenFilter { * Build a filter that removes words that are too long or too * short from the text. */ - public LengthFilter(TokenStream in, int min, int max) - { - super(in); + public LengthFilter(boolean enablePositionIncrements, TokenStream in, int min, int max) { + super(enablePositionIncrements, in); this.min = min; this.max = max; } - /** - * Returns the next input Token whose term() is the right len - */ @Override - public final boolean incrementToken() throws IOException { - // return the first non-stop word found - while (input.incrementToken()) { - int len = termAtt.length(); - if (len >= min && len <= max) { - return true; - } - // note: else we ignore it but should we index each part of it? - } - // reached EOS -- return false - return false; + public boolean accept() throws IOException { + final int len = termAtt.length(); + return (len >= min && len <= max); } } diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/util/FilteringTokenFilter.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/util/FilteringTokenFilter.java new file mode 100644 index 00000000000..aa5d41fdc7c --- /dev/null +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/util/FilteringTokenFilter.java @@ -0,0 +1,96 @@ +package org.apache.lucene.analysis.util; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.IOException; + +import org.apache.lucene.analysis.TokenFilter; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; +import org.apache.lucene.queryParser.QueryParser; // for javadoc + +/** + * Abstract base class for TokenFilters that may remove tokens. + * You have to implement {@link #accept} and return a boolean if the current + * token should be preserved. {@link #incrementToken} uses this method + * to decide if a token should be passed to the caller. + */ +public abstract class FilteringTokenFilter extends TokenFilter { + + private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class); + private boolean enablePositionIncrements; // no init needed, as ctor enforces setting value! + + public FilteringTokenFilter(boolean enablePositionIncrements, TokenStream input){ + super(input); + this.enablePositionIncrements = enablePositionIncrements; + } + + /** Override this method and return if the current input token should be returned by {@link #incrementToken}. 
*/ + protected abstract boolean accept() throws IOException; + + @Override + public final boolean incrementToken() throws IOException { + if (enablePositionIncrements) { + int skippedPositions = 0; + while (input.incrementToken()) { + if (accept()) { + if (skippedPositions != 0) { + posIncrAtt.setPositionIncrement(posIncrAtt.getPositionIncrement() + skippedPositions); + } + return true; + } + skippedPositions += posIncrAtt.getPositionIncrement(); + } + } else { + while (input.incrementToken()) { + if (accept()) { + return true; + } + } + } + // reached EOS -- return false + return false; + } + + /** + * @see #setEnablePositionIncrements(boolean) + */ + public boolean getEnablePositionIncrements() { + return enablePositionIncrements; + } + + /** + * If true, this TokenFilter will preserve + * positions of the incoming tokens (ie, accumulate and + * set position increments of the removed tokens). + * Generally, true is best as it does not + * lose information (positions of the original tokens) + * during indexing. + * + *

When set, when a token is stopped + * (omitted), the position increment of the following + * token is incremented. + * + *

NOTE: be sure to also + * set {@link QueryParser#setEnablePositionIncrements} if + * you use QueryParser to create queries. + */ + public void setEnablePositionIncrements(boolean enable) { + this.enablePositionIncrements = enable; + } +} diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepWordFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepWordFilter.java index 5039b4bc47a..2ec9cb92872 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepWordFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepWordFilter.java @@ -35,16 +35,26 @@ public class TestKeepWordFilter extends BaseTokenStreamTestCase { words.add( "aaa" ); words.add( "bbb" ); - String input = "aaa BBB ccc ddd EEE"; + String input = "xxx yyy aaa zzz BBB ccc ddd EEE"; // Test Stopwords TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(input)); - stream = new KeepWordFilter(stream, new CharArraySet(TEST_VERSION_CURRENT, words, true)); - assertTokenStreamContents(stream, new String[] { "aaa", "BBB" }); + stream = new KeepWordFilter(true, stream, new CharArraySet(TEST_VERSION_CURRENT, words, true)); + assertTokenStreamContents(stream, new String[] { "aaa", "BBB" }, new int[] { 3, 2 }); // Now force case stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(input)); - stream = new KeepWordFilter(stream, new CharArraySet(TEST_VERSION_CURRENT,words, false)); - assertTokenStreamContents(stream, new String[] { "aaa" }); + stream = new KeepWordFilter(true, stream, new CharArraySet(TEST_VERSION_CURRENT,words, false)); + assertTokenStreamContents(stream, new String[] { "aaa" }, new int[] { 3 }); + + // Test Stopwords + stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(input)); + stream = new KeepWordFilter(false, stream, new CharArraySet(TEST_VERSION_CURRENT, words, 
true)); + assertTokenStreamContents(stream, new String[] { "aaa", "BBB" }, new int[] { 1, 1 }); + + // Now force case + stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(input)); + stream = new KeepWordFilter(false, stream, new CharArraySet(TEST_VERSION_CURRENT,words, false)); + assertTokenStreamContents(stream, new String[] { "aaa" }, new int[] { 1 }); } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLengthFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLengthFilter.java index de8b7311d19..070164c0161 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLengthFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLengthFilter.java @@ -24,19 +24,24 @@ import java.io.StringReader; public class TestLengthFilter extends BaseTokenStreamTestCase { - public void testFilter() throws Exception { + public void testFilterNoPosIncr() throws Exception { TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("short toolong evenmuchlongertext a ab toolong foo")); - LengthFilter filter = new LengthFilter(stream, 2, 6); - CharTermAttribute termAtt = filter.getAttribute(CharTermAttribute.class); + LengthFilter filter = new LengthFilter(false, stream, 2, 6); + assertTokenStreamContents(filter, + new String[]{"short", "ab", "foo"}, + new int[]{1, 1, 1} + ); + } - assertTrue(filter.incrementToken()); - assertEquals("short", termAtt.toString()); - assertTrue(filter.incrementToken()); - assertEquals("ab", termAtt.toString()); - assertTrue(filter.incrementToken()); - assertEquals("foo", termAtt.toString()); - assertFalse(filter.incrementToken()); + public void testFilterWithPosIncr() throws Exception { + TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, + new StringReader("short toolong evenmuchlongertext a ab toolong foo")); + LengthFilter filter = new 
LengthFilter(true, stream, 2, 6); + assertTokenStreamContents(filter, + new String[]{"short", "ab", "foo"}, + new int[]{1, 4, 2} + ); } } diff --git a/solr/src/java/org/apache/solr/analysis/KeepWordFilterFactory.java b/solr/src/java/org/apache/solr/analysis/KeepWordFilterFactory.java index eaff7c72341..d9b8ee90a88 100644 --- a/solr/src/java/org/apache/solr/analysis/KeepWordFilterFactory.java +++ b/solr/src/java/org/apache/solr/analysis/KeepWordFilterFactory.java @@ -23,22 +23,27 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.KeepWordFilter; import org.apache.lucene.analysis.util.CharArraySet; +import java.util.Map; import java.util.Set; import java.io.IOException; /** * @version $Id$ - * @since solr 1.3 */ public class KeepWordFilterFactory extends BaseTokenFilterFactory implements ResourceLoaderAware { - private CharArraySet words; - private boolean ignoreCase; + @Override + public void init(Map args) { + super.init(args); + assureMatchVersion(); + } public void inform(ResourceLoader loader) { String wordFiles = args.get("words"); ignoreCase = getBoolean("ignoreCase", false); - if (wordFiles != null) { + enablePositionIncrements = getBoolean("enablePositionIncrements",false); + + if (wordFiles != null) { try { words = getWordSet(loader, wordFiles, ignoreCase); } catch (IOException e) { @@ -47,6 +52,10 @@ public class KeepWordFilterFactory extends BaseTokenFilterFactory implements Res } } + private CharArraySet words; + private boolean ignoreCase; + private boolean enablePositionIncrements; + /** * Set the keep word list. 
* NOTE: if ignoreCase==true, the words are expected to be lowercase @@ -62,15 +71,19 @@ public class KeepWordFilterFactory extends BaseTokenFilterFactory implements Res this.ignoreCase = ignoreCase; } - public KeepWordFilter create(TokenStream input) { - return new KeepWordFilter(input, words); + public boolean isEnablePositionIncrements() { + return enablePositionIncrements; + } + + public boolean isIgnoreCase() { + return ignoreCase; } public CharArraySet getWords() { return words; } - public boolean isIgnoreCase() { - return ignoreCase; + public KeepWordFilter create(TokenStream input) { + return new KeepWordFilter(enablePositionIncrements, input, words); } } diff --git a/solr/src/java/org/apache/solr/analysis/LengthFilterFactory.java b/solr/src/java/org/apache/solr/analysis/LengthFilterFactory.java index f8105c77709..74d67422269 100644 --- a/solr/src/java/org/apache/solr/analysis/LengthFilterFactory.java +++ b/solr/src/java/org/apache/solr/analysis/LengthFilterFactory.java @@ -27,6 +27,7 @@ import java.util.Map; */ public class LengthFilterFactory extends BaseTokenFilterFactory { int min,max; + boolean enablePositionIncrements; public static final String MIN_KEY = "min"; public static final String MAX_KEY = "max"; @@ -35,8 +36,10 @@ public class LengthFilterFactory extends BaseTokenFilterFactory { super.init(args); min=Integer.parseInt(args.get(MIN_KEY)); max=Integer.parseInt(args.get(MAX_KEY)); + enablePositionIncrements = getBoolean("enablePositionIncrements",false); } + public LengthFilter create(TokenStream input) { - return new LengthFilter(input,min,max); + return new LengthFilter(enablePositionIncrements, input,min,max); } } diff --git a/solr/src/test/org/apache/solr/analysis/LengthFilterTest.java b/solr/src/test/org/apache/solr/analysis/LengthFilterTest.java index 66ba3a89281..95f5dc1cf25 100644 --- a/solr/src/test/org/apache/solr/analysis/LengthFilterTest.java +++ b/solr/src/test/org/apache/solr/analysis/LengthFilterTest.java @@ -31,9 +31,19 @@ public 
class LengthFilterTest extends BaseTokenTestCase { Map args = new HashMap(); args.put(LengthFilterFactory.MIN_KEY, String.valueOf(4)); args.put(LengthFilterFactory.MAX_KEY, String.valueOf(10)); + // default: args.put("enablePositionIncrements", "false"); factory.init(args); String test = "foo foobar super-duper-trooper"; TokenStream stream = factory.create(new WhitespaceTokenizer(DEFAULT_VERSION, new StringReader(test))); - assertTokenStreamContents(stream, new String[] { "foobar" }); + assertTokenStreamContents(stream, new String[] { "foobar" }, new int[] { 1 }); + + factory = new LengthFilterFactory(); + args = new HashMap(); + args.put(LengthFilterFactory.MIN_KEY, String.valueOf(4)); + args.put(LengthFilterFactory.MAX_KEY, String.valueOf(10)); + args.put("enablePositionIncrements", "true"); + factory.init(args); + stream = factory.create(new WhitespaceTokenizer(DEFAULT_VERSION, new StringReader(test))); + assertTokenStreamContents(stream, new String[] { "foobar" }, new int[] { 2 }); } } \ No newline at end of file From 146c27063f021d224783d6b0ad0fad25c28f4bae Mon Sep 17 00:00:00 2001 From: Yonik Seeley Date: Sun, 30 Jan 2011 22:26:22 +0000 Subject: [PATCH 062/185] SOLR-236: fix bug where numFound was always zero if group.offset and group.limit were both zero. 
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065406 13f79535-47bb-0310-9956-ffa450edef68 --- solr/src/java/org/apache/solr/search/Grouping.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/solr/src/java/org/apache/solr/search/Grouping.java b/solr/src/java/org/apache/solr/search/Grouping.java index 992109130a5..3075ac4700c 100755 --- a/solr/src/java/org/apache/solr/search/Grouping.java +++ b/solr/src/java/org/apache/solr/search/Grouping.java @@ -77,10 +77,11 @@ public class Grouping { int docsToCollect = getMax(off, len, max); // TODO: implement a DocList impl that doesn't need to start at offset=0 - TopDocs topDocs = collector.topDocs(0, docsToCollect); + TopDocs topDocs = collector.topDocs(0, Math.max(docsToCollect,1)); // 0 isn't supported as a valid value + int docsCollected = Math.min(docsToCollect, topDocs.scoreDocs.length); - int ids[] = new int[topDocs.scoreDocs.length]; - float[] scores = needScores ? new float[topDocs.scoreDocs.length] : null; + int ids[] = new int[docsCollected]; + float[] scores = needScores ? 
new float[docsCollected] : null; for (int i=0; i Date: Sun, 30 Jan 2011 22:34:36 +0000 Subject: [PATCH 063/185] typo police git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065410 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/lucene/analysis/NumericTokenStream.java | 2 +- lucene/src/java/org/apache/lucene/analysis/package.html | 2 +- .../java/org/apache/lucene/document/AbstractField.java | 2 +- .../org/apache/lucene/index/ConcurrentMergeScheduler.java | 2 +- .../src/java/org/apache/lucene/index/IndexFileNames.java | 2 +- .../org/apache/lucene/index/IndexNotFoundException.java | 2 +- lucene/src/java/org/apache/lucene/index/IndexWriter.java | 4 ++-- .../java/org/apache/lucene/index/NoMergeScheduler.java | 2 +- .../org/apache/lucene/index/PayloadProcessorProvider.java | 2 +- .../org/apache/lucene/index/PerFieldCodecWrapper.java | 2 +- .../lucene/index/PersistentSnapshotDeletionPolicy.java | 2 +- .../lucene/index/codecs/FixedGapTermsIndexReader.java | 2 +- .../apache/lucene/index/codecs/preflex/PreFlexFields.java | 2 +- .../lucene/index/codecs/preflex/SegmentTermEnum.java | 2 +- .../src/java/org/apache/lucene/search/BoostAttribute.java | 2 +- .../src/java/org/apache/lucene/search/IndexSearcher.java | 2 +- .../src/java/org/apache/lucene/search/MultiCollector.java | 2 +- lucene/src/java/org/apache/lucene/search/SortField.java | 4 ++-- .../java/org/apache/lucene/search/TopTermsRewrite.java | 2 +- .../apache/lucene/search/function/CustomScoreQuery.java | 2 +- .../src/java/org/apache/lucene/store/FSLockFactory.java | 2 +- .../java/org/apache/lucene/store/NativeFSLockFactory.java | 2 +- .../java/org/apache/lucene/store/SimpleFSLockFactory.java | 2 +- lucene/src/java/org/apache/lucene/util/ArrayUtil.java | 8 ++++---- lucene/src/java/org/apache/lucene/util/BytesRefHash.java | 2 +- .../src/java/org/apache/lucene/util/CollectionUtil.java | 4 ++-- .../java/org/apache/lucene/util/DoubleBarrelLRUCache.java | 2 +- 
lucene/src/java/org/apache/lucene/util/IOUtils.java | 2 +- lucene/src/java/org/apache/lucene/util/SetOnce.java | 2 +- .../java/org/apache/lucene/util/automaton/fst/FST.java | 2 +- .../java/org/apache/lucene/util/packed/PackedInts.java | 2 +- 31 files changed, 37 insertions(+), 37 deletions(-) diff --git a/lucene/src/java/org/apache/lucene/analysis/NumericTokenStream.java b/lucene/src/java/org/apache/lucene/analysis/NumericTokenStream.java index 502c3a214ac..b98a24646ca 100644 --- a/lucene/src/java/org/apache/lucene/analysis/NumericTokenStream.java +++ b/lucene/src/java/org/apache/lucene/analysis/NumericTokenStream.java @@ -135,7 +135,7 @@ public final class NumericTokenStream extends TokenStream { } } - /** Implementatation of {@link NumericTermAttribute}. + /** Implementation of {@link NumericTermAttribute}. * @lucene.internal * @since 4.0 */ diff --git a/lucene/src/java/org/apache/lucene/analysis/package.html b/lucene/src/java/org/apache/lucene/analysis/package.html index d98f84f5d66..28569e483ba 100644 --- a/lucene/src/java/org/apache/lucene/analysis/package.html +++ b/lucene/src/java/org/apache/lucene/analysis/package.html @@ -305,7 +305,7 @@ with the TokenStream.

  • Attribute instances are reused for all tokens of a document. Thus, a TokenStream/-Filter needs to update the appropriate Attribute(s) in incrementToken(). The consumer, commonly the Lucene indexer, consumes the data in the -Attributes and then calls incrementToken() again until it retuns false, which indicates that the end of the stream +Attributes and then calls incrementToken() again until it returns false, which indicates that the end of the stream was reached. This means that in each call of incrementToken() a TokenStream/-Filter can safely overwrite the data in the Attribute instances.
  • diff --git a/lucene/src/java/org/apache/lucene/document/AbstractField.java b/lucene/src/java/org/apache/lucene/document/AbstractField.java index 35287d882d4..a3f880c7357 100755 --- a/lucene/src/java/org/apache/lucene/document/AbstractField.java +++ b/lucene/src/java/org/apache/lucene/document/AbstractField.java @@ -77,7 +77,7 @@ public abstract class AbstractField implements Fieldable { * used to compute the norm factor for the field. By * default, in the {@link * org.apache.lucene.search.Similarity#computeNorm(String, - * FieldInvertState)} method, the boost value is multipled + * FieldInvertState)} method, the boost value is multiplied * by the {@link * org.apache.lucene.search.Similarity#lengthNorm(String, * int)} and then diff --git a/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java b/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java index 1927235cdce..b9cafc7c5c2 100644 --- a/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java +++ b/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java @@ -145,7 +145,7 @@ public class ConcurrentMergeScheduler extends MergeScheduler { /** * Called whenever the running merges have changed, to pause & unpause * threads. This method sorts the merge threads by their merge size in - * descending order and then pauses/unpauses threads from first to lsat -- + * descending order and then pauses/unpauses threads from first to last -- * that way, smaller merges are guaranteed to run before larger ones. 
*/ protected synchronized void updateMergeThreads() { diff --git a/lucene/src/java/org/apache/lucene/index/IndexFileNames.java b/lucene/src/java/org/apache/lucene/index/IndexFileNames.java index 26be89baf0f..d8a5a878203 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexFileNames.java +++ b/lucene/src/java/org/apache/lucene/index/IndexFileNames.java @@ -204,7 +204,7 @@ public final class IndexFileNames { /** * Returns true if the given filename ends with the given extension. One - * should provide a pure extension, withouth '.'. + * should provide a pure extension, without '.'. */ public static boolean matchesExtension(String filename, String ext) { // It doesn't make a difference whether we allocate a StringBuilder ourself diff --git a/lucene/src/java/org/apache/lucene/index/IndexNotFoundException.java b/lucene/src/java/org/apache/lucene/index/IndexNotFoundException.java index 5e7107448b8..dc0a6fa0d1e 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexNotFoundException.java +++ b/lucene/src/java/org/apache/lucene/index/IndexNotFoundException.java @@ -21,7 +21,7 @@ import java.io.FileNotFoundException; /** * Signals that no index was found in the Directory. Possibly because the - * directory is empty, however can slso indicate an index corruption. + * directory is empty, however can also indicate an index corruption. */ public final class IndexNotFoundException extends FileNotFoundException { diff --git a/lucene/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/src/java/org/apache/lucene/index/IndexWriter.java index 666ca8231d1..3eaea73a36f 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexWriter.java +++ b/lucene/src/java/org/apache/lucene/index/IndexWriter.java @@ -686,7 +686,7 @@ public class IndexWriter implements Closeable { * according conf.getOpenMode(). * @param conf * the configuration settings according to which IndexWriter should - * be initalized. + * be initialized. 
* @throws CorruptIndexException * if the index is corrupt * @throws LockObtainFailedException @@ -2463,7 +2463,7 @@ public class IndexWriter implements Closeable { } /** - * Flush all in-memory buffered udpates (adds and deletes) + * Flush all in-memory buffered updates (adds and deletes) * to the Directory. * @param triggerMerge if true, we may merge segments (if * deletes or docs were flushed) if necessary diff --git a/lucene/src/java/org/apache/lucene/index/NoMergeScheduler.java b/lucene/src/java/org/apache/lucene/index/NoMergeScheduler.java index 158abe842ce..e98723b55b3 100644 --- a/lucene/src/java/org/apache/lucene/index/NoMergeScheduler.java +++ b/lucene/src/java/org/apache/lucene/index/NoMergeScheduler.java @@ -23,7 +23,7 @@ import java.io.IOException; * A {@link MergeScheduler} which never executes any merges. It is also a * singleton and can be accessed through {@link NoMergeScheduler#INSTANCE}. Use * it if you want to prevent an {@link IndexWriter} from ever executing merges, - * irregardles of the {@link MergePolicy} used. Note that you can achieve the + * irregardless of the {@link MergePolicy} used. Note that you can achieve the * same thing by using {@link NoMergePolicy}, however with * {@link NoMergeScheduler} you also ensure that no unnecessary code of any * {@link MergeScheduler} implementation is ever executed. Hence it is diff --git a/lucene/src/java/org/apache/lucene/index/PayloadProcessorProvider.java b/lucene/src/java/org/apache/lucene/index/PayloadProcessorProvider.java index e9fe11adfb8..bf825c1dacd 100644 --- a/lucene/src/java/org/apache/lucene/index/PayloadProcessorProvider.java +++ b/lucene/src/java/org/apache/lucene/index/PayloadProcessorProvider.java @@ -24,7 +24,7 @@ import org.apache.lucene.util.BytesRef; /** * Provides a {@link DirPayloadProcessor} to be used for a {@link Directory}. 
- * This allows using differnt {@link DirPayloadProcessor}s for different + * This allows using different {@link DirPayloadProcessor}s for different * directories, for e.g. to perform different processing of payloads of * different directories. *

    diff --git a/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java b/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java index 51c92321f54..cea213aca35 100644 --- a/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java +++ b/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java @@ -214,7 +214,7 @@ final class PerFieldCodecWrapper extends Codec { @Override public void files(Directory dir, SegmentInfo info, String codecId, Set files) throws IOException { - // ignore codecid sicne segmentCodec will assign it per codec + // ignore codecid since segmentCodec will assign it per codec segmentCodecs.files(dir, info, files); } diff --git a/lucene/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java b/lucene/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java index fc09266c377..f4869ea926a 100644 --- a/lucene/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java +++ b/lucene/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java @@ -103,7 +103,7 @@ public class PersistentSnapshotDeletionPolicy extends SnapshotDeletionPolicy { * @param mode * specifies whether a new index should be created, deleting all * existing snapshots information (immediately), or open an existing - * index, initializing the class with the snapsthots information. + * index, initializing the class with the snapshots information. * @param matchVersion * specifies the {@link Version} that should be used when opening the * IndexWriter. 
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/FixedGapTermsIndexReader.java b/lucene/src/java/org/apache/lucene/index/codecs/FixedGapTermsIndexReader.java index aba1e76cb10..c4350694cb0 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/FixedGapTermsIndexReader.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/FixedGapTermsIndexReader.java @@ -44,7 +44,7 @@ public class FixedGapTermsIndexReader extends TermsIndexReaderBase { // number of places to multiply out the actual ord, and we // will overflow int during those multiplies. So to avoid // having to upgrade each multiple to long in multiple - // places (error proned), we use long here: + // places (error prone), we use long here: private long totalIndexInterval; private int indexDivisor; diff --git a/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java b/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java index f7bbef7906c..65b7460f782 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java @@ -538,7 +538,7 @@ public class PreFlexFields extends FieldsProducer { // We can easily detect S in UTF8: if a byte has // prefix 11110 (0xf0), then that byte and the // following 3 bytes encode a single unicode codepoint - // in S. Similary,we can detect E: if a byte has + // in S. Similarly, we can detect E: if a byte has // prefix 1110111 (0xee), then that byte and the // following 2 bytes encode a single unicode codepoint // in E. 
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermEnum.java b/lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermEnum.java index a8703ae83f1..fb7c8ceec46 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermEnum.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermEnum.java @@ -45,7 +45,7 @@ public final class SegmentTermEnum implements Cloneable { // whenever you add a new format, make it 1 smaller (negative version logic)! public static final int FORMAT_CURRENT = FORMAT_VERSION_UTF8_LENGTH_IN_BYTES; - // when removing support for old versions, levae the last supported version here + // when removing support for old versions, leave the last supported version here public static final int FORMAT_MINIMUM = FORMAT_VERSION_UTF8_LENGTH_IN_BYTES; private TermBuffer termBuffer = new TermBuffer(); diff --git a/lucene/src/java/org/apache/lucene/search/BoostAttribute.java b/lucene/src/java/org/apache/lucene/search/BoostAttribute.java index e9187a85445..58f44633b8d 100644 --- a/lucene/src/java/org/apache/lucene/search/BoostAttribute.java +++ b/lucene/src/java/org/apache/lucene/search/BoostAttribute.java @@ -27,7 +27,7 @@ import org.apache.lucene.index.TermsEnum; // javadocs only * {@link TopTermsRewrite} mode. * {@link FuzzyQuery} is using this to take the edit distance into account. *

    Please note: This attribute is intended to be added only by the TermsEnum - * to itsself in its constructor and consumed by the {@link MultiTermQuery.RewriteMethod}. + * to itself in its constructor and consumed by the {@link MultiTermQuery.RewriteMethod}. * @lucene.internal */ public interface BoostAttribute extends Attribute { diff --git a/lucene/src/java/org/apache/lucene/search/IndexSearcher.java b/lucene/src/java/org/apache/lucene/search/IndexSearcher.java index cd1dd0a208e..6e885c003ff 100644 --- a/lucene/src/java/org/apache/lucene/search/IndexSearcher.java +++ b/lucene/src/java/org/apache/lucene/search/IndexSearcher.java @@ -787,7 +787,7 @@ public class IndexSearcher { } public Iterator iterator() { - // use the shortcut here - this is only used in a privat context + // use the shortcut here - this is only used in a private context return this; } } diff --git a/lucene/src/java/org/apache/lucene/search/MultiCollector.java b/lucene/src/java/org/apache/lucene/search/MultiCollector.java index 08e08403d33..682413d7a18 100644 --- a/lucene/src/java/org/apache/lucene/search/MultiCollector.java +++ b/lucene/src/java/org/apache/lucene/search/MultiCollector.java @@ -26,7 +26,7 @@ import org.apache.lucene.search.Scorer; /** * A {@link Collector} which allows running a search with several * {@link Collector}s. It offers a static {@link #wrap} method which accepts a - * list of collectots and wraps them with {@link MultiCollector}, while + * list of collectors and wraps them with {@link MultiCollector}, while * filtering out the null null ones. 
*/ public class MultiCollector extends Collector { diff --git a/lucene/src/java/org/apache/lucene/search/SortField.java b/lucene/src/java/org/apache/lucene/search/SortField.java index e058002f374..663261ebd59 100644 --- a/lucene/src/java/org/apache/lucene/search/SortField.java +++ b/lucene/src/java/org/apache/lucene/search/SortField.java @@ -36,7 +36,7 @@ import org.apache.lucene.util.StringHelper; public class SortField implements Serializable { - /** Sort by document score (relevancy). Sort values are Float and higher + /** Sort by document score (relevance). Sort values are Float and higher * values are at the front. */ public static final int SCORE = 0; @@ -84,7 +84,7 @@ implements Serializable { * uses ordinals to do the sorting. */ public static final int STRING_VAL = 11; - /** Represents sorting by document score (relevancy). */ + /** Represents sorting by document score (relevance). */ public static final SortField FIELD_SCORE = new SortField (null, SCORE); /** Represents sorting by document number (index order). 
*/ diff --git a/lucene/src/java/org/apache/lucene/search/TopTermsRewrite.java b/lucene/src/java/org/apache/lucene/search/TopTermsRewrite.java index b75ec16b57a..472e99de705 100644 --- a/lucene/src/java/org/apache/lucene/search/TopTermsRewrite.java +++ b/lucene/src/java/org/apache/lucene/search/TopTermsRewrite.java @@ -87,7 +87,7 @@ public abstract class TopTermsRewrite extends TermCollectingRew @Override public boolean collect(BytesRef bytes) throws IOException { final float boost = boostAtt.getBoost(); - // ignore uncompetetive hits + // ignore uncompetitive hits if (stQueue.size() == maxSize) { final ScoreTerm t = stQueue.peek(); if (boost < t.boost) diff --git a/lucene/src/java/org/apache/lucene/search/function/CustomScoreQuery.java b/lucene/src/java/org/apache/lucene/search/function/CustomScoreQuery.java index 8f29cbe1955..8a5ba9abf41 100755 --- a/lucene/src/java/org/apache/lucene/search/function/CustomScoreQuery.java +++ b/lucene/src/java/org/apache/lucene/search/function/CustomScoreQuery.java @@ -52,7 +52,7 @@ public class CustomScoreQuery extends Query { /** * Create a CustomScoreQuery over input subQuery. - * @param subQuery the sub query whose scored is being customed. Must not be null. + * @param subQuery the sub query whose scored is being customized. Must not be null. */ public CustomScoreQuery(Query subQuery) { this(subQuery, new ValueSourceQuery[0]); diff --git a/lucene/src/java/org/apache/lucene/store/FSLockFactory.java b/lucene/src/java/org/apache/lucene/store/FSLockFactory.java index bd705892adf..1bca363b088 100644 --- a/lucene/src/java/org/apache/lucene/store/FSLockFactory.java +++ b/lucene/src/java/org/apache/lucene/store/FSLockFactory.java @@ -33,7 +33,7 @@ public abstract class FSLockFactory extends LockFactory { /** * Set the lock directory. This method can be only called * once to initialize the lock directory. It is used by {@link FSDirectory} - * to set the lock directory to itsself. + * to set the lock directory to itself. 
* Subclasses can also use this method to set the directory * in the constructor. */ diff --git a/lucene/src/java/org/apache/lucene/store/NativeFSLockFactory.java b/lucene/src/java/org/apache/lucene/store/NativeFSLockFactory.java index 53c30a65651..f4f63e6d2fc 100755 --- a/lucene/src/java/org/apache/lucene/store/NativeFSLockFactory.java +++ b/lucene/src/java/org/apache/lucene/store/NativeFSLockFactory.java @@ -60,7 +60,7 @@ public class NativeFSLockFactory extends FSLockFactory { * Create a NativeFSLockFactory instance, with null (unset) * lock directory. When you pass this factory to a {@link FSDirectory} * subclass, the lock directory is automatically set to the - * directory itsself. Be sure to create one instance for each directory + * directory itself. Be sure to create one instance for each directory * your create! */ public NativeFSLockFactory() throws IOException { diff --git a/lucene/src/java/org/apache/lucene/store/SimpleFSLockFactory.java b/lucene/src/java/org/apache/lucene/store/SimpleFSLockFactory.java index dc8d73fe390..1f532aa509f 100755 --- a/lucene/src/java/org/apache/lucene/store/SimpleFSLockFactory.java +++ b/lucene/src/java/org/apache/lucene/store/SimpleFSLockFactory.java @@ -57,7 +57,7 @@ public class SimpleFSLockFactory extends FSLockFactory { * Create a SimpleFSLockFactory instance, with null (unset) * lock directory. When you pass this factory to a {@link FSDirectory} * subclass, the lock directory is automatically set to the - * directory itsself. Be sure to create one instance for each directory + * directory itself. Be sure to create one instance for each directory * your create! 
*/ public SimpleFSLockFactory() throws IOException { diff --git a/lucene/src/java/org/apache/lucene/util/ArrayUtil.java b/lucene/src/java/org/apache/lucene/util/ArrayUtil.java index 50e04ac93d1..c9c0f149b21 100644 --- a/lucene/src/java/org/apache/lucene/util/ArrayUtil.java +++ b/lucene/src/java/org/apache/lucene/util/ArrayUtil.java @@ -634,7 +634,7 @@ public final class ArrayUtil { /** * Sorts the given array slice using the {@link Comparator}. This method uses the insertion sort - * algorithm. It is only recommened to use this algorithm for partially sorted small arrays! + * algorithm. It is only recommended to use this algorithm for partially sorted small arrays! * @param fromIndex start index (inclusive) * @param toIndex end index (exclusive) */ @@ -644,7 +644,7 @@ public final class ArrayUtil { /** * Sorts the given array using the {@link Comparator}. This method uses the insertion sort - * algorithm. It is only recommened to use this algorithm for partially sorted small arrays! + * algorithm. It is only recommended to use this algorithm for partially sorted small arrays! */ public static void insertionSort(T[] a, Comparator comp) { insertionSort(a, 0, a.length, comp); @@ -652,7 +652,7 @@ public final class ArrayUtil { /** * Sorts the given array slice in natural order. This method uses the insertion sort - * algorithm. It is only recommened to use this algorithm for partially sorted small arrays! + * algorithm. It is only recommended to use this algorithm for partially sorted small arrays! * @param fromIndex start index (inclusive) * @param toIndex end index (exclusive) */ @@ -662,7 +662,7 @@ public final class ArrayUtil { /** * Sorts the given array in natural order. This method uses the insertion sort - * algorithm. It is only recommened to use this algorithm for partially sorted small arrays! + * algorithm. It is only recommended to use this algorithm for partially sorted small arrays! 
*/ public static > void insertionSort(T[] a) { insertionSort(a, 0, a.length); diff --git a/lucene/src/java/org/apache/lucene/util/BytesRefHash.java b/lucene/src/java/org/apache/lucene/util/BytesRefHash.java index 70b46228311..2a0367ae231 100644 --- a/lucene/src/java/org/apache/lucene/util/BytesRefHash.java +++ b/lucene/src/java/org/apache/lucene/util/BytesRefHash.java @@ -38,7 +38,7 @@ import org.apache.lucene.util.ByteBlockPool.DirectAllocator; *

    * Note: The maximum capacity {@link BytesRef} instance passed to * {@link #add(BytesRef)} must not be longer than {@link ByteBlockPool#BYTE_BLOCK_SIZE}-2. - * The internal storage is limited to 2GB totalbyte storage. + * The internal storage is limited to 2GB total byte storage. *

    * * @lucene.internal diff --git a/lucene/src/java/org/apache/lucene/util/CollectionUtil.java b/lucene/src/java/org/apache/lucene/util/CollectionUtil.java index 7e60fd19b43..ff2a76a6b87 100644 --- a/lucene/src/java/org/apache/lucene/util/CollectionUtil.java +++ b/lucene/src/java/org/apache/lucene/util/CollectionUtil.java @@ -140,7 +140,7 @@ public final class CollectionUtil { /** * Sorts the given random access {@link List} using the {@link Comparator}. * The list must implement {@link RandomAccess}. This method uses the insertion sort - * algorithm. It is only recommened to use this algorithm for partially sorted small lists! + * algorithm. It is only recommended to use this algorithm for partially sorted small lists! * @throws IllegalArgumentException if list is e.g. a linked list without random access. */ public static void insertionSort(List list, Comparator comp) { @@ -150,7 +150,7 @@ public final class CollectionUtil { /** * Sorts the given random access {@link List} in natural order. * The list must implement {@link RandomAccess}. This method uses the insertion sort - * algorithm. It is only recommened to use this algorithm for partially sorted small lists! + * algorithm. It is only recommended to use this algorithm for partially sorted small lists! * @throws IllegalArgumentException if list is e.g. a linked list without random access. */ public static > void insertionSort(List list) { diff --git a/lucene/src/java/org/apache/lucene/util/DoubleBarrelLRUCache.java b/lucene/src/java/org/apache/lucene/util/DoubleBarrelLRUCache.java index cdb958ef252..a476bd2b0f8 100644 --- a/lucene/src/java/org/apache/lucene/util/DoubleBarrelLRUCache.java +++ b/lucene/src/java/org/apache/lucene/util/DoubleBarrelLRUCache.java @@ -74,7 +74,7 @@ final public class DoubleBarrelLRUCache { } /** - * Creates a new instnace with the internal object set to the given object. + * Creates a new instance with the internal object set to the given object. 
* Note that any calls to {@link #set(Object)} afterwards will result in * {@link AlreadySetException} * diff --git a/lucene/src/java/org/apache/lucene/util/automaton/fst/FST.java b/lucene/src/java/org/apache/lucene/util/automaton/fst/FST.java index e94f9e544fa..1d25d0a8f8a 100644 --- a/lucene/src/java/org/apache/lucene/util/automaton/fst/FST.java +++ b/lucene/src/java/org/apache/lucene/util/automaton/fst/FST.java @@ -490,7 +490,7 @@ public class FST { } } - // Not private beacaus NodeHash needs access: + // Not private because NodeHash needs access: Arc readFirstRealArc(int address, Arc arc) throws IOException { final BytesReader in = getBytesReader(address); diff --git a/lucene/src/java/org/apache/lucene/util/packed/PackedInts.java b/lucene/src/java/org/apache/lucene/util/packed/PackedInts.java index 71d525d2b0b..c7f670c6eaa 100644 --- a/lucene/src/java/org/apache/lucene/util/packed/PackedInts.java +++ b/lucene/src/java/org/apache/lucene/util/packed/PackedInts.java @@ -251,7 +251,7 @@ public class PackedInts { /** Returns how many bits are required to hold values up * to and including maxValue - * @param maxValue the maximum value tha should be representable. + * @param maxValue the maximum value that should be representable. * @return the amount of bits needed to represent values from 0 to maxValue. 
* @lucene.internal */ From 48ca1148e21b87a7f26757f1352b6d3e52ce6138 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Sun, 30 Jan 2011 22:55:19 +0000 Subject: [PATCH 064/185] fix javadocs warnings git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065416 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/src/java/org/apache/lucene/index/IndexReader.java | 1 + .../src/java/org/apache/lucene/index/codecs/BlockTermState.java | 1 + 2 files changed, 2 insertions(+) diff --git a/lucene/src/java/org/apache/lucene/index/IndexReader.java b/lucene/src/java/org/apache/lucene/index/IndexReader.java index 4aa108ec827..8a0b5d9a56c 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexReader.java +++ b/lucene/src/java/org/apache/lucene/index/IndexReader.java @@ -19,6 +19,7 @@ package org.apache.lucene.index; import org.apache.lucene.document.Document; import org.apache.lucene.document.FieldSelector; +import org.apache.lucene.search.FieldCache; // javadocs import org.apache.lucene.search.Similarity; import org.apache.lucene.index.codecs.Codec; import org.apache.lucene.index.codecs.CodecProvider; diff --git a/lucene/src/java/org/apache/lucene/index/codecs/BlockTermState.java b/lucene/src/java/org/apache/lucene/index/codecs/BlockTermState.java index 4ab22aef72f..40bf8e95e11 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/BlockTermState.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/BlockTermState.java @@ -16,6 +16,7 @@ package org.apache.lucene.index.codecs; * limitations under the License. 
*/ +import org.apache.lucene.index.DocsEnum; // javadocs import org.apache.lucene.index.OrdTermState; import org.apache.lucene.index.TermState; From 1377b916e69f602bc10e04b735ed63eb462567cc Mon Sep 17 00:00:00 2001 From: Koji Sekiguchi Date: Mon, 31 Jan 2011 01:57:57 +0000 Subject: [PATCH 065/185] print out version infos in JavaBinCodec when throwing exception git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065465 13f79535-47bb-0310-9956-ffa450edef68 --- solr/src/common/org/apache/solr/common/util/JavaBinCodec.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/solr/src/common/org/apache/solr/common/util/JavaBinCodec.java b/solr/src/common/org/apache/solr/common/util/JavaBinCodec.java index 1a990098db8..2cb2104aecb 100755 --- a/solr/src/common/org/apache/solr/common/util/JavaBinCodec.java +++ b/solr/src/common/org/apache/solr/common/util/JavaBinCodec.java @@ -96,7 +96,8 @@ public class JavaBinCodec { FastInputStream dis = FastInputStream.wrap(is); version = dis.readByte(); if (version != VERSION) { - throw new RuntimeException("Invalid version or the data in not in 'javabin' format"); + throw new RuntimeException("Invalid version (expected " + VERSION + + ", but " + version + ") or the data in not in 'javabin' format"); } return readVal(dis); } From 107c06324bebc48b5df961b68f1680d3bc7789b9 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Mon, 31 Jan 2011 02:59:40 +0000 Subject: [PATCH 066/185] fix more javadocs warnings git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065474 13f79535-47bb-0310-9956-ffa450edef68 --- .../instantiated/InstantiatedIndexWriter.java | 4 ++-- lucene/contrib/queryparser/src/java/overview.html | 5 ----- .../lucene/analysis/br/BrazilianAnalyzer.java | 2 +- .../apache/lucene/analysis/cjk/CJKAnalyzer.java | 2 +- .../apache/lucene/analysis/cz/CzechAnalyzer.java | 2 +- .../apache/lucene/analysis/nl/DutchAnalyzer.java | 2 +- .../benchmark/quality/utils/SubmissionReport.java | 2 +- 
.../apache/solr/common/cloud/SolrZkClient.java | 15 +++++++-------- .../apache/solr/common/params/FacetParams.java | 1 - .../org/apache/solr/common/util/StrUtils.java | 2 +- .../java/org/apache/solr/cloud/ZkController.java | 10 +++++----- .../apache/solr/handler/RequestHandlerUtils.java | 2 +- .../apache/solr/response/BaseResponseWriter.java | 4 ++-- .../java/org/apache/solr/schema/IndexSchema.java | 2 +- .../org/apache/solr/schema/SortableLongField.java | 2 +- .../org/apache/solr/search/SolrIndexSearcher.java | 2 +- .../apache/solr/search/function/ValueSource.java | 2 +- .../solr/client/solrj/impl/LBHttpSolrServer.java | 2 +- .../client/solrj/response/SpellCheckResponse.java | 4 ++-- 19 files changed, 30 insertions(+), 37 deletions(-) diff --git a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java index d7b6de961da..6114e9fff9e 100644 --- a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java +++ b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java @@ -114,14 +114,14 @@ public class InstantiatedIndexWriter implements Closeable { * MAddDocs_20000 - 7 4000 100 false - - 1 - - 20000 - - 535,8 - - 37,33 - 309 680 640 - 501 968 896 * * - * @see org.apache.lucene.index.IndexWriter#setMergeFactor(int) + * @see org.apache.lucene.index.LogMergePolicy#setMergeFactor(int) */ public void setMergeFactor(int mergeFactor) { this.mergeFactor = mergeFactor; } /** - * @see org.apache.lucene.index.IndexWriter#getMergeFactor() + * @see org.apache.lucene.index.LogMergePolicy#getMergeFactor() */ public int getMergeFactor() { return mergeFactor; diff --git a/lucene/contrib/queryparser/src/java/overview.html b/lucene/contrib/queryparser/src/java/overview.html index 4b89bf053c7..d3df6f0722c 100644 --- 
a/lucene/contrib/queryparser/src/java/overview.html +++ b/lucene/contrib/queryparser/src/java/overview.html @@ -138,11 +138,6 @@ you don't need to worry about dealing with those. config.setAnalyzer(new WhitespaceAnalyzer()); Query query = qpHelper.parse("apache AND lucene", "defaultField"); -

    -To make it easy for people who are using current Lucene's query parser to switch to -the new one, there is a {@link org.apache.lucene.queryParser.standard.QueryParserWrapper} under org.apache.lucene.queryParser.standard -that keeps the old query parser interface, but uses the new query parser infrastructure. -

    diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java index 1b144b45918..5e35643c77d 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java @@ -82,7 +82,7 @@ public final class BrazilianAnalyzer extends StopwordAnalyzerBase { private Set excltable = Collections.emptySet(); /** - * Builds an analyzer with the default stop words ({@link #BRAZILIAN_STOP_WORDS}). + * Builds an analyzer with the default stop words ({@link #getDefaultStopSet()}). */ public BrazilianAnalyzer(Version matchVersion) { this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET); diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java index d5796812d62..7eafcd2d903 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java @@ -65,7 +65,7 @@ public final class CJKAnalyzer extends StopwordAnalyzerBase { } /** - * Builds an analyzer which removes words in {@link #STOP_WORDS}. + * Builds an analyzer which removes words in {@link #getDefaultStopSet()}. 
*/ public CJKAnalyzer(Version matchVersion) { this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET); diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java index 89e58f4cf87..d23f759adc6 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java @@ -86,7 +86,7 @@ public final class CzechAnalyzer extends StopwordAnalyzerBase { private final Set stemExclusionTable; /** - * Builds an analyzer with the default stop words ({@link #CZECH_STOP_WORDS}). + * Builds an analyzer with the default stop words ({@link #getDefaultStopSet()}). * * @param matchVersion Lucene version to match See * {@link above} diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/nl/DutchAnalyzer.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/nl/DutchAnalyzer.java index f7a295a93ff..9aca85ccbcc 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/nl/DutchAnalyzer.java +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/nl/DutchAnalyzer.java @@ -109,7 +109,7 @@ public final class DutchAnalyzer extends ReusableAnalyzerBase { private final Version matchVersion; /** - * Builds an analyzer with the default stop words ({@link #DUTCH_STOP_WORDS}) + * Builds an analyzer with the default stop words ({@link #getDefaultStopSet()}) * and a few default entries for the stem exclusion table. 
* */ diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SubmissionReport.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SubmissionReport.java index 79e3f0b1644..2a02aa7cfa6 100644 --- a/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SubmissionReport.java +++ b/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SubmissionReport.java @@ -28,7 +28,7 @@ import org.apache.lucene.search.TopDocs; /** * Create a log ready for submission. * Extend this class and override - * {@link #report(QualityQuery, TopDocs, String, Searcher)} + * {@link #report(QualityQuery, TopDocs, String, IndexSearcher)} * to create different reports. */ public class SubmissionReport { diff --git a/solr/src/common/org/apache/solr/common/cloud/SolrZkClient.java b/solr/src/common/org/apache/solr/common/cloud/SolrZkClient.java index 70ff66b7585..98a63066d62 100644 --- a/solr/src/common/org/apache/solr/common/cloud/SolrZkClient.java +++ b/solr/src/common/org/apache/solr/common/cloud/SolrZkClient.java @@ -74,7 +74,6 @@ public class SolrZkClient { * @param zkClientTimeout * @param strat * @param onReconnect - * @param clientConnectTimeout * @throws InterruptedException * @throws TimeoutException * @throws IOException @@ -164,7 +163,7 @@ public class SolrZkClient { /** * @param path - * @return + * @return true if path exists * @throws KeeperException * @throws InterruptedException */ @@ -178,7 +177,7 @@ public class SolrZkClient { * @param data * @param acl * @param createMode - * @return + * @return path of created node * @throws KeeperException * @throws InterruptedException */ @@ -190,7 +189,7 @@ public class SolrZkClient { /** * @param path * @param watcher - * @return + * @return children of the node at the path * @throws KeeperException * @throws InterruptedException */ @@ -203,7 +202,7 @@ public class SolrZkClient { * @param path * @param watcher * @param stat - * @return + * @return node's data * 
@throws KeeperException * @throws InterruptedException */ @@ -216,7 +215,7 @@ public class SolrZkClient { * @param path * @param data * @param version - * @return + * @return node's state * @throws KeeperException * @throws InterruptedException */ @@ -229,8 +228,8 @@ public class SolrZkClient { * * @param path * @param data - * @param watcher - * @return + * @param createMode + * @return path of created node * @throws KeeperException * @throws InterruptedException */ diff --git a/solr/src/common/org/apache/solr/common/params/FacetParams.java b/solr/src/common/org/apache/solr/common/params/FacetParams.java index e742638fdcb..02aa6310ba8 100644 --- a/solr/src/common/org/apache/solr/common/params/FacetParams.java +++ b/solr/src/common/org/apache/solr/common/params/FacetParams.java @@ -228,7 +228,6 @@ public interface FacetParams { * String indicating what "other" ranges should be computed for a * numerical range facet (multi-value). * Can be overriden on a per field basis. - * @see FacetNumberOther */ public static final String FACET_RANGE_OTHER = FACET_RANGE + ".other"; /** diff --git a/solr/src/common/org/apache/solr/common/util/StrUtils.java b/solr/src/common/org/apache/solr/common/util/StrUtils.java index 7848358f1b6..fc2187aa44a 100644 --- a/solr/src/common/org/apache/solr/common/util/StrUtils.java +++ b/solr/src/common/org/apache/solr/common/util/StrUtils.java @@ -242,7 +242,7 @@ public class StrUtils { * {@link NullPointerException} and {@link SolrException} free version of {@link #parseBool(String)} * @param s * @param def - * @return + * @return parsed boolean value (or def, if s is null or invalid) */ public static boolean parseBool(String s, boolean def) { if( s != null ) { diff --git a/solr/src/java/org/apache/solr/cloud/ZkController.java b/solr/src/java/org/apache/solr/cloud/ZkController.java index 602794e2d27..45fa0104025 100644 --- a/solr/src/java/org/apache/solr/cloud/ZkController.java +++ b/solr/src/java/org/apache/solr/cloud/ZkController.java @@ 
-186,7 +186,7 @@ public final class ZkController { /** * @param collection * @param fileName - * @return + * @return true if config file exists * @throws KeeperException * @throws InterruptedException */ @@ -206,7 +206,7 @@ public final class ZkController { /** * @param zkConfigName * @param fileName - * @return + * @return config file data (in bytes) * @throws KeeperException * @throws InterruptedException */ @@ -250,7 +250,7 @@ public final class ZkController { } /** - * @return + * @return zookeeper server address */ public String getZkServerAddress() { return zkServerAddress; @@ -392,7 +392,7 @@ public final class ZkController { /** * @param path - * @return + * @return true if the path exists * @throws KeeperException * @throws InterruptedException */ @@ -403,7 +403,7 @@ public final class ZkController { /** * @param collection - * @return + * @return config value * @throws KeeperException * @throws InterruptedException * @throws IOException diff --git a/solr/src/java/org/apache/solr/handler/RequestHandlerUtils.java b/solr/src/java/org/apache/solr/handler/RequestHandlerUtils.java index 34eb84ed26e..d7d85767a61 100755 --- a/solr/src/java/org/apache/solr/handler/RequestHandlerUtils.java +++ b/solr/src/java/org/apache/solr/handler/RequestHandlerUtils.java @@ -49,7 +49,7 @@ public class RequestHandlerUtils * Check the request parameters and decide if it should commit or optimize. 
* If it does, it will check parameters for "waitFlush" and "waitSearcher" * - * @deprecated Use {@link #handleCommit(UpdateRequestProcessor,SolrParams,boolean)} + * @deprecated Use {@link #handleCommit(SolrQueryRequest,UpdateRequestProcessor,SolrParams,boolean)} * * @since solr 1.2 */ diff --git a/solr/src/java/org/apache/solr/response/BaseResponseWriter.java b/solr/src/java/org/apache/solr/response/BaseResponseWriter.java index b63604813ad..d86de764fee 100644 --- a/solr/src/java/org/apache/solr/response/BaseResponseWriter.java +++ b/solr/src/java/org/apache/solr/response/BaseResponseWriter.java @@ -302,12 +302,12 @@ public abstract class BaseResponseWriter { * {@link SolrInputDocument}s to be spit out as a {@link SolrDocumentList} * so they can be processed as a whole, rather than on a doc-by-doc basis. * If set to false, this method calls - * {@link #writeAllDocs(DocListInfo, List)}, else if set to true, then this + * {@link #writeAllDocs(BaseResponseWriter.DocListInfo, List)}, else if set to true, then this * method forces calling {@link #writeDoc(SolrDocument)} on a doc-by-doc * basis. one * * @return True to force {@link #writeDoc(SolrDocument)} to be called, False - * to force {@link #writeAllDocs(DocListInfo, List)} to be called. + * to force {@link #writeAllDocs(BaseResponseWriter.DocListInfo, List)} to be called. */ public boolean isStreamingDocs() { return true; } diff --git a/solr/src/java/org/apache/solr/schema/IndexSchema.java b/solr/src/java/org/apache/solr/schema/IndexSchema.java index b5727233e48..079ddcd42d9 100644 --- a/solr/src/java/org/apache/solr/schema/IndexSchema.java +++ b/solr/src/java/org/apache/solr/schema/IndexSchema.java @@ -99,7 +99,7 @@ public final class IndexSchema { * If the is stream is null, the resource loader will load the schema resource by name. * @see SolrResourceLoader#openSchema * By default, this follows the normal config path directory searching rules. 
- * @see Config#openResource + * @see SolrResourceLoader#openResource */ public IndexSchema(SolrConfig solrConfig, String name, InputStream is) { this.solrConfig = solrConfig; diff --git a/solr/src/java/org/apache/solr/schema/SortableLongField.java b/solr/src/java/org/apache/solr/schema/SortableLongField.java index f68d5e85fd3..a3501d15dc6 100644 --- a/solr/src/java/org/apache/solr/schema/SortableLongField.java +++ b/solr/src/java/org/apache/solr/schema/SortableLongField.java @@ -38,7 +38,7 @@ import java.io.IOException; /** * @version $Id$ * - * @deprecated use {@link LongField} or {@link TrieLongtField} - will be removed in 5.x + * @deprecated use {@link LongField} or {@link TrieLongField} - will be removed in 5.x */ @Deprecated public class SortableLongField extends FieldType { diff --git a/solr/src/java/org/apache/solr/search/SolrIndexSearcher.java b/solr/src/java/org/apache/solr/search/SolrIndexSearcher.java index 06eaa74139c..c69102ac897 100644 --- a/solr/src/java/org/apache/solr/search/SolrIndexSearcher.java +++ b/solr/src/java/org/apache/solr/search/SolrIndexSearcher.java @@ -357,7 +357,7 @@ public class SolrIndexSearcher extends IndexSearcher implements SolrInfoMBean { /** * @return the indexDir on which this searcher is opened - * @see org.apache.solr.search.SolrIndexSearcher#SolrIndexSearcher(org.apache.solr.core.SolrCore, org.apache.solr.schema.IndexSchema, String, String, boolean) + * @see #SolrIndexSearcher(SolrCore, IndexSchema, String, Directory, boolean) */ public String getIndexDir() { return indexDir; diff --git a/solr/src/java/org/apache/solr/search/function/ValueSource.java b/solr/src/java/org/apache/solr/search/function/ValueSource.java index ffcdac97a7d..8f62760f5e4 100644 --- a/solr/src/java/org/apache/solr/search/function/ValueSource.java +++ b/solr/src/java/org/apache/solr/search/function/ValueSource.java @@ -92,7 +92,7 @@ public abstract class ValueSource implements Serializable { * EXPERIMENTAL: This method is subject to change. *
    WARNING: Sorted function queries are not currently weighted. *

    - * Get the SortField for this ValueSource. Uses the {@link #getValues(java.util.Map, AtomicReaderContext)} + * Get the SortField for this ValueSource. Uses the {@link #getValues(java.util.Map, IndexReader.AtomicReaderContext)} * to populate the SortField. * * @param reverse true if this is a reverse sort. diff --git a/solr/src/solrj/org/apache/solr/client/solrj/impl/LBHttpSolrServer.java b/solr/src/solrj/org/apache/solr/client/solrj/impl/LBHttpSolrServer.java index a12a9397e9b..6922223f34f 100644 --- a/solr/src/solrj/org/apache/solr/client/solrj/impl/LBHttpSolrServer.java +++ b/solr/src/solrj/org/apache/solr/client/solrj/impl/LBHttpSolrServer.java @@ -150,7 +150,7 @@ public class LBHttpSolrServer extends SolrServer { return numDeadServersToTry; } - /** @return The number of dead servers to try if there are no live servers left. + /** @param numDeadServersToTry The number of dead servers to try if there are no live servers left. * Defaults to the number of servers in this request. */ public void setNumDeadServersToTry(int numDeadServersToTry) { this.numDeadServersToTry = numDeadServersToTry; diff --git a/solr/src/solrj/org/apache/solr/client/solrj/response/SpellCheckResponse.java b/solr/src/solrj/org/apache/solr/client/solrj/response/SpellCheckResponse.java index e4001ec19c6..c5bb419ee87 100644 --- a/solr/src/solrj/org/apache/solr/client/solrj/response/SpellCheckResponse.java +++ b/solr/src/solrj/org/apache/solr/client/solrj/response/SpellCheckResponse.java @@ -115,7 +115,7 @@ public class SpellCheckResponse { *

    * Return the first collated query string. For convenience and backwards-compatibility. Use getCollatedResults() for full data. *

    - * @return + * @return first collated query string */ public String getCollatedResult() { return collations==null || collations.size()==0 ? null : collations.get(0).collationQueryString; @@ -126,7 +126,7 @@ public class SpellCheckResponse { * Return all collations. * Will include # of hits and misspelling-to-correction details if "spellcheck.collateExtendedResults was true. *

    - * @return + * @return all collations */ public List getCollatedResults() { return collations; From feba52697be6a6a412889b8b87bfa6195a732dc9 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Mon, 31 Jan 2011 12:01:08 +0000 Subject: [PATCH 067/185] LUCENE-1076: fix false test failure git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065572 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java b/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java index 71621a3f80d..162befd08ab 100644 --- a/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java @@ -335,7 +335,7 @@ public class TestPhraseQuery extends LuceneTestCase { public void testSlopScoring() throws IOException { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy())); Document doc = new Document(); doc.add(newField("field", "foo firstname lastname foo", Field.Store.YES, Field.Index.ANALYZED)); From 72ab9cafcea12bac20aa6bd8c8915538e8153433 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Mon, 31 Jan 2011 13:22:24 +0000 Subject: [PATCH 068/185] add missing package.htmls git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065600 13f79535-47bb-0310-9956-ffa450edef68 --- .../lucene/index/codecs/intblock/package.html | 25 +++++++++++++++++++ .../apache/lucene/index/codecs/package.html | 25 +++++++++++++++++++ .../lucene/index/codecs/preflex/package.html | 25 +++++++++++++++++++ .../lucene/index/codecs/pulsing/package.html | 25 +++++++++++++++++++ .../lucene/index/codecs/sep/package.html | 25 
+++++++++++++++++++ .../index/codecs/simpletext/package.html | 25 +++++++++++++++++++ .../lucene/index/codecs/standard/package.html | 25 +++++++++++++++++++ .../apache/lucene/search/cache/package.html | 25 +++++++++++++++++++ .../lucene/util/automaton/fst/package.html | 25 +++++++++++++++++++ 9 files changed, 225 insertions(+) create mode 100644 lucene/src/java/org/apache/lucene/index/codecs/intblock/package.html create mode 100644 lucene/src/java/org/apache/lucene/index/codecs/package.html create mode 100644 lucene/src/java/org/apache/lucene/index/codecs/preflex/package.html create mode 100644 lucene/src/java/org/apache/lucene/index/codecs/pulsing/package.html create mode 100644 lucene/src/java/org/apache/lucene/index/codecs/sep/package.html create mode 100644 lucene/src/java/org/apache/lucene/index/codecs/simpletext/package.html create mode 100644 lucene/src/java/org/apache/lucene/index/codecs/standard/package.html create mode 100644 lucene/src/java/org/apache/lucene/search/cache/package.html create mode 100644 lucene/src/java/org/apache/lucene/util/automaton/fst/package.html diff --git a/lucene/src/java/org/apache/lucene/index/codecs/intblock/package.html b/lucene/src/java/org/apache/lucene/index/codecs/intblock/package.html new file mode 100644 index 00000000000..403ea1b55f6 --- /dev/null +++ b/lucene/src/java/org/apache/lucene/index/codecs/intblock/package.html @@ -0,0 +1,25 @@ + + + + + + + +Intblock: base support for fixed or variable length block integer encoders + + diff --git a/lucene/src/java/org/apache/lucene/index/codecs/package.html b/lucene/src/java/org/apache/lucene/index/codecs/package.html new file mode 100644 index 00000000000..78dcb95de64 --- /dev/null +++ b/lucene/src/java/org/apache/lucene/index/codecs/package.html @@ -0,0 +1,25 @@ + + + + + + + +Codecs API: API for customization of the encoding and structure of the index. 
+ + diff --git a/lucene/src/java/org/apache/lucene/index/codecs/preflex/package.html b/lucene/src/java/org/apache/lucene/index/codecs/preflex/package.html new file mode 100644 index 00000000000..c6c96c978c2 --- /dev/null +++ b/lucene/src/java/org/apache/lucene/index/codecs/preflex/package.html @@ -0,0 +1,25 @@ + + + + + + + +Preflex codec: supports Lucene 3.x indexes (readonly) + + diff --git a/lucene/src/java/org/apache/lucene/index/codecs/pulsing/package.html b/lucene/src/java/org/apache/lucene/index/codecs/pulsing/package.html new file mode 100644 index 00000000000..4216cc69dea --- /dev/null +++ b/lucene/src/java/org/apache/lucene/index/codecs/pulsing/package.html @@ -0,0 +1,25 @@ + + + + + + + +Pulsing Codec: inlines low frequency terms' postings into terms dictionary. + + diff --git a/lucene/src/java/org/apache/lucene/index/codecs/sep/package.html b/lucene/src/java/org/apache/lucene/index/codecs/sep/package.html new file mode 100644 index 00000000000..b51d9102715 --- /dev/null +++ b/lucene/src/java/org/apache/lucene/index/codecs/sep/package.html @@ -0,0 +1,25 @@ + + + + + + + +Sep: base support for separate files (doc,frq,pos,skp,pyl) + + diff --git a/lucene/src/java/org/apache/lucene/index/codecs/simpletext/package.html b/lucene/src/java/org/apache/lucene/index/codecs/simpletext/package.html new file mode 100644 index 00000000000..88aad683412 --- /dev/null +++ b/lucene/src/java/org/apache/lucene/index/codecs/simpletext/package.html @@ -0,0 +1,25 @@ + + + + + + + +Simpletext Codec: writes human readable postings. 
+ + diff --git a/lucene/src/java/org/apache/lucene/index/codecs/standard/package.html b/lucene/src/java/org/apache/lucene/index/codecs/standard/package.html new file mode 100644 index 00000000000..aca1dc4b665 --- /dev/null +++ b/lucene/src/java/org/apache/lucene/index/codecs/standard/package.html @@ -0,0 +1,25 @@ + + + + + + + +Standard Codec + + diff --git a/lucene/src/java/org/apache/lucene/search/cache/package.html b/lucene/src/java/org/apache/lucene/search/cache/package.html new file mode 100644 index 00000000000..1ca0c5ddc44 --- /dev/null +++ b/lucene/src/java/org/apache/lucene/search/cache/package.html @@ -0,0 +1,25 @@ + + + + + + + +Fieldcache + + diff --git a/lucene/src/java/org/apache/lucene/util/automaton/fst/package.html b/lucene/src/java/org/apache/lucene/util/automaton/fst/package.html new file mode 100644 index 00000000000..c5be56e42fc --- /dev/null +++ b/lucene/src/java/org/apache/lucene/util/automaton/fst/package.html @@ -0,0 +1,25 @@ + + + + + + + +Finite state transducers + + From cd8929f9f5d3f80c0d6ea65215e5656922228ede Mon Sep 17 00:00:00 2001 From: Koji Sekiguchi Date: Mon, 31 Jan 2011 13:26:26 +0000 Subject: [PATCH 069/185] SOLR-2340: add the entry to CHANGES.txt git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065601 13f79535-47bb-0310-9956-ffa450edef68 --- solr/CHANGES.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index 5022a1ed447..40519a36325 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -403,7 +403,6 @@ New Features http://wiki.apache.org/solr/SpatialSearch and the example. Refactored some items in Lucene spatial. Removed SpatialTileField as the underlying CartesianTier is broken beyond repair and is going to be moved. (gsingers) - * SOLR-2128: Full parameter substitution for function queries. 
Example: q=add($v1,$v2)&v1=mul(popularity,5)&v2=20.0 (yonik) @@ -754,6 +753,9 @@ Other Changes * SOLR-1826: Add unit tests for highlighting with termOffsets=true and overlapping tokens. (Stefan Oestreicher via rmuir) +* SOLR-2340: Add version infos to message in JavaBinCodec when throwing + exception. (koji) + Build ---------------------- From 5ccf063a5d635e60e0b1d755bb55fcec8b00c37e Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Mon, 31 Jan 2011 14:06:45 +0000 Subject: [PATCH 070/185] LUCENE-2901: fix consistency of KeywordMarkerFilter, it should only set, not unset the attribute git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065621 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/CHANGES.txt | 4 ++-- .../miscellaneous/KeywordMarkerFilter.java | 8 +++++--- .../miscellaneous/TestKeywordMarkerFilter.java | 14 ++++++++++++++ 3 files changed, 21 insertions(+), 5 deletions(-) diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index 79ded77817e..cf9f02efb99 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -747,8 +747,8 @@ New features stopwords, and implement many analyzers in contrib with it. (Simon Willnauer via Robert Muir) -* LUCENE-2198: Support protected words in stemming TokenFilters using a - new KeywordAttribute. (Simon Willnauer via Uwe Schindler) +* LUCENE-2198, LUCENE-2901: Support protected words in stemming TokenFilters using a + new KeywordAttribute. (Simon Willnauer, Drew Farris via Uwe Schindler) * LUCENE-2183, LUCENE-2240, LUCENE-2241: Added Unicode 4 support to CharTokenizer and its subclasses. 
CharTokenizer now has new diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/KeywordMarkerFilter.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/KeywordMarkerFilter.java index b5fb812baca..7a55e32c53f 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/KeywordMarkerFilter.java +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/KeywordMarkerFilter.java @@ -74,10 +74,12 @@ public final class KeywordMarkerFilter extends TokenFilter { @Override public final boolean incrementToken() throws IOException { if (input.incrementToken()) { - keywordAttr.setKeyword(keywordSet.contains(termAtt.buffer(), 0, - termAtt.length())); + if (keywordSet.contains(termAtt.buffer(), 0, termAtt.length())) { + keywordAttr.setKeyword(true); + } return true; - } else + } else { return false; + } } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeywordMarkerFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeywordMarkerFilter.java index f12e7c488c8..4637ee1210b 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeywordMarkerFilter.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeywordMarkerFilter.java @@ -2,6 +2,7 @@ package org.apache.lucene.analysis.miscellaneous; import java.io.IOException; import java.io.StringReader; +import java.util.Arrays; import java.util.HashSet; import java.util.Locale; import java.util.Set; @@ -57,6 +58,19 @@ public class TestKeywordMarkerFilter extends BaseTokenStreamTestCase { "The quIck browN LuceneFox Jumps")), set2)), output); } + // LUCENE-2901 + public void testComposition() throws Exception { + TokenStream ts = new LowerCaseFilterMock( + new KeywordMarkerFilter( + new KeywordMarkerFilter( + new WhitespaceTokenizer(TEST_VERSION_CURRENT, + new StringReader("Dogs 
Trees Birds Houses")), + new HashSet(Arrays.asList(new String[] { "Birds", "Houses" }))), + new HashSet(Arrays.asList(new String[] { "Dogs", "Trees" })))); + + assertTokenStreamContents(ts, new String[] { "Dogs", "Trees", "Birds", "Houses" }); + } + public static final class LowerCaseFilterMock extends TokenFilter { private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); From c0b98f063a070ff2740d863e7d4fc2bcc6141cbc Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Mon, 31 Jan 2011 19:20:34 +0000 Subject: [PATCH 071/185] LUCENE-1591: rollback to old patched xercesImpl.jar to workaround XERCESJ-1257, which we hit on current Wikipedia XML export (enwiki-20110115-pages-articles.xml) git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065719 13f79535-47bb-0310-9956-ffa450edef68 --- modules/benchmark/lib/xercesImpl-2.10.0.jar | 2 -- modules/benchmark/lib/xercesImpl-2.9.1-patched-XERCESJ-1257.jar | 2 ++ modules/benchmark/lib/xml-apis-2.10.0.jar | 2 -- modules/benchmark/lib/xml-apis-2.9.0.jar | 2 ++ 4 files changed, 4 insertions(+), 4 deletions(-) delete mode 100644 modules/benchmark/lib/xercesImpl-2.10.0.jar create mode 100644 modules/benchmark/lib/xercesImpl-2.9.1-patched-XERCESJ-1257.jar delete mode 100644 modules/benchmark/lib/xml-apis-2.10.0.jar create mode 100644 modules/benchmark/lib/xml-apis-2.9.0.jar diff --git a/modules/benchmark/lib/xercesImpl-2.10.0.jar b/modules/benchmark/lib/xercesImpl-2.10.0.jar deleted file mode 100644 index 11b416c0503..00000000000 --- a/modules/benchmark/lib/xercesImpl-2.10.0.jar +++ /dev/null @@ -1,2 +0,0 @@ -AnyObjectId[9dcd8c38196b24e51f78d8e1b0a42d1ffef60acb] was removed in git history. -Apache SVN contains full history. 
\ No newline at end of file diff --git a/modules/benchmark/lib/xercesImpl-2.9.1-patched-XERCESJ-1257.jar b/modules/benchmark/lib/xercesImpl-2.9.1-patched-XERCESJ-1257.jar new file mode 100644 index 00000000000..6eacbf558b1 --- /dev/null +++ b/modules/benchmark/lib/xercesImpl-2.9.1-patched-XERCESJ-1257.jar @@ -0,0 +1,2 @@ +AnyObjectId[bbb5aa7ad5bcea61c5c66ceb2ba340431cc7262d] was removed in git history. +Apache SVN contains full history. \ No newline at end of file diff --git a/modules/benchmark/lib/xml-apis-2.10.0.jar b/modules/benchmark/lib/xml-apis-2.10.0.jar deleted file mode 100644 index c59f0f17531..00000000000 --- a/modules/benchmark/lib/xml-apis-2.10.0.jar +++ /dev/null @@ -1,2 +0,0 @@ -AnyObjectId[46733464fc746776c331ecc51061f3a05e662fd1] was removed in git history. -Apache SVN contains full history. \ No newline at end of file diff --git a/modules/benchmark/lib/xml-apis-2.9.0.jar b/modules/benchmark/lib/xml-apis-2.9.0.jar new file mode 100644 index 00000000000..214dd3e0819 --- /dev/null +++ b/modules/benchmark/lib/xml-apis-2.9.0.jar @@ -0,0 +1,2 @@ +AnyObjectId[d42c0ea6cfd17ed6b444b8337febbc0bdb55ed83] was removed in git history. +Apache SVN contains full history. 
\ No newline at end of file From 7088a4d555574e767273b787eee3c3b092aff728 Mon Sep 17 00:00:00 2001 From: Steven Rowe Date: Mon, 31 Jan 2011 21:16:39 +0000 Subject: [PATCH 072/185] LUCENE-1591: Eclipse config update for xercesImpl.jar revert git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065787 13f79535-47bb-0310-9956-ffa450edef68 --- dev-tools/eclipse/dot.classpath | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dev-tools/eclipse/dot.classpath b/dev-tools/eclipse/dot.classpath index b3b5c76a5cc..b633d43635d 100644 --- a/dev-tools/eclipse/dot.classpath +++ b/dev-tools/eclipse/dot.classpath @@ -92,8 +92,8 @@ - - + + From 80d45a4ee3409062961fb3a634c008052473668f Mon Sep 17 00:00:00 2001 From: Steven Rowe Date: Mon, 31 Jan 2011 21:28:47 +0000 Subject: [PATCH 073/185] LUCENE-1591: update maven poms for xercesImpl.jar revert git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065802 13f79535-47bb-0310-9956-ffa450edef68 --- dev-tools/maven/pom.xml.template | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/dev-tools/maven/pom.xml.template b/dev-tools/maven/pom.xml.template index 174df33cfd1..9bf98955a61 100644 --- a/dev-tools/maven/pom.xml.template +++ b/dev-tools/maven/pom.xml.template @@ -365,12 +365,12 @@ xerces xercesImpl - 2.10.0 + 2.9.1-patched-XERCESJ-1257 xml-apis xml-apis - 2.10.0 + 2.9.0 javax.servlet @@ -640,9 +640,9 @@ xerces xercesImpl - 2.10.0 + 2.9.1-patched-XERCESJ-1257 jar - modules/benchmark/lib/xercesImpl-2.10.0.jar + modules/benchmark/lib/xercesImpl-2.9.1-patched-XERCESJ-1257.jar @@ -654,9 +654,9 @@ xml-apis xml-apis - 2.10.0 + 2.9.0 jar - modules/benchmark/lib/xml-apis-2.10.0.jar + modules/benchmark/lib/xml-apis-2.9.0.jar From 25f16877ddc16d3f223dab9ecbbddaab3559f545 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Mon, 31 Jan 2011 23:30:00 +0000 Subject: [PATCH 074/185] LUCENE-2902: tests should run checkIndex on indexes they create git-svn-id: 
https://svn.apache.org/repos/asf/lucene/dev/trunk@1065853 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/lucene/TestExternalCodecs.java | 3 ++- .../lucene/index/TestIndexWriterExceptions.java | 7 ++++--- .../lucene/index/TestIndexWriterMerging.java | 1 - .../lucene/store/MockDirectoryWrapper.java | 17 +++++++++++++++++ .../lucene/store/TestFileSwitchDirectory.java | 6 ++++-- 5 files changed, 27 insertions(+), 7 deletions(-) diff --git a/lucene/src/test/org/apache/lucene/TestExternalCodecs.java b/lucene/src/test/org/apache/lucene/TestExternalCodecs.java index 0de0ff4038c..c38f14e7b3e 100644 --- a/lucene/src/test/org/apache/lucene/TestExternalCodecs.java +++ b/lucene/src/test/org/apache/lucene/TestExternalCodecs.java @@ -631,7 +631,8 @@ public class TestExternalCodecs extends LuceneTestCase { final int NUM_DOCS = 173; - Directory dir = newDirectory(); + MockDirectoryWrapper dir = newDirectory(); + dir.setCheckIndexOnClose(false); // we use a custom codec provider IndexWriter w = new IndexWriter( dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, true, true)). diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java index 101812330e1..6d05837a35d 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java @@ -983,7 +983,8 @@ public class TestIndexWriterExceptions extends LuceneTestCase { // latest segments file and make sure we get an // IOException trying to open the index: public void testSimulatedCorruptIndex1() throws IOException { - Directory dir = newDirectory(); + MockDirectoryWrapper dir = newDirectory(); + dir.setCheckIndexOnClose(false); // we are corrupting it! 
IndexWriter writer = null; @@ -1030,8 +1031,8 @@ public class TestIndexWriterExceptions extends LuceneTestCase { // files and make sure we get an IOException trying to // open the index: public void testSimulatedCorruptIndex2() throws IOException { - Directory dir = newDirectory(); - + MockDirectoryWrapper dir = newDirectory(); + dir.setCheckIndexOnClose(false); // we are corrupting it! IndexWriter writer = null; writer = new IndexWriter( diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java index 57c5e26040d..87883d635a1 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java @@ -66,7 +66,6 @@ public class TestIndexWriterMerging extends LuceneTestCase writer.close(); fail = verifyIndex(merged, 0); - merged.close(); assertFalse("The merged index is invalid", fail); indexA.close(); diff --git a/lucene/src/test/org/apache/lucene/store/MockDirectoryWrapper.java b/lucene/src/test/org/apache/lucene/store/MockDirectoryWrapper.java index 13587132b9d..bb9552bf7d9 100644 --- a/lucene/src/test/org/apache/lucene/store/MockDirectoryWrapper.java +++ b/lucene/src/test/org/apache/lucene/store/MockDirectoryWrapper.java @@ -31,7 +31,9 @@ import java.util.Map; import java.util.Random; import java.util.Set; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util._TestUtil; /** * This is a Directory Wrapper that adds methods @@ -48,6 +50,7 @@ public class MockDirectoryWrapper extends Directory { Random randomState; boolean noDeleteOpenFile = true; boolean preventDoubleWrite = true; + boolean checkIndexOnClose = true; boolean trackDiskUsage = false; private Set unSyncedFiles; private Set createdFiles; @@ -205,6 +208,17 @@ public class MockDirectoryWrapper extends Directory { return noDeleteOpenFile; } + /** + * Set whether or 
not checkindex should be run + * on close + */ + public void setCheckIndexOnClose(boolean value) { + this.checkIndexOnClose = value; + } + + public boolean getCheckIndexOnClose() { + return checkIndexOnClose; + } /** * If 0.0, no exceptions will be thrown. Else this should * be a double 0.0 - 1.0. We will randomly throw an @@ -393,6 +407,9 @@ public class MockDirectoryWrapper extends Directory { throw new RuntimeException("MockDirectoryWrapper: cannot close: there are still open files: " + openFiles, cause); } open = false; + if (checkIndexOnClose && IndexReader.indexExists(this)) { + _TestUtil.checkIndex(this); + } delegate.close(); } diff --git a/lucene/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java b/lucene/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java index 606ee1e692b..635970fd85c 100644 --- a/lucene/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java +++ b/lucene/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java @@ -39,8 +39,10 @@ public class TestFileSwitchDirectory extends LuceneTestCase { fileExtensions.add(IndexFileNames.FIELDS_EXTENSION); fileExtensions.add(IndexFileNames.FIELDS_INDEX_EXTENSION); - Directory primaryDir = new MockDirectoryWrapper(random, new RAMDirectory()); - Directory secondaryDir = new MockDirectoryWrapper(random, new RAMDirectory()); + MockDirectoryWrapper primaryDir = new MockDirectoryWrapper(random, new RAMDirectory()); + primaryDir.setCheckIndexOnClose(false); // only part of an index + MockDirectoryWrapper secondaryDir = new MockDirectoryWrapper(random, new RAMDirectory()); + secondaryDir.setCheckIndexOnClose(false); // only part of an index FileSwitchDirectory fsd = new FileSwitchDirectory(fileExtensions, primaryDir, secondaryDir, true); IndexWriter writer = new IndexWriter( From 62b692e9a3b0b4f86dfaeac43b5c7d7743d2185d Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Mon, 31 Jan 2011 23:35:02 +0000 Subject: [PATCH 075/185] LUCENE-2897: apply delete-by-term on flushed 
segment while we flush (still buffer delete-by-terms for past segments) git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065855 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/CHANGES.txt | 3 + .../apache/lucene/index/BufferedDeletes.java | 84 ++++++- .../lucene/index/BufferedDeletesStream.java | 227 +++++++++--------- .../apache/lucene/index/DocumentsWriter.java | 64 ++++- .../lucene/index/FreqProxTermsWriter.java | 50 +++- .../lucene/index/FrozenBufferedDeletes.java | 145 +++++++++++ .../org/apache/lucene/index/IndexWriter.java | 2 +- .../apache/lucene/index/LogMergePolicy.java | 2 +- .../apache/lucene/index/SegmentMerger.java | 2 +- .../lucene/index/SegmentWriteState.java | 15 +- .../lucene/index/codecs/BlockTermsReader.java | 4 +- .../apache/lucene/index/TestAddIndexes.java | 1 + .../org/apache/lucene/index/TestCodecs.java | 2 +- .../apache/lucene/index/TestIndexWriter.java | 2 +- .../lucene/index/TestIndexWriterDelete.java | 2 - modules/benchmark/conf/createLineFile.alg | 8 +- 16 files changed, 466 insertions(+), 147 deletions(-) create mode 100644 lucene/src/java/org/apache/lucene/index/FrozenBufferedDeletes.java diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index cf9f02efb99..9a77cc1956d 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -330,6 +330,9 @@ Optimizations seek the term dictionary in TermQuery / TermWeight. (Simon Willnauer, Mike McCandless, Robert Muir) +* LUCENE-2897: Apply deleted terms while flushing a segment. We still + buffer deleted terms to later apply to past segments. 
(Mike McCandless) + Bug fixes * LUCENE-2633: PackedInts Packed32 and Packed64 did not support internal diff --git a/lucene/src/java/org/apache/lucene/index/BufferedDeletes.java b/lucene/src/java/org/apache/lucene/index/BufferedDeletes.java index ed955b90d2d..c72a1f6b0a3 100644 --- a/lucene/src/java/org/apache/lucene/index/BufferedDeletes.java +++ b/lucene/src/java/org/apache/lucene/index/BufferedDeletes.java @@ -18,21 +18,23 @@ package org.apache.lucene.index; */ import java.util.ArrayList; +import java.util.Iterator; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.SortedMap; import java.util.TreeMap; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicInteger; import org.apache.lucene.search.Query; import org.apache.lucene.util.RamUsageEstimator; +import org.apache.lucene.index.BufferedDeletesStream.QueryAndLimit; -/** Holds buffered deletes, by docID, term or query for a - * single segment. This is used to hold buffered pending - * deletes against the to-be-flushed segment as well as - * per-segment deletes for each segment in the index. */ +/* Holds buffered deletes, by docID, term or query for a + * single segment. This is used to hold buffered pending + * deletes against the to-be-flushed segment. Once the + * deletes are pushed (on flush in DocumentsWriter), these + * deletes are converted to a FrozenDeletes instance. */ // NOTE: we are sync'd by BufferedDeletes, ie, all access to // instances of this class is via sync'd methods on @@ -63,13 +65,8 @@ class BufferedDeletes { undercount (say 24 bytes). Integer is OBJ_HEADER + INT. */ final static int BYTES_PER_DEL_QUERY = 5*RamUsageEstimator.NUM_BYTES_OBJECT_REF + 2*RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 2*RamUsageEstimator.NUM_BYTES_INT + 24; - // TODO: many of the deletes stored here will map to - // Integer.MAX_VALUE; we could be more efficient for this - // case ie use a SortedSet not a SortedMap. 
But: Java's - // SortedSet impls are simply backed by a Map so we won't - // save anything unless we do something custom... final AtomicInteger numTermDeletes = new AtomicInteger(); - final SortedMap terms = new TreeMap(); + final Map terms; final Map queries = new HashMap(); final List docIDs = new ArrayList(); @@ -81,6 +78,14 @@ class BufferedDeletes { long gen; + public BufferedDeletes(boolean sortTerms) { + if (sortTerms) { + terms = new TreeMap(); + } else { + terms = new HashMap(); + } + } + @Override public String toString() { if (VERBOSE_DELETES) { @@ -130,6 +135,26 @@ class BufferedDeletes { // should already be cleared } + void update(FrozenBufferedDeletes in) { + numTermDeletes.addAndGet(in.numTermDeletes); + for(Term term : in.terms) { + if (!terms.containsKey(term)) { + // only incr bytesUsed if this term wasn't already buffered: + bytesUsed.addAndGet(BYTES_PER_DEL_TERM); + } + terms.put(term, MAX_INT); + } + + for(int queryIdx=0;queryIdx termsIterable() { + return new Iterable() { + // @Override -- not until Java 1.6 + public Iterator iterator() { + return terms.keySet().iterator(); + } + }; + } + + public Iterable queriesIterable() { + return new Iterable() { + + // @Override -- not until Java 1.6 + public Iterator iterator() { + return new Iterator() { + private final Iterator> iter = queries.entrySet().iterator(); + + // @Override -- not until Java 1.6 + public boolean hasNext() { + return iter.hasNext(); + } + + // @Override -- not until Java 1.6 + public QueryAndLimit next() { + final Map.Entry ent = iter.next(); + return new QueryAndLimit(ent.getKey(), ent.getValue()); + } + + // @Override -- not until Java 1.6 + public void remove() { + throw new UnsupportedOperationException(); + } + }; + } + }; + } void clear() { terms.clear(); diff --git a/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java b/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java index 555c78b67c2..de3046db5dd 100644 --- 
a/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java +++ b/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java @@ -22,7 +22,6 @@ import java.io.PrintStream; import java.util.List; import java.util.ArrayList; import java.util.Date; -import java.util.Map.Entry; import java.util.Comparator; import java.util.Collections; import java.util.concurrent.atomic.AtomicInteger; @@ -52,7 +51,7 @@ import org.apache.lucene.search.Weight; class BufferedDeletesStream { // TODO: maybe linked list? - private final List deletes = new ArrayList(); + private final List deletes = new ArrayList(); // Starts at 1 so that SegmentInfos that have never had // deletes applied (whose bufferedDelGen defaults to 0) @@ -83,13 +82,13 @@ class BufferedDeletesStream { // Appends a new packet of buffered deletes to the stream, // setting its generation: - public synchronized void push(BufferedDeletes packet) { + public synchronized void push(FrozenBufferedDeletes packet) { assert packet.any(); assert checkDeleteStats(); - packet.gen = nextGen++; + assert packet.gen < nextGen; deletes.add(packet); - numTerms.addAndGet(packet.numTermDeletes.get()); - bytesUsed.addAndGet(packet.bytesUsed.get()); + numTerms.addAndGet(packet.numTermDeletes); + bytesUsed.addAndGet(packet.bytesUsed); if (infoStream != null) { message("push deletes " + packet + " delGen=" + packet.gen + " packetCount=" + deletes.size()); } @@ -182,14 +181,14 @@ class BufferedDeletesStream { while (infosIDX >= 0) { //System.out.println("BD: cycle delIDX=" + delIDX + " infoIDX=" + infosIDX); - final BufferedDeletes packet = delIDX >= 0 ? deletes.get(delIDX) : null; + final FrozenBufferedDeletes packet = delIDX >= 0 ? 
deletes.get(delIDX) : null; final SegmentInfo info = infos2.get(infosIDX); final long segGen = info.getBufferedDeletesGen(); if (packet != null && segGen < packet.gen) { //System.out.println(" coalesce"); if (coalescedDeletes == null) { - coalescedDeletes = new BufferedDeletes(); + coalescedDeletes = new BufferedDeletes(true); } coalescedDeletes.update(packet); delIDX--; @@ -202,25 +201,25 @@ class BufferedDeletesStream { int delCount = 0; try { if (coalescedDeletes != null) { - delCount += applyDeletes(coalescedDeletes, reader); + //System.out.println(" del coalesced"); + delCount += applyTermDeletes(coalescedDeletes.termsIterable(), reader); + delCount += applyQueryDeletes(coalescedDeletes.queriesIterable(), reader); } - delCount += applyDeletes(packet, reader); + //System.out.println(" del exact"); + // Don't delete by Term here; DocumentsWriter + // already did that on flush: + delCount += applyQueryDeletes(packet.queriesIterable(), reader); } finally { readerPool.release(reader); } anyNewDeletes |= delCount > 0; - // We've applied doc ids, and they're only applied - // on the current segment - bytesUsed.addAndGet(-packet.docIDs.size() * BufferedDeletes.BYTES_PER_DEL_DOCID); - packet.clearDocIDs(); - if (infoStream != null) { message("seg=" + info + " segGen=" + segGen + " segDeletes=[" + packet + "]; coalesced deletes=[" + (coalescedDeletes == null ? 
"null" : coalescedDeletes) + "] delCount=" + delCount); } if (coalescedDeletes == null) { - coalescedDeletes = new BufferedDeletes(); + coalescedDeletes = new BufferedDeletes(true); } coalescedDeletes.update(packet); delIDX--; @@ -236,7 +235,8 @@ class BufferedDeletesStream { SegmentReader reader = readerPool.get(info, false); int delCount = 0; try { - delCount += applyDeletes(coalescedDeletes, reader); + delCount += applyTermDeletes(coalescedDeletes.termsIterable(), reader); + delCount += applyQueryDeletes(coalescedDeletes.queriesIterable(), reader); } finally { readerPool.release(reader); } @@ -301,121 +301,122 @@ class BufferedDeletesStream { message("pruneDeletes: prune " + count + " packets; " + (deletes.size() - count) + " packets remain"); } for(int delIDX=0;delIDX= 0; - bytesUsed.addAndGet(-packet.bytesUsed.get()); + bytesUsed.addAndGet(-packet.bytesUsed); assert bytesUsed.get() >= 0; } deletes.subList(0, count).clear(); } } - private synchronized long applyDeletes(BufferedDeletes deletes, SegmentReader reader) throws IOException { - + // Delete by Term + private synchronized long applyTermDeletes(Iterable termsIter, SegmentReader reader) throws IOException { long delCount = 0; + Fields fields = reader.fields(); + if (fields == null) { + // This reader has no postings + return 0; + } + TermsEnum termsEnum = null; + + String currentField = null; + DocsEnum docs = null; + assert checkDeleteTerm(null); - - if (deletes.terms.size() > 0) { - Fields fields = reader.fields(); - if (fields == null) { - // This reader has no postings - return 0; + + for (Term term : termsIter) { + // Since we visit terms sorted, we gain performance + // by re-using the same TermsEnum and seeking only + // forwards + if (term.field() != currentField) { + assert currentField == null || currentField.compareTo(term.field()) < 0; + currentField = term.field(); + Terms terms = fields.terms(currentField); + if (terms != null) { + termsEnum = terms.iterator(); + } else { + termsEnum = null; 
+ } } - TermsEnum termsEnum = null; - - String currentField = null; - DocsEnum docs = null; - - for (Entry entry: deletes.terms.entrySet()) { - Term term = entry.getKey(); - // Since we visit terms sorted, we gain performance - // by re-using the same TermsEnum and seeking only - // forwards - if (term.field() != currentField) { - assert currentField == null || currentField.compareTo(term.field()) < 0; - currentField = term.field(); - Terms terms = fields.terms(currentField); - if (terms != null) { - termsEnum = terms.iterator(); - } else { - termsEnum = null; - } - } + if (termsEnum == null) { + continue; + } + assert checkDeleteTerm(term); + + // System.out.println(" term=" + term); - if (termsEnum == null) { - continue; - } - assert checkDeleteTerm(term); - - if (termsEnum.seek(term.bytes(), false) == TermsEnum.SeekStatus.FOUND) { - DocsEnum docsEnum = termsEnum.docs(reader.getDeletedDocs(), docs); + if (termsEnum.seek(term.bytes(), false) == TermsEnum.SeekStatus.FOUND) { + DocsEnum docsEnum = termsEnum.docs(reader.getDeletedDocs(), docs); - if (docsEnum != null) { - docs = docsEnum; - final int limit = entry.getValue(); - while (true) { - final int docID = docs.nextDoc(); - if (docID == DocsEnum.NO_MORE_DOCS || docID >= limit) { - break; - } - reader.deleteDocument(docID); - // TODO: we could/should change - // reader.deleteDocument to return boolean - // true if it did in fact delete, because here - // we could be deleting an already-deleted doc - // which makes this an upper bound: - delCount++; + if (docsEnum != null) { + while (true) { + final int docID = docsEnum.nextDoc(); + if (docID == DocsEnum.NO_MORE_DOCS) { + break; } + reader.deleteDocument(docID); + // TODO: we could/should change + // reader.deleteDocument to return boolean + // true if it did in fact delete, because here + // we could be deleting an already-deleted doc + // which makes this an upper bound: + delCount++; } } } } - // Delete by docID - for (Integer docIdInt : deletes.docIDs) { - 
int docID = docIdInt.intValue(); - reader.deleteDocument(docID); - delCount++; - } - - // Delete by query - if (deletes.queries.size() > 0) { - IndexSearcher searcher = new IndexSearcher(reader); - assert searcher.getTopReaderContext().isAtomic; - final AtomicReaderContext readerContext = (AtomicReaderContext) searcher.getTopReaderContext(); - try { - for (Entry entry : deletes.queries.entrySet()) { - Query query = entry.getKey(); - int limit = entry.getValue().intValue(); - Weight weight = query.weight(searcher); - Scorer scorer = weight.scorer(readerContext, Weight.ScorerContext.def()); - if (scorer != null) { - while(true) { - int doc = scorer.nextDoc(); - if (doc >= limit) - break; - - reader.deleteDocument(doc); - // TODO: we could/should change - // reader.deleteDocument to return boolean - // true if it did in fact delete, because here - // we could be deleting an already-deleted doc - // which makes this an upper bound: - delCount++; - } - } - } - } finally { - searcher.close(); - } - } - return delCount; } - + + public static class QueryAndLimit { + public final Query query; + public final int limit; + public QueryAndLimit(Query query, int limit) { + this.query = query; + this.limit = limit; + } + } + + // Delete by query + private synchronized long applyQueryDeletes(Iterable queriesIter, SegmentReader reader) throws IOException { + long delCount = 0; + IndexSearcher searcher = new IndexSearcher(reader); + assert searcher.getTopReaderContext().isAtomic; + final AtomicReaderContext readerContext = (AtomicReaderContext) searcher.getTopReaderContext(); + try { + for (QueryAndLimit ent : queriesIter) { + Query query = ent.query; + int limit = ent.limit; + Weight weight = query.weight(searcher); + Scorer scorer = weight.scorer(readerContext, Weight.ScorerContext.def()); + if (scorer != null) { + while(true) { + int doc = scorer.nextDoc(); + if (doc >= limit) + break; + + reader.deleteDocument(doc); + // TODO: we could/should change + // reader.deleteDocument to 
return boolean + // true if it did in fact delete, because here + // we could be deleting an already-deleted doc + // which makes this an upper bound: + delCount++; + } + } + } + } finally { + searcher.close(); + } + + return delCount; + } + // used only by assert private boolean checkDeleteTerm(Term term) { if (term != null) { @@ -429,9 +430,9 @@ class BufferedDeletesStream { private boolean checkDeleteStats() { int numTerms2 = 0; long bytesUsed2 = 0; - for(BufferedDeletes packet : deletes) { - numTerms2 += packet.numTermDeletes.get(); - bytesUsed2 += packet.bytesUsed.get(); + for(FrozenBufferedDeletes packet : deletes) { + numTerms2 += packet.numTermDeletes; + bytesUsed2 += packet.bytesUsed; } assert numTerms2 == numTerms.get(): "numTerms2=" + numTerms2 + " vs " + numTerms.get(); assert bytesUsed2 == bytesUsed.get(): "bytesUsed2=" + bytesUsed2 + " vs " + bytesUsed; diff --git a/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java b/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java index d3c6caee9fe..bb5304371fc 100644 --- a/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java +++ b/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java @@ -35,9 +35,11 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMFile; import org.apache.lucene.util.ArrayUtil; +import org.apache.lucene.util.BitVector; +import org.apache.lucene.util.RamUsageEstimator; import org.apache.lucene.util.RecyclingByteBlockAllocator; import org.apache.lucene.util.ThreadInterruptedException; -import org.apache.lucene.util.RamUsageEstimator; + import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_MASK; import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_SIZE; @@ -133,8 +135,9 @@ final class DocumentsWriter { // this, they wait for others to finish first private final int maxThreadStates; + // TODO: cutover to BytesRefHash // Deletes for our still-in-RAM (to be flushed 
next) segment - private BufferedDeletes pendingDeletes = new BufferedDeletes(); + private BufferedDeletes pendingDeletes = new BufferedDeletes(false); static class DocState { DocumentsWriter docWriter; @@ -336,6 +339,9 @@ final class DocumentsWriter { return doFlush; } + // TODO: we could check w/ FreqProxTermsWriter: if the + // term doesn't exist, don't bother buffering into the + // per-DWPT map (but still must go into the global map) boolean deleteTerm(Term term, boolean skipWait) { final boolean doFlush = flushControl.waitUpdate(0, 1, skipWait); synchronized(this) { @@ -507,17 +513,19 @@ final class DocumentsWriter { private void pushDeletes(SegmentInfo newSegment, SegmentInfos segmentInfos) { // Lock order: DW -> BD + final long delGen = bufferedDeletesStream.getNextGen(); if (pendingDeletes.any()) { if (segmentInfos.size() > 0 || newSegment != null) { + final FrozenBufferedDeletes packet = new FrozenBufferedDeletes(pendingDeletes, delGen); if (infoStream != null) { message("flush: push buffered deletes"); } - bufferedDeletesStream.push(pendingDeletes); + bufferedDeletesStream.push(packet); if (infoStream != null) { - message("flush: delGen=" + pendingDeletes.gen); + message("flush: delGen=" + packet.gen); } if (newSegment != null) { - newSegment.setBufferedDeletesGen(pendingDeletes.gen); + newSegment.setBufferedDeletesGen(packet.gen); } } else { if (infoStream != null) { @@ -527,9 +535,9 @@ final class DocumentsWriter { // there are no segments, the deletions cannot // affect anything. 
} - pendingDeletes = new BufferedDeletes(); + pendingDeletes.clear(); } else if (newSegment != null) { - newSegment.setBufferedDeletesGen(bufferedDeletesStream.getNextGen()); + newSegment.setBufferedDeletesGen(delGen); } } @@ -580,7 +588,19 @@ final class DocumentsWriter { final SegmentWriteState flushState = new SegmentWriteState(infoStream, directory, segment, fieldInfos, numDocs, writer.getConfig().getTermIndexInterval(), - SegmentCodecs.build(fieldInfos, writer.codecs)); + SegmentCodecs.build(fieldInfos, writer.codecs), + pendingDeletes); + // Apply delete-by-docID now (delete-byDocID only + // happens when an exception is hit processing that + // doc, eg if analyzer has some problem w/ the text): + if (pendingDeletes.docIDs.size() > 0) { + flushState.deletedDocs = new BitVector(numDocs); + for(int delDocID : pendingDeletes.docIDs) { + flushState.deletedDocs.set(delDocID); + } + pendingDeletes.bytesUsed.addAndGet(-pendingDeletes.docIDs.size() * BufferedDeletes.BYTES_PER_DEL_DOCID); + pendingDeletes.docIDs.clear(); + } newSegment = new SegmentInfo(segment, numDocs, directory, false, fieldInfos.hasProx(), flushState.segmentCodecs, false); @@ -592,10 +612,14 @@ final class DocumentsWriter { double startMBUsed = bytesUsed()/1024./1024.; consumer.flush(threads, flushState); + newSegment.setHasVectors(flushState.hasVectors); if (infoStream != null) { message("new segment has " + (flushState.hasVectors ? 
"vectors" : "no vectors")); + if (flushState.deletedDocs != null) { + message("new segment has " + flushState.deletedDocs.count() + " deleted docs"); + } message("flushedFiles=" + newSegment.files()); message("flushed codecs=" + newSegment.getSegmentCodecs()); } @@ -616,6 +640,30 @@ final class DocumentsWriter { newSegment.setUseCompoundFile(true); } + // Must write deleted docs after the CFS so we don't + // slurp the del file into CFS: + if (flushState.deletedDocs != null) { + final int delCount = flushState.deletedDocs.count(); + assert delCount > 0; + newSegment.setDelCount(delCount); + newSegment.advanceDelGen(); + final String delFileName = newSegment.getDelFileName(); + boolean success2 = false; + try { + flushState.deletedDocs.write(directory, delFileName); + success2 = true; + } finally { + if (!success2) { + try { + directory.deleteFile(delFileName); + } catch (Throwable t) { + // suppress this so we keep throwing the + // original exception + } + } + } + } + if (infoStream != null) { message("flush: segment=" + newSegment); final double newSegmentSizeNoStore = newSegment.sizeInBytes(false)/1024./1024.; diff --git a/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriter.java b/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriter.java index 7d9df0a661d..d342cb47249 100644 --- a/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriter.java +++ b/lucene/src/java/org/apache/lucene/index/FreqProxTermsWriter.java @@ -26,8 +26,9 @@ import java.util.Map; import org.apache.lucene.index.codecs.FieldsConsumer; import org.apache.lucene.index.codecs.PostingsConsumer; -import org.apache.lucene.index.codecs.TermsConsumer; import org.apache.lucene.index.codecs.TermStats; +import org.apache.lucene.index.codecs.TermsConsumer; +import org.apache.lucene.util.BitVector; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CollectionUtil; @@ -108,7 +109,7 @@ final class FreqProxTermsWriter extends TermsHashConsumer { // If this field has 
postings then add them to the // segment - appendPostings(fields, consumer); + appendPostings(fieldName, state, fields, consumer); for(int i=0;i ent : deletes.queries.entrySet()) { + queries[upto] = ent.getKey(); + queryLimits[upto] = ent.getValue(); + upto++; + } + bytesUsed = terms.length * BYTES_PER_DEL_TERM + queries.length * BYTES_PER_DEL_QUERY; + numTermDeletes = deletes.numTermDeletes.get(); + this.gen = gen; + } + + public Iterable termsIterable() { + return new Iterable() { + // @Override -- not until Java 1.6 + public Iterator iterator() { + return new Iterator() { + private int upto; + + // @Override -- not until Java 1.6 + public boolean hasNext() { + return upto < terms.length; + } + + // @Override -- not until Java 1.6 + public Term next() { + return terms[upto++]; + } + + // @Override -- not until Java 1.6 + public void remove() { + throw new UnsupportedOperationException(); + } + }; + } + }; + } + + public Iterable queriesIterable() { + return new Iterable() { + // @Override -- not until Java 1.6 + public Iterator iterator() { + return new Iterator() { + private int upto; + + // @Override -- not until Java 1.6 + public boolean hasNext() { + return upto < queries.length; + } + + // @Override -- not until Java 1.6 + public QueryAndLimit next() { + QueryAndLimit ret = new QueryAndLimit(queries[upto], queryLimits[upto]); + upto++; + return ret; + } + + // @Override -- not until Java 1.6 + public void remove() { + throw new UnsupportedOperationException(); + } + }; + } + }; + } + + @Override + public String toString() { + String s = ""; + if (numTermDeletes != 0) { + s += " " + numTermDeletes + " deleted terms (unique count=" + terms.length + ")"; + } + if (queries.length != 0) { + s += " " + queries.length + " deleted queries"; + } + if (bytesUsed != 0) { + s += " bytesUsed=" + bytesUsed; + } + + return s; + } + + boolean any() { + return terms.length > 0 || queries.length > 0; + } +} diff --git a/lucene/src/java/org/apache/lucene/index/IndexWriter.java 
b/lucene/src/java/org/apache/lucene/index/IndexWriter.java index 3eaea73a36f..8da73a3cdd0 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexWriter.java +++ b/lucene/src/java/org/apache/lucene/index/IndexWriter.java @@ -339,7 +339,7 @@ public class IndexWriter implements Closeable { */ IndexReader getReader(boolean applyAllDeletes) throws IOException { ensureOpen(); - + final long tStart = System.currentTimeMillis(); if (infoStream != null) { diff --git a/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java b/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java index 9dd6278f78f..8c53d24bc0e 100644 --- a/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java +++ b/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java @@ -661,7 +661,7 @@ public abstract class LogMergePolicy extends MergePolicy { sb.append("maxMergeSizeForOptimize=").append(maxMergeSizeForOptimize).append(", "); sb.append("calibrateSizeByDeletes=").append(calibrateSizeByDeletes).append(", "); sb.append("maxMergeDocs=").append(maxMergeDocs).append(", "); - sb.append("useCompoundFile=").append(useCompoundFile); + sb.append("useCompoundFile=").append(useCompoundFile).append(", "); sb.append("requireContiguousMerge=").append(requireContiguousMerge); sb.append("]"); return sb.toString(); diff --git a/lucene/src/java/org/apache/lucene/index/SegmentMerger.java b/lucene/src/java/org/apache/lucene/index/SegmentMerger.java index bacfec8a599..a708c93ffcf 100644 --- a/lucene/src/java/org/apache/lucene/index/SegmentMerger.java +++ b/lucene/src/java/org/apache/lucene/index/SegmentMerger.java @@ -266,7 +266,7 @@ final class SegmentMerger { // details. 
throw new RuntimeException("mergeFields produced an invalid result: docCount is " + docCount + " but fdx file size is " + fdxFileLength + " file=" + fileName + " file exists?=" + directory.fileExists(fileName) + "; now aborting this merge to prevent index corruption"); - segmentWriteState = new SegmentWriteState(null, directory, segment, fieldInfos, docCount, termIndexInterval, codecInfo); + segmentWriteState = new SegmentWriteState(null, directory, segment, fieldInfos, docCount, termIndexInterval, codecInfo, null); return docCount; } diff --git a/lucene/src/java/org/apache/lucene/index/SegmentWriteState.java b/lucene/src/java/org/apache/lucene/index/SegmentWriteState.java index 514f0c4e410..288c75097a0 100644 --- a/lucene/src/java/org/apache/lucene/index/SegmentWriteState.java +++ b/lucene/src/java/org/apache/lucene/index/SegmentWriteState.java @@ -20,6 +20,7 @@ package org.apache.lucene.index; import java.io.PrintStream; import org.apache.lucene.store.Directory; +import org.apache.lucene.util.BitVector; /** * @lucene.experimental @@ -32,6 +33,16 @@ public class SegmentWriteState { public final int numDocs; public boolean hasVectors; + // Deletes to apply while we are flushing the segment. A + // Term is enrolled in here if it was deleted at one + // point, and it's mapped to the docIDUpto, meaning any + // docID < docIDUpto containing this term should be + // deleted. 
+ public final BufferedDeletes segDeletes; + + // Lazily created: + public BitVector deletedDocs; + final SegmentCodecs segmentCodecs; public final String codecId; @@ -57,8 +68,9 @@ public class SegmentWriteState { public SegmentWriteState(PrintStream infoStream, Directory directory, String segmentName, FieldInfos fieldInfos, - int numDocs, int termIndexInterval, SegmentCodecs segmentCodecs) { + int numDocs, int termIndexInterval, SegmentCodecs segmentCodecs, BufferedDeletes segDeletes) { this.infoStream = infoStream; + this.segDeletes = segDeletes; this.directory = directory; this.segmentName = segmentName; this.fieldInfos = fieldInfos; @@ -80,5 +92,6 @@ public class SegmentWriteState { termIndexInterval = state.termIndexInterval; segmentCodecs = state.segmentCodecs; this.codecId = codecId; + segDeletes = state.segDeletes; } } diff --git a/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsReader.java b/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsReader.java index e25364c33a3..2e0e9cd3959 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsReader.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsReader.java @@ -108,7 +108,7 @@ public class BlockTermsReader extends FieldsProducer { } } - private String segment; + //private String segment; public BlockTermsReader(TermsIndexReaderBase indexReader, Directory dir, FieldInfos fieldInfos, String segment, PostingsReaderBase postingsReader, int readBufferSize, Comparator termComp, int termsCacheSize, String codecId) @@ -118,7 +118,7 @@ public class BlockTermsReader extends FieldsProducer { termsCache = new DoubleBarrelLRUCache(termsCacheSize); this.termComp = termComp; - this.segment = segment; + //this.segment = segment; in = dir.openInput(IndexFileNames.segmentFileName(segment, codecId, BlockTermsWriter.TERMS_EXTENSION), readBufferSize); diff --git a/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java 
b/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java index 69063eb5b20..5ed6c089912 100755 --- a/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java +++ b/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java @@ -157,6 +157,7 @@ public class TestAddIndexes extends LuceneTestCase { setUpDirs(dir, aux); IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND)); + writer.setInfoStream(VERBOSE ? System.out : null); writer.addIndexes(aux); // Adds 10 docs, then replaces them with another 10 diff --git a/lucene/src/test/org/apache/lucene/index/TestCodecs.java b/lucene/src/test/org/apache/lucene/index/TestCodecs.java index cabf1d330d7..c25796606c4 100644 --- a/lucene/src/test/org/apache/lucene/index/TestCodecs.java +++ b/lucene/src/test/org/apache/lucene/index/TestCodecs.java @@ -589,7 +589,7 @@ public class TestCodecs extends LuceneTestCase { final int termIndexInterval = _TestUtil.nextInt(random, 13, 27); final SegmentCodecs codecInfo = SegmentCodecs.build(fieldInfos, CodecProvider.getDefault()); - final SegmentWriteState state = new SegmentWriteState(null, dir, SEGMENT, fieldInfos, 10000, termIndexInterval, codecInfo); + final SegmentWriteState state = new SegmentWriteState(null, dir, SEGMENT, fieldInfos, 10000, termIndexInterval, codecInfo, null); final FieldsConsumer consumer = state.segmentCodecs.codec().fieldsConsumer(state); Arrays.sort(fields); diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java index 7c63115773a..a74ac7081dd 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java @@ -2576,7 +2576,7 @@ public class TestIndexWriter extends LuceneTestCase { count++; } } - assertTrue("flush happened too quickly during " + (doIndexing ? 
"indexing" : "deleting") + " count=" + count, count > 2500); + assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 1500); } w.close(); dir.close(); diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java index 3763e54035c..731c5a3e21f 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java @@ -157,8 +157,6 @@ public class TestIndexWriterDelete extends LuceneTestCase { assertEquals(0, modifier.getSegmentCount()); modifier.commit(); - modifier.commit(); - IndexReader reader = IndexReader.open(dir, true); assertEquals(1, reader.numDocs()); diff --git a/modules/benchmark/conf/createLineFile.alg b/modules/benchmark/conf/createLineFile.alg index 969f30762df..cad01d991cf 100644 --- a/modules/benchmark/conf/createLineFile.alg +++ b/modules/benchmark/conf/createLineFile.alg @@ -29,10 +29,14 @@ # # Where to get documents from: -content.source=org.apache.lucene.benchmark.byTask.feeds.ReutersContentSource +content.source=org.apache.lucene.benchmark.byTask.feeds.EnwikiContentSource # Where to write the line file output: -line.file.out=work/reuters.lines.txt +line.file.out=/x/tmp/enwiki.out.txt + +docs.file=/x/lucene/data/enwiki/enwiki-20110115-pages-articles.xml + +keep.image.only.docs = false # Stop after processing the document feed once: content.source.forever=false From d087701cd6f9b232dd8f052a2493d8ff4194aec2 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Tue, 1 Feb 2011 02:08:35 +0000 Subject: [PATCH 076/185] SOLR-1916: add locale/timezone warning git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065891 13f79535-47bb-0310-9956-ffa450edef68 --- solr/contrib/dataimporthandler/README.txt | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 solr/contrib/dataimporthandler/README.txt diff --git 
a/solr/contrib/dataimporthandler/README.txt b/solr/contrib/dataimporthandler/README.txt new file mode 100644 index 00000000000..5bc66ac3201 --- /dev/null +++ b/solr/contrib/dataimporthandler/README.txt @@ -0,0 +1,7 @@ +Although Solr strives to be agnostic of the Locale where the server is +running, some code paths in DataImportHandler are known to depend on the +System default Locale, Timezone, or Charset. It is recommended that when +running Solr you set the following system properties: + -Duser.language=xx -Duser.country=YY -Duser.timezone=ZZZ + +where xx, YY, and ZZZ are consistent with any database server's configuration. From d09e29d5c9845fa3e54e8a7ce51031f897d66066 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Tue, 1 Feb 2011 11:41:49 +0000 Subject: [PATCH 077/185] make MockRandomMergePolicy slightly less evil; fixes 2 recent test failures git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1066008 13f79535-47bb-0310-9956-ffa450edef68 --- .../test/org/apache/lucene/index/MockRandomMergePolicy.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lucene/src/test/org/apache/lucene/index/MockRandomMergePolicy.java b/lucene/src/test/org/apache/lucene/index/MockRandomMergePolicy.java index 7630dc7d220..e8bc977931b 100644 --- a/lucene/src/test/org/apache/lucene/index/MockRandomMergePolicy.java +++ b/lucene/src/test/org/apache/lucene/index/MockRandomMergePolicy.java @@ -67,7 +67,8 @@ public class MockRandomMergePolicy extends MergePolicy { Collections.shuffle(segmentInfos2, random); int upto = 0; while(upto < segmentInfos.size()) { - int inc = _TestUtil.nextInt(random, 1, segmentInfos.size()-upto); + int max = Math.min(10, segmentInfos.size()-upto); + int inc = max <= 2 ? 
max : _TestUtil.nextInt(random, 2, max); mergeSpec.add(new OneMerge(segmentInfos2.range(upto, upto+inc))); upto += inc; } @@ -88,6 +89,7 @@ public class MockRandomMergePolicy extends MergePolicy { @Override public boolean useCompoundFile(SegmentInfos infos, SegmentInfo mergedInfo) throws IOException { - return random.nextBoolean(); + // 80% of the time we create CFS: + return random.nextInt(5) != 1; } } From eee47902b7fb0f16e059c0c1c696615675f4cc2e Mon Sep 17 00:00:00 2001 From: Yonik Seeley Date: Wed, 2 Feb 2011 16:22:51 +0000 Subject: [PATCH 078/185] LUCENE-2831: remove unnecessary casts git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1066515 13f79535-47bb-0310-9956-ffa450edef68 --- solr/src/java/org/apache/solr/search/DocSet.java | 5 ++--- solr/src/java/org/apache/solr/search/SortedIntDocSet.java | 3 +-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/solr/src/java/org/apache/solr/search/DocSet.java b/solr/src/java/org/apache/solr/search/DocSet.java index d188389ecd5..ddfc3cde67f 100644 --- a/solr/src/java/org/apache/solr/search/DocSet.java +++ b/solr/src/java/org/apache/solr/search/DocSet.java @@ -248,9 +248,8 @@ abstract class DocSetBase implements DocSet { return new Filter() { @Override - public DocIdSet getDocIdSet(AtomicReaderContext ctx) throws IOException { - IndexReader.AtomicReaderContext context = (IndexReader.AtomicReaderContext)ctx; // TODO: remove after lucene migration - IndexReader reader = ctx.reader; + public DocIdSet getDocIdSet(AtomicReaderContext context) throws IOException { + IndexReader reader = context.reader; if (context.isTopLevel) { return bs; diff --git a/solr/src/java/org/apache/solr/search/SortedIntDocSet.java b/solr/src/java/org/apache/solr/search/SortedIntDocSet.java index 295a794bde9..ee3b9b47160 100755 --- a/solr/src/java/org/apache/solr/search/SortedIntDocSet.java +++ b/solr/src/java/org/apache/solr/search/SortedIntDocSet.java @@ -552,8 +552,7 @@ public class SortedIntDocSet extends DocSetBase { int 
lastEndIdx = 0; @Override - public DocIdSet getDocIdSet(AtomicReaderContext contextX) throws IOException { - AtomicReaderContext context = (AtomicReaderContext)contextX; // TODO: remove after lucene migration + public DocIdSet getDocIdSet(AtomicReaderContext context) throws IOException { IndexReader reader = context.reader; final int base = context.docBase; From 2635ac0cf7a726d36ff6d7362c12970bb99b3c0c Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 2 Feb 2011 22:34:15 +0000 Subject: [PATCH 079/185] LUCENE-2831: Use leaf reader slices for parallel execution instead of SubSearcher instances. git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1066669 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/lucene/search/IndexSearcher.java | 198 ++++++++++-------- .../search/function/QueryValueSource.java | 4 +- 2 files changed, 115 insertions(+), 87 deletions(-) diff --git a/lucene/src/java/org/apache/lucene/search/IndexSearcher.java b/lucene/src/java/org/apache/lucene/search/IndexSearcher.java index 6e885c003ff..23736d0726c 100644 --- a/lucene/src/java/org/apache/lucene/search/IndexSearcher.java +++ b/lucene/src/java/org/apache/lucene/search/IndexSearcher.java @@ -65,10 +65,11 @@ public class IndexSearcher { // in the next release protected final ReaderContext readerContext; protected final AtomicReaderContext[] leafContexts; + // used with executor - each slice holds a set of leafs executed within one thread + protected final LeafSlice[] leafSlices; // These are only used for multi-threaded search private final ExecutorService executor; - protected final IndexSearcher[] subSearchers; // the default SimilarityProvider private static final SimilarityProvider defaultProvider = new DefaultSimilarity(); @@ -175,47 +176,22 @@ public class IndexSearcher { this.closeReader = closeReader; this.readerContext = context; leafContexts = ReaderUtil.leaves(context); - - if (executor == null) { - subSearchers = null; - } else { - subSearchers = new 
IndexSearcher[this.leafContexts.length]; - for (int i = 0; i < subSearchers.length; i++) { - if (leafContexts[i].reader == context.reader) { - subSearchers[i] = this; - } else { - subSearchers[i] = new IndexSearcher(context, leafContexts[i]); - } - } - } - } - - /** - * Expert: Creates a searcher from a top-level {@link ReaderContext} with and - * executes searches on the given leave slice exclusively instead of searching - * over all leaves. This constructor should be used to run one or more leaves - * within a single thread. Hence, for scorer and filter this looks like an - * ordinary search in the hierarchy such that there is no difference between - * single and multi-threaded. - * - * @lucene.experimental - * */ - public IndexSearcher(ReaderContext topLevel, AtomicReaderContext... leaves) { - assert assertLeaves(topLevel, leaves); - readerContext = topLevel; - reader = topLevel.reader; - leafContexts = leaves; - executor = null; - subSearchers = null; - closeReader = false; + this.leafSlices = executor == null ? null : slices(leafContexts); } - private boolean assertLeaves(ReaderContext topLevel, AtomicReaderContext... leaves) { - for (AtomicReaderContext leaf : leaves) { - assert ReaderUtil.getTopLevelContext(leaf) == topLevel : "leaf context is not a leaf of the given top-level context"; + /** + * Expert: Creates an array of leaf slices each holding a subset of the given leaves. + * Each {@link LeafSlice} is executed in a single thread. By default there + * will be one {@link LeafSlice} per leaf ({@link AtomicReaderContext}). + */ + protected LeafSlice[] slices(AtomicReaderContext...leaves) { + LeafSlice[] slices = new LeafSlice[leaves.length]; + for (int i = 0; i < slices.length; i++) { + slices[i] = new LeafSlice(leaves[i]); } - return true; + return slices; } + /** Return the {@link IndexReader} this searches. 
*/ public IndexReader getIndexReader() { @@ -236,11 +212,11 @@ public class IndexSearcher { return reader.docFreq(term); } else { final ExecutionHelper runner = new ExecutionHelper(executor); - for(int i = 0; i < subSearchers.length; i++) { - final IndexSearcher searchable = subSearchers[i]; + for(int i = 0; i < leafContexts.length; i++) { + final IndexReader leaf = leafContexts[i].reader; runner.submit(new Callable() { public Integer call() throws IOException { - return Integer.valueOf(searchable.docFreq(term)); + return Integer.valueOf(leaf.docFreq(term)); } }); } @@ -324,7 +300,7 @@ public class IndexSearcher { */ public void search(Query query, Filter filter, Collector results) throws IOException { - search(createWeight(query), filter, results); + search(leafContexts, createWeight(query), filter, results); } /** Lower-level search API. @@ -342,7 +318,7 @@ public class IndexSearcher { */ public void search(Query query, Collector results) throws IOException { - search(createWeight(query), null, results); + search(leafContexts, createWeight(query), null, results); } /** Search implementation with arbitrary sorting. 
Finds @@ -382,25 +358,16 @@ public class IndexSearcher { * @throws BooleanQuery.TooManyClauses */ protected TopDocs search(Weight weight, Filter filter, int nDocs) throws IOException { - if (executor == null) { - // single thread - int limit = reader.maxDoc(); - if (limit == 0) { - limit = 1; - } - nDocs = Math.min(nDocs, limit); - TopScoreDocCollector collector = TopScoreDocCollector.create(nDocs, !weight.scoresDocsOutOfOrder()); - search(weight, filter, collector); - return collector.topDocs(); + return search(leafContexts, weight, filter, nDocs); } else { final HitQueue hq = new HitQueue(nDocs, false); final Lock lock = new ReentrantLock(); final ExecutionHelper runner = new ExecutionHelper(executor); - for (int i = 0; i < subSearchers.length; i++) { // search each sub + for (int i = 0; i < leafSlices.length; i++) { // search each sub runner.submit( - new SearcherCallableNoSort(lock, subSearchers[i], weight, filter, nDocs, hq)); + new SearcherCallableNoSort(lock, this, leafSlices[i], weight, filter, nDocs, hq)); } int totalHits = 0; @@ -418,6 +385,25 @@ public class IndexSearcher { } } + /** Expert: Low-level search implementation. Finds the top n + * hits for query, using the given leaf readers applying filter if non-null. + * + *

    Applications should usually call {@link IndexSearcher#search(Query,int)} or + * {@link IndexSearcher#search(Query,Filter,int)} instead. + * @throws BooleanQuery.TooManyClauses + */ + protected TopDocs search(AtomicReaderContext[] leaves, Weight weight, Filter filter, int nDocs) throws IOException { + // single thread + int limit = reader.maxDoc(); + if (limit == 0) { + limit = 1; + } + nDocs = Math.min(nDocs, limit); + TopScoreDocCollector collector = TopScoreDocCollector.create(nDocs, !weight.scoresDocsOutOfOrder()); + search(leaves, weight, filter, collector); + return collector.topDocs(); + } + /** Expert: Low-level search implementation with arbitrary sorting. Finds * the top n hits for query, applying * filter if non-null, and sorting the hits by the criteria in @@ -449,27 +435,18 @@ public class IndexSearcher { throws IOException { if (sort == null) throw new NullPointerException(); - + if (executor == null) { - // single thread - int limit = reader.maxDoc(); - if (limit == 0) { - limit = 1; - } - nDocs = Math.min(nDocs, limit); - - TopFieldCollector collector = TopFieldCollector.create(sort, nDocs, - fillFields, fieldSortDoTrackScores, fieldSortDoMaxScore, !weight.scoresDocsOutOfOrder()); - search(weight, filter, collector); - return (TopFieldDocs) collector.topDocs(); + // use all leaves here! 
+ return search (leafContexts, weight, filter, nDocs, sort, fillFields); } else { // TODO: make this respect fillFields final FieldDocSortedHitQueue hq = new FieldDocSortedHitQueue(nDocs); final Lock lock = new ReentrantLock(); final ExecutionHelper runner = new ExecutionHelper(executor); - for (int i = 0; i < subSearchers.length; i++) { // search each sub + for (int i = 0; i < leafSlices.length; i++) { // search each leaf slice runner.submit( - new SearcherCallableWithSort(lock, subSearchers[i], weight, filter, nDocs, hq, sort)); + new SearcherCallableWithSort(lock, this, leafSlices[i], weight, filter, nDocs, hq, sort)); } int totalHits = 0; float maxScore = Float.NEGATIVE_INFINITY; @@ -484,6 +461,33 @@ public class IndexSearcher { return new TopFieldDocs(totalHits, scoreDocs, hq.getFields(), maxScore); } } + + + /** + * Just like {@link #search(Weight, Filter, int, Sort)}, but you choose + * whether or not the fields in the returned {@link FieldDoc} instances should + * be set by specifying fillFields. + * + *

    NOTE: this does not compute scores by default. If you + * need scores, create a {@link TopFieldCollector} + * instance by calling {@link TopFieldCollector#create} and + * then pass that to {@link #search(Weight, Filter, + * Collector)}.

    + */ + protected TopFieldDocs search(AtomicReaderContext[] leaves, Weight weight, Filter filter, int nDocs, + Sort sort, boolean fillFields) throws IOException { + // single thread + int limit = reader.maxDoc(); + if (limit == 0) { + limit = 1; + } + nDocs = Math.min(nDocs, limit); + + TopFieldCollector collector = TopFieldCollector.create(sort, nDocs, + fillFields, fieldSortDoTrackScores, fieldSortDoMaxScore, !weight.scoresDocsOutOfOrder()); + search(leaves, weight, filter, collector); + return (TopFieldDocs) collector.topDocs(); + } /** * Lower-level search API. @@ -497,6 +501,12 @@ public class IndexSearcher { * documents. The high-level search API ({@link IndexSearcher#search(Query,int)}) is * usually more efficient, as it skips non-high-scoring hits. * + *

    + * NOTE: this method executes the searches on all given leaves exclusively. + * To search across all the searchers leaves use {@link #leafContexts}. + * + * @param leaves + * the searchers leaves to execute the searches on * @param weight * to match documents * @param filter @@ -505,7 +515,7 @@ public class IndexSearcher { * to receive hits * @throws BooleanQuery.TooManyClauses */ - protected void search(Weight weight, Filter filter, Collector collector) + protected void search(AtomicReaderContext[] leaves, Weight weight, Filter filter, Collector collector) throws IOException { // TODO: should we make this @@ -513,18 +523,18 @@ public class IndexSearcher { ScorerContext scorerContext = ScorerContext.def().scoreDocsInOrder(true).topScorer(true); // always use single thread: if (filter == null) { - for (int i = 0; i < leafContexts.length; i++) { // search each subreader - collector.setNextReader(leafContexts[i]); + for (int i = 0; i < leaves.length; i++) { // search each subreader + collector.setNextReader(leaves[i]); scorerContext = scorerContext.scoreDocsInOrder(!collector.acceptsDocsOutOfOrder()); - Scorer scorer = weight.scorer(leafContexts[i], scorerContext); + Scorer scorer = weight.scorer(leaves[i], scorerContext); if (scorer != null) { scorer.score(collector); } } } else { - for (int i = 0; i < leafContexts.length; i++) { // search each subreader - collector.setNextReader(leafContexts[i]); - searchWithFilter(leafContexts[i], weight, filter, collector); + for (int i = 0; i < leaves.length; i++) { // search each subreader + collector.setNextReader(leaves[i]); + searchWithFilter(leaves[i], weight, filter, collector); } } } @@ -649,7 +659,7 @@ public class IndexSearcher { * Returns this searchers the top-level {@link ReaderContext}. 
* @see IndexReader#getTopReaderContext() */ - /* Sugar for .getIndexReader().getTopReaderContext() */ + /* sugar for #getReader().getTopReaderContext() */ public ReaderContext getTopReaderContext() { return readerContext; } @@ -660,24 +670,26 @@ public class IndexSearcher { private static final class SearcherCallableNoSort implements Callable { private final Lock lock; - private final IndexSearcher searchable; + private final IndexSearcher searcher; private final Weight weight; private final Filter filter; private final int nDocs; private final HitQueue hq; + private final LeafSlice slice; - public SearcherCallableNoSort(Lock lock, IndexSearcher searchable, Weight weight, + public SearcherCallableNoSort(Lock lock, IndexSearcher searcher, LeafSlice slice, Weight weight, Filter filter, int nDocs, HitQueue hq) { this.lock = lock; - this.searchable = searchable; + this.searcher = searcher; this.weight = weight; this.filter = filter; this.nDocs = nDocs; this.hq = hq; + this.slice = slice; } public TopDocs call() throws IOException { - final TopDocs docs = searchable.search (weight, filter, nDocs); + final TopDocs docs = searcher.search (slice.leaves, weight, filter, nDocs); final ScoreDoc[] scoreDocs = docs.scoreDocs; for (int j = 0; j < scoreDocs.length; j++) { // merge scoreDocs into hq final ScoreDoc scoreDoc = scoreDocs[j]; @@ -701,26 +713,28 @@ public class IndexSearcher { private static final class SearcherCallableWithSort implements Callable { private final Lock lock; - private final IndexSearcher searchable; + private final IndexSearcher searcher; private final Weight weight; private final Filter filter; private final int nDocs; private final FieldDocSortedHitQueue hq; private final Sort sort; + private final LeafSlice slice; - public SearcherCallableWithSort(Lock lock, IndexSearcher searchable, Weight weight, + public SearcherCallableWithSort(Lock lock, IndexSearcher searcher, LeafSlice slice, Weight weight, Filter filter, int nDocs, FieldDocSortedHitQueue hq, 
Sort sort) { this.lock = lock; - this.searchable = searchable; + this.searcher = searcher; this.weight = weight; this.filter = filter; this.nDocs = nDocs; this.hq = hq; this.sort = sort; + this.slice = slice; } public TopFieldDocs call() throws IOException { - final TopFieldDocs docs = searchable.search (weight, filter, nDocs, sort); + final TopFieldDocs docs = searcher.search (slice.leaves, weight, filter, nDocs, sort, true); lock.lock(); try { hq.setFields(docs.fields); @@ -791,4 +805,18 @@ public class IndexSearcher { return this; } } + + /** + * A class holding a subset of the {@link IndexSearcher}s leaf contexts to be + * executed within a single thread. + * + * @lucene.experimental + */ + public static class LeafSlice { + final AtomicReaderContext[] leaves; + + public LeafSlice(AtomicReaderContext...leaves) { + this.leaves = leaves; + } + } } diff --git a/solr/src/java/org/apache/solr/search/function/QueryValueSource.java b/solr/src/java/org/apache/solr/search/function/QueryValueSource.java index b7d13efb351..ebf40a15dfb 100755 --- a/solr/src/java/org/apache/solr/search/function/QueryValueSource.java +++ b/solr/src/java/org/apache/solr/search/function/QueryValueSource.java @@ -100,11 +100,11 @@ class QueryDocValues extends DocValues { if (w == null) { IndexSearcher weightSearcher; if(fcontext == null) { - weightSearcher = new IndexSearcher(ReaderUtil.getTopLevelContext(readerContext), readerContext); + weightSearcher = new IndexSearcher(ReaderUtil.getTopLevelContext(readerContext)); } else { weightSearcher = (IndexSearcher)fcontext.get("searcher"); if (weightSearcher == null) { - weightSearcher = new IndexSearcher(ReaderUtil.getTopLevelContext(readerContext), readerContext); + weightSearcher = new IndexSearcher(ReaderUtil.getTopLevelContext(readerContext)); } } w = q.weight(weightSearcher); From dde8fc7020a4ad1303046b96363279ff3fa0448d Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Wed, 2 Feb 2011 23:27:25 +0000 Subject: [PATCH 080/185] LUCENE-2751: add 
LuceneTestCase.newSearcher. use this to get an indexsearcher that randomly uses threads, etc git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1066691 13f79535-47bb-0310-9956-ffa450edef68 --- .../highlight/HighlighterPhraseTest.java | 10 ++-- .../search/highlight/TokenSourcesTest.java | 4 +- .../store/instantiated/TestEmptyIndex.java | 2 +- .../store/instantiated/TestRealTime.java | 2 +- .../lucene/search/ChainedFilterTest.java | 4 +- .../lucene/search/DuplicateFilterTest.java | 2 +- .../lucene/search/FuzzyLikeThisQueryTest.java | 2 +- .../lucene/search/regex/TestRegexQuery.java | 2 +- .../search/similar/TestMoreLikeThis.java | 2 +- .../queryParser/standard/TestQPHelper.java | 3 +- .../apache/lucene/xmlparser/TestParser.java | 2 +- .../org/apache/lucene/TestExternalCodecs.java | 4 +- .../apache/lucene/document/TestDocument.java | 4 +- .../index/TestBackwardsCompatibility.java | 2 +- .../org/apache/lucene/index/TestCodecs.java | 2 +- .../lucene/index/TestDeletionPolicy.java | 8 +-- .../apache/lucene/index/TestIndexReader.java | 5 +- .../lucene/index/TestIndexReaderReopen.java | 4 +- .../apache/lucene/index/TestIndexWriter.java | 3 +- .../lucene/index/TestIndexWriterDelete.java | 2 +- .../index/TestIndexWriterOnDiskFull.java | 4 +- .../lucene/index/TestIndexWriterReader.java | 25 +++++++--- .../lucene/index/TestLazyProxSkipping.java | 4 +- .../apache/lucene/index/TestNRTThreads.java | 9 ++-- .../lucene/index/TestParallelReader.java | 4 +- .../index/TestPerFieldCodecSupport.java | 2 +- .../lucene/queryParser/TestQueryParser.java | 3 +- .../org/apache/lucene/search/CheckHits.java | 6 ++- .../org/apache/lucene/search/QueryUtils.java | 25 +++++++--- .../lucene/search/TestAutomatonQuery.java | 2 +- .../search/TestAutomatonQueryUnicode.java | 2 +- .../apache/lucene/search/TestBoolean2.java | 2 +- .../search/TestBooleanMinShouldMatch.java | 2 +- .../apache/lucene/search/TestBooleanOr.java | 2 +- .../lucene/search/TestBooleanQuery.java | 3 +- 
.../lucene/search/TestBooleanScorer.java | 5 +- .../lucene/search/TestCachingSpanFilter.java | 18 ++++--- .../search/TestCachingWrapperFilter.java | 24 ++++++--- .../search/TestComplexExplanations.java | 6 +++ .../lucene/search/TestConstantScoreQuery.java | 2 +- .../apache/lucene/search/TestDateFilter.java | 6 ++- .../apache/lucene/search/TestDateSort.java | 3 +- .../search/TestDisjunctionMaxQuery.java | 2 +- .../apache/lucene/search/TestDocBoost.java | 2 +- .../apache/lucene/search/TestDocIdSet.java | 2 +- .../search/TestElevationComparator.java | 2 +- .../lucene/search/TestExplanations.java | 2 +- .../search/TestFieldCacheRangeFilter.java | 25 ++++++---- .../search/TestFieldCacheTermsFilter.java | 3 +- .../lucene/search/TestFilteredQuery.java | 2 +- .../apache/lucene/search/TestFuzzyQuery.java | 14 +++--- .../apache/lucene/search/TestFuzzyQuery2.java | 2 +- .../lucene/search/TestMatchAllDocsQuery.java | 4 +- .../lucene/search/TestMultiPhraseQuery.java | 13 ++--- .../search/TestMultiTermConstantScore.java | 28 +++++++---- .../search/TestMultiTermQueryRewrites.java | 6 +-- .../TestMultiValuedNumericRangeQuery.java | 2 +- .../org/apache/lucene/search/TestNot.java | 2 +- .../search/TestNumericRangeQuery32.java | 2 +- .../search/TestNumericRangeQuery64.java | 2 +- .../lucene/search/TestPhrasePrefixQuery.java | 2 +- .../apache/lucene/search/TestPhraseQuery.java | 12 ++--- .../lucene/search/TestPositionIncrement.java | 4 +- .../TestPositiveScoresOnlyCollector.java | 2 +- .../lucene/search/TestPrefixFilter.java | 2 +- .../search/TestPrefixInBooleanQuery.java | 2 +- .../apache/lucene/search/TestPrefixQuery.java | 2 +- .../lucene/search/TestPrefixRandom.java | 2 +- .../lucene/search/TestQueryWrapperFilter.java | 2 +- .../apache/lucene/search/TestRegexpQuery.java | 2 +- .../lucene/search/TestRegexpRandom.java | 2 +- .../lucene/search/TestRegexpRandom2.java | 2 +- .../TestScoreCachingWrappingScorer.java | 2 +- .../lucene/search/TestSearchWithThreads.java | 2 +- 
.../apache/lucene/search/TestSimilarity.java | 2 +- .../lucene/search/TestSimilarityProvider.java | 2 +- .../lucene/search/TestSloppyPhraseQuery.java | 2 +- .../org/apache/lucene/search/TestSort.java | 8 +-- .../lucene/search/TestSubScorerFreqs.java | 2 +- .../lucene/search/TestTermRangeFilter.java | 18 ++++--- .../apache/lucene/search/TestTermScorer.java | 2 +- .../apache/lucene/search/TestTermVectors.java | 8 +-- .../search/TestTimeLimitingCollector.java | 2 +- .../lucene/search/TestTopDocsCollector.java | 2 +- .../search/TestTopScoreDocCollector.java | 2 +- .../lucene/search/TestWildcardRandom.java | 2 +- .../lucene/search/payloads/PayloadHelper.java | 3 +- .../search/payloads/TestPayloadNearQuery.java | 2 +- .../search/payloads/TestPayloadTermQuery.java | 2 +- .../lucene/search/spans/TestBasics.java | 2 +- .../spans/TestFieldMaskingSpanQuery.java | 2 +- .../search/spans/TestNearSpansOrdered.java | 2 +- .../lucene/search/spans/TestPayloadSpans.java | 17 +++++-- .../search/spans/TestSpanFirstQuery.java | 2 +- .../spans/TestSpanMultiTermQueryWrapper.java | 2 +- .../apache/lucene/search/spans/TestSpans.java | 5 +- .../search/spans/TestSpansAdvanced.java | 2 +- .../search/spans/TestSpansAdvanced2.java | 2 +- .../lucene/store/TestBufferedIndexInput.java | 2 +- .../apache/lucene/store/TestRAMDirectory.java | 2 +- .../apache/lucene/util/LuceneTestCase.java | 49 +++++++++++++++++-- .../query/QueryAutoStopWordAnalyzerTest.java | 5 +- .../lucene/collation/CollationTestBase.java | 2 +- 103 files changed, 335 insertions(+), 203 deletions(-) diff --git a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java index 1f60e6ea5c5..755d9f5d4ec 100644 --- a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java +++ 
b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java @@ -70,7 +70,7 @@ public class HighlighterPhraseTest extends LuceneTestCase { final IndexReader indexReader = IndexReader.open(directory, true); try { assertEquals(1, indexReader.numDocs()); - final IndexSearcher indexSearcher = new IndexSearcher(indexReader); + final IndexSearcher indexSearcher = newSearcher(indexReader); try { final PhraseQuery phraseQuery = new PhraseQuery(); phraseQuery.add(new Term(FIELD, "fox")); @@ -114,7 +114,7 @@ public class HighlighterPhraseTest extends LuceneTestCase { final IndexReader indexReader = IndexReader.open(directory, true); try { assertEquals(1, indexReader.numDocs()); - final IndexSearcher indexSearcher = new IndexSearcher(indexReader); + final IndexSearcher indexSearcher = newSearcher(indexReader); try { final Query phraseQuery = new SpanNearQuery(new SpanQuery[] { new SpanTermQuery(new Term(FIELD, "fox")), @@ -184,7 +184,7 @@ public class HighlighterPhraseTest extends LuceneTestCase { final IndexReader indexReader = IndexReader.open(directory, true); try { assertEquals(1, indexReader.numDocs()); - final IndexSearcher indexSearcher = new IndexSearcher(indexReader); + final IndexSearcher indexSearcher = newSearcher(indexReader); try { final PhraseQuery phraseQuery = new PhraseQuery(); phraseQuery.add(new Term(FIELD, "did")); @@ -227,7 +227,7 @@ public class HighlighterPhraseTest extends LuceneTestCase { final IndexReader indexReader = IndexReader.open(directory, true); try { assertEquals(1, indexReader.numDocs()); - final IndexSearcher indexSearcher = new IndexSearcher(indexReader); + final IndexSearcher indexSearcher = newSearcher(indexReader); try { final PhraseQuery phraseQuery = new PhraseQuery(); phraseQuery.add(new Term(FIELD, "did")); @@ -268,7 +268,7 @@ public class HighlighterPhraseTest extends LuceneTestCase { final IndexReader indexReader = IndexReader.open(directory, true); try { assertEquals(1, 
indexReader.numDocs()); - final IndexSearcher indexSearcher = new IndexSearcher(indexReader); + final IndexSearcher indexSearcher = newSearcher(indexReader); try { final Query phraseQuery = new SpanNearQuery(new SpanQuery[] { new SpanTermQuery(new Term(FIELD, "did")), diff --git a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java index 3fcaa10b535..572aa219b78 100644 --- a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java +++ b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java @@ -113,7 +113,7 @@ public class TokenSourcesTest extends LuceneTestCase { final IndexReader indexReader = IndexReader.open(directory, true); try { assertEquals(1, indexReader.numDocs()); - final IndexSearcher indexSearcher = new IndexSearcher(indexReader); + final IndexSearcher indexSearcher = newSearcher(indexReader); try { final DisjunctionMaxQuery query = new DisjunctionMaxQuery(1); query.add(new SpanTermQuery(new Term(FIELD, "{fox}"))); @@ -159,7 +159,7 @@ public class TokenSourcesTest extends LuceneTestCase { final IndexReader indexReader = IndexReader.open(directory, true); try { assertEquals(1, indexReader.numDocs()); - final IndexSearcher indexSearcher = new IndexSearcher(indexReader); + final IndexSearcher indexSearcher = newSearcher(indexReader); try { final DisjunctionMaxQuery query = new DisjunctionMaxQuery(1); query.add(new SpanTermQuery(new Term(FIELD, "{fox}"))); diff --git a/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestEmptyIndex.java b/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestEmptyIndex.java index 1eb03fdaa5a..f513a0bb423 100644 --- a/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestEmptyIndex.java +++ 
b/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestEmptyIndex.java @@ -37,7 +37,7 @@ public class TestEmptyIndex extends LuceneTestCase { InstantiatedIndex ii = new InstantiatedIndex(); IndexReader r = new InstantiatedIndexReader(ii); - IndexSearcher s = new IndexSearcher(r); + IndexSearcher s = newSearcher(r); TopDocs td = s.search(new TermQuery(new Term("foo", "bar")), 1); diff --git a/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestRealTime.java b/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestRealTime.java index 383cd807caf..413d7f56fae 100644 --- a/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestRealTime.java +++ b/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestRealTime.java @@ -36,7 +36,7 @@ public class TestRealTime extends LuceneTestCase { InstantiatedIndex index = new InstantiatedIndex(); InstantiatedIndexReader reader = new InstantiatedIndexReader(index); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); InstantiatedIndexWriter writer = new InstantiatedIndexWriter(index); Document doc; diff --git a/lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java b/lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java index b2b16f8db96..1494dfeb0e2 100644 --- a/lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java +++ b/lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java @@ -72,7 +72,7 @@ public class ChainedFilterTest extends LuceneTestCase { reader = writer.getReader(); writer.close(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); // query for everything to make life easier BooleanQuery bq = new BooleanQuery(); @@ -194,7 +194,7 @@ public class ChainedFilterTest extends LuceneTestCase { IndexReader reader = writer.getReader(); 
writer.close(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); Query query = new TermQuery(new Term("none", "none")); diff --git a/lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java b/lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java index 2a3df020714..29c7f0f2e37 100644 --- a/lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java +++ b/lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java @@ -61,7 +61,7 @@ public class DuplicateFilterTest extends LuceneTestCase { reader = writer.getReader(); writer.close(); - searcher =new IndexSearcher(reader); + searcher =newSearcher(reader); } diff --git a/lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java b/lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java index 587a5710b9a..0f9b6ca7712 100644 --- a/lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java +++ b/lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java @@ -51,7 +51,7 @@ public class FuzzyLikeThisQueryTest extends LuceneTestCase { addDoc(writer, "johnathon smythe","6"); reader = writer.getReader(); writer.close(); - searcher=new IndexSearcher(reader); + searcher=newSearcher(reader); } @Override diff --git a/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java b/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java index 8fa1ba82469..1e7d1559d69 100644 --- a/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java +++ b/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java @@ -51,7 +51,7 @@ public class TestRegexQuery extends LuceneTestCase { writer.addDocument(doc); reader = writer.getReader(); writer.close(); - searcher = new IndexSearcher(reader); + 
searcher = newSearcher(reader); } @Override diff --git a/lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java b/lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java index 09a45246be3..6de5e91ddc5 100644 --- a/lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java +++ b/lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java @@ -53,7 +53,7 @@ public class TestMoreLikeThis extends LuceneTestCase { reader = writer.getReader(); writer.close(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); } @Override diff --git a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java index ff8a4678c67..2c8b4b61ccf 100644 --- a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java +++ b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java @@ -1278,11 +1278,12 @@ public class TestQPHelper extends LuceneTestCase { doc.add(newField("field", "", Field.Store.NO, Field.Index.ANALYZED)); w.addDocument(doc); IndexReader r = IndexReader.open(w, true); - IndexSearcher s = new IndexSearcher(r); + IndexSearcher s = newSearcher(r); Query q = new StandardQueryParser(new CannedAnalyzer()).parse("\"a\"", "field"); assertTrue(q instanceof MultiPhraseQuery); assertEquals(1, s.search(q, 10).totalHits); + s.close(); r.close(); w.close(); dir.close(); diff --git a/lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java b/lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java index 9adc36300e6..6122b8aab6a 100644 --- a/lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java +++ 
b/lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java @@ -73,7 +73,7 @@ public class TestParser extends LuceneTestCase { d.close(); writer.close(); reader=IndexReader.open(dir, true); - searcher=new IndexSearcher(reader); + searcher=newSearcher(reader); } diff --git a/lucene/src/test/org/apache/lucene/TestExternalCodecs.java b/lucene/src/test/org/apache/lucene/TestExternalCodecs.java index c38f14e7b3e..7cf57a96393 100644 --- a/lucene/src/test/org/apache/lucene/TestExternalCodecs.java +++ b/lucene/src/test/org/apache/lucene/TestExternalCodecs.java @@ -671,7 +671,7 @@ public class TestExternalCodecs extends LuceneTestCase { testTermsOrder(r); assertEquals(NUM_DOCS-1, r.numDocs()); - IndexSearcher s = new IndexSearcher(r); + IndexSearcher s = newSearcher(r); assertEquals(NUM_DOCS-1, s.search(new TermQuery(new Term("field1", "standard")), 1).totalHits); assertEquals(NUM_DOCS-1, s.search(new TermQuery(new Term("field2", "pulsing")), 1).totalHits); r.close(); @@ -682,7 +682,7 @@ public class TestExternalCodecs extends LuceneTestCase { r = IndexReader.open(w, true); assertEquals(NUM_DOCS-2, r.maxDoc()); assertEquals(NUM_DOCS-2, r.numDocs()); - s = new IndexSearcher(r); + s = newSearcher(r); assertEquals(NUM_DOCS-2, s.search(new TermQuery(new Term("field1", "standard")), 1).totalHits); assertEquals(NUM_DOCS-2, s.search(new TermQuery(new Term("field2", "pulsing")), 1).totalHits); assertEquals(1, s.search(new TermQuery(new Term("id", "76")), 1).totalHits); diff --git a/lucene/src/test/org/apache/lucene/document/TestDocument.java b/lucene/src/test/org/apache/lucene/document/TestDocument.java index 51a71bf51dc..c505df68c94 100644 --- a/lucene/src/test/org/apache/lucene/document/TestDocument.java +++ b/lucene/src/test/org/apache/lucene/document/TestDocument.java @@ -156,7 +156,7 @@ public class TestDocument extends LuceneTestCase { writer.addDocument(makeDocumentWithFields()); IndexReader reader = writer.getReader(); - IndexSearcher searcher = 
new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); // search for something that does exists Query query = new TermQuery(new Term("keyword", "test1")); @@ -238,7 +238,7 @@ public class TestDocument extends LuceneTestCase { writer.addDocument(doc); IndexReader reader = writer.getReader(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); Query query = new TermQuery(new Term("keyword", "test")); diff --git a/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java index 83bbc0b69be..729c64ff531 100644 --- a/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java +++ b/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java @@ -409,7 +409,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase { // make sure we can do delete & setNorm against this segment: IndexReader reader = IndexReader.open(dir, false); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); Term searchTerm = new Term("id", "6"); int delCount = reader.deleteDocuments(searchTerm); assertEquals("wrong delete count", 1, delCount); diff --git a/lucene/src/test/org/apache/lucene/index/TestCodecs.java b/lucene/src/test/org/apache/lucene/index/TestCodecs.java index c25796606c4..8bd12c906aa 100644 --- a/lucene/src/test/org/apache/lucene/index/TestCodecs.java +++ b/lucene/src/test/org/apache/lucene/index/TestCodecs.java @@ -362,7 +362,7 @@ public class TestCodecs extends LuceneTestCase { private ScoreDoc[] search(final IndexWriter writer, final Query q, final int n) throws IOException { final IndexReader reader = writer.getReader(); - final IndexSearcher searcher = new IndexSearcher(reader); + final IndexSearcher searcher = newSearcher(reader); try { return searcher.search(q, null, n).scoreDocs; } diff --git a/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java 
b/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java index 6d90baa7ff5..cfe6ecb423f 100644 --- a/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java +++ b/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java @@ -652,7 +652,7 @@ public class TestDeletionPolicy extends LuceneTestCase { IndexReader reader = IndexReader.open(dir, policy, false); reader.deleteDocument(3*i+1); reader.setNorm(4*i+1, "content", conf.getSimilarityProvider().get("content").encodeNormValue(2.0F)); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(16*(1+i), hits.length); // this is a commit @@ -696,7 +696,7 @@ public class TestDeletionPolicy extends LuceneTestCase { // Work backwards in commits on what the expected // count should be. - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); hits = searcher.search(query, null, 1000).scoreDocs; if (i > 1) { if (i % 2 == 0) { @@ -772,7 +772,7 @@ public class TestDeletionPolicy extends LuceneTestCase { IndexReader reader = IndexReader.open(dir, policy, false); reader.deleteDocument(3); reader.setNorm(5, "content", conf.getSimilarityProvider().get("content").encodeNormValue(2.0F)); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(16, hits.length); // this is a commit @@ -807,7 +807,7 @@ public class TestDeletionPolicy extends LuceneTestCase { // Work backwards in commits on what the expected // count should be. 
- searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(expectedCount, hits.length); searcher.close(); diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexReader.java b/lucene/src/test/org/apache/lucene/index/TestIndexReader.java index 3630324c893..7b3399cfe6f 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexReader.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexReader.java @@ -900,7 +900,7 @@ public class TestIndexReader extends LuceneTestCase { IndexReader r = IndexReader.open(startDir); - IndexSearcher searcher = new IndexSearcher(r); + IndexSearcher searcher = newSearcher(r); ScoreDoc[] hits = null; try { hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs; @@ -908,6 +908,7 @@ public class TestIndexReader extends LuceneTestCase e.printStackTrace(); fail("exception when init searching: " + e); } + searcher.close(); r.close(); } @@ -1023,7 +1024,7 @@ public class TestIndexReader extends LuceneTestCase } */ - IndexSearcher searcher = new IndexSearcher(newReader); + IndexSearcher searcher = newSearcher(newReader); ScoreDoc[] hits = null; try { hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs; diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java b/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java index 286806f6ca8..7e2e9e0c940 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java @@ -773,14 +773,14 @@ public class TestIndexReaderReopen extends LuceneTestCase { // not synchronized IndexReader refreshed = r.reopen(); - IndexSearcher searcher = new IndexSearcher(refreshed); + IndexSearcher searcher = newSearcher(refreshed); ScoreDoc[] hits = searcher.search( new TermQuery(new Term("field1", "a" + rnd.nextInt(refreshed.maxDoc()))), null, 1000).scoreDocs; if (hits.length 
> 0) { searcher.doc(hits[0].doc); } - + searcher.close(); if (refreshed != r) { refreshed.close(); } diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java index a74ac7081dd..c668c6a809f 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java @@ -2817,7 +2817,7 @@ public class TestIndexWriter extends LuceneTestCase { for(int x=0;x<2;x++) { IndexReader r = w.getReader(); - IndexSearcher s = new IndexSearcher(r); + IndexSearcher s = newSearcher(r); if (VERBOSE) { System.out.println("TEST: cycle x=" + x + " r=" + r); @@ -2833,6 +2833,7 @@ public class TestIndexWriter extends LuceneTestCase { assertEquals("doc " + testID + ", field f" + fieldCount + " is wrong", docExp.get("f"+i), doc.get("f"+i)); } } + s.close(); r.close(); w.optimize(); } diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java index 731c5a3e21f..f0b3fd6d8b0 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java @@ -565,7 +565,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { + e); } - IndexSearcher searcher = new IndexSearcher(newReader); + IndexSearcher searcher = newSearcher(newReader); ScoreDoc[] hits = null; try { hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs; diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java index 27f29a49a8e..5fc03471ecf 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java @@ -177,7 +177,7 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase { IndexReader reader = 
IndexReader.open(startDir, true); assertEquals("first docFreq", 57, reader.docFreq(searchTerm)); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs; assertEquals("first number of hits", 57, hits.length); searcher.close(); @@ -360,7 +360,7 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase { } } - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); try { hits = searcher.search(new TermQuery(searchTerm), null, END_COUNT).scoreDocs; } catch (IOException e) { diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java index eeef97b83db..08c62ca51d1 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java @@ -718,8 +718,9 @@ public class TestIndexWriterReader extends LuceneTestCase { // reader should remain usable even after IndexWriter is closed: assertEquals(100, r.numDocs()); Query q = new TermQuery(new Term("indexname", "test")); - assertEquals(100, new IndexSearcher(r).search(q, 10).totalHits); - + IndexSearcher searcher = newSearcher(r); + assertEquals(100, searcher.search(q, 10).totalHits); + searcher.close(); try { r.reopen(); fail("failed to hit AlreadyClosedException"); @@ -785,7 +786,9 @@ public class TestIndexWriterReader extends LuceneTestCase { r = r2; } Query q = new TermQuery(new Term("indexname", "test")); - final int count = new IndexSearcher(r).search(q, 10).totalHits; + IndexSearcher searcher = newSearcher(r); + final int count = searcher.search(q, 10).totalHits; + searcher.close(); assertTrue(count >= lastCount); lastCount = count; } @@ -800,7 +803,9 @@ public class TestIndexWriterReader extends LuceneTestCase { r = r2; } Query q = new TermQuery(new Term("indexname", "test")); - final int count = new 
IndexSearcher(r).search(q, 10).totalHits; + IndexSearcher searcher = newSearcher(r); + final int count = searcher.search(q, 10).totalHits; + searcher.close(); assertTrue(count >= lastCount); assertEquals(0, excs.size()); @@ -873,7 +878,9 @@ public class TestIndexWriterReader extends LuceneTestCase { r = r2; } Query q = new TermQuery(new Term("indexname", "test")); - sum += new IndexSearcher(r).search(q, 10).totalHits; + IndexSearcher searcher = newSearcher(r); + sum += searcher.search(q, 10).totalHits; + searcher.close(); } for(int i=0;i 0); assertEquals(0, excs.size()); @@ -973,10 +981,11 @@ public class TestIndexWriterReader extends LuceneTestCase { setMergedSegmentWarmer(new IndexWriter.IndexReaderWarmer() { @Override public void warm(IndexReader r) throws IOException { - IndexSearcher s = new IndexSearcher(r); + IndexSearcher s = newSearcher(r); TopDocs hits = s.search(new TermQuery(new Term("foo", "bar")), 10); assertEquals(20, hits.totalHits); didWarm.set(true); + s.close(); } }). 
setMergePolicy(newLogMergePolicy(10)) diff --git a/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java b/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java index 469302fdb4e..8bd31b4a73c 100755 --- a/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java +++ b/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java @@ -98,7 +98,7 @@ public class TestLazyProxSkipping extends LuceneTestCase { SegmentReader reader = getOnlySegmentReader(IndexReader.open(directory, false)); - this.searcher = new IndexSearcher(reader); + this.searcher = newSearcher(reader); } private ScoreDoc[] search() throws IOException { @@ -126,7 +126,9 @@ public class TestLazyProxSkipping extends LuceneTestCase { // test whether only the minimum amount of seeks() // are performed performTest(5); + searcher.close(); performTest(10); + searcher.close(); } public void testSeek() throws IOException { diff --git a/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java b/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java index 93dcf9a035f..c0c3d3c8332 100644 --- a/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java +++ b/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java @@ -88,7 +88,9 @@ public class TestNRTThreads extends LuceneTestCase { } } - sum += new IndexSearcher(reader).search(new TermQuery(new Term("body", "united")), 10).totalHits; + IndexSearcher searcher = newSearcher(reader); + sum += searcher.search(new TermQuery(new Term("body", "united")), 10).totalHits; + searcher.close(); if (VERBOSE) { System.out.println("TEST: warm visited " + sum + " fields"); @@ -352,7 +354,7 @@ public class TestNRTThreads extends LuceneTestCase { } final IndexReader r2 = writer.getReader(); - final IndexSearcher s = new IndexSearcher(r2); + final IndexSearcher s = newSearcher(r2); boolean doFail = false; for(String id : delIDs) { final TopDocs hits = s.search(new TermQuery(new Term("id", id)), 1); @@ -384,6 +386,7 @@ public class 
TestNRTThreads extends LuceneTestCase { assertFalse(writer.anyNonBulkMerges); writer.close(false); _TestUtil.checkIndex(dir); + s.close(); dir.close(); _TestUtil.rmDir(tempDir); docs.close(); @@ -398,7 +401,7 @@ public class TestNRTThreads extends LuceneTestCase { } private void smokeTestReader(IndexReader r) throws Exception { - IndexSearcher s = new IndexSearcher(r); + IndexSearcher s = newSearcher(r); runQuery(s, new TermQuery(new Term("body", "united"))); runQuery(s, new TermQuery(new Term("titleTokenized", "states"))); PhraseQuery pq = new PhraseQuery(); diff --git a/lucene/src/test/org/apache/lucene/index/TestParallelReader.java b/lucene/src/test/org/apache/lucene/index/TestParallelReader.java index 7c755389d09..fdee60a05aa 100644 --- a/lucene/src/test/org/apache/lucene/index/TestParallelReader.java +++ b/lucene/src/test/org/apache/lucene/index/TestParallelReader.java @@ -47,7 +47,9 @@ public class TestParallelReader extends LuceneTestCase { @Override public void tearDown() throws Exception { single.getIndexReader().close(); + single.close(); parallel.getIndexReader().close(); + parallel.close(); dir.close(); dir1.close(); dir2.close(); @@ -267,7 +269,7 @@ public class TestParallelReader extends LuceneTestCase { ParallelReader pr = new ParallelReader(); pr.add(IndexReader.open(dir1, false)); pr.add(IndexReader.open(dir2, false)); - return new IndexSearcher(pr); + return newSearcher(pr); } private Directory getDir1(Random random) throws IOException { diff --git a/lucene/src/test/org/apache/lucene/index/TestPerFieldCodecSupport.java b/lucene/src/test/org/apache/lucene/index/TestPerFieldCodecSupport.java index 2929785d289..4ed85a91e87 100644 --- a/lucene/src/test/org/apache/lucene/index/TestPerFieldCodecSupport.java +++ b/lucene/src/test/org/apache/lucene/index/TestPerFieldCodecSupport.java @@ -227,7 +227,7 @@ public class TestPerFieldCodecSupport extends LuceneTestCase { } IndexReader reader = IndexReader.open(dir, null, true, 
IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, codecs); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); TopDocs search = searcher.search(new TermQuery(t), num + 10); assertEquals(num, search.totalHits); searcher.close(); diff --git a/lucene/src/test/org/apache/lucene/queryParser/TestQueryParser.java b/lucene/src/test/org/apache/lucene/queryParser/TestQueryParser.java index b8834303ab2..4803a58a339 100644 --- a/lucene/src/test/org/apache/lucene/queryParser/TestQueryParser.java +++ b/lucene/src/test/org/apache/lucene/queryParser/TestQueryParser.java @@ -1142,10 +1142,11 @@ public class TestQueryParser extends LuceneTestCase { w.addDocument(doc); IndexReader r = IndexReader.open(w, true); w.close(); - IndexSearcher s = new IndexSearcher(r); + IndexSearcher s = newSearcher(r); QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "f", a); Query q = qp.parse("\"wizard of ozzy\""); assertEquals(1, s.search(q, 1).totalHits); + s.close(); r.close(); dir.close(); } diff --git a/lucene/src/test/org/apache/lucene/search/CheckHits.java b/lucene/src/test/org/apache/lucene/search/CheckHits.java index fbfa0de093e..6846e59deca 100644 --- a/lucene/src/test/org/apache/lucene/search/CheckHits.java +++ b/lucene/src/test/org/apache/lucene/search/CheckHits.java @@ -100,11 +100,13 @@ public class CheckHits { for (int i = -1; i < 2; i++) { actual.clear(); - QueryUtils.wrapUnderlyingReader - (random, searcher, i).search(query, c); + IndexSearcher s = QueryUtils.wrapUnderlyingReader + (random, searcher, i); + s.search(query, c); Assert.assertEquals("Wrap Reader " + i + ": " + query.toString(defaultFieldName), correct, actual); + s.close(); } } diff --git a/lucene/src/test/org/apache/lucene/search/QueryUtils.java b/lucene/src/test/org/apache/lucene/search/QueryUtils.java index 710251c84f4..e84b2f9a8b9 100644 --- a/lucene/src/test/org/apache/lucene/search/QueryUtils.java +++ b/lucene/src/test/org/apache/lucene/search/QueryUtils.java @@ -21,6 
+21,7 @@ import org.apache.lucene.search.Weight.ScorerContext; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.store.RAMDirectory; +import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.ReaderUtil; import static org.apache.lucene.util.LuceneTestCase.TEST_VERSION_CURRENT; @@ -114,9 +115,13 @@ public class QueryUtils { checkFirstSkipTo(q1,s); checkSkipTo(q1,s); if (wrap) { - check(random, q1, wrapUnderlyingReader(random, s, -1), false); - check(random, q1, wrapUnderlyingReader(random, s, 0), false); - check(random, q1, wrapUnderlyingReader(random, s, +1), false); + IndexSearcher wrapped; + check(random, q1, wrapped = wrapUnderlyingReader(random, s, -1), false); + wrapped.close(); + check(random, q1, wrapped = wrapUnderlyingReader(random, s, 0), false); + wrapped.close(); + check(random, q1, wrapped = wrapUnderlyingReader(random, s, +1), false); + wrapped.close(); } checkExplanations(q1,s); checkSerialization(q1,s); @@ -158,7 +163,7 @@ public class QueryUtils { IndexReader.open(makeEmptyIndex(random, 0), true), 0 < edge ? 
r : IndexReader.open(makeEmptyIndex(random, 0), true)) }; - IndexSearcher out = new IndexSearcher(new MultiReader(readers)); + IndexSearcher out = LuceneTestCase.newSearcher(new MultiReader(readers)); out.setSimilarityProvider(s.getSimilarityProvider()); return out; } @@ -318,7 +323,7 @@ public class QueryUtils { // previous reader, hits NO_MORE_DOCS if (lastReader[0] != null) { final IndexReader previousReader = lastReader[0]; - IndexSearcher indexSearcher = new IndexSearcher(previousReader); + IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader); Weight w = q.weight(indexSearcher); Scorer scorer = w.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), ScorerContext.def()); if (scorer != null) { @@ -326,6 +331,7 @@ public class QueryUtils { Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more); } leafPtr++; + indexSearcher.close(); } lastReader[0] = context.reader; assert readerContextArray[leafPtr].reader == context.reader; @@ -343,13 +349,14 @@ public class QueryUtils { // confirm that skipping beyond the last doc, on the // previous reader, hits NO_MORE_DOCS final IndexReader previousReader = lastReader[0]; - IndexSearcher indexSearcher = new IndexSearcher(previousReader); + IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader); Weight w = q.weight(indexSearcher); Scorer scorer = w.scorer((AtomicReaderContext)previousReader.getTopReaderContext(), ScorerContext.def()); if (scorer != null) { boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS; Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more); } + indexSearcher.close(); } } } @@ -400,13 +407,14 @@ public class QueryUtils { // previous reader, hits NO_MORE_DOCS if (lastReader[0] != null) { final IndexReader previousReader = lastReader[0]; - IndexSearcher indexSearcher = new IndexSearcher(previousReader); + 
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader); Weight w = q.weight(indexSearcher); Scorer scorer = w.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), ScorerContext.def()); if (scorer != null) { boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS; Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more); } + indexSearcher.close(); leafPtr++; } @@ -423,13 +431,14 @@ public class QueryUtils { // confirm that skipping beyond the last doc, on the // previous reader, hits NO_MORE_DOCS final IndexReader previousReader = lastReader[0]; - IndexSearcher indexSearcher = new IndexSearcher(previousReader); + IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader); Weight w = q.weight(indexSearcher); Scorer scorer = w.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), ScorerContext.def()); if (scorer != null) { boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS; Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more); } + indexSearcher.close(); } } } diff --git a/lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java b/lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java index f5809dd1931..410aadc4206 100644 --- a/lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java @@ -62,7 +62,7 @@ public class TestAutomatonQuery extends LuceneTestCase { + " with numbers 1234 5678.9 and letter b"); writer.addDocument(doc); reader = writer.getReader(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); writer.close(); } diff --git a/lucene/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java b/lucene/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java index b764dc0fc4f..53af0ad3a62 100644 --- 
a/lucene/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java +++ b/lucene/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java @@ -82,7 +82,7 @@ public class TestAutomatonQueryUnicode extends LuceneTestCase { field.setValue("\uFFFD\uFFFD"); writer.addDocument(doc); reader = writer.getReader(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); writer.close(); } diff --git a/lucene/src/test/org/apache/lucene/search/TestBoolean2.java b/lucene/src/test/org/apache/lucene/search/TestBoolean2.java index 090eda2d18c..13184b0d77a 100644 --- a/lucene/src/test/org/apache/lucene/search/TestBoolean2.java +++ b/lucene/src/test/org/apache/lucene/search/TestBoolean2.java @@ -92,7 +92,7 @@ public class TestBoolean2 extends LuceneTestCase { w.addDocument(doc); } reader = w.getReader(); - bigSearcher = new IndexSearcher(reader); + bigSearcher = newSearcher(reader); w.close(); } diff --git a/lucene/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java b/lucene/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java index 55b067b6b78..bd53696d570 100644 --- a/lucene/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java +++ b/lucene/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java @@ -65,7 +65,7 @@ public class TestBooleanMinShouldMatch extends LuceneTestCase { } r = w.getReader(); - s = new IndexSearcher(r); + s = newSearcher(r); w.close(); //System.out.println("Set up " + getName()); } diff --git a/lucene/src/test/org/apache/lucene/search/TestBooleanOr.java b/lucene/src/test/org/apache/lucene/search/TestBooleanOr.java index 850b3a8ff32..169cae15420 100644 --- a/lucene/src/test/org/apache/lucene/search/TestBooleanOr.java +++ b/lucene/src/test/org/apache/lucene/search/TestBooleanOr.java @@ -154,7 +154,7 @@ public class TestBooleanOr extends LuceneTestCase { reader = writer.getReader(); // - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); writer.close(); } diff --git 
a/lucene/src/test/org/apache/lucene/search/TestBooleanQuery.java b/lucene/src/test/org/apache/lucene/search/TestBooleanQuery.java index 870a5eb0ac8..50b95728c99 100644 --- a/lucene/src/test/org/apache/lucene/search/TestBooleanQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestBooleanQuery.java @@ -73,7 +73,7 @@ public class TestBooleanQuery extends LuceneTestCase { w.addDocument(doc); IndexReader r = w.getReader(); - IndexSearcher s = new IndexSearcher(r); + IndexSearcher s = newSearcher(r); BooleanQuery q = new BooleanQuery(); q.add(new TermQuery(new Term("field", "a")), BooleanClause.Occur.SHOULD); @@ -120,6 +120,7 @@ public class TestBooleanQuery extends LuceneTestCase { dmq.add(pq); assertEquals(1, s.search(dmq, 10).totalHits); + s.close(); r.close(); w.close(); dir.close(); diff --git a/lucene/src/test/org/apache/lucene/search/TestBooleanScorer.java b/lucene/src/test/org/apache/lucene/search/TestBooleanScorer.java index 9d25a4cda74..89cad9ff71a 100644 --- a/lucene/src/test/org/apache/lucene/search/TestBooleanScorer.java +++ b/lucene/src/test/org/apache/lucene/search/TestBooleanScorer.java @@ -56,9 +56,10 @@ public class TestBooleanScorer extends LuceneTestCase query.add(booleanQuery1, BooleanClause.Occur.MUST); query.add(new TermQuery(new Term(FIELD, "9")), BooleanClause.Occur.MUST_NOT); - IndexSearcher indexSearcher = new IndexSearcher(ir); + IndexSearcher indexSearcher = newSearcher(ir); ScoreDoc[] hits = indexSearcher.search(query, null, 1000).scoreDocs; assertEquals("Number of matched documents", 2, hits.length); + indexSearcher.close(); ir.close(); directory.close(); } @@ -74,7 +75,7 @@ public class TestBooleanScorer extends LuceneTestCase writer.commit(); IndexReader ir = writer.getReader(); writer.close(); - IndexSearcher searcher = new IndexSearcher(ir); + IndexSearcher searcher = newSearcher(ir); BooleanWeight weight = (BooleanWeight) new BooleanQuery().createWeight(searcher); Scorer[] scorers = new Scorer[] {new Scorer(weight) { private 
int doc = -1; diff --git a/lucene/src/test/org/apache/lucene/search/TestCachingSpanFilter.java b/lucene/src/test/org/apache/lucene/search/TestCachingSpanFilter.java index baf7c0d08be..3424658ed27 100644 --- a/lucene/src/test/org/apache/lucene/search/TestCachingSpanFilter.java +++ b/lucene/src/test/org/apache/lucene/search/TestCachingSpanFilter.java @@ -48,7 +48,7 @@ public class TestCachingSpanFilter extends LuceneTestCase { // but we use .reopen on this reader below and expect to // (must) get an NRT reader: IndexReader reader = IndexReader.open(writer.w, true); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); // add a doc, refresh the reader, and check that its there Document doc = new Document(); @@ -56,7 +56,8 @@ public class TestCachingSpanFilter extends LuceneTestCase { writer.addDocument(doc); reader = refreshReader(reader); - searcher = new IndexSearcher(reader); + searcher.close(); + searcher = newSearcher(reader); TopDocs docs = searcher.search(new MatchAllDocsQuery(), 1); assertEquals("Should find a hit...", 1, docs.totalHits); @@ -76,7 +77,8 @@ public class TestCachingSpanFilter extends LuceneTestCase { writer.deleteDocuments(new Term("id", "1")); reader = refreshReader(reader); - searcher = new IndexSearcher(reader); + searcher.close(); + searcher = newSearcher(reader); docs = searcher.search(new MatchAllDocsQuery(), filter, 1); assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits); @@ -90,7 +92,8 @@ public class TestCachingSpanFilter extends LuceneTestCase { writer.addDocument(doc); reader = refreshReader(reader); - searcher = new IndexSearcher(reader); + searcher.close(); + searcher = newSearcher(reader); docs = searcher.search(new MatchAllDocsQuery(), filter, 1); assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits); @@ -108,7 +111,8 @@ public class TestCachingSpanFilter extends LuceneTestCase { // that had no new deletions reader = 
refreshReader(reader); assertTrue(reader != oldReader); - searcher = new IndexSearcher(reader); + searcher.close(); + searcher = newSearcher(reader); int missCount = filter.missCount; docs = searcher.search(constantScore, 1); assertEquals("[just filter] Should find a hit...", 1, docs.totalHits); @@ -118,7 +122,8 @@ public class TestCachingSpanFilter extends LuceneTestCase { writer.deleteDocuments(new Term("id", "1")); reader = refreshReader(reader); - searcher = new IndexSearcher(reader); + searcher.close(); + searcher = newSearcher(reader); docs = searcher.search(new MatchAllDocsQuery(), filter, 1); assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits); @@ -132,6 +137,7 @@ public class TestCachingSpanFilter extends LuceneTestCase { // entry: assertTrue(oldReader != null); + searcher.close(); writer.close(); reader.close(); dir.close(); diff --git a/lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java b/lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java index c2ef9a393d7..357b3df1017 100644 --- a/lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java +++ b/lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java @@ -170,7 +170,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase { // but we use .reopen on this reader below and expect to // (must) get an NRT reader: IndexReader reader = IndexReader.open(writer.w, true); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); // add a doc, refresh the reader, and check that its there Document doc = new Document(); @@ -178,7 +178,8 @@ public class TestCachingWrapperFilter extends LuceneTestCase { writer.addDocument(doc); reader = refreshReader(reader); - searcher = new IndexSearcher(reader); + searcher.close(); + searcher = newSearcher(reader); TopDocs docs = searcher.search(new MatchAllDocsQuery(), 1); assertEquals("Should find a hit...", 1, docs.totalHits); @@ -198,7 
+199,8 @@ public class TestCachingWrapperFilter extends LuceneTestCase { writer.deleteDocuments(new Term("id", "1")); reader = refreshReader(reader); - searcher = new IndexSearcher(reader); + searcher.close(); + searcher = newSearcher(reader); docs = searcher.search(new MatchAllDocsQuery(), filter, 1); assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits); @@ -213,7 +215,8 @@ public class TestCachingWrapperFilter extends LuceneTestCase { writer.addDocument(doc); reader = refreshReader(reader); - searcher = new IndexSearcher(reader); + searcher.close(); + searcher = newSearcher(reader); docs = searcher.search(new MatchAllDocsQuery(), filter, 1); @@ -232,7 +235,8 @@ public class TestCachingWrapperFilter extends LuceneTestCase { // that had no change to deletions reader = refreshReader(reader); assertTrue(reader != oldReader); - searcher = new IndexSearcher(reader); + searcher.close(); + searcher = newSearcher(reader); int missCount = filter.missCount; docs = searcher.search(constantScore, 1); assertEquals("[just filter] Should find a hit...", 1, docs.totalHits); @@ -242,7 +246,8 @@ public class TestCachingWrapperFilter extends LuceneTestCase { writer.deleteDocuments(new Term("id", "1")); reader = refreshReader(reader); - searcher = new IndexSearcher(reader); + searcher.close(); + searcher = newSearcher(reader); missCount = filter.missCount; docs = searcher.search(new MatchAllDocsQuery(), filter, 1); @@ -257,7 +262,8 @@ public class TestCachingWrapperFilter extends LuceneTestCase { writer.addDocument(doc); reader = refreshReader(reader); - searcher = new IndexSearcher(reader); + searcher.close(); + searcher = newSearcher(reader); docs = searcher.search(new MatchAllDocsQuery(), filter, 1); assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits); @@ -269,7 +275,8 @@ public class TestCachingWrapperFilter extends LuceneTestCase { writer.deleteDocuments(new Term("id", "1")); reader = refreshReader(reader); - searcher = new 
IndexSearcher(reader); + searcher.close(); + searcher = newSearcher(reader); docs = searcher.search(new MatchAllDocsQuery(), filter, 1); assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits); @@ -287,6 +294,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase { // entry: assertTrue(oldReader != null); + searcher.close(); reader.close(); writer.close(); dir.close(); diff --git a/lucene/src/test/org/apache/lucene/search/TestComplexExplanations.java b/lucene/src/test/org/apache/lucene/search/TestComplexExplanations.java index 91122a7bad2..0d3e274a022 100644 --- a/lucene/src/test/org/apache/lucene/search/TestComplexExplanations.java +++ b/lucene/src/test/org/apache/lucene/search/TestComplexExplanations.java @@ -36,6 +36,12 @@ public class TestComplexExplanations extends TestExplanations { super.setUp(); searcher.setSimilarityProvider(createQnorm1Similarity()); } + + @Override + public void tearDown() throws Exception { + searcher.close(); + super.tearDown(); + } // must be static for weight serialization tests private static DefaultSimilarity createQnorm1Similarity() { diff --git a/lucene/src/test/org/apache/lucene/search/TestConstantScoreQuery.java b/lucene/src/test/org/apache/lucene/search/TestConstantScoreQuery.java index 574d75b2428..4b9e12b16a7 100644 --- a/lucene/src/test/org/apache/lucene/search/TestConstantScoreQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestConstantScoreQuery.java @@ -94,7 +94,7 @@ public class TestConstantScoreQuery extends LuceneTestCase { reader = writer.getReader(); writer.close(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); // set a similarity that does not normalize our boost away searcher.setSimilarityProvider(new DefaultSimilarity() { diff --git a/lucene/src/test/org/apache/lucene/search/TestDateFilter.java b/lucene/src/test/org/apache/lucene/search/TestDateFilter.java index 7a3ce2cf96b..37bb73f3406 100644 --- 
a/lucene/src/test/org/apache/lucene/search/TestDateFilter.java +++ b/lucene/src/test/org/apache/lucene/search/TestDateFilter.java @@ -57,7 +57,7 @@ public class TestDateFilter extends LuceneTestCase { IndexReader reader = writer.getReader(); writer.close(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); // filter that should preserve matches // DateFilter df1 = DateFilter.Before("datefield", now); @@ -98,6 +98,7 @@ public class TestDateFilter extends LuceneTestCase { result = searcher.search(query2, df2, 1000).scoreDocs; assertEquals(0, result.length); + searcher.close(); reader.close(); indexStore.close(); } @@ -123,7 +124,7 @@ public class TestDateFilter extends LuceneTestCase { IndexReader reader = writer.getReader(); writer.close(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); // filter that should preserve matches // DateFilter df1 = DateFilter.After("datefield", now); @@ -165,6 +166,7 @@ public class TestDateFilter extends LuceneTestCase { result = searcher.search(query2, df2, 1000).scoreDocs; assertEquals(0, result.length); + searcher.close(); reader.close(); indexStore.close(); } diff --git a/lucene/src/test/org/apache/lucene/search/TestDateSort.java b/lucene/src/test/org/apache/lucene/search/TestDateSort.java index 40627d0b5f9..5a4c1b3a1bf 100644 --- a/lucene/src/test/org/apache/lucene/search/TestDateSort.java +++ b/lucene/src/test/org/apache/lucene/search/TestDateSort.java @@ -28,7 +28,6 @@ import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.queryParser.QueryParser; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; @@ -78,7 +77,7 @@ public class TestDateSort extends LuceneTestCase { } public void testReverseDateSort() 
throws Exception { - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); Sort sort = new Sort(new SortField(DATE_TIME_FIELD, SortField.STRING, true)); diff --git a/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java b/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java index 1d2f8a6f2de..012e95eb98f 100644 --- a/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java @@ -149,7 +149,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { r = new SlowMultiReaderWrapper(writer.getReader()); writer.close(); - s = new IndexSearcher(r); + s = newSearcher(r); s.setSimilarityProvider(sim); } diff --git a/lucene/src/test/org/apache/lucene/search/TestDocBoost.java b/lucene/src/test/org/apache/lucene/search/TestDocBoost.java index f970477bda6..8521724a0c5 100644 --- a/lucene/src/test/org/apache/lucene/search/TestDocBoost.java +++ b/lucene/src/test/org/apache/lucene/search/TestDocBoost.java @@ -65,7 +65,7 @@ public class TestDocBoost extends LuceneTestCase { final float[] scores = new float[4]; - new IndexSearcher(reader).search + newSearcher(reader).search (new TermQuery(new Term("field", "word")), new Collector() { private int base = 0; diff --git a/lucene/src/test/org/apache/lucene/search/TestDocIdSet.java b/lucene/src/test/org/apache/lucene/search/TestDocIdSet.java index 6ca1192b25c..f4b015e52e1 100644 --- a/lucene/src/test/org/apache/lucene/search/TestDocIdSet.java +++ b/lucene/src/test/org/apache/lucene/search/TestDocIdSet.java @@ -109,7 +109,7 @@ public class TestDocIdSet extends LuceneTestCase { writer.close(); // First verify the document is searchable. 
- IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); Assert.assertEquals(1, searcher.search(new MatchAllDocsQuery(), 10).totalHits); // Now search w/ a Filter which returns a null DocIdSet diff --git a/lucene/src/test/org/apache/lucene/search/TestElevationComparator.java b/lucene/src/test/org/apache/lucene/search/TestElevationComparator.java index d4e1bc747bc..cab656c48dc 100644 --- a/lucene/src/test/org/apache/lucene/search/TestElevationComparator.java +++ b/lucene/src/test/org/apache/lucene/search/TestElevationComparator.java @@ -53,7 +53,7 @@ public class TestElevationComparator extends LuceneTestCase { IndexReader r = IndexReader.open(writer, true); writer.close(); - IndexSearcher searcher = new IndexSearcher(r); + IndexSearcher searcher = newSearcher(r); runTest(searcher, true); runTest(searcher, false); diff --git a/lucene/src/test/org/apache/lucene/search/TestExplanations.java b/lucene/src/test/org/apache/lucene/search/TestExplanations.java index 5e712eac75f..3f2712af511 100644 --- a/lucene/src/test/org/apache/lucene/search/TestExplanations.java +++ b/lucene/src/test/org/apache/lucene/search/TestExplanations.java @@ -77,7 +77,7 @@ public class TestExplanations extends LuceneTestCase { } reader = writer.getReader(); writer.close(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); } protected String[] docFields = { diff --git a/lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java b/lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java index 5b77e632e43..db56940e110 100644 --- a/lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java +++ b/lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java @@ -44,7 +44,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { public void testRangeFilterId() throws IOException { IndexReader reader = signedIndexReader; - IndexSearcher search = new 
IndexSearcher(reader); + IndexSearcher search = newSearcher(reader); int medId = ((maxId - minId) / 2); @@ -122,14 +122,14 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { result = search.search(q,FieldCacheRangeFilter.newStringRange("id",medIP,medIP,T,T), numDocs).scoreDocs; assertEquals("med,med,T,T", 1, result.length); - + search.close(); } @Test public void testFieldCacheRangeFilterRand() throws IOException { IndexReader reader = signedIndexReader; - IndexSearcher search = new IndexSearcher(reader); + IndexSearcher search = newSearcher(reader); String minRP = pad(signedIndexDir.minR); String maxRP = pad(signedIndexDir.maxR); @@ -185,6 +185,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { assertEquals("max,max,T,T", 1, result.length); result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",maxRP,null,T,F), numDocs).scoreDocs; assertEquals("max,nul,T,T", 1, result.length); + search.close(); } // byte-ranges cannot be tested, because all ranges are too big for bytes, need an extra range for that @@ -193,7 +194,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { public void testFieldCacheRangeFilterShorts() throws IOException { IndexReader reader = signedIndexReader; - IndexSearcher search = new IndexSearcher(reader); + IndexSearcher search = newSearcher(reader); int numDocs = reader.numDocs(); int medId = ((maxId - minId) / 2); @@ -277,13 +278,14 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { assertEquals("overflow special case", 0, result.length); result = search.search(q,FieldCacheRangeFilter.newShortRange("id",maxIdO,minIdO,T,T), numDocs).scoreDocs; assertEquals("inverse range", 0, result.length); + search.close(); } @Test public void testFieldCacheRangeFilterInts() throws IOException { IndexReader reader = signedIndexReader; - IndexSearcher search = new IndexSearcher(reader); + IndexSearcher search = newSearcher(reader); int numDocs = reader.numDocs(); 
int medId = ((maxId - minId) / 2); @@ -368,13 +370,14 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { assertEquals("overflow special case", 0, result.length); result = search.search(q,FieldCacheRangeFilter.newIntRange("id",maxIdO,minIdO,T,T), numDocs).scoreDocs; assertEquals("inverse range", 0, result.length); + search.close(); } @Test public void testFieldCacheRangeFilterLongs() throws IOException { IndexReader reader = signedIndexReader; - IndexSearcher search = new IndexSearcher(reader); + IndexSearcher search = newSearcher(reader); int numDocs = reader.numDocs(); int medId = ((maxId - minId) / 2); @@ -459,6 +462,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { assertEquals("overflow special case", 0, result.length); result = search.search(q,FieldCacheRangeFilter.newLongRange("id",maxIdO,minIdO,T,T), numDocs).scoreDocs; assertEquals("inverse range", 0, result.length); + search.close(); } // float and double tests are a bit minimalistic, but its complicated, because missing precision @@ -467,7 +471,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { public void testFieldCacheRangeFilterFloats() throws IOException { IndexReader reader = signedIndexReader; - IndexSearcher search = new IndexSearcher(reader); + IndexSearcher search = newSearcher(reader); int numDocs = reader.numDocs(); Float minIdO = Float.valueOf(minId + .5f); @@ -490,13 +494,14 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { assertEquals("infinity special case", 0, result.length); result = search.search(q,FieldCacheRangeFilter.newFloatRange("id",null,Float.valueOf(Float.NEGATIVE_INFINITY),F,F), numDocs).scoreDocs; assertEquals("infinity special case", 0, result.length); + search.close(); } @Test public void testFieldCacheRangeFilterDoubles() throws IOException { IndexReader reader = signedIndexReader; - IndexSearcher search = new IndexSearcher(reader); + IndexSearcher search = newSearcher(reader); int 
numDocs = reader.numDocs(); Double minIdO = Double.valueOf(minId + .5); @@ -519,6 +524,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { assertEquals("infinity special case", 0, result.length); result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id",null, Double.valueOf(Double.NEGATIVE_INFINITY),F,F), numDocs).scoreDocs; assertEquals("infinity special case", 0, result.length); + search.close(); } // test using a sparse index (with deleted docs). @@ -539,7 +545,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { writer.close(); IndexReader reader = IndexReader.open(dir, true); - IndexSearcher search = new IndexSearcher(reader); + IndexSearcher search = newSearcher(reader); assertTrue(reader.hasDeletions()); ScoreDoc[] result; @@ -559,6 +565,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) -10),T,T), 100).scoreDocs; assertEquals("find all", 11, result.length); + search.close(); reader.close(); dir.close(); } diff --git a/lucene/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java b/lucene/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java index 3178f637b46..f526f3ea3ea 100644 --- a/lucene/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java +++ b/lucene/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java @@ -47,7 +47,7 @@ public class TestFieldCacheTermsFilter extends LuceneTestCase { IndexReader reader = w.getReader(); w.close(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); int numDocs = reader.numDocs(); ScoreDoc[] results; MatchAllDocsQuery q = new MatchAllDocsQuery(); @@ -68,6 +68,7 @@ public class TestFieldCacheTermsFilter extends LuceneTestCase { results = searcher.search(q, new FieldCacheTermsFilter(fieldName, terms.toArray(new String[0])), numDocs).scoreDocs; 
assertEquals("Must match 2", 2, results.length); + searcher.close(); reader.close(); rd.close(); } diff --git a/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java b/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java index da3be2fb26c..eeb282e1dd8 100644 --- a/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java @@ -81,7 +81,7 @@ public class TestFilteredQuery extends LuceneTestCase { reader = writer.getReader(); writer.close (); - searcher = new IndexSearcher (reader); + searcher = newSearcher(reader); query = new TermQuery (new Term ("field", "three")); filter = newStaticFilterB(); } diff --git a/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java b/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java index 4fb415ffe45..c478a59f0d8 100644 --- a/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java @@ -50,7 +50,7 @@ public class TestFuzzyQuery extends LuceneTestCase { addDoc("ddddd", writer); IndexReader reader = writer.getReader(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); writer.close(); FuzzyQuery query = new FuzzyQuery(new Term("field", "aaaaa"), FuzzyQuery.defaultMinSimilarity, 0); @@ -198,7 +198,7 @@ public class TestFuzzyQuery extends LuceneTestCase { addDoc("segment", writer); IndexReader reader = writer.getReader(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); writer.close(); FuzzyQuery query; @@ -309,7 +309,7 @@ public class TestFuzzyQuery extends LuceneTestCase { IndexReader ir2 = writer2.getReader(); MultiReader mr = new MultiReader(ir1, ir2); - IndexSearcher searcher = new IndexSearcher(mr); + IndexSearcher searcher = newSearcher(mr); FuzzyQuery fq = new FuzzyQuery(new Term("field", "z123456"), 1f, 0, 2); TopDocs docs = searcher.search(fq, 2); 
assertEquals(5, docs.totalHits); // 5 docs, from the a and b's @@ -330,7 +330,7 @@ public class TestFuzzyQuery extends LuceneTestCase { addDoc("segment", writer); IndexReader reader = writer.getReader(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); writer.close(); Query query; @@ -368,7 +368,7 @@ public class TestFuzzyQuery extends LuceneTestCase { addDoc("Lucenne", writer); IndexReader reader = writer.getReader(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); writer.close(); FuzzyQuery query = new FuzzyQuery(new Term("field", "lucene")); @@ -413,7 +413,7 @@ public class TestFuzzyQuery extends LuceneTestCase { Query q = new QueryParser(TEST_VERSION_CURRENT, "field", analyzer).parse( "giga~0.9" ); // 3. search - IndexSearcher searcher = new IndexSearcher(r); + IndexSearcher searcher = newSearcher(r); ScoreDoc[] hits = searcher.search(q, 10).scoreDocs; assertEquals(1, hits.length); assertEquals("Giga byte", searcher.doc(hits[0].doc).get("field")); @@ -435,7 +435,7 @@ public class TestFuzzyQuery extends LuceneTestCase { addDoc("test", w); addDoc("working", w); IndexReader reader = w.getReader(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); w.close(); QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer()); diff --git a/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java b/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java index 85f40abfe37..262386a43b6 100644 --- a/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java +++ b/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java @@ -91,7 +91,7 @@ public class TestFuzzyQuery2 extends LuceneTestCase { } IndexReader r = writer.getReader(); - IndexSearcher searcher = new IndexSearcher(r); + IndexSearcher searcher = newSearcher(r); writer.close(); String line; while ((line = reader.readLine()) 
!= null) { diff --git a/lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java b/lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java index 4f7356271ef..8d96c0feb2e 100644 --- a/lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java @@ -47,7 +47,7 @@ public class TestMatchAllDocsQuery extends LuceneTestCase { iw.close(); IndexReader ir = IndexReader.open(dir, false); - IndexSearcher is = new IndexSearcher(ir); + IndexSearcher is = newSearcher(ir); ScoreDoc[] hits; // assert with norms scoring turned off @@ -93,7 +93,7 @@ public class TestMatchAllDocsQuery extends LuceneTestCase { assertEquals(1, hits.length); // delete a document: - is.getIndexReader().deleteDocument(0); + ir.deleteDocument(0); hits = is.search(new MatchAllDocsQuery(), null, 1000).scoreDocs; assertEquals(2, hits.length); diff --git a/lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java b/lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java index c79e1e52c90..ef860f9737a 100644 --- a/lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java @@ -53,7 +53,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase { add("piccadilly circus", writer); IndexReader reader = writer.getReader(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); // search for "blueberry pi*": MultiPhraseQuery query1 = new MultiPhraseQuery(); @@ -142,12 +142,13 @@ public class TestMultiPhraseQuery extends LuceneTestCase { IndexReader r = writer.getReader(); writer.close(); - IndexSearcher searcher = new IndexSearcher(r); + IndexSearcher searcher = newSearcher(r); MultiPhraseQuery q = new MultiPhraseQuery(); q.add(new Term("body", "blueberry")); q.add(new Term("body", "chocolate")); q.add(new Term[] {new Term("body", "pie"), new Term("body", "tart")}); 
assertEquals(2, searcher.search(q, 1).totalHits); + searcher.close(); r.close(); indexStore.close(); } @@ -171,7 +172,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase { add("blue raspberry pie", writer); IndexReader reader = writer.getReader(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); // This query will be equivalent to +body:pie +body:"blue*" BooleanQuery q = new BooleanQuery(); q.add(new TermQuery(new Term("body", "pie")), BooleanClause.Occur.MUST); @@ -202,7 +203,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase { add("a note", "note", writer); IndexReader reader = writer.getReader(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); // This query will be equivalent to +type:note +body:"a t*" BooleanQuery q = new BooleanQuery(); @@ -229,7 +230,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase { add("a note", "note", writer); IndexReader reader = writer.getReader(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); MultiPhraseQuery q = new MultiPhraseQuery(); q.add(new Term("body", "a")); @@ -294,7 +295,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase { add("a note", "note", writer); IndexReader reader = writer.getReader(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); searcher.setSimilarityProvider(new DefaultSimilarity() { @Override diff --git a/lucene/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java b/lucene/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java index f62d29620ae..5b52755f3fe 100644 --- a/lucene/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java +++ b/lucene/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java @@ -151,7 +151,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { public void 
testEqualScores() throws IOException { // NOTE: uses index build in *this* setUp - IndexSearcher search = new IndexSearcher(reader); + IndexSearcher search = newSearcher(reader); ScoreDoc[] result; @@ -174,13 +174,14 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { result[i].score); } + search.close(); } @Test public void testBoost() throws IOException { // NOTE: uses index build in *this* setUp - IndexSearcher search = new IndexSearcher(reader); + IndexSearcher search = newSearcher(reader); // test for correct application of query normalization // must use a non score normalizing method for this. @@ -246,13 +247,14 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { Assert.assertEquals(0, hits[0].doc); Assert.assertEquals(1, hits[1].doc); assertTrue(hits[0].score > hits[1].score); + search.close(); } @Test public void testBooleanOrderUnAffected() throws IOException { // NOTE: uses index build in *this* setUp - IndexSearcher search = new IndexSearcher(reader); + IndexSearcher search = newSearcher(reader); // first do a regular TermRangeQuery which uses term expansion so // docs with more terms in range get higher scores @@ -277,6 +279,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { actual[i].doc); } + search.close(); } @Test @@ -284,7 +287,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { // NOTE: uses index build in *super* setUp IndexReader reader = signedIndexReader; - IndexSearcher search = new IndexSearcher(reader); + IndexSearcher search = newSearcher(reader); int medId = ((maxId - minId) / 2); @@ -405,6 +408,8 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { result = search.search(csrq("id", medIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs; assertEquals("med,med,T,T", 1, result.length); + + search.close(); } @Test @@ -412,7 +417,7 @@ public class TestMultiTermConstantScore extends 
BaseTestRangeFilter { // NOTE: uses index build in *super* setUp IndexReader reader = signedIndexReader; - IndexSearcher search = new IndexSearcher(reader); + IndexSearcher search = newSearcher(reader); int medId = ((maxId - minId) / 2); @@ -489,6 +494,8 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { result = search.search(csrq("id", medIP, medIP, T, T, c), null, numDocs).scoreDocs; assertEquals("med,med,T,T,c", 1, result.length); + + search.close(); } @Test @@ -496,7 +503,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { // NOTE: uses index build in *super* setUp IndexReader reader = signedIndexReader; - IndexSearcher search = new IndexSearcher(reader); + IndexSearcher search = newSearcher(reader); String minRP = pad(signedIndexDir.minR); String maxRP = pad(signedIndexDir.maxR); @@ -552,6 +559,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { result = search.search(csrq("rand", maxRP, null, T, F), null, numDocs).scoreDocs; assertEquals("max,nul,T,T", 1, result.length); + search.close(); } @Test @@ -560,7 +568,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { // using the unsigned index because collation seems to ignore hyphens IndexReader reader = unsignedIndexReader; - IndexSearcher search = new IndexSearcher(reader); + IndexSearcher search = newSearcher(reader); String minRP = pad(unsignedIndexDir.minR); String maxRP = pad(unsignedIndexDir.maxR); @@ -617,6 +625,8 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { assertEquals("max,max,T,T,c", 1, result.length); result = search.search(csrq("rand", maxRP, null, T, F, c), null, numDocs).scoreDocs; assertEquals("max,nul,T,T,c", 1, result.length); + + search.close(); } @Test @@ -636,7 +646,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { IndexReader reader = writer.getReader(); writer.close(); - IndexSearcher search = new IndexSearcher(reader); + IndexSearcher search = 
newSearcher(reader); // Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in // RuleBasedCollator. However, the Arabic Locale seems to order the Farsi @@ -681,7 +691,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { IndexReader reader = writer.getReader(); writer.close(); - IndexSearcher search = new IndexSearcher(reader); + IndexSearcher search = newSearcher(reader); Collator c = Collator.getInstance(new Locale("da", "dk")); diff --git a/lucene/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java b/lucene/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java index 5e7f3f5597b..eb02ac03e3c 100644 --- a/lucene/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java +++ b/lucene/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java @@ -61,17 +61,17 @@ public class TestMultiTermQueryRewrites extends LuceneTestCase { writer.close(); swriter1.close(); swriter2.close(); reader = IndexReader.open(dir, true); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); multiReader = new MultiReader(new IndexReader[] { IndexReader.open(sdir1, true), IndexReader.open(sdir2, true) }, true); - multiSearcher = new IndexSearcher(multiReader); + multiSearcher = newSearcher(multiReader); multiReaderDupls = new MultiReader(new IndexReader[] { IndexReader.open(sdir1, true), IndexReader.open(dir, true) }, true); - multiSearcherDupls = new IndexSearcher(multiReaderDupls); + multiSearcherDupls = newSearcher(multiReaderDupls); } @AfterClass diff --git a/lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java b/lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java index df996e604e3..2d0e1a4522c 100644 --- a/lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java @@ -59,7 +59,7 @@ public class TestMultiValuedNumericRangeQuery extends 
LuceneTestCase { IndexReader reader = writer.getReader(); writer.close(); - IndexSearcher searcher=new IndexSearcher(reader); + IndexSearcher searcher=newSearcher(reader); num = 50 * RANDOM_MULTIPLIER; for (int i = 0; i < num; i++) { int lower=random.nextInt(Integer.MAX_VALUE); diff --git a/lucene/src/test/org/apache/lucene/search/TestNot.java b/lucene/src/test/org/apache/lucene/search/TestNot.java index 20f2d8f61a0..d86f13f8a31 100644 --- a/lucene/src/test/org/apache/lucene/search/TestNot.java +++ b/lucene/src/test/org/apache/lucene/search/TestNot.java @@ -44,7 +44,7 @@ public class TestNot extends LuceneTestCase { writer.addDocument(d1); IndexReader reader = writer.getReader(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer()); Query query = parser.parse("a NOT b"); //System.out.println(query); diff --git a/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java b/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java index e143d730e34..b53c483dd63 100644 --- a/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java +++ b/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java @@ -89,7 +89,7 @@ public class TestNumericRangeQuery32 extends LuceneTestCase { } reader = writer.getReader(); - searcher=new IndexSearcher(reader); + searcher=newSearcher(reader); writer.close(); } diff --git a/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java b/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java index d3873fc6c12..321f698dfee 100644 --- a/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java +++ b/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java @@ -89,7 +89,7 @@ public class TestNumericRangeQuery64 extends LuceneTestCase { writer.addDocument(doc); } reader = writer.getReader(); - searcher=new 
IndexSearcher(reader); + searcher=newSearcher(reader); writer.close(); } diff --git a/lucene/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java b/lucene/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java index 03985b1e879..b3d0128902d 100644 --- a/lucene/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java @@ -65,7 +65,7 @@ public class TestPhrasePrefixQuery extends LuceneTestCase { IndexReader reader = writer.getReader(); writer.close(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); // PhrasePrefixQuery query1 = new PhrasePrefixQuery(); MultiPhraseQuery query1 = new MultiPhraseQuery(); diff --git a/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java b/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java index 162befd08ab..35349c696ab 100644 --- a/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java @@ -86,7 +86,7 @@ public class TestPhraseQuery extends LuceneTestCase { reader = writer.getReader(); writer.close(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); query = new PhraseQuery(); } @@ -221,7 +221,7 @@ public class TestPhraseQuery extends LuceneTestCase { IndexReader reader = writer.getReader(); writer.close(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); // valid exact phrase query PhraseQuery query = new PhraseQuery(); @@ -262,7 +262,7 @@ public class TestPhraseQuery extends LuceneTestCase { IndexReader reader = writer.getReader(); writer.close(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); PhraseQuery phraseQuery = new PhraseQuery(); phraseQuery.add(new Term("source", "marketing")); @@ -301,7 +301,7 @@ public class TestPhraseQuery extends LuceneTestCase { reader = 
writer.getReader(); writer.close(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); termQuery = new TermQuery(new Term("contents","woo")); phraseQuery = new PhraseQuery(); @@ -352,7 +352,7 @@ public class TestPhraseQuery extends LuceneTestCase { IndexReader reader = writer.getReader(); writer.close(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); PhraseQuery query = new PhraseQuery(); query.add(new Term("field", "firstname")); query.add(new Term("field", "lastname")); @@ -649,7 +649,7 @@ public class TestPhraseQuery extends LuceneTestCase { } IndexReader reader = w.getReader(); - IndexSearcher s = new IndexSearcher(reader); + IndexSearcher s = newSearcher(reader); w.close(); // now search diff --git a/lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java b/lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java index fbc1d4c4f8d..3007d8a102e 100644 --- a/lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java +++ b/lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java @@ -96,7 +96,7 @@ public class TestPositionIncrement extends LuceneTestCase { writer.close(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); DocsAndPositionsEnum pos = MultiFields.getTermPositionsEnum(searcher.getIndexReader(), MultiFields.getDeletedDocs(searcher.getIndexReader()), @@ -264,7 +264,7 @@ public class TestPositionIncrement extends LuceneTestCase { // only one doc has "a" assertEquals(DocsAndPositionsEnum.NO_MORE_DOCS, tp.nextDoc()); - IndexSearcher is = new IndexSearcher(readerFromWriter); + IndexSearcher is = newSearcher(readerFromWriter); SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a")); SpanTermQuery stq2 = new SpanTermQuery(new Term("content", "k")); diff --git a/lucene/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java 
b/lucene/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java index 42c5ffffa04..4ef962c0518 100644 --- a/lucene/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java +++ b/lucene/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java @@ -74,7 +74,7 @@ public class TestPositiveScoresOnlyCollector extends LuceneTestCase { writer.commit(); IndexReader ir = writer.getReader(); writer.close(); - IndexSearcher searcher = new IndexSearcher(ir); + IndexSearcher searcher = newSearcher(ir); Weight fake = new TermQuery(new Term("fake", "weight")).createWeight(searcher); Scorer s = new SimpleScorer(fake); TopDocsCollector tdc = TopScoreDocCollector.create(scores.length, true); diff --git a/lucene/src/test/org/apache/lucene/search/TestPrefixFilter.java b/lucene/src/test/org/apache/lucene/search/TestPrefixFilter.java index e1fffc98e22..890ffe180e8 100644 --- a/lucene/src/test/org/apache/lucene/search/TestPrefixFilter.java +++ b/lucene/src/test/org/apache/lucene/search/TestPrefixFilter.java @@ -48,7 +48,7 @@ public class TestPrefixFilter extends LuceneTestCase { // PrefixFilter combined with ConstantScoreQuery PrefixFilter filter = new PrefixFilter(new Term("category", "/Computers")); Query query = new ConstantScoreQuery(filter); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(4, hits.length); diff --git a/lucene/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java b/lucene/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java index ddafd662857..1aab6b5be13 100644 --- a/lucene/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java @@ -75,7 +75,7 @@ public class TestPrefixInBooleanQuery extends LuceneTestCase { } reader = writer.getReader(); - searcher = new IndexSearcher(reader); + searcher = 
newSearcher(reader); writer.close(); } diff --git a/lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java b/lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java index 0a521e1c124..5ab57283ec2 100644 --- a/lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java @@ -47,7 +47,7 @@ public class TestPrefixQuery extends LuceneTestCase { IndexReader reader = writer.getReader(); PrefixQuery query = new PrefixQuery(new Term("category", "/Computers")); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("All documents in /Computers category and below", 3, hits.length); diff --git a/lucene/src/test/org/apache/lucene/search/TestPrefixRandom.java b/lucene/src/test/org/apache/lucene/search/TestPrefixRandom.java index cb63593b252..46c1a75aab2 100644 --- a/lucene/src/test/org/apache/lucene/search/TestPrefixRandom.java +++ b/lucene/src/test/org/apache/lucene/search/TestPrefixRandom.java @@ -65,7 +65,7 @@ public class TestPrefixRandom extends LuceneTestCase { writer.addDocument(doc); } reader = writer.getReader(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); writer.close(); } diff --git a/lucene/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java b/lucene/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java index a41e33fbf7f..37d522c3c05 100644 --- a/lucene/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java +++ b/lucene/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java @@ -43,7 +43,7 @@ public class TestQueryWrapperFilter extends LuceneTestCase { // should not throw exception with primitive query QueryWrapperFilter qwf = new QueryWrapperFilter(termQuery); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); TopDocs hits = searcher.search(new 
MatchAllDocsQuery(), qwf, 10); assertEquals(1, hits.totalHits); hits = searcher.search(new MatchAllDocsQuery(), new CachingWrapperFilter(qwf), 10); diff --git a/lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java b/lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java index 65552c46f1f..75036679595 100644 --- a/lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java @@ -54,7 +54,7 @@ public class TestRegexpQuery extends LuceneTestCase { writer.addDocument(doc); reader = writer.getReader(); writer.close(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); } @Override diff --git a/lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java b/lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java index d50a02400d9..7180d3b82ec 100644 --- a/lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java +++ b/lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java @@ -62,7 +62,7 @@ public class TestRegexpRandom extends LuceneTestCase { reader = writer.getReader(); writer.close(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); } private char N() { diff --git a/lucene/src/test/org/apache/lucene/search/TestRegexpRandom2.java b/lucene/src/test/org/apache/lucene/search/TestRegexpRandom2.java index a6627521831..143d977cfb7 100644 --- a/lucene/src/test/org/apache/lucene/search/TestRegexpRandom2.java +++ b/lucene/src/test/org/apache/lucene/search/TestRegexpRandom2.java @@ -82,7 +82,7 @@ public class TestRegexpRandom2 extends LuceneTestCase { } reader = writer.getReader(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); writer.close(); } diff --git a/lucene/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java b/lucene/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java index d6bc217c70d..664f1810805 100644 --- 
a/lucene/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java +++ b/lucene/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java @@ -104,7 +104,7 @@ public class TestScoreCachingWrappingScorer extends LuceneTestCase { writer.commit(); IndexReader ir = writer.getReader(); writer.close(); - IndexSearcher searcher = new IndexSearcher(ir); + IndexSearcher searcher = newSearcher(ir); Weight fake = new TermQuery(new Term("fake", "weight")).createWeight(searcher); Scorer s = new SimpleScorer(fake); ScoreCachingCollector scc = new ScoreCachingCollector(scores.length); diff --git a/lucene/src/test/org/apache/lucene/search/TestSearchWithThreads.java b/lucene/src/test/org/apache/lucene/search/TestSearchWithThreads.java index 0adba6edf49..8e0456bc3ff 100644 --- a/lucene/src/test/org/apache/lucene/search/TestSearchWithThreads.java +++ b/lucene/src/test/org/apache/lucene/search/TestSearchWithThreads.java @@ -63,7 +63,7 @@ public class TestSearchWithThreads extends LuceneTestCase { final long endTime = System.currentTimeMillis(); if (VERBOSE) System.out.println("BUILD took " + (endTime-startTime)); - final IndexSearcher s = new IndexSearcher(r); + final IndexSearcher s = newSearcher(r); final AtomicBoolean failed = new AtomicBoolean(); final AtomicLong netSearch = new AtomicLong(); diff --git a/lucene/src/test/org/apache/lucene/search/TestSimilarity.java b/lucene/src/test/org/apache/lucene/search/TestSimilarity.java index efd6b5892cd..d788799db9d 100644 --- a/lucene/src/test/org/apache/lucene/search/TestSimilarity.java +++ b/lucene/src/test/org/apache/lucene/search/TestSimilarity.java @@ -80,7 +80,7 @@ public class TestSimilarity extends LuceneTestCase { IndexReader reader = writer.getReader(); writer.close(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); searcher.setSimilarityProvider(new SimpleSimilarity()); Term a = new Term("field", "a"); diff --git 
a/lucene/src/test/org/apache/lucene/search/TestSimilarityProvider.java b/lucene/src/test/org/apache/lucene/search/TestSimilarityProvider.java index d626b2d9a48..7a8f123be71 100644 --- a/lucene/src/test/org/apache/lucene/search/TestSimilarityProvider.java +++ b/lucene/src/test/org/apache/lucene/search/TestSimilarityProvider.java @@ -56,7 +56,7 @@ public class TestSimilarityProvider extends LuceneTestCase { iw.addDocument(doc); reader = iw.getReader(); iw.close(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); searcher.setSimilarityProvider(sim); } diff --git a/lucene/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java b/lucene/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java index 0deefbc62d0..f10c5d41e9b 100755 --- a/lucene/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java @@ -121,7 +121,7 @@ public class TestSloppyPhraseQuery extends LuceneTestCase { IndexReader reader = writer.getReader(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); TopDocs td = searcher.search(query,null,10); //System.out.println("slop: "+slop+" query: "+query+" doc: "+doc+" Expecting number of hits: "+expectedNumResults+" maxScore="+td.getMaxScore()); assertEquals("slop: "+slop+" query: "+query+" doc: "+doc+" Wrong number of hits", expectedNumResults, td.totalHits); diff --git a/lucene/src/test/org/apache/lucene/search/TestSort.java b/lucene/src/test/org/apache/lucene/search/TestSort.java index dcf07a20a98..39d2f170b63 100644 --- a/lucene/src/test/org/apache/lucene/search/TestSort.java +++ b/lucene/src/test/org/apache/lucene/search/TestSort.java @@ -144,7 +144,7 @@ public class TestSort extends LuceneTestCase implements Serializable { } IndexReader reader = writer.getReader(); writer.close (); - IndexSearcher s = new IndexSearcher (reader); + IndexSearcher s = newSearcher(reader); 
s.setDefaultFieldSortScoring(true, true); return s; } @@ -1061,12 +1061,13 @@ public class TestSort extends LuceneTestCase implements Serializable { IndexReader r = IndexReader.open(w, true); w.close(); - IndexSearcher s = new IndexSearcher(r); + IndexSearcher s = newSearcher(r); TopDocs hits = s.search(new TermQuery(new Term("t", "1")), null, 10, new Sort(new SortField("f", SortField.STRING))); assertEquals(2, hits.totalHits); // null sorts first assertEquals(1, hits.scoreDocs[0].doc); assertEquals(0, hits.scoreDocs[1].doc); + s.close(); r.close(); dir.close(); } @@ -1105,10 +1106,11 @@ public class TestSort extends LuceneTestCase implements Serializable { IndexReader reader = writer.getReader(); writer.close(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); TotalHitCountCollector c = new TotalHitCountCollector(); searcher.search(new MatchAllDocsQuery(), null, c); assertEquals(5, c.getTotalHits()); + searcher.close(); reader.close(); indexStore.close(); } diff --git a/lucene/src/test/org/apache/lucene/search/TestSubScorerFreqs.java b/lucene/src/test/org/apache/lucene/search/TestSubScorerFreqs.java index 8c852535332..985cb8010e7 100644 --- a/lucene/src/test/org/apache/lucene/search/TestSubScorerFreqs.java +++ b/lucene/src/test/org/apache/lucene/search/TestSubScorerFreqs.java @@ -54,7 +54,7 @@ public class TestSubScorerFreqs extends LuceneTestCase { w.addDocument(doc); } - s = new IndexSearcher(w.getReader()); + s = newSearcher(w.getReader()); w.close(); } diff --git a/lucene/src/test/org/apache/lucene/search/TestTermRangeFilter.java b/lucene/src/test/org/apache/lucene/search/TestTermRangeFilter.java index 1bcd546fd49..03b2b06f7a5 100644 --- a/lucene/src/test/org/apache/lucene/search/TestTermRangeFilter.java +++ b/lucene/src/test/org/apache/lucene/search/TestTermRangeFilter.java @@ -44,7 +44,7 @@ public class TestTermRangeFilter extends BaseTestRangeFilter { public void testRangeFilterId() throws IOException { 
IndexReader reader = signedIndexReader; - IndexSearcher search = new IndexSearcher(reader); + IndexSearcher search = newSearcher(reader); int medId = ((maxId - minId) / 2); @@ -141,13 +141,14 @@ public class TestTermRangeFilter extends BaseTestRangeFilter { numDocs).scoreDocs; assertEquals("med,med,T,T", 1, result.length); + search.close(); } @Test public void testRangeFilterIdCollating() throws IOException { IndexReader reader = signedIndexReader; - IndexSearcher search = new IndexSearcher(reader); + IndexSearcher search = newSearcher(reader); Collator c = Collator.getInstance(Locale.ENGLISH); @@ -243,13 +244,15 @@ public class TestTermRangeFilter extends BaseTestRangeFilter { numHits = search.search(q, new TermRangeFilter("id", medIP, medIP, T, T, c), 1000).totalHits; assertEquals("med,med,T,T", 1, numHits); + + search.close(); } @Test public void testRangeFilterRand() throws IOException { IndexReader reader = signedIndexReader; - IndexSearcher search = new IndexSearcher(reader); + IndexSearcher search = newSearcher(reader); String minRP = pad(signedIndexDir.minR); String maxRP = pad(signedIndexDir.maxR); @@ -320,6 +323,7 @@ public class TestTermRangeFilter extends BaseTestRangeFilter { numDocs).scoreDocs; assertEquals("max,nul,T,T", 1, result.length); + search.close(); } @Test @@ -327,7 +331,7 @@ public class TestTermRangeFilter extends BaseTestRangeFilter { // using the unsigned index because collation seems to ignore hyphens IndexReader reader = unsignedIndexReader; - IndexSearcher search = new IndexSearcher(reader); + IndexSearcher search = newSearcher(reader); Collator c = Collator.getInstance(Locale.ENGLISH); @@ -398,6 +402,8 @@ public class TestTermRangeFilter extends BaseTestRangeFilter { numHits = search.search(q, new TermRangeFilter("rand", maxRP, null, T, F, c), 1000).totalHits; assertEquals("max,nul,T,T", 1, numHits); + + search.close(); } @Test @@ -417,7 +423,7 @@ public class TestTermRangeFilter extends BaseTestRangeFilter { IndexReader reader = 
writer.getReader(); writer.close(); - IndexSearcher search = new IndexSearcher(reader); + IndexSearcher search = newSearcher(reader); Query q = new TermQuery(new Term("body", "body")); // Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in @@ -461,7 +467,7 @@ public class TestTermRangeFilter extends BaseTestRangeFilter { IndexReader reader = writer.getReader(); writer.close(); - IndexSearcher search = new IndexSearcher(reader); + IndexSearcher search = newSearcher(reader); Query q = new TermQuery(new Term("body", "body")); Collator collator = Collator.getInstance(new Locale("da", "dk")); diff --git a/lucene/src/test/org/apache/lucene/search/TestTermScorer.java b/lucene/src/test/org/apache/lucene/search/TestTermScorer.java index 959dbca3e65..a2d3a5d8c0a 100644 --- a/lucene/src/test/org/apache/lucene/search/TestTermScorer.java +++ b/lucene/src/test/org/apache/lucene/search/TestTermScorer.java @@ -57,7 +57,7 @@ public class TestTermScorer extends LuceneTestCase { } indexReader = new SlowMultiReaderWrapper(writer.getReader()); writer.close(); - indexSearcher = new IndexSearcher(indexReader); + indexSearcher = newSearcher(indexReader); } @Override diff --git a/lucene/src/test/org/apache/lucene/search/TestTermVectors.java b/lucene/src/test/org/apache/lucene/search/TestTermVectors.java index e3a0d7ad8e6..2b4032d6c5e 100644 --- a/lucene/src/test/org/apache/lucene/search/TestTermVectors.java +++ b/lucene/src/test/org/apache/lucene/search/TestTermVectors.java @@ -71,7 +71,7 @@ public class TestTermVectors extends LuceneTestCase { } reader = writer.getReader(); writer.close(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); } @Override @@ -246,7 +246,7 @@ public class TestTermVectors extends LuceneTestCase { writer.addDocument(testDoc4); IndexReader reader = writer.getReader(); writer.close(); - IndexSearcher knownSearcher = new IndexSearcher(reader); + IndexSearcher knownSearcher = newSearcher(reader); FieldsEnum fields = 
MultiFields.getFields(knownSearcher.reader).iterator(); DocsEnum docs = null; @@ -378,7 +378,7 @@ public class TestTermVectors extends LuceneTestCase { } IndexReader reader = writer.getReader(); writer.close(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); Query query = new TermQuery(new Term("field", "hundred")); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; @@ -414,7 +414,7 @@ public class TestTermVectors extends LuceneTestCase { IndexReader reader = writer.getReader(); writer.close(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); Query query = new TermQuery(new Term("field", "one")); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; diff --git a/lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java b/lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java index d31fd21d05e..2fd1374d6da 100644 --- a/lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java +++ b/lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java @@ -82,7 +82,7 @@ public class TestTimeLimitingCollector extends LuceneTestCase { } reader = iw.getReader(); iw.close(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); String qtxt = "one"; // start from 1, so that the 0th doc never matches diff --git a/lucene/src/test/org/apache/lucene/search/TestTopDocsCollector.java b/lucene/src/test/org/apache/lucene/search/TestTopDocsCollector.java index 21164680661..417d9c09abc 100644 --- a/lucene/src/test/org/apache/lucene/search/TestTopDocsCollector.java +++ b/lucene/src/test/org/apache/lucene/search/TestTopDocsCollector.java @@ -95,7 +95,7 @@ public class TestTopDocsCollector extends LuceneTestCase { private TopDocsCollector doSearch(int numResults) throws IOException { Query q = new MatchAllDocsQuery(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); TopDocsCollector tdc = new 
MyTopsDocCollector(numResults); searcher.search(q, tdc); searcher.close(); diff --git a/lucene/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java b/lucene/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java index 036a211dccd..102dce579e3 100644 --- a/lucene/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java +++ b/lucene/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java @@ -47,7 +47,7 @@ public class TestTopScoreDocCollector extends LuceneTestCase { // the clause instead of BQ. bq.setMinimumNumberShouldMatch(1); IndexReader reader = writer.getReader(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); for (int i = 0; i < inOrder.length; i++) { TopDocsCollector tdc = TopScoreDocCollector.create(3, inOrder[i]); assertEquals("org.apache.lucene.search.TopScoreDocCollector$" + actualTSDCClass[i], tdc.getClass().getName()); diff --git a/lucene/src/test/org/apache/lucene/search/TestWildcardRandom.java b/lucene/src/test/org/apache/lucene/search/TestWildcardRandom.java index bb07c16549d..cd685ddaa7c 100644 --- a/lucene/src/test/org/apache/lucene/search/TestWildcardRandom.java +++ b/lucene/src/test/org/apache/lucene/search/TestWildcardRandom.java @@ -61,7 +61,7 @@ public class TestWildcardRandom extends LuceneTestCase { } reader = writer.getReader(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); writer.close(); } diff --git a/lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java b/lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java index 5349d18eeb6..501dd4d75d4 100644 --- a/lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java +++ b/lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.util.English; +import 
org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.SimilarityProvider; @@ -129,7 +130,7 @@ public class PayloadHelper { reader = IndexReader.open(writer, true); writer.close(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = LuceneTestCase.newSearcher(reader); searcher.setSimilarityProvider(similarity); return searcher; } diff --git a/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java b/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java index 996d434c805..4bd8a6cb6a4 100644 --- a/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java +++ b/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java @@ -117,7 +117,7 @@ public class TestPayloadNearQuery extends LuceneTestCase { reader = writer.getReader(); writer.close(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); searcher.setSimilarityProvider(similarity); } diff --git a/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java b/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java index 0ac527671af..51bb7385c23 100644 --- a/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java +++ b/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java @@ -124,7 +124,7 @@ public class TestPayloadTermQuery extends LuceneTestCase { reader = writer.getReader(); writer.close(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); searcher.setSimilarityProvider(similarity); } diff --git a/lucene/src/test/org/apache/lucene/search/spans/TestBasics.java b/lucene/src/test/org/apache/lucene/search/spans/TestBasics.java index 0769a85cb4d..b04f96ae333 100644 --- a/lucene/src/test/org/apache/lucene/search/spans/TestBasics.java +++ 
b/lucene/src/test/org/apache/lucene/search/spans/TestBasics.java @@ -77,7 +77,7 @@ public class TestBasics extends LuceneTestCase { writer.addDocument(doc); } reader = writer.getReader(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); writer.close(); } diff --git a/lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java b/lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java index d8e16ccc7d5..f86aea3b4b2 100644 --- a/lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java +++ b/lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java @@ -112,7 +112,7 @@ public class TestFieldMaskingSpanQuery extends LuceneTestCase { field("last", "jones") })); reader = writer.getReader(); writer.close(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); } @Override diff --git a/lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java b/lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java index 72cae02451c..8316ff8d858 100644 --- a/lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java +++ b/lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java @@ -65,7 +65,7 @@ public class TestNearSpansOrdered extends LuceneTestCase { } reader = writer.getReader(); writer.close(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); } protected String[] docFields = { diff --git a/lucene/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java b/lucene/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java index 0b04340d8a3..2ae7efd63a3 100644 --- a/lucene/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java +++ b/lucene/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java @@ -178,6 +178,7 @@ public class TestPayloadSpans extends LuceneTestCase { spans = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), nestedSpanNearQuery); 
assertTrue("spans is null and it shouldn't be", spans != null); checkSpans(spans, 2, new int[]{3,3}); + searcher.close(); closeIndexReader.close(); directory.close(); } @@ -210,6 +211,7 @@ public class TestPayloadSpans extends LuceneTestCase { assertTrue("spans is null and it shouldn't be", spans != null); checkSpans(spans, 1, new int[]{3}); + searcher.close(); closeIndexReader.close(); directory.close(); } @@ -247,6 +249,7 @@ public class TestPayloadSpans extends LuceneTestCase { spans = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), nestedSpanNearQuery); assertTrue("spans is null and it shouldn't be", spans != null); checkSpans(spans, 2, new int[]{8, 8}); + searcher.close(); closeIndexReader.close(); directory.close(); } @@ -262,7 +265,7 @@ public class TestPayloadSpans extends LuceneTestCase { writer.addDocument(doc); IndexReader reader = writer.getReader(); - IndexSearcher is = new IndexSearcher(reader); + IndexSearcher is = newSearcher(reader); writer.close(); SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a")); @@ -285,6 +288,7 @@ public class TestPayloadSpans extends LuceneTestCase { assertEquals(2, payloadSet.size()); assertTrue(payloadSet.contains("a:Noise:10")); assertTrue(payloadSet.contains("k:Noise:11")); + is.close(); reader.close(); directory.close(); } @@ -299,7 +303,7 @@ public class TestPayloadSpans extends LuceneTestCase { doc.add(new Field("content", new StringReader("a b a d k f a h i k a k"))); writer.addDocument(doc); IndexReader reader = writer.getReader(); - IndexSearcher is = new IndexSearcher(reader); + IndexSearcher is = newSearcher(reader); writer.close(); SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a")); @@ -321,6 +325,7 @@ public class TestPayloadSpans extends LuceneTestCase { assertEquals(2, payloadSet.size()); assertTrue(payloadSet.contains("a:Noise:10")); assertTrue(payloadSet.contains("k:Noise:11")); + is.close(); reader.close(); directory.close(); } @@ -335,7 +340,7 @@ public class 
TestPayloadSpans extends LuceneTestCase { doc.add(new Field("content", new StringReader("j k a l f k k p a t a k l k t a"))); writer.addDocument(doc); IndexReader reader = writer.getReader(); - IndexSearcher is = new IndexSearcher(reader); + IndexSearcher is = newSearcher(reader); writer.close(); SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a")); @@ -363,6 +368,7 @@ public class TestPayloadSpans extends LuceneTestCase { } assertTrue(payloadSet.contains("a:Noise:10")); assertTrue(payloadSet.contains("k:Noise:11")); + is.close(); reader.close(); directory.close(); } @@ -378,7 +384,7 @@ public class TestPayloadSpans extends LuceneTestCase { IndexReader reader = writer.getReader(); writer.close(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); PayloadSpanUtil psu = new PayloadSpanUtil(searcher.getTopReaderContext()); @@ -389,6 +395,7 @@ public class TestPayloadSpans extends LuceneTestCase { if(VERBOSE) System.out.println(new String(bytes)); } + searcher.close(); reader.close(); directory.close(); } @@ -443,7 +450,7 @@ public class TestPayloadSpans extends LuceneTestCase { closeIndexReader = writer.getReader(); writer.close(); - IndexSearcher searcher = new IndexSearcher(closeIndexReader); + IndexSearcher searcher = newSearcher(closeIndexReader); return searcher; } diff --git a/lucene/src/test/org/apache/lucene/search/spans/TestSpanFirstQuery.java b/lucene/src/test/org/apache/lucene/search/spans/TestSpanFirstQuery.java index b95e7719b76..583da5191ab 100644 --- a/lucene/src/test/org/apache/lucene/search/spans/TestSpanFirstQuery.java +++ b/lucene/src/test/org/apache/lucene/search/spans/TestSpanFirstQuery.java @@ -48,7 +48,7 @@ public class TestSpanFirstQuery extends LuceneTestCase { writer.addDocument(doc2); IndexReader reader = writer.getReader(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); // user queries on "starts-with quick" SpanQuery sfq = 
new SpanFirstQuery(new SpanTermQuery(new Term("field", "quick")), 1); diff --git a/lucene/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java b/lucene/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java index 48c26c92da1..51ae833705a 100644 --- a/lucene/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java +++ b/lucene/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java @@ -53,7 +53,7 @@ public class TestSpanMultiTermQueryWrapper extends LuceneTestCase { iw.addDocument(doc); reader = iw.getReader(); iw.close(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); } @Override diff --git a/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java b/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java index b5f3f521c66..23cdf4786d9 100644 --- a/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java +++ b/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java @@ -61,7 +61,7 @@ public class TestSpans extends LuceneTestCase { } reader = writer.getReader(); writer.close(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); } @Override @@ -486,7 +486,7 @@ public class TestSpans extends LuceneTestCase { // Get searcher final IndexReader reader = IndexReader.open(dir, true); - final IndexSearcher searcher = new IndexSearcher(reader); + final IndexSearcher searcher = newSearcher(reader); // Control (make sure docs indexed) assertEquals(2, hitCount(searcher, "the")); @@ -499,6 +499,7 @@ public class TestSpans extends LuceneTestCase { searcher.search(createSpan(0, true, new SpanQuery[] {createSpan(4, false, "chased", "cat"), createSpan("ate")}), 10).totalHits); + searcher.close(); reader.close(); dir.close(); } diff --git a/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java b/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java index 43f057d9a71..e3e2e6774e0 100644 --- 
a/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java +++ b/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java @@ -66,7 +66,7 @@ public class TestSpansAdvanced extends LuceneTestCase { addDocument(writer, "4", "I think it should work."); reader = writer.getReader(); writer.close(); - searcher = new IndexSearcher(reader); + searcher = newSearcher(reader); } @Override diff --git a/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java b/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java index c590ac05593..6406bddf50c 100644 --- a/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java +++ b/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java @@ -57,7 +57,7 @@ public class TestSpansAdvanced2 extends TestSpansAdvanced { writer.close(); // re-open the searcher since we added more docs - searcher2 = new IndexSearcher(reader2); + searcher2 = newSearcher(reader2); } @Override diff --git a/lucene/src/test/org/apache/lucene/store/TestBufferedIndexInput.java b/lucene/src/test/org/apache/lucene/store/TestBufferedIndexInput.java index b91f5938d0c..4a4c2780c80 100755 --- a/lucene/src/test/org/apache/lucene/store/TestBufferedIndexInput.java +++ b/lucene/src/test/org/apache/lucene/store/TestBufferedIndexInput.java @@ -271,7 +271,7 @@ public class TestBufferedIndexInput extends LuceneTestCase { assertEquals(reader.docFreq(bbb), 37); dir.tweakBufferSizes(); - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); ScoreDoc[] hits = searcher.search(new TermQuery(bbb), null, 1000).scoreDocs; dir.tweakBufferSizes(); assertEquals(35, hits.length); diff --git a/lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java b/lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java index d96cfcf47f5..94f71aea46e 100644 --- a/lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java +++ 
b/lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java @@ -82,7 +82,7 @@ public class TestRAMDirectory extends LuceneTestCase { assertEquals(docsToAdd, reader.numDocs()); // open search zo check if all doc's are there - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = newSearcher(reader); // search for all documents for (int i = 0; i < docsToAdd; i++) { diff --git a/lucene/src/test/org/apache/lucene/util/LuceneTestCase.java b/lucene/src/test/org/apache/lucene/util/LuceneTestCase.java index c2ce61cacf6..a10689c98be 100644 --- a/lucene/src/test/org/apache/lucene/util/LuceneTestCase.java +++ b/lucene/src/test/org/apache/lucene/util/LuceneTestCase.java @@ -28,6 +28,9 @@ import java.lang.reflect.Constructor; import java.lang.reflect.Method; import java.lang.reflect.Modifier; import java.util.*; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -51,6 +54,7 @@ import org.apache.lucene.index.codecs.standard.StandardCodec; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.FieldCache; import org.apache.lucene.search.FieldCache.CacheEntry; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.LockFactory; @@ -564,14 +568,21 @@ public abstract class LuceneTestCase extends Assert { if (t.isAlive() && !rogueThreads.containsKey(t) && - t != Thread.currentThread()) { + t != Thread.currentThread() && + /* its ok to keep your searcher across test cases */ + (t.getName().startsWith("LuceneTestCase") && context.startsWith("test method")) == false) { System.err.println("WARNING: " + context + " left thread running: " + t); rogueThreads.put(t, true); rogueCount++; - // wait on the thread to die of natural causes - try { - t.join(THREAD_STOP_GRACE_MSEC); - } catch 
(InterruptedException e) { e.printStackTrace(); } + if (t.getName().startsWith("LuceneTestCase")) { + System.err.println("PLEASE CLOSE YOUR INDEXSEARCHERS IN YOUR TEST!!!!"); + continue; + } else { + // wait on the thread to die of natural causes + try { + t.join(THREAD_STOP_GRACE_MSEC); + } catch (InterruptedException e) { e.printStackTrace(); } + } // try to stop the thread: t.setUncaughtExceptionHandler(null); Thread.setDefaultUncaughtExceptionHandler(null); @@ -1018,6 +1029,34 @@ public abstract class LuceneTestCase extends Assert { } } + /** create a new searcher over the reader */ + public static IndexSearcher newSearcher(IndexReader r) throws IOException { + if (random.nextBoolean()) { + return new IndexSearcher(r); + } else { + int threads = 0; + final ExecutorService ex = (random.nextBoolean()) ? null + : Executors.newFixedThreadPool(threads = _TestUtil.nextInt(random, 1, 8), + new NamedThreadFactory("LuceneTestCase")); + if (ex != null && VERBOSE) { + System.out.println("NOTE: newSearcher using ExecutorService with " + threads + " threads"); + } + return new IndexSearcher(r.getTopReaderContext(), ex) { + @Override + public void close() throws IOException { + super.close(); + if (ex != null) { + ex.shutdown(); + try { + ex.awaitTermination(1000, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + } + }; + } + } public String getName() { return this.name; diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java index 7f7b6d64ff4..005b2e67eed 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java @@ -76,7 +76,10 @@ public class QueryAutoStopWordAnalyzerTest extends BaseTokenStreamTestCase { 
private int search(Analyzer a, String queryString) throws IOException, ParseException { QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "repetitiveField", a); Query q = qp.parse(queryString); - return new IndexSearcher(reader).search(q, null, 1000).totalHits; + IndexSearcher searcher = newSearcher(reader); + int hits = searcher.search(q, null, 1000).totalHits; + searcher.close(); + return hits; } public void testUninitializedAnalyzer() throws Exception { diff --git a/modules/analysis/common/src/test/org/apache/lucene/collation/CollationTestBase.java b/modules/analysis/common/src/test/org/apache/lucene/collation/CollationTestBase.java index 11b4eb5474e..1f7b511e7ca 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/collation/CollationTestBase.java +++ b/modules/analysis/common/src/test/org/apache/lucene/collation/CollationTestBase.java @@ -141,7 +141,7 @@ public abstract class CollationTestBase extends LuceneTestCase { writer.close(); IndexReader reader = IndexReader.open(farsiIndex, true); - IndexSearcher search = new IndexSearcher(reader); + IndexSearcher search = newSearcher(reader); // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi // orders the U+0698 character before the U+0633 character, so the single From 159c733369f8b9cb1da309732d7ce4404113c501 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Thu, 3 Feb 2011 03:11:12 +0000 Subject: [PATCH 081/185] fix false fail, test relies upon docid order git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1066727 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/src/test/org/apache/lucene/search/TestWildcard.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lucene/src/test/org/apache/lucene/search/TestWildcard.java b/lucene/src/test/org/apache/lucene/search/TestWildcard.java index 56e2825e684..22f4cc52cd2 100644 --- a/lucene/src/test/org/apache/lucene/search/TestWildcard.java +++ b/lucene/src/test/org/apache/lucene/search/TestWildcard.java @@ 
-298,7 +298,9 @@ public class TestWildcard // prepare the index Directory dir = newDirectory(); - RandomIndexWriter iw = new RandomIndexWriter(random, dir); + RandomIndexWriter iw = new RandomIndexWriter(random, dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()) + .setMergePolicy(newInOrderLogMergePolicy())); for (int i = 0; i < docs.length; i++) { Document doc = new Document(); doc.add(newField(field,docs[i],Store.NO,Index.ANALYZED)); From 7161062eeefc15f9a7d54ee3de62d3eca711c120 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 3 Feb 2011 09:10:48 +0000 Subject: [PATCH 082/185] prevent emtpy TopDocs from contributing to maxScore if searches are concurrent git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1066764 13f79535-47bb-0310-9956-ffa450edef68 --- .../java/org/apache/lucene/search/IndexSearcher.java | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/lucene/src/java/org/apache/lucene/search/IndexSearcher.java b/lucene/src/java/org/apache/lucene/search/IndexSearcher.java index 23736d0726c..e3e0a1b6602 100644 --- a/lucene/src/java/org/apache/lucene/search/IndexSearcher.java +++ b/lucene/src/java/org/apache/lucene/search/IndexSearcher.java @@ -373,8 +373,10 @@ public class IndexSearcher { int totalHits = 0; float maxScore = Float.NEGATIVE_INFINITY; for (final TopDocs topDocs : runner) { - totalHits += topDocs.totalHits; - maxScore = Math.max(maxScore, topDocs.getMaxScore()); + if(topDocs.totalHits != 0) { + totalHits += topDocs.totalHits; + maxScore = Math.max(maxScore, topDocs.getMaxScore()); + } } final ScoreDoc[] scoreDocs = new ScoreDoc[hq.size()]; @@ -451,8 +453,10 @@ public class IndexSearcher { int totalHits = 0; float maxScore = Float.NEGATIVE_INFINITY; for (final TopFieldDocs topFieldDocs : runner) { - totalHits += topFieldDocs.totalHits; - maxScore = Math.max(maxScore, topFieldDocs.getMaxScore()); + if (topFieldDocs.totalHits != 0) { + totalHits += topFieldDocs.totalHits; + maxScore = 
Math.max(maxScore, topFieldDocs.getMaxScore()); + } } final ScoreDoc[] scoreDocs = new ScoreDoc[hq.size()]; for (int i = hq.size() - 1; i >= 0; i--) // put docs in array From b2e9fa1d26339129915131bd92276b5193610fd7 Mon Sep 17 00:00:00 2001 From: Uwe Schindler Date: Thu, 3 Feb 2011 14:49:06 +0000 Subject: [PATCH 083/185] SOLR-96: Fix XML parsing in XMLUpdateRequestHandler and DocumentAnalysisRequestHandler to respect charset from XML file and only use HTTP header's "Content-Type" as a "hint" git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1066819 13f79535-47bb-0310-9956-ffa450edef68 --- solr/CHANGES.txt | 4 +++ .../DocumentAnalysisRequestHandler.java | 17 ++++++---- .../org/apache/solr/handler/XMLLoader.java | 31 ++++++++++++------- 3 files changed, 34 insertions(+), 18 deletions(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index 40519a36325..a40d288259b 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -658,6 +658,10 @@ Bug Fixes * SOLR-2156: SnapPuller fails to clean Old Index Directories on Full Copy (Jayendra Patil via yonik) +* SOLR-96: Fix XML parsing in XMLUpdateRequestHandler and + DocumentAnalysisRequestHandler to respect charset from XML file and only + use HTTP header's "Content-Type" as a "hint". 
(Uwe Schindler) + Other Changes ---------------------- diff --git a/solr/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java b/solr/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java index 92e1b098265..18dc5da8a05 100644 --- a/solr/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java +++ b/solr/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java @@ -27,6 +27,7 @@ import org.apache.solr.common.params.AnalysisParams; import org.apache.solr.common.params.CommonParams; import org.apache.solr.common.params.SolrParams; import org.apache.solr.common.util.ContentStream; +import org.apache.solr.common.util.ContentStreamBase; import org.apache.solr.common.util.NamedList; import org.apache.solr.common.util.SimpleOrderedMap; import org.apache.solr.request.SolrQueryRequest; @@ -41,7 +42,7 @@ import javax.xml.stream.XMLStreamConstants; import javax.xml.stream.XMLStreamException; import javax.xml.stream.XMLStreamReader; import java.io.IOException; -import java.io.Reader; +import java.io.InputStream; import java.util.*; /** @@ -157,10 +158,14 @@ public class DocumentAnalysisRequestHandler extends AnalysisRequestHandlerBase { request.setShowMatch(showMatch); ContentStream stream = extractSingleContentStream(req); - Reader reader = stream.getReader(); - XMLStreamReader parser = inputFactory.createXMLStreamReader(reader); - + InputStream is = null; + XMLStreamReader parser = null; + try { + is = stream.getStream(); + final String charset = ContentStreamBase.getCharsetFromContentType(stream.getContentType()); + parser = (charset == null) ? 
+ inputFactory.createXMLStreamReader(is) : inputFactory.createXMLStreamReader(is, charset); while (true) { int event = parser.next(); @@ -182,8 +187,8 @@ public class DocumentAnalysisRequestHandler extends AnalysisRequestHandlerBase { } } finally { - parser.close(); - IOUtils.closeQuietly(reader); + if (parser != null) parser.close(); + IOUtils.closeQuietly(is); } } diff --git a/solr/src/java/org/apache/solr/handler/XMLLoader.java b/solr/src/java/org/apache/solr/handler/XMLLoader.java index b87c54a9ecd..72aaf25abd8 100644 --- a/solr/src/java/org/apache/solr/handler/XMLLoader.java +++ b/solr/src/java/org/apache/solr/handler/XMLLoader.java @@ -24,6 +24,7 @@ import org.apache.solr.update.DeleteUpdateCommand; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.response.SolrQueryResponse; import org.apache.solr.common.util.ContentStream; +import org.apache.solr.common.util.ContentStreamBase; import org.apache.solr.common.util.StrUtils; import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrInputDocument; @@ -36,8 +37,8 @@ import javax.xml.stream.FactoryConfigurationError; import javax.xml.stream.XMLStreamConstants; import javax.xml.stream.XMLInputFactory; import javax.xml.transform.TransformerConfigurationException; -import java.io.Reader; -import java.io.StringReader; +import java.io.ByteArrayInputStream; +import java.io.InputStream; import java.io.IOException; @@ -57,22 +58,28 @@ class XMLLoader extends ContentStreamLoader { @Override public void load(SolrQueryRequest req, SolrQueryResponse rsp, ContentStream stream) throws Exception { errHeader = "XMLLoader: " + stream.getSourceInfo(); - Reader reader = null; + InputStream is = null; + XMLStreamReader parser = null; try { - reader = stream.getReader(); + is = stream.getStream(); + final String charset = ContentStreamBase.getCharsetFromContentType(stream.getContentType()); if (XmlUpdateRequestHandler.log.isTraceEnabled()) { - String body = IOUtils.toString(reader); - 
XmlUpdateRequestHandler.log.trace("body", body); - reader = new StringReader(body); + final byte[] body = IOUtils.toByteArray(is); + // TODO: The charset may be wrong, as the real charset is later + // determined by the XML parser, the content-type is only used as a hint! + XmlUpdateRequestHandler.log.trace("body", new String(body, (charset == null) ? + ContentStreamBase.DEFAULT_CHARSET : charset)); + IOUtils.closeQuietly(is); + is = new ByteArrayInputStream(body); } - - XMLStreamReader parser = inputFactory.createXMLStreamReader(reader); + parser = (charset == null) ? + inputFactory.createXMLStreamReader(is) : inputFactory.createXMLStreamReader(is, charset); this.processUpdate(req, processor, parser); - } - catch (XMLStreamException e) { + } catch (XMLStreamException e) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e.getMessage(), e); } finally { - IOUtils.closeQuietly(reader); + if (parser != null) parser.close(); + IOUtils.closeQuietly(is); } } From 30ed34d4fe4e9f8f2711eecd6789891beec19f52 Mon Sep 17 00:00:00 2001 From: Yonik Seeley Date: Thu, 3 Feb 2011 16:07:26 +0000 Subject: [PATCH 084/185] tests: fix doclist offset in grouping model git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1066847 13f79535-47bb-0310-9956-ffa450edef68 --- solr/src/test/org/apache/solr/TestGroupingSearch.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/solr/src/test/org/apache/solr/TestGroupingSearch.java b/solr/src/test/org/apache/solr/TestGroupingSearch.java index 09ab2a52d2f..2f4775ebdb0 100644 --- a/solr/src/test/org/apache/solr/TestGroupingSearch.java +++ b/solr/src/test/org/apache/solr/TestGroupingSearch.java @@ -526,8 +526,7 @@ public class TestGroupingSearch extends SolrTestCaseJ4 { Map resultSet = new LinkedHashMap(); group.put("doclist", resultSet); resultSet.put("numFound", grp.docs.size()); - resultSet.put("start", start); - + resultSet.put("start", group_offset); List docs = new ArrayList(); resultSet.put("docs", docs); 
for (int j=group_offset; j Date: Thu, 3 Feb 2011 16:14:07 +0000 Subject: [PATCH 085/185] tests: get model order from index, fix some json numeric comparisons git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1066850 13f79535-47bb-0310-9956-ffa450edef68 --- .../test/org/apache/solr/JSONTestUtil.java | 14 +++++---- .../test/org/apache/solr/SolrTestCaseJ4.java | 30 +++++++++++++------ 2 files changed, 29 insertions(+), 15 deletions(-) diff --git a/solr/src/test/org/apache/solr/JSONTestUtil.java b/solr/src/test/org/apache/solr/JSONTestUtil.java index d8cb897a6d9..8bd5a79c8f3 100644 --- a/solr/src/test/org/apache/solr/JSONTestUtil.java +++ b/solr/src/test/org/apache/solr/JSONTestUtil.java @@ -135,14 +135,16 @@ class CollectionTester { if (!expected.equals(val)) { // make an exception for some numerics - if (expected instanceof Integer && val instanceof Long || expected instanceof Long && val instanceof Integer + if ((expected instanceof Integer && val instanceof Long || expected instanceof Long && val instanceof Integer) && ((Number)expected).longValue() == ((Number)val).longValue()) { - // OK - } else if (expected instanceof Float && val instanceof Double || expected instanceof Double && val instanceof Float - && ((Number)expected).doubleValue() == ((Number)val).doubleValue()) - { - // OK + return true; + } else if ((expected instanceof Float && val instanceof Double || expected instanceof Double && val instanceof Float)) { + double a = ((Number)expected).doubleValue(); + double b = ((Number)val).doubleValue(); + if (Double.compare(a,b) == 0) return true; + if (Math.abs(a-b) < 1e-5) return true; + return false; } else { setErr("mismatch: '" + expected + "'!='" + val + "'"); return false; diff --git a/solr/src/test/org/apache/solr/SolrTestCaseJ4.java b/solr/src/test/org/apache/solr/SolrTestCaseJ4.java index 9efe83751eb..17269d477d3 100755 --- a/solr/src/test/org/apache/solr/SolrTestCaseJ4.java +++ b/solr/src/test/org/apache/solr/SolrTestCaseJ4.java @@ -22,6 
+22,7 @@ package org.apache.solr; import org.apache.lucene.util.LuceneTestCase; import org.apache.noggit.CharArr; import org.apache.noggit.JSONUtil; +import org.apache.noggit.ObjectBuilder; import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrInputDocument; import org.apache.solr.common.SolrInputField; @@ -837,17 +838,9 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase { // commit an average of 10 times for large sets, or 10% of the time for small sets int commitOneOutOf = Math.max(nDocs/10, 10); - - // find the max order (docid) and start from there - int order = -1; - for (Doc doc : model.values()) { - order = Math.max(order, doc.order); - } - order++; - for (int i=0; i docList = (List)response; + int order = 0; + for (Map doc : docList) { + Object id = doc.get("id"); + Doc modelDoc = model.get(id); + if (modelDoc == null) continue; // may be some docs in the index that aren't modeled + modelDoc.order = order++; + } + + // make sure we updated the order of all docs in the model + assertEquals(order, model.size()); + return model; } From 69ad01cb981afed02f7c08fe0a109890a62a2753 Mon Sep 17 00:00:00 2001 From: Uwe Schindler Date: Thu, 3 Feb 2011 17:41:53 +0000 Subject: [PATCH 086/185] Fix issue in CommonsHttpSolrServer where a Reader is copied to an OutputStream using default encoding git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1066889 13f79535-47bb-0310-9956-ffa450edef68 --- .../solr/client/solrj/impl/CommonsHttpSolrServer.java | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/solr/src/solrj/org/apache/solr/client/solrj/impl/CommonsHttpSolrServer.java b/solr/src/solrj/org/apache/solr/client/solrj/impl/CommonsHttpSolrServer.java index 6c2c1f76e7f..7900ab8ac7c 100644 --- a/solr/src/solrj/org/apache/solr/client/solrj/impl/CommonsHttpSolrServer.java +++ b/solr/src/solrj/org/apache/solr/client/solrj/impl/CommonsHttpSolrServer.java @@ -20,7 +20,6 @@ package org.apache.solr.client.solrj.impl; 
import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.Reader; import java.net.MalformedURLException; import java.net.URL; import java.util.*; @@ -335,11 +334,11 @@ public class CommonsHttpSolrServer extends SolrServer @Override protected void sendData(OutputStream out) throws IOException { - Reader reader = c.getReader(); + InputStream in = c.getStream(); try { - IOUtils.copy(reader, out); + IOUtils.copy(in, out); } finally { - reader.close(); + in.close(); } } }); From dc16b7efea3084d76d3db1fb03eae5d89b9e465f Mon Sep 17 00:00:00 2001 From: "Chris M. Hostetter" Date: Thu, 3 Feb 2011 23:21:44 +0000 Subject: [PATCH 087/185] SOLR-2339: Fix sorting to explicitly generate an error if you attempt to sort on a multiValued field. git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1067030 13f79535-47bb-0310-9956-ffa450edef68 --- solr/CHANGES.txt | 11 + .../org/apache/solr/schema/BoolField.java | 1 + .../org/apache/solr/schema/ByteField.java | 3 +- .../org/apache/solr/schema/DoubleField.java | 1 + .../org/apache/solr/schema/FieldType.java | 6 +- .../org/apache/solr/schema/FloatField.java | 1 + .../java/org/apache/solr/schema/IntField.java | 1 + .../org/apache/solr/schema/LongField.java | 2 +- .../org/apache/solr/schema/SchemaField.java | 28 +++ .../org/apache/solr/schema/ShortField.java | 2 +- .../org/apache/solr/schema/TextField.java | 1 + .../org/apache/solr/schema/TrieDateField.java | 1 + .../org/apache/solr/schema/TrieField.java | 2 + .../org/apache/solr/search/QueryParsing.java | 11 +- solr/src/test-files/solr/conf/schema.xml | 21 +- solr/src/test-files/solr/conf/schema12.xml | 2 + .../solr/BaseDistributedSearchTestCase.java | 5 +- .../apache/solr/BasicFunctionalityTest.java | 37 ++++ .../org/apache/solr/ConvertedLegacyTest.java | 192 +++++++++--------- .../test/org/apache/solr/SolrTestCaseJ4.java | 8 + .../apache/solr/TestDistributedSearch.java | 6 +- .../org/apache/solr/TestGroupingSearch.java | 40 ++-- 
.../solr/cloud/BasicDistributedZkTest.java | 6 +- .../handler/StandardRequestHandlerTest.java | 16 +- .../QueryElevationComponentTest.java | 14 +- .../search/function/SortByFunctionTest.java | 14 +- 26 files changed, 271 insertions(+), 161 deletions(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index a40d288259b..7e6506702f7 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -53,6 +53,15 @@ Upgrading from Solr 3.1-dev legacy behavior should set a default value for the 'mm' param in their solrconfig.xml file. +* In previous releases, sorting on fields that are "multiValued" + (either by explicit declaration in schema.xml or by implict behavior + because the "version" attribute on the schema was less then 1.2) did + not generally work, but it would sometimes silently act as if it + succeeded and order the docs arbitrarily. Solr will now fail on any + attempt to sort on a multivalued field + + + Detailed Change List ---------------------- @@ -170,6 +179,8 @@ Bug Fixes * SOLR-1940: Fix SolrDispatchFilter behavior when Content-Type is unknown (Lance Norskog and hossman) +* SOLR-2339: Fix sorting to explicitly generate an error if you + attempt to sort on a multiValued field. 
(hossman) Other Changes ---------------------- diff --git a/solr/src/java/org/apache/solr/schema/BoolField.java b/solr/src/java/org/apache/solr/schema/BoolField.java index 0694e4c4c13..19fb3620dd3 100644 --- a/solr/src/java/org/apache/solr/schema/BoolField.java +++ b/solr/src/java/org/apache/solr/schema/BoolField.java @@ -43,6 +43,7 @@ public class BoolField extends FieldType { @Override public SortField getSortField(SchemaField field,boolean reverse) { + field.checkSortability(); return getStringSort(field,reverse); } diff --git a/solr/src/java/org/apache/solr/schema/ByteField.java b/solr/src/java/org/apache/solr/schema/ByteField.java index 204bce94836..dba71d86680 100644 --- a/solr/src/java/org/apache/solr/schema/ByteField.java +++ b/solr/src/java/org/apache/solr/schema/ByteField.java @@ -41,6 +41,7 @@ public class ByteField extends FieldType { ///////////////////////////////////////////////////////////// @Override public SortField getSortField(SchemaField field, boolean reverse) { + field.checkSortability(); return new SortField(field.name, SortField.BYTE, reverse); } @@ -78,4 +79,4 @@ public class ByteField extends FieldType { public Byte toObject(Fieldable f) { return Byte.valueOf(toExternal(f)); } -} \ No newline at end of file +} diff --git a/solr/src/java/org/apache/solr/schema/DoubleField.java b/solr/src/java/org/apache/solr/schema/DoubleField.java index 62e34e7ab82..c668c2a1647 100644 --- a/solr/src/java/org/apache/solr/schema/DoubleField.java +++ b/solr/src/java/org/apache/solr/schema/DoubleField.java @@ -41,6 +41,7 @@ public class DoubleField extends FieldType { ///////////////////////////////////////////////////////////// @Override public SortField getSortField(SchemaField field, boolean reverse) { + field.checkSortability(); return new SortField(field.name, SortField.DOUBLE, reverse); } diff --git a/solr/src/java/org/apache/solr/schema/FieldType.java b/solr/src/java/org/apache/solr/schema/FieldType.java index 712c22519f7..30e0a3130ac 100644 --- 
a/solr/src/java/org/apache/solr/schema/FieldType.java +++ b/solr/src/java/org/apache/solr/schema/FieldType.java @@ -478,13 +478,17 @@ public abstract class FieldType extends FieldProperties { /** * Returns the SortField instance that should be used to sort fields * of this type. + * @see SchemaField#checkSortability */ public abstract SortField getSortField(SchemaField field, boolean top); /** - * Utility usable by subclasses when they want to get basic String sorting. + * Utility usable by subclasses when they want to get basic String sorting + * using common checks. + * @see SchemaField#checkSortability */ protected SortField getStringSort(SchemaField field, boolean reverse) { + field.checkSortability(); return Sorting.getStringSortField(field.name, reverse, field.sortMissingLast(),field.sortMissingFirst()); } diff --git a/solr/src/java/org/apache/solr/schema/FloatField.java b/solr/src/java/org/apache/solr/schema/FloatField.java index 2df5ec9c345..51c7b646c30 100644 --- a/solr/src/java/org/apache/solr/schema/FloatField.java +++ b/solr/src/java/org/apache/solr/schema/FloatField.java @@ -39,6 +39,7 @@ public class FloatField extends FieldType { @Override public SortField getSortField(SchemaField field,boolean reverse) { + field.checkSortability(); return new SortField(field.name,SortField.FLOAT, reverse); } diff --git a/solr/src/java/org/apache/solr/schema/IntField.java b/solr/src/java/org/apache/solr/schema/IntField.java index 5d8182d52ae..73ff10a02c0 100644 --- a/solr/src/java/org/apache/solr/schema/IntField.java +++ b/solr/src/java/org/apache/solr/schema/IntField.java @@ -39,6 +39,7 @@ public class IntField extends FieldType { @Override public SortField getSortField(SchemaField field,boolean reverse) { + field.checkSortability(); return new SortField(field.name,SortField.INT, reverse); } diff --git a/solr/src/java/org/apache/solr/schema/LongField.java b/solr/src/java/org/apache/solr/schema/LongField.java index f1189b6adb7..a6a6dd01432 100644 --- 
a/solr/src/java/org/apache/solr/schema/LongField.java +++ b/solr/src/java/org/apache/solr/schema/LongField.java @@ -41,7 +41,7 @@ public class LongField extends FieldType { @Override public SortField getSortField(SchemaField field,boolean reverse) { - + field.checkSortability(); return new SortField(field.name,SortField.LONG, reverse); } diff --git a/solr/src/java/org/apache/solr/schema/SchemaField.java b/solr/src/java/org/apache/solr/schema/SchemaField.java index 2618fa987b3..5b9e417c411 100644 --- a/solr/src/java/org/apache/solr/schema/SchemaField.java +++ b/solr/src/java/org/apache/solr/schema/SchemaField.java @@ -17,9 +17,12 @@ package org.apache.solr.schema; +import org.apache.solr.common.SolrException; +import org.apache.solr.common.SolrException.ErrorCode; import org.apache.lucene.document.Field; import org.apache.lucene.document.Fieldable; import org.apache.lucene.search.SortField; + import org.apache.solr.response.TextResponseWriter; import java.util.Map; @@ -120,10 +123,35 @@ public final class SchemaField extends FieldProperties { type.write(writer,name,val); } + /** + * Delegates to the FieldType for this field + * @see FieldType#getSortField + */ public SortField getSortField(boolean top) { return type.getSortField(this, top); } + /** + * Sanity checks that the properties of this field type are plausible + * for a field that may be used in sorting, throwing an appropraite + * exception (including hte field name) if it is not. FieldType subclasses + * can choose to call this method in their getSortField implementation + * @see FieldType#getSortField + */ + public void checkSortability() throws SolrException { + if (! 
indexed() ) { + throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, + "can not sort on unindexed field: " + + getName()); + } + if ( multiValued() ) { + throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, + "can not sort on multivalued field: " + + getName()); + } + + } + static SchemaField create(String name, FieldType ft, Map props) { diff --git a/solr/src/java/org/apache/solr/schema/ShortField.java b/solr/src/java/org/apache/solr/schema/ShortField.java index 2db6861ccc0..8283ffb0f89 100644 --- a/solr/src/java/org/apache/solr/schema/ShortField.java +++ b/solr/src/java/org/apache/solr/schema/ShortField.java @@ -44,7 +44,7 @@ public class ShortField extends FieldType { @Override public SortField getSortField(SchemaField field, boolean reverse) { - + field.checkSortability(); return new SortField(field.name, SortField.SHORT, reverse); } diff --git a/solr/src/java/org/apache/solr/schema/TextField.java b/solr/src/java/org/apache/solr/schema/TextField.java index d43cf54efa6..a30bbfe2a54 100644 --- a/solr/src/java/org/apache/solr/schema/TextField.java +++ b/solr/src/java/org/apache/solr/schema/TextField.java @@ -65,6 +65,7 @@ public class TextField extends FieldType { @Override public SortField getSortField(SchemaField field, boolean reverse) { + /* :TODO: maybe warn if isTokenized(), but doesn't use LimitTokenCountFilter in it's chain? 
*/ return getStringSort(field, reverse); } diff --git a/solr/src/java/org/apache/solr/schema/TrieDateField.java b/solr/src/java/org/apache/solr/schema/TrieDateField.java index 7f118704f56..74db2991aaf 100755 --- a/solr/src/java/org/apache/solr/schema/TrieDateField.java +++ b/solr/src/java/org/apache/solr/schema/TrieDateField.java @@ -78,6 +78,7 @@ public class TrieDateField extends DateField { @Override public SortField getSortField(SchemaField field, boolean top) { + field.checkSortability(); return new SortField(new LongValuesCreator( field.getName(), FieldCache.NUMERIC_UTILS_LONG_PARSER, CachedArrayCreator.CACHE_VALUES_AND_BITS ), top); } diff --git a/solr/src/java/org/apache/solr/schema/TrieField.java b/solr/src/java/org/apache/solr/schema/TrieField.java index 6f6d01c7023..7f980283230 100644 --- a/solr/src/java/org/apache/solr/schema/TrieField.java +++ b/solr/src/java/org/apache/solr/schema/TrieField.java @@ -123,6 +123,8 @@ public class TrieField extends FieldType { @Override public SortField getSortField(SchemaField field, boolean top) { + field.checkSortability(); + int flags = CachedArrayCreator.CACHE_VALUES_AND_BITS; Object missingValue = null; boolean sortMissingLast = on( SORT_MISSING_LAST, properties ); diff --git a/solr/src/java/org/apache/solr/search/QueryParsing.java b/solr/src/java/org/apache/solr/search/QueryParsing.java index 1ffbdefb828..1c9a5826428 100644 --- a/solr/src/java/org/apache/solr/search/QueryParsing.java +++ b/solr/src/java/org/apache/solr/search/QueryParsing.java @@ -330,16 +330,9 @@ public class QueryParsing { } throw new SolrException (SolrException.ErrorCode.BAD_REQUEST, - "sort param fiedl can't be found: " + field); + "sort param field can't be found: " + field); } - - // TODO: remove this - it should be up to the FieldType - if (!sf.indexed()) { - throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, - "can not sort on unindexed field: " - + field); - } - lst.add(sf.getType().getSortField(sf, top)); + 
lst.add(sf.getSortField(top)); } } diff --git a/solr/src/test-files/solr/conf/schema.xml b/solr/src/test-files/solr/conf/schema.xml index 6590e20cc95..490bfc75b05 100644 --- a/solr/src/test-files/solr/conf/schema.xml +++ b/solr/src/test-files/solr/conf/schema.xml @@ -402,8 +402,8 @@ - - + + @@ -480,7 +480,7 @@ - + @@ -508,27 +508,40 @@ both match, the first appearing in the schema will be used. --> + + + + + + + + + + + - + + + diff --git a/solr/src/test-files/solr/conf/schema12.xml b/solr/src/test-files/solr/conf/schema12.xml index eadcfdc4a12..c8a60840ee1 100755 --- a/solr/src/test-files/solr/conf/schema12.xml +++ b/solr/src/test-files/solr/conf/schema12.xml @@ -545,6 +545,8 @@ + + diff --git a/solr/src/test/org/apache/solr/BaseDistributedSearchTestCase.java b/solr/src/test/org/apache/solr/BaseDistributedSearchTestCase.java index 1934bf7cd31..b1ea44c5de2 100644 --- a/solr/src/test/org/apache/solr/BaseDistributedSearchTestCase.java +++ b/solr/src/test/org/apache/solr/BaseDistributedSearchTestCase.java @@ -134,7 +134,7 @@ public abstract class BaseDistributedSearchTestCase extends SolrTestCaseJ4 { */ public abstract void doTest() throws Exception; - public static String[] fieldNames = new String[]{"n_ti", "n_f", "n_tf", "n_d", "n_td", "n_l", "n_tl", "n_dt", "n_tdt"}; + public static String[] fieldNames = new String[]{"n_ti1", "n_f1", "n_tf1", "n_d1", "n_td1", "n_l1", "n_tl1", "n_dt1", "n_tdt1"}; public static RandVal[] randVals = new RandVal[]{rint, rfloat, rfloat, rdouble, rdouble, rlong, rlong, rdate, rdate}; protected String[] getFieldNames() { @@ -580,7 +580,8 @@ public abstract class BaseDistributedSearchTestCase extends SolrTestCaseJ4 { String cmp; cmp = compare(a.getResponse(), b.getResponse(), flags, handle); if (cmp != null) { - log.info("Mismatched responses:\n" + a + "\n" + b); + //log.info("Mismatched responses:\n" + a + "\n" + b); + System.err.println("Mismatched responses:\n" + a + "\n" + b); // :nocommit: TestCase.fail(cmp); } } diff --git 
a/solr/src/test/org/apache/solr/BasicFunctionalityTest.java b/solr/src/test/org/apache/solr/BasicFunctionalityTest.java index 3847825c7a5..a69fe52c927 100644 --- a/solr/src/test/org/apache/solr/BasicFunctionalityTest.java +++ b/solr/src/test/org/apache/solr/BasicFunctionalityTest.java @@ -29,6 +29,8 @@ import javax.xml.parsers.DocumentBuilderFactory; import org.apache.lucene.document.Field; import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.LogMergePolicy; +import org.apache.solr.common.SolrException; +import org.apache.solr.common.SolrException.ErrorCode; import org.apache.solr.common.params.AppendedSolrParams; import org.apache.solr.common.params.CommonParams; import org.apache.solr.common.params.DefaultSolrParams; @@ -47,6 +49,8 @@ import org.apache.solr.schema.SchemaField; import org.apache.solr.search.DocIterator; import org.apache.solr.search.DocList; import org.apache.solr.update.SolrIndexWriter; + + import org.junit.BeforeClass; import org.junit.Test; @@ -658,6 +662,39 @@ public class BasicFunctionalityTest extends SolrTestCaseJ4 { "*[count(//doc)=1]"); } + @Test + public void testAbuseOfSort() { + + assertU(adoc("id", "9999991", + "sortabuse_b", "true", + "sortabuse_t", "zzz xxx ccc vvv bbb nnn aaa sss ddd fff ggg")); + assertU(adoc("id", "9999992", + "sortabuse_b", "true", + "sortabuse_t", "zzz xxx ccc vvv bbb nnn qqq www eee rrr ttt")); + + assertU(commit()); + + try { + assertQ("sort on something that shouldn't work", + req("q", "sortabuse_b:true", + "sort", "sortabuse_t asc"), + "*[count(//doc)=2]"); + fail("no error encountered when sorting on sortabuse_t"); + } catch (Exception outer) { + // EXPECTED + Throwable root = getRootCause(outer); + assertEquals("sort exception root cause", + SolrException.class, root.getClass()); + SolrException e = (SolrException) root; + assertEquals("incorrect error type", + SolrException.ErrorCode.BAD_REQUEST, + SolrException.ErrorCode.getErrorCode(e.code())); + assertTrue("exception doesn't 
contain field name", + -1 != e.getMessage().indexOf("sortabuse_t")); + } + } + + // /** this doesn't work, but if it did, this is how we'd test it. */ // public void testOverwriteFalse() { diff --git a/solr/src/test/org/apache/solr/ConvertedLegacyTest.java b/solr/src/test/org/apache/solr/ConvertedLegacyTest.java index f6f9d1b0c14..96dd599d172 100644 --- a/solr/src/test/org/apache/solr/ConvertedLegacyTest.java +++ b/solr/src/test/org/apache/solr/ConvertedLegacyTest.java @@ -123,9 +123,9 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { // test range assertU("44"); - assertU("44apple"); - assertU("44banana"); - assertU("44pear"); + assertU("44appleapple"); + assertU("44bananabanana"); + assertU("44pearpear"); assertU(""); assertQ(req("val_s:[a TO z]") ,"//*[@numFound='3'] " @@ -228,7 +228,7 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { args = new HashMap(); args.put("version","2.0"); args.put("defType","lucenePlusSort"); - req = new LocalSolrQueryRequest(h.getCore(), "val_s:[a TO z];val_s asc", + req = new LocalSolrQueryRequest(h.getCore(), "val_s:[a TO z];val_s1 asc", "standard", 0, 0 , args); assertQ(req ,"//*[@numFound='3'] " @@ -237,7 +237,7 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { args = new HashMap(); args.put("version","2.0"); args.put("defType","lucenePlusSort"); - req = new LocalSolrQueryRequest(h.getCore(), "val_s:[a TO z];val_s desc", + req = new LocalSolrQueryRequest(h.getCore(), "val_s:[a TO z];val_s1 desc", "standard", 0, 0 , args); assertQ(req ,"//*[@numFound='3'] " @@ -509,133 +509,133 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { // test integer ranges and sorting assertU("44"); - assertU("441234567890"); - assertU("4410"); - assertU("441"); - assertU("442"); - assertU("4415"); - assertU("44-1"); - assertU("44-987654321"); - assertU("442147483647"); - assertU("44-2147483648"); - assertU("440"); + assertU("441234567890"); + assertU("4410"); + assertU("441"); + assertU("442"); + 
assertU("4415"); + assertU("44-1"); + assertU("44-987654321"); + assertU("442147483647"); + assertU("44-2147483648"); + assertU("440"); assertU(""); assertQ(req("id:44") ,"*[count(//doc)=10]" ); - assertQ(req("num_i:2147483647") + assertQ(req("num_i1:2147483647") ,"//@numFound[.='1'] " ,"//int[.='2147483647']" ); - assertQ(req("num_i:\"-2147483648\"") + assertQ(req("num_i1:\"-2147483648\"") ,"//@numFound[.='1'] " ,"//int[.='-2147483648']" ); - assertQ(req("id:44;num_i asc;") + assertQ(req("id:44;num_i1 asc;") ,"//doc[1]/int[.='-2147483648'] " ,"//doc[last()]/int[.='2147483647']" ); - assertQ(req("id:44;num_i desc;") + assertQ(req("id:44;num_i1 desc;") ,"//doc[1]/int[.='2147483647'] " ,"//doc[last()]/int[.='-2147483648']" ); - assertQ(req("num_i:[0 TO 9]") + assertQ(req("num_i1:[0 TO 9]") ,"*[count(//doc)=3]" ); - assertQ(req("num_i:[-2147483648 TO 2147483647]") + assertQ(req("num_i1:[-2147483648 TO 2147483647]") ,"*[count(//doc)=10]" ); - assertQ(req("num_i:[-10 TO -1]") + assertQ(req("num_i1:[-10 TO -1]") ,"*[count(//doc)=1]" ); // test long ranges and sorting assertU("44"); - assertU("441234567890"); - assertU("4410"); - assertU("441"); - assertU("442"); - assertU("4415"); - assertU("44-1"); - assertU("44-987654321"); - assertU("449223372036854775807"); - assertU("44-9223372036854775808"); - assertU("440"); + assertU("441234567890"); + assertU("4410"); + assertU("441"); + assertU("442"); + assertU("4415"); + assertU("44-1"); + assertU("44-987654321"); + assertU("449223372036854775807"); + assertU("44-9223372036854775808"); + assertU("440"); assertU(""); assertQ(req("id:44") ,"*[count(//doc)=10]" ); - assertQ(req("num_l:9223372036854775807") + assertQ(req("num_l1:9223372036854775807") ,"//@numFound[.='1'] " ,"//long[.='9223372036854775807']" ); - assertQ(req("num_l:\"-9223372036854775808\"") + assertQ(req("num_l1:\"-9223372036854775808\"") ,"//@numFound[.='1'] " ,"//long[.='-9223372036854775808']" ); - assertQ(req("id:44;num_l asc;") + assertQ(req("id:44;num_l1 
asc;") ,"//doc[1]/long[.='-9223372036854775808'] " ,"//doc[last()]/long[.='9223372036854775807']" ); - assertQ(req("id:44;num_l desc;") + assertQ(req("id:44;num_l1 desc;") ,"//doc[1]/long[.='9223372036854775807'] " ,"//doc[last()]/long[.='-9223372036854775808']" ); - assertQ(req("num_l:[-1 TO 9]") + assertQ(req("num_l1:[-1 TO 9]") ,"*[count(//doc)=4]" ); - assertQ(req("num_l:[-9223372036854775808 TO 9223372036854775807]") + assertQ(req("num_l1:[-9223372036854775808 TO 9223372036854775807]") ,"*[count(//doc)=10]" ); - assertQ(req("num_l:[-10 TO -1]") + assertQ(req("num_l1:[-10 TO -1]") ,"*[count(//doc)=1]" ); // test binary float ranges and sorting assertU("44"); - assertU("441.4142135"); - assertU("44Infinity"); - assertU("44-Infinity"); - assertU("44NaN"); - assertU("442"); - assertU("44-1"); - assertU("44-987654321"); - assertU("44-999999.99"); - assertU("44-1e20"); - assertU("440"); + assertU("441.4142135"); + assertU("44Infinity"); + assertU("44-Infinity"); + assertU("44NaN"); + assertU("442"); + assertU("44-1"); + assertU("44-987654321"); + assertU("44-999999.99"); + assertU("44-1e20"); + assertU("440"); assertU(""); assertQ(req("id:44") ,"*[count(//doc)=10]" ); - assertQ(req("num_sf:Infinity") + assertQ(req("num_sf1:Infinity") ,"//@numFound[.='1'] " ,"//float[.='Infinity']" ); - assertQ(req("num_sf:\"-Infinity\"") + assertQ(req("num_sf1:\"-Infinity\"") ,"//@numFound[.='1'] " ,"//float[.='-Infinity']" ); - assertQ(req("num_sf:\"NaN\"") + assertQ(req("num_sf1:\"NaN\"") ,"//@numFound[.='1'] " ,"//float[.='NaN']" ); - assertQ(req("num_sf:\"-1e20\"") + assertQ(req("num_sf1:\"-1e20\"") ,"//@numFound[.='1']" ); - assertQ(req("id:44;num_sf asc;") + assertQ(req("id:44;num_sf1 asc;") ,"//doc[1]/float[.='-Infinity'] " ,"//doc[last()]/float[.='NaN']" ); - assertQ(req("id:44;num_sf desc;") + assertQ(req("id:44;num_sf1 desc;") ,"//doc[1]/float[.='NaN'] " ,"//doc[last()]/float[.='-Infinity']" ); - assertQ(req("num_sf:[-1 TO 2]") + assertQ(req("num_sf1:[-1 TO 2]") 
,"*[count(//doc)=4]" ); - assertQ(req("num_sf:[-Infinity TO Infinity]") + assertQ(req("num_sf1:[-Infinity TO Infinity]") ,"*[count(//doc)=9]" ); @@ -644,50 +644,50 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { // test binary double ranges and sorting assertU("44"); - assertU("441.4142135"); - assertU("44Infinity"); - assertU("44-Infinity"); - assertU("44NaN"); - assertU("442"); - assertU("44-1"); - assertU("441e-100"); - assertU("44-999999.99"); - assertU("44-1e100"); - assertU("440"); + assertU("441.4142135"); + assertU("44Infinity"); + assertU("44-Infinity"); + assertU("44NaN"); + assertU("442"); + assertU("44-1"); + assertU("441e-100"); + assertU("44-999999.99"); + assertU("44-1e100"); + assertU("440"); assertU(""); assertQ(req("id:44") ,"*[count(//doc)=10]" ); - assertQ(req("num_sd:Infinity") + assertQ(req("num_sd1:Infinity") ,"//@numFound[.='1'] " ,"//double[.='Infinity']" ); - assertQ(req("num_sd:\"-Infinity\"") + assertQ(req("num_sd1:\"-Infinity\"") ,"//@numFound[.='1'] " ,"//double[.='-Infinity']" ); - assertQ(req("num_sd:\"NaN\"") + assertQ(req("num_sd1:\"NaN\"") ,"//@numFound[.='1'] " ,"//double[.='NaN']" ); - assertQ(req("num_sd:\"-1e100\"") + assertQ(req("num_sd1:\"-1e100\"") ,"//@numFound[.='1']" ); - assertQ(req("num_sd:\"1e-100\"") + assertQ(req("num_sd1:\"1e-100\"") ,"//@numFound[.='1']" ); - assertQ(req("id:44;num_sd asc;") + assertQ(req("id:44;num_sd1 asc;") ,"//doc[1]/double[.='-Infinity'] " ,"//doc[last()]/double[.='NaN']" ); - assertQ(req("id:44;num_sd desc;") + assertQ(req("id:44;num_sd1 desc;") ,"//doc[1]/double[.='NaN'] " ,"//doc[last()]/double[.='-Infinity']" ); - assertQ(req("num_sd:[-1 TO 2]") + assertQ(req("num_sd1:[-1 TO 2]") ,"*[count(//doc)=5]" ); - assertQ(req("num_sd:[-Infinity TO Infinity]") + assertQ(req("num_sd1:[-Infinity TO Infinity]") ,"*[count(//doc)=9]" ); @@ -695,38 +695,38 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { // test sorting on multiple fields assertU("44"); - assertU("4410"); - 
assertU("441100"); - assertU("44-1"); - assertU("4415"); - assertU("44150"); - assertU("440"); + assertU("4410"); + assertU("441100"); + assertU("44-1"); + assertU("4415"); + assertU("44150"); + assertU("440"); assertU(""); assertQ(req("id:44") ,"*[count(//doc)=6]" ); - assertQ(req("id:44; a_i asc,b_i desc") + assertQ(req("id:44; a_i1 asc,b_i1 desc") ,"*[count(//doc)=6] " ,"//doc[3]/int[.='100'] " ,"//doc[4]/int[.='50']" ); - assertQ(req("id:44;a_i asc , b_i asc;") + assertQ(req("id:44;a_i1 asc , b_i1 asc;") ,"*[count(//doc)=6] " ,"//doc[3]/int[.='50'] " ,"//doc[4]/int[.='100']" ); - assertQ(req("id:44;a_i asc;") + assertQ(req("id:44;a_i1 asc;") ,"*[count(//doc)=6] " ,"//doc[1]/int[.='-1'] " ,"//doc[last()]/int[.='15']" ); - assertQ(req("id:44;a_i asc , score top;") + assertQ(req("id:44;a_i1 asc , score top;") ,"*[count(//doc)=6] " ,"//doc[1]/int[.='-1'] " ,"//doc[last()]/int[.='15']" ); - assertQ(req("id:44; score top , a_i top, b_i bottom ;") + assertQ(req("id:44; score top , a_i1 top, b_i1 bottom ;") ,"*[count(//doc)=6] " ,"//doc[last()]/int[.='-1'] " ,"//doc[1]/int[.='15'] " @@ -738,13 +738,13 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { // test sorting with some docs missing the sort field assertU("id_i:[1000 TO 1010]"); - assertU("10001Z"); - assertU("100110A"); - assertU("10021100"); - assertU("1003-1"); - assertU("100415"); - assertU("1005150"); - assertU("10060"); + assertU("10001Z"); + assertU("100110A"); + assertU("10021100"); + assertU("1003-1"); + assertU("100415"); + assertU("1005150"); + assertU("10060"); assertU(""); assertQ(req("id_i:[1000 TO 1010]") ,"*[count(//doc)=7]" @@ -759,13 +759,13 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { ,"//doc[1]/int[.='100'] " ,"//doc[2]/int[.='50']" ); - assertQ(req("id_i:[1000 TO 1010]; a_i asc,b_si desc") + assertQ(req("id_i:[1000 TO 1010]; a_i1 asc,b_si desc") ,"*[count(//doc)=7] " ,"//doc[3]/int[.='100'] " ,"//doc[4]/int[.='50'] " ,"//doc[5]/int[.='1000']" ); - 
assertQ(req("id_i:[1000 TO 1010]; a_i asc,b_si asc") + assertQ(req("id_i:[1000 TO 1010]; a_i1 asc,b_si asc") ,"*[count(//doc)=7] " ,"//doc[3]/int[.='50'] " ,"//doc[4]/int[.='100'] " diff --git a/solr/src/test/org/apache/solr/SolrTestCaseJ4.java b/solr/src/test/org/apache/solr/SolrTestCaseJ4.java index 17269d477d3..ecbe82f4382 100755 --- a/solr/src/test/org/apache/solr/SolrTestCaseJ4.java +++ b/solr/src/test/org/apache/solr/SolrTestCaseJ4.java @@ -1073,4 +1073,12 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase { } return new File(base, "solr/").getAbsolutePath(); } + + public static Throwable getRootCause(Throwable t) { + Throwable result = t; + for (Throwable cause = t; null != cause; cause = cause.getCause()) { + result = cause; + } + return result; + } } diff --git a/solr/src/test/org/apache/solr/TestDistributedSearch.java b/solr/src/test/org/apache/solr/TestDistributedSearch.java index d1cd535941d..5151564fedd 100755 --- a/solr/src/test/org/apache/solr/TestDistributedSearch.java +++ b/solr/src/test/org/apache/solr/TestDistributedSearch.java @@ -38,7 +38,7 @@ public class TestDistributedSearch extends BaseDistributedSearchTestCase { String ndouble = "n_d"; String tdouble = "n_td"; String nlong = "n_l"; - String tlong = "n_tl"; + String tlong = "other_tl1"; String ndate = "n_dt"; String tdate = "n_tdt"; @@ -98,8 +98,8 @@ public class TestDistributedSearch extends BaseDistributedSearchTestCase { query("q","*:*", "sort","{!func}add("+i1+",5)"+" desc"); query("q","*:*", "sort",i1+" asc"); query("q","*:*", "sort",i1+" desc", "fl","*,score"); - query("q","*:*", "sort",tlong+" asc", "fl","score"); // test legacy behavior - "score"=="*,score" - query("q","*:*", "sort",tlong+" desc"); + query("q","*:*", "sort","n_tl1 asc", "fl","score"); // test legacy behavior - "score"=="*,score" + query("q","*:*", "sort","n_tl1 desc"); handle.put("maxScore", SKIPVAL); query("q","{!func}"+i1);// does not expect maxScore. So if it comes ,ignore it. 
JavaBinCodec.writeSolrDocumentList() //is agnostic of request params. diff --git a/solr/src/test/org/apache/solr/TestGroupingSearch.java b/solr/src/test/org/apache/solr/TestGroupingSearch.java index 2f4775ebdb0..f0b53bc32fe 100644 --- a/solr/src/test/org/apache/solr/TestGroupingSearch.java +++ b/solr/src/test/org/apache/solr/TestGroupingSearch.java @@ -30,6 +30,10 @@ import java.util.*; public class TestGroupingSearch extends SolrTestCaseJ4 { + public static final String FOO_STRING_FIELD = "foo_s1"; + public static final String SMALL_STRING_FIELD = "small_s1"; + public static final String SMALL_INT_FIELD = "small_i"; + @BeforeClass public static void beforeTests() throws Exception { initCore("solrconfig.xml","schema12.xml"); @@ -376,9 +380,9 @@ public class TestGroupingSearch extends SolrTestCaseJ4 { types.add(new FldType("id",ONE_ONE, new SVal('A','Z',4,4))); types.add(new FldType("score_f",ONE_ONE, new FVal(1,100))); // field used to score types.add(new FldType("foo_i",ZERO_ONE, new IRange(0,indexSize))); - types.add(new FldType("foo_s",ZERO_ONE, new SVal('a','z',1,2))); - types.add(new FldType("small_s",ZERO_ONE, new SVal('a',(char)('c'+indexSize/10),1,1))); - types.add(new FldType("small_i",ZERO_ONE, new IRange(0,5+indexSize/10))); + types.add(new FldType(FOO_STRING_FIELD,ZERO_ONE, new SVal('a','z',1,2))); + types.add(new FldType(SMALL_STRING_FIELD,ZERO_ONE, new SVal('a',(char)('c'+indexSize/10),1,1))); + types.add(new FldType(SMALL_INT_FIELD,ZERO_ONE, new IRange(0,5+indexSize/10))); clearIndex(); Map model = indexDocs(types, null, indexSize); @@ -389,36 +393,36 @@ public class TestGroupingSearch extends SolrTestCaseJ4 { clearIndex(); model.clear(); Doc d1 = createDoc(types); - d1.getValues("small_s").set(0,"c"); - d1.getValues("small_i").set(0,5); + d1.getValues(SMALL_STRING_FIELD).set(0,"c"); + d1.getValues(SMALL_INT_FIELD).set(0,5); d1.order = 0; updateJ(toJSON(d1), params("commit","true")); model.put(d1.id, d1); d1 = createDoc(types); - 
d1.getValues("small_s").set(0,"b"); - d1.getValues("small_i").set(0,5); + d1.getValues(SMALL_STRING_FIELD).set(0,"b"); + d1.getValues(SMALL_INT_FIELD).set(0,5); d1.order = 1; updateJ(toJSON(d1), params("commit","false")); model.put(d1.id, d1); d1 = createDoc(types); - d1.getValues("small_s").set(0,"c"); - d1.getValues("small_i").set(0,5); + d1.getValues(SMALL_STRING_FIELD).set(0,"c"); + d1.getValues(SMALL_INT_FIELD).set(0,5); d1.order = 2; updateJ(toJSON(d1), params("commit","false")); model.put(d1.id, d1); d1 = createDoc(types); - d1.getValues("small_s").set(0,"c"); - d1.getValues("small_i").set(0,5); + d1.getValues(SMALL_STRING_FIELD).set(0,"c"); + d1.getValues(SMALL_INT_FIELD).set(0,5); d1.order = 3; updateJ(toJSON(d1), params("commit","false")); model.put(d1.id, d1); d1 = createDoc(types); - d1.getValues("small_s").set(0,"b"); - d1.getValues("small_i").set(0,2); + d1.getValues(SMALL_STRING_FIELD).set(0,"b"); + d1.getValues(SMALL_INT_FIELD).set(0,2); d1.order = 4; updateJ(toJSON(d1), params("commit","true")); model.put(d1.id, d1); @@ -447,11 +451,11 @@ public class TestGroupingSearch extends SolrTestCaseJ4 { // Test specific case if (false) { - groupField="small_i"; - sortComparator=createComparator(Arrays.asList(createComparator("small_s", true, true, false, true))); - sortStr = "small_s asc"; - groupComparator = createComparator(Arrays.asList(createComparator("small_s", true, true, false, false))); - groupSortStr = "small_s asc"; + groupField=SMALL_INT_FIELD; + sortComparator=createComparator(Arrays.asList(createComparator(SMALL_STRING_FIELD, true, true, false, true))); + sortStr = SMALL_STRING_FIELD + " asc"; + groupComparator = createComparator(Arrays.asList(createComparator(SMALL_STRING_FIELD, true, true, false, false))); + groupSortStr = SMALL_STRING_FIELD + " asc"; rows=1; start=0; group_offset=1; group_limit=1; } diff --git a/solr/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java b/solr/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java 
index 35d86e0ac5c..2452a90f498 100644 --- a/solr/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java +++ b/solr/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java @@ -43,7 +43,7 @@ public class BasicDistributedZkTest extends AbstractDistributedZkTestCase { String ndouble = "n_d"; String tdouble = "n_td"; String nlong = "n_l"; - String tlong = "n_tl"; + String tlong = "other_tl1"; String ndate = "n_dt"; String tdate = "n_tdt"; @@ -133,8 +133,8 @@ public class BasicDistributedZkTest extends AbstractDistributedZkTestCase { query("q","*:*", "sort",i1+" desc"); query("q","*:*", "sort",i1+" asc"); query("q","*:*", "sort",i1+" desc", "fl","*,score"); - query("q","*:*", "sort",tlong+" asc", "fl","score"); // test legacy behavior - "score"=="*,score" - query("q","*:*", "sort",tlong+" desc"); + query("q","*:*", "sort","n_tl1 asc", "fl","score"); // test legacy behavior - "score"=="*,score" + query("q","*:*", "sort","n_tl1 desc"); handle.put("maxScore", SKIPVAL); query("q","{!func}"+i1);// does not expect maxScore. So if it comes ,ignore it. JavaBinCodec.writeSolrDocumentList() //is agnostic of request params. 
diff --git a/solr/src/test/org/apache/solr/handler/StandardRequestHandlerTest.java b/solr/src/test/org/apache/solr/handler/StandardRequestHandlerTest.java index e9dd455da55..1cb930f94d6 100644 --- a/solr/src/test/org/apache/solr/handler/StandardRequestHandlerTest.java +++ b/solr/src/test/org/apache/solr/handler/StandardRequestHandlerTest.java @@ -43,9 +43,9 @@ public class StandardRequestHandlerTest extends AbstractSolrTestCase { public void testSorting() throws Exception { SolrCore core = h.getCore(); - assertU(adoc("id", "10", "title", "test", "val_s", "aaa")); - assertU(adoc("id", "11", "title", "test", "val_s", "bbb")); - assertU(adoc("id", "12", "title", "test", "val_s", "ccc")); + assertU(adoc("id", "10", "title", "test", "val_s1", "aaa")); + assertU(adoc("id", "11", "title", "test", "val_s1", "bbb")); + assertU(adoc("id", "12", "title", "test", "val_s1", "ccc")); assertU(commit()); Map args = new HashMap(); @@ -58,7 +58,7 @@ public class StandardRequestHandlerTest extends AbstractSolrTestCase { ,"//*[@numFound='3']" ); - args.put( CommonParams.SORT, "val_s asc" ); + args.put( CommonParams.SORT, "val_s1 asc" ); assertQ("with sort param [asc]", req ,"//*[@numFound='3']" ,"//result/doc[1]/int[@name='id'][.='10']" @@ -66,7 +66,7 @@ public class StandardRequestHandlerTest extends AbstractSolrTestCase { ,"//result/doc[3]/int[@name='id'][.='12']" ); - args.put( CommonParams.SORT, "val_s desc" ); + args.put( CommonParams.SORT, "val_s1 desc" ); assertQ("with sort param [desc]", req ,"//*[@numFound='3']" ,"//result/doc[1]/int[@name='id'][.='12']" @@ -84,7 +84,7 @@ public class StandardRequestHandlerTest extends AbstractSolrTestCase { // Using legacy ';' param args.remove( CommonParams.SORT ); args.put( QueryParsing.DEFTYPE, "lucenePlusSort" ); - args.put( CommonParams.Q, "title:test; val_s desc" ); + args.put( CommonParams.Q, "title:test; val_s1 desc" ); assertQ("with sort param [desc]", req ,"//*[@numFound='3']" ,"//result/doc[1]/int[@name='id'][.='12']" @@ -92,8 
+92,8 @@ public class StandardRequestHandlerTest extends AbstractSolrTestCase { ,"//result/doc[3]/int[@name='id'][.='10']" ); - args.put( CommonParams.Q, "title:test; val_s asc" ); - assertQ("with sort param [desc]", req + args.put( CommonParams.Q, "title:test; val_s1 asc" ); + assertQ("with sort param [asc]", req ,"//*[@numFound='3']" ,"//result/doc[1]/int[@name='id'][.='10']" ,"//result/doc[2]/int[@name='id'][.='11']" diff --git a/solr/src/test/org/apache/solr/handler/component/QueryElevationComponentTest.java b/solr/src/test/org/apache/solr/handler/component/QueryElevationComponentTest.java index 349521d0901..821c838af7c 100644 --- a/solr/src/test/org/apache/solr/handler/component/QueryElevationComponentTest.java +++ b/solr/src/test/org/apache/solr/handler/component/QueryElevationComponentTest.java @@ -120,13 +120,13 @@ public class QueryElevationComponentTest extends SolrTestCaseJ4 { @Test public void testSorting() throws IOException { - assertU(adoc("id", "a", "title", "ipod", "str_s", "a" )); - assertU(adoc("id", "b", "title", "ipod ipod", "str_s", "b" )); - assertU(adoc("id", "c", "title", "ipod ipod ipod", "str_s", "c" )); + assertU(adoc("id", "a", "title", "ipod", "str_s1", "a" )); + assertU(adoc("id", "b", "title", "ipod ipod", "str_s1", "b" )); + assertU(adoc("id", "c", "title", "ipod ipod ipod", "str_s1", "c" )); - assertU(adoc("id", "x", "title", "boosted", "str_s", "x" )); - assertU(adoc("id", "y", "title", "boosted boosted", "str_s", "y" )); - assertU(adoc("id", "z", "title", "boosted boosted boosted", "str_s", "z" )); + assertU(adoc("id", "x", "title", "boosted", "str_s1", "x" )); + assertU(adoc("id", "y", "title", "boosted boosted", "str_s1", "y" )); + assertU(adoc("id", "z", "title", "boosted boosted boosted", "str_s1", "z" )); assertU(commit()); String query = "title:ipod"; @@ -188,7 +188,7 @@ public class QueryElevationComponentTest extends SolrTestCaseJ4 { // Try normal sort by 'id' // default 'forceBoost' should be false assertEquals( false, 
booster.forceElevation ); - args.put( CommonParams.SORT, "str_s asc" ); + args.put( CommonParams.SORT, "str_s1 asc" ); assertQ( null, req ,"//*[@numFound='4']" ,"//result/doc[1]/str[@name='id'][.='a']" diff --git a/solr/src/test/org/apache/solr/search/function/SortByFunctionTest.java b/solr/src/test/org/apache/solr/search/function/SortByFunctionTest.java index cd06c5077ee..48d160af9d4 100644 --- a/solr/src/test/org/apache/solr/search/function/SortByFunctionTest.java +++ b/solr/src/test/org/apache/solr/search/function/SortByFunctionTest.java @@ -35,10 +35,10 @@ public class SortByFunctionTest extends AbstractSolrTestCase { } public void test() throws Exception { - assertU(adoc("id", "1", "x_td", "0", "y_td", "2", "w_td", "25", "z_td", "5", "f_t", "ipod")); - assertU(adoc("id", "2", "x_td", "2", "y_td", "2", "w_td", "15", "z_td", "5", "f_t", "ipod ipod ipod ipod ipod")); - assertU(adoc("id", "3", "x_td", "3", "y_td", "2", "w_td", "55", "z_td", "5", "f_t", "ipod ipod ipod ipod ipod ipod ipod ipod ipod")); - assertU(adoc("id", "4", "x_td", "4", "y_td", "2", "w_td", "45", "z_td", "5", "f_t", "ipod ipod ipod ipod ipod ipod ipod")); + assertU(adoc("id", "1", "x_td", "0", "y_td", "2", "w_td1", "25", "z_td", "5", "f_t", "ipod")); + assertU(adoc("id", "2", "x_td", "2", "y_td", "2", "w_td1", "15", "z_td", "5", "f_t", "ipod ipod ipod ipod ipod")); + assertU(adoc("id", "3", "x_td", "3", "y_td", "2", "w_td1", "55", "z_td", "5", "f_t", "ipod ipod ipod ipod ipod ipod ipod ipod ipod")); + assertU(adoc("id", "4", "x_td", "4", "y_td", "2", "w_td1", "45", "z_td", "5", "f_t", "ipod ipod ipod ipod ipod ipod ipod")); assertU(commit()); assertQ(req("fl", "*,score", "q", "*:*"), @@ -82,8 +82,8 @@ public class SortByFunctionTest extends AbstractSolrTestCase { "//result/doc[3]/int[@name='id'][.='3']", "//result/doc[4]/int[@name='id'][.='4']" ); - //the function is equal, w_td separates - assertQ(req("q", "*:*", "fl", "id", "sort", "sum(z_td, y_td) asc, w_td asc"), + //the function is equal, 
w_td1 separates + assertQ(req("q", "*:*", "fl", "id", "sort", "sum(z_td, y_td) asc, w_td1 asc"), "//*[@numFound='4']", "//result/doc[1]/int[@name='id'][.='2']", "//result/doc[2]/int[@name='id'][.='1']", @@ -127,4 +127,4 @@ public class SortByFunctionTest extends AbstractSolrTestCase { /* 0931.0442muLti-Default2009-12-12T12:59:46.412Z4.02.01.0342muLti-Default2009-12-12T12:59:46.409Z3.02.01.0242muLti-Default2009-12-12T12:59:46.406Z2.02.01.0142muLti-Default2009-12-12T12:59:46.361Z0.02.0 -*/ \ No newline at end of file +*/ From e2970ad77c676abe4351a792ecb58490f3fc54e2 Mon Sep 17 00:00:00 2001 From: "Chris M. Hostetter" Date: Fri, 4 Feb 2011 00:16:49 +0000 Subject: [PATCH 088/185] clean up my nocommit mess git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1067044 13f79535-47bb-0310-9956-ffa450edef68 --- .../test/org/apache/solr/BaseDistributedSearchTestCase.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/solr/src/test/org/apache/solr/BaseDistributedSearchTestCase.java b/solr/src/test/org/apache/solr/BaseDistributedSearchTestCase.java index b1ea44c5de2..c32ccc52920 100644 --- a/solr/src/test/org/apache/solr/BaseDistributedSearchTestCase.java +++ b/solr/src/test/org/apache/solr/BaseDistributedSearchTestCase.java @@ -580,8 +580,7 @@ public abstract class BaseDistributedSearchTestCase extends SolrTestCaseJ4 { String cmp; cmp = compare(a.getResponse(), b.getResponse(), flags, handle); if (cmp != null) { - //log.info("Mismatched responses:\n" + a + "\n" + b); - System.err.println("Mismatched responses:\n" + a + "\n" + b); // :nocommit: + log.info("Mismatched responses:\n" + a + "\n" + b); TestCase.fail(cmp); } } From 1f9a474116b331a4ac9d213e476a1ec7f2507a2d Mon Sep 17 00:00:00 2001 From: Uwe Schindler Date: Fri, 4 Feb 2011 09:27:40 +0000 Subject: [PATCH 089/185] SOLR-96: Add test case git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1067119 13f79535-47bb-0310-9956-ffa450edef68 --- .../DocumentAnalysisRequestHandlerTest.java | 
99 ++++++++++++++++++- 1 file changed, 95 insertions(+), 4 deletions(-) diff --git a/solr/src/test/org/apache/solr/handler/DocumentAnalysisRequestHandlerTest.java b/solr/src/test/org/apache/solr/handler/DocumentAnalysisRequestHandlerTest.java index 2454bfba7cd..f33cfc8300f 100644 --- a/solr/src/test/org/apache/solr/handler/DocumentAnalysisRequestHandlerTest.java +++ b/solr/src/test/org/apache/solr/handler/DocumentAnalysisRequestHandlerTest.java @@ -30,8 +30,12 @@ import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; -import java.util.ArrayList; +import java.util.Collections; import java.util.List; +import java.io.ByteArrayInputStream; +import java.io.InputStream; +import java.io.IOException; +import java.io.Reader; /** * A test for {@link DocumentAnalysisRequestHandler}. @@ -71,15 +75,14 @@ public class DocumentAnalysisRequestHandlerTest extends AnalysisRequestHandlerTe "" + ""; - final List contentStreams = new ArrayList(1); - contentStreams.add(new ContentStreamBase.StringStream(docsInput)); + final ContentStream cs = new ContentStreamBase.StringStream(docsInput); ModifiableSolrParams params = new ModifiableSolrParams(); params.add("analysis.query", "The Query String"); params.add("analysis.showmatch", "true"); SolrQueryRequest req = new SolrQueryRequestBase(h.getCore(), params) { @Override public Iterable getContentStreams() { - return contentStreams; + return Collections.singleton(cs); } }; @@ -106,6 +109,94 @@ public class DocumentAnalysisRequestHandlerTest extends AnalysisRequestHandlerTe req.close(); } + /** A binary-only ContentStream */ + static class ByteStream extends ContentStreamBase { + private final byte[] bytes; + + public ByteStream(byte[] bytes, String contentType) { + this.bytes = bytes; + this.contentType = contentType; + name = null; + size = Long.valueOf(bytes.length); + sourceInfo = "rawBytes"; + } + + public InputStream getStream() throws IOException { + return new ByteArrayInputStream(bytes); + } + + @Override + 
public Reader getReader() throws IOException { + throw new IOException("This is a byte stream, Readers are not supported."); + } + } + + + // This test should also test charset detection in UpdateRequestHandler, + // but the DocumentAnalysisRequestHandler is simplier to use/check. + @Test + public void testCharsetInDocument() throws Exception { + final byte[] xmlBytes = ( + "\r\n" + + "\r\n" + + " \r\n" + + " Müller\r\n" + + " " + + "" + ).getBytes("ISO-8859-1"); + + // we declare a content stream without charset: + final ContentStream cs = new ByteStream(xmlBytes, "application/xml"); + + ModifiableSolrParams params = new ModifiableSolrParams(); + SolrQueryRequest req = new SolrQueryRequestBase(h.getCore(), params) { + @Override + public Iterable getContentStreams() { + return Collections.singleton(cs); + } + }; + + DocumentAnalysisRequest request = handler.resolveAnalysisRequest(req); + assertNotNull(request); + final List documents = request.getDocuments(); + assertNotNull(documents); + assertEquals(1, documents.size()); + SolrInputDocument doc = documents.get(0); + assertEquals("Müller", doc.getField("id").getValue()); + } + + // This test should also test charset detection in UpdateRequestHandler, + // but the DocumentAnalysisRequestHandler is simplier to use/check. 
+ @Test + public void testCharsetOutsideDocument() throws Exception { + final byte[] xmlBytes = ( + "\r\n" + + " \r\n" + + " Müller\r\n" + + " " + + "" + ).getBytes("ISO-8859-1"); + + // we declare a content stream without charset: + final ContentStream cs = new ByteStream(xmlBytes, "application/xml; charset=ISO-8859-1"); + + ModifiableSolrParams params = new ModifiableSolrParams(); + SolrQueryRequest req = new SolrQueryRequestBase(h.getCore(), params) { + @Override + public Iterable getContentStreams() { + return Collections.singleton(cs); + } + }; + + DocumentAnalysisRequest request = handler.resolveAnalysisRequest(req); + assertNotNull(request); + final List documents = request.getDocuments(); + assertNotNull(documents); + assertEquals(1, documents.size()); + SolrInputDocument doc = documents.get(0); + assertEquals("Müller", doc.getField("id").getValue()); + } + /** * Tests the {@link DocumentAnalysisRequestHandler#handleAnalysisRequest(org.apache.solr.client.solrj.request.DocumentAnalysisRequest, * org.apache.solr.schema.IndexSchema)} From 6f314071094e70ec392c7cee5a8278d412c0fb50 Mon Sep 17 00:00:00 2001 From: Koji Sekiguchi Date: Fri, 4 Feb 2011 10:19:52 +0000 Subject: [PATCH 090/185] SOLR-1057: Add PathHierarchyTokenizer git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1067131 13f79535-47bb-0310-9956-ffa450edef68 --- modules/analysis/CHANGES.txt | 3 + .../analysis/path/PathHierarchyTokenizer.java | 150 ++++++++++++++++++ .../path/TestPathHierarchyTokenizer.java | 130 +++++++++++++++ solr/CHANGES.txt | 2 + solr/example/solr/conf/schema.xml | 5 + .../PathHierarchyTokenizerFactory.java | 73 +++++++++ 6 files changed, 363 insertions(+) create mode 100644 modules/analysis/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizer.java create mode 100644 modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestPathHierarchyTokenizer.java create mode 100644 solr/src/java/org/apache/solr/analysis/PathHierarchyTokenizerFactory.java 
diff --git a/modules/analysis/CHANGES.txt b/modules/analysis/CHANGES.txt index d5bd6e3942f..69c72793c10 100644 --- a/modules/analysis/CHANGES.txt +++ b/modules/analysis/CHANGES.txt @@ -80,6 +80,9 @@ New Features - o.a.l.analysis.StopwordAnalyzerBase -> o.a.l.analysis.util.StopwordAnalyzerBase - o.a.l.analysis.WordListLoader -> o.a.l.analysis.util.WordListLoader + * SOLR-1057: Add PathHierarchyTokenizer that represents file path hierarchies as synonyms of + /something, /something/something, /something/something/else. (Ryan McKinley, Koji Sekiguchi) + Build * LUCENE-2413: All analyzers in contrib/analyzers and contrib/icu were moved to the diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizer.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizer.java new file mode 100644 index 00000000000..b0cd8d60cfc --- /dev/null +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizer.java @@ -0,0 +1,150 @@ +package org.apache.lucene.analysis.path; +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import java.io.IOException; +import java.io.Reader; + +import org.apache.lucene.analysis.Tokenizer; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; +import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; + +/** + * + * Take something like: + * + *

    + *  /soemthing/something/else
    + * 
    + * + * and make: + * + *
    + *  /soemthing
    + *  /soemthing/something
    + *  /soemthing/something/else
    + * 
    + * + */ +public class PathHierarchyTokenizer extends Tokenizer { + + public PathHierarchyTokenizer(Reader input) { + this(input, DEFAULT_BUFFER_SIZE, DEFAULT_DELIMITER); + } + + public PathHierarchyTokenizer(Reader input, int bufferSize, char delimiter) { + this(input, bufferSize, delimiter, delimiter); + } + + public PathHierarchyTokenizer(Reader input, char delimiter, char replacement) { + this(input, DEFAULT_BUFFER_SIZE, delimiter, replacement); + } + + public PathHierarchyTokenizer(Reader input, int bufferSize, char delimiter, char replacement) { + super(input); + termAtt.resizeBuffer(bufferSize); + this.delimiter = delimiter; + this.replacement = replacement; + endDelimiter = false; + resultToken = new StringBuilder(bufferSize); + } + + private static final int DEFAULT_BUFFER_SIZE = 1024; + public static final char DEFAULT_DELIMITER = '/'; + private final char delimiter; + private final char replacement; + + private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); + private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); + private final PositionIncrementAttribute posAtt = addAttribute(PositionIncrementAttribute.class); + private int finalOffset = 0; + private boolean endDelimiter; + private StringBuilder resultToken; + + @Override + public final boolean incrementToken() throws IOException { + clearAttributes(); + termAtt.append( resultToken ); + if(resultToken.length() == 0){ + posAtt.setPositionIncrement(1); + } + else{ + posAtt.setPositionIncrement(0); + } + int length = 0; + boolean added = false; + if( endDelimiter ){ + termAtt.append(replacement); + length++; + endDelimiter = false; + added = true; + } + + while (true) { + int c = input.read(); + if( c < 0 ) { + length += resultToken.length(); + termAtt.setLength(length); + finalOffset = correctOffset(length); + offsetAtt.setOffset(correctOffset(0), finalOffset); + if( added ){ + resultToken.setLength(0); + resultToken.append(termAtt.buffer(), 0, 
length); + } + return added; + } + added = true; + if( c == delimiter ) { + if( length > 0 ){ + endDelimiter = true; + break; + } + else{ + termAtt.append(replacement); + length++; + } + } + else { + termAtt.append((char)c); + length++; + } + } + + length += resultToken.length(); + termAtt.setLength(length); + finalOffset = correctOffset(length); + offsetAtt.setOffset(correctOffset(0), finalOffset); + resultToken.setLength(0); + resultToken.append(termAtt.buffer(), 0, length); + return true; + } + + @Override + public final void end() { + // set final offset + offsetAtt.setOffset(finalOffset, finalOffset); + } + + @Override + public void reset(Reader input) throws IOException { + super.reset(input); + resultToken.setLength(0); + finalOffset = 0; + endDelimiter = false; + } +} diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestPathHierarchyTokenizer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestPathHierarchyTokenizer.java new file mode 100644 index 00000000000..cb0adc9e474 --- /dev/null +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestPathHierarchyTokenizer.java @@ -0,0 +1,130 @@ +package org.apache.lucene.analysis.path; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.StringReader; + +import org.apache.lucene.analysis.BaseTokenStreamTestCase; +import org.apache.lucene.analysis.CharStream; +import org.apache.lucene.analysis.charfilter.MappingCharFilter; +import org.apache.lucene.analysis.charfilter.NormalizeCharMap; + +public class TestPathHierarchyTokenizer extends BaseTokenStreamTestCase { + + public void testBasic() throws Exception { + String path = "/a/b/c"; + PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path) ); + assertTokenStreamContents(t, + new String[]{"/a", "/a/b", "/a/b/c"}, + new int[]{0, 0, 0}, + new int[]{2, 4, 6}, + new int[]{1, 0, 0}, + path.length()); + } + + public void testEndOfDelimiter() throws Exception { + String path = "/a/b/c/"; + PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path) ); + assertTokenStreamContents(t, + new String[]{"/a", "/a/b", "/a/b/c", "/a/b/c/"}, + new int[]{0, 0, 0, 0}, + new int[]{2, 4, 6, 7}, + new int[]{1, 0, 0, 0}, + path.length()); + } + + public void testStartOfChar() throws Exception { + String path = "a/b/c"; + PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path) ); + assertTokenStreamContents(t, + new String[]{"a", "a/b", "a/b/c"}, + new int[]{0, 0, 0}, + new int[]{1, 3, 5}, + new int[]{1, 0, 0}, + path.length()); + } + + public void testStartOfCharEndOfDelimiter() throws Exception { + String path = "a/b/c/"; + PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path) ); + assertTokenStreamContents(t, + new String[]{"a", "a/b", "a/b/c", "a/b/c/"}, + new int[]{0, 0, 0, 0}, + new int[]{1, 3, 5, 6}, + new int[]{1, 0, 0, 0}, + path.length()); + } + + public void testOnlyDelimiter() throws Exception { + String path = "/"; + PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path) ); + assertTokenStreamContents(t, + new String[]{"/"}, + 
new int[]{0}, + new int[]{1}, + new int[]{1}, + path.length()); + } + + public void testOnlyDelimiters() throws Exception { + String path = "//"; + PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path) ); + assertTokenStreamContents(t, + new String[]{"/", "//"}, + new int[]{0, 0}, + new int[]{1, 2}, + new int[]{1, 0}, + path.length()); + } + + public void testReplace() throws Exception { + String path = "/a/b/c"; + PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path), '/', '\\' ); + assertTokenStreamContents(t, + new String[]{"\\a", "\\a\\b", "\\a\\b\\c"}, + new int[]{0, 0, 0}, + new int[]{2, 4, 6}, + new int[]{1, 0, 0}, + path.length()); + } + + public void testWindowsPath() throws Exception { + String path = "c:\\a\\b\\c"; + PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path), '\\', '\\' ); + assertTokenStreamContents(t, + new String[]{"c:", "c:\\a", "c:\\a\\b", "c:\\a\\b\\c"}, + new int[]{0, 0, 0, 0}, + new int[]{2, 4, 6, 8}, + new int[]{1, 0, 0, 0}, + path.length()); + } + + public void testNormalizeWinDelimToLinuxDelim() throws Exception { + NormalizeCharMap normMap = new NormalizeCharMap(); + normMap.add("\\", "/"); + String path = "c:\\a\\b\\c"; + CharStream cs = new MappingCharFilter(normMap, new StringReader(path)); + PathHierarchyTokenizer t = new PathHierarchyTokenizer( cs ); + assertTokenStreamContents(t, + new String[]{"c:", "c:/a", "c:/a/b", "c:/a/b/c"}, + new int[]{0, 0, 0, 0}, + new int[]{2, 4, 6, 8}, + new int[]{1, 0, 0, 0}, + path.length()); + } +} diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index 7e6506702f7..48c0ed88545 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -441,6 +441,8 @@ New Features * SOLR-860: Add debug output for MoreLikeThis. (koji) +* SOLR-1057: Add PathHierarchyTokenizerFactory. 
(ryan, koji) + Optimizations ---------------------- diff --git a/solr/example/solr/conf/schema.xml b/solr/example/solr/conf/schema.xml index 563b6732ae6..dc6afdcc785 100755 --- a/solr/example/solr/conf/schema.xml +++ b/solr/example/solr/conf/schema.xml @@ -376,6 +376,11 @@ + + + + + diff --git a/solr/src/java/org/apache/solr/analysis/PathHierarchyTokenizerFactory.java b/solr/src/java/org/apache/solr/analysis/PathHierarchyTokenizerFactory.java new file mode 100644 index 00000000000..50380764923 --- /dev/null +++ b/solr/src/java/org/apache/solr/analysis/PathHierarchyTokenizerFactory.java @@ -0,0 +1,73 @@ +package org.apache.solr.analysis; +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import java.io.Reader; +import java.util.Map; + +import org.apache.lucene.analysis.Tokenizer; +import org.apache.lucene.analysis.path.PathHierarchyTokenizer; + + +/** + * @version $Id$ + */ +public class PathHierarchyTokenizerFactory extends BaseTokenizerFactory { + + private char delimiter; + private char replacement; + + /** + * Require a configured pattern + */ + @Override + public void init(Map args){ + super.init( args ); + + String v = args.get( "delimiter" ); + if( v != null ){ + if( v.length() != 1 ){ + throw new IllegalArgumentException( "delimiter should be a char. \"" + v + "\" is invalid" ); + } + else{ + delimiter = v.charAt(0); + } + } + else{ + delimiter = PathHierarchyTokenizer.DEFAULT_DELIMITER; + } + + v = args.get( "replace" ); + if( v != null ){ + if( v.length() != 1 ){ + throw new IllegalArgumentException( "replace should be a char. \"" + v + "\" is invalid" ); + } + else{ + replacement = v.charAt(0); + } + } + else{ + replacement = delimiter; + } + } + + public Tokenizer create(Reader input) { + return new PathHierarchyTokenizer(input, delimiter, replacement); + } +} + + From 0188da6a73130c6314a6a5dc9fd873fc49f64963 Mon Sep 17 00:00:00 2001 From: Uwe Schindler Date: Fri, 4 Feb 2011 12:01:49 +0000 Subject: [PATCH 091/185] Fix lots of default charset violations in Solr caused by String.getBytes() and IOUtils.toString() [and others] git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1067160 13f79535-47bb-0310-9956-ffa450edef68 --- .../handler/dataimport/TestDataConfig.java | 5 +++-- .../org/apache/solr/core/CoreContainer.java | 2 +- .../solr/handler/DumpRequestHandler.java | 8 +++---- .../handler/admin/ShowFileRequestHandler.java | 2 +- .../solr/handler/admin/SystemInfoHandler.java | 1 + .../solrj/embedded/JettyWebappTest.java | 20 +++++++++--------- .../org/apache/solr/cloud/ZkTestServer.java | 2 +- .../solr/common/util/ContentStreamTest.java | 21 ++++++++++++------- .../apache/solr/common/util/DOMUtilTest.java | 5 +++-- 
.../DocumentAnalysisRequestHandlerTest.java | 2 +- .../solr/servlet/SolrRequestParserTest.java | 14 ++++++------- .../solr/servlet/SolrDispatchFilter.java | 2 +- 12 files changed, 47 insertions(+), 37 deletions(-) diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestDataConfig.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestDataConfig.java index cef348493dd..39b01ddedce 100644 --- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestDataConfig.java +++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestDataConfig.java @@ -19,9 +19,10 @@ package org.apache.solr.handler.dataimport; import org.junit.BeforeClass; import org.junit.Test; import org.w3c.dom.Document; +import org.xml.sax.InputSource; import javax.xml.parsers.DocumentBuilderFactory; -import java.io.ByteArrayInputStream; +import java.io.StringReader; import java.util.ArrayList; import java.util.List; @@ -55,7 +56,7 @@ public class TestDataConfig extends AbstractDataImportHandlerTestCase { public void testBasic() throws Exception { javax.xml.parsers.DocumentBuilder builder = DocumentBuilderFactory .newInstance().newDocumentBuilder(); - Document doc = builder.parse(new ByteArrayInputStream(xml.getBytes())); + Document doc = builder.parse(new InputSource(new StringReader(xml))); DataConfig dc = new DataConfig(); dc.readFromXml(doc.getDocumentElement()); diff --git a/solr/src/java/org/apache/solr/core/CoreContainer.java b/solr/src/java/org/apache/solr/core/CoreContainer.java index c26d66737bf..c8d158540c5 100644 --- a/solr/src/java/org/apache/solr/core/CoreContainer.java +++ b/solr/src/java/org/apache/solr/core/CoreContainer.java @@ -232,7 +232,7 @@ public class CoreContainer cores.load(solrHome, fconf); } else { log.info("no solr.xml file found - using default"); - cores.load(solrHome, new ByteArrayInputStream(DEF_SOLR_XML.getBytes())); + cores.load(solrHome, 
new ByteArrayInputStream(DEF_SOLR_XML.getBytes("UTF-8"))); cores.configFile = fconf; } diff --git a/solr/src/java/org/apache/solr/handler/DumpRequestHandler.java b/solr/src/java/org/apache/solr/handler/DumpRequestHandler.java index d858ef66e15..1d74889afaf 100644 --- a/solr/src/java/org/apache/solr/handler/DumpRequestHandler.java +++ b/solr/src/java/org/apache/solr/handler/DumpRequestHandler.java @@ -18,7 +18,7 @@ package org.apache.solr.handler; import java.io.IOException; -import java.io.InputStream; +import java.io.Reader; import java.util.ArrayList; import org.apache.commons.io.IOUtils; @@ -46,11 +46,11 @@ public class DumpRequestHandler extends RequestHandlerBase stream.add( "sourceInfo", content.getSourceInfo() ); stream.add( "size", content.getSize() ); stream.add( "contentType", content.getContentType() ); - InputStream is = content.getStream(); + Reader reader = content.getReader(); try { - stream.add( "stream", IOUtils.toString(is) ); + stream.add( "stream", IOUtils.toString(reader) ); } finally { - is.close(); + reader.close(); } streams.add( stream ); } diff --git a/solr/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java b/solr/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java index 96c53ceea97..cda4b94bd81 100644 --- a/solr/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java +++ b/solr/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java @@ -210,7 +210,7 @@ public class ShowFileRequestHandler extends RequestHandlerBase } try { InputStream input = core.getResourceLoader().openResource(path); - return IOUtils.toString( input ); + return IOUtils.toString( input, "UTF-8" ); } catch( Exception ex ) {} // ignore it return ""; diff --git a/solr/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java b/solr/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java index dfdd41c81aa..4a123944624 100644 --- a/solr/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java +++ 
b/solr/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java @@ -185,6 +185,7 @@ public class SystemInfoHandler extends RequestHandlerBase try { Process process = Runtime.getRuntime().exec(cmd); in = new DataInputStream( process.getInputStream() ); + // use default charset from locale here, because the command invoked also uses the default locale: return IOUtils.toString( in ); } catch( Exception ex ) { diff --git a/solr/src/test/org/apache/solr/client/solrj/embedded/JettyWebappTest.java b/solr/src/test/org/apache/solr/client/solrj/embedded/JettyWebappTest.java index 7a6068c7bb4..cce5d3dae04 100644 --- a/solr/src/test/org/apache/solr/client/solrj/embedded/JettyWebappTest.java +++ b/solr/src/test/org/apache/solr/client/solrj/embedded/JettyWebappTest.java @@ -86,23 +86,23 @@ public class JettyWebappTest extends LuceneTestCase // sure they compile ok String adminPath = "http://localhost:"+port+context+"/"; - String html = IOUtils.toString( new URL(adminPath).openStream() ); - assertNotNull( html ); // real error will be an exception + byte[] bytes = IOUtils.toByteArray( new URL(adminPath).openStream() ); + assertNotNull( bytes ); // real error will be an exception adminPath += "admin/"; - html = IOUtils.toString( new URL(adminPath).openStream() ); - assertNotNull( html ); // real error will be an exception + bytes = IOUtils.toByteArray( new URL(adminPath).openStream() ); + assertNotNull( bytes ); // real error will be an exception // analysis - html = IOUtils.toString( new URL(adminPath+"analysis.jsp").openStream() ); - assertNotNull( html ); // real error will be an exception + bytes = IOUtils.toByteArray( new URL(adminPath+"analysis.jsp").openStream() ); + assertNotNull( bytes ); // real error will be an exception // schema browser - html = IOUtils.toString( new URL(adminPath+"schema.jsp").openStream() ); - assertNotNull( html ); // real error will be an exception + bytes = IOUtils.toByteArray( new URL(adminPath+"schema.jsp").openStream() ); + assertNotNull( 
bytes ); // real error will be an exception // schema browser - html = IOUtils.toString( new URL(adminPath+"threaddump.jsp").openStream() ); - assertNotNull( html ); // real error will be an exception + bytes = IOUtils.toByteArray( new URL(adminPath+"threaddump.jsp").openStream() ); + assertNotNull( bytes ); // real error will be an exception } } diff --git a/solr/src/test/org/apache/solr/cloud/ZkTestServer.java b/solr/src/test/org/apache/solr/cloud/ZkTestServer.java index d435910d790..e5dcfed39fe 100644 --- a/solr/src/test/org/apache/solr/cloud/ZkTestServer.java +++ b/solr/src/test/org/apache/solr/cloud/ZkTestServer.java @@ -279,7 +279,7 @@ public class ZkTestServer { BufferedReader reader = null; try { OutputStream outstream = sock.getOutputStream(); - outstream.write(cmd.getBytes()); + outstream.write(cmd.getBytes("US-ASCII")); outstream.flush(); // this replicates NC - close the output stream before reading sock.shutdownOutput(); diff --git a/solr/src/test/org/apache/solr/common/util/ContentStreamTest.java b/solr/src/test/org/apache/solr/common/util/ContentStreamTest.java index 01e0b6985f9..27b12dc7254 100755 --- a/solr/src/test/org/apache/solr/common/util/ContentStreamTest.java +++ b/solr/src/test/org/apache/solr/common/util/ContentStreamTest.java @@ -27,6 +27,7 @@ import java.io.InputStream; import java.io.StringReader; import java.net.ConnectException; import java.net.URL; +import java.net.URLConnection; import org.apache.commons.io.IOUtils; import org.apache.lucene.util.LuceneTestCase; @@ -64,12 +65,15 @@ public class ContentStreamTest extends LuceneTestCase public void testURLStream() throws IOException { - String content = null; + byte[] content = null; + String contentType = null; URL url = new URL( "http://svn.apache.org/repos/asf/lucene/dev/trunk/" ); InputStream in = null; try { - in = url.openStream(); - content = IOUtils.toString( in ); + URLConnection conn = url.openConnection(); + in = conn.getInputStream(); + contentType = conn.getContentType(); 
+ content = IOUtils.toByteArray(in); } catch (ConnectException ex) { assumeNoException("Unable to connect to " + url + " to run the test.", ex); }finally { @@ -78,23 +82,26 @@ public class ContentStreamTest extends LuceneTestCase } } - assertTrue( content.length() > 10 ); // found something... + assertTrue( content.length > 10 ); // found something... ContentStreamBase stream = new ContentStreamBase.URLStream( url ); - assertEquals( content.length(), stream.getSize().intValue() ); + assertEquals( content.length, stream.getSize().intValue() ); // Test the stream in = stream.getStream(); try { assertTrue( IOUtils.contentEquals( - new ByteArrayInputStream( content.getBytes() ), in ) ); + new ByteArrayInputStream(content), in ) ); } finally { IOUtils.closeQuietly(in); } + String charset = ContentStreamBase.getCharsetFromContentType(contentType); + if (charset == null) + charset = ContentStreamBase.DEFAULT_CHARSET; // Re-open the stream and this time use a reader stream = new ContentStreamBase.URLStream( url ); - assertTrue( IOUtils.contentEquals( new StringReader( content ), stream.getReader() ) ); + assertTrue( IOUtils.contentEquals( new StringReader(new String(content, charset)), stream.getReader() ) ); } } diff --git a/solr/src/test/org/apache/solr/common/util/DOMUtilTest.java b/solr/src/test/org/apache/solr/common/util/DOMUtilTest.java index 87403355472..069c2f933e1 100644 --- a/solr/src/test/org/apache/solr/common/util/DOMUtilTest.java +++ b/solr/src/test/org/apache/solr/common/util/DOMUtilTest.java @@ -17,7 +17,7 @@ package org.apache.solr.common.util; -import java.io.ByteArrayInputStream; +import java.io.StringReader; import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; @@ -27,6 +27,7 @@ import javax.xml.xpath.XPathFactory; import org.w3c.dom.Document; import org.w3c.dom.Node; +import org.xml.sax.InputSource; import org.apache.lucene.util.LuceneTestCase; @@ -85,6 +86,6 @@ public class DOMUtilTest extends LuceneTestCase { } 
public Document getDocument( String xml ) throws Exception { - return builder.parse( new ByteArrayInputStream( xml.getBytes() ) ); + return builder.parse(new InputSource(new StringReader(xml))); } } diff --git a/solr/src/test/org/apache/solr/handler/DocumentAnalysisRequestHandlerTest.java b/solr/src/test/org/apache/solr/handler/DocumentAnalysisRequestHandlerTest.java index f33cfc8300f..1753d77cb84 100644 --- a/solr/src/test/org/apache/solr/handler/DocumentAnalysisRequestHandlerTest.java +++ b/solr/src/test/org/apache/solr/handler/DocumentAnalysisRequestHandlerTest.java @@ -177,7 +177,7 @@ public class DocumentAnalysisRequestHandlerTest extends AnalysisRequestHandlerTe "" ).getBytes("ISO-8859-1"); - // we declare a content stream without charset: + // we declare a content stream with charset: final ContentStream cs = new ByteStream(xmlBytes, "application/xml; charset=ISO-8859-1"); ModifiableSolrParams params = new ModifiableSolrParams(); diff --git a/solr/src/test/org/apache/solr/servlet/SolrRequestParserTest.java b/solr/src/test/org/apache/solr/servlet/SolrRequestParserTest.java index cc763c885b6..6f7cb427282 100644 --- a/solr/src/test/org/apache/solr/servlet/SolrRequestParserTest.java +++ b/solr/src/test/org/apache/solr/servlet/SolrRequestParserTest.java @@ -74,7 +74,7 @@ public class SolrRequestParserTest extends SolrTestCaseJ4 { List streams = new ArrayList(); SolrQueryRequest req = parser.buildRequestFrom( core, new MultiMapSolrParams( args ), streams ); assertEquals( 1, streams.size() ); - assertEquals( body1, IOUtils.toString( streams.get(0).getStream() ) ); + assertEquals( body1, IOUtils.toString( streams.get(0).getReader() ) ); req.close(); // Now add three and make sure they come out ok @@ -87,9 +87,9 @@ public class SolrRequestParserTest extends SolrTestCaseJ4 { input.add( body1 ); input.add( body2 ); input.add( body3 ); - output.add( IOUtils.toString( streams.get(0).getStream() ) ); - output.add( IOUtils.toString( streams.get(1).getStream() ) ); - 
output.add( IOUtils.toString( streams.get(2).getStream() ) ); + output.add( IOUtils.toString( streams.get(0).getReader() ) ); + output.add( IOUtils.toString( streams.get(1).getReader() ) ); + output.add( IOUtils.toString( streams.get(2).getReader() ) ); // sort them so the output is consistent Collections.sort( input ); Collections.sort( output ); @@ -112,13 +112,13 @@ public class SolrRequestParserTest extends SolrTestCaseJ4 { { boolean ok = false; String url = "http://www.apache.org/dist/lucene/solr/"; - String txt = null; + byte[] bytes = null; try { URLConnection connection = new URL(url).openConnection(); connection.setConnectTimeout(5000); connection.setReadTimeout(5000); connection.connect(); - txt = IOUtils.toString( connection.getInputStream()); + bytes = IOUtils.toByteArray( connection.getInputStream()); } catch( Exception ex ) { assumeNoException("Unable to connect to " + url + " to run the test.", ex); @@ -134,7 +134,7 @@ public class SolrRequestParserTest extends SolrTestCaseJ4 { List streams = new ArrayList(); SolrQueryRequest req = parser.buildRequestFrom( core, new MultiMapSolrParams( args ), streams ); assertEquals( 1, streams.size() ); - assertEquals( txt, IOUtils.toString( streams.get(0).getStream() ) ); + assertArrayEquals( bytes, IOUtils.toByteArray( streams.get(0).getStream() ) ); req.close(); } diff --git a/solr/src/webapp/src/org/apache/solr/servlet/SolrDispatchFilter.java b/solr/src/webapp/src/org/apache/solr/servlet/SolrDispatchFilter.java index 0dcc8373d95..4250dd4e41a 100644 --- a/solr/src/webapp/src/org/apache/solr/servlet/SolrDispatchFilter.java +++ b/solr/src/webapp/src/org/apache/solr/servlet/SolrDispatchFilter.java @@ -65,7 +65,7 @@ public class SolrDispatchFilter implements Filter public SolrDispatchFilter() { try { - adminRequestParser = new SolrRequestParsers(new Config(null,"solr",new ByteArrayInputStream("".getBytes()),"") ); + adminRequestParser = new SolrRequestParsers(new Config(null,"solr",new 
ByteArrayInputStream("".getBytes("UTF-8")),"") ); } catch (Exception e) { //unlikely throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,e); From 783c8fd7b58521fd4d798a391606225dc12240eb Mon Sep 17 00:00:00 2001 From: Uwe Schindler Date: Fri, 4 Feb 2011 12:27:16 +0000 Subject: [PATCH 092/185] More charset violations git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1067163 13f79535-47bb-0310-9956-ffa450edef68 --- .../dataimport/TestXPathEntityProcessor.java | 4 ++-- .../solr/response/SolrParamResourceLoader.java | 7 ++++++- .../solr/servlet/cache/HttpCacheHeaderUtil.java | 15 ++++++++++----- 3 files changed, 18 insertions(+), 8 deletions(-) diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestXPathEntityProcessor.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestXPathEntityProcessor.java index 6f6fd817fa6..596fa33b9d9 100644 --- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestXPathEntityProcessor.java +++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestXPathEntityProcessor.java @@ -46,7 +46,7 @@ public class TestXPathEntityProcessor extends AbstractDataImportHandlerTestCase tmpdir.delete(); tmpdir.mkdir(); tmpdir.deleteOnExit(); - createFile(tmpdir, "x.xsl", xsl.getBytes(), false); + createFile(tmpdir, "x.xsl", xsl.getBytes("UTF-8"), false); Map entityAttrs = createMap("name", "e", "url", "cd.xml", XPathEntityProcessor.FOR_EACH, "/catalog/cd"); List fields = new ArrayList(); @@ -211,7 +211,7 @@ public class TestXPathEntityProcessor extends AbstractDataImportHandlerTestCase tmpdir.delete(); tmpdir.mkdir(); tmpdir.deleteOnExit(); - TestFileListEntityProcessor.createFile(tmpdir, "x.xsl", xsl.getBytes(), + TestFileListEntityProcessor.createFile(tmpdir, "x.xsl", xsl.getBytes("UTF-8"), false); Map entityAttrs = createMap("name", "e", XPathEntityProcessor.USE_SOLR_ADD_SCHEMA, "true", "xsl", "" 
diff --git a/solr/src/java/org/apache/solr/response/SolrParamResourceLoader.java b/solr/src/java/org/apache/solr/response/SolrParamResourceLoader.java index 9784fe01efa..6319a6026ce 100644 --- a/solr/src/java/org/apache/solr/response/SolrParamResourceLoader.java +++ b/solr/src/java/org/apache/solr/response/SolrParamResourceLoader.java @@ -25,6 +25,7 @@ import org.apache.commons.collections.ExtendedProperties; import java.io.ByteArrayInputStream; import java.io.InputStream; +import java.io.UnsupportedEncodingException; import java.util.HashMap; import java.util.Iterator; import java.util.Map; @@ -56,7 +57,11 @@ public class SolrParamResourceLoader extends ResourceLoader { @Override public InputStream getResourceStream(String s) throws ResourceNotFoundException { String template = templates.get(s); - return template == null ? null : new ByteArrayInputStream(template.getBytes()); + try { + return template == null ? null : new ByteArrayInputStream(template.getBytes("UTF-8")); + } catch (UnsupportedEncodingException e) { + throw new RuntimeException(e); // may not happen + } } @Override diff --git a/solr/src/webapp/src/org/apache/solr/servlet/cache/HttpCacheHeaderUtil.java b/solr/src/webapp/src/org/apache/solr/servlet/cache/HttpCacheHeaderUtil.java index 3a905cc545c..ce1d55680be 100644 --- a/solr/src/webapp/src/org/apache/solr/servlet/cache/HttpCacheHeaderUtil.java +++ b/solr/src/webapp/src/org/apache/solr/servlet/cache/HttpCacheHeaderUtil.java @@ -18,6 +18,7 @@ package org.apache.solr.servlet.cache; import java.io.IOException; +import java.io.UnsupportedEncodingException; import java.util.Collections; import java.util.Map; import java.util.WeakHashMap; @@ -75,11 +76,15 @@ public final class HttpCacheHeaderUtil { if (currentIndexVersion != indexVersionCache) { indexVersionCache=currentIndexVersion; - etagCache = "\"" - + new String(Base64.encodeBase64((Long.toHexString - (Long.reverse(indexVersionCache)) - + etagSeed).getBytes())) - + "\""; + try { + etagCache = "\"" + 
+ new String(Base64.encodeBase64((Long.toHexString + (Long.reverse(indexVersionCache)) + + etagSeed).getBytes()), "US-ASCII") + + "\""; + } catch (UnsupportedEncodingException e) { + throw new RuntimeException(e); // may not happen + } } return etagCache; From 7c69aee7cc6e05d0f924a8b13bbd97d26b738bcb Mon Sep 17 00:00:00 2001 From: Uwe Schindler Date: Fri, 4 Feb 2011 12:37:31 +0000 Subject: [PATCH 093/185] More charset violations git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1067165 13f79535-47bb-0310-9956-ffa450edef68 --- .../src/test/org/apache/solr/common/util/ContentStreamTest.java | 2 +- .../test/org/apache/solr/handler/TestReplicationHandler.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/solr/src/test/org/apache/solr/common/util/ContentStreamTest.java b/solr/src/test/org/apache/solr/common/util/ContentStreamTest.java index 27b12dc7254..ec989f8a0c9 100755 --- a/solr/src/test/org/apache/solr/common/util/ContentStreamTest.java +++ b/solr/src/test/org/apache/solr/common/util/ContentStreamTest.java @@ -43,7 +43,7 @@ public class ContentStreamTest extends LuceneTestCase String input = "aads ghaskdgasgldj asl sadg ajdsg &jag # @ hjsakg hsakdg hjkas s"; ContentStreamBase stream = new ContentStreamBase.StringStream( input ); assertEquals( input.length(), stream.getSize().intValue() ); - assertEquals( input, IOUtils.toString( stream.getStream() ) ); + assertEquals( input, IOUtils.toString( stream.getStream(), "UTF-8" ) ); assertEquals( input, IOUtils.toString( stream.getReader() ) ); } diff --git a/solr/src/test/org/apache/solr/handler/TestReplicationHandler.java b/solr/src/test/org/apache/solr/handler/TestReplicationHandler.java index da6013b5097..ca75ff98c9f 100644 --- a/solr/src/test/org/apache/solr/handler/TestReplicationHandler.java +++ b/solr/src/test/org/apache/solr/handler/TestReplicationHandler.java @@ -655,7 +655,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 { try { url = new URL(masterUrl); stream = 
url.openStream(); - response = IOUtils.toString(stream); + response = IOUtils.toString(stream, "UTF-8"); if(response.contains("success")) { success = true; } From cdb47ea15f85619aa31b6fa995c3aad7cdc99b5f Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Fri, 4 Feb 2011 13:11:40 +0000 Subject: [PATCH 094/185] SOLR-1057: fix wrong classname in example schema git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1067176 13f79535-47bb-0310-9956-ffa450edef68 --- solr/example/solr/conf/schema.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/solr/example/solr/conf/schema.xml b/solr/example/solr/conf/schema.xml index dc6afdcc785..bad81dbbba8 100755 --- a/solr/example/solr/conf/schema.xml +++ b/solr/example/solr/conf/schema.xml @@ -378,7 +378,7 @@ - + From 2de3b26a09423fbeb88713a8f86f13bcef45bbf1 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Fri, 4 Feb 2011 21:54:32 +0000 Subject: [PATCH 095/185] LUCENE-2904: LogMP pays attention to which segments are already being merged git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1067299 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/lucene/index/IndexWriter.java | 12 ++++ .../apache/lucene/index/LogMergePolicy.java | 59 ++++++++++++------- 2 files changed, 51 insertions(+), 20 deletions(-) diff --git a/lucene/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/src/java/org/apache/lucene/index/IndexWriter.java index 8da73a3cdd0..44d909265b3 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexWriter.java +++ b/lucene/src/java/org/apache/lucene/index/IndexWriter.java @@ -1829,6 +1829,18 @@ public class IndexWriter implements Closeable { } } + /** Expert: to be used by a {@link MergePolicy} to avoid + * selecting merges for segments already being merged. + * The returned collection is not cloned, and thus is + * only safe to access if you hold IndexWriter's lock + * (which you do when IndexWriter invokes the + * MergePolicy). + * + *

    Do not alter the returned collection! */ + public synchronized Collection getMergingSegments() { + return mergingSegments; + } + /** Expert: the {@link MergeScheduler} calls this method * to retrieve the next merge requested by the * MergePolicy */ diff --git a/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java b/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java index 8c53d24bc0e..1925a78d74d 100644 --- a/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java +++ b/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java @@ -18,9 +18,12 @@ package org.apache.lucene.index; */ import java.io.IOException; -import java.util.Set; -import java.util.Arrays; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; import java.util.Comparator; +import java.util.List; +import java.util.Set; /**

    This class implements a {@link MergePolicy} that tries * to merge segments into levels of exponentially @@ -474,7 +477,7 @@ public abstract class LogMergePolicy extends MergePolicy { return spec; } - private static class SegmentInfoAndLevel implements Comparable { + private static class SegmentInfoAndLevel implements Comparable { SegmentInfo info; float level; int index; @@ -486,8 +489,7 @@ public abstract class LogMergePolicy extends MergePolicy { } // Sorts largest to smallest - public int compareTo(Object o) { - SegmentInfoAndLevel other = (SegmentInfoAndLevel) o; + public int compareTo(SegmentInfoAndLevel other) { if (level < other.level) return 1; else if (level > other.level) @@ -521,22 +523,37 @@ public abstract class LogMergePolicy extends MergePolicy { // Compute levels, which is just log (base mergeFactor) // of the size of each segment - SegmentInfoAndLevel[] levels = new SegmentInfoAndLevel[numSegments]; + final List levels = new ArrayList(); final float norm = (float) Math.log(mergeFactor); + final Collection mergingSegments = writer.get().getMergingSegments(); + for(int i=0;i Date: Sat, 5 Feb 2011 00:35:09 +0000 Subject: [PATCH 096/185] LUCENE-1540: Improvements to contrib.benchmark for TREC collections - port/merge from 3x. 
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1067359 13f79535-47bb-0310-9956-ffa450edef68 --- .../index/TestBackwardsCompatibility.java | 173 ++++++----------- .../org/apache/lucene/util/_TestUtil.java | 50 +++++ modules/benchmark/CHANGES.txt | 6 + .../benchmark/byTask/feeds/ContentSource.java | 31 +-- .../byTask/feeds/DemoHTMLParser.java | 7 +- .../benchmark/byTask/feeds/HTMLParser.java | 14 +- .../byTask/feeds/TrecContentSource.java | 176 +++++++++--------- .../benchmark/byTask/feeds/TrecDocParser.java | 135 ++++++++++++++ .../byTask/feeds/TrecFBISParser.java | 65 +++++++ .../byTask/feeds/TrecFR94Parser.java | 66 +++++++ .../benchmark/byTask/feeds/TrecFTParser.java | 57 ++++++ .../byTask/feeds/TrecGov2Parser.java | 59 ++++++ .../byTask/feeds/TrecLATimesParser.java | 71 +++++++ .../byTask/feeds/TrecParserByPath.java | 33 ++++ .../byTask/utils/StringBuilderReader.java | 2 + .../byTask/feeds/TrecContentSourceTest.java | 63 +++++++ .../benchmark/byTask/feeds/trecdocs.zip | Bin 0 -> 2676 bytes 17 files changed, 785 insertions(+), 223 deletions(-) create mode 100644 modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecDocParser.java create mode 100644 modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFBISParser.java create mode 100644 modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFR94Parser.java create mode 100644 modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFTParser.java create mode 100755 modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecGov2Parser.java create mode 100644 modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecLATimesParser.java create mode 100644 modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecParserByPath.java create mode 100644 modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/trecdocs.zip diff --git 
a/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java index 729c64ff531..39eeef0f81b 100644 --- a/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java +++ b/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java @@ -17,20 +17,13 @@ package org.apache.lucene.index; * limitations under the License. */ -import java.io.BufferedOutputStream; import java.io.ByteArrayOutputStream; import java.io.File; -import java.io.FileOutputStream; import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; import java.io.PrintStream; import java.util.Arrays; -import java.util.Enumeration; import java.util.List; import java.util.Random; -import java.util.zip.ZipEntry; -import java.util.zip.ZipFile; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; @@ -78,39 +71,6 @@ public class TestBackwardsCompatibility extends LuceneTestCase { } */ - /* Unzips zipName --> dirName, removing dirName - first */ - public void unzip(File zipName, String destDirName) throws IOException { - - ZipFile zipFile = new ZipFile(zipName); - - Enumeration entries = zipFile.entries(); - - String dirName = fullDir(destDirName); - - File fileDir = new File(dirName); - rmDir(destDirName); - - fileDir.mkdir(); - - while (entries.hasMoreElements()) { - ZipEntry entry = entries.nextElement(); - - InputStream in = zipFile.getInputStream(entry); - OutputStream out = new BufferedOutputStream(new FileOutputStream(new File(fileDir, entry.getName()))); - - byte[] buffer = new byte[8192]; - int len; - while((len = in.read(buffer)) >= 0) { - out.write(buffer, 0, len); - } - - in.close(); - out.close(); - } - - zipFile.close(); - } /* public void testCreateCFS() throws IOException { String dirName = "testindex.cfs"; @@ -153,10 +113,9 @@ public class TestBackwardsCompatibility extends LuceneTestCase { if (VERBOSE) { 
System.out.println("TEST: index " + unsupportedNames[i]); } - unzip(getDataFile("unsupported." + unsupportedNames[i] + ".zip"), unsupportedNames[i]); - - String fullPath = fullDir(unsupportedNames[i]); - Directory dir = newFSDirectory(new File(fullPath)); + File oldIndxeDir = _TestUtil.getTempDir(unsupportedNames[i]); + _TestUtil.unzip(getDataFile("unsupported." + unsupportedNames[i] + ".zip"), oldIndxeDir); + Directory dir = newFSDirectory(oldIndxeDir); IndexReader reader = null; IndexWriter writer = null; @@ -200,7 +159,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase { assertTrue(bos.toString().contains(IndexFormatTooOldException.class.getName())); dir.close(); - rmDir(unsupportedNames[i]); + _TestUtil.rmDir(oldIndxeDir); } } @@ -209,10 +168,9 @@ public class TestBackwardsCompatibility extends LuceneTestCase { if (VERBOSE) { System.out.println("\nTEST: index=" + oldNames[i]); } - unzip(getDataFile("index." + oldNames[i] + ".zip"), oldNames[i]); - - String fullPath = fullDir(oldNames[i]); - Directory dir = newFSDirectory(new File(fullPath)); + File oldIndxeDir = _TestUtil.getTempDir(oldNames[i]); + _TestUtil.unzip(getDataFile("index." + oldNames[i] + ".zip"), oldIndxeDir); + Directory dir = newFSDirectory(oldIndxeDir); IndexWriter w = new IndexWriter(dir, new IndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())); @@ -223,15 +181,15 @@ public class TestBackwardsCompatibility extends LuceneTestCase { _TestUtil.checkIndex(dir); dir.close(); - rmDir(oldNames[i]); + _TestUtil.rmDir(oldIndxeDir); } } public void testAddOldIndexes() throws IOException { for (String name : oldNames) { - unzip(getDataFile("index." + name + ".zip"), name); - String fullPath = fullDir(name); - Directory dir = newFSDirectory(new File(fullPath)); + File oldIndxeDir = _TestUtil.getTempDir(name); + _TestUtil.unzip(getDataFile("index." 
+ name + ".zip"), oldIndxeDir); + Directory dir = newFSDirectory(oldIndxeDir); Directory targetDir = newDirectory(); IndexWriter w = new IndexWriter(targetDir, newIndexWriterConfig( @@ -243,15 +201,15 @@ public class TestBackwardsCompatibility extends LuceneTestCase { dir.close(); targetDir.close(); - rmDir(name); + _TestUtil.rmDir(oldIndxeDir); } } public void testAddOldIndexesReader() throws IOException { for (String name : oldNames) { - unzip(getDataFile("index." + name + ".zip"), name); - String fullPath = fullDir(name); - Directory dir = newFSDirectory(new File(fullPath)); + File oldIndxeDir = _TestUtil.getTempDir(name); + _TestUtil.unzip(getDataFile("index." + name + ".zip"), oldIndxeDir); + Directory dir = newFSDirectory(oldIndxeDir); IndexReader reader = IndexReader.open(dir); Directory targetDir = newDirectory(); @@ -265,23 +223,25 @@ public class TestBackwardsCompatibility extends LuceneTestCase { dir.close(); targetDir.close(); - rmDir(name); + _TestUtil.rmDir(oldIndxeDir); } } public void testSearchOldIndex() throws IOException { for(int i=0;i entries = zipFile.entries(); + + rmDir(destDir); + + destDir.mkdir(); + + while (entries.hasMoreElements()) { + ZipEntry entry = entries.nextElement(); + + InputStream in = zipFile.getInputStream(entry); + File targetFile = new File(destDir, entry.getName()); + if (entry.isDirectory()) { + // allow unzipping with directory structure + targetFile.mkdirs(); + } else { + if (targetFile.getParentFile()!=null) { + // be on the safe side: do not rely on that directories are always extracted + // before their children (although this makes sense, but is it guaranteed?) 
+ targetFile.getParentFile().mkdirs(); + } + OutputStream out = new BufferedOutputStream(new FileOutputStream(targetFile)); + + byte[] buffer = new byte[8192]; + int len; + while((len = in.read(buffer)) >= 0) { + out.write(buffer, 0, len); + } + + in.close(); + out.close(); + } + } + + zipFile.close(); + } + public static void syncConcurrentMerges(IndexWriter writer) { syncConcurrentMerges(writer.getConfig().getMergeScheduler()); } diff --git a/modules/benchmark/CHANGES.txt b/modules/benchmark/CHANGES.txt index 3811723e38b..8f5f082e7dd 100644 --- a/modules/benchmark/CHANGES.txt +++ b/modules/benchmark/CHANGES.txt @@ -2,6 +2,12 @@ Lucene Benchmark Contrib Change Log The Benchmark contrib package contains code for benchmarking Lucene in a variety of ways. +02/05/2011 + LUCENE-1540: Improvements to contrib.benchmark for TREC collections. + ContentSource can now process plain text files, gzip files, and bzip2 files. + TREC doc parsing now handles the TREC gov2 collection and TREC disks 4&5-CR + collection (both used by many TREC tasks). (Shai Erera, Doron Cohen) + 01/26/2011 LUCENE-929: ExtractReuters first extracts to a tmp dir and then renames. 
That way, if a previous extract attempt failed, "ant extract-reuters" will still diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ContentSource.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ContentSource.java index 817e57d1c03..b831e69adab 100644 --- a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ContentSource.java +++ b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ContentSource.java @@ -56,11 +56,14 @@ import org.apache.lucene.benchmark.byTask.utils.Config; public abstract class ContentSource { private static final int BZIP = 0; - private static final int OTHER = 1; + private static final int GZIP = 1; + private static final int OTHER = 2; private static final Map extensionToType = new HashMap(); static { extensionToType.put(".bz2", Integer.valueOf(BZIP)); extensionToType.put(".bzip", Integer.valueOf(BZIP)); + extensionToType.put(".gz", Integer.valueOf(GZIP)); + extensionToType.put(".gzip", Integer.valueOf(GZIP)); } protected static final int BUFFER_SIZE = 1 << 16; // 64K @@ -78,11 +81,13 @@ public abstract class ContentSource { private CompressorStreamFactory csFactory = new CompressorStreamFactory(); + /** update count of bytes generated by this source */ protected final synchronized void addBytes(long numBytes) { bytesCount += numBytes; totalBytesCount += numBytes; } + /** update count of documents generated by this source */ protected final synchronized void addDoc() { ++docsCount; ++totalDocsCount; @@ -130,21 +135,25 @@ public abstract class ContentSource { type = typeInt.intValue(); } } - switch (type) { - case BZIP: - try { + + try { + switch (type) { + case BZIP: // According to BZip2CompressorInputStream's code, it reads the first // two file header chars ('B' and 'Z'). It is important to wrap the // underlying input stream with a buffered one since // Bzip2CompressorInputStream uses the read() method exclusively. 
is = csFactory.createCompressorInputStream("bzip2", is); - } catch (CompressorException e) { - IOException ioe = new IOException(e.getMessage()); - ioe.initCause(e); - throw ioe; - } - break; - default: // Do nothing, stay with FileInputStream + break; + case GZIP: + is = csFactory.createCompressorInputStream("gz", is); + break; + default: // Do nothing, stay with FileInputStream + } + } catch (CompressorException e) { + IOException ioe = new IOException(e.getMessage()); + ioe.initCause(e); + throw ioe; } return is; diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DemoHTMLParser.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DemoHTMLParser.java index d57777a0036..873c658a338 100755 --- a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DemoHTMLParser.java +++ b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DemoHTMLParser.java @@ -29,11 +29,14 @@ import java.util.Properties; */ public class DemoHTMLParser implements org.apache.lucene.benchmark.byTask.feeds.HTMLParser { - public DocData parse(DocData docData, String name, Date date, Reader reader, DateFormat dateFormat) throws IOException, InterruptedException { + public DocData parse(DocData docData, String name, Date date, String title, Reader reader, DateFormat dateFormat) throws IOException, InterruptedException { org.apache.lucene.demo.html.HTMLParser p = new org.apache.lucene.demo.html.HTMLParser(reader); // title - String title = p.getTitle(); + if (title==null) { + title = p.getTitle(); + } + // properties Properties props = p.getMetaTags(); // body diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/HTMLParser.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/HTMLParser.java index 6c8b9fa4a87..47eed373e5f 100755 --- a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/HTMLParser.java +++ 
b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/HTMLParser.java @@ -29,16 +29,18 @@ public interface HTMLParser { /** * Parse the input Reader and return DocData. - * A provided name or date is used for the result, otherwise an attempt is - * made to set them from the parsed data. - * @param dateFormat date formatter to use for extracting the date. - * @param name name of the result doc data. If null, attempt to set by parsed data. + * The provided name,title,date are used for the result, unless when they're null, + * in which case an attempt is made to set them from the parsed data. + * @param docData result reused + * @param name name of the result doc data. * @param date date of the result doc data. If null, attempt to set by parsed data. - * @param reader of html text to parse. + * @param title title of the result doc data. If null, attempt to set by parsed data. + * @param reader reader of html text to parse. + * @param dateFormat date formatter to use for extracting the date. * @return Parsed doc data. 
* @throws IOException * @throws InterruptedException */ - public DocData parse(DocData docData, String name, Date date, Reader reader, DateFormat dateFormat) throws IOException, InterruptedException; + public DocData parse(DocData docData, String name, Date date, String title, Reader reader, DateFormat dateFormat) throws IOException, InterruptedException; } diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecContentSource.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecContentSource.java index 1101e661c91..d60a12ccf90 100644 --- a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecContentSource.java +++ b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecContentSource.java @@ -19,8 +19,8 @@ package org.apache.lucene.benchmark.byTask.feeds; import java.io.BufferedReader; import java.io.File; -import java.io.FileInputStream; import java.io.IOException; +import java.io.InputStream; import java.io.InputStreamReader; import java.io.Reader; import java.text.DateFormat; @@ -29,8 +29,8 @@ import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Date; import java.util.Locale; -import java.util.zip.GZIPInputStream; +import org.apache.lucene.benchmark.byTask.feeds.TrecDocParser.ParsePathType; import org.apache.lucene.benchmark.byTask.utils.Config; import org.apache.lucene.benchmark.byTask.utils.StringBuilderReader; import org.apache.lucene.util.ThreadInterruptedException; @@ -46,8 +46,10 @@ import org.apache.lucene.util.ThreadInterruptedException; *

  • docs.dir - specifies the directory where the TREC files reside. * Can be set to a relative path if "work.dir" is also specified * (default=trec). + *
  • trec.doc.parser - specifies the {@link TrecDocParser} class to use for + * parsing the TREC documents content (default=TrecGov2Parser). *
  • html.parser - specifies the {@link HTMLParser} class to use for - * parsing the TREC documents content (default=DemoHTMLParser). + * parsing the HTML parts of the TREC documents content (default=DemoHTMLParser). *
  • content.source.encoding - if not specified, ISO-8859-1 is used. *
  • content.source.excludeIteration - if true, do not append iteration number to docname * @@ -59,22 +61,24 @@ public class TrecContentSource extends ContentSource { ParsePosition pos; } - private static final String DATE = "Date: "; - private static final String DOCHDR = ""; - private static final String TERMINATING_DOCHDR = ""; - private static final String DOCNO = ""; - private static final String TERMINATING_DOCNO = ""; - private static final String DOC = ""; - private static final String TERMINATING_DOC = ""; + public static final String DOCNO = ""; + public static final String TERMINATING_DOCNO = ""; + public static final String DOC = ""; + public static final String TERMINATING_DOC = ""; - private static final String NEW_LINE = System.getProperty("line.separator"); + /** separator between lines in the byffer */ + public static final String NEW_LINE = System.getProperty("line.separator"); private static final String DATE_FORMATS [] = { - "EEE, dd MMM yyyy kk:mm:ss z", // Tue, 09 Dec 2003 22:39:08 GMT - "EEE MMM dd kk:mm:ss yyyy z", // Tue Dec 09 16:45:08 2003 EST - "EEE, dd-MMM-':'y kk:mm:ss z", // Tue, 09 Dec 2003 22:39:08 GMT - "EEE, dd-MMM-yyy kk:mm:ss z", // Tue, 09 Dec 2003 22:39:08 GMT - "EEE MMM dd kk:mm:ss yyyy", // Tue Dec 09 16:45:08 2003 + "EEE, dd MMM yyyy kk:mm:ss z", // Tue, 09 Dec 2003 22:39:08 GMT + "EEE MMM dd kk:mm:ss yyyy z", // Tue Dec 09 16:45:08 2003 EST + "EEE, dd-MMM-':'y kk:mm:ss z", // Tue, 09 Dec 2003 22:39:08 GMT + "EEE, dd-MMM-yyy kk:mm:ss z", // Tue, 09 Dec 2003 22:39:08 GMT + "EEE MMM dd kk:mm:ss yyyy", // Tue Dec 09 16:45:08 2003 + "dd MMM yyyy", // 1 March 1994 + "MMM dd, yyyy", // February 3, 1994 + "yyMMdd", // 910513 + "hhmm z.z.z. MMM dd, yyyy", // 0901 u.t.c. 
April 28, 1994 }; private ThreadLocal dateFormats = new ThreadLocal(); @@ -83,7 +87,7 @@ public class TrecContentSource extends ContentSource { private File dataDir = null; private ArrayList inputFiles = new ArrayList(); private int nextFile = 0; - private int rawDocSize; + private int rawDocSize = 0; // Use to synchronize threads on reading from the TREC documents. private Object lock = new Object(); @@ -92,7 +96,10 @@ public class TrecContentSource extends ContentSource { BufferedReader reader; int iteration = 0; HTMLParser htmlParser; + private boolean excludeDocnameIteration; + private TrecDocParser trecDocParser = new TrecGov2Parser(); // default + ParsePathType currPathType; // not private for tests private DateFormatInfo getDateFormatInfo() { DateFormatInfo dfi = dateFormats.get(); @@ -118,7 +125,7 @@ public class TrecContentSource extends ContentSource { return sb; } - private Reader getTrecDocReader(StringBuilder docBuffer) { + Reader getTrecDocReader(StringBuilder docBuffer) { StringBuilderReader r = trecDocReader.get(); if (r == null) { r = new StringBuilderReader(docBuffer); @@ -129,10 +136,21 @@ public class TrecContentSource extends ContentSource { return r; } - // read until finding a line that starts with the specified prefix, or a terminating tag has been found. - private void read(StringBuilder buf, String prefix, boolean collectMatchLine, - boolean collectAll, String terminatingTag) - throws IOException, NoMoreDataException { + HTMLParser getHtmlParser() { + return htmlParser; + } + + /** + * Read until a line starting with the specified lineStart. + * @param buf buffer for collecting the data if so specified/ + * @param lineStart line start to look for, must not be null. + * @param collectMatchLine whether to collect the matching line into buffer. + * @param collectAll whether to collect all lines into buffer. 
+ * @throws IOException + * @throws NoMoreDataException + */ + private void read(StringBuilder buf, String lineStart, + boolean collectMatchLine, boolean collectAll) throws IOException, NoMoreDataException { String sep = ""; while (true) { String line = reader.readLine(); @@ -144,20 +162,12 @@ public class TrecContentSource extends ContentSource { rawDocSize += line.length(); - if (line.startsWith(prefix)) { + if (lineStart!=null && line.startsWith(lineStart)) { if (collectMatchLine) { buf.append(sep).append(line); sep = NEW_LINE; } - break; - } - - if (terminatingTag != null && line.startsWith(terminatingTag)) { - // didn't find the prefix that was asked, but the terminating - // tag was found. set the length to 0 to signal no match was - // found. - buf.setLength(0); - break; + return; } if (collectAll) { @@ -169,7 +179,7 @@ public class TrecContentSource extends ContentSource { void openNextFile() throws NoMoreDataException, IOException { close(); - int retries = 0; + currPathType = null; while (true) { if (nextFile >= inputFiles.size()) { // exhausted files, start a new round, unless forever set to false. 
@@ -184,13 +194,13 @@ public class TrecContentSource extends ContentSource { System.out.println("opening: " + f + " length: " + f.length()); } try { - GZIPInputStream zis = new GZIPInputStream(new FileInputStream(f), BUFFER_SIZE); - reader = new BufferedReader(new InputStreamReader(zis, encoding), BUFFER_SIZE); + InputStream inputStream = getInputStream(f); // support either gzip, bzip2, or regular text file, by extension + reader = new BufferedReader(new InputStreamReader(inputStream, encoding), BUFFER_SIZE); + currPathType = TrecDocParser.pathType(f); return; } catch (Exception e) { - retries++; - if (retries < 20 && verbose) { - System.out.println("Skipping 'bad' file " + f.getAbsolutePath() + " #retries=" + retries); + if (verbose) { + System.out.println("Skipping 'bad' file " + f.getAbsolutePath()+" due to "+e.getMessage()); continue; } throw new NoMoreDataException(); @@ -198,7 +208,7 @@ public class TrecContentSource extends ContentSource { } } - Date parseDate(String dateStr) { + public Date parseDate(String dateStr) { dateStr = dateStr.trim(); DateFormatInfo dfi = getDateFormatInfo(); for (int i = 0; i < dfi.dfs.length; i++) { @@ -237,70 +247,47 @@ public class TrecContentSource extends ContentSource { @Override public DocData getNextDocData(DocData docData) throws NoMoreDataException, IOException { - String dateStr = null, name = null; - Reader r = null; + String name = null; + StringBuilder docBuf = getDocBuffer(); + ParsePathType parsedPathType; + // protect reading from the TREC files by multiple threads. The rest of the - // method, i.e., parsing the content and returning the DocData can run - // unprotected. + // method, i.e., parsing the content and returning the DocData can run unprotected. synchronized (lock) { if (reader == null) { openNextFile(); } - - StringBuilder docBuf = getDocBuffer(); - // 1. skip until doc start + // 1. 
skip until doc start - required for all TREC formats docBuf.setLength(0); - read(docBuf, DOC, false, false, null); - - // 2. name + read(docBuf, DOC, false, false); + + // save parsedFile for passing trecDataParser after the sync block, in + // case another thread will open another file in between. + parsedPathType = currPathType; + + // 2. name - required for all TREC formats docBuf.setLength(0); - read(docBuf, DOCNO, true, false, null); + read(docBuf, DOCNO, true, false); name = docBuf.substring(DOCNO.length(), docBuf.indexOf(TERMINATING_DOCNO, - DOCNO.length())); - if (!excludeDocnameIteration) + DOCNO.length())).trim(); + + if (!excludeDocnameIteration) { name = name + "_" + iteration; - - // 3. skip until doc header - docBuf.setLength(0); - read(docBuf, DOCHDR, false, false, null); - - boolean findTerminatingDocHdr = false; - - // 4. date - look for the date only until /DOCHDR - docBuf.setLength(0); - read(docBuf, DATE, true, false, TERMINATING_DOCHDR); - if (docBuf.length() != 0) { - // Date found. - dateStr = docBuf.substring(DATE.length()); - findTerminatingDocHdr = true; } - // 5. skip until end of doc header - if (findTerminatingDocHdr) { - docBuf.setLength(0); - read(docBuf, TERMINATING_DOCHDR, false, false, null); - } - - // 6. collect until end of doc + // 3. read all until end of doc docBuf.setLength(0); - read(docBuf, TERMINATING_DOC, false, true, null); - - // 7. Set up a Reader over the read content - r = getTrecDocReader(docBuf); - // Resetting the thread's reader means it will reuse the instance - // allocated as well as re-read from docBuf. - r.reset(); - - // count char length of parsed html text (larger than the plain doc body text). - addBytes(docBuf.length()); + read(docBuf, TERMINATING_DOC, false, true); } + + // count char length of text to be parsed (may be larger than the resulted plain doc body text). + addBytes(docBuf.length()); // This code segment relies on HtmlParser being thread safe. 
When we get // here, everything else is already private to that thread, so we're safe. - Date date = dateStr != null ? parseDate(dateStr) : null; try { - docData = htmlParser.parse(docData, name, date, r, null); + docData = trecDocParser.parse(docData, name, this, docBuf, parsedPathType); addDoc(); } catch (InterruptedException ie) { throw new ThreadInterruptedException(ie); @@ -322,27 +309,40 @@ public class TrecContentSource extends ContentSource { @Override public void setConfig(Config config) { super.setConfig(config); + // dirs File workDir = new File(config.get("work.dir", "work")); String d = config.get("docs.dir", "trec"); dataDir = new File(d); if (!dataDir.isAbsolute()) { dataDir = new File(workDir, d); } + // files collectFiles(dataDir, inputFiles); if (inputFiles.size() == 0) { throw new IllegalArgumentException("No files in dataDir: " + dataDir); } + // trec doc parser try { - String parserClassName = config.get("html.parser", - "org.apache.lucene.benchmark.byTask.feeds.DemoHTMLParser"); - htmlParser = Class.forName(parserClassName).asSubclass(HTMLParser.class).newInstance(); + String trecDocParserClassName = config.get("trec.doc.parser", "org.apache.lucene.benchmark.byTask.feeds.TrecGov2Parser"); + trecDocParser = Class.forName(trecDocParserClassName).asSubclass(TrecDocParser.class).newInstance(); } catch (Exception e) { // Should not get here. Throw runtime exception. throw new RuntimeException(e); } + // html parser + try { + String htmlParserClassName = config.get("html.parser", + "org.apache.lucene.benchmark.byTask.feeds.DemoHTMLParser"); + htmlParser = Class.forName(htmlParserClassName).asSubclass(HTMLParser.class).newInstance(); + } catch (Exception e) { + // Should not get here. Throw runtime exception. 
+ throw new RuntimeException(e); + } + // encoding if (encoding == null) { encoding = "ISO-8859-1"; } + // iteration exclusion in doc name excludeDocnameIteration = config.get("content.source.excludeIteration", false); } diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecDocParser.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecDocParser.java new file mode 100644 index 00000000000..d87aa3ab679 --- /dev/null +++ b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecDocParser.java @@ -0,0 +1,135 @@ +package org.apache.lucene.benchmark.byTask.feeds; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.File; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +/** + * Parser for trec doc content, invoked on doc text excluding and + * which are handled in TrecContentSource. Required to be stateless and hence thread safe. 
+ */ +public abstract class TrecDocParser { + + /** Types of trec parse paths, */ + public enum ParsePathType { GOV2, FBIS, FT, FR94, LATIMES } + + /** trec parser type used for unknown extensions */ + public static final ParsePathType DEFAULT_PATH_TYPE = ParsePathType.GOV2; + + static final Map pathType2parser = new HashMap(); + static { + pathType2parser.put(ParsePathType.GOV2, new TrecGov2Parser()); + pathType2parser.put(ParsePathType.FBIS, new TrecFBISParser()); + pathType2parser.put(ParsePathType.FR94, new TrecFR94Parser()); + pathType2parser.put(ParsePathType.FT, new TrecFTParser()); + pathType2parser.put(ParsePathType.LATIMES, new TrecLATimesParser()); + } + + static final Map pathName2Type = new HashMap(); + static { + for (ParsePathType ppt : ParsePathType.values()) { + pathName2Type.put(ppt.name(),ppt); + } + } + + /** max length of walk up from file to its ancestors when looking for a known path type */ + private static final int MAX_PATH_LENGTH = 10; + + /** + * Compute the path type of a file by inspecting name of file and its parents + */ + public static ParsePathType pathType(File f) { + int pathLength = 0; + while (f != null && ++pathLength < MAX_PATH_LENGTH) { + ParsePathType ppt = pathName2Type.get(f.getName().toUpperCase()); + if (ppt!=null) { + return ppt; + } + f = f.getParentFile(); + } + return DEFAULT_PATH_TYPE; + } + + /** + * parse the text prepared in docBuf into a result DocData, + * no synchronization is required. + * @param docData reusable result + * @param name name that should be set to the result + * @param trecSrc calling trec content source + * @param docBuf text to parse + * @param pathType type of parsed file, or null if unknown - may be used by + * parsers to alter their behavior according to the file path type. 
+ */ + public abstract DocData parse(DocData docData, String name, TrecContentSource trecSrc, + StringBuilder docBuf, ParsePathType pathType) throws IOException, InterruptedException; + + /** + * strip tags from buf: each tag is replaced by a single blank. + * @return text obtained when stripping all tags from buf (Input StringBuilder is unmodified). + */ + public static String stripTags(StringBuilder buf, int start) { + return stripTags(buf.substring(start),0); + } + + /** + * strip tags from input. + * @see #stripTags(StringBuilder, int) + */ + public static String stripTags(String buf, int start) { + if (start>0) { + buf = buf.substring(0); + } + return buf.replaceAll("<[^>]*>", " "); + } + + /** + * Extract from buf the text of interest within specified tags + * @param buf entire input text + * @param startTag tag marking start of text of interest + * @param endTag tag marking end of text of interest + * @param maxPos if ≥ 0 sets a limit on start of text of interest + * @return text of interest or null if not found + */ + public static String extract(StringBuilder buf, String startTag, String endTag, int maxPos, String noisePrefixes[]) { + int k1 = buf.indexOf(startTag); + if (k1>=0 && (maxPos<0 || k1=0 && (maxPos<0 || k2=0 && k1a2<>1?",0)); + //} + +} diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFBISParser.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFBISParser.java new file mode 100644 index 00000000000..8efcd04e91d --- /dev/null +++ b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFBISParser.java @@ -0,0 +1,65 @@ +package org.apache.lucene.benchmark.byTask.feeds; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.IOException; +import java.util.Date; + +/** + * Parser for the FBIS docs in trec disks 4+5 collection format + */ +public class TrecFBISParser extends TrecDocParser { + + private static final String HEADER = "
    "; + private static final String HEADER_END = "
    "; + private static final int HEADER_END_LENGTH = HEADER_END.length(); + + private static final String DATE1 = ""; + private static final String DATE1_END = ""; + + private static final String TI = ""; + private static final String TI_END = ""; + + @Override + public DocData parse(DocData docData, String name, TrecContentSource trecSrc, + StringBuilder docBuf, ParsePathType pathType) throws IOException, InterruptedException { + int mark = 0; // that much is skipped + // optionally skip some of the text, set date, title + Date date = null; + String title = null; + int h1 = docBuf.indexOf(HEADER); + if (h1>=0) { + int h2 = docBuf.indexOf(HEADER_END,h1); + mark = h2+HEADER_END_LENGTH; + // date... + String dateStr = extract(docBuf, DATE1, DATE1_END, h2, null); + if (dateStr != null) { + date = trecSrc.parseDate(dateStr); + } + // title... + title = extract(docBuf, TI, TI_END, h2, null); + } + docData.clear(); + docData.setName(name); + docData.setDate(date); + docData.setTitle(title); + docData.setBody(stripTags(docBuf, mark).toString()); + return docData; + } + +} diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFR94Parser.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFR94Parser.java new file mode 100644 index 00000000000..ce6492120d7 --- /dev/null +++ b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFR94Parser.java @@ -0,0 +1,66 @@ +package org.apache.lucene.benchmark.byTask.feeds; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.IOException; +import java.util.Date; + +/** + * Parser for the FR94 docs in trec disks 4+5 collection format + */ +public class TrecFR94Parser extends TrecDocParser { + + private static final String TEXT = ""; + private static final int TEXT_LENGTH = TEXT.length(); + private static final String TEXT_END = ""; + + private static final String DATE = ""; + private static final String[] DATE_NOISE_PREFIXES = { + "DATE:", + "date:", //TODO improve date extraction for this format + "t.c.", + }; + private static final String DATE_END = ""; + + //TODO can we also extract title for this format? + + @Override + public DocData parse(DocData docData, String name, TrecContentSource trecSrc, + StringBuilder docBuf, ParsePathType pathType) throws IOException, InterruptedException { + int mark = 0; // that much is skipped + // optionally skip some of the text, set date (no title?) + Date date = null; + int h1 = docBuf.indexOf(TEXT); + if (h1>=0) { + int h2 = docBuf.indexOf(TEXT_END,h1); + mark = h1+TEXT_LENGTH; + // date... 
+ String dateStr = extract(docBuf, DATE, DATE_END, h2, DATE_NOISE_PREFIXES); + if (dateStr != null) { + dateStr = stripTags(dateStr,0).toString(); + date = trecSrc.parseDate(dateStr.trim()); + } + } + docData.clear(); + docData.setName(name); + docData.setDate(date); + docData.setBody(stripTags(docBuf, mark).toString()); + return docData; + } + +} diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFTParser.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFTParser.java new file mode 100644 index 00000000000..ab39d9c2860 --- /dev/null +++ b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecFTParser.java @@ -0,0 +1,57 @@ +package org.apache.lucene.benchmark.byTask.feeds; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import java.io.IOException; +import java.util.Date; + +/** + * Parser for the FT docs in trec disks 4+5 collection format + */ +public class TrecFTParser extends TrecDocParser { + + private static final String DATE = ""; + private static final String DATE_END = ""; + + private static final String HEADLINE = ""; + private static final String HEADLINE_END = ""; + + @Override + public DocData parse(DocData docData, String name, TrecContentSource trecSrc, + StringBuilder docBuf, ParsePathType pathType) throws IOException, InterruptedException { + int mark = 0; // that much is skipped + + // date... + Date date = null; + String dateStr = extract(docBuf, DATE, DATE_END, -1, null); + if (dateStr != null) { + date = trecSrc.parseDate(dateStr); + } + + // title... + String title = extract(docBuf, HEADLINE, HEADLINE_END, -1, null); + + docData.clear(); + docData.setName(name); + docData.setDate(date); + docData.setTitle(title); + docData.setBody(stripTags(docBuf, mark).toString()); + return docData; + } + +} diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecGov2Parser.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecGov2Parser.java new file mode 100755 index 00000000000..ef8371d1735 --- /dev/null +++ b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecGov2Parser.java @@ -0,0 +1,59 @@ +package org.apache.lucene.benchmark.byTask.feeds; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.IOException; +import java.io.Reader; +import java.util.Date; + +/** + * Parser for the GOV2 collection format + */ +public class TrecGov2Parser extends TrecDocParser { + + private static final String DATE = "Date: "; + private static final String DATE_END = TrecContentSource.NEW_LINE; + + private static final String DOCHDR = ""; + private static final String TERMINATING_DOCHDR = ""; + private static final int TERMINATING_DOCHDR_LENGTH = TERMINATING_DOCHDR.length(); + + @Override + public DocData parse(DocData docData, String name, TrecContentSource trecSrc, + StringBuilder docBuf, ParsePathType pathType) throws IOException, InterruptedException { + // Set up a (per-thread) reused Reader over the read content, reset it to re-read from docBuf + Reader r = trecSrc.getTrecDocReader(docBuf); + + // skip some of the text, optionally set date + Date date = null; + int h1 = docBuf.indexOf(DOCHDR); + if (h1>=0) { + int h2 = docBuf.indexOf(TERMINATING_DOCHDR,h1); + String dateStr = extract(docBuf, DATE, DATE_END, h2, null); + if (dateStr != null) { + date = trecSrc.parseDate(dateStr); + } + r.mark(h2+TERMINATING_DOCHDR_LENGTH); + } + + r.reset(); + HTMLParser htmlParser = trecSrc.getHtmlParser(); + return htmlParser.parse(docData, name, date, null, r, null); + } + +} diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecLATimesParser.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecLATimesParser.java new file mode 100644 index 00000000000..367015bee36 --- /dev/null +++ 
b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecLATimesParser.java @@ -0,0 +1,71 @@ +package org.apache.lucene.benchmark.byTask.feeds; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.IOException; +import java.util.Date; + +/** + * Parser for the FT docs in trec disks 4+5 collection format + */ +public class TrecLATimesParser extends TrecDocParser { + + private static final String DATE = ""; + private static final String DATE_END = ""; + private static final String DATE_NOISE = "day,"; // anything aftre the ',' + + private static final String SUBJECT = ""; + private static final String SUBJECT_END = ""; + private static final String HEADLINE = ""; + private static final String HEADLINE_END = ""; + + @Override + public DocData parse(DocData docData, String name, TrecContentSource trecSrc, + StringBuilder docBuf, ParsePathType pathType) throws IOException, InterruptedException { + int mark = 0; // that much is skipped + + // date... 
+ Date date = null; + String dateStr = extract(docBuf, DATE, DATE_END, -1, null); + if (dateStr != null) { + int d2a = dateStr.indexOf(DATE_NOISE); + if (d2a > 0) { + dateStr = dateStr.substring(0,d2a+3); // we need the "day" part + } + dateStr = stripTags(dateStr,0).toString(); + date = trecSrc.parseDate(dateStr.trim()); + } + + // title... first try with SUBJECT, them with HEADLINE + String title = extract(docBuf, SUBJECT, SUBJECT_END, -1, null); + if (title==null) { + title = extract(docBuf, HEADLINE, HEADLINE_END, -1, null); + } + if (title!=null) { + title = stripTags(title,0).toString().trim(); + } + + docData.clear(); + docData.setName(name); + docData.setDate(date); + docData.setTitle(title); + docData.setBody(stripTags(docBuf, mark).toString()); + return docData; + } + +} diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecParserByPath.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecParserByPath.java new file mode 100644 index 00000000000..fc882035a01 --- /dev/null +++ b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecParserByPath.java @@ -0,0 +1,33 @@ +package org.apache.lucene.benchmark.byTask.feeds; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.IOException; +/** + * Parser for trec docs which selects the parser to apply according + * to the source files path, defaulting to {@link TrecGov2Parser}. + */ +public class TrecParserByPath extends TrecDocParser { + + @Override + public DocData parse(DocData docData, String name, TrecContentSource trecSrc, + StringBuilder docBuf, ParsePathType pathType) throws IOException, InterruptedException { + return pathType2parser.get(pathType).parse(docData, name, trecSrc, docBuf, pathType); + } + +} diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/StringBuilderReader.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/StringBuilderReader.java index c6e9510e01d..a10d5371c72 100644 --- a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/StringBuilderReader.java +++ b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/StringBuilderReader.java @@ -158,8 +158,10 @@ public class StringBuilderReader extends Reader { synchronized (lock) { this.sb = sb; length = sb.length(); + next = mark = 0; } } + @Override public long skip(long ns) throws IOException { synchronized (lock) { diff --git a/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TrecContentSourceTest.java b/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TrecContentSourceTest.java index a178c6a6b1f..8222e5782ff 100644 --- a/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TrecContentSourceTest.java +++ b/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TrecContentSourceTest.java @@ -18,14 +18,20 @@ package org.apache.lucene.benchmark.byTask.feeds; */ import java.io.BufferedReader; +import java.io.File; import java.io.IOException; import java.io.StringReader; import java.text.ParseException; +import java.util.Arrays; import 
java.util.Date; +import java.util.HashSet; +import java.util.Properties; +import org.apache.lucene.benchmark.byTask.feeds.TrecDocParser.ParsePathType; import org.apache.lucene.benchmark.byTask.utils.Config; import org.apache.lucene.document.DateTools; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util._TestUtil; public class TrecContentSourceTest extends LuceneTestCase { @@ -329,5 +335,62 @@ public class TrecContentSourceTest extends LuceneTestCase { // Don't test that NoMoreDataException is thrown, since the forever flag is // turned on. } + + /** + * Open a trec content source over a directory with files of all trec path types and all + * supported formats - bzip, gzip, txt. + */ + public void testTrecFeedDirAllTypes() throws Exception { + File dataDir = _TestUtil.getTempDir("trecFeedAllTypes"); + _TestUtil.unzip(getDataFile("trecdocs.zip"), dataDir); + TrecContentSource tcs = new TrecContentSource(); + Properties props = new Properties(); + props.setProperty("print.props", "false"); + props.setProperty("content.source.verbose", "false"); + props.setProperty("content.source.excludeIteration", "true"); + props.setProperty("doc.maker.forever", "false"); + props.setProperty("docs.dir", dataDir.getCanonicalPath().replace('\\','/')); + props.setProperty("trec.doc.parser", TrecParserByPath.class.getName()); + props.setProperty("content.source.forever", "false"); + tcs.setConfig(new Config(props)); + tcs.resetInputs(); + DocData dd = new DocData(); + int n = 0; + boolean gotExpectedException = false; + HashSet unseenTypes = new HashSet(Arrays.asList(ParsePathType.values())); + try { + while (n<100) { // arbiterary limit to prevent looping forever in case of test failure + dd = tcs.getNextDocData(dd); + ++n; + assertNotNull("doc data "+n+" should not be null!", dd); + unseenTypes.remove(tcs.currPathType); + switch(tcs.currPathType) { + case GOV2: + assertDocData(dd, "TEST-000", "TEST-000 title", "TEST-000 text", tcs.parseDate("Sun, 11 Jan 2009 
08:00:00 GMT")); + break; + case FBIS: + assertDocData(dd, "TEST-001", "TEST-001 Title", "TEST-001 text", tcs.parseDate("1 January 1991")); + break; + case FR94: + // no title extraction in this source for now + assertDocData(dd, "TEST-002", null, "DEPARTMENT OF SOMETHING", tcs.parseDate("February 3, 1994")); + break; + case FT: + assertDocData(dd, "TEST-003", "Test-003 title", "Some pub text", tcs.parseDate("980424")); + break; + case LATIMES: + assertDocData(dd, "TEST-004", "Test-004 Title", "Some paragraph", tcs.parseDate("January 17, 1997, Sunday")); + break; + default: + assertTrue("Should never get here!", false); + } + } + } catch (NoMoreDataException e) { + gotExpectedException = true; + } + assertTrue("Should have gotten NoMoreDataException!", gotExpectedException); + assertEquals("Wrong numbre of documents created by osurce!",5,n); + assertTrue("Did not see all types!",unseenTypes.isEmpty()); + } } diff --git a/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/trecdocs.zip b/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/trecdocs.zip new file mode 100644 index 0000000000000000000000000000000000000000..850d5b6aad2024209ed4a776531f046ad85a9db2 GIT binary patch literal 2676 zcmai$3p7-D9LKL`#tbIUl*b&MRcfP;h|N9#YGd_M{06uDN8yhUY zS$RPKazKC|*_Gt&>aSsS1_AJ)590|ZC(&uU|C93)e;0v0X^3L{ZyMmmjGsuxF5iv6}I5 zzrB{K)<(^IRE>A~bnCNGZlTk&LdKoQDVoSl{eIQso;1^^VPQhb@(Y8GH~X3<9cXdk zp;&!OtgCk2xn3FAKUN9!GS`49g(IWwik&=4`s;Y$%MBwsZ}-t8G#Ge^EB4n z+Fh}gTYd+%SY4~+^&Pih4z;DC_XXtC6x6lr-GVtFsJZGEjN{@~U?Mgic58ZGxWZZG zViRw<(l8B)qMRC1`iVc~zRj6n@%a#OdxW&cpOefAL$k(8N7?8c=>i_*reN(k-V=hR zZ`!QSl=1XvE2uZMEln}xtqKxG(0VGF?jk)q^r{FFmNxBrg8WV{qn7>m5r0`{UJJBv zd)<;5RPjT1dTcFS$!n%)U-^Se3O$us*O-I<>FVtqzgsf#%DRq7oJ`AAOvNYjrqj_y zWbtDu;SANE+C7_7M*5lgXnLLFyB;OyNGVvX&T zPuq#7YK8;AuE7WZZN&SEY&gs`+F!^t!T?DKQ$l_i&Aq{$p@kL0L|G(h7q(Cice*Pi zXo(2;jC_$rpZmA0BqgHk1|`q5p&cBRL8G73rhv#v=-Ih z#`xr5_NbB;*DQRw!NTqAKiwQT13 
z;AJm$At<~y1h?W%AtCr(a1xyQ-$!o7o=5lM$qeA(YrN7|2kOkHAaYWd4crvh7ofpJ@3y#~#h$&6^V? zL!PV6DGu!>P59ktYsF_OOc2wNeP$ViGAl9d`a(vEEd9|5C%2T6*D<~aa@&sjHOhXK zOo}8rZxdZyl(ey1 Date: Sat, 5 Feb 2011 11:51:23 +0000 Subject: [PATCH 097/185] force in-order merging for this test git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1067427 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java b/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java index cfe6ecb423f..98cb5b50e96 100644 --- a/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java +++ b/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java @@ -619,7 +619,7 @@ public class TestDeletionPolicy extends LuceneTestCase { Directory dir = newDirectory(); IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()) - .setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy); + .setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy).setMergePolicy(newInOrderLogMergePolicy()); MergePolicy mp = conf.getMergePolicy(); if (mp instanceof LogMergePolicy) { ((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile); From a3dbc979782ceb54c9935c4ff215e218db76ce30 Mon Sep 17 00:00:00 2001 From: Koji Sekiguchi Date: Sat, 5 Feb 2011 23:36:32 +0000 Subject: [PATCH 098/185] LUCENE-2894: Use google-code-prettify for syntax highlighting in javadocs git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1067551 13f79535-47bb-0310-9956-ffa450edef68 --- dev-tools/prettify/lang-apollo.js | 1 + dev-tools/prettify/lang-css.js | 1 + dev-tools/prettify/lang-hs.js | 1 + dev-tools/prettify/lang-lisp.js | 1 + dev-tools/prettify/lang-lua.js | 1 + dev-tools/prettify/lang-ml.js | 1 + dev-tools/prettify/lang-proto.js | 1 + dev-tools/prettify/lang-sql.js | 1 + dev-tools/prettify/lang-vb.js | 1 + 
dev-tools/prettify/lang-wiki.js | 1 + dev-tools/prettify/prettify.css | 1 + dev-tools/prettify/prettify.js | 46 ++++++++++++ dev-tools/prettify/stylesheet+prettify.css | 30 ++++++++ lucene/CHANGES.txt | 3 + lucene/NOTICE.txt | 3 + lucene/common-build.xml | 11 +++ .../search/vectorhighlight/package.html | 2 +- .../lucene/search/similar/MoreLikeThis.java | 14 ++-- .../lucene/spatial/geohash/GeoHashUtils.java | 2 +- .../org/apache/lucene/wordnet/SynonymMap.java | 6 +- lucene/src/java/overview.html | 70 ++++++++----------- solr/NOTICE.txt | 11 ++- solr/build.xml | 2 + solr/common-build.xml | 11 +++ .../clustering/ClusteringComponent.java | 7 ++ .../solr/analysis/StopFilterFactory.java | 9 +++ .../component/TermVectorComponent.java | 14 ++++ .../handler/component/TermsComponent.java | 13 ++++ .../solrj/impl/CommonsHttpSolrServer.java | 2 + 29 files changed, 214 insertions(+), 53 deletions(-) create mode 100644 dev-tools/prettify/lang-apollo.js create mode 100644 dev-tools/prettify/lang-css.js create mode 100644 dev-tools/prettify/lang-hs.js create mode 100644 dev-tools/prettify/lang-lisp.js create mode 100644 dev-tools/prettify/lang-lua.js create mode 100644 dev-tools/prettify/lang-ml.js create mode 100644 dev-tools/prettify/lang-proto.js create mode 100644 dev-tools/prettify/lang-sql.js create mode 100644 dev-tools/prettify/lang-vb.js create mode 100644 dev-tools/prettify/lang-wiki.js create mode 100644 dev-tools/prettify/prettify.css create mode 100644 dev-tools/prettify/prettify.js create mode 100644 dev-tools/prettify/stylesheet+prettify.css diff --git a/dev-tools/prettify/lang-apollo.js b/dev-tools/prettify/lang-apollo.js new file mode 100644 index 00000000000..40420308094 --- /dev/null +++ b/dev-tools/prettify/lang-apollo.js @@ -0,0 +1 @@ +PR.registerLangHandler(PR.createSimpleLexer([[PR.PR_COMMENT,/^#[^\r\n]*/,null,'#'],[PR.PR_PLAIN,/^[\t\n\r \xA0]+/,null,' \n\r 
\xa0'],[PR.PR_STRING,/^\"(?:[^\"\\]|\\[\s\S])*(?:\"|$)/,null,'\"']],[[PR.PR_KEYWORD,/^(?:ADS|AD|AUG|BZF|BZMF|CAE|CAF|CA|CCS|COM|CS|DAS|DCA|DCOM|DCS|DDOUBL|DIM|DOUBLE|DTCB|DTCF|DV|DXCH|EDRUPT|EXTEND|INCR|INDEX|NDX|INHINT|LXCH|MASK|MSK|MP|MSU|NOOP|OVSK|QXCH|RAND|READ|RELINT|RESUME|RETURN|ROR|RXOR|SQUARE|SU|TCR|TCAA|OVSK|TCF|TC|TS|WAND|WOR|WRITE|XCH|XLQ|XXALQ|ZL|ZQ|ADD|ADZ|SUB|SUZ|MPY|MPR|MPZ|DVP|COM|ABS|CLA|CLZ|LDQ|STO|STQ|ALS|LLS|LRS|TRA|TSQ|TMI|TOV|AXT|TIX|DLY|INP|OUT)\s/,null],[PR.PR_TYPE,/^(?:-?GENADR|=MINUS|2BCADR|VN|BOF|MM|-?2CADR|-?[1-6]DNADR|ADRES|BBCON|[SE]?BANK\=?|BLOCK|BNKSUM|E?CADR|COUNT\*?|2?DEC\*?|-?DNCHAN|-?DNPTR|EQUALS|ERASE|MEMORY|2?OCT|REMADR|SETLOC|SUBRO|ORG|BSS|BES|SYN|EQU|DEFINE|END)\s/,null],[PR.PR_LITERAL,/^\'(?:-*(?:\w|\\[\x21-\x7e])(?:[\w-]*|\\[\x21-\x7e])[=!?]?)?/],[PR.PR_PLAIN,/^-*(?:[!-z_]|\\[\x21-\x7e])(?:[\w-]*|\\[\x21-\x7e])[=!?]?/i],[PR.PR_PUNCTUATION,/^[^\w\t\n\r \xA0()\"\\\';]+/]]),['apollo','agc','aea']) \ No newline at end of file diff --git a/dev-tools/prettify/lang-css.js b/dev-tools/prettify/lang-css.js new file mode 100644 index 00000000000..c650d8f0fdb --- /dev/null +++ b/dev-tools/prettify/lang-css.js @@ -0,0 +1 @@ +PR.registerLangHandler(PR.createSimpleLexer([[PR.PR_PLAIN,/^[ \t\r\n\f]+/,null,' \r\n ']],[[PR.PR_STRING,/^\"(?:[^\n\r\f\\\"]|\\(?:\r\n?|\n|\f)|\\[\s\S])*\"/,null],[PR.PR_STRING,/^\'(?:[^\n\r\f\\\']|\\(?:\r\n?|\n|\f)|\\[\s\S])*\'/,null],['lang-css-str',/^url\(([^\)\"\']*)\)/i],[PR.PR_KEYWORD,/^(?:url|rgb|\!important|@import|@page|@media|@charset|inherit)(?=[^\-\w]|$)/i,null],['lang-css-kw',/^(-?(?:[_a-z]|(?:\\[0-9a-f]+ ?))(?:[_a-z0-9\-]|\\(?:\\[0-9a-f]+ ?))*)\s*:/i],[PR.PR_COMMENT,/^\/\*[^*]*\*+(?:[^\/*][^*]*\*+)*\//],[PR.PR_COMMENT,/^(?:)/],[PR.PR_LITERAL,/^(?:\d+|\d*\.\d+)(?:%|[a-z]+)?/i],[PR.PR_LITERAL,/^#(?:[0-9a-f]{3}){1,2}/i],[PR.PR_PLAIN,/^-?(?:[_a-z]|(?:\\[\da-f]+ ?))(?:[_a-z\d\-]|\\(?:\\[\da-f]+ 
?))*/i],[PR.PR_PUNCTUATION,/^[^\s\w\'\"]+/]]),['css']),PR.registerLangHandler(PR.createSimpleLexer([],[[PR.PR_KEYWORD,/^-?(?:[_a-z]|(?:\\[\da-f]+ ?))(?:[_a-z\d\-]|\\(?:\\[\da-f]+ ?))*/i]]),['css-kw']),PR.registerLangHandler(PR.createSimpleLexer([],[[PR.PR_STRING,/^[^\)\"\']+/]]),['css-str']) \ No newline at end of file diff --git a/dev-tools/prettify/lang-hs.js b/dev-tools/prettify/lang-hs.js new file mode 100644 index 00000000000..27b221acd7c --- /dev/null +++ b/dev-tools/prettify/lang-hs.js @@ -0,0 +1 @@ +PR.registerLangHandler(PR.createSimpleLexer([[PR.PR_PLAIN,/^[\t\n\x0B\x0C\r ]+/,null,' \n \r '],[PR.PR_STRING,/^\"(?:[^\"\\\n\x0C\r]|\\[\s\S])*(?:\"|$)/,null,'\"'],[PR.PR_STRING,/^\'(?:[^\'\\\n\x0C\r]|\\[^&])\'?/,null,'\''],[PR.PR_LITERAL,/^(?:0o[0-7]+|0x[\da-f]+|\d+(?:\.\d+)?(?:e[+\-]?\d+)?)/i,null,'0123456789']],[[PR.PR_COMMENT,/^(?:(?:--+(?:[^\r\n\x0C]*)?)|(?:\{-(?:[^-]|-+[^-\}])*-\}))/],[PR.PR_KEYWORD,/^(?:case|class|data|default|deriving|do|else|if|import|in|infix|infixl|infixr|instance|let|module|newtype|of|then|type|where|_)(?=[^a-zA-Z0-9\']|$)/,null],[PR.PR_PLAIN,/^(?:[A-Z][\w\']*\.)*[a-zA-Z][\w\']*/],[PR.PR_PUNCTUATION,/^[^\t\n\x0B\x0C\r a-zA-Z0-9\'\"]+/]]),['hs']) \ No newline at end of file diff --git a/dev-tools/prettify/lang-lisp.js b/dev-tools/prettify/lang-lisp.js new file mode 100644 index 00000000000..85c6c23d0d3 --- /dev/null +++ b/dev-tools/prettify/lang-lisp.js @@ -0,0 +1 @@ +PR.registerLangHandler(PR.createSimpleLexer([['opn',/^\(/,null,'('],['clo',/^\)/,null,')'],[PR.PR_COMMENT,/^;[^\r\n]*/,null,';'],[PR.PR_PLAIN,/^[\t\n\r \xA0]+/,null,' \n\r 
\xa0'],[PR.PR_STRING,/^\"(?:[^\"\\]|\\[\s\S])*(?:\"|$)/,null,'\"']],[[PR.PR_KEYWORD,/^(?:block|c[ad]+r|catch|cons|defun|do|eq|eql|equal|equalp|eval-when|flet|format|go|if|labels|lambda|let|load-time-value|locally|macrolet|multiple-value-call|nil|progn|progv|quote|require|return-from|setq|symbol-macrolet|t|tagbody|the|throw|unwind)\b/,null],[PR.PR_LITERAL,/^[+\-]?(?:0x[0-9a-f]+|\d+\/\d+|(?:\.\d+|\d+(?:\.\d*)?)(?:[ed][+\-]?\d+)?)/i],[PR.PR_LITERAL,/^\'(?:-*(?:\w|\\[\x21-\x7e])(?:[\w-]*|\\[\x21-\x7e])[=!?]?)?/],[PR.PR_PLAIN,/^-*(?:[a-z_]|\\[\x21-\x7e])(?:[\w-]*|\\[\x21-\x7e])[=!?]?/i],[PR.PR_PUNCTUATION,/^[^\w\t\n\r \xA0()\"\\\';]+/]]),['cl','el','lisp','scm']) \ No newline at end of file diff --git a/dev-tools/prettify/lang-lua.js b/dev-tools/prettify/lang-lua.js new file mode 100644 index 00000000000..d107bab01eb --- /dev/null +++ b/dev-tools/prettify/lang-lua.js @@ -0,0 +1 @@ +PR.registerLangHandler(PR.createSimpleLexer([[PR.PR_PLAIN,/^[\t\n\r \xA0]+/,null,' \n\r \xa0'],[PR.PR_STRING,/^(?:\"(?:[^\"\\]|\\[\s\S])*(?:\"|$)|\'(?:[^\'\\]|\\[\s\S])*(?:\'|$))/,null,'\"\'']],[[PR.PR_COMMENT,/^--(?:\[(=*)\[[\s\S]*?(?:\]\1\]|$)|[^\r\n]*)/],[PR.PR_STRING,/^\[(=*)\[[\s\S]*?(?:\]\1\]|$)/],[PR.PR_KEYWORD,/^(?:and|break|do|else|elseif|end|false|for|function|if|in|local|nil|not|or|repeat|return|then|true|until|while)\b/,null],[PR.PR_LITERAL,/^[+-]?(?:0x[\da-f]+|(?:(?:\.\d+|\d+(?:\.\d*)?)(?:e[+\-]?\d+)?))/i],[PR.PR_PLAIN,/^[a-z_]\w*/i],[PR.PR_PUNCTUATION,/^[^\w\t\n\r \xA0][^\w\t\n\r \xA0\"\'\-\+=]*/]]),['lua']) \ No newline at end of file diff --git a/dev-tools/prettify/lang-ml.js b/dev-tools/prettify/lang-ml.js new file mode 100644 index 00000000000..698d6de4e98 --- /dev/null +++ b/dev-tools/prettify/lang-ml.js @@ -0,0 +1 @@ +PR.registerLangHandler(PR.createSimpleLexer([[PR.PR_PLAIN,/^[\t\n\r \xA0]+/,null,' \n\r \xa0'],[PR.PR_COMMENT,/^#(?:if[\t\n\r 
\xA0]+(?:[a-z_$][\w\']*|``[^\r\n\t`]*(?:``|$))|else|endif|light)/i,null,'#'],[PR.PR_STRING,/^(?:\"(?:[^\"\\]|\\[\s\S])*(?:\"|$)|\'(?:[^\'\\]|\\[\s\S])*(?:\'|$))/,null,'\"\'']],[[PR.PR_COMMENT,/^(?:\/\/[^\r\n]*|\(\*[\s\S]*?\*\))/],[PR.PR_KEYWORD,/^(?:abstract|and|as|assert|begin|class|default|delegate|do|done|downcast|downto|elif|else|end|exception|extern|false|finally|for|fun|function|if|in|inherit|inline|interface|internal|lazy|let|match|member|module|mutable|namespace|new|null|of|open|or|override|private|public|rec|return|static|struct|then|to|true|try|type|upcast|use|val|void|when|while|with|yield|asr|land|lor|lsl|lsr|lxor|mod|sig|atomic|break|checked|component|const|constraint|constructor|continue|eager|event|external|fixed|functor|global|include|method|mixin|object|parallel|process|protected|pure|sealed|trait|virtual|volatile)\b/],[PR.PR_LITERAL,/^[+\-]?(?:0x[\da-f]+|(?:(?:\.\d+|\d+(?:\.\d*)?)(?:e[+\-]?\d+)?))/i],[PR.PR_PLAIN,/^(?:[a-z_]\w*[!?#]?|``[^\r\n\t`]*(?:``|$))/i],[PR.PR_PUNCTUATION,/^[^\t\n\r \xA0\"\'\w]+/]]),['fs','ml']) \ No newline at end of file diff --git a/dev-tools/prettify/lang-proto.js b/dev-tools/prettify/lang-proto.js new file mode 100644 index 00000000000..e67967f3e64 --- /dev/null +++ b/dev-tools/prettify/lang-proto.js @@ -0,0 +1 @@ +PR.registerLangHandler(PR.sourceDecorator({keywords:'bool bytes default double enum extend extensions false fixed32 fixed64 float group import int32 int64 max message option optional package repeated required returns rpc service sfixed32 sfixed64 sint32 sint64 string syntax to true uint32 uint64',cStyleComments:true}),['proto']) \ No newline at end of file diff --git a/dev-tools/prettify/lang-sql.js b/dev-tools/prettify/lang-sql.js new file mode 100644 index 00000000000..ff381cd4a54 --- /dev/null +++ b/dev-tools/prettify/lang-sql.js @@ -0,0 +1 @@ +PR.registerLangHandler(PR.createSimpleLexer([[PR.PR_PLAIN,/^[\t\n\r \xA0]+/,null,' \n\r 
\xa0'],[PR.PR_STRING,/^(?:"(?:[^\"\\]|\\.)*"|'(?:[^\'\\]|\\.)*')/,null,'\"\'']],[[PR.PR_COMMENT,/^(?:--[^\r\n]*|\/\*[\s\S]*?(?:\*\/|$))/],[PR.PR_KEYWORD,/^(?:ADD|ALL|ALTER|AND|ANY|AS|ASC|AUTHORIZATION|BACKUP|BEGIN|BETWEEN|BREAK|BROWSE|BULK|BY|CASCADE|CASE|CHECK|CHECKPOINT|CLOSE|CLUSTERED|COALESCE|COLLATE|COLUMN|COMMIT|COMPUTE|CONSTRAINT|CONTAINS|CONTAINSTABLE|CONTINUE|CONVERT|CREATE|CROSS|CURRENT|CURRENT_DATE|CURRENT_TIME|CURRENT_TIMESTAMP|CURRENT_USER|CURSOR|DATABASE|DBCC|DEALLOCATE|DECLARE|DEFAULT|DELETE|DENY|DESC|DISK|DISTINCT|DISTRIBUTED|DOUBLE|DROP|DUMMY|DUMP|ELSE|END|ERRLVL|ESCAPE|EXCEPT|EXEC|EXECUTE|EXISTS|EXIT|FETCH|FILE|FILLFACTOR|FOR|FOREIGN|FREETEXT|FREETEXTTABLE|FROM|FULL|FUNCTION|GOTO|GRANT|GROUP|HAVING|HOLDLOCK|IDENTITY|IDENTITYCOL|IDENTITY_INSERT|IF|IN|INDEX|INNER|INSERT|INTERSECT|INTO|IS|JOIN|KEY|KILL|LEFT|LIKE|LINENO|LOAD|NATIONAL|NOCHECK|NONCLUSTERED|NOT|NULL|NULLIF|OF|OFF|OFFSETS|ON|OPEN|OPENDATASOURCE|OPENQUERY|OPENROWSET|OPENXML|OPTION|OR|ORDER|OUTER|OVER|PERCENT|PLAN|PRECISION|PRIMARY|PRINT|PROC|PROCEDURE|PUBLIC|RAISERROR|READ|READTEXT|RECONFIGURE|REFERENCES|REPLICATION|RESTORE|RESTRICT|RETURN|REVOKE|RIGHT|ROLLBACK|ROWCOUNT|ROWGUIDCOL|RULE|SAVE|SCHEMA|SELECT|SESSION_USER|SET|SETUSER|SHUTDOWN|SOME|STATISTICS|SYSTEM_USER|TABLE|TEXTSIZE|THEN|TO|TOP|TRAN|TRANSACTION|TRIGGER|TRUNCATE|TSEQUAL|UNION|UNIQUE|UPDATE|UPDATETEXT|USE|USER|VALUES|VARYING|VIEW|WAITFOR|WHEN|WHERE|WHILE|WITH|WRITETEXT)(?=[^\w-]|$)/i,null],[PR.PR_LITERAL,/^[+-]?(?:0x[\da-f]+|(?:(?:\.\d+|\d+(?:\.\d*)?)(?:e[+\-]?\d+)?))/i],[PR.PR_PLAIN,/^[a-z_][\w-]*/i],[PR.PR_PUNCTUATION,/^[^\w\t\n\r \xA0\"\'][^\w\t\n\r \xA0+\-\"\']*/]]),['sql']) \ No newline at end of file diff --git a/dev-tools/prettify/lang-vb.js b/dev-tools/prettify/lang-vb.js new file mode 100644 index 00000000000..cabce853999 --- /dev/null +++ b/dev-tools/prettify/lang-vb.js @@ -0,0 +1 @@ +PR.registerLangHandler(PR.createSimpleLexer([[PR.PR_PLAIN,/^[\t\n\r \xA0\u2028\u2029]+/,null,' \n\r 
\xa0\u2028\u2029'],[PR.PR_STRING,/^(?:[\"\u201C\u201D](?:[^\"\u201C\u201D]|[\"\u201C\u201D]{2})(?:[\"\u201C\u201D]c|$)|[\"\u201C\u201D](?:[^\"\u201C\u201D]|[\"\u201C\u201D]{2})*(?:[\"\u201C\u201D]|$))/i,null,'\"\u201c\u201d'],[PR.PR_COMMENT,/^[\'\u2018\u2019][^\r\n\u2028\u2029]*/,null,'\'\u2018\u2019']],[[PR.PR_KEYWORD,/^(?:AddHandler|AddressOf|Alias|And|AndAlso|Ansi|As|Assembly|Auto|Boolean|ByRef|Byte|ByVal|Call|Case|Catch|CBool|CByte|CChar|CDate|CDbl|CDec|Char|CInt|Class|CLng|CObj|Const|CShort|CSng|CStr|CType|Date|Decimal|Declare|Default|Delegate|Dim|DirectCast|Do|Double|Each|Else|ElseIf|End|EndIf|Enum|Erase|Error|Event|Exit|Finally|For|Friend|Function|Get|GetType|GoSub|GoTo|Handles|If|Implements|Imports|In|Inherits|Integer|Interface|Is|Let|Lib|Like|Long|Loop|Me|Mod|Module|MustInherit|MustOverride|MyBase|MyClass|Namespace|New|Next|Not|NotInheritable|NotOverridable|Object|On|Option|Optional|Or|OrElse|Overloads|Overridable|Overrides|ParamArray|Preserve|Private|Property|Protected|Public|RaiseEvent|ReadOnly|ReDim|RemoveHandler|Resume|Return|Select|Set|Shadows|Shared|Short|Single|Static|Step|Stop|String|Structure|Sub|SyncLock|Then|Throw|To|Try|TypeOf|Unicode|Until|Variant|Wend|When|While|With|WithEvents|WriteOnly|Xor|EndIf|GoSub|Let|Variant|Wend)\b/i,null],[PR.PR_COMMENT,/^REM[^\r\n\u2028\u2029]*/i],[PR.PR_LITERAL,/^(?:True\b|False\b|Nothing\b|\d+(?:E[+\-]?\d+[FRD]?|[FRDSIL])?|(?:&H[0-9A-F]+|&O[0-7]+)[SIL]?|\d*\.\d+(?:E[+\-]?\d+)?[FRD]?|#\s+(?:\d+[\-\/]\d+[\-\/]\d+(?:\s+\d+:\d+(?::\d+)?(\s*(?:AM|PM))?)?|\d+:\d+(?::\d+)?(\s*(?:AM|PM))?)\s+#)/i],[PR.PR_PLAIN,/^(?:(?:[a-z]|_\w)\w*|\[(?:[a-z]|_\w)\w*\])/i],[PR.PR_PUNCTUATION,/^[^\w\t\n\r \"\'\[\]\xA0\u2018\u2019\u201C\u201D\u2028\u2029]+/],[PR.PR_PUNCTUATION,/^(?:\[|\])/]]),['vb','vbs']) \ No newline at end of file diff --git a/dev-tools/prettify/lang-wiki.js b/dev-tools/prettify/lang-wiki.js new file mode 100644 index 00000000000..00a1b6b4bf2 --- /dev/null +++ b/dev-tools/prettify/lang-wiki.js @@ -0,0 +1 @@ 
+PR.registerLangHandler(PR.createSimpleLexer([[PR.PR_PLAIN,/^[\t \xA0a-gi-z0-9]+/,null,' \xa0abcdefgijklmnopqrstuvwxyz0123456789'],[PR.PR_PUNCTUATION,/^[=*~\^\[\]]+/,null,'=*~^[]']],[['lang-wiki.meta',/(?:^^|\r\n?|\n)(#[a-z]+)\b/],[PR.PR_LITERAL,/^(?:[A-Z][a-z][a-z0-9]+[A-Z][a-z][a-zA-Z0-9]+)\b/],['lang-',/^\{\{\{([\s\S]+?)\}\}\}/],['lang-',/^`([^\r\n`]+)`/],[PR.PR_STRING,/^https?:\/\/[^\/?#\s]*(?:\/[^?#\s]*)?(?:\?[^#\s]*)?(?:#\S*)?/i],[PR.PR_PLAIN,/^(?:\r\n|[\s\S])[^#=*~^A-Zh\{`\[\r\n]*/]]),['wiki']),PR.registerLangHandler(PR.createSimpleLexer([[PR.PR_KEYWORD,/^#[a-z]+/i,null,'#']],[]),['wiki.meta']) \ No newline at end of file diff --git a/dev-tools/prettify/prettify.css b/dev-tools/prettify/prettify.css new file mode 100644 index 00000000000..2eb91bf01a9 --- /dev/null +++ b/dev-tools/prettify/prettify.css @@ -0,0 +1 @@ +.str,.atv{color:#080}.kwd,.tag{color:#008}.com{color:#800}.typ,.atn,.dec{color:#606}.lit{color:#066}.pun{color:#660}.pln{color:#000}pre.prettyprint{padding:2px;border:1px solid #888}@media print{.str{color:#060}.kwd,.tag{color:#006;font-weight:bold}.com{color:#600;font-style:italic}.typ{font-weight:bold}.lit{color:#044}.pun{color:#440}.atn,.typ{color:#404}.atv{color:#060}} \ No newline at end of file diff --git a/dev-tools/prettify/prettify.js b/dev-tools/prettify/prettify.js new file mode 100644 index 00000000000..29b5e738e45 --- /dev/null +++ b/dev-tools/prettify/prettify.js @@ -0,0 +1,46 @@ +window.PR_SHOULD_USE_CONTINUATION=true,window.PR_TAB_WIDTH=8,window.PR_normalizedHtml=window.PR=window.prettyPrintOne=window.prettyPrint=void +0,window._pr_isIE6=function(){var a=navigator&&navigator.userAgent&&navigator.userAgent.match(/\bMSIE ([678])\./);return a=a?+a[1]:false,window._pr_isIE6=function(){return a},a},(function(){var +a=true,b=null,c='break continue do else for if return while auto case char const default double enum extern float goto int long register short signed sizeof static struct switch typedef union unsigned void volatile catch 
class delete false import new operator private protected public this throw true try typeof ',d=c+'alignof align_union asm axiom bool '+'concept concept_map const_cast constexpr decltype '+'dynamic_cast explicit export friend inline late_check '+'mutable namespace nullptr reinterpret_cast static_assert static_cast '+'template typeid typename using virtual wchar_t where ',e=c+'abstract boolean byte extends final finally implements import '+'instanceof null native package strictfp super synchronized throws '+'transient ',f=e+'as base by checked decimal delegate descending event '+'fixed foreach from group implicit in interface internal into is lock '+'object out override orderby params partial readonly ref sbyte sealed '+'stackalloc string select uint ulong unchecked unsafe ushort var ',g=c+'debugger eval export function get null set undefined var with '+'Infinity NaN ',h='caller delete die do dump elsif eval exit foreach for goto if import last local my next no our print package redo require sub undef unless until use wantarray while BEGIN END ',i='break continue do else for if return while and as assert class def del elif except exec finally from global import in is lambda nonlocal not or pass print raise try with yield False True None ',j='break continue do else for if return while alias and begin case class def defined elsif end ensure false in module next nil not or redo rescue retry self super then true undef unless until when yield BEGIN END ',k='break continue do else for if return while case done elif esac eval fi function in local set then until ',l=d+f+g+h+i+j+k,m=(function(){var 
+a=['!','!=','!==','#','%','%=','&','&&','&&=','&=','(','*','*=','+=',',','-=','->','/','/=',':','::',';','<','<<','<<=','<=','=','==','===','>','>=','>>','>>=','>>>','>>>=','?','@','[','^','^=','^^','^^=','{','|','|=','||','||=','~','break','case','continue','delete','do','else','finally','instanceof','return','throw','try','typeof'],b='(?:^^|[+-]',c;for(c=0;c:&a-z])/g,'\\$1');return b+=')\\s*',b})(),n=/&/g,o=//g,q=/\"/g,r,s,t,u,v,w,x,y,z,A,B,C,D,E,F;function +G(a){return a.replace(n,'&').replace(o,'<').replace(p,'>').replace(q,'"')}function +H(a){return a.replace(n,'&').replace(o,'<').replace(p,'>')}C=/</g,B=/>/g,w=/'/g,E=/"/g,v=/&/g,D=/ /g;function +I(a){var b=a.indexOf('&'),c,d,e,f;if(b<0)return a;for(--b;(b=a.indexOf('&#',b+1))>=0;)d=a.indexOf(';',b),d>=0&&(e=a.substring(b+3,d),f=10,e&&e.charAt(0)==='x'&&(e=e.substring(1),f=16),c=parseInt(e,f),isNaN(c)||(a=a.substring(0,b)+String.fromCharCode(c)+a.substring(d+1)));return a.replace(C,'<').replace(B,'>').replace(w,'\'').replace(E,'\"').replace(D,' ').replace(v,'&')}function +J(a){return'XMP'===a.tagName}u=/[\r\n]/g;function K(c,d){var e;return'PRE'===c.tagName?a:u.test(d)?(e='',c.currentStyle?(e=c.currentStyle.whiteSpace):window.getComputedStyle&&(e=window.getComputedStyle(c,b).whiteSpace),!e||e==='pre'):a}function +L(a,b){var c,d,e,f;switch(a.nodeType){case 1:f=a.tagName.toLowerCase(),b.push('<',f);for(e=0;e');for(d=a.firstChild;d;d=d.nextSibling)L(d,b);(a.firstChild||!/^(?:br|link|img)$/.test(f))&&b.push('');break;case +2:b.push(a.name.toLowerCase(),'=\"',G(a.value),'\"');break;case 3:case 4:b.push(H(a.nodeValue))}}function +M(b){var c=0,d=false,e=false,f,g,h,i;for(f=0,g=b.length;f122||(g<65||q>90||d.push([Math.max(65,q)|32,Math.min(g,90)|32]),g<97||q>122||d.push([Math.max(97,q)&-33,Math.min(g,122)&-33]))}d.sort(function(a,b){return a[0]-b[0]||b[1]-a[1]}),f=[],i=[NaN,NaN];for(h=0;hp[0]&&(p[1]+1>p[0]&&n.push('-'),n.push(k(p[1])));return n.push(']'),n.join('')}function +m(a){var b=a.source.match(new 
RegExp('(?:\\[(?:[^\\x5C\\x5D]|\\\\[\\s\\S])*\\]|\\\\u[A-Fa-f0-9]{4}|\\\\x[A-Fa-f0-9]{2}|\\\\[0-9]+|\\\\[^ux0-9]|\\(\\?[:!=]|[\\(\\)\\^]|[^\\x5B\\x5C\\(\\)\\^]+)','g')),e=b.length,f=[],g,h,i,j,k;for(j=0,i=0;j=2&&g==='['?(b[j]=l(k)):g!=='\\'&&(b[j]=k.replace(/[a-zA-Z]/g,function(a){var +b=a.charCodeAt(0);return'['+String.fromCharCode(b&-33,b|32)+']'}));return b.join('')}i=[];for(f=0,g=b.length;f\n')),r=!/)[\r\n]+/g,'$1').replace(/(?:[\r\n]+[ \t]*)+/g,' ')),d;e=[];for(c=a.firstChild;c;c=c.nextSibling)L(c,e);return e.join('')}function +O(a){var c=0;return function(d){var e=b,f=0,g,h,i,j;for(h=0,i=d.length;h=0;j-=' '.length)e.push(' '.substring(0,j));f=h+1;break;case'\n':c=0;break;default:++c}}return e?(e.push(d.substring(f)),e.join('')):d}}z=new +RegExp('[^<]+|||\"\']|\'[^\']*\'|\"[^\"]*\")*>|<','g'),A=/^<\!--/,y=/^1&&j.charAt(0)==='<'){if(A.test(j))continue;if(y.test(j))c.push(j.substring(9,j.length-3)),d+=j.length-12;else +if(x.test(j))c.push('\n'),++d;else if(j.indexOf('nocode')>=0&&Q(j)){l=(j.match(F))[2],f=1;for(h=g+1;h=0;)d[o.charAt(i)]=m;n=m[1],k=''+n,g.hasOwnProperty(k)||(f.push(n),g[k]=b)}f.push(/[\0-\uffff]/),h=M(f)})(),f=c.length,g=/\S/,e=function(a){var +b=a.source,g=a.basePos,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y;i=[g,'pln'],s=0,y=b.match(h)||[],u={};for(v=0,q=y.length;v=5&&'lang-'===t.substring(0,5),n&&!(p&&typeof +p[1]==='string')&&(n=false,t='src'),n||(u[w]=t)}x=s,s+=w.length,n?(j=p[1],l=w.indexOf(j),k=l+j.length,p[2]&&(k=w.length-p[2].length,l=k-j.length),o=t.substring(5),R(g+x,w.substring(0,l),e,i),R(g+x+l,j,W(o,j),i),R(g+x+k,w.substring(k),e,i)):i.push(g+x,t)}a.decorations=i},e}function +T(a){var c=[],d=[],e,f;return 
a.tripleQuotedStrings?c.push(['str',/^(?:\'\'\'(?:[^\'\\]|\\[\s\S]|\'{1,2}(?=[^\']))*(?:\'\'\'|$)|\"\"\"(?:[^\"\\]|\\[\s\S]|\"{1,2}(?=[^\"]))*(?:\"\"\"|$)|\'(?:[^\\\']|\\[\s\S])*(?:\'|$)|\"(?:[^\\\"]|\\[\s\S])*(?:\"|$))/,b,'\'\"']):a.multiLineStrings?c.push(['str',/^(?:\'(?:[^\\\']|\\[\s\S])*(?:\'|$)|\"(?:[^\\\"]|\\[\s\S])*(?:\"|$)|\`(?:[^\\\`]|\\[\s\S])*(?:\`|$))/,b,'\'\"`']):c.push(['str',/^(?:\'(?:[^\\\'\r\n]|\\.)*(?:\'|$)|\"(?:[^\\\"\r\n]|\\.)*(?:\"|$))/,b,'\"\'']),a.verbatimStrings&&d.push(['str',/^@\"(?:[^\"]|\"\")*(?:\"|$)/,b]),a.hashComments&&(a.cStyleComments?(c.push(['com',/^#(?:(?:define|elif|else|endif|error|ifdef|include|ifndef|line|pragma|undef|warning)\b|[^\r\n]*)/,b,'#']),d.push(['str',/^<(?:(?:(?:\.\.\/)*|\/?)(?:[\w-]+(?:\/[\w-]+)+)?[\w-]+\.h|[a-z]\w*)>/,b])):c.push(['com',/^#[^\r\n]*/,b,'#'])),a.cStyleComments&&(d.push(['com',/^\/\/[^\r\n]*/,b]),d.push(['com',/^\/\*[\s\S]*?(?:\*\/|$)/,b])),a.regexLiterals&&(e='/(?=[^/*])(?:[^/\\x5B\\x5C]|\\x5C[\\s\\S]|\\x5B(?:[^\\x5C\\x5D]|\\x5C[\\s\\S])*(?:\\x5D|$))+/',d.push(['lang-regex',new +RegExp('^'+m+'('+e+')')])),f=a.keywords.replace(/^\s+|\s+$/g,''),f.length&&d.push(['kwd',new +RegExp('^(?:'+f.replace(/\s+/g,'|')+')\\b'),b]),c.push(['pln',/^\s+/,b,' \r\n \xa0']),d.push(['lit',/^@[a-z_$][a-z_$@0-9]*/i,b],['typ',/^@?[A-Z]+[a-z][A-Za-z_$@0-9]*/,b],['pln',/^[a-z_$][a-z_$@0-9]*/i,b],['lit',new +RegExp('^(?:0x[a-f0-9]+|(?:\\d(?:_\\d+)*\\d*(?:\\.\\d*)?|\\.\\d\\+)(?:e[+\\-]?\\d+)?)[a-z]*','i'),b,'0123456789'],['pun',/^.[^\s\w\.$@\'\"\`\/\#]*/,b]),S(c,d)}s=T({keywords:l,hashComments:a,cStyleComments:a,multiLineStrings:a,regexLiterals:a});function +U(c){var d=c.source,e=c.extractedTags,f=c.decorations,g=[],h=0,i=b,j=b,k=0,l=0,m=O(window.PR_TAB_WIDTH),n=/([\r\n ]) /g,o=/(^| ) /gm,p=/\r\n?|\n/g,q=/[ \r\n]$/,r=a,s;function +t(a){var c,e;a>h&&(i&&i!==j&&(g.push(''),i=b),!i&&j&&(i=j,g.push('')),c=H(m(d.substring(h,a))).replace(r?o:n,'$1 '),r=q.test(c),e=window._pr_isIE6()?' 
    ':'
    ',g.push(c.replace(p,e)),h=a)}while(a){k'),i=b),g.push(e[k+1]),k+=2;else +if(l'),c.prettyPrintedHtml=g.join('')}t={};function +V(a,b){var c,d;for(d=b.length;--d>=0;)c=b[d],t.hasOwnProperty(c)?'console'in window&&console.warn('cannot override language handler %s',c):(t[c]=a)}function +W(a,b){return a&&t.hasOwnProperty(a)||(a=/^\s*]*(?:>|$)/],['com',/^<\!--[\s\S]*?(?:-\->|$)/],['lang-',/^<\?([\s\S]+?)(?:\?>|$)/],['lang-',/^<%([\s\S]+?)(?:%>|$)/],['pun',/^(?:<[%?]|[%?]>)/],['lang-',/^]*>([\s\S]+?)<\/xmp\b[^>]*>/i],['lang-js',/^]*>([\s\S]*?)(<\/script\b[^>]*>)/i],['lang-css',/^]*>([\s\S]*?)(<\/style\b[^>]*>)/i],['lang-in.tag',/^(<\/?[a-z][^<>]*>)/i]]),['default-markup','htm','html','mxml','xhtml','xml','xsl']),V(S([['pln',/^[\s]+/,b,' \r\n'],['atv',/^(?:\"[^\"]*\"?|\'[^\']*\'?)/,b,'\"\'']],[['tag',/^^<\/?[a-z](?:[\w.:-]*\w)?|\/?>$/i],['atn',/^(?!style[\s=]|on)[a-z](?:[\w:-]*\w)?/i],['lang-uq.val',/^=\s*([^>\'\"\s]*(?:[^>\'\"\s\/]|\/(?=\s)))/],['pun',/^[=<>\/]+/],['lang-js',/^on\w+\s*=\s*\"([^\"]+)\"/i],['lang-js',/^on\w+\s*=\s*\'([^\']+)\'/i],['lang-js',/^on\w+\s*=\s*([^\"\'>\s]+)/i],['lang-css',/^style\s*=\s*\"([^\"]+)\"/i],['lang-css',/^style\s*=\s*\'([^\']+)\'/i],['lang-css',/^style\s*=\s*([^\"\'>\s]+)/i]]),['in.tag']),V(S([],[['atv',/^[\s\S]+/]]),['uq.val']),V(T({keywords:d,hashComments:a,cStyleComments:a}),['c','cc','cpp','cxx','cyc','m']),V(T({keywords:'null true false'}),['json']),V(T({keywords:f,hashComments:a,cStyleComments:a,verbatimStrings:a}),['cs']),V(T({keywords:e,cStyleComments:a}),['java']),V(T({keywords:k,hashComments:a,multiLineStrings:a}),['bsh','csh','sh']),V(T({keywords:i,hashComments:a,multiLineStrings:a,tripleQuotedStrings:a}),['cv','py']),V(T({keywords:h,hashComments:a,multiLineStrings:a,regexLiterals:a}),['perl','pl','pm']),V(T({keywords:j,hashComments:a,multiLineStrings:a,regexLiterals:a}),['rb']),V(T({keywords:g,cStyleComments:a,regexLiterals:a}),['js']),V(S([],[['str',/^[\s\S]+/]]),['regex']);function +X(a){var 
b=a.sourceCodeHtml,c=a.langExtension,d,e;a.prettyPrintedHtml=b;try{e=P(b),d=e.source,a.source=d,a.basePos=0,a.extractedTags=e.tags,W(c,d)(a),U(a)}catch(f){'console'in +window&&(console.log(f),console.trace())}}function Y(a,b){var c={sourceCodeHtml:a,langExtension:b};return X(c),c.prettyPrintedHtml}function +Z(c){var d=window._pr_isIE6(),e=d===6?'\r\n':'\r',f=[document.getElementsByTagName('pre'),document.getElementsByTagName('code'),document.getElementsByTagName('xmp')],g=[],h,i,j,k,l,m;for(i=0;i=0){f=e.className.match(/\blang-(\w+)\b/),f&&(f=f[1]),i=false;for(j=e.parentNode;j;j=j.parentNode)if((j.tagName==='pre'||j.tagName==='code'||j.tagName==='xmp')&&j.className&&j.className.indexOf('prettyprint')>=0){i=a;break}i||(d=N(e),d=d.replace(/(?:\r\n?|\n)$/,''),m={sourceCodeHtml:d,langExtension:f,sourceNode:e},X(m),o())}}k=0;)i=j[h],i.parentNode.replaceChild(document.createTextNode(e),i)}}n()}window.PR_normalizedHtml=L,window.prettyPrintOne=Y,window.prettyPrint=Z,window.PR={combinePrefixPatterns:M,createSimpleLexer:S,registerLangHandler:V,sourceDecorator:T,PR_ATTRIB_NAME:'atn',PR_ATTRIB_VALUE:'atv',PR_COMMENT:'com',PR_DECLARATION:'dec',PR_KEYWORD:'kwd',PR_LITERAL:'lit',PR_NOCODE:'nocode',PR_PLAIN:'pln',PR_PUNCTUATION:'pun',PR_SOURCE:'src',PR_STRING:'str',PR_TAG:'tag',PR_TYPE:'typ'}})() \ No newline at end of file diff --git a/dev-tools/prettify/stylesheet+prettify.css b/dev-tools/prettify/stylesheet+prettify.css new file mode 100644 index 00000000000..1ceb0297b49 --- /dev/null +++ b/dev-tools/prettify/stylesheet+prettify.css @@ -0,0 +1,30 @@ +/* Javadoc style sheet */ + +/* Define colors, fonts and other style attributes here to override the defaults */ +.str,.atv{color:#080}.kwd,.tag{color:#008}.com{color:#800}.typ,.atn,.dec{color:#606}.lit{color:#066}.pun{color:#660}.pln{color:#000}pre.prettyprint{padding:2px;border:1px solid #888}@media 
print{.str{color:#060}.kwd,.tag{color:#006;font-weight:bold}.com{color:#600;font-style:italic}.typ{font-weight:bold}.lit{color:#044}.pun{color:#440}.atn,.typ{color:#404}.atv{color:#060}} + +/* Page background color */ +body { background-color: #FFFFFF; color:#000000 } + +/* Headings */ +h1 { font-size: 145% } + +/* Table colors */ +.TableHeadingColor { background: #CCCCFF; color:#000000 } /* Dark mauve */ +.TableSubHeadingColor { background: #EEEEFF; color:#000000 } /* Light mauve */ +.TableRowColor { background: #FFFFFF; color:#000000 } /* White */ + +/* Font used in left-hand frame lists */ +.FrameTitleFont { font-size: 100%; font-family: Helvetica, Arial, sans-serif; color:#000000 } +.FrameHeadingFont { font-size: 90%; font-family: Helvetica, Arial, sans-serif; color:#000000 } +.FrameItemFont { font-size: 90%; font-family: Helvetica, Arial, sans-serif; color:#000000 } + +/* Navigation bar fonts and colors */ +.NavBarCell1 { background-color:#EEEEFF; color:#000000} /* Light mauve */ +.NavBarCell1Rev { background-color:#00008B; color:#FFFFFF} /* Dark Blue */ +.NavBarFont1 { font-family: Arial, Helvetica, sans-serif; color:#000000;color:#000000;} +.NavBarFont1Rev { font-family: Arial, Helvetica, sans-serif; color:#FFFFFF;color:#FFFFFF;} + +.NavBarCell2 { font-family: Arial, Helvetica, sans-serif; background-color:#FFFFFF; color:#000000} +.NavBarCell3 { font-family: Arial, Helvetica, sans-serif; background-color:#FFFFFF; color:#000000} + diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index 9a77cc1956d..32b9edeb024 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -1027,6 +1027,9 @@ Documentation * LUCENE-2625: Add a note to IndexReader.termDocs() with additional verbiage that the TermEnum must be seeked since it is unpositioned. (Adriano Crestani via Robert Muir) + +* LUCENE-2894: Use google-code-prettify for syntax highlighting in javadoc. 
+ (Koji Sekiguchi) ================== Release 2.9.4 / 3.0.3 2010-12-03 ==================== diff --git a/lucene/NOTICE.txt b/lucene/NOTICE.txt index faeb75525b8..46f1322b35d 100644 --- a/lucene/NOTICE.txt +++ b/lucene/NOTICE.txt @@ -27,3 +27,6 @@ The class org.apache.lucene.SorterTemplate was inspired by CGLIB's class with the same name. The implementation part is mainly done using pre-existing Lucene sorting code. In-place stable mergesort was borrowed from CGLIB, which is Apache-licensed. + +The Google Code Prettify is Apache License 2.0. +See http://code.google.com/p/google-code-prettify/ diff --git a/lucene/common-build.xml b/lucene/common-build.xml index 86e31dd6382..b98368c1b3e 100644 --- a/lucene/common-build.xml +++ b/lucene/common-build.xml @@ -25,6 +25,9 @@ + + + @@ -709,6 +712,9 @@ + + + +
    + + ]]>
    diff --git a/lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/package.html b/lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/package.html index ee023abfa70..a71dfb3040d 100644 --- a/lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/package.html +++ b/lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/package.html @@ -66,7 +66,7 @@ sample text.

    Step 1.

    In Step 1, Fast Vector Highlighter generates {@link org.apache.lucene.search.vectorhighlight.FieldQuery.QueryPhraseMap} from the user query. QueryPhraseMap consists of the following members:

    -
    +
     public class QueryPhraseMap {
       boolean terminal;
       int slop;   // valid if terminal == true and phraseHighlight == true
    diff --git a/lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java b/lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java
    index d8d4af6e044..2b9b429c47a 100644
    --- a/lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java
    +++ b/lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java
    @@ -92,20 +92,20 @@ import org.apache.lucene.util.PriorityQueue;
      * if you want pseudo code, the simplest possible usage is as follows. The bold
      * fragment is specific to this class.
      *
    - * 
    + * 
      *
      * IndexReader ir = ...
      * IndexSearcher is = ...
    - * 
    + * 
      * MoreLikeThis mlt = new MoreLikeThis(ir);
    - * Reader target = ... // orig source of doc you want to find similarities to
    + * Reader target = ... // orig source of doc you want to find similarities to
      * Query query = mlt.like( target);
    - * 
    + * 
      * Hits hits = is.search(query);
    - * // now the usual iteration thru 'hits' - the only thing to watch for is to make sure
    - * you ignore the doc if it matches your 'target' document, as it should be similar to itself 
    + * // now the usual iteration thru 'hits' - the only thing to watch for is to make sure
    + * //you ignore the doc if it matches your 'target' document, as it should be similar to itself
      *
    - * 
    + *
    * * Thus you: *
      diff --git a/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/geohash/GeoHashUtils.java b/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/geohash/GeoHashUtils.java index be7c1433d54..5ace9adc87e 100644 --- a/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/geohash/GeoHashUtils.java +++ b/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/geohash/GeoHashUtils.java @@ -22,7 +22,7 @@ import java.util.Map; /** * Utilities for encoding and decoding geohashes. Based on - * http://en.wikipedia.org/wiki/Geohash. + * http://en.wikipedia.org/wiki/Geohash. */ public class GeoHashUtils { diff --git a/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynonymMap.java b/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynonymMap.java index ee7eabd9cae..099d653bef1 100644 --- a/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynonymMap.java +++ b/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynonymMap.java @@ -52,15 +52,17 @@ import java.util.TreeSet; * high-frequency lookups of medium size synonym tables. *

      * Example Usage: - *

      + * 
        * String[] words = new String[] { "hard", "woods", "forest", "wolfish", "xxxx"};
        * SynonymMap map = new SynonymMap(new FileInputStream("samples/fulltext/wn_s.pl"));
        * for (int i = 0; i < words.length; i++) {
        *     String[] synonyms = map.getSynonyms(words[i]);
        *     System.out.println(words[i] + ":" + java.util.Arrays.asList(synonyms).toString());
        * }
      - * 
      + * 
      + * * Example output: + *
        * hard:[arduous, backbreaking, difficult, fermented, firmly, grueling, gruelling, heavily, heavy, intemperately, knockout, laborious, punishing, severe, severely, strong, toilsome, tough]
        * woods:[forest, wood]
        * forest:[afforest, timber, timberland, wood, woodland, woods]
      diff --git a/lucene/src/java/overview.html b/lucene/src/java/overview.html
      index cf1da4fd65c..486da10133a 100644
      --- a/lucene/src/java/overview.html
      +++ b/lucene/src/java/overview.html
      @@ -45,48 +45,36 @@ to check if the results are what we expect):

      -
      - - - - - - +
      +    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
       
      -
      - -    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
      -
      -    // Store the index in memory:
      -    Directory directory = new RAMDirectory();
      -    // To store an index on disk, use this instead:
      -    //Directory directory = FSDirectory.open("/tmp/testindex");
      -    IndexWriter iwriter = new IndexWriter(directory, analyzer, true,
      -                                          new IndexWriter.MaxFieldLength(25000));
      -    Document doc = new Document();
      -    String text = "This is the text to be indexed.";
      -    doc.add(new Field("fieldname", text, Field.Store.YES,
      -        Field.Index.ANALYZED));
      -    iwriter.addDocument(doc);
      -    iwriter.close();
      -    
      -    // Now search the index:
      -    IndexSearcher isearcher = new IndexSearcher(directory, true)// read-only=true
      -    // Parse a simple query that searches for "text":
      -    QueryParser parser = new QueryParser("fieldname", analyzer);
      -    Query query = parser.parse("text");
      -    ScoreDoc[] hits = isearcher.search(query, null, 1000).scoreDocs;
      -    assertEquals(1, hits.length);
      -    // Iterate through the results:
      -    for (int i = 0; i < hits.length; i++) {
      -      Document hitDoc = isearcher.doc(hits[i].doc);
      -      assertEquals("This is the text to be indexed.", hitDoc.get("fieldname"));
      -    }
      -    isearcher.close();
      -    directory.close();
      - -
      -
      + // Store the index in memory: + Directory directory = new RAMDirectory(); + // To store an index on disk, use this instead: + //Directory directory = FSDirectory.open("/tmp/testindex"); + IndexWriter iwriter = new IndexWriter(directory, analyzer, true, + new IndexWriter.MaxFieldLength(25000)); + Document doc = new Document(); + String text = "This is the text to be indexed."; + doc.add(new Field("fieldname", text, Field.Store.YES, + Field.Index.ANALYZED)); + iwriter.addDocument(doc); + iwriter.close(); + + // Now search the index: + IndexSearcher isearcher = new IndexSearcher(directory, true); // read-only=true + // Parse a simple query that searches for "text": + QueryParser parser = new QueryParser("fieldname", analyzer); + Query query = parser.parse("text"); + ScoreDoc[] hits = isearcher.search(query, null, 1000).scoreDocs; + assertEquals(1, hits.length); + // Iterate through the results: + for (int i = 0; i < hits.length; i++) { + Document hitDoc = isearcher.doc(hits[i].doc); + assertEquals("This is the text to be indexed.", hitDoc.get("fieldname")); + } + isearcher.close(); + directory.close();
      diff --git a/solr/NOTICE.txt b/solr/NOTICE.txt index 10632d30b19..fa2fc0556c7 100644 --- a/solr/NOTICE.txt +++ b/solr/NOTICE.txt @@ -156,7 +156,6 @@ This product includes software developed by the Carrot2 Project. See http://project.carrot2.org/ - ========================================================================= == Guava Notice == ========================================================================= @@ -167,6 +166,16 @@ This product includes software developed by the Google Guava project. See http://code.google.com/p/guava-libraries/ +========================================================================= +== Prettify Notice == +========================================================================= + +Copyright ???? Google, Inc. + +This product includes software developed by the Google Prettify project. + +See http://code.google.com/p/google-code-prettify/ + ========================================================================= == Jackson Notice == ========================================================================= diff --git a/solr/build.xml b/solr/build.xml index 9214ee4d6ce..06fcb9df1de 100644 --- a/solr/build.xml +++ b/solr/build.xml @@ -18,6 +18,8 @@ --> + + diff --git a/solr/common-build.xml b/solr/common-build.xml index f5aae9dae88..966607530b1 100644 --- a/solr/common-build.xml +++ b/solr/common-build.xml @@ -33,6 +33,9 @@ + + + @@ -344,6 +347,9 @@ + + + @@ -368,6 +375,10 @@ +
      + + ]]>
      diff --git a/solr/contrib/clustering/src/main/java/org/apache/solr/handler/clustering/ClusteringComponent.java b/solr/contrib/clustering/src/main/java/org/apache/solr/handler/clustering/ClusteringComponent.java index 5996b60c73f..0259bb80b62 100644 --- a/solr/contrib/clustering/src/main/java/org/apache/solr/handler/clustering/ClusteringComponent.java +++ b/solr/contrib/clustering/src/main/java/org/apache/solr/handler/clustering/ClusteringComponent.java @@ -46,6 +46,13 @@ import java.util.Set; *

      * This engine is experimental. Output from this engine is subject to change in future releases. * + *

      + * <searchComponent class="org.apache.solr.handler.clustering.ClusteringComponent" name="clustering">
      + *   <lst name="engine">
      + *     <str name="name">default</str>
      + *     <str name="carrot.algorithm">org.carrot2.clustering.lingo.LingoClusteringAlgorithm</str>
      + *   </lst>
      + * </searchComponent>
      */ public class ClusteringComponent extends SearchComponent implements SolrCoreAware { private transient static Logger log = LoggerFactory.getLogger(ClusteringComponent.class); diff --git a/solr/src/java/org/apache/solr/analysis/StopFilterFactory.java b/solr/src/java/org/apache/solr/analysis/StopFilterFactory.java index 6f88050ea94..67fa11c17f8 100644 --- a/solr/src/java/org/apache/solr/analysis/StopFilterFactory.java +++ b/solr/src/java/org/apache/solr/analysis/StopFilterFactory.java @@ -29,6 +29,15 @@ import java.util.Set; import java.io.IOException; /** + * Factory for {@link StopFilter}. + *
      + * <fieldType name="text_stop" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
      + *   <analyzer>
      + *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
      + *     <filter class="solr.StopFilterFactory" ignoreCase="true"
      + *             words="stopwords.txt" enablePositionIncrements="true"/>
      + *   </analyzer>
      + * </fieldType>
      * @version $Id$ */ public class StopFilterFactory extends BaseTokenFilterFactory implements ResourceLoaderAware { diff --git a/solr/src/java/org/apache/solr/handler/component/TermVectorComponent.java b/solr/src/java/org/apache/solr/handler/component/TermVectorComponent.java index 8e3e8c8ef3c..56b9d4826f2 100644 --- a/solr/src/java/org/apache/solr/handler/component/TermVectorComponent.java +++ b/solr/src/java/org/apache/solr/handler/component/TermVectorComponent.java @@ -61,6 +61,20 @@ import java.util.Map; * term, frequency, position, offset, IDF. *

      * Note Returning IDF can be expensive. + * + *

      + * <searchComponent name="tvComponent" class="solr.TermVectorComponent"/>
      + * 
      + * <requestHandler name="/terms" class="solr.SearchHandler">
      + *   <lst name="defaults">
      + *     <bool name="tv">true</bool>
      + *   </lst>
      + *   <arr name="last-component">
      + *     <str>tvComponent</str>
      + *   </arr>
      + * </requestHandler>
      + * + * @version $Id$ */ public class TermVectorComponent extends SearchComponent implements SolrCoreAware { diff --git a/solr/src/java/org/apache/solr/handler/component/TermsComponent.java b/solr/src/java/org/apache/solr/handler/component/TermsComponent.java index 93929eb2fab..ba99b3fe3b0 100644 --- a/solr/src/java/org/apache/solr/handler/component/TermsComponent.java +++ b/solr/src/java/org/apache/solr/handler/component/TermsComponent.java @@ -41,9 +41,22 @@ import java.util.regex.Pattern; /** * Return TermEnum information, useful for things like auto suggest. + * + *
      + * <searchComponent name="termsComponent" class="solr.TermsComponent"/>
      + * 
      + * <requestHandler name="/terms" class="solr.SearchHandler">
      + *   <lst name="defaults">
      + *     <bool name="terms">true</bool>
      + *   </lst>
      + *   <arr name="components">
      + *     <str>termsComponent</str>
      + *   </arr>
      + * </requestHandler>
      * * @see org.apache.solr.common.params.TermsParams * See Lucene's TermEnum class + * @version $Id$ */ public class TermsComponent extends SearchComponent { public static final int UNLIMITED_MAX_COUNT = -1; diff --git a/solr/src/solrj/org/apache/solr/client/solrj/impl/CommonsHttpSolrServer.java b/solr/src/solrj/org/apache/solr/client/solrj/impl/CommonsHttpSolrServer.java index 7900ab8ac7c..477a90035b7 100644 --- a/solr/src/solrj/org/apache/solr/client/solrj/impl/CommonsHttpSolrServer.java +++ b/solr/src/solrj/org/apache/solr/client/solrj/impl/CommonsHttpSolrServer.java @@ -61,6 +61,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** + * The {@link CommonsHttpSolrServer} uses the Apache Commons HTTP Client to connect to solr. + *
      SolrServer server = new CommonsHttpSolrServer( url );
      * * @version $Id$ * @since solr 1.3 From 5c1983b27b922943fd2d5737727b4aba29fd163c Mon Sep 17 00:00:00 2001 From: Steven Rowe Date: Sun, 6 Feb 2011 01:16:15 +0000 Subject: [PATCH 099/185] Make the lucene module's run configuration appear on the list, and make it selected by default git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1067566 13f79535-47bb-0310-9956-ffa450edef68 --- dev-tools/idea/.idea/workspace.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dev-tools/idea/.idea/workspace.xml b/dev-tools/idea/.idea/workspace.xml index f3f0c4d4c0a..150a2334ca6 100644 --- a/dev-tools/idea/.idea/workspace.xml +++ b/dev-tools/idea/.idea/workspace.xml @@ -1,6 +1,6 @@ - + - + + + org.codehaus.mojo + build-helper-maven-plugin + + + add-test-source + generate-test-sources + + add-test-source + + + + test-framework + + + + + diff --git a/dev-tools/maven/lucene/src/test-framework/pom.xml.template b/dev-tools/maven/lucene/src/test-framework/pom.xml.template new file mode 100644 index 00000000000..e391e5f83ba --- /dev/null +++ b/dev-tools/maven/lucene/src/test-framework/pom.xml.template @@ -0,0 +1,89 @@ + + + 4.0.0 + + org.apache.lucene + lucene-parent + @version@ + ../../pom.xml + + org.apache.lucene + lucene-test-framework + jar + Lucene Test Framework + Apache Lucene Java Test Framework + + lucene/src/test-framework + ../../build + + + + ${project.groupId} + lucene-core + ${project.version} + test + + + junit + junit + test + + + org.apache.ant + ant-junit + test + + + + ${build-directory}/classes/test-framework + ${build-directory}/classes/test-framework + . 
+ + + ${project.build.testSourceDirectory} + + **/*.java + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + true + + + + + diff --git a/dev-tools/maven/modules/analysis/common/pom.xml.template b/dev-tools/maven/modules/analysis/common/pom.xml.template index 99eb404b37c..19f8615459e 100644 --- a/dev-tools/maven/modules/analysis/common/pom.xml.template +++ b/dev-tools/maven/modules/analysis/common/pom.xml.template @@ -48,6 +48,13 @@ test-jar test + + ${project.groupId} + lucene-test-framework + ${project.version} + test-jar + test + junit junit diff --git a/dev-tools/maven/modules/analysis/icu/pom.xml.template b/dev-tools/maven/modules/analysis/icu/pom.xml.template index e98fff3f525..363364c88b6 100644 --- a/dev-tools/maven/modules/analysis/icu/pom.xml.template +++ b/dev-tools/maven/modules/analysis/icu/pom.xml.template @@ -51,6 +51,13 @@ test-jar test + + ${project.groupId} + lucene-test-framework + ${project.version} + test-jar + test + ${project.groupId} lucene-analyzers-common diff --git a/dev-tools/maven/modules/analysis/smartcn/pom.xml.template b/dev-tools/maven/modules/analysis/smartcn/pom.xml.template index 10f1e915651..26a7e4f8a13 100644 --- a/dev-tools/maven/modules/analysis/smartcn/pom.xml.template +++ b/dev-tools/maven/modules/analysis/smartcn/pom.xml.template @@ -43,7 +43,7 @@ ${project.groupId} - lucene-core + lucene-test-framework ${project.version} test-jar test diff --git a/dev-tools/maven/modules/analysis/stempel/pom.xml.template b/dev-tools/maven/modules/analysis/stempel/pom.xml.template index 979498fd370..cacb74d7df9 100644 --- a/dev-tools/maven/modules/analysis/stempel/pom.xml.template +++ b/dev-tools/maven/modules/analysis/stempel/pom.xml.template @@ -43,7 +43,7 @@ ${project.groupId} - lucene-core + lucene-test-framework ${project.version} test-jar test diff --git a/dev-tools/maven/modules/benchmark/pom.xml.template 
b/dev-tools/maven/modules/benchmark/pom.xml.template index 263a38fa5aa..184c190434a 100755 --- a/dev-tools/maven/modules/benchmark/pom.xml.template +++ b/dev-tools/maven/modules/benchmark/pom.xml.template @@ -43,7 +43,7 @@ ${project.groupId} - lucene-core + lucene-test-framework ${project.version} test-jar test diff --git a/dev-tools/maven/solr/contrib/analysis-extras/pom.xml.template b/dev-tools/maven/solr/contrib/analysis-extras/pom.xml.template index ff4e14d7aba..0ff4b13d8fe 100644 --- a/dev-tools/maven/solr/contrib/analysis-extras/pom.xml.template +++ b/dev-tools/maven/solr/contrib/analysis-extras/pom.xml.template @@ -76,7 +76,7 @@ org.apache.lucene - lucene-core + lucene-test-framework ${project.version} test-jar test diff --git a/dev-tools/maven/solr/contrib/clustering/pom.xml.template b/dev-tools/maven/solr/contrib/clustering/pom.xml.template index 18afe9e58e2..3b47b219e92 100644 --- a/dev-tools/maven/solr/contrib/clustering/pom.xml.template +++ b/dev-tools/maven/solr/contrib/clustering/pom.xml.template @@ -61,7 +61,7 @@ org.apache.lucene - lucene-core + lucene-test-framework ${project.version} test-jar test diff --git a/dev-tools/maven/solr/contrib/dataimporthandler/src/extras/pom.xml.template b/dev-tools/maven/solr/contrib/dataimporthandler/src/extras/pom.xml.template index 6a93cfb2102..739465af469 100644 --- a/dev-tools/maven/solr/contrib/dataimporthandler/src/extras/pom.xml.template +++ b/dev-tools/maven/solr/contrib/dataimporthandler/src/extras/pom.xml.template @@ -68,7 +68,7 @@ org.apache.lucene - lucene-core + lucene-test-framework ${project.version} test-jar test diff --git a/dev-tools/maven/solr/contrib/dataimporthandler/src/pom.xml.template b/dev-tools/maven/solr/contrib/dataimporthandler/src/pom.xml.template index ccf9242c062..e0ea149c3c4 100644 --- a/dev-tools/maven/solr/contrib/dataimporthandler/src/pom.xml.template +++ b/dev-tools/maven/solr/contrib/dataimporthandler/src/pom.xml.template @@ -61,7 +61,7 @@ org.apache.lucene - lucene-core + 
lucene-test-framework ${project.version} test-jar test diff --git a/dev-tools/maven/solr/contrib/extraction/pom.xml.template b/dev-tools/maven/solr/contrib/extraction/pom.xml.template index 8bd83173f7c..6d76eaece14 100644 --- a/dev-tools/maven/solr/contrib/extraction/pom.xml.template +++ b/dev-tools/maven/solr/contrib/extraction/pom.xml.template @@ -64,7 +64,7 @@ org.apache.lucene - lucene-core + lucene-test-framework ${project.version} test-jar test diff --git a/dev-tools/maven/solr/contrib/uima/pom.xml.template b/dev-tools/maven/solr/contrib/uima/pom.xml.template index a7802c5b21b..ef314604ba1 100644 --- a/dev-tools/maven/solr/contrib/uima/pom.xml.template +++ b/dev-tools/maven/solr/contrib/uima/pom.xml.template @@ -56,7 +56,7 @@ org.apache.lucene - lucene-core + lucene-test-framework ${project.version} test-jar test diff --git a/dev-tools/maven/solr/src/pom.xml.template b/dev-tools/maven/solr/src/pom.xml.template index df9bb5025ff..ec3d6258643 100644 --- a/dev-tools/maven/solr/src/pom.xml.template +++ b/dev-tools/maven/solr/src/pom.xml.template @@ -48,7 +48,7 @@ org.apache.lucene - lucene-core + lucene-test-framework ${project.version} test-jar test diff --git a/dev-tools/maven/solr/src/solrj/pom.xml.template b/dev-tools/maven/solr/src/solrj/pom.xml.template index dcf961b2dbe..e4ed4c7cf2f 100644 --- a/dev-tools/maven/solr/src/solrj/pom.xml.template +++ b/dev-tools/maven/solr/src/solrj/pom.xml.template @@ -44,7 +44,7 @@ org.apache.lucene - lucene-core + lucene-test-framework ${project.version} test-jar test diff --git a/dev-tools/testjar/testfiles b/dev-tools/testjar/testfiles deleted file mode 100755 index 84d8bfb2eab..00000000000 --- a/dev-tools/testjar/testfiles +++ /dev/null @@ -1,24 +0,0 @@ -core.test.files=\ - org/apache/lucene/util/_TestUtil.java,\ - org/apache/lucene/util/LineFileDocs.java,\ - org/apache/lucene/util/LuceneJUnitDividingSelector.java,\ - org/apache/lucene/util/LuceneJUnitResultFormatter.java,\ - org/apache/lucene/util/LuceneTestCase.java,\ 
- org/apache/lucene/util/automaton/AutomatonTestUtil.java,\ - org/apache/lucene/search/QueryUtils.java,\ - org/apache/lucene/analysis/BaseTokenStreamTestCase.java,\ - org/apache/lucene/analysis/MockAnalyzer.java,\ - org/apache/lucene/analysis/MockPayloadAnalyzer.java,\ - org/apache/lucene/analysis/MockTokenFilter.java,\ - org/apache/lucene/analysis/MockTokenizer.java,\ - org/apache/lucene/index/MockIndexInput.java,\ - org/apache/lucene/index/RandomIndexWriter.java,\ - org/apache/lucene/index/DocHelper.java,\ - org/apache/lucene/codecs/preflexrw/PreFlexFieldsWriter.java,\ - org/apache/lucene/codecs/preflexrw/PreFlexRWCodec.java,\ - org/apache/lucene/codecs/preflexrw/TermInfosWriter.java,\ - org/apache/lucene/codecs/mockrandom/MockRandomCodec.java,\ - org/apache/lucene/store/_TestHelper.java,\ - org/apache/lucene/store/MockDirectoryWrapper.java,\ - org/apache/lucene/store/MockIndexInputWrapper.java,\ - org/apache/lucene/store/MockIndexOutputWrapper.java,\ diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index 32b9edeb024..bdd4940c487 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -978,6 +978,10 @@ Build * LUCENE-2657: Switch from using Maven POM templates to full POMs when generating Maven artifacts (Steven Rowe) +* LUCENE-2609: Added jar-test-framework Ant target which packages Lucene's + tests' framework classes. (Drew Farris, Grant Ingersoll, Shai Erera, Steven + Rowe) + Test Cases * LUCENE-2037 Allow Junit4 tests in our environment (Erick Erickson diff --git a/lucene/build.xml b/lucene/build.xml index 3fe5b815403..4cb7a82a532 100644 --- a/lucene/build.xml +++ b/lucene/build.xml @@ -17,7 +17,8 @@ limitations under the License. 
--> - + @@ -32,12 +33,13 @@ + - + @@ -153,7 +155,7 @@ + depends="javadocs-all, javadocs-core, javadocs-contrib, javadocs-test-framework"> ${Name} ${version} Javadoc Index @@ -259,7 +261,7 @@ - + @@ -389,7 +391,7 @@ - + @@ -405,6 +407,22 @@ classifier="tests"/> + + + + + + + + + + + + + @@ -602,16 +620,30 @@ - - - - - - - - - + + + + + + + + + + + + + + + + + + + diff --git a/lucene/common-build.xml b/lucene/common-build.xml index b98368c1b3e..70baa822b5b 100644 --- a/lucene/common-build.xml +++ b/lucene/common-build.xml @@ -113,6 +113,7 @@ + @@ -363,6 +364,8 @@ + + @@ -372,12 +375,13 @@ - + + manifest="${manifest.file}" + excludes="@{excludes}"> @@ -390,7 +394,12 @@ - + + + + + @@ -553,6 +562,9 @@ + + + @@ -583,6 +595,9 @@ + + + @@ -653,6 +668,7 @@ description="runs the tasks over source and test files"> + diff --git a/lucene/contrib/contrib-build.xml b/lucene/contrib/contrib-build.xml index 77b1dd04bc6..bbd35082ac2 100644 --- a/lucene/contrib/contrib-build.xml +++ b/lucene/contrib/contrib-build.xml @@ -40,6 +40,7 @@ + @@ -50,7 +51,6 @@ - diff --git a/lucene/contrib/db/bdb-je/build.xml b/lucene/contrib/db/bdb-je/build.xml index b751d84287d..cc8c1c8d96b 100644 --- a/lucene/contrib/db/bdb-je/build.xml +++ b/lucene/contrib/db/bdb-je/build.xml @@ -39,13 +39,6 @@ - - - - - - - - - - - - - - + diff --git a/modules/analysis/icu/build.xml b/modules/analysis/icu/build.xml index db7969a114b..2b443ce464b 100644 --- a/modules/analysis/icu/build.xml +++ b/modules/analysis/icu/build.xml @@ -49,6 +49,7 @@ + diff --git a/modules/analysis/phonetic/build.xml b/modules/analysis/phonetic/build.xml index 9efd18a94b8..e8625d49221 100644 --- a/modules/analysis/phonetic/build.xml +++ b/modules/analysis/phonetic/build.xml @@ -48,6 +48,7 @@ + diff --git a/modules/analysis/smartcn/build.xml b/modules/analysis/smartcn/build.xml index 841e680b651..075f8f497e6 100644 --- a/modules/analysis/smartcn/build.xml +++ b/modules/analysis/smartcn/build.xml @@ -39,6 +39,7 @@ + diff --git 
a/modules/analysis/stempel/build.xml b/modules/analysis/stempel/build.xml index 90c5065eb2a..517591f2727 100644 --- a/modules/analysis/stempel/build.xml +++ b/modules/analysis/stempel/build.xml @@ -38,6 +38,7 @@ + diff --git a/solr/build.xml b/solr/build.xml index 06fcb9df1de..d8021276039 100644 --- a/solr/build.xml +++ b/solr/build.xml @@ -342,7 +342,7 @@ - + @@ -350,7 +350,7 @@ - + diff --git a/solr/contrib/analysis-extras/build.xml b/solr/contrib/analysis-extras/build.xml index 2babe1a7e96..1b135e3c4af 100644 --- a/solr/contrib/analysis-extras/build.xml +++ b/solr/contrib/analysis-extras/build.xml @@ -73,7 +73,7 @@ - + diff --git a/solr/contrib/clustering/build.xml b/solr/contrib/clustering/build.xml index a8036428c9a..0621df640d3 100644 --- a/solr/contrib/clustering/build.xml +++ b/solr/contrib/clustering/build.xml @@ -42,7 +42,7 @@ - + diff --git a/solr/contrib/dataimporthandler/build.xml b/solr/contrib/dataimporthandler/build.xml index c21c0f9a648..7772fcbb4e8 100644 --- a/solr/contrib/dataimporthandler/build.xml +++ b/solr/contrib/dataimporthandler/build.xml @@ -56,7 +56,7 @@ - + @@ -68,7 +68,7 @@ - + diff --git a/solr/contrib/extraction/build.xml b/solr/contrib/extraction/build.xml index 73182c45264..de7542d54b4 100644 --- a/solr/contrib/extraction/build.xml +++ b/solr/contrib/extraction/build.xml @@ -40,7 +40,7 @@ - + diff --git a/solr/contrib/uima/build.xml b/solr/contrib/uima/build.xml index 34b190b7727..34dbefec748 100644 --- a/solr/contrib/uima/build.xml +++ b/solr/contrib/uima/build.xml @@ -41,7 +41,7 @@ - + From 5ab6a5e7ddbdf14fed1ae51e4f5d1b976cf3a8c5 Mon Sep 17 00:00:00 2001 From: Doron Cohen Date: Sun, 6 Feb 2011 21:25:53 +0000 Subject: [PATCH 103/185] LUCENE-1540: Improvements to contrib.benchmark for TREC collections - bring back case insensitivity to path names using Locale.ENGLISH - port/merged from 3x r1067705. 
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1067772 13f79535-47bb-0310-9956-ffa450edef68 --- .../lucene/benchmark/byTask/feeds/TrecDocParser.java | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecDocParser.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecDocParser.java index 5b319d8a8fa..216cdebd7c7 100644 --- a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecDocParser.java +++ b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecDocParser.java @@ -20,6 +20,7 @@ package org.apache.lucene.benchmark.byTask.feeds; import java.io.File; import java.io.IOException; import java.util.HashMap; +import java.util.Locale; import java.util.Map; /** @@ -29,12 +30,7 @@ import java.util.Map; public abstract class TrecDocParser { /** Types of trec parse paths, */ - public enum ParsePathType { GOV2("gov2"), FBIS("fbis"), FT("ft"), FR94("fr94"), LATIMES("latimes"); - public final String dirName; - private ParsePathType(String dirName) { - this.dirName = dirName; - } - } + public enum ParsePathType { GOV2, FBIS, FT, FR94, LATIMES } /** trec parser type used for unknown extensions */ public static final ParsePathType DEFAULT_PATH_TYPE = ParsePathType.GOV2; @@ -51,7 +47,7 @@ public abstract class TrecDocParser { static final Map pathName2Type = new HashMap(); static { for (ParsePathType ppt : ParsePathType.values()) { - pathName2Type.put(ppt.dirName,ppt); + pathName2Type.put(ppt.name().toUpperCase(Locale.ENGLISH),ppt); } } @@ -64,7 +60,7 @@ public abstract class TrecDocParser { public static ParsePathType pathType(File f) { int pathLength = 0; while (f != null && ++pathLength < MAX_PATH_LENGTH) { - ParsePathType ppt = pathName2Type.get(f.getName()); + ParsePathType ppt = pathName2Type.get(f.getName().toUpperCase(Locale.ENGLISH)); if (ppt!=null) { return ppt; } From 
47b102fe2ab62af0aeb704d74ee99dc4bcaf821c Mon Sep 17 00:00:00 2001 From: Steven Rowe Date: Sun, 6 Feb 2011 22:55:43 +0000 Subject: [PATCH 104/185] LUCENE-2894: make prettify findable from everywhere git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1067796 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/common-build.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lucene/common-build.xml b/lucene/common-build.xml index 70baa822b5b..80dbcce6368 100644 --- a/lucene/common-build.xml +++ b/lucene/common-build.xml @@ -25,7 +25,7 @@ - + + + + + GB18030TEST + Test with some GB18030 encoded characters + No accents here + ÕâÊÇÒ»¸ö¹¦ÄÜ + This is a feature (translated) + Õâ·ÝÎļþÊǺÜÓйâÔó + This document is very shiny (translated) + 0 + true + + + diff --git a/solr/src/java/org/apache/solr/util/SimplePostTool.java b/solr/src/java/org/apache/solr/util/SimplePostTool.java index ed71cd6ec92..9eeba852a72 100644 --- a/solr/src/java/org/apache/solr/util/SimplePostTool.java +++ b/solr/src/java/org/apache/solr/util/SimplePostTool.java @@ -22,14 +22,9 @@ import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; -import java.io.InputStreamReader; +import java.io.ByteArrayInputStream; import java.io.OutputStream; -import java.io.OutputStreamWriter; -import java.io.Reader; -import java.io.StringReader; -import java.io.StringWriter; import java.io.UnsupportedEncodingException; -import java.io.Writer; import java.util.Set; import java.util.HashSet; import java.net.HttpURLConnection; @@ -44,12 +39,14 @@ import java.net.URL; */ public class SimplePostTool { public static final String DEFAULT_POST_URL = "http://localhost:8983/solr/update"; - public static final String POST_ENCODING = "UTF-8"; - public static final String VERSION_OF_THIS_TOOL = "1.2"; + public static final String VERSION_OF_THIS_TOOL = "1.3"; private static final String SOLR_OK_RESPONSE_EXCERPT = "0"; private static final String 
DEFAULT_COMMIT = "yes"; - + private static final String DEFAULT_OUT = "no"; + + private static final String DEFAULT_DATA_TYPE = "application/xml"; + private static final String DATA_MODE_FILES = "files"; private static final String DATA_MODE_ARGS = "args"; private static final String DATA_MODE_STDIN = "stdin"; @@ -61,37 +58,35 @@ public class SimplePostTool { DATA_MODES.add(DATA_MODE_ARGS); DATA_MODES.add(DATA_MODE_STDIN); } - + protected URL solrUrl; - private class PostException extends RuntimeException { - PostException(String reason,Throwable cause) { - super(reason + " (POST URL=" + solrUrl + ")",cause); - } - } - public static void main(String[] args) { info("version " + VERSION_OF_THIS_TOOL); if (0 < args.length && "-help".equals(args[0])) { System.out.println - ("This is a simple command line tool for POSTing raw XML to a Solr\n"+ - "port. XML data can be read from files specified as commandline\n"+ - "args; as raw commandline arg strings; or via STDIN.\n"+ + ("This is a simple command line tool for POSTing raw data to a Solr\n"+ + "port. Data can be read from files specified as commandline args,\n"+ + "as raw commandline arg strings, or via STDIN.\n"+ "Examples:\n"+ " java -Ddata=files -jar post.jar *.xml\n"+ " java -Ddata=args -jar post.jar '42'\n"+ " java -Ddata=stdin -jar post.jar < hd.xml\n"+ "Other options controlled by System Properties include the Solr\n"+ - "URL to POST to, and whether a commit should be executed. These\n"+ - "are the defaults for all System Properties...\n"+ + "URL to POST to, the Content-Type of the data, whether a commit\n"+ + "should be executed, and whether the response should be written\n"+ + "to STDOUT. 
These are the defaults for all System Properties...\n"+ " -Ddata=" + DEFAULT_DATA_MODE + "\n"+ + " -Dtype=" + DEFAULT_DATA_TYPE + "\n"+ " -Durl=" + DEFAULT_POST_URL + "\n"+ - " -Dcommit=" + DEFAULT_COMMIT + "\n"); + " -Dcommit=" + DEFAULT_COMMIT + "\n"+ + " -Dout=" + DEFAULT_OUT + "\n"); return; } - + OutputStream out = null; + URL u = null; try { u = new URL(System.getProperty("url", DEFAULT_POST_URL)); @@ -105,53 +100,49 @@ public class SimplePostTool { fatal("System Property 'data' is not valid for this tool: " + mode); } + final String doOut = System.getProperty("out", DEFAULT_OUT); + if ("yes".equals(System.getProperty("out", DEFAULT_OUT))) { + out = System.out; + } + try { if (DATA_MODE_FILES.equals(mode)) { if (0 < args.length) { info("POSTing files to " + u + ".."); - final int posted = t.postFiles(args,0); + final int posted = t.postFiles(args, 0, out); } } else if (DATA_MODE_ARGS.equals(mode)) { if (0 < args.length) { info("POSTing args to " + u + ".."); for (String a : args) { - final StringWriter sw = new StringWriter(); - t.postData(new StringReader(a), sw); - warnIfNotExpectedResponse(sw.toString(),SOLR_OK_RESPONSE_EXCERPT); + t.postData(t.stringToStream(a), null, out); } } } else if (DATA_MODE_STDIN.equals(mode)) { info("POSTing stdin to " + u + ".."); - final StringWriter sw = new StringWriter(); - t.postData(new InputStreamReader(System.in,POST_ENCODING), sw); - warnIfNotExpectedResponse(sw.toString(),SOLR_OK_RESPONSE_EXCERPT); + t.postData(System.in, null, out); } if ("yes".equals(System.getProperty("commit",DEFAULT_COMMIT))) { info("COMMITting Solr index changes.."); - final StringWriter sw = new StringWriter(); - t.commit(sw); - warnIfNotExpectedResponse(sw.toString(),SOLR_OK_RESPONSE_EXCERPT); + t.commit(out); } - } catch(IOException ioe) { - fatal("Unexpected IOException " + ioe); + } catch(RuntimeException e) { + fatal("RuntimeException " + e); } } /** Post all filenames provided in args, return the number of files posted*/ - int 
postFiles(String [] args,int startIndexInArgs) throws IOException { + int postFiles(String [] args,int startIndexInArgs, OutputStream out) { int filesPosted = 0; for (int j = startIndexInArgs; j < args.length; j++) { File srcFile = new File(args[j]); - final StringWriter sw = new StringWriter(); - if (srcFile.canRead()) { info("POSTing file " + srcFile.getName()); - postFile(srcFile, sw); + postFile(srcFile, out); filesPosted++; - warnIfNotExpectedResponse(sw.toString(),SOLR_OK_RESPONSE_EXCERPT); } else { warn("Cannot read input file: " + srcFile); } @@ -159,15 +150,6 @@ public class SimplePostTool { return filesPosted; } - /** Check what Solr replied to a POST, and complain if it's not what we expected. - * TODO: parse the response and check it XMLwise, here we just check it as an unparsed String - */ - static void warnIfNotExpectedResponse(String actual,String expected) { - if(actual.indexOf(expected) < 0) { - warn("Unexpected response from Solr: '" + actual + "' does not contain '" + expected + "'"); - } - } - static void warn(String msg) { System.err.println("SimplePostTool: WARNING: " + msg); } @@ -187,15 +169,13 @@ public class SimplePostTool { */ public SimplePostTool(URL solrUrl) { this.solrUrl = solrUrl; - warn("Make sure your XML documents are encoded in " + POST_ENCODING - + ", other encodings are not currently supported"); } /** * Does a simple commit operation */ - public void commit(Writer output) throws IOException { - postData(new StringReader(""), output); + public void commit(OutputStream output) { + postData(stringToStream(""), null, output); } /** @@ -203,85 +183,103 @@ public class SimplePostTool { * writes to response to output. 
* @throws UnsupportedEncodingException */ - public void postFile(File file, Writer output) - throws FileNotFoundException, UnsupportedEncodingException { + public void postFile(File file, OutputStream output) { - // FIXME; use a real XML parser to read files, so as to support various encodings - // (and we can only post well-formed XML anyway) - Reader reader = new InputStreamReader(new FileInputStream(file),POST_ENCODING); + InputStream is = null; try { - postData(reader, output); + is = new FileInputStream(file); + postData(is, (int)file.length(), output); + } catch (IOException e) { + fatal("Can't open/read file: " + file); } finally { try { - if(reader!=null) reader.close(); + if(is!=null) is.close(); } catch (IOException e) { - throw new PostException("IOException while closing file", e); + fatal("IOException while closing file: "+ e); } } } /** - * Reads data from the data reader and posts it to solr, + * Reads data from the data stream and posts it to solr, * writes to the response to output */ - public void postData(Reader data, Writer output) { + public void postData(InputStream data, Integer length, OutputStream output) { + + final String type = System.getProperty("type", DEFAULT_DATA_TYPE); HttpURLConnection urlc = null; try { - urlc = (HttpURLConnection) solrUrl.openConnection(); try { - urlc.setRequestMethod("POST"); - } catch (ProtocolException e) { - throw new PostException("Shouldn't happen: HttpURLConnection doesn't support POST??", e); - } - urlc.setDoOutput(true); - urlc.setDoInput(true); - urlc.setUseCaches(false); - urlc.setAllowUserInteraction(false); - urlc.setRequestProperty("Content-type", "text/xml; charset=" + POST_ENCODING); - - OutputStream out = urlc.getOutputStream(); - - try { - Writer writer = new OutputStreamWriter(out, POST_ENCODING); - pipe(data, writer); - writer.close(); + urlc = (HttpURLConnection) solrUrl.openConnection(); + try { + urlc.setRequestMethod("POST"); + } catch (ProtocolException e) { + fatal("Shouldn't happen: 
HttpURLConnection doesn't support POST??"+e); + + } + urlc.setDoOutput(true); + urlc.setDoInput(true); + urlc.setUseCaches(false); + urlc.setAllowUserInteraction(false); + urlc.setRequestProperty("Content-type", type); + + if (null != length) urlc.setFixedLengthStreamingMode(length); + } catch (IOException e) { - throw new PostException("IOException while posting data", e); - } finally { - if(out!=null) out.close(); + fatal("Connection error (is Solr running at " + solrUrl + " ?): " + e); } - InputStream in = urlc.getInputStream(); + OutputStream out = null; try { - Reader reader = new InputStreamReader(in); - pipe(reader, output); - reader.close(); + out = urlc.getOutputStream(); + pipe(data, out); } catch (IOException e) { - throw new PostException("IOException while reading response", e); + fatal("IOException while posting data: " + e); } finally { - if(in!=null) in.close(); + try { if(out!=null) out.close(); } catch (IOException x) { /*NOOP*/ } } - } catch (IOException e) { + InputStream in = null; try { - fatal("Solr returned an error: " + urlc.getResponseMessage()); - } catch (IOException f) { } - fatal("Connection error (is Solr running at " + solrUrl + " ?): " + e); + if (HttpURLConnection.HTTP_OK != urlc.getResponseCode()) { + fatal("Solr returned an error #" + urlc.getResponseCode() + + " " + urlc.getResponseMessage()); + } + + in = urlc.getInputStream(); + pipe(in, output); + } catch (IOException e) { + fatal("IOException while reading response: " + e); + } finally { + try { if(in!=null) in.close(); } catch (IOException x) { /*NOOP*/ } + } + } finally { if(urlc!=null) urlc.disconnect(); } } - /** - * Pipes everything from the reader to the writer via a buffer - */ - private static void pipe(Reader reader, Writer writer) throws IOException { - char[] buf = new char[1024]; - int read = 0; - while ( (read = reader.read(buf) ) >= 0) { - writer.write(buf, 0, read); + private static InputStream stringToStream(String s) { + InputStream is = null; + try { + is = 
new ByteArrayInputStream(s.getBytes("UTF-8")); + } catch (UnsupportedEncodingException e) { + fatal("Shouldn't happen: UTF-8 not supported?!?!?!"); } - writer.flush(); + return is; + } + + /** + * Pipes everything from the source to the dest. If dest is null, + * then everything is read fro msource and thrown away. + */ + private static void pipe(InputStream source, OutputStream dest) throws IOException { + byte[] buf = new byte[1024]; + int read = 0; + while ( (read = source.read(buf) ) >= 0) { + if (null != dest) dest.write(buf, 0, read); + } + if (null != dest) dest.flush(); } } From 684281a9171287bf11f73058b4987b27a71acd1c Mon Sep 17 00:00:00 2001 From: "Chris M. Hostetter" Date: Mon, 7 Feb 2011 21:41:18 +0000 Subject: [PATCH 110/185] SOLR-2350: forgot attribution in CHANGES.txt git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1068151 13f79535-47bb-0310-9956-ffa450edef68 --- solr/CHANGES.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index 13960ca4760..7beb1d35d61 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -205,7 +205,7 @@ Other Changes * SOLR-2350: Since Solr no longer requires XML files to be in UTF-8 (see SOLR-96) SimplePostTool (aka: post.jar) has been improved to - work with files of any mime-type or charset. + work with files of any mime-type or charset. (hossman) Documentation ---------------------- From ebfa92f6dbe8a37384ae713e256804901790cfb8 Mon Sep 17 00:00:00 2001 From: "Chris M. 
Hostetter" Date: Mon, 7 Feb 2011 23:18:20 +0000 Subject: [PATCH 111/185] SOLR-96: followup fix to post.sh git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1068214 13f79535-47bb-0310-9956-ffa450edef68 --- solr/example/exampledocs/post.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/solr/example/exampledocs/post.sh b/solr/example/exampledocs/post.sh index ee5fdbe9f0d..d9dd4ed718e 100755 --- a/solr/example/exampledocs/post.sh +++ b/solr/example/exampledocs/post.sh @@ -19,10 +19,10 @@ URL=http://localhost:8983/solr/update for f in $FILES; do echo Posting file $f to $URL - curl $URL --data-binary @$f -H 'Content-type:text/xml; charset=utf-8' + curl $URL --data-binary @$f -H 'Content-type:application/xml' echo done #send the commit command to make sure all the changes are flushed and visible -curl $URL --data-binary '' -H 'Content-type:text/xml; charset=utf-8' +curl $URL --data-binary '' -H 'Content-type:application/xml' echo From c9c569476544283264b8d88931486579113fdda5 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Tue, 8 Feb 2011 13:42:16 +0000 Subject: [PATCH 112/185] add test case for deletes git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1068387 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/lucene/index/TestIndexWriter.java | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java index c668c6a809f..298fdcf9e8d 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java @@ -2874,4 +2874,36 @@ public class TestIndexWriter extends LuceneTestCase { dir.close(); } + + public void testDeleteAllSlowly() throws Exception { + final Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random, dir); + final int NUM_DOCS = 1000 * RANDOM_MULTIPLIER; + final List ids = new 
ArrayList(NUM_DOCS); + for(int id=0;id Date: Tue, 8 Feb 2011 19:05:28 +0000 Subject: [PATCH 113/185] LUCENE-2908: clean up serialization in the codebase git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1068526 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/CHANGES.txt | 4 ++ .../store/instantiated/FieldSetting.java | 4 +- .../store/instantiated/FieldSettings.java | 3 +- .../instantiated/InstantiatedDocument.java | 6 +- .../store/instantiated/InstantiatedIndex.java | 5 +- .../store/instantiated/InstantiatedTerm.java | 6 +- .../InstantiatedTermDocumentInformation.java | 6 +- .../InstantiatedTermFreqVector.java | 5 +- .../InstantiatedTermPositionVector.java | 6 +- .../store/instantiated/TestSerialization.java | 56 ------------------- .../lucene/index/memory/MemoryIndex.java | 13 +---- .../apache/lucene/search/FilterClause.java | 2 +- .../search/regex/RegexCapabilities.java | 4 +- .../queryParser/core/QueryNodeError.java | 2 - .../queryParser/core/QueryNodeException.java | 2 - .../core/QueryNodeParseException.java | 2 - .../queryParser/core/nodes/AndQueryNode.java | 2 - .../queryParser/core/nodes/AnyQueryNode.java | 2 - .../core/nodes/BooleanQueryNode.java | 2 - .../core/nodes/BoostQueryNode.java | 2 - .../core/nodes/DeletedQueryNode.java | 2 - .../core/nodes/FieldQueryNode.java | 2 - .../core/nodes/FuzzyQueryNode.java | 2 - .../core/nodes/GroupQueryNode.java | 2 - .../core/nodes/MatchAllDocsQueryNode.java | 2 - .../core/nodes/MatchNoDocsQueryNode.java | 2 - .../core/nodes/ModifierQueryNode.java | 2 - .../core/nodes/NoTokenFoundQueryNode.java | 2 - .../core/nodes/OpaqueQueryNode.java | 2 - .../queryParser/core/nodes/OrQueryNode.java | 2 - .../core/nodes/ParametricQueryNode.java | 2 - .../core/nodes/ParametricRangeQueryNode.java | 2 - .../queryParser/core/nodes/PathQueryNode.java | 2 - .../core/nodes/PhraseSlopQueryNode.java | 2 - .../core/nodes/ProximityQueryNode.java | 2 - .../queryParser/core/nodes/QueryNode.java | 3 +- 
.../queryParser/core/nodes/QueryNodeImpl.java | 2 - .../core/nodes/QuotedFieldQueryNode.java | 2 - .../queryParser/core/nodes/SlopQueryNode.java | 2 - .../core/nodes/TokenizedPhraseQueryNode.java | 2 - .../queryParser/core/nodes/package.html | 2 +- .../processors/QueryNodeProcessorImpl.java | 2 - .../AllowLeadingWildcardAttributeImpl.java | 2 - .../config/AnalyzerAttributeImpl.java | 2 - .../standard/config/BoostAttributeImpl.java | 2 - .../config/DateResolutionAttributeImpl.java | 2 - .../config/DefaultOperatorAttributeImpl.java | 2 - .../DefaultPhraseSlopAttributeImpl.java | 2 - .../config/FieldBoostMapAttributeImpl.java | 2 - .../config/FieldBoostMapFCListener.java | 2 - .../config/FieldDateResolutionFCListener.java | 2 - .../FieldDateResolutionMapAttributeImpl.java | 2 - .../standard/config/FuzzyAttributeImpl.java | 2 - .../standard/config/LocaleAttributeImpl.java | 2 - .../LowercaseExpandedTermsAttributeImpl.java | 2 - .../config/MultiFieldAttributeImpl.java | 2 - .../MultiTermRewriteMethodAttributeImpl.java | 2 - .../PositionIncrementsAttributeImpl.java | 2 - .../config/RangeCollatorAttributeImpl.java | 2 - .../standard/nodes/BooleanModifierNode.java | 2 - .../standard/nodes/MultiPhraseQueryNode.java | 2 - .../nodes/PrefixWildcardQueryNode.java | 2 - .../standard/nodes/RangeQueryNode.java | 2 - .../standard/nodes/RegexpQueryNode.java | 1 - .../nodes/StandardBooleanQueryNode.java | 2 - .../standard/nodes/WildcardQueryNode.java | 1 - .../spans/UniqueFieldAttributeImpl.java | 2 - .../geohash/GeoHashDistanceFilter.java | 5 -- .../tier/DistanceFieldComparatorSource.java | 2 - .../lucene/spatial/tier/DistanceFilter.java | 8 --- .../spatial/tier/DistanceQueryBuilder.java | 2 - .../spatial/tier/InvalidGeoException.java | 5 -- .../spatial/tier/LatLongDistanceFilter.java | 5 -- .../org/apache/lucene/spatial/tier/Shape.java | 3 +- .../tier/TestCartesianShapeFilter.java | 45 --------------- .../builders/NumericRangeFilterBuilder.java | 1 - .../CharTermAttributeImpl.java 
| 3 +- .../tokenattributes/FlagsAttributeImpl.java | 4 +- .../tokenattributes/OffsetAttributeImpl.java | 4 +- .../tokenattributes/PayloadAttributeImpl.java | 4 +- .../PositionIncrementAttributeImpl.java | 4 +- .../tokenattributes/TypeAttributeImpl.java | 4 +- .../org/apache/lucene/document/Document.java | 2 +- .../org/apache/lucene/document/Field.java | 3 +- .../apache/lucene/document/FieldSelector.java | 3 +- .../org/apache/lucene/document/Fieldable.java | 3 +- .../java/org/apache/lucene/index/Payload.java | 4 +- .../java/org/apache/lucene/index/Term.java | 9 +-- .../lucene/index/TermVectorOffsetInfo.java | 4 +- .../org/apache/lucene/messages/Message.java | 3 +- .../apache/lucene/messages/MessageImpl.java | 2 - .../apache/lucene/search/AutomatonQuery.java | 3 +- .../apache/lucene/search/BooleanClause.java | 2 +- .../lucene/search/CachingWrapperFilter.java | 3 +- .../org/apache/lucene/search/Explanation.java | 5 +- .../org/apache/lucene/search/FieldCache.java | 3 +- .../lucene/search/FieldComparatorSource.java | 3 +- .../java/org/apache/lucene/search/Filter.java | 2 +- .../apache/lucene/search/MultiTermQuery.java | 13 +---- .../java/org/apache/lucene/search/Query.java | 2 +- .../org/apache/lucene/search/ScoreDoc.java | 2 +- .../org/apache/lucene/search/Similarity.java | 3 +- .../java/org/apache/lucene/search/Sort.java | 4 +- .../org/apache/lucene/search/SortField.java | 11 +--- .../org/apache/lucene/search/TopDocs.java | 2 +- .../java/org/apache/lucene/search/Weight.java | 3 +- .../lucene/search/cache/EntryCreator.java | 3 +- .../lucene/search/function/ValueSource.java | 3 +- .../search/payloads/PayloadFunction.java | 3 +- .../org/apache/lucene/store/RAMDirectory.java | 6 +- .../java/org/apache/lucene/store/RAMFile.java | 6 +- .../org/apache/lucene/util/AttributeImpl.java | 3 +- .../java/org/apache/lucene/util/BytesRef.java | 24 +------- .../org/apache/lucene/util/MapBackedSet.java | 6 +- .../org/apache/lucene/util/OpenBitSet.java | 3 +- 
.../lucene/util/automaton/Automaton.java | 3 +- .../lucene/util/automaton/RunAutomaton.java | 4 +- .../apache/lucene/util/automaton/State.java | 3 +- .../lucene/util/automaton/Transition.java | 3 +- .../org/apache/lucene/search/QueryUtils.java | 31 ---------- .../lucene/search/TestCustomSearcherSort.java | 3 +- .../org/apache/lucene/search/TestSort.java | 3 +- .../apache/lucene/store/TestRAMDirectory.java | 12 ---- .../compound/hyphenation/ByteVector.java | 4 +- .../compound/hyphenation/CharVector.java | 4 +- .../analysis/compound/hyphenation/Hyphen.java | 4 +- .../compound/hyphenation/HyphenationTree.java | 6 +- .../compound/hyphenation/TernaryTree.java | 3 +- .../tokenattributes/ScriptAttributeImpl.java | 4 +- 129 files changed, 71 insertions(+), 493 deletions(-) delete mode 100644 lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestSerialization.java delete mode 100644 lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesianShapeFilter.java diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index bdd4940c487..b41106f77b5 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -118,6 +118,10 @@ Changes in backwards compatibility policy ParallelMultiSearcher into IndexSearcher as an optional ExecutorServiced passed to its ctor. (Mike McCandless) +* LUCENE-2908: Removed serialization code from lucene classes. It is recommended + that you serialize user search needs at a higher level in your application. + (Robert Muir) + * LUCENE-2831: Changed Weight#scorer, Weight#explain & Filter#getDocIdSet to operate on a AtomicReaderContext instead of directly on IndexReader to enable searches to be aware of IndexSearcher's context. 
(Simon Willnauer) diff --git a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/FieldSetting.java b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/FieldSetting.java index 50435f3ee05..bd2441b245c 100644 --- a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/FieldSetting.java +++ b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/FieldSetting.java @@ -1,7 +1,5 @@ package org.apache.lucene.store.instantiated; -import java.io.Serializable; - /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -22,7 +20,7 @@ import java.io.Serializable; /** * For non package access see {@link org.apache.lucene.index.IndexReader#getFieldNames(org.apache.lucene.index.IndexReader.FieldOption)} */ -class FieldSetting implements Serializable { +class FieldSetting { String fieldName; boolean storeTermVector = false; diff --git a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/FieldSettings.java b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/FieldSettings.java index 2eb9c72309a..4b0662c7323 100644 --- a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/FieldSettings.java +++ b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/FieldSettings.java @@ -3,7 +3,6 @@ package org.apache.lucene.store.instantiated; import java.util.HashMap; import java.util.Map; import java.util.Collection; -import java.io.Serializable; /** * Licensed to the Apache Software Foundation (ASF) under one or more @@ -25,7 +24,7 @@ import java.io.Serializable; /** * Essentially a Map */ -class FieldSettings implements Serializable { +class FieldSettings { FieldSettings() { diff --git a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocument.java 
b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocument.java index 5154f267e2b..889f749d47e 100644 --- a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocument.java +++ b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocument.java @@ -18,7 +18,6 @@ package org.apache.lucene.store.instantiated; import org.apache.lucene.document.Document; -import java.io.Serializable; import java.util.List; import java.util.Map; @@ -27,10 +26,7 @@ import java.util.Map; * * @see org.apache.lucene.document.Document */ -public class InstantiatedDocument - implements Serializable { - - private static final long serialVersionUID = 1l; +public class InstantiatedDocument { private Document document; diff --git a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java index 76faa4602bd..b98f3cb337e 100644 --- a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java +++ b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java @@ -18,7 +18,6 @@ package org.apache.lucene.store.instantiated; import java.io.Closeable; import java.io.IOException; -import java.io.Serializable; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; @@ -60,9 +59,7 @@ import org.apache.lucene.util.BytesRef; * Consider using InstantiatedIndex as if it was immutable. 
*/ public class InstantiatedIndex - implements Serializable,Closeable { - - private static final long serialVersionUID = 1l; + implements Closeable { private long version = System.currentTimeMillis(); diff --git a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTerm.java b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTerm.java index cdd2197c89c..67e7f7805fa 100644 --- a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTerm.java +++ b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTerm.java @@ -16,7 +16,6 @@ package org.apache.lucene.store.instantiated; * limitations under the License. */ -import java.io.Serializable; import java.util.Comparator; import org.apache.lucene.index.Term; @@ -26,10 +25,7 @@ import org.apache.lucene.index.Term; * * @see org.apache.lucene.index.Term */ -public class InstantiatedTerm - implements Serializable { - - private static final long serialVersionUID = 1l; +public class InstantiatedTerm { public static final Comparator comparator = new Comparator() { public int compare(InstantiatedTerm instantiatedTerm, InstantiatedTerm instantiatedTerm1) { diff --git a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermDocumentInformation.java b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermDocumentInformation.java index 843eb08e140..c8d9138ac6a 100644 --- a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermDocumentInformation.java +++ b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermDocumentInformation.java @@ -2,7 +2,6 @@ package org.apache.lucene.store.instantiated; import org.apache.lucene.index.TermVectorOffsetInfo; -import java.io.Serializable; import java.util.Comparator; /** @@ -38,10 +37,7 @@ import java.util.Comparator; *
      * */ -public class InstantiatedTermDocumentInformation - implements Serializable { - - private static final long serialVersionUID = 1l; +public class InstantiatedTermDocumentInformation { public static final Comparator termComparator = new Comparator() { public int compare(InstantiatedTermDocumentInformation instantiatedTermDocumentInformation, InstantiatedTermDocumentInformation instantiatedTermDocumentInformation1) { diff --git a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermFreqVector.java b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermFreqVector.java index e688b6fe6e7..dcbc49185d9 100644 --- a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermFreqVector.java +++ b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermFreqVector.java @@ -3,7 +3,6 @@ package org.apache.lucene.store.instantiated; import org.apache.lucene.index.TermFreqVector; import org.apache.lucene.util.BytesRef; -import java.io.Serializable; import java.util.Arrays; import java.util.List; @@ -29,9 +28,7 @@ import java.util.List; * @see org.apache.lucene.index.TermFreqVector */ public class InstantiatedTermFreqVector - implements TermFreqVector, Serializable { - - private static final long serialVersionUID = 1l; + implements TermFreqVector { private final List termDocumentInformations; private final String field; diff --git a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermPositionVector.java b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermPositionVector.java index 56d8e02a2ed..8d40063ed25 100644 --- a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermPositionVector.java +++ b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermPositionVector.java @@ -19,8 +19,6 @@ 
package org.apache.lucene.store.instantiated; import org.apache.lucene.index.TermPositionVector; import org.apache.lucene.index.TermVectorOffsetInfo; -import java.io.Serializable; - /** * Extended vector space view of a document in an {@link InstantiatedIndexReader}. * @@ -28,9 +26,7 @@ import java.io.Serializable; */ public class InstantiatedTermPositionVector extends InstantiatedTermFreqVector - implements TermPositionVector, Serializable { - - private static final long serialVersionUID = 1l; + implements TermPositionVector { public InstantiatedTermPositionVector(InstantiatedDocument document, String field) { super(document, field); diff --git a/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestSerialization.java b/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestSerialization.java deleted file mode 100644 index 49dc1604157..00000000000 --- a/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestSerialization.java +++ /dev/null @@ -1,56 +0,0 @@ -package org.apache.lucene.store.instantiated; - -/** - * Copyright 2006 The Apache Software Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - -import org.apache.lucene.store.Directory; -import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; - -import java.io.ByteArrayOutputStream; -import java.io.ObjectOutputStream; - -public class TestSerialization extends LuceneTestCase { - - public void test() throws Exception { - Directory dir = newDirectory(); - - IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); - Document doc = new Document(); - doc.add(new Field("foo", "bar rab abr bra rba", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); - doc.add(new Field("moo", "bar rab abr bra rba", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); - iw.addDocument(doc); - iw.close(); - - IndexReader ir = IndexReader.open(dir, false); - InstantiatedIndex ii = new InstantiatedIndex(ir); - ir.close(); - - ByteArrayOutputStream baos = new ByteArrayOutputStream(5000); - ObjectOutputStream oos = new ObjectOutputStream(baos); - oos.writeObject(ii); - oos.close(); - baos.close(); - dir.close(); - - } - -} diff --git a/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java b/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java index 437d313b9c9..4ffac05bcf4 100644 --- a/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java +++ b/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java @@ -18,7 +18,6 @@ package org.apache.lucene.index.memory; */ import java.io.IOException; -import java.io.Serializable; import java.io.StringReader; import java.util.Arrays; import java.util.Collection; @@ -186,7 +185,7 @@ import org.apache.lucene.util.Constants; // for javadocs * hprof tracing ). 
* */ -public class MemoryIndex implements Serializable { +public class MemoryIndex { /** info for each field: Map */ private final HashMap fields = new HashMap(); @@ -199,8 +198,6 @@ public class MemoryIndex implements Serializable { /** Could be made configurable; See {@link Document#setBoost(float)} */ private static final float docBoost = 1.0f; - - private static final long serialVersionUID = 2782195016849084649L; private static final boolean DEBUG = false; @@ -589,7 +586,7 @@ public class MemoryIndex implements Serializable { * Index data structure for a field; Contains the tokenized term texts and * their positions. */ - private static final class Info implements Serializable { + private static final class Info { /** * Term strings and their positions for this field: Map terms, int numTokens, int numOverlapTokens, float boost) { this.terms = terms; this.numTokens = numTokens; @@ -668,12 +663,10 @@ public class MemoryIndex implements Serializable { * Efficient resizable auto-expanding list holding int elements; * implemented with arrays. */ - private static final class ArrayIntList implements Serializable { + private static final class ArrayIntList { private int[] elements; private int size = 0; - - private static final long serialVersionUID = 2282195016849084649L; public ArrayIntList() { this(10); diff --git a/lucene/contrib/queries/src/java/org/apache/lucene/search/FilterClause.java b/lucene/contrib/queries/src/java/org/apache/lucene/search/FilterClause.java index d816066a31b..26937ab6e0b 100644 --- a/lucene/contrib/queries/src/java/org/apache/lucene/search/FilterClause.java +++ b/lucene/contrib/queries/src/java/org/apache/lucene/search/FilterClause.java @@ -26,7 +26,7 @@ import org.apache.lucene.search.BooleanClause.Occur; * of queries.) 
*/ -public class FilterClause implements java.io.Serializable +public class FilterClause { Occur occur = null; Filter filter = null; diff --git a/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/RegexCapabilities.java b/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/RegexCapabilities.java index e8c6daed713..c14053c0db3 100644 --- a/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/RegexCapabilities.java +++ b/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/RegexCapabilities.java @@ -1,7 +1,5 @@ package org.apache.lucene.search.regex; -import java.io.Serializable; - import org.apache.lucene.util.BytesRef; /** @@ -25,7 +23,7 @@ import org.apache.lucene.util.BytesRef; * Defines basic operations needed by {@link RegexQuery} for a regular * expression implementation. */ -public interface RegexCapabilities extends Serializable { +public interface RegexCapabilities { /** * Called by the constructor of {@link RegexTermsEnum} allowing * implementations to cache a compiled version of the regular diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/QueryNodeError.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/QueryNodeError.java index 95b25722d1f..cb3e77e25c6 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/QueryNodeError.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/QueryNodeError.java @@ -27,8 +27,6 @@ import org.apache.lucene.messages.NLSException; * @see org.apache.lucene.messages.Message */ public class QueryNodeError extends Error implements NLSException { - - private static final long serialVersionUID = 1804855832182710327L; private Message message; /** diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/QueryNodeException.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/QueryNodeException.java index 
9a7223b6d3f..22db1d5b37a 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/QueryNodeException.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/QueryNodeException.java @@ -42,8 +42,6 @@ import org.apache.lucene.queryParser.core.nodes.QueryNode; */ public class QueryNodeException extends Exception implements NLSException { - private static final long serialVersionUID = -5962648855261624214L; - protected Message message = new MessageImpl(QueryParserMessages.EMPTY_MESSAGE); public QueryNodeException(Message message) { diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/QueryNodeParseException.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/QueryNodeParseException.java index c60f657b140..a628574eaf4 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/QueryNodeParseException.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/QueryNodeParseException.java @@ -33,8 +33,6 @@ import org.apache.lucene.queryParser.core.parser.SyntaxParser; */ public class QueryNodeParseException extends QueryNodeException { - private static final long serialVersionUID = 8197535103538766773L; - private CharSequence query; private int beginColumn = -1; diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/AndQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/AndQueryNode.java index 5cdd2f752b1..421be88ee33 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/AndQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/AndQueryNode.java @@ -27,8 +27,6 @@ import org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax; */ public class AndQueryNode extends BooleanQueryNode { - private static final long serialVersionUID = 118496077529151825L; - /** * 
@param clauses * - the query nodes to be and'ed diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/AnyQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/AnyQueryNode.java index 80818d8888f..cd6d7e25395 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/AnyQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/AnyQueryNode.java @@ -26,8 +26,6 @@ import org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax; * nodes. */ public class AnyQueryNode extends AndQueryNode { - private static final long serialVersionUID = 1000791433562954187L; - private CharSequence field = null; private int minimumMatchingmElements = 0; diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/BooleanQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/BooleanQueryNode.java index 8a1832566fe..56cd2203cda 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/BooleanQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/BooleanQueryNode.java @@ -28,8 +28,6 @@ import org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax; */ public class BooleanQueryNode extends QueryNodeImpl { - private static final long serialVersionUID = -2206623652088638072L; - /** * @param clauses * - the query nodes to be and'ed diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/BoostQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/BoostQueryNode.java index b337be4cb8d..cd8620d782b 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/BoostQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/BoostQueryNode.java @@ -34,8 +34,6 @@ import 
org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax; */ public class BoostQueryNode extends QueryNodeImpl { - private static final long serialVersionUID = -3929082630855807593L; - private float value = 0; /** diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/DeletedQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/DeletedQueryNode.java index daa7d69ae74..cb859f65c7e 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/DeletedQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/DeletedQueryNode.java @@ -27,8 +27,6 @@ import org.apache.lucene.queryParser.core.processors.RemoveDeletedQueryNodesProc */ public class DeletedQueryNode extends QueryNodeImpl { - private static final long serialVersionUID = -9151675506000425293L; - public DeletedQueryNode() { // empty constructor } diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/FieldQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/FieldQueryNode.java index f39016c37e6..33614977285 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/FieldQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/FieldQueryNode.java @@ -28,8 +28,6 @@ import org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax.Type; public class FieldQueryNode extends QueryNodeImpl implements TextableQueryNode, FieldableNode { - private static final long serialVersionUID = 3634521145130758265L; - /** * The term's field */ diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/FuzzyQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/FuzzyQueryNode.java index c3e4ade6384..e86b3ca0637 100644 --- 
a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/FuzzyQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/FuzzyQueryNode.java @@ -25,8 +25,6 @@ import org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax; */ public class FuzzyQueryNode extends FieldQueryNode { - private static final long serialVersionUID = -1794537213032589441L; - private float similarity; private int prefixLength; diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/GroupQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/GroupQueryNode.java index 60e1f14051f..dcf1788fa43 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/GroupQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/GroupQueryNode.java @@ -34,8 +34,6 @@ import org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax; */ public class GroupQueryNode extends QueryNodeImpl { - private static final long serialVersionUID = -9204673493869114999L; - /** * This QueryNode is used to identify parenthesis on the original query string */ diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/MatchAllDocsQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/MatchAllDocsQueryNode.java index fea22340259..45507250c48 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/MatchAllDocsQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/MatchAllDocsQueryNode.java @@ -25,8 +25,6 @@ import org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax; */ public class MatchAllDocsQueryNode extends QueryNodeImpl { - private static final long serialVersionUID = -7050381275423477809L; - public MatchAllDocsQueryNode() { // empty constructor } diff --git 
a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/MatchNoDocsQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/MatchNoDocsQueryNode.java index f7ce910f4c6..c6147f3450c 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/MatchNoDocsQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/MatchNoDocsQueryNode.java @@ -24,8 +24,6 @@ package org.apache.lucene.queryParser.core.nodes; */ public class MatchNoDocsQueryNode extends DeletedQueryNode { - private static final long serialVersionUID = 8081805751679581497L; - public MatchNoDocsQueryNode() { // empty constructor } diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/ModifierQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/ModifierQueryNode.java index 829f53f8bd5..cd35224bd81 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/ModifierQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/ModifierQueryNode.java @@ -36,8 +36,6 @@ import org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax; */ public class ModifierQueryNode extends QueryNodeImpl { - private static final long serialVersionUID = -391209837953928169L; - public enum Modifier { MOD_NONE, MOD_NOT, MOD_REQ; diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/NoTokenFoundQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/NoTokenFoundQueryNode.java index 057f6af288b..19083fe8682 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/NoTokenFoundQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/NoTokenFoundQueryNode.java @@ -25,8 +25,6 @@ import 
org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax; */ public class NoTokenFoundQueryNode extends DeletedQueryNode { - private static final long serialVersionUID = 7332975497586993833L; - public NoTokenFoundQueryNode() { super(); } diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/OpaqueQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/OpaqueQueryNode.java index 5ca0b72fc48..68ad4d4fafd 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/OpaqueQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/OpaqueQueryNode.java @@ -26,8 +26,6 @@ import org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax; */ public class OpaqueQueryNode extends QueryNodeImpl { - private static final long serialVersionUID = 0L; - private CharSequence schema = null; private CharSequence value = null; diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/OrQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/OrQueryNode.java index e93c18e8269..eeb58695b41 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/OrQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/OrQueryNode.java @@ -29,8 +29,6 @@ import org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax; */ public class OrQueryNode extends BooleanQueryNode { - private static final long serialVersionUID = -3692323307688017852L; - /** * @param clauses * - the query nodes to be or'ed diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/ParametricQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/ParametricQueryNode.java index 8ccebf652ee..ce5cff6b819 100644 --- 
a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/ParametricQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/ParametricQueryNode.java @@ -25,8 +25,6 @@ import org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax; */ public class ParametricQueryNode extends FieldQueryNode { - private static final long serialVersionUID = -5770038129741218116L; - private CompareOperator operator; public enum CompareOperator { diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/ParametricRangeQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/ParametricRangeQueryNode.java index 240d6010951..4580d84ed2d 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/ParametricRangeQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/ParametricRangeQueryNode.java @@ -29,8 +29,6 @@ import org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax; public class ParametricRangeQueryNode extends QueryNodeImpl implements FieldableNode { - private static final long serialVersionUID = 7120958816535573935L; - public ParametricRangeQueryNode(ParametricQueryNode lowerBound, ParametricQueryNode upperBound) { diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/PathQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/PathQueryNode.java index 7364cbd2609..dfa3f7a775f 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/PathQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/PathQueryNode.java @@ -41,8 +41,6 @@ import org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax.Type; */ public class PathQueryNode extends QueryNodeImpl { - private static final long serialVersionUID = -8325921322405804789L; - 
public static class QueryText implements Cloneable { CharSequence value = null; /** diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/PhraseSlopQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/PhraseSlopQueryNode.java index 882c29f7912..46561f1afe5 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/PhraseSlopQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/PhraseSlopQueryNode.java @@ -26,8 +26,6 @@ import org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax; public class PhraseSlopQueryNode extends QueryNodeImpl implements FieldableNode { - private static final long serialVersionUID = 0L; - private int value = 0; /** diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/ProximityQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/ProximityQueryNode.java index b100b1aceb0..80fbcde5828 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/ProximityQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/ProximityQueryNode.java @@ -33,8 +33,6 @@ import org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax; */ public class ProximityQueryNode extends BooleanQueryNode { - private static final long serialVersionUID = 9018220596680832916L; - public enum Type { PARAGRAPH { @Override diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/QueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/QueryNode.java index fac89f59f10..dcb980562e7 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/QueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/QueryNode.java @@ -17,7 +17,6 @@ package 
org.apache.lucene.queryParser.core.nodes; * limitations under the License. */ -import java.io.Serializable; import java.util.List; import java.util.Map; @@ -27,7 +26,7 @@ import org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax; * A {@link QueryNode} is a interface implemented by all nodes on a QueryNode * tree. */ -public interface QueryNode extends Serializable { +public interface QueryNode { /** convert to a query string understood by the query parser */ // TODO: this interface might be changed in the future diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/QueryNodeImpl.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/QueryNodeImpl.java index 6b48cabb869..745d8f1529c 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/QueryNodeImpl.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/QueryNodeImpl.java @@ -33,8 +33,6 @@ import org.apache.lucene.queryParser.core.util.StringUtils; */ public abstract class QueryNodeImpl implements QueryNode, Cloneable { - private static final long serialVersionUID = 5569870883474845989L; - /* index default field */ // TODO remove PLAINTEXT_FIELD_NAME replacing it with configuration APIs public static final String PLAINTEXT_FIELD_NAME = "_plain"; diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/QuotedFieldQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/QuotedFieldQueryNode.java index 3daf42cbd23..7c39f57abad 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/QuotedFieldQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/QuotedFieldQueryNode.java @@ -25,8 +25,6 @@ import org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax; */ public class QuotedFieldQueryNode extends FieldQueryNode { - private static 
final long serialVersionUID = -6675157780051428987L; - /** * @param field * - field name diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/SlopQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/SlopQueryNode.java index 6b83580b6ed..fe87560025e 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/SlopQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/SlopQueryNode.java @@ -34,8 +34,6 @@ import org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax; */ public class SlopQueryNode extends QueryNodeImpl implements FieldableNode { - private static final long serialVersionUID = 0L; - private int value = 0; /** diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/TokenizedPhraseQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/TokenizedPhraseQueryNode.java index d90c17ed489..0529d73a8dc 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/TokenizedPhraseQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/TokenizedPhraseQueryNode.java @@ -28,8 +28,6 @@ import org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax; public class TokenizedPhraseQueryNode extends QueryNodeImpl implements FieldableNode { - private static final long serialVersionUID = -7185108320787917541L; - public TokenizedPhraseQueryNode() { setLeaf(false); allocate(); diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/package.html b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/package.html index a03738e3f21..6d9b507d265 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/package.html +++ 
b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/nodes/package.html @@ -26,7 +26,7 @@ Contains query nodes that are commonly used by query parser implementations

      Query Nodes

      The package org.apache.lucene.queryParser.nodes contains all the basic query nodes. The interface -that represents a query node is {@link org.apache.lucene.queryParser.core.nodes.QueryNode}. Every query node must be serializable. +that represents a query node is {@link org.apache.lucene.queryParser.core.nodes.QueryNode}.

      {@link org.apache.lucene.queryParser.core.nodes.QueryNode}s are used by the text parser to create a syntax tree. diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/processors/QueryNodeProcessorImpl.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/processors/QueryNodeProcessorImpl.java index 97bedd043e5..3a9d9bb0c1c 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/processors/QueryNodeProcessorImpl.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/processors/QueryNodeProcessorImpl.java @@ -244,8 +244,6 @@ public abstract class QueryNodeProcessorImpl implements QueryNodeProcessor { private static class ChildrenList extends ArrayList { - private static final long serialVersionUID = -2613518456949297135L; - boolean beingUsed; } diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/AllowLeadingWildcardAttributeImpl.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/AllowLeadingWildcardAttributeImpl.java index c4f167bf8e4..97427870157 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/AllowLeadingWildcardAttributeImpl.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/AllowLeadingWildcardAttributeImpl.java @@ -31,8 +31,6 @@ import org.apache.lucene.util.AttributeImpl; public class AllowLeadingWildcardAttributeImpl extends AttributeImpl implements AllowLeadingWildcardAttribute { - private static final long serialVersionUID = -2804763012723049527L; - private boolean allowLeadingWildcard = false; // default in 2.9 public void setAllowLeadingWildcard(boolean allowLeadingWildcard) { diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/AnalyzerAttributeImpl.java 
b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/AnalyzerAttributeImpl.java index a9185bba491..d6f588c4135 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/AnalyzerAttributeImpl.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/AnalyzerAttributeImpl.java @@ -33,8 +33,6 @@ import org.apache.lucene.util.AttributeImpl; public class AnalyzerAttributeImpl extends AttributeImpl implements AnalyzerAttribute { - private static final long serialVersionUID = -6804760312723049526L; - private Analyzer analyzer; public AnalyzerAttributeImpl() { diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/BoostAttributeImpl.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/BoostAttributeImpl.java index fbcc46432c7..7ec16cb23df 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/BoostAttributeImpl.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/BoostAttributeImpl.java @@ -33,8 +33,6 @@ import org.apache.lucene.util.AttributeImpl; public class BoostAttributeImpl extends AttributeImpl implements BoostAttribute { - private static final long serialVersionUID = -2104763012523049527L; - private float boost = 1.0f; public BoostAttributeImpl() { diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DateResolutionAttributeImpl.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DateResolutionAttributeImpl.java index 621d90718e9..42b29d93235 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DateResolutionAttributeImpl.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DateResolutionAttributeImpl.java @@ -33,8 +33,6 @@ import 
org.apache.lucene.util.AttributeImpl; public class DateResolutionAttributeImpl extends AttributeImpl implements DateResolutionAttribute { - private static final long serialVersionUID = -6804360312723049526L; - private DateTools.Resolution dateResolution = null; public DateResolutionAttributeImpl() { diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DefaultOperatorAttributeImpl.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DefaultOperatorAttributeImpl.java index e99022da5a8..0899cc791a6 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DefaultOperatorAttributeImpl.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DefaultOperatorAttributeImpl.java @@ -32,8 +32,6 @@ import org.apache.lucene.util.AttributeImpl; public class DefaultOperatorAttributeImpl extends AttributeImpl implements DefaultOperatorAttribute { - private static final long serialVersionUID = -6804760312723049526L; - private Operator operator = Operator.OR; public DefaultOperatorAttributeImpl() { diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DefaultPhraseSlopAttributeImpl.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DefaultPhraseSlopAttributeImpl.java index 1f0ee2bf6c7..b457777eef6 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DefaultPhraseSlopAttributeImpl.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DefaultPhraseSlopAttributeImpl.java @@ -32,8 +32,6 @@ import org.apache.lucene.util.AttributeImpl; public class DefaultPhraseSlopAttributeImpl extends AttributeImpl implements DefaultPhraseSlopAttribute { - private static final long serialVersionUID = -2104763012527049527L; - private int defaultPhraseSlop = 0; public 
DefaultPhraseSlopAttributeImpl() { diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldBoostMapAttributeImpl.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldBoostMapAttributeImpl.java index debcc563450..852e931d188 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldBoostMapAttributeImpl.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldBoostMapAttributeImpl.java @@ -36,8 +36,6 @@ import org.apache.lucene.util.AttributeImpl; public class FieldBoostMapAttributeImpl extends AttributeImpl implements FieldBoostMapAttribute { - private static final long serialVersionUID = -2104763012523049527L; - private Map boosts = new LinkedHashMap(); diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldBoostMapFCListener.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldBoostMapFCListener.java index ab93ee9d531..fc4307aea7c 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldBoostMapFCListener.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldBoostMapFCListener.java @@ -34,8 +34,6 @@ import org.apache.lucene.queryParser.core.config.QueryConfigHandler; */ public class FieldBoostMapFCListener implements FieldConfigListener { - private static final long serialVersionUID = -5929802948798314067L; - private QueryConfigHandler config = null; public FieldBoostMapFCListener(QueryConfigHandler config) { diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionFCListener.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionFCListener.java index 0bdb9abc8ac..8afe94aaf60 100644 --- 
a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionFCListener.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionFCListener.java @@ -36,8 +36,6 @@ import org.apache.lucene.queryParser.core.config.QueryConfigHandler; */ public class FieldDateResolutionFCListener implements FieldConfigListener { - private static final long serialVersionUID = -5929802948798314067L; - private QueryConfigHandler config = null; public FieldDateResolutionFCListener(QueryConfigHandler config) { diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionMapAttributeImpl.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionMapAttributeImpl.java index ae9015e3f0f..f46efc4af99 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionMapAttributeImpl.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionMapAttributeImpl.java @@ -33,8 +33,6 @@ import org.apache.lucene.util.AttributeImpl; public class FieldDateResolutionMapAttributeImpl extends AttributeImpl implements FieldDateResolutionMapAttribute { - private static final long serialVersionUID = -2104763012523049527L; - private Map dateRes = new HashMap(); diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FuzzyAttributeImpl.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FuzzyAttributeImpl.java index 4cb85c4a6e3..4a6163400bd 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FuzzyAttributeImpl.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FuzzyAttributeImpl.java @@ -33,8 +33,6 @@ import org.apache.lucene.util.AttributeImpl; public class 
FuzzyAttributeImpl extends AttributeImpl implements FuzzyAttribute { - private static final long serialVersionUID = -2104763012527049527L; - private int prefixLength = FuzzyQuery.defaultPrefixLength; private float minSimilarity = FuzzyQuery.defaultMinSimilarity; diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/LocaleAttributeImpl.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/LocaleAttributeImpl.java index 495ab2a678b..e97595da1e1 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/LocaleAttributeImpl.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/LocaleAttributeImpl.java @@ -33,8 +33,6 @@ import org.apache.lucene.util.AttributeImpl; public class LocaleAttributeImpl extends AttributeImpl implements LocaleAttribute { - private static final long serialVersionUID = -6804760312720049526L; - private Locale locale = Locale.getDefault(); public LocaleAttributeImpl() { diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/LowercaseExpandedTermsAttributeImpl.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/LowercaseExpandedTermsAttributeImpl.java index a18c8e28e32..ea85911a1a6 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/LowercaseExpandedTermsAttributeImpl.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/LowercaseExpandedTermsAttributeImpl.java @@ -33,8 +33,6 @@ import org.apache.lucene.util.AttributeImpl; public class LowercaseExpandedTermsAttributeImpl extends AttributeImpl implements LowercaseExpandedTermsAttribute { - private static final long serialVersionUID = -2804760312723049527L; - private boolean lowercaseExpandedTerms = true; public LowercaseExpandedTermsAttributeImpl() { diff --git 
a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/MultiFieldAttributeImpl.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/MultiFieldAttributeImpl.java index 7890a0148e7..8ffeb6dc0d3 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/MultiFieldAttributeImpl.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/MultiFieldAttributeImpl.java @@ -33,8 +33,6 @@ import org.apache.lucene.util.AttributeImpl; public class MultiFieldAttributeImpl extends AttributeImpl implements MultiFieldAttribute { - private static final long serialVersionUID = -6809760312720049526L; - private CharSequence[] fields; public MultiFieldAttributeImpl() { diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/MultiTermRewriteMethodAttributeImpl.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/MultiTermRewriteMethodAttributeImpl.java index a6dda510361..6ef930361b7 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/MultiTermRewriteMethodAttributeImpl.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/MultiTermRewriteMethodAttributeImpl.java @@ -33,8 +33,6 @@ import org.apache.lucene.util.AttributeImpl; */ public class MultiTermRewriteMethodAttributeImpl extends AttributeImpl implements MultiTermRewriteMethodAttribute { - - private static final long serialVersionUID = -2104763012723049527L; private MultiTermQuery.RewriteMethod multiTermRewriteMethod = MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT; diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/PositionIncrementsAttributeImpl.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/PositionIncrementsAttributeImpl.java index 529d459d9a9..e4ec12e1c6d 
100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/PositionIncrementsAttributeImpl.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/PositionIncrementsAttributeImpl.java @@ -31,8 +31,6 @@ import org.apache.lucene.util.AttributeImpl; public class PositionIncrementsAttributeImpl extends AttributeImpl implements PositionIncrementsAttribute { - private static final long serialVersionUID = -2804763012793049527L; - private boolean positionIncrementsEnabled = false; public PositionIncrementsAttributeImpl() { diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/RangeCollatorAttributeImpl.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/RangeCollatorAttributeImpl.java index 70ac5e69636..6f3e1869544 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/RangeCollatorAttributeImpl.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/RangeCollatorAttributeImpl.java @@ -35,8 +35,6 @@ import org.apache.lucene.util.AttributeImpl; public class RangeCollatorAttributeImpl extends AttributeImpl implements RangeCollatorAttribute { - private static final long serialVersionUID = -6804360312723049526L; - private Collator rangeCollator; public RangeCollatorAttributeImpl() { diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/BooleanModifierNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/BooleanModifierNode.java index 41bf1455d31..93b11008e04 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/BooleanModifierNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/BooleanModifierNode.java @@ -30,8 +30,6 @@ import 
org.apache.lucene.queryParser.standard.processors.GroupQueryNodeProcessor */ public class BooleanModifierNode extends ModifierQueryNode { - private static final long serialVersionUID = -557816496416587068L; - public BooleanModifierNode(QueryNode node, Modifier mod) { super(node, mod); } diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/MultiPhraseQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/MultiPhraseQueryNode.java index 38c5f3d740b..2736789c1e3 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/MultiPhraseQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/MultiPhraseQueryNode.java @@ -33,8 +33,6 @@ import org.apache.lucene.search.PhraseQuery; public class MultiPhraseQueryNode extends QueryNodeImpl implements FieldableNode { - private static final long serialVersionUID = -2138501723963320158L; - public MultiPhraseQueryNode() { setLeaf(false); allocate(); diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/PrefixWildcardQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/PrefixWildcardQueryNode.java index b1cc2629e0d..7741d6021de 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/PrefixWildcardQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/PrefixWildcardQueryNode.java @@ -27,8 +27,6 @@ import org.apache.lucene.queryParser.core.nodes.FieldQueryNode; */ public class PrefixWildcardQueryNode extends WildcardQueryNode { - private static final long serialVersionUID = 6851557641826407515L; - /** * @param field * - field name diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/RangeQueryNode.java 
b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/RangeQueryNode.java index 6578ab0aa78..290f5ecdff0 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/RangeQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/RangeQueryNode.java @@ -34,8 +34,6 @@ import org.apache.lucene.queryParser.standard.processors.ParametricRangeQueryNod */ public class RangeQueryNode extends ParametricRangeQueryNode { - private static final long serialVersionUID = 7400866652044314657L; - private Collator collator; /** diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/RegexpQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/RegexpQueryNode.java index 7e4f9896a12..be8659d5c95 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/RegexpQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/RegexpQueryNode.java @@ -29,7 +29,6 @@ import org.apache.lucene.util.BytesRef; */ public class RegexpQueryNode extends QueryNodeImpl implements TextableQueryNode, FieldableNode { - private static final long serialVersionUID = 0L; private CharSequence text; private CharSequence field; /** diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/StandardBooleanQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/StandardBooleanQueryNode.java index d09d234a04d..13e922121d3 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/StandardBooleanQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/StandardBooleanQueryNode.java @@ -34,8 +34,6 @@ import org.apache.lucene.search.SimilarityProvider; */ public class StandardBooleanQueryNode extends 
BooleanQueryNode { - private static final long serialVersionUID = 1938287817191138787L; - private boolean disableCoord; /** diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/WildcardQueryNode.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/WildcardQueryNode.java index 3bc32672140..3e4d938101d 100644 --- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/WildcardQueryNode.java +++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/nodes/WildcardQueryNode.java @@ -25,7 +25,6 @@ import org.apache.lucene.queryParser.core.parser.EscapeQuerySyntax; * phrases. Examples: a*b*c Fl?w? m?ke*g */ public class WildcardQueryNode extends FieldQueryNode { - private static final long serialVersionUID = 0L; /** * @param field diff --git a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/spans/UniqueFieldAttributeImpl.java b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/spans/UniqueFieldAttributeImpl.java index 0cb8b4ab7dd..7fce3a8460c 100644 --- a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/spans/UniqueFieldAttributeImpl.java +++ b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/spans/UniqueFieldAttributeImpl.java @@ -31,8 +31,6 @@ import org.apache.lucene.util.AttributeImpl; public class UniqueFieldAttributeImpl extends AttributeImpl implements UniqueFieldAttribute { - private static final long serialVersionUID = 8553318595851064232L; - private CharSequence uniqueField; public UniqueFieldAttributeImpl() { diff --git a/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/geohash/GeoHashDistanceFilter.java b/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/geohash/GeoHashDistanceFilter.java index 2751dbc9e34..875a56a0fbd 100644 --- a/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/geohash/GeoHashDistanceFilter.java +++ 
b/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/geohash/GeoHashDistanceFilter.java @@ -36,11 +36,6 @@ import org.apache.lucene.spatial.tier.DistanceFilter; */ public class GeoHashDistanceFilter extends DistanceFilter { - - /** - * - */ - private static final long serialVersionUID = 1L; private double lat; private double lng; diff --git a/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceFieldComparatorSource.java b/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceFieldComparatorSource.java index dec1f88b14c..f9564694297 100644 --- a/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceFieldComparatorSource.java +++ b/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceFieldComparatorSource.java @@ -31,8 +31,6 @@ import org.apache.lucene.search.FieldComparatorSource; */ public class DistanceFieldComparatorSource extends FieldComparatorSource { - private static final long serialVersionUID = 1L; - private DistanceFilter distanceFilter; private DistanceScoreDocLookupComparator dsdlc; diff --git a/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceFilter.java b/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceFilter.java index 497c45e297e..22a5385d081 100644 --- a/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceFilter.java +++ b/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceFilter.java @@ -19,8 +19,6 @@ package org.apache.lucene.spatial.tier; import java.util.Map; import java.util.WeakHashMap; import java.util.HashMap; -import java.io.IOException; -import java.io.ObjectInputStream; import org.apache.lucene.search.Filter; import org.apache.lucene.spatial.tier.DistanceHandler.Precision; @@ -63,12 +61,6 @@ public abstract class DistanceFilter extends Filter { distanceLookupCache = new WeakHashMap(); } - /** needed for deserialization, because the cache is transient */ - private void 
readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException { - stream.defaultReadObject(); - distanceLookupCache = new WeakHashMap(); - } - public Map getDistances(){ return distances; } diff --git a/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceQueryBuilder.java b/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceQueryBuilder.java index ed7393651b1..418a4f7d092 100644 --- a/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceQueryBuilder.java +++ b/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceQueryBuilder.java @@ -30,8 +30,6 @@ import org.apache.lucene.spatial.geohash.GeoHashDistanceFilter; * release. */ public class DistanceQueryBuilder { - - private static final long serialVersionUID = 1L; private final double lat; private final double lng; diff --git a/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/InvalidGeoException.java b/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/InvalidGeoException.java index 1338b94cae3..0f8daacf800 100644 --- a/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/InvalidGeoException.java +++ b/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/InvalidGeoException.java @@ -24,11 +24,6 @@ package org.apache.lucene.spatial.tier; */ public class InvalidGeoException extends Exception { - /** - * - */ - private static final long serialVersionUID = 1L; - public InvalidGeoException(String message){ super(message); } diff --git a/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/LatLongDistanceFilter.java b/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/LatLongDistanceFilter.java index 94c3bd86ba0..4574e773890 100644 --- a/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/LatLongDistanceFilter.java +++ b/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/LatLongDistanceFilter.java @@ -33,11 +33,6 @@ import 
org.apache.lucene.spatial.DistanceUtils; * release. */ public class LatLongDistanceFilter extends DistanceFilter { - - /** - * - */ - private static final long serialVersionUID = 1L; double lat; double lng; diff --git a/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/Shape.java b/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/Shape.java index 2a5bdbbf087..5b95b3f8421 100644 --- a/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/Shape.java +++ b/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/Shape.java @@ -17,7 +17,6 @@ package org.apache.lucene.spatial.tier; -import java.io.Serializable; import java.util.ArrayList; import java.util.List; @@ -26,7 +25,7 @@ import java.util.List; * flux and might change in incompatible ways in the next * release. */ -public class Shape implements Serializable{ +public class Shape { private List area = new ArrayList(); private String tierId; diff --git a/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesianShapeFilter.java b/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesianShapeFilter.java deleted file mode 100644 index 55a941fedf5..00000000000 --- a/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesianShapeFilter.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.lucene.spatial.tier; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.NotSerializableException; -import java.io.ObjectOutputStream; - -import org.apache.lucene.util.LuceneTestCase; - -/** - * - * Test for {@link CartesianShapeFilter} - * - */ -public class TestCartesianShapeFilter extends LuceneTestCase { - - public void testSerializable() throws IOException { - CartesianShapeFilter filter = new CartesianShapeFilter(new Shape("1"), - "test"); - try { - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - ObjectOutputStream oos = new ObjectOutputStream(bos); - oos.writeObject(filter); - } catch (NotSerializableException e) { - fail("Filter should be serializable but raised a NotSerializableException ["+e.getMessage()+"]"); - } - } - -} diff --git a/lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/NumericRangeFilterBuilder.java b/lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/NumericRangeFilterBuilder.java index ea5f5741c34..b6ff2950649 100644 --- a/lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/NumericRangeFilterBuilder.java +++ b/lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/NumericRangeFilterBuilder.java @@ -154,7 +154,6 @@ public class NumericRangeFilterBuilder implements FilterBuilder { } static class NoMatchFilter extends Filter { - private static final long serialVersionUID = 1L; @Override public DocIdSet getDocIdSet(AtomicReaderContext context) throws 
IOException { diff --git a/lucene/src/java/org/apache/lucene/analysis/tokenattributes/CharTermAttributeImpl.java b/lucene/src/java/org/apache/lucene/analysis/tokenattributes/CharTermAttributeImpl.java index d45d280f73c..677f8d53da8 100644 --- a/lucene/src/java/org/apache/lucene/analysis/tokenattributes/CharTermAttributeImpl.java +++ b/lucene/src/java/org/apache/lucene/analysis/tokenattributes/CharTermAttributeImpl.java @@ -17,7 +17,6 @@ package org.apache.lucene.analysis.tokenattributes; * limitations under the License. */ -import java.io.Serializable; import java.nio.CharBuffer; import org.apache.lucene.util.ArrayUtil; @@ -30,7 +29,7 @@ import org.apache.lucene.util.UnicodeUtil; /** * The term text of a Token. */ -public class CharTermAttributeImpl extends AttributeImpl implements CharTermAttribute, TermToBytesRefAttribute, Cloneable, Serializable { +public class CharTermAttributeImpl extends AttributeImpl implements CharTermAttribute, TermToBytesRefAttribute, Cloneable { private static int MIN_BUFFER_SIZE = 10; private char[] termBuffer = new char[ArrayUtil.oversize(MIN_BUFFER_SIZE, RamUsageEstimator.NUM_BYTES_CHAR)]; diff --git a/lucene/src/java/org/apache/lucene/analysis/tokenattributes/FlagsAttributeImpl.java b/lucene/src/java/org/apache/lucene/analysis/tokenattributes/FlagsAttributeImpl.java index 986864a3470..eae06b2636a 100644 --- a/lucene/src/java/org/apache/lucene/analysis/tokenattributes/FlagsAttributeImpl.java +++ b/lucene/src/java/org/apache/lucene/analysis/tokenattributes/FlagsAttributeImpl.java @@ -17,8 +17,6 @@ package org.apache.lucene.analysis.tokenattributes; * limitations under the License. */ -import java.io.Serializable; - import org.apache.lucene.util.AttributeImpl; /** @@ -26,7 +24,7 @@ import org.apache.lucene.util.AttributeImpl; * eg from one TokenFilter to another one. * @lucene.experimental While we think this is here to stay, we may want to change it to be a long. 
*/ -public class FlagsAttributeImpl extends AttributeImpl implements FlagsAttribute, Cloneable, Serializable { +public class FlagsAttributeImpl extends AttributeImpl implements FlagsAttribute, Cloneable { private int flags = 0; /** diff --git a/lucene/src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttributeImpl.java b/lucene/src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttributeImpl.java index 0593200a18e..72191f42b54 100644 --- a/lucene/src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttributeImpl.java +++ b/lucene/src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttributeImpl.java @@ -17,14 +17,12 @@ package org.apache.lucene.analysis.tokenattributes; * limitations under the License. */ -import java.io.Serializable; - import org.apache.lucene.util.AttributeImpl; /** * The start and end character offset of a Token. */ -public class OffsetAttributeImpl extends AttributeImpl implements OffsetAttribute, Cloneable, Serializable { +public class OffsetAttributeImpl extends AttributeImpl implements OffsetAttribute, Cloneable { private int startOffset; private int endOffset; diff --git a/lucene/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttributeImpl.java b/lucene/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttributeImpl.java index 5c697c509df..22e5903e2af 100644 --- a/lucene/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttributeImpl.java +++ b/lucene/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttributeImpl.java @@ -17,15 +17,13 @@ package org.apache.lucene.analysis.tokenattributes; * limitations under the License. */ -import java.io.Serializable; - import org.apache.lucene.index.Payload; import org.apache.lucene.util.AttributeImpl; /** * The payload of a Token. See also {@link Payload}. 
*/ -public class PayloadAttributeImpl extends AttributeImpl implements PayloadAttribute, Cloneable, Serializable { +public class PayloadAttributeImpl extends AttributeImpl implements PayloadAttribute, Cloneable { private Payload payload; /** diff --git a/lucene/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttributeImpl.java b/lucene/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttributeImpl.java index 7ab4c69f142..7d3239abbae 100644 --- a/lucene/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttributeImpl.java +++ b/lucene/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttributeImpl.java @@ -17,8 +17,6 @@ package org.apache.lucene.analysis.tokenattributes; * limitations under the License. */ -import java.io.Serializable; - import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.util.AttributeImpl; @@ -46,7 +44,7 @@ import org.apache.lucene.util.AttributeImpl; * * */ -public class PositionIncrementAttributeImpl extends AttributeImpl implements PositionIncrementAttribute, Cloneable, Serializable { +public class PositionIncrementAttributeImpl extends AttributeImpl implements PositionIncrementAttribute, Cloneable { private int positionIncrement = 1; /** Set the position increment. The default value is one. diff --git a/lucene/src/java/org/apache/lucene/analysis/tokenattributes/TypeAttributeImpl.java b/lucene/src/java/org/apache/lucene/analysis/tokenattributes/TypeAttributeImpl.java index 3c3b7abc70e..045b30dda33 100644 --- a/lucene/src/java/org/apache/lucene/analysis/tokenattributes/TypeAttributeImpl.java +++ b/lucene/src/java/org/apache/lucene/analysis/tokenattributes/TypeAttributeImpl.java @@ -17,14 +17,12 @@ package org.apache.lucene.analysis.tokenattributes; * limitations under the License. */ -import java.io.Serializable; - import org.apache.lucene.util.AttributeImpl; /** * A Token's lexical type. The Default value is "word". 
*/ -public class TypeAttributeImpl extends AttributeImpl implements TypeAttribute, Cloneable, Serializable { +public class TypeAttributeImpl extends AttributeImpl implements TypeAttribute, Cloneable { private String type; public TypeAttributeImpl() { diff --git a/lucene/src/java/org/apache/lucene/document/Document.java b/lucene/src/java/org/apache/lucene/document/Document.java index 58e7a7ee3c3..0343f673e5a 100644 --- a/lucene/src/java/org/apache/lucene/document/Document.java +++ b/lucene/src/java/org/apache/lucene/document/Document.java @@ -35,7 +35,7 @@ import org.apache.lucene.index.IndexReader; // for javadoc * ScoreDoc#doc} or {@link IndexReader#document(int)}. */ -public final class Document implements java.io.Serializable { +public final class Document { List fields = new ArrayList(); private float boost = 1.0f; diff --git a/lucene/src/java/org/apache/lucene/document/Field.java b/lucene/src/java/org/apache/lucene/document/Field.java index 8ab55f390c2..6c6bccb79f1 100644 --- a/lucene/src/java/org/apache/lucene/document/Field.java +++ b/lucene/src/java/org/apache/lucene/document/Field.java @@ -18,7 +18,6 @@ package org.apache.lucene.document; */ import java.io.Reader; -import java.io.Serializable; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.index.IndexWriter; @@ -32,7 +31,7 @@ import org.apache.lucene.util.StringHelper; index, so that they may be returned with hits on the document. */ -public final class Field extends AbstractField implements Fieldable, Serializable { +public final class Field extends AbstractField implements Fieldable { /** Specifies whether and how a field should be stored. 
*/ public static enum Store { diff --git a/lucene/src/java/org/apache/lucene/document/FieldSelector.java b/lucene/src/java/org/apache/lucene/document/FieldSelector.java index 079ade6ffb5..da98add236e 100755 --- a/lucene/src/java/org/apache/lucene/document/FieldSelector.java +++ b/lucene/src/java/org/apache/lucene/document/FieldSelector.java @@ -1,6 +1,5 @@ package org.apache.lucene.document; -import java.io.Serializable; /** * Copyright 2004 The Apache Software Foundation * @@ -22,7 +21,7 @@ import java.io.Serializable; * what Fields get loaded on a {@link Document} by {@link org.apache.lucene.index.IndexReader#document(int,org.apache.lucene.document.FieldSelector)} * **/ -public interface FieldSelector extends Serializable { +public interface FieldSelector { /** * diff --git a/lucene/src/java/org/apache/lucene/document/Fieldable.java b/lucene/src/java/org/apache/lucene/document/Fieldable.java index 561d4df8140..20616d473c9 100755 --- a/lucene/src/java/org/apache/lucene/document/Fieldable.java +++ b/lucene/src/java/org/apache/lucene/document/Fieldable.java @@ -22,7 +22,6 @@ import org.apache.lucene.search.PhraseQuery; // for javadocs import org.apache.lucene.search.spans.SpanQuery; // for javadocs import java.io.Reader; -import java.io.Serializable; /** * Synonymous with {@link Field}. @@ -34,7 +33,7 @@ import java.io.Serializable; *

      * **/ -public interface Fieldable extends Serializable { +public interface Fieldable { /** Sets the boost factor hits on this field. This value will be * multiplied into the score of all hits on this this field of this * document. diff --git a/lucene/src/java/org/apache/lucene/index/Payload.java b/lucene/src/java/org/apache/lucene/index/Payload.java index f7639a3830f..34b91a18630 100644 --- a/lucene/src/java/org/apache/lucene/index/Payload.java +++ b/lucene/src/java/org/apache/lucene/index/Payload.java @@ -17,8 +17,6 @@ package org.apache.lucene.index; * limitations under the License. */ -import java.io.Serializable; - import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.util.ArrayUtil; @@ -34,7 +32,7 @@ import org.apache.lucene.util.ArrayUtil; * to retrieve the payloads from the index.
      * */ -public class Payload implements Serializable, Cloneable { +public class Payload implements Cloneable { /** the byte array containing the payload data */ protected byte[] data; diff --git a/lucene/src/java/org/apache/lucene/index/Term.java b/lucene/src/java/org/apache/lucene/index/Term.java index 04e275ec4d3..914684a640e 100644 --- a/lucene/src/java/org/apache/lucene/index/Term.java +++ b/lucene/src/java/org/apache/lucene/index/Term.java @@ -30,7 +30,7 @@ import org.apache.lucene.util.StringHelper; Note that terms may represent more than words from text fields, but also things like dates, email addresses, urls, etc. */ -public final class Term implements Comparable, java.io.Serializable { +public final class Term implements Comparable { String field; BytesRef bytes; @@ -199,11 +199,4 @@ public final class Term implements Comparable, java.io.Serializable { @Override public final String toString() { return field + ":" + bytes.utf8ToString(); } - - private void readObject(java.io.ObjectInputStream in) - throws java.io.IOException, ClassNotFoundException - { - in.defaultReadObject(); - field = StringHelper.intern(field); - } } diff --git a/lucene/src/java/org/apache/lucene/index/TermVectorOffsetInfo.java b/lucene/src/java/org/apache/lucene/index/TermVectorOffsetInfo.java index 5deb626df94..428894532b2 100644 --- a/lucene/src/java/org/apache/lucene/index/TermVectorOffsetInfo.java +++ b/lucene/src/java/org/apache/lucene/index/TermVectorOffsetInfo.java @@ -1,7 +1,5 @@ package org.apache.lucene.index; -import java.io.Serializable; - /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -24,7 +22,7 @@ import java.io.Serializable; * offset information. This offset information is the character offset as set during the Analysis phase (and thus may not be the actual offset in the * original content). 
*/ -public class TermVectorOffsetInfo implements Serializable { +public class TermVectorOffsetInfo { /** * Convenience declaration when creating a {@link org.apache.lucene.index.TermPositionVector} that stores only position information. */ diff --git a/lucene/src/java/org/apache/lucene/messages/Message.java b/lucene/src/java/org/apache/lucene/messages/Message.java index f56f57999bd..df7ac827019 100644 --- a/lucene/src/java/org/apache/lucene/messages/Message.java +++ b/lucene/src/java/org/apache/lucene/messages/Message.java @@ -17,14 +17,13 @@ package org.apache.lucene.messages; * limitations under the License. */ -import java.io.Serializable; import java.util.Locale; /** * Message Interface for a lazy loading. * For Native Language Support (NLS), system of software internationalization. */ -public interface Message extends Serializable { +public interface Message { public String getKey(); diff --git a/lucene/src/java/org/apache/lucene/messages/MessageImpl.java b/lucene/src/java/org/apache/lucene/messages/MessageImpl.java index cb837351f12..78d8e2b42ef 100644 --- a/lucene/src/java/org/apache/lucene/messages/MessageImpl.java +++ b/lucene/src/java/org/apache/lucene/messages/MessageImpl.java @@ -25,8 +25,6 @@ import java.util.Locale; */ public class MessageImpl implements Message { - private static final long serialVersionUID = -3077643314630884523L; - private String key; private Object[] arguments = new Object[0]; diff --git a/lucene/src/java/org/apache/lucene/search/AutomatonQuery.java b/lucene/src/java/org/apache/lucene/search/AutomatonQuery.java index 4df94c4486e..a8368339d4d 100644 --- a/lucene/src/java/org/apache/lucene/search/AutomatonQuery.java +++ b/lucene/src/java/org/apache/lucene/search/AutomatonQuery.java @@ -18,7 +18,6 @@ package org.apache.lucene.search; */ import java.io.IOException; -import java.io.Serializable; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; @@ -61,7 +60,7 @@ public class AutomatonQuery extends 
MultiTermQuery { * in the ctor the query computes one of these, the actual * implementation depends upon the automaton's structure. */ - private abstract class TermsEnumFactory implements Serializable { + private abstract class TermsEnumFactory { protected abstract TermsEnum getTermsEnum(Terms terms, AttributeSource atts) throws IOException; } diff --git a/lucene/src/java/org/apache/lucene/search/BooleanClause.java b/lucene/src/java/org/apache/lucene/search/BooleanClause.java index 6212a3703a0..91af8e60eec 100644 --- a/lucene/src/java/org/apache/lucene/search/BooleanClause.java +++ b/lucene/src/java/org/apache/lucene/search/BooleanClause.java @@ -18,7 +18,7 @@ package org.apache.lucene.search; */ /** A clause in a BooleanQuery. */ -public class BooleanClause implements java.io.Serializable { +public class BooleanClause { /** Specifies how clauses are to occur in matching documents. */ public static enum Occur { diff --git a/lucene/src/java/org/apache/lucene/search/CachingWrapperFilter.java b/lucene/src/java/org/apache/lucene/search/CachingWrapperFilter.java index 1f865670b56..e726aa8d268 100644 --- a/lucene/src/java/org/apache/lucene/search/CachingWrapperFilter.java +++ b/lucene/src/java/org/apache/lucene/search/CachingWrapperFilter.java @@ -17,7 +17,6 @@ package org.apache.lucene.search; * limitations under the License. 
*/ -import java.io.Serializable; import java.io.IOException; import java.util.Map; import java.util.WeakHashMap; @@ -68,7 +67,7 @@ public class CachingWrapperFilter extends Filter { protected final FilterCache cache; - static abstract class FilterCache implements Serializable { + static abstract class FilterCache { /** * A transient Filter cache (package private because of test) diff --git a/lucene/src/java/org/apache/lucene/search/Explanation.java b/lucene/src/java/org/apache/lucene/search/Explanation.java index 6798c1c0d3c..299752841b5 100644 --- a/lucene/src/java/org/apache/lucene/search/Explanation.java +++ b/lucene/src/java/org/apache/lucene/search/Explanation.java @@ -17,11 +17,10 @@ package org.apache.lucene.search; * limitations under the License. */ -import java.io.Serializable; import java.util.ArrayList; /** Expert: Describes the score computation for document and query. */ -public class Explanation implements java.io.Serializable { +public class Explanation { private float value; // the value of this node private String description; // what it represents private ArrayList details; // sub-explanations @@ -135,7 +134,7 @@ public class Explanation implements java.io.Serializable { * before storing any large or un-serializable fields. 
* */ - public static abstract class IDFExplanation implements Serializable { + public static abstract class IDFExplanation { /** * @return the idf factor */ diff --git a/lucene/src/java/org/apache/lucene/search/FieldCache.java b/lucene/src/java/org/apache/lucene/search/FieldCache.java index 169a0e46c0e..e286a3414b3 100644 --- a/lucene/src/java/org/apache/lucene/search/FieldCache.java +++ b/lucene/src/java/org/apache/lucene/search/FieldCache.java @@ -29,7 +29,6 @@ import org.apache.lucene.analysis.NumericTokenStream; // for javadocs import org.apache.lucene.util.packed.PackedInts; import java.io.IOException; -import java.io.Serializable; import java.io.PrintStream; import java.text.DecimalFormat; @@ -61,7 +60,7 @@ public interface FieldCache { * is used to specify a custom parser to {@link * SortField#SortField(String, FieldCache.Parser)}. */ - public interface Parser extends Serializable { + public interface Parser { } /** Interface to parse bytes from document fields. diff --git a/lucene/src/java/org/apache/lucene/search/FieldComparatorSource.java b/lucene/src/java/org/apache/lucene/search/FieldComparatorSource.java index a0613afb73b..f7ca0642ac9 100644 --- a/lucene/src/java/org/apache/lucene/search/FieldComparatorSource.java +++ b/lucene/src/java/org/apache/lucene/search/FieldComparatorSource.java @@ -18,7 +18,6 @@ package org.apache.lucene.search; */ import java.io.IOException; -import java.io.Serializable; /** * Provides a {@link FieldComparator} for custom field sorting. @@ -26,7 +25,7 @@ import java.io.Serializable; * @lucene.experimental * */ -public abstract class FieldComparatorSource implements Serializable { +public abstract class FieldComparatorSource { /** * Creates a comparator for the field in the given index. 
diff --git a/lucene/src/java/org/apache/lucene/search/Filter.java b/lucene/src/java/org/apache/lucene/search/Filter.java index 468a43b663c..22bd820c798 100644 --- a/lucene/src/java/org/apache/lucene/search/Filter.java +++ b/lucene/src/java/org/apache/lucene/search/Filter.java @@ -27,7 +27,7 @@ import org.apache.lucene.util.DocIdBitSet; * Abstract base class for restricting which documents may * be returned during searching. */ -public abstract class Filter implements java.io.Serializable { +public abstract class Filter { /** * Creates a {@link DocIdSet} enumerating the documents that should be diff --git a/lucene/src/java/org/apache/lucene/search/MultiTermQuery.java b/lucene/src/java/org/apache/lucene/search/MultiTermQuery.java index 8051d7fc160..363922a0b1d 100644 --- a/lucene/src/java/org/apache/lucene/search/MultiTermQuery.java +++ b/lucene/src/java/org/apache/lucene/search/MultiTermQuery.java @@ -18,7 +18,6 @@ package org.apache.lucene.search; */ import java.io.IOException; -import java.io.Serializable; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; @@ -67,7 +66,7 @@ public abstract class MultiTermQuery extends Query { transient int numberOfTerms = 0; /** Abstract class that defines how the query is rewritten. 
*/ - public static abstract class RewriteMethod implements Serializable { + public static abstract class RewriteMethod { public abstract Query rewrite(IndexReader reader, MultiTermQuery query) throws IOException; } @@ -90,11 +89,6 @@ public abstract class MultiTermQuery extends Query { result.setBoost(query.getBoost()); return result; } - - // Make sure we are still a singleton even after deserializing - protected Object readResolve() { - return CONSTANT_SCORE_FILTER_REWRITE; - } }; /** A rewrite method that first translates each term into @@ -240,11 +234,6 @@ public abstract class MultiTermQuery extends Query { public void setDocCountPercent(double percent) { throw new UnsupportedOperationException("Please create a private instance"); } - - // Make sure we are still a singleton even after deserializing - protected Object readResolve() { - return CONSTANT_SCORE_AUTO_REWRITE_DEFAULT; - } }; /** diff --git a/lucene/src/java/org/apache/lucene/search/Query.java b/lucene/src/java/org/apache/lucene/search/Query.java index 8b937aa5b32..40ec80d44a5 100644 --- a/lucene/src/java/org/apache/lucene/search/Query.java +++ b/lucene/src/java/org/apache/lucene/search/Query.java @@ -44,7 +44,7 @@ import org.apache.lucene.index.Term;
    1. {@link org.apache.lucene.queryParser.QueryParser QueryParser} */ -public abstract class Query implements java.io.Serializable, Cloneable { +public abstract class Query implements Cloneable { private float boost = 1.0f; // query boost factor /** Sets the boost for this query clause to b. Documents diff --git a/lucene/src/java/org/apache/lucene/search/ScoreDoc.java b/lucene/src/java/org/apache/lucene/search/ScoreDoc.java index f2828d509a8..47d9fa7d5a7 100644 --- a/lucene/src/java/org/apache/lucene/search/ScoreDoc.java +++ b/lucene/src/java/org/apache/lucene/search/ScoreDoc.java @@ -19,7 +19,7 @@ package org.apache.lucene.search; /** Expert: Returned by low-level search implementations. * @see TopDocs */ -public class ScoreDoc implements java.io.Serializable { +public class ScoreDoc { /** Expert: The score of this document for the query. */ public float score; diff --git a/lucene/src/java/org/apache/lucene/search/Similarity.java b/lucene/src/java/org/apache/lucene/search/Similarity.java index 306f904c270..4333bccda2e 100644 --- a/lucene/src/java/org/apache/lucene/search/Similarity.java +++ b/lucene/src/java/org/apache/lucene/search/Similarity.java @@ -19,7 +19,6 @@ package org.apache.lucene.search; import java.io.IOException; -import java.io.Serializable; import java.util.Collection; import org.apache.lucene.index.FieldInvertState; @@ -525,7 +524,7 @@ import org.apache.lucene.util.SmallFloat; * @see org.apache.lucene.index.IndexWriterConfig#setSimilarityProvider(SimilarityProvider) * @see IndexSearcher#setSimilarityProvider(SimilarityProvider) */ -public abstract class Similarity implements Serializable { +public abstract class Similarity { public static final int NO_DOC_ID_PROVIDED = -1; diff --git a/lucene/src/java/org/apache/lucene/search/Sort.java b/lucene/src/java/org/apache/lucene/search/Sort.java index 7969c991609..c06d79b36b2 100644 --- a/lucene/src/java/org/apache/lucene/search/Sort.java +++ b/lucene/src/java/org/apache/lucene/search/Sort.java @@ -17,7 
+17,6 @@ package org.apache.lucene.search; * limitations under the License. */ -import java.io.Serializable; import java.util.Arrays; @@ -97,8 +96,7 @@ import java.util.Arrays; * * @since lucene 1.4 */ -public class Sort -implements Serializable { +public class Sort { /** * Represents sorting by computed relevance. Using this sort criteria returns diff --git a/lucene/src/java/org/apache/lucene/search/SortField.java b/lucene/src/java/org/apache/lucene/search/SortField.java index 663261ebd59..cf2cc8a1f26 100644 --- a/lucene/src/java/org/apache/lucene/search/SortField.java +++ b/lucene/src/java/org/apache/lucene/search/SortField.java @@ -18,7 +18,6 @@ package org.apache.lucene.search; */ import java.io.IOException; -import java.io.Serializable; import java.util.Locale; import org.apache.lucene.search.cache.*; @@ -33,8 +32,7 @@ import org.apache.lucene.util.StringHelper; * @since lucene 1.4 * @see Sort */ -public class SortField -implements Serializable { +public class SortField { /** Sort by document score (relevance). Sort values are Float and higher * values are at the front. */ @@ -427,13 +425,6 @@ implements Serializable { return hash; } - // field must be interned after reading from stream - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - in.defaultReadObject(); - if (field != null) - field = StringHelper.intern(field); - } - /** Returns the {@link FieldComparator} to use for * sorting. * diff --git a/lucene/src/java/org/apache/lucene/search/TopDocs.java b/lucene/src/java/org/apache/lucene/search/TopDocs.java index 6d14f88bb20..ba5f3e45252 100644 --- a/lucene/src/java/org/apache/lucene/search/TopDocs.java +++ b/lucene/src/java/org/apache/lucene/search/TopDocs.java @@ -20,7 +20,7 @@ package org.apache.lucene.search; /** Represents hits returned by {@link * IndexSearcher#search(Query,Filter,int)} and {@link * IndexSearcher#search(Query,int)}. 
*/ -public class TopDocs implements java.io.Serializable { +public class TopDocs { /** The total number of hits for the query. */ public int totalHits; diff --git a/lucene/src/java/org/apache/lucene/search/Weight.java b/lucene/src/java/org/apache/lucene/search/Weight.java index 7ea739b7e1e..3fb892714c6 100644 --- a/lucene/src/java/org/apache/lucene/search/Weight.java +++ b/lucene/src/java/org/apache/lucene/search/Weight.java @@ -18,7 +18,6 @@ package org.apache.lucene.search; */ import java.io.IOException; -import java.io.Serializable; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.AtomicReaderContext; @@ -54,7 +53,7 @@ import org.apache.lucene.index.IndexReader.ReaderContext; * * @since 2.9 */ -public abstract class Weight implements Serializable { +public abstract class Weight { /** * An explanation of the score computation for the named document. diff --git a/lucene/src/java/org/apache/lucene/search/cache/EntryCreator.java b/lucene/src/java/org/apache/lucene/search/cache/EntryCreator.java index 362cc83a71e..43e42f2ff06 100644 --- a/lucene/src/java/org/apache/lucene/search/cache/EntryCreator.java +++ b/lucene/src/java/org/apache/lucene/search/cache/EntryCreator.java @@ -18,7 +18,6 @@ package org.apache.lucene.search.cache; */ import java.io.IOException; -import java.io.Serializable; import org.apache.lucene.index.IndexReader; @@ -27,7 +26,7 @@ import org.apache.lucene.index.IndexReader; * * @lucene.experimental */ -public abstract class EntryCreator implements Serializable +public abstract class EntryCreator { public abstract T create( IndexReader reader ) throws IOException; public abstract T validate( T entry, IndexReader reader ) throws IOException; diff --git a/lucene/src/java/org/apache/lucene/search/function/ValueSource.java b/lucene/src/java/org/apache/lucene/search/function/ValueSource.java index debaa1615b5..58485aeee65 100755 --- a/lucene/src/java/org/apache/lucene/search/function/ValueSource.java +++ 
b/lucene/src/java/org/apache/lucene/search/function/ValueSource.java @@ -23,7 +23,6 @@ import org.apache.lucene.index.IndexReader.ReaderContext; import org.apache.lucene.search.function.DocValues; import java.io.IOException; -import java.io.Serializable; /** * Expert: source of values for basic function queries. @@ -37,7 +36,7 @@ import java.io.Serializable; * * */ -public abstract class ValueSource implements Serializable { +public abstract class ValueSource { /** * Return the DocValues used by the function query. diff --git a/lucene/src/java/org/apache/lucene/search/payloads/PayloadFunction.java b/lucene/src/java/org/apache/lucene/search/payloads/PayloadFunction.java index f4c34c2ca7e..b6c60350992 100644 --- a/lucene/src/java/org/apache/lucene/search/payloads/PayloadFunction.java +++ b/lucene/src/java/org/apache/lucene/search/payloads/PayloadFunction.java @@ -16,7 +16,6 @@ package org.apache.lucene.search.payloads; * limitations under the License. */ -import java.io.Serializable; import org.apache.lucene.search.Explanation; /** @@ -29,7 +28,7 @@ import org.apache.lucene.search.Explanation; * change * **/ -public abstract class PayloadFunction implements Serializable { +public abstract class PayloadFunction { /** * Calculate the score up to this point for this doc and field diff --git a/lucene/src/java/org/apache/lucene/store/RAMDirectory.java b/lucene/src/java/org/apache/lucene/store/RAMDirectory.java index e7ed3044fb9..9d07160a7b2 100644 --- a/lucene/src/java/org/apache/lucene/store/RAMDirectory.java +++ b/lucene/src/java/org/apache/lucene/store/RAMDirectory.java @@ -19,7 +19,6 @@ package org.apache.lucene.store; import java.io.IOException; import java.io.FileNotFoundException; -import java.io.Serializable; import java.util.ArrayList; import java.util.Collection; import java.util.List; @@ -35,10 +34,7 @@ import org.apache.lucene.util.ThreadInterruptedException; * implementation is by default the {@link SingleInstanceLockFactory} * but can be changed with {@link 
#setLockFactory}. */ -public class RAMDirectory extends Directory implements Serializable { - - private static final long serialVersionUID = 1l; - +public class RAMDirectory extends Directory { protected final Map fileMap = new ConcurrentHashMap(); protected final AtomicLong sizeInBytes = new AtomicLong(); diff --git a/lucene/src/java/org/apache/lucene/store/RAMFile.java b/lucene/src/java/org/apache/lucene/store/RAMFile.java index 36306725cc1..123f800fba8 100644 --- a/lucene/src/java/org/apache/lucene/store/RAMFile.java +++ b/lucene/src/java/org/apache/lucene/store/RAMFile.java @@ -18,13 +18,9 @@ package org.apache.lucene.store; */ import java.util.ArrayList; -import java.io.Serializable; /** @lucene.internal */ -public class RAMFile implements Serializable { - - private static final long serialVersionUID = 1l; - +public class RAMFile { protected ArrayList buffers = new ArrayList(); long length; RAMDirectory directory; diff --git a/lucene/src/java/org/apache/lucene/util/AttributeImpl.java b/lucene/src/java/org/apache/lucene/util/AttributeImpl.java index d22491bf2c6..7d713358204 100644 --- a/lucene/src/java/org/apache/lucene/util/AttributeImpl.java +++ b/lucene/src/java/org/apache/lucene/util/AttributeImpl.java @@ -17,7 +17,6 @@ package org.apache.lucene.util; * limitations under the License. */ -import java.io.Serializable; import java.lang.reflect.Field; import java.lang.reflect.Modifier; import java.lang.ref.WeakReference; @@ -30,7 +29,7 @@ import java.util.LinkedList; * Attributes are used to add data in a dynamic, yet type-safe way to a source * of usually streamed objects, e. g. a {@link org.apache.lucene.analysis.TokenStream}. */ -public abstract class AttributeImpl implements Cloneable, Serializable, Attribute { +public abstract class AttributeImpl implements Cloneable, Attribute { /** * Clears the values in this AttributeImpl and resets it to its * default value. 
If this implementation implements more than one Attribute interface diff --git a/lucene/src/java/org/apache/lucene/util/BytesRef.java b/lucene/src/java/org/apache/lucene/util/BytesRef.java index a90b6fb682d..6ad185f4ffa 100644 --- a/lucene/src/java/org/apache/lucene/util/BytesRef.java +++ b/lucene/src/java/org/apache/lucene/util/BytesRef.java @@ -21,14 +21,13 @@ import java.util.Comparator; import java.io.UnsupportedEncodingException; import java.io.ObjectInput; import java.io.ObjectOutput; -import java.io.Externalizable; import java.io.IOException; /** Represents byte[], as a slice (offset + length) into an * existing byte[]. * * @lucene.experimental */ -public final class BytesRef implements Comparable, Externalizable { +public final class BytesRef implements Comparable { static final int HASH_PRIME = 31; public static final byte[] EMPTY_BYTES = new byte[0]; @@ -365,25 +364,4 @@ public final class BytesRef implements Comparable, Externalizable { return a.length - b.length; } } - - public void writeExternal(ObjectOutput out) - throws IOException - { - out.writeInt(length); - if (length > 0) { - out.write(bytes, offset, length); - } - } - - public void readExternal( ObjectInput in ) throws - IOException, ClassNotFoundException { - length = in.readInt(); - offset = 0; - if (length > 0) { - bytes = new byte[length]; - in.read(bytes, 0, length); - } else { - bytes = EMPTY_BYTES; - } - } } diff --git a/lucene/src/java/org/apache/lucene/util/MapBackedSet.java b/lucene/src/java/org/apache/lucene/util/MapBackedSet.java index 9db05ec86ba..d6c6567b778 100644 --- a/lucene/src/java/org/apache/lucene/util/MapBackedSet.java +++ b/lucene/src/java/org/apache/lucene/util/MapBackedSet.java @@ -17,7 +17,6 @@ package org.apache.lucene.util; * limitations under the License. 
*/ -import java.io.Serializable; import java.util.AbstractSet; import java.util.Iterator; import java.util.Map; @@ -28,10 +27,7 @@ import java.util.Map; * * @lucene.internal */ -public final class MapBackedSet extends AbstractSet implements Serializable { - - private static final long serialVersionUID = -6761513279741915432L; - +public final class MapBackedSet extends AbstractSet { private final Map map; /** diff --git a/lucene/src/java/org/apache/lucene/util/OpenBitSet.java b/lucene/src/java/org/apache/lucene/util/OpenBitSet.java index 9815ad7dec0..8e093b70f4f 100644 --- a/lucene/src/java/org/apache/lucene/util/OpenBitSet.java +++ b/lucene/src/java/org/apache/lucene/util/OpenBitSet.java @@ -18,7 +18,6 @@ package org.apache.lucene.util; import java.util.Arrays; -import java.io.Serializable; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; @@ -75,7 +74,7 @@ Test system: AMD Opteron, 64 bit linux, Sun Java 1.5_06 -server -Xbatch -Xmx64M */ -public class OpenBitSet extends DocIdSet implements Bits, Cloneable, Serializable { +public class OpenBitSet extends DocIdSet implements Bits, Cloneable { protected long[] bits; protected int wlen; // number of words (elements) used in the array diff --git a/lucene/src/java/org/apache/lucene/util/automaton/Automaton.java b/lucene/src/java/org/apache/lucene/util/automaton/Automaton.java index d4f0f229ba4..4311ece1032 100644 --- a/lucene/src/java/org/apache/lucene/util/automaton/Automaton.java +++ b/lucene/src/java/org/apache/lucene/util/automaton/Automaton.java @@ -29,7 +29,6 @@ package org.apache.lucene.util.automaton; -import java.io.Serializable; import java.util.Arrays; import java.util.BitSet; import java.util.Collection; @@ -75,7 +74,7 @@ import org.apache.lucene.util.RamUsageEstimator; *

      * @lucene.experimental */ -public class Automaton implements Serializable, Cloneable { +public class Automaton implements Cloneable { /** * Minimize using Hopcroft's O(n log n) algorithm. This is regarded as one of diff --git a/lucene/src/java/org/apache/lucene/util/automaton/RunAutomaton.java b/lucene/src/java/org/apache/lucene/util/automaton/RunAutomaton.java index eb20f9be7c3..2d9c0d0a72d 100644 --- a/lucene/src/java/org/apache/lucene/util/automaton/RunAutomaton.java +++ b/lucene/src/java/org/apache/lucene/util/automaton/RunAutomaton.java @@ -29,14 +29,12 @@ package org.apache.lucene.util.automaton; -import java.io.Serializable; - /** * Finite-state automaton with fast run operation. * * @lucene.experimental */ -public abstract class RunAutomaton implements Serializable { +public abstract class RunAutomaton { final int maxInterval; final int size; final boolean[] accept; diff --git a/lucene/src/java/org/apache/lucene/util/automaton/State.java b/lucene/src/java/org/apache/lucene/util/automaton/State.java index 6aa04e51d96..486452fd347 100644 --- a/lucene/src/java/org/apache/lucene/util/automaton/State.java +++ b/lucene/src/java/org/apache/lucene/util/automaton/State.java @@ -31,7 +31,6 @@ package org.apache.lucene.util.automaton; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.RamUsageEstimator; -import java.io.Serializable; import java.util.Collection; import java.util.Comparator; import java.util.Iterator; @@ -41,7 +40,7 @@ import java.util.Iterator; * * @lucene.experimental */ -public class State implements Serializable, Comparable { +public class State implements Comparable { boolean accept; public Transition[] transitionsArray; diff --git a/lucene/src/java/org/apache/lucene/util/automaton/Transition.java b/lucene/src/java/org/apache/lucene/util/automaton/Transition.java index 8cdfe76098a..d03bcbc69ba 100644 --- a/lucene/src/java/org/apache/lucene/util/automaton/Transition.java +++ 
b/lucene/src/java/org/apache/lucene/util/automaton/Transition.java @@ -29,7 +29,6 @@ package org.apache.lucene.util.automaton; -import java.io.Serializable; import java.util.Comparator; /** @@ -40,7 +39,7 @@ import java.util.Comparator; * * @lucene.experimental */ -public class Transition implements Serializable, Cloneable { +public class Transition implements Cloneable { /* * CLASS INVARIANT: min<=max diff --git a/lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java b/lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java index e84b2f9a8b9..483106d9fef 100644 --- a/lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java +++ b/lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java @@ -1,10 +1,6 @@ package org.apache.lucene.search; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.ObjectOutputStream; import java.util.Random; import java.lang.reflect.Method; @@ -102,7 +98,6 @@ public class QueryUtils { * @see #checkFirstSkipTo * @see #checkSkipTo * @see #checkExplanations - * @see #checkSerialization * @see #checkEqual */ public static void check(Random random, Query q1, IndexSearcher s) { @@ -124,7 +119,6 @@ public class QueryUtils { wrapped.close(); } checkExplanations(q1,s); - checkSerialization(q1,s); Query q2 = (Query)q1.clone(); checkEqual(s.rewrite(q1), @@ -204,31 +198,6 @@ public class QueryUtils { r.close(); return d; } - - - /** check that the query weight is serializable. - * @throws IOException if serialization check fail. 
- */ - private static void checkSerialization(Query q, IndexSearcher s) throws IOException { - Weight w = q.weight(s); - try { - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - ObjectOutputStream oos = new ObjectOutputStream(bos); - oos.writeObject(w); - oos.close(); - ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray())); - ois.readObject(); - ois.close(); - - //skip equals() test for now - most weights don't override equals() and we won't add this just for the tests. - //TestCase.assertEquals("writeObject(w) != w. ("+w+")",w2,w); - - } catch (Exception e) { - IOException e2 = new IOException("Serialization failed for "+w); - e2.initCause(e); - throw e2; - } - } /** alternate scorer skipTo(),skipTo(),next(),next(),skipTo(),skipTo(), etc * and ensure a hitcollector receives same docs and scores diff --git a/lucene/src/test/org/apache/lucene/search/TestCustomSearcherSort.java b/lucene/src/test/org/apache/lucene/search/TestCustomSearcherSort.java index 08781570e59..7e2982cac2e 100644 --- a/lucene/src/test/org/apache/lucene/search/TestCustomSearcherSort.java +++ b/lucene/src/test/org/apache/lucene/search/TestCustomSearcherSort.java @@ -17,7 +17,6 @@ package org.apache.lucene.search; */ import java.io.IOException; -import java.io.Serializable; import java.util.Calendar; import java.util.GregorianCalendar; import java.util.Map; @@ -34,7 +33,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; /** Unit test for sorting code. 
*/ -public class TestCustomSearcherSort extends LuceneTestCase implements Serializable { +public class TestCustomSearcherSort extends LuceneTestCase { private Directory index = null; private IndexReader reader; diff --git a/lucene/src/test/org/apache/lucene/search/TestSort.java b/lucene/src/test/org/apache/lucene/search/TestSort.java index 39d2f170b63..746a70dc471 100644 --- a/lucene/src/test/org/apache/lucene/search/TestSort.java +++ b/lucene/src/test/org/apache/lucene/search/TestSort.java @@ -18,7 +18,6 @@ package org.apache.lucene.search; */ import java.io.IOException; -import java.io.Serializable; import java.text.Collator; import java.util.ArrayList; import java.util.BitSet; @@ -65,7 +64,7 @@ import org.apache.lucene.util._TestUtil; * @since lucene 1.4 */ -public class TestSort extends LuceneTestCase implements Serializable { +public class TestSort extends LuceneTestCase { private static final int NUM_STRINGS = 6000 * RANDOM_MULTIPLIER; private IndexSearcher full; diff --git a/lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java b/lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java index 94f71aea46e..eb6901dc5bf 100644 --- a/lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java +++ b/lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java @@ -139,18 +139,6 @@ public class TestRAMDirectory extends LuceneTestCase { writer.close(); } - - public void testSerializable() throws IOException { - Directory dir = new RAMDirectory(); - ByteArrayOutputStream bos = new ByteArrayOutputStream(1024); - assertEquals("initially empty", 0, bos.size()); - ObjectOutput out = new ObjectOutputStream(bos); - int headerSize = bos.size(); - out.writeObject(dir); - out.close(); - assertTrue("contains more then just header", headerSize < bos.size()); - } - @Override public void tearDown() throws Exception { // cleanup diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/ByteVector.java 
b/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/ByteVector.java index 64768d435c7..e8ad2d6e269 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/ByteVector.java +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/ByteVector.java @@ -18,14 +18,12 @@ package org.apache.lucene.analysis.compound.hyphenation; -import java.io.Serializable; - /** * This class implements a simple byte vector with access to the underlying * array. * This class has been taken from the Apache FOP project (http://xmlgraphics.apache.org/fop/). They have been slightly modified. */ -public class ByteVector implements Serializable { +public class ByteVector { /** * Capacity increment size diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/CharVector.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/CharVector.java index 373935ab6bc..15a7aae7d10 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/CharVector.java +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/CharVector.java @@ -17,15 +17,13 @@ package org.apache.lucene.analysis.compound.hyphenation; -import java.io.Serializable; - /** * This class implements a simple char vector with access to the underlying * array. * * This class has been taken from the Apache FOP project (http://xmlgraphics.apache.org/fop/). They have been slightly modified. 
*/ -public class CharVector implements Cloneable, Serializable { +public class CharVector implements Cloneable { /** * Capacity increment size diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/Hyphen.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/Hyphen.java index 7ecbdf384b8..151c37fb92c 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/Hyphen.java +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/Hyphen.java @@ -17,8 +17,6 @@ package org.apache.lucene.analysis.compound.hyphenation; -import java.io.Serializable; - /** * This class represents a hyphen. A 'full' hyphen is made of 3 parts: the * pre-break text, post-break text and no-break. If no line-break is generated @@ -32,7 +30,7 @@ import java.io.Serializable; * This class has been taken from the Apache FOP project (http://xmlgraphics.apache.org/fop/). They have been slightly modified. */ -public class Hyphen implements Serializable { +public class Hyphen { public String preBreak; public String noBreak; diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/HyphenationTree.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/HyphenationTree.java index c61a8d06d1b..810e808503c 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/HyphenationTree.java +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/HyphenationTree.java @@ -18,7 +18,6 @@ package org.apache.lucene.analysis.compound.hyphenation; import java.io.File; -import java.io.Serializable; import java.net.MalformedURLException; import java.util.ArrayList; import java.util.HashMap; @@ -31,10 +30,7 @@ import org.xml.sax.InputSource; * * This class has been taken from the Apache FOP project (http://xmlgraphics.apache.org/fop/). 
They have been slightly modified. */ -public class HyphenationTree extends TernaryTree implements PatternConsumer, - Serializable { - - private static final long serialVersionUID = -7842107987915665573L; +public class HyphenationTree extends TernaryTree implements PatternConsumer { /** * value space: stores the interletter values diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java index 61a68f7ac72..178fcb44eee 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java @@ -19,7 +19,6 @@ package org.apache.lucene.analysis.compound.hyphenation; import java.util.Enumeration; import java.util.Stack; -import java.io.Serializable; /** *

      Ternary Search Tree.

      @@ -63,7 +62,7 @@ import java.io.Serializable; * This class has been taken from the Apache FOP project (http://xmlgraphics.apache.org/fop/). They have been slightly modified. */ -public class TernaryTree implements Cloneable, Serializable { +public class TernaryTree implements Cloneable { /** * We use 4 arrays to represent a node. I guess I should have created a proper diff --git a/modules/analysis/icu/src/java/org/apache/lucene/analysis/icu/tokenattributes/ScriptAttributeImpl.java b/modules/analysis/icu/src/java/org/apache/lucene/analysis/icu/tokenattributes/ScriptAttributeImpl.java index 3a54af94b58..1a14647e306 100644 --- a/modules/analysis/icu/src/java/org/apache/lucene/analysis/icu/tokenattributes/ScriptAttributeImpl.java +++ b/modules/analysis/icu/src/java/org/apache/lucene/analysis/icu/tokenattributes/ScriptAttributeImpl.java @@ -17,8 +17,6 @@ package org.apache.lucene.analysis.icu.tokenattributes; * limitations under the License. */ -import java.io.Serializable; - import org.apache.lucene.util.AttributeImpl; import org.apache.lucene.util.AttributeReflector; @@ -29,7 +27,7 @@ import com.ibm.icu.lang.UScript; * as an integer. 
* @lucene.experimental */ -public class ScriptAttributeImpl extends AttributeImpl implements ScriptAttribute, Cloneable, Serializable { +public class ScriptAttributeImpl extends AttributeImpl implements ScriptAttribute, Cloneable { private int code = UScript.COMMON; public int getCode() { From 762272e48ae5b2392bd9f711f42f9ea1bdbc2731 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Wed, 9 Feb 2011 16:10:00 +0000 Subject: [PATCH 114/185] resolve TODO: run the dfas backwards git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1068957 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/lucene/search/FuzzyTermsEnum.java | 42 ++++++++++++------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/lucene/src/java/org/apache/lucene/search/FuzzyTermsEnum.java b/lucene/src/java/org/apache/lucene/search/FuzzyTermsEnum.java index b9694d10aa2..655630954f8 100644 --- a/lucene/src/java/org/apache/lucene/search/FuzzyTermsEnum.java +++ b/lucene/src/java/org/apache/lucene/search/FuzzyTermsEnum.java @@ -325,22 +325,26 @@ public final class FuzzyTermsEnum extends TermsEnum { /** finds the smallest Lev(n) DFA that accepts the term. */ @Override - protected AcceptStatus accept(BytesRef term) { - if (term.equals(termRef)) { // ed = 0 - boostAtt.setBoost(1.0F); - return AcceptStatus.YES_AND_SEEK; - } + protected AcceptStatus accept(BytesRef term) { + int ed = matchers.length - 1; - int codePointCount = -1; - - // TODO: benchmark doing this backwards - for (int i = 1; i < matchers.length; i++) - if (matchers[i].run(term.bytes, term.offset, term.length)) { - // this sucks, we convert just to score based on length. 
- if (codePointCount == -1) { - codePointCount = UnicodeUtil.codePointCount(term); + if (matches(term, ed)) { // we match the outer dfa + // now compute exact edit distance + while (ed > 0) { + if (matches(term, ed - 1)) { + ed--; + } else { + break; } - final float similarity = 1.0f - ((float) i / (float) + } + + // scale to a boost and return (if similarity > minSimilarity) + if (ed == 0) { // exact match + boostAtt.setBoost(1.0F); + return AcceptStatus.YES_AND_SEEK; + } else { + final int codePointCount = UnicodeUtil.codePointCount(term); + final float similarity = 1.0f - ((float) ed / (float) (Math.min(codePointCount, termLength))); if (similarity > minSimilarity) { boostAtt.setBoost((similarity - minSimilarity) * scale_factor); @@ -349,8 +353,14 @@ public final class FuzzyTermsEnum extends TermsEnum { return AcceptStatus.NO_AND_SEEK; } } - - return AcceptStatus.NO_AND_SEEK; + } else { + return AcceptStatus.NO_AND_SEEK; + } + } + + /** returns true if term is within k edits of the query term */ + final boolean matches(BytesRef term, int k) { + return k == 0 ? 
term.equals(termRef) : matchers[k].run(term.bytes, term.offset, term.length); } /** defers to superclass, except can start at an arbitrary location */ From 6386f7713801fdbb5cce4af5c24c28fa0a2b2d03 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Wed, 9 Feb 2011 17:07:46 +0000 Subject: [PATCH 115/185] LUCENE-2911: synchronize grammar/token types across StandardTokenizer, UAX29EmailURLTokenizer, ICUTokenizer; add CJK types git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1068979 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/CHANGES.txt | 7 + .../analysis/standard/ASCIITLD.jflex-macro | 15 +- .../standard/ClassicTokenizerImpl.java | 67 +- .../standard/SUPPLEMENTARY.jflex-macro | 2 +- .../analysis/standard/StandardTokenizer.java | 6 +- .../standard/StandardTokenizerImpl.java | 767 +-- .../standard/StandardTokenizerImpl.jflex | 12 + .../standard/UAX29URLEmailTokenizer.java | 5627 +++++++++-------- .../standard/UAX29URLEmailTokenizer.jflex | 24 +- .../wikipedia/WikipediaTokenizerImpl.java | 14 +- .../analysis/core/TestStandardAnalyzer.java | 12 + .../core/TestUAX29URLEmailTokenizer.java | 12 + .../analysis/icu/src/data/uax29/Default.rbbi | 127 + .../DefaultICUTokenizerConfig.java | 22 +- .../analysis/icu/segmentation/Default.brk | Bin 0 -> 27088 bytes .../icu/segmentation/TestICUTokenizer.java | 15 +- 16 files changed, 3625 insertions(+), 3104 deletions(-) create mode 100644 modules/analysis/icu/src/data/uax29/Default.rbbi create mode 100644 modules/analysis/icu/src/resources/org/apache/lucene/analysis/icu/segmentation/Default.brk diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index b41106f77b5..402bb3dcc17 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -849,6 +849,13 @@ New features IndexReader, to allow apps that maintain external per-segment caches to evict entries when a segment is finished. 
(Shay Banon, Yonik Seeley, Mike McCandless) + +* LUCENE-2911: The new StandardTokenizer, UAX29URLEmailTokenizer, and + the ICUTokenizer in contrib now all tag types with a consistent set + of token types (defined in StandardTokenizer). Tokens in the major + CJK types are explicitly marked to allow for custom downstream handling: + , , , and . + (Robert Muir, Steven Rowe) Optimizations diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ASCIITLD.jflex-macro b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ASCIITLD.jflex-macro index 0557740e6d0..ed8a0ab2713 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ASCIITLD.jflex-macro +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ASCIITLD.jflex-macro @@ -15,8 +15,8 @@ */ // Generated from IANA Root Zone Database -// file version from Wednesday, January 5, 2011 12:34:09 PM UTC -// generated on Thursday, January 6, 2011 5:09:41 AM UTC +// file version from Wednesday, February 9, 2011 12:34:10 PM UTC +// generated on Wednesday, February 9, 2011 4:45:18 PM UTC // by org.apache.lucene.analysis.standard.GenerateJflexTLDMacros ASCIITLD = "." ( @@ -285,13 +285,19 @@ ASCIITLD = "." 
( | [wW][sS] | [xX][nN]--0[zZ][wW][mM]56[dD] | [xX][nN]--11[bB]5[bB][sS]3[aA]9[aA][jJ]6[gG] + | [xX][nN]--3[eE]0[bB]707[eE] + | [xX][nN]--45[bB][rR][jJ]9[cC] | [xX][nN]--80[aA][kK][hH][bB][yY][kK][nN][jJ]4[fF] | [xX][nN]--9[tT]4[bB]11[yY][iI]5[aA] + | [xX][nN]--[cC][lL][cC][hH][cC]0[eE][aA]0[bB]2[gG]2[aA]9[gG][cC][dD] | [xX][nN]--[dD][eE][bB][aA]0[aA][dD] | [xX][nN]--[fF][iI][qQ][sS]8[sS] | [xX][nN]--[fF][iI][qQ][zZ]9[sS] + | [xX][nN]--[fF][pP][cC][rR][jJ]9[cC]3[dD] | [xX][nN]--[fF][zZ][cC]2[cC]9[eE]2[cC] | [xX][nN]--[gG]6[wW]251[dD] + | [xX][nN]--[gG][eE][cC][rR][jJ]9[cC] + | [xX][nN]--[hH]2[bB][rR][jJ]9[cC] | [xX][nN]--[hH][gG][bB][kK]6[aA][jJ]7[fF]53[bB][bB][aA] | [xX][nN]--[hH][lL][cC][jJ]6[aA][yY][aA]9[eE][sS][cC]7[aA] | [xX][nN]--[jJ]6[wW]193[gG] @@ -301,13 +307,18 @@ ASCIITLD = "." ( | [xX][nN]--[kK][pP][rR][yY]57[dD] | [xX][nN]--[mM][gG][bB][aA][aA][mM]7[aA]8[hH] | [xX][nN]--[mM][gG][bB][aA][yY][hH]7[gG][pP][aA] + | [xX][nN]--[mM][gG][bB][bB][hH]1[aA]71[eE] | [xX][nN]--[mM][gG][bB][eE][rR][pP]4[aA]5[dD]4[aA][rR] | [xX][nN]--[oO]3[cC][wW]4[hH] + | [xX][nN]--[oO][gG][bB][pP][fF]8[fF][lL] | [xX][nN]--[pP]1[aA][iI] | [xX][nN]--[pP][gG][bB][sS]0[dD][hH] + | [xX][nN]--[sS]9[bB][rR][jJ]9[cC] | [xX][nN]--[wW][gG][bB][hH]1[cC] | [xX][nN]--[wW][gG][bB][lL]6[aA] | [xX][nN]--[xX][kK][cC]2[aA][lL]3[hH][yY][eE]2[aA] + | [xX][nN]--[xX][kK][cC]2[dD][lL]3[aA]5[eE][eE]0[hH] + | [xX][nN]--[yY][fF][rR][oO]4[iI]67[oO] | [xX][nN]--[yY][gG][bB][iI]2[aA][mM][mM][xX] | [xX][nN]--[zZ][cC][kK][zZ][aA][hH] | [yY][eE] diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerImpl.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerImpl.java index 8116d197179..1b9bf9f2674 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerImpl.java +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerImpl.java @@ -1,4 +1,4 @@ -/* The 
following code was generated by JFlex 1.5.0-SNAPSHOT on 12/4/10 7:24 PM */ +/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 2/9/11 11:45 AM */ package org.apache.lucene.analysis.standard; @@ -26,14 +26,15 @@ WARNING: if you change ClassicTokenizerImpl.jflex and need to regenerate */ +import java.io.Reader; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; /** * This class is a scanner generated by * JFlex 1.5.0-SNAPSHOT - * on 12/4/10 7:24 PM from the specification file - * C:/cygwin/home/us/svn/lucene/dev/trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerImpl.jflex + * on 2/9/11 11:45 AM from the specification file + * C:/Users/rmuir/workspace/lucene-2911/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerImpl.jflex */ class ClassicTokenizerImpl implements StandardTokenizerInterface { @@ -681,45 +682,45 @@ public final void getText(CharTermAttribute t) { zzMarkedPos = zzMarkedPosL; switch (zzAction < 0 ? 
zzAction : ZZ_ACTION[zzAction]) { - case 5: - { return NUM; - } - case 11: break; - case 9: - { return ACRONYM; - } - case 12: break; - case 7: - { return COMPANY; - } - case 13: break; case 10: { return EMAIL; } - case 14: break; - case 1: - { /* ignore */ - } - case 15: break; - case 6: - { return APOSTROPHE; - } - case 16: break; - case 3: - { return CJ; - } - case 17: break; - case 8: - { return ACRONYM_DEP; - } - case 18: break; + case 11: break; case 2: { return ALPHANUM; } - case 19: break; + case 12: break; case 4: { return HOST; } + case 13: break; + case 1: + { /* ignore */ + } + case 14: break; + case 8: + { return ACRONYM_DEP; + } + case 15: break; + case 5: + { return NUM; + } + case 16: break; + case 9: + { return ACRONYM; + } + case 17: break; + case 7: + { return COMPANY; + } + case 18: break; + case 6: + { return APOSTROPHE; + } + case 19: break; + case 3: + { return CJ; + } case 20: break; default: if (zzInput == YYEOF && zzStartRead == zzCurrentPos) { diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/SUPPLEMENTARY.jflex-macro b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/SUPPLEMENTARY.jflex-macro index fecf9777f9a..c505bf46c15 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/SUPPLEMENTARY.jflex-macro +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/SUPPLEMENTARY.jflex-macro @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -// Generated using ICU4J 4.6.0.0 on Thursday, January 6, 2011 7:02:52 PM UTC +// Generated using ICU4J 4.6.0.0 on Wednesday, February 9, 2011 4:45:11 PM UTC // by org.apache.lucene.analysis.icu.GenerateJFlexSupplementaryMacros diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java index 5b2b84a59fa..4051cdfb254 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java @@ -78,6 +78,8 @@ public final class StandardTokenizer extends Tokenizer { public static final int SOUTHEAST_ASIAN = 9; public static final int IDEOGRAPHIC = 10; public static final int HIRAGANA = 11; + public static final int KATAKANA = 12; + public static final int HANGUL = 13; /** String token types that correspond to token type int constants */ public static final String [] TOKEN_TYPES = new String [] { @@ -92,7 +94,9 @@ public final class StandardTokenizer extends Tokenizer { "", "", "", - "" + "", + "", + "" }; private int maxTokenLength = StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH; diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java index 83c17d0b61e..34e325cd122 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java @@ -1,4 +1,4 @@ -/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 1/6/11 12:09 AM */ +/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 2/9/11 11:45 AM */ package org.apache.lucene.analysis.standard; @@ -116,84 +116,85 @@ public final class StandardTokenizerImpl 
implements StandardTokenizerInterface { "\1\133\71\0\53\142\24\143\1\142\12\134\6\0\6\142\4\143\4\142"+ "\3\143\1\142\3\143\2\142\7\143\3\142\4\143\15\142\14\143\1\142"+ "\1\143\12\134\4\143\2\142\46\132\12\0\53\132\1\0\1\132\3\0"+ - "\u0149\132\1\0\4\132\2\0\7\132\1\0\1\132\1\0\4\132\2\0"+ - "\51\132\1\0\4\132\2\0\41\132\1\0\4\132\2\0\7\132\1\0"+ - "\1\132\1\0\4\132\2\0\17\132\1\0\71\132\1\0\4\132\2\0"+ - "\103\132\2\0\3\133\40\0\20\132\20\0\125\132\14\0\u026c\132\2\0"+ - "\21\132\1\0\32\132\5\0\113\132\3\0\3\132\17\0\15\132\1\0"+ - "\4\132\3\133\13\0\22\132\3\133\13\0\22\132\2\133\14\0\15\132"+ - "\1\0\3\132\1\0\2\133\14\0\64\142\2\143\36\143\3\0\1\142"+ - "\4\0\1\142\1\143\2\0\12\134\41\0\3\133\2\0\12\134\6\0"+ - "\130\132\10\0\51\132\1\133\1\132\5\0\106\132\12\0\35\132\3\0"+ - "\14\133\4\0\14\133\12\0\12\134\36\142\2\0\5\142\13\0\54\142"+ - "\4\0\21\143\7\142\2\143\6\0\12\134\1\142\3\0\2\142\40\0"+ - "\27\132\5\133\4\0\65\142\12\143\1\0\35\143\2\0\1\133\12\134"+ - "\6\0\12\134\6\0\16\142\122\0\5\133\57\132\21\133\7\132\4\0"+ - "\12\134\21\0\11\133\14\0\3\133\36\132\12\133\3\0\2\132\12\134"+ - "\6\0\46\132\16\133\14\0\44\132\24\133\10\0\12\134\3\0\3\132"+ - "\12\134\44\132\122\0\3\133\1\0\25\133\4\132\1\133\4\132\1\133"+ - "\15\0\300\132\47\133\25\0\4\133\u0116\132\2\0\6\132\2\0\46\132"+ - "\2\0\6\132\2\0\10\132\1\0\1\132\1\0\1\132\1\0\1\132"+ - "\1\0\37\132\2\0\65\132\1\0\7\132\1\0\1\132\3\0\3\132"+ - "\1\0\7\132\3\0\4\132\2\0\6\132\4\0\15\132\5\0\3\132"+ - "\1\0\7\132\17\0\2\133\2\133\10\0\2\140\12\0\1\140\2\0"+ - "\1\136\2\0\5\133\20\0\2\141\3\0\1\137\17\0\1\141\13\0"+ - "\5\133\5\0\6\133\1\0\1\132\15\0\1\132\20\0\15\132\63\0"+ - "\41\133\21\0\1\132\4\0\1\132\2\0\12\132\1\0\1\132\3\0"+ - "\5\132\6\0\1\132\1\0\1\132\1\0\1\132\1\0\4\132\1\0"+ - "\13\132\2\0\4\132\5\0\5\132\4\0\1\132\21\0\51\132\u032d\0"+ - "\64\132\u0716\0\57\132\1\0\57\132\1\0\205\132\6\0\4\132\3\133"+ - "\16\0\46\132\12\0\66\132\11\0\1\132\17\0\1\133\27\132\11\0"+ - 
"\7\132\1\0\7\132\1\0\7\132\1\0\7\132\1\0\7\132\1\0"+ - "\7\132\1\0\7\132\1\0\7\132\1\0\40\133\57\0\1\132\120\0"+ - "\32\144\1\0\131\144\14\0\326\144\57\0\1\132\1\0\1\144\31\0"+ - "\11\144\6\133\1\0\5\135\2\0\3\144\1\132\1\132\4\0\126\145"+ - "\2\0\2\133\2\135\3\145\133\135\1\0\4\135\5\0\51\132\3\0"+ - "\136\132\21\0\33\132\65\0\20\135\320\0\57\135\1\0\130\135\250\0"+ - "\u19b6\144\112\0\u51cc\144\64\0\u048d\132\103\0\56\132\2\0\u010d\132\3\0"+ - "\20\132\12\134\2\132\24\0\57\132\4\133\11\0\2\133\1\0\31\132"+ - "\10\0\120\132\2\133\45\0\11\132\2\0\147\132\2\0\4\132\1\0"+ - "\2\132\16\0\12\132\120\0\10\132\1\133\3\132\1\133\4\132\1\133"+ - "\27\132\5\133\30\0\64\132\14\0\2\133\62\132\21\133\13\0\12\134"+ - "\6\0\22\133\6\132\3\0\1\132\4\0\12\134\34\132\10\133\2\0"+ - "\27\132\15\133\14\0\35\132\3\0\4\133\57\132\16\133\16\0\1\132"+ - "\12\134\46\0\51\132\16\133\11\0\3\132\1\133\10\132\2\133\2\0"+ - "\12\134\6\0\33\142\1\143\4\0\60\142\1\143\1\142\3\143\2\142"+ - "\2\143\5\142\2\143\1\142\1\143\1\142\30\0\5\142\41\0\6\132"+ - "\2\0\6\132\2\0\6\132\11\0\7\132\1\0\7\132\221\0\43\132"+ - "\10\133\1\0\2\133\2\0\12\134\6\0\u2ba4\132\14\0\27\132\4\0"+ - "\61\132\4\0\1\31\1\25\1\46\1\43\1\13\3\0\1\7\1\5"+ - "\2\0\1\3\1\1\14\0\1\11\21\0\1\112\7\0\1\65\1\17"+ - "\6\0\1\130\3\0\1\120\1\120\1\120\1\120\1\120\1\120\1\120"+ + "\u0100\146\111\132\1\0\4\132\2\0\7\132\1\0\1\132\1\0\4\132"+ + "\2\0\51\132\1\0\4\132\2\0\41\132\1\0\4\132\2\0\7\132"+ + "\1\0\1\132\1\0\4\132\2\0\17\132\1\0\71\132\1\0\4\132"+ + "\2\0\103\132\2\0\3\133\40\0\20\132\20\0\125\132\14\0\u026c\132"+ + "\2\0\21\132\1\0\32\132\5\0\113\132\3\0\3\132\17\0\15\132"+ + "\1\0\4\132\3\133\13\0\22\132\3\133\13\0\22\132\2\133\14\0"+ + "\15\132\1\0\3\132\1\0\2\133\14\0\64\142\2\143\36\143\3\0"+ + "\1\142\4\0\1\142\1\143\2\0\12\134\41\0\3\133\2\0\12\134"+ + "\6\0\130\132\10\0\51\132\1\133\1\132\5\0\106\132\12\0\35\132"+ + "\3\0\14\133\4\0\14\133\12\0\12\134\36\142\2\0\5\142\13\0"+ + 
"\54\142\4\0\21\143\7\142\2\143\6\0\12\134\1\142\3\0\2\142"+ + "\40\0\27\132\5\133\4\0\65\142\12\143\1\0\35\143\2\0\1\133"+ + "\12\134\6\0\12\134\6\0\16\142\122\0\5\133\57\132\21\133\7\132"+ + "\4\0\12\134\21\0\11\133\14\0\3\133\36\132\12\133\3\0\2\132"+ + "\12\134\6\0\46\132\16\133\14\0\44\132\24\133\10\0\12\134\3\0"+ + "\3\132\12\134\44\132\122\0\3\133\1\0\25\133\4\132\1\133\4\132"+ + "\1\133\15\0\300\132\47\133\25\0\4\133\u0116\132\2\0\6\132\2\0"+ + "\46\132\2\0\6\132\2\0\10\132\1\0\1\132\1\0\1\132\1\0"+ + "\1\132\1\0\37\132\2\0\65\132\1\0\7\132\1\0\1\132\3\0"+ + "\3\132\1\0\7\132\3\0\4\132\2\0\6\132\4\0\15\132\5\0"+ + "\3\132\1\0\7\132\17\0\2\133\2\133\10\0\2\140\12\0\1\140"+ + "\2\0\1\136\2\0\5\133\20\0\2\141\3\0\1\137\17\0\1\141"+ + "\13\0\5\133\5\0\6\133\1\0\1\132\15\0\1\132\20\0\15\132"+ + "\63\0\41\133\21\0\1\132\4\0\1\132\2\0\12\132\1\0\1\132"+ + "\3\0\5\132\6\0\1\132\1\0\1\132\1\0\1\132\1\0\4\132"+ + "\1\0\13\132\2\0\4\132\5\0\5\132\4\0\1\132\21\0\51\132"+ + "\u032d\0\64\132\u0716\0\57\132\1\0\57\132\1\0\205\132\6\0\4\132"+ + "\3\133\16\0\46\132\12\0\66\132\11\0\1\132\17\0\1\133\27\132"+ + "\11\0\7\132\1\0\7\132\1\0\7\132\1\0\7\132\1\0\7\132"+ + "\1\0\7\132\1\0\7\132\1\0\7\132\1\0\40\133\57\0\1\132"+ + "\120\0\32\144\1\0\131\144\14\0\326\144\57\0\1\132\1\0\1\144"+ + "\31\0\11\144\4\133\2\133\1\0\5\135\2\0\3\144\1\132\1\132"+ + "\4\0\126\145\2\0\2\133\2\135\3\145\133\135\1\0\4\135\5\0"+ + "\51\132\3\0\136\146\21\0\33\132\65\0\20\135\37\0\101\0\37\0"+ + "\121\0\57\135\1\0\130\135\250\0\u19b6\144\112\0\u51cc\144\64\0\u048d\132"+ + "\103\0\56\132\2\0\u010d\132\3\0\20\132\12\134\2\132\24\0\57\132"+ + "\4\133\11\0\2\133\1\0\31\132\10\0\120\132\2\133\45\0\11\132"+ + "\2\0\147\132\2\0\4\132\1\0\2\132\16\0\12\132\120\0\10\132"+ + "\1\133\3\132\1\133\4\132\1\133\27\132\5\133\30\0\64\132\14\0"+ + "\2\133\62\132\21\133\13\0\12\134\6\0\22\133\6\132\3\0\1\132"+ + "\4\0\12\134\34\132\10\133\2\0\27\132\15\133\14\0\35\146\3\0"+ + 
"\4\133\57\132\16\133\16\0\1\132\12\134\46\0\51\132\16\133\11\0"+ + "\3\132\1\133\10\132\2\133\2\0\12\134\6\0\33\142\1\143\4\0"+ + "\60\142\1\143\1\142\3\143\2\142\2\143\5\142\2\143\1\142\1\143"+ + "\1\142\30\0\5\142\41\0\6\132\2\0\6\132\2\0\6\132\11\0"+ + "\7\132\1\0\7\132\221\0\43\132\10\133\1\0\2\133\2\0\12\134"+ + "\6\0\u2ba4\146\14\0\27\146\4\0\61\146\4\0\1\31\1\25\1\46"+ + "\1\43\1\13\3\0\1\7\1\5\2\0\1\3\1\1\14\0\1\11"+ + "\21\0\1\112\7\0\1\65\1\17\6\0\1\130\3\0\1\120\1\120"+ "\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\120"+ "\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\120"+ "\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\120"+ - "\1\120\1\120\1\120\1\120\1\121\1\120\1\120\1\120\1\125\1\123"+ - "\17\0\1\114\u02c1\0\1\70\277\0\1\113\1\71\1\2\3\124\2\35"+ - "\1\124\1\35\2\124\1\14\21\124\2\60\7\73\1\72\7\73\7\52"+ - "\1\15\1\52\1\75\2\45\1\44\1\75\1\45\1\44\10\75\2\63"+ - "\5\61\2\54\5\61\1\6\10\37\5\21\3\27\12\106\20\27\3\42"+ - "\32\30\1\26\2\24\2\110\1\111\2\110\2\111\2\110\1\111\3\24"+ - "\1\16\2\24\12\64\1\74\1\41\1\34\1\64\6\41\1\34\66\41"+ - "\5\115\6\103\1\51\4\103\2\51\10\103\1\51\7\100\1\12\2\100"+ - "\32\103\1\12\4\100\1\12\5\102\1\101\1\102\3\101\7\102\1\101"+ - "\23\102\5\67\3\102\6\67\2\67\6\66\10\66\2\100\7\66\36\100"+ - "\4\66\102\100\15\115\1\77\2\115\1\131\3\117\1\115\2\117\5\115"+ - "\4\117\4\116\1\115\3\116\1\115\5\116\26\56\4\23\1\105\2\104"+ - "\4\122\1\104\2\122\3\76\33\122\35\55\3\122\35\126\3\122\6\126"+ - "\2\33\31\126\1\33\17\126\6\122\4\22\1\10\37\22\1\10\4\22"+ - "\25\62\1\127\11\62\21\55\5\62\1\57\12\40\13\62\4\55\1\50"+ - "\6\55\12\122\17\55\1\47\3\53\15\20\11\36\1\32\24\36\2\20"+ - "\11\36\1\32\31\36\1\32\4\20\4\36\2\32\2\107\1\4\5\107"+ - "\52\4\u1900\0\u012e\144\2\0\76\144\2\0\152\144\46\0\7\132\14\0"+ - "\5\132\5\0\1\132\1\133\12\132\1\0\15\132\1\0\5\132\1\0"+ - "\1\132\1\0\2\132\1\0\2\132\1\0\154\132\41\0\u016b\132\22\0"+ - "\100\132\2\0\66\132\50\0\14\132\4\0\20\133\1\137\2\0\1\136"+ - 
"\1\137\13\0\7\133\14\0\2\141\30\0\3\141\1\137\1\0\1\140"+ - "\1\0\1\137\1\136\32\0\5\132\1\0\207\132\2\0\1\133\7\0"+ - "\1\140\4\0\1\137\1\0\1\140\1\0\12\134\1\136\1\137\5\0"+ - "\32\132\4\0\1\141\1\0\32\132\13\0\70\135\2\133\37\132\3\0"+ - "\6\132\2\0\6\132\2\0\6\132\2\0\3\132\34\0\3\133\4\0"; + "\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\121"+ + "\1\120\1\120\1\120\1\125\1\123\17\0\1\114\u02c1\0\1\70\277\0"+ + "\1\113\1\71\1\2\3\124\2\35\1\124\1\35\2\124\1\14\21\124"+ + "\2\60\7\73\1\72\7\73\7\52\1\15\1\52\1\75\2\45\1\44"+ + "\1\75\1\45\1\44\10\75\2\63\5\61\2\54\5\61\1\6\10\37"+ + "\5\21\3\27\12\106\20\27\3\42\32\30\1\26\2\24\2\110\1\111"+ + "\2\110\2\111\2\110\1\111\3\24\1\16\2\24\12\64\1\74\1\41"+ + "\1\34\1\64\6\41\1\34\66\41\5\115\6\103\1\51\4\103\2\51"+ + "\10\103\1\51\7\100\1\12\2\100\32\103\1\12\4\100\1\12\5\102"+ + "\1\101\1\102\3\101\7\102\1\101\23\102\5\67\3\102\6\67\2\67"+ + "\6\66\10\66\2\100\7\66\36\100\4\66\102\100\15\115\1\77\2\115"+ + "\1\131\3\117\1\115\2\117\5\115\4\117\4\116\1\115\3\116\1\115"+ + "\5\116\26\56\4\23\1\105\2\104\4\122\1\104\2\122\3\76\33\122"+ + "\35\55\3\122\35\126\3\122\6\126\2\33\31\126\1\33\17\126\6\122"+ + "\4\22\1\10\37\22\1\10\4\22\25\62\1\127\11\62\21\55\5\62"+ + "\1\57\12\40\13\62\4\55\1\50\6\55\12\122\17\55\1\47\3\53"+ + "\15\20\11\36\1\32\24\36\2\20\11\36\1\32\31\36\1\32\4\20"+ + "\4\36\2\32\2\107\1\4\5\107\52\4\u1900\0\u012e\144\2\0\76\144"+ + "\2\0\152\144\46\0\7\132\14\0\5\132\5\0\1\132\1\133\12\132"+ + "\1\0\15\132\1\0\5\132\1\0\1\132\1\0\2\132\1\0\2\132"+ + "\1\0\154\132\41\0\u016b\132\22\0\100\132\2\0\66\132\50\0\14\132"+ + "\4\0\20\133\1\137\2\0\1\136\1\137\13\0\7\133\14\0\2\141"+ + "\30\0\3\141\1\137\1\0\1\140\1\0\1\137\1\136\32\0\5\132"+ + "\1\0\207\132\2\0\1\133\7\0\1\140\4\0\1\137\1\0\1\140"+ + "\1\0\12\134\1\136\1\137\5\0\32\132\4\0\1\141\1\0\32\132"+ + "\13\0\70\135\2\133\37\146\3\0\6\146\2\0\6\146\2\0\6\146"+ + "\2\0\3\146\34\0\3\133\4\0"; /** * Translates characters to character classes @@ 
-206,11 +207,12 @@ public final class StandardTokenizerImpl implements StandardTokenizerInterface { private static final int [] ZZ_ACTION = zzUnpackAction(); private static final String ZZ_ACTION_PACKED_0 = - "\1\0\23\1\1\2\1\3\1\2\1\1\1\4\1\5"+ - "\1\6\15\0\1\2\1\0\1\2\10\0\1\3\61\0"; + "\1\0\23\1\1\2\1\3\1\4\1\1\1\5\1\6"+ + "\1\7\1\10\15\0\1\2\1\0\1\2\10\0\1\3"+ + "\15\0\1\2\57\0"; private static int [] zzUnpackAction() { - int [] result = new int[101]; + int [] result = new int[114]; int offset = 0; offset = zzUnpackAction(ZZ_ACTION_PACKED_0, offset, result); return result; @@ -235,22 +237,24 @@ public final class StandardTokenizerImpl implements StandardTokenizerInterface { private static final int [] ZZ_ROWMAP = zzUnpackRowMap(); private static final String ZZ_ROWMAP_PACKED_0 = - "\0\0\0\146\0\314\0\u0132\0\u0198\0\u01fe\0\u0264\0\u02ca"+ - "\0\u0330\0\u0396\0\u03fc\0\u0462\0\u04c8\0\u052e\0\u0594\0\u05fa"+ - "\0\u0660\0\u06c6\0\u072c\0\u0792\0\u07f8\0\u085e\0\u08c4\0\u092a"+ - "\0\u0990\0\146\0\146\0\314\0\u0132\0\u0198\0\u01fe\0\u0264"+ - "\0\u09f6\0\u0a5c\0\u0ac2\0\u0b28\0\u0462\0\u0b8e\0\u0bf4\0\u0c5a"+ - "\0\u0cc0\0\u0d26\0\u0d8c\0\u0df2\0\u0330\0\u0396\0\u0e58\0\u0ebe"+ - "\0\u0f24\0\u0f8a\0\u0ff0\0\u1056\0\u10bc\0\u1122\0\u1188\0\u11ee"+ - "\0\u1254\0\u12ba\0\u1320\0\u1386\0\u13ec\0\u1452\0\u14b8\0\u092a"+ - "\0\u151e\0\u1584\0\u15ea\0\u1650\0\u16b6\0\u171c\0\u1782\0\u17e8"+ - "\0\u184e\0\u18b4\0\u191a\0\u1980\0\u19e6\0\u1a4c\0\u1ab2\0\u1b18"+ - "\0\u1b7e\0\u1be4\0\u1c4a\0\u1cb0\0\u1d16\0\u1d7c\0\u1de2\0\u1e48"+ - "\0\u1eae\0\u1f14\0\u1f7a\0\u1fe0\0\u2046\0\u20ac\0\u2112\0\u2178"+ - "\0\u21de\0\u2244\0\u22aa\0\u2310\0\u2376"; + "\0\0\0\147\0\316\0\u0135\0\u019c\0\u0203\0\u026a\0\u02d1"+ + "\0\u0338\0\u039f\0\u0406\0\u046d\0\u04d4\0\u053b\0\u05a2\0\u0609"+ + "\0\u0670\0\u06d7\0\u073e\0\u07a5\0\u080c\0\u0873\0\u08da\0\u0941"+ + "\0\u09a8\0\147\0\147\0\u0a0f\0\316\0\u0135\0\u019c\0\u0203"+ + 
"\0\u026a\0\u0a76\0\u0add\0\u0b44\0\u0bab\0\u046d\0\u0c12\0\u0c79"+ + "\0\u0ce0\0\u0d47\0\u0dae\0\u0e15\0\u0e7c\0\u0338\0\u039f\0\u0ee3"+ + "\0\u0f4a\0\u0fb1\0\u1018\0\u107f\0\u10e6\0\u114d\0\u11b4\0\u121b"+ + "\0\u1282\0\u12e9\0\u1350\0\u13b7\0\u141e\0\u1485\0\u14ec\0\u1553"+ + "\0\u15ba\0\u0941\0\u1621\0\u1688\0\u16ef\0\u1756\0\u17bd\0\u1824"+ + "\0\u188b\0\u18f2\0\u1959\0\u19c0\0\u1a27\0\u1a8e\0\u1af5\0\u1b5c"+ + "\0\u1bc3\0\u1c2a\0\u1c91\0\u1cf8\0\u1d5f\0\u1dc6\0\u1e2d\0\u1e94"+ + "\0\u1efb\0\u1f62\0\u1fc9\0\u2030\0\u2097\0\u20fe\0\u2165\0\u21cc"+ + "\0\u2233\0\u229a\0\u2301\0\u2368\0\u23cf\0\u2436\0\u249d\0\u2504"+ + "\0\u256b\0\u25d2\0\u2639\0\u26a0\0\u2707\0\u276e\0\u27d5\0\u283c"+ + "\0\u28a3\0\u290a"; private static int [] zzUnpackRowMap() { - int [] result = new int[101]; + int [] result = new int[114]; int offset = 0; offset = zzUnpackRowMap(ZZ_ROWMAP_PACKED_0, offset, result); return result; @@ -278,275 +282,308 @@ public final class StandardTokenizerImpl implements StandardTokenizerInterface { "\3\2\1\13\11\2\1\14\2\2\1\15\43\2\1\16"+ "\1\2\1\17\3\2\1\20\1\21\1\2\1\22\1\2"+ "\1\23\2\2\1\24\1\2\1\25\1\2\1\26\1\27"+ - "\3\2\1\30\2\31\1\32\1\33\150\0\1\25\11\0"+ - "\1\25\20\0\1\25\22\0\1\25\10\0\3\25\17\0"+ - "\1\25\10\0\1\25\23\0\1\25\1\0\1\25\1\0"+ - "\1\25\1\0\1\25\1\0\1\25\1\0\3\25\1\0"+ - "\5\25\1\0\3\25\1\0\11\25\1\0\2\25\1\0"+ - "\16\25\1\0\2\25\1\0\21\25\1\0\1\25\1\0"+ - "\3\25\2\0\1\25\1\0\1\25\1\0\2\25\1\0"+ - "\1\25\16\0\1\25\3\0\1\25\5\0\2\25\3\0"+ - "\1\25\13\0\1\25\1\0\1\25\4\0\2\25\4\0"+ - "\1\25\1\0\1\25\3\0\2\25\1\0\1\25\5\0"+ - "\3\25\1\0\1\25\15\0\1\25\10\0\1\25\23\0"+ - "\1\25\3\0\1\25\1\0\1\25\1\0\1\25\1\0"+ - "\3\25\2\0\4\25\1\0\3\25\2\0\3\25\1\0"+ - "\4\25\1\0\2\25\2\0\3\25\1\0\11\25\1\0"+ - "\2\25\1\0\16\25\1\0\2\25\1\0\1\25\1\0"+ - "\3\25\2\0\1\25\1\0\1\25\1\0\2\25\1\0"+ - "\1\25\16\0\1\25\3\0\1\25\3\0\1\25\1\0"+ - "\3\25\2\0\1\25\1\0\2\25\1\0\3\25\3\0"+ - "\2\25\1\0\1\25\1\0\2\25\1\0\2\25\3\0"+ - 
"\2\25\1\0\1\25\1\0\1\25\1\0\2\25\1\0"+ - "\2\25\1\0\2\25\1\0\5\25\1\0\5\25\1\0"+ - "\2\25\1\0\2\25\1\0\1\25\1\0\3\25\4\0"+ - "\1\25\4\0\1\25\30\0\3\25\5\0\1\25\1\0"+ - "\1\25\1\0\1\25\4\0\1\25\14\0\1\25\5\0"+ - "\1\25\11\0\2\25\12\0\1\26\1\0\2\25\12\0"+ - "\1\25\23\0\1\25\1\0\1\26\7\0\2\25\2\0"+ + "\3\2\1\30\2\31\1\32\1\33\1\34\151\0\1\25"+ + "\11\0\1\25\20\0\1\25\22\0\1\25\10\0\3\25"+ + "\17\0\1\25\10\0\1\25\24\0\1\25\1\0\1\25"+ + "\1\0\1\25\1\0\1\25\1\0\1\25\1\0\3\25"+ + "\1\0\5\25\1\0\3\25\1\0\11\25\1\0\2\25"+ + "\1\0\16\25\1\0\2\25\1\0\21\25\1\0\1\25"+ + "\1\0\3\25\2\0\1\25\1\0\1\25\1\0\2\25"+ + "\1\0\1\25\17\0\1\25\3\0\1\25\5\0\2\25"+ + "\3\0\1\25\13\0\1\25\1\0\1\25\4\0\2\25"+ + "\4\0\1\25\1\0\1\25\3\0\2\25\1\0\1\25"+ + "\5\0\3\25\1\0\1\25\15\0\1\25\10\0\1\25"+ + "\24\0\1\25\3\0\1\25\1\0\1\25\1\0\1\25"+ + "\1\0\3\25\2\0\4\25\1\0\3\25\2\0\3\25"+ + "\1\0\4\25\1\0\2\25\2\0\3\25\1\0\11\25"+ + "\1\0\2\25\1\0\16\25\1\0\2\25\1\0\1\25"+ + "\1\0\3\25\2\0\1\25\1\0\1\25\1\0\2\25"+ + "\1\0\1\25\17\0\1\25\3\0\1\25\3\0\1\25"+ + "\1\0\3\25\2\0\1\25\1\0\2\25\1\0\3\25"+ + "\3\0\2\25\1\0\1\25\1\0\2\25\1\0\2\25"+ + "\3\0\2\25\1\0\1\25\1\0\1\25\1\0\2\25"+ + "\1\0\2\25\1\0\2\25\1\0\5\25\1\0\5\25"+ + "\1\0\2\25\1\0\2\25\1\0\1\25\1\0\3\25"+ + "\4\0\1\25\4\0\1\25\31\0\3\25\5\0\1\25"+ + "\1\0\1\25\1\0\1\25\4\0\1\25\14\0\1\25"+ + "\5\0\1\25\11\0\2\25\12\0\1\26\1\0\2\25"+ + "\12\0\1\25\24\0\1\25\1\0\1\26\7\0\2\25"+ + "\2\0\5\25\2\0\2\25\4\0\6\25\1\0\2\25"+ + "\4\0\5\25\1\0\5\25\1\0\2\25\1\0\3\25"+ + "\1\0\4\25\1\0\5\25\1\26\1\0\1\25\1\0"+ + "\1\25\1\0\3\25\2\0\1\25\1\0\1\25\1\0"+ + "\1\25\2\0\1\25\17\0\1\25\3\0\1\25\5\0"+ + "\2\25\3\0\1\25\4\0\3\25\4\0\1\25\1\0"+ + "\1\25\2\0\1\25\1\0\2\25\4\0\1\25\1\0"+ + "\1\25\3\0\2\25\1\0\1\25\5\0\3\25\1\0"+ + "\1\25\10\0\1\25\1\0\2\26\1\0\1\25\10\0"+ + "\1\25\24\0\1\25\3\0\1\25\6\0\2\25\5\0"+ + "\1\25\1\0\1\25\1\0\1\25\1\0\11\25\2\0"+ + "\1\25\4\0\1\25\4\0\6\25\2\0\1\25\1\0"+ + "\1\25\1\0\3\25\3\0\2\25\4\0\3\25\1\0"+ + 
"\1\25\10\0\1\25\1\0\2\25\21\0\1\25\11\0"+ + "\2\25\17\0\1\25\6\0\2\25\4\0\1\25\5\0"+ + "\1\25\2\0\1\25\5\0\3\25\1\0\1\25\15\0"+ + "\1\25\10\0\1\25\24\0\1\25\3\0\1\25\5\0"+ + "\1\25\32\0\15\25\5\0\3\25\1\0\1\25\5\0"+ + "\1\25\7\0\1\25\2\0\1\25\5\0\1\25\2\0"+ + "\1\25\1\0\1\25\106\0\1\33\21\0\1\27\35\0"+ + "\1\32\3\0\1\32\3\0\1\32\1\0\3\32\2\0"+ + "\1\32\2\0\1\32\1\0\3\32\3\0\2\32\1\0"+ + "\1\32\1\0\2\32\1\0\2\32\3\0\2\32\1\0"+ + "\1\32\3\0\2\32\1\0\2\32\1\0\2\32\1\0"+ + "\5\32\1\0\5\32\2\0\1\32\1\0\2\32\1\0"+ + "\1\32\1\0\3\32\4\0\1\32\4\0\1\32\17\0"+ + "\1\32\1\0\1\32\1\0\1\32\1\0\1\32\1\0"+ + "\1\32\1\0\3\32\1\0\5\32\1\0\3\32\1\0"+ + "\11\32\1\0\2\32\1\0\16\32\1\0\2\32\1\0"+ + "\21\32\1\0\1\32\1\0\3\32\2\0\1\32\1\0"+ + "\1\32\1\0\2\32\1\0\1\32\17\0\1\32\1\0"+ + "\1\32\1\0\1\32\3\0\1\32\1\0\3\32\1\0"+ + "\2\32\1\0\2\32\1\0\3\32\1\0\11\32\1\0"+ + "\2\32\1\0\16\32\1\0\2\32\1\0\21\32\1\0"+ + "\1\32\1\0\3\32\2\0\1\32\1\0\1\32\1\0"+ + "\2\32\1\0\1\32\17\0\1\32\11\0\1\32\20\0"+ + "\1\32\33\0\1\32\21\0\1\32\10\0\1\32\24\0"+ + "\1\32\1\0\1\32\1\0\1\32\1\0\1\32\1\0"+ + "\1\32\1\0\3\32\1\0\5\32\1\0\3\32\1\0"+ + "\6\32\1\0\2\32\1\0\2\32\1\0\10\32\1\0"+ + "\5\32\1\0\2\32\1\0\21\32\1\0\1\32\1\0"+ + "\3\32\2\0\1\32\1\0\1\32\1\0\2\32\1\0"+ + "\1\32\146\0\1\33\16\0\1\35\1\0\1\36\1\0"+ + "\1\37\1\0\1\40\1\0\1\41\1\0\1\42\3\0"+ + "\1\43\5\0\1\44\3\0\1\45\11\0\1\46\2\0"+ + "\1\47\16\0\1\50\2\0\1\51\41\0\2\25\1\52"+ + "\1\0\1\53\1\0\1\53\1\54\1\0\1\25\2\0"+ + "\1\25\1\0\1\35\1\0\1\36\1\0\1\37\1\0"+ + "\1\40\1\0\1\41\1\0\1\55\3\0\1\56\5\0"+ + "\1\57\3\0\1\60\11\0\1\46\2\0\1\61\16\0"+ + "\1\62\2\0\1\63\41\0\1\25\2\26\2\0\2\64"+ + "\1\65\1\0\1\26\2\0\1\25\13\0\1\66\15\0"+ + "\1\67\14\0\1\70\16\0\1\71\2\0\1\72\21\0"+ + "\1\73\20\0\1\27\1\0\1\27\3\0\1\54\1\0"+ + "\1\27\4\0\1\35\1\0\1\36\1\0\1\37\1\0"+ + "\1\40\1\0\1\41\1\0\1\74\3\0\1\56\5\0"+ + "\1\57\3\0\1\75\11\0\1\46\2\0\1\76\16\0"+ + "\1\77\2\0\1\100\21\0\1\101\17\0\1\25\1\102"+ + "\1\26\1\103\3\0\1\102\1\0\1\102\2\0\1\25"+ + 
"\142\0\2\31\4\0\1\35\1\0\1\36\1\0\1\37"+ + "\1\0\1\40\1\0\1\41\1\0\1\104\3\0\1\43"+ + "\5\0\1\44\3\0\1\105\11\0\1\46\2\0\1\106"+ + "\16\0\1\107\2\0\1\110\41\0\1\25\1\34\1\52"+ + "\1\0\1\53\1\0\1\53\1\54\1\0\1\34\2\0"+ + "\1\34\2\0\1\25\11\0\3\25\5\0\1\25\1\0"+ + "\1\25\1\0\1\25\4\0\1\25\4\0\1\25\1\0"+ + "\2\25\4\0\1\25\5\0\1\25\3\0\1\25\4\0"+ + "\5\25\10\0\1\52\1\0\2\25\1\0\1\25\10\0"+ + "\1\25\24\0\1\25\1\0\1\52\7\0\2\25\2\0"+ "\5\25\2\0\2\25\4\0\6\25\1\0\2\25\4\0"+ "\5\25\1\0\5\25\1\0\2\25\1\0\3\25\1\0"+ - "\4\25\1\0\5\25\1\26\1\0\1\25\1\0\1\25"+ + "\4\25\1\0\5\25\1\52\1\0\1\25\1\0\1\25"+ "\1\0\3\25\2\0\1\25\1\0\1\25\1\0\1\25"+ - "\2\0\1\25\16\0\1\25\3\0\1\25\5\0\2\25"+ + "\2\0\1\25\17\0\1\25\3\0\1\25\5\0\2\25"+ "\3\0\1\25\4\0\3\25\4\0\1\25\1\0\1\25"+ "\2\0\1\25\1\0\2\25\4\0\1\25\1\0\1\25"+ "\3\0\2\25\1\0\1\25\5\0\3\25\1\0\1\25"+ - "\10\0\1\25\1\0\2\26\1\0\1\25\10\0\1\25"+ - "\23\0\1\25\3\0\1\25\6\0\2\25\5\0\1\25"+ + "\10\0\1\25\1\0\2\52\1\0\1\25\10\0\1\25"+ + "\24\0\1\25\3\0\1\25\6\0\2\25\5\0\1\25"+ "\1\0\1\25\1\0\1\25\1\0\11\25\2\0\1\25"+ "\4\0\1\25\4\0\6\25\2\0\1\25\1\0\1\25"+ - "\1\0\3\25\3\0\2\25\4\0\3\25\1\0\1\25"+ - "\10\0\1\25\1\0\2\25\20\0\1\25\11\0\2\25"+ - "\17\0\1\25\6\0\2\25\4\0\1\25\5\0\1\25"+ - "\2\0\1\25\5\0\3\25\1\0\1\25\15\0\1\25"+ - "\10\0\1\25\23\0\1\25\3\0\1\25\5\0\1\25"+ - "\32\0\15\25\5\0\3\25\1\0\1\25\5\0\1\25"+ - "\7\0\1\25\2\0\1\25\5\0\1\25\2\0\1\25"+ - "\1\0\1\25\105\0\1\33\21\0\1\27\34\0\1\32"+ - "\3\0\1\32\3\0\1\32\1\0\3\32\2\0\1\32"+ - "\2\0\1\32\1\0\3\32\3\0\2\32\1\0\1\32"+ - "\1\0\2\32\1\0\2\32\3\0\2\32\1\0\1\32"+ - "\3\0\2\32\1\0\2\32\1\0\2\32\1\0\5\32"+ - "\1\0\5\32\2\0\1\32\1\0\2\32\1\0\1\32"+ - "\1\0\3\32\4\0\1\32\4\0\1\32\16\0\1\32"+ - "\1\0\1\32\1\0\1\32\1\0\1\32\1\0\1\32"+ - "\1\0\3\32\1\0\5\32\1\0\3\32\1\0\11\32"+ - "\1\0\2\32\1\0\16\32\1\0\2\32\1\0\21\32"+ - "\1\0\1\32\1\0\3\32\2\0\1\32\1\0\1\32"+ - "\1\0\2\32\1\0\1\32\16\0\1\32\1\0\1\32"+ - "\1\0\1\32\3\0\1\32\1\0\3\32\1\0\2\32"+ - 
"\1\0\2\32\1\0\3\32\1\0\11\32\1\0\2\32"+ - "\1\0\16\32\1\0\2\32\1\0\21\32\1\0\1\32"+ - "\1\0\3\32\2\0\1\32\1\0\1\32\1\0\2\32"+ - "\1\0\1\32\16\0\1\32\11\0\1\32\20\0\1\32"+ - "\33\0\1\32\21\0\1\32\10\0\1\32\23\0\1\32"+ - "\1\0\1\32\1\0\1\32\1\0\1\32\1\0\1\32"+ - "\1\0\3\32\1\0\5\32\1\0\3\32\1\0\6\32"+ - "\1\0\2\32\1\0\2\32\1\0\10\32\1\0\5\32"+ - "\1\0\2\32\1\0\21\32\1\0\1\32\1\0\3\32"+ - "\2\0\1\32\1\0\1\32\1\0\2\32\1\0\1\32"+ - "\145\0\1\33\15\0\1\34\1\0\1\35\1\0\1\36"+ - "\1\0\1\37\1\0\1\40\1\0\1\41\3\0\1\42"+ - "\5\0\1\43\3\0\1\44\11\0\1\45\2\0\1\46"+ - "\16\0\1\47\2\0\1\50\41\0\2\25\1\51\1\0"+ - "\1\52\1\0\1\52\1\53\1\0\1\25\3\0\1\34"+ - "\1\0\1\35\1\0\1\36\1\0\1\37\1\0\1\40"+ - "\1\0\1\54\3\0\1\55\5\0\1\56\3\0\1\57"+ - "\11\0\1\45\2\0\1\60\16\0\1\61\2\0\1\62"+ - "\41\0\1\25\2\26\2\0\2\63\1\64\1\0\1\26"+ - "\15\0\1\65\15\0\1\66\14\0\1\67\16\0\1\70"+ - "\2\0\1\71\21\0\1\72\20\0\1\27\1\0\1\27"+ - "\3\0\1\53\1\0\1\27\3\0\1\34\1\0\1\35"+ - "\1\0\1\36\1\0\1\37\1\0\1\40\1\0\1\73"+ - "\3\0\1\55\5\0\1\56\3\0\1\74\11\0\1\45"+ - "\2\0\1\75\16\0\1\76\2\0\1\77\21\0\1\72"+ - "\17\0\1\25\1\100\1\26\1\27\3\0\1\100\1\0"+ - "\1\100\144\0\2\31\4\0\1\25\11\0\3\25\5\0"+ - "\1\25\1\0\1\25\1\0\1\25\4\0\1\25\4\0"+ - "\1\25\1\0\2\25\4\0\1\25\5\0\1\25\3\0"+ - "\1\25\4\0\5\25\10\0\1\51\1\0\2\25\1\0"+ - "\1\25\10\0\1\25\23\0\1\25\1\0\1\51\7\0"+ - "\2\25\2\0\5\25\2\0\2\25\4\0\6\25\1\0"+ - "\2\25\4\0\5\25\1\0\5\25\1\0\2\25\1\0"+ - "\3\25\1\0\4\25\1\0\5\25\1\51\1\0\1\25"+ - "\1\0\1\25\1\0\3\25\2\0\1\25\1\0\1\25"+ - "\1\0\1\25\2\0\1\25\16\0\1\25\3\0\1\25"+ - "\5\0\2\25\3\0\1\25\4\0\3\25\4\0\1\25"+ - "\1\0\1\25\2\0\1\25\1\0\2\25\4\0\1\25"+ - "\1\0\1\25\3\0\2\25\1\0\1\25\5\0\3\25"+ - "\1\0\1\25\10\0\1\25\1\0\2\51\1\0\1\25"+ - "\10\0\1\25\23\0\1\25\3\0\1\25\6\0\2\25"+ - "\5\0\1\25\1\0\1\25\1\0\1\25\1\0\11\25"+ - "\2\0\1\25\4\0\1\25\4\0\6\25\2\0\1\25"+ - "\1\0\1\25\1\0\3\25\1\0\1\25\1\0\2\25"+ - "\4\0\3\25\1\0\1\25\10\0\1\25\1\0\2\25"+ - "\20\0\1\25\3\0\1\25\5\0\1\25\32\0\15\25"+ - 
"\5\0\3\25\1\0\1\25\5\0\3\25\5\0\1\25"+ - "\2\0\2\25\4\0\1\25\2\0\1\25\1\0\1\25"+ - "\102\0\2\25\6\0\1\25\55\0\1\25\3\0\1\25"+ - "\2\0\1\25\3\0\1\25\5\0\1\25\7\0\1\25"+ - "\4\0\2\25\3\0\2\25\1\0\1\25\4\0\1\25"+ - "\1\0\1\25\2\0\2\25\1\0\3\25\1\0\1\25"+ - "\2\0\4\25\2\0\1\25\40\0\1\34\1\0\1\35"+ - "\1\0\1\36\1\0\1\37\1\0\1\40\1\0\1\101"+ - "\3\0\1\42\5\0\1\43\3\0\1\102\11\0\1\45"+ - "\2\0\1\103\16\0\1\104\2\0\1\105\41\0\1\25"+ - "\2\51\2\0\2\106\1\53\1\0\1\51\3\0\1\34"+ - "\1\0\1\35\1\0\1\36\1\0\1\37\1\0\1\40"+ - "\1\0\1\107\3\0\1\110\5\0\1\111\3\0\1\112"+ - "\11\0\1\45\2\0\1\113\16\0\1\114\2\0\1\115"+ - "\41\0\1\25\1\52\7\0\1\52\3\0\1\34\1\0"+ + "\1\0\3\25\1\0\1\25\1\0\2\25\4\0\3\25"+ + "\1\0\1\25\10\0\1\25\1\0\2\25\21\0\1\25"+ + "\3\0\1\25\5\0\1\25\32\0\15\25\5\0\3\25"+ + "\1\0\1\25\5\0\3\25\5\0\1\25\2\0\2\25"+ + "\4\0\1\25\2\0\1\25\1\0\1\25\103\0\2\25"+ + "\6\0\1\25\56\0\1\25\3\0\1\25\2\0\1\25"+ + "\3\0\1\25\5\0\1\25\7\0\1\25\4\0\2\25"+ + "\3\0\2\25\1\0\1\25\4\0\1\25\1\0\1\25"+ + "\2\0\2\25\1\0\3\25\1\0\1\25\2\0\4\25"+ + "\2\0\1\25\41\0\1\35\1\0\1\36\1\0\1\37"+ + "\1\0\1\40\1\0\1\41\1\0\1\111\3\0\1\43"+ + "\5\0\1\44\3\0\1\112\11\0\1\46\2\0\1\113"+ + "\16\0\1\114\2\0\1\115\41\0\1\25\2\52\2\0"+ + "\2\116\1\54\1\0\1\52\2\0\1\25\1\0\1\35"+ + "\1\0\1\36\1\0\1\37\1\0\1\40\1\0\1\41"+ + "\1\0\1\117\3\0\1\120\5\0\1\121\3\0\1\122"+ + "\11\0\1\46\2\0\1\123\16\0\1\124\2\0\1\125"+ + "\41\0\1\25\1\53\7\0\1\53\2\0\1\25\1\0"+ "\1\35\1\0\1\36\1\0\1\37\1\0\1\40\1\0"+ - "\1\116\3\0\1\42\5\0\1\43\3\0\1\117\11\0"+ - "\1\45\2\0\1\120\16\0\1\121\2\0\1\122\21\0"+ - "\1\72\17\0\1\25\1\53\1\51\1\27\3\0\1\53"+ - "\1\0\1\53\4\0\1\26\11\0\3\25\5\0\1\25"+ - "\1\0\1\25\1\0\1\25\4\0\1\25\4\0\1\26"+ - "\1\0\2\26\4\0\1\25\5\0\1\25\3\0\1\26"+ - "\4\0\1\26\2\25\2\26\10\0\1\26\1\0\2\25"+ - "\1\0\1\26\10\0\1\25\23\0\1\25\3\0\1\25"+ - "\6\0\2\25\5\0\1\25\1\0\1\25\1\0\1\25"+ - "\1\0\11\25\2\0\1\25\4\0\1\25\4\0\6\25"+ - "\2\0\1\25\1\0\1\25\1\0\3\25\1\0\1\26"+ - "\1\0\2\25\4\0\3\25\1\0\1\25\10\0\1\25"+ - 
"\1\0\2\25\20\0\1\25\3\0\1\25\5\0\1\25"+ - "\32\0\15\25\5\0\3\25\1\0\1\25\5\0\1\25"+ - "\2\26\5\0\1\25\2\0\1\25\1\26\4\0\1\25"+ - "\2\0\1\25\1\0\1\25\102\0\2\26\6\0\1\26"+ - "\55\0\1\26\3\0\1\26\2\0\1\26\3\0\1\26"+ - "\5\0\1\26\7\0\1\26\4\0\2\26\3\0\2\26"+ - "\1\0\1\26\4\0\1\26\1\0\1\26\2\0\2\26"+ - "\1\0\3\26\1\0\1\26\2\0\4\26\2\0\1\26"+ - "\52\0\1\123\3\0\1\124\5\0\1\125\3\0\1\126"+ - "\14\0\1\127\16\0\1\130\2\0\1\131\42\0\1\63"+ - "\1\26\6\0\1\63\3\0\1\34\1\0\1\35\1\0"+ - "\1\36\1\0\1\37\1\0\1\40\1\0\1\132\3\0"+ - "\1\55\5\0\1\56\3\0\1\133\11\0\1\45\2\0"+ - "\1\134\16\0\1\135\2\0\1\136\21\0\1\72\17\0"+ - "\1\25\1\64\1\26\1\27\3\0\1\64\1\0\1\64"+ - "\4\0\1\27\37\0\1\27\1\0\2\27\16\0\1\27"+ - "\4\0\1\27\2\0\2\27\15\0\1\27\131\0\1\27"+ - "\152\0\2\27\11\0\1\27\114\0\2\27\6\0\1\27"+ - "\55\0\1\27\3\0\1\27\2\0\1\27\3\0\1\27"+ - "\5\0\1\27\7\0\1\27\4\0\2\27\3\0\2\27"+ - "\1\0\1\27\4\0\1\27\1\0\1\27\2\0\2\27"+ - "\1\0\3\27\1\0\1\27\2\0\4\27\2\0\1\27"+ - "\152\0\1\27\34\0\1\100\11\0\3\25\5\0\1\25"+ - "\1\0\1\25\1\0\1\25\4\0\1\25\4\0\1\100"+ - "\1\0\2\100\4\0\1\25\5\0\1\25\3\0\1\100"+ - "\4\0\1\100\2\25\2\100\10\0\1\26\1\0\2\25"+ - "\1\0\1\100\10\0\1\25\23\0\1\25\3\0\1\25"+ - "\6\0\2\25\5\0\1\25\1\0\1\25\1\0\1\25"+ - "\1\0\11\25\2\0\1\25\4\0\1\25\4\0\6\25"+ - "\2\0\1\25\1\0\1\25\1\0\3\25\1\0\1\100"+ - "\1\0\2\25\4\0\3\25\1\0\1\25\10\0\1\25"+ - "\1\0\2\25\20\0\1\25\3\0\1\25\5\0\1\25"+ - "\32\0\15\25\5\0\3\25\1\0\1\25\5\0\1\25"+ - "\2\100\5\0\1\25\2\0\1\25\1\100\4\0\1\25"+ - "\2\0\1\25\1\0\1\25\102\0\2\100\6\0\1\100"+ - "\55\0\1\100\3\0\1\100\2\0\1\100\3\0\1\100"+ - "\5\0\1\100\7\0\1\100\4\0\2\100\3\0\2\100"+ - "\1\0\1\100\4\0\1\100\1\0\1\100\2\0\2\100"+ - "\1\0\3\100\1\0\1\100\2\0\4\100\2\0\1\100"+ - "\41\0\1\51\11\0\3\25\5\0\1\25\1\0\1\25"+ - "\1\0\1\25\4\0\1\25\4\0\1\51\1\0\2\51"+ - "\4\0\1\25\5\0\1\25\3\0\1\51\4\0\1\51"+ - "\2\25\2\51\10\0\1\51\1\0\2\25\1\0\1\51"+ - "\10\0\1\25\23\0\1\25\3\0\1\25\6\0\2\25"+ + "\1\41\1\0\1\126\3\0\1\43\5\0\1\44\3\0"+ + 
"\1\127\11\0\1\46\2\0\1\130\16\0\1\131\2\0"+ + "\1\132\21\0\1\101\17\0\1\25\1\54\1\52\1\103"+ + "\3\0\1\54\1\0\1\54\2\0\1\25\2\0\1\26"+ + "\11\0\3\25\5\0\1\25\1\0\1\25\1\0\1\25"+ + "\4\0\1\25\4\0\1\26\1\0\2\26\4\0\1\25"+ + "\5\0\1\25\3\0\1\26\4\0\1\26\2\25\2\26"+ + "\10\0\1\26\1\0\2\25\1\0\1\26\10\0\1\25"+ + "\24\0\1\25\3\0\1\25\6\0\2\25\5\0\1\25"+ + "\1\0\1\25\1\0\1\25\1\0\11\25\2\0\1\25"+ + "\4\0\1\25\4\0\6\25\2\0\1\25\1\0\1\25"+ + "\1\0\3\25\1\0\1\26\1\0\2\25\4\0\3\25"+ + "\1\0\1\25\10\0\1\25\1\0\2\25\21\0\1\25"+ + "\3\0\1\25\5\0\1\25\32\0\15\25\5\0\3\25"+ + "\1\0\1\25\5\0\1\25\2\26\5\0\1\25\2\0"+ + "\1\25\1\26\4\0\1\25\2\0\1\25\1\0\1\25"+ + "\103\0\2\26\6\0\1\26\56\0\1\26\3\0\1\26"+ + "\2\0\1\26\3\0\1\26\5\0\1\26\7\0\1\26"+ + "\4\0\2\26\3\0\2\26\1\0\1\26\4\0\1\26"+ + "\1\0\1\26\2\0\2\26\1\0\3\26\1\0\1\26"+ + "\2\0\4\26\2\0\1\26\53\0\1\133\3\0\1\134"+ + "\5\0\1\135\3\0\1\136\14\0\1\137\16\0\1\140"+ + "\2\0\1\141\42\0\1\64\1\26\6\0\1\64\4\0"+ + "\1\35\1\0\1\36\1\0\1\37\1\0\1\40\1\0"+ + "\1\41\1\0\1\142\3\0\1\56\5\0\1\57\3\0"+ + "\1\143\11\0\1\46\2\0\1\144\16\0\1\145\2\0"+ + "\1\146\21\0\1\101\17\0\1\25\1\65\1\26\1\103"+ + "\3\0\1\65\1\0\1\65\2\0\1\25\2\0\1\27"+ + "\37\0\1\27\1\0\2\27\16\0\1\27\4\0\1\27"+ + "\2\0\2\27\15\0\1\27\132\0\1\27\153\0\2\27"+ + "\11\0\1\27\115\0\2\27\6\0\1\27\56\0\1\27"+ + "\3\0\1\27\2\0\1\27\3\0\1\27\5\0\1\27"+ + "\7\0\1\27\4\0\2\27\3\0\2\27\1\0\1\27"+ + "\4\0\1\27\1\0\1\27\2\0\2\27\1\0\3\27"+ + "\1\0\1\27\2\0\4\27\2\0\1\27\153\0\1\27"+ + "\35\0\1\102\11\0\3\25\5\0\1\25\1\0\1\25"+ + "\1\0\1\25\4\0\1\25\4\0\1\102\1\0\2\102"+ + "\4\0\1\25\5\0\1\25\3\0\1\102\4\0\1\102"+ + "\2\25\2\102\10\0\1\26\1\0\2\25\1\0\1\102"+ + "\10\0\1\25\24\0\1\25\3\0\1\25\6\0\2\25"+ "\5\0\1\25\1\0\1\25\1\0\1\25\1\0\11\25"+ "\2\0\1\25\4\0\1\25\4\0\6\25\2\0\1\25"+ - "\1\0\1\25\1\0\3\25\1\0\1\51\1\0\2\25"+ + "\1\0\1\25\1\0\3\25\1\0\1\102\1\0\2\25"+ "\4\0\3\25\1\0\1\25\10\0\1\25\1\0\2\25"+ - "\20\0\1\25\3\0\1\25\5\0\1\25\32\0\15\25"+ - 
"\5\0\3\25\1\0\1\25\5\0\1\25\2\51\5\0"+ - "\1\25\2\0\1\25\1\51\4\0\1\25\2\0\1\25"+ - "\1\0\1\25\102\0\2\51\6\0\1\51\55\0\1\51"+ - "\3\0\1\51\2\0\1\51\3\0\1\51\5\0\1\51"+ - "\7\0\1\51\4\0\2\51\3\0\2\51\1\0\1\51"+ - "\4\0\1\51\1\0\1\51\2\0\2\51\1\0\3\51"+ - "\1\0\1\51\2\0\4\51\2\0\1\51\52\0\1\137"+ - "\3\0\1\140\5\0\1\141\3\0\1\142\14\0\1\143"+ - "\16\0\1\144\2\0\1\145\42\0\1\106\1\51\6\0"+ - "\1\106\4\0\1\52\11\0\3\25\5\0\1\25\1\0"+ - "\1\25\1\0\1\25\4\0\1\25\4\0\1\52\1\0"+ - "\2\52\4\0\1\25\5\0\1\25\3\0\1\52\4\0"+ - "\1\52\2\25\2\52\12\0\2\25\1\0\1\52\10\0"+ - "\1\25\23\0\1\25\11\0\2\25\2\0\5\25\2\0"+ - "\2\25\4\0\6\25\1\0\2\25\4\0\5\25\1\0"+ - "\5\25\1\0\2\25\1\0\3\25\1\0\4\25\1\0"+ - "\5\25\2\0\1\25\1\0\1\25\1\0\3\25\2\0"+ - "\1\25\1\0\1\25\1\0\1\25\2\0\1\25\16\0"+ - "\1\25\3\0\1\25\5\0\2\25\3\0\1\25\4\0"+ - "\3\25\4\0\1\25\1\0\1\25\2\0\1\25\1\0"+ - "\2\25\4\0\1\25\1\0\1\25\3\0\2\25\1\0"+ - "\1\25\5\0\3\25\1\0\1\25\10\0\1\25\4\0"+ - "\1\25\10\0\1\25\23\0\1\25\3\0\1\25\6\0"+ + "\21\0\1\25\3\0\1\25\5\0\1\25\32\0\15\25"+ + "\5\0\3\25\1\0\1\25\5\0\1\25\2\102\5\0"+ + "\1\25\2\0\1\25\1\102\4\0\1\25\2\0\1\25"+ + "\1\0\1\25\103\0\2\102\6\0\1\102\56\0\1\102"+ + "\3\0\1\102\2\0\1\102\3\0\1\102\5\0\1\102"+ + "\7\0\1\102\4\0\2\102\3\0\2\102\1\0\1\102"+ + "\4\0\1\102\1\0\1\102\2\0\2\102\1\0\3\102"+ + "\1\0\1\102\2\0\4\102\2\0\1\102\153\0\1\103"+ + "\46\0\1\147\15\0\1\150\14\0\1\151\16\0\1\152"+ + "\2\0\1\153\21\0\1\101\20\0\1\103\1\0\1\103"+ + "\3\0\1\54\1\0\1\103\5\0\1\34\11\0\3\25"+ + "\5\0\1\25\1\0\1\25\1\0\1\25\4\0\1\25"+ + "\4\0\1\34\1\0\2\34\4\0\1\25\5\0\1\25"+ + "\3\0\1\34\4\0\1\34\2\25\2\34\10\0\1\52"+ + "\1\0\2\25\1\0\1\34\10\0\1\25\24\0\1\25"+ + "\3\0\1\25\6\0\2\25\5\0\1\25\1\0\1\25"+ + "\1\0\1\25\1\0\11\25\2\0\1\25\4\0\1\25"+ + "\4\0\6\25\2\0\1\25\1\0\1\25\1\0\3\25"+ + "\1\0\1\34\1\0\2\25\4\0\3\25\1\0\1\25"+ + "\10\0\1\25\1\0\2\25\21\0\1\25\3\0\1\25"+ + "\5\0\1\25\32\0\15\25\5\0\3\25\1\0\1\25"+ + "\5\0\1\25\2\34\5\0\1\25\2\0\1\25\1\34"+ + 
"\4\0\1\25\2\0\1\25\1\0\1\25\103\0\2\34"+ + "\6\0\1\34\56\0\1\34\3\0\1\34\2\0\1\34"+ + "\3\0\1\34\5\0\1\34\7\0\1\34\4\0\2\34"+ + "\3\0\2\34\1\0\1\34\4\0\1\34\1\0\1\34"+ + "\2\0\2\34\1\0\3\34\1\0\1\34\2\0\4\34"+ + "\2\0\1\34\42\0\1\52\11\0\3\25\5\0\1\25"+ + "\1\0\1\25\1\0\1\25\4\0\1\25\4\0\1\52"+ + "\1\0\2\52\4\0\1\25\5\0\1\25\3\0\1\52"+ + "\4\0\1\52\2\25\2\52\10\0\1\52\1\0\2\25"+ + "\1\0\1\52\10\0\1\25\24\0\1\25\3\0\1\25"+ + "\6\0\2\25\5\0\1\25\1\0\1\25\1\0\1\25"+ + "\1\0\11\25\2\0\1\25\4\0\1\25\4\0\6\25"+ + "\2\0\1\25\1\0\1\25\1\0\3\25\1\0\1\52"+ + "\1\0\2\25\4\0\3\25\1\0\1\25\10\0\1\25"+ + "\1\0\2\25\21\0\1\25\3\0\1\25\5\0\1\25"+ + "\32\0\15\25\5\0\3\25\1\0\1\25\5\0\1\25"+ + "\2\52\5\0\1\25\2\0\1\25\1\52\4\0\1\25"+ + "\2\0\1\25\1\0\1\25\103\0\2\52\6\0\1\52"+ + "\56\0\1\52\3\0\1\52\2\0\1\52\3\0\1\52"+ + "\5\0\1\52\7\0\1\52\4\0\2\52\3\0\2\52"+ + "\1\0\1\52\4\0\1\52\1\0\1\52\2\0\2\52"+ + "\1\0\3\52\1\0\1\52\2\0\4\52\2\0\1\52"+ + "\53\0\1\154\3\0\1\155\5\0\1\156\3\0\1\157"+ + "\14\0\1\160\16\0\1\161\2\0\1\162\42\0\1\116"+ + "\1\52\6\0\1\116\5\0\1\53\11\0\3\25\5\0"+ + "\1\25\1\0\1\25\1\0\1\25\4\0\1\25\4\0"+ + "\1\53\1\0\2\53\4\0\1\25\5\0\1\25\3\0"+ + "\1\53\4\0\1\53\2\25\2\53\12\0\2\25\1\0"+ + "\1\53\10\0\1\25\24\0\1\25\11\0\2\25\2\0"+ + "\5\25\2\0\2\25\4\0\6\25\1\0\2\25\4\0"+ + "\5\25\1\0\5\25\1\0\2\25\1\0\3\25\1\0"+ + "\4\25\1\0\5\25\2\0\1\25\1\0\1\25\1\0"+ + "\3\25\2\0\1\25\1\0\1\25\1\0\1\25\2\0"+ + "\1\25\17\0\1\25\3\0\1\25\5\0\2\25\3\0"+ + "\1\25\4\0\3\25\4\0\1\25\1\0\1\25\2\0"+ + "\1\25\1\0\2\25\4\0\1\25\1\0\1\25\3\0"+ + "\2\25\1\0\1\25\5\0\3\25\1\0\1\25\10\0"+ + "\1\25\4\0\1\25\10\0\1\25\24\0\1\25\3\0"+ + "\1\25\6\0\2\25\5\0\1\25\1\0\1\25\1\0"+ + "\1\25\1\0\11\25\2\0\1\25\4\0\1\25\4\0"+ + "\6\25\2\0\1\25\1\0\1\25\1\0\3\25\1\0"+ + "\1\53\1\0\2\25\4\0\3\25\1\0\1\25\10\0"+ + "\1\25\1\0\2\25\21\0\1\25\3\0\1\25\5\0"+ + "\1\25\32\0\15\25\5\0\3\25\1\0\1\25\5\0"+ + "\1\25\2\53\5\0\1\25\2\0\1\25\1\53\4\0"+ + "\1\25\2\0\1\25\1\0\1\25\103\0\2\53\6\0"+ + 
"\1\53\56\0\1\53\3\0\1\53\2\0\1\53\3\0"+ + "\1\53\5\0\1\53\7\0\1\53\4\0\2\53\3\0"+ + "\2\53\1\0\1\53\4\0\1\53\1\0\1\53\2\0"+ + "\2\53\1\0\3\53\1\0\1\53\2\0\4\53\2\0"+ + "\1\53\42\0\1\54\11\0\3\25\5\0\1\25\1\0"+ + "\1\25\1\0\1\25\4\0\1\25\4\0\1\54\1\0"+ + "\2\54\4\0\1\25\5\0\1\25\3\0\1\54\4\0"+ + "\1\54\2\25\2\54\10\0\1\52\1\0\2\25\1\0"+ + "\1\54\10\0\1\25\24\0\1\25\3\0\1\25\6\0"+ "\2\25\5\0\1\25\1\0\1\25\1\0\1\25\1\0"+ "\11\25\2\0\1\25\4\0\1\25\4\0\6\25\2\0"+ - "\1\25\1\0\1\25\1\0\3\25\1\0\1\52\1\0"+ + "\1\25\1\0\1\25\1\0\3\25\1\0\1\54\1\0"+ "\2\25\4\0\3\25\1\0\1\25\10\0\1\25\1\0"+ - "\2\25\20\0\1\25\3\0\1\25\5\0\1\25\32\0"+ - "\15\25\5\0\3\25\1\0\1\25\5\0\1\25\2\52"+ - "\5\0\1\25\2\0\1\25\1\52\4\0\1\25\2\0"+ - "\1\25\1\0\1\25\102\0\2\52\6\0\1\52\55\0"+ - "\1\52\3\0\1\52\2\0\1\52\3\0\1\52\5\0"+ - "\1\52\7\0\1\52\4\0\2\52\3\0\2\52\1\0"+ - "\1\52\4\0\1\52\1\0\1\52\2\0\2\52\1\0"+ - "\3\52\1\0\1\52\2\0\4\52\2\0\1\52\41\0"+ - "\1\53\11\0\3\25\5\0\1\25\1\0\1\25\1\0"+ - "\1\25\4\0\1\25\4\0\1\53\1\0\2\53\4\0"+ - "\1\25\5\0\1\25\3\0\1\53\4\0\1\53\2\25"+ - "\2\53\10\0\1\51\1\0\2\25\1\0\1\53\10\0"+ - "\1\25\23\0\1\25\3\0\1\25\6\0\2\25\5\0"+ + "\2\25\21\0\1\25\3\0\1\25\5\0\1\25\32\0"+ + "\15\25\5\0\3\25\1\0\1\25\5\0\1\25\2\54"+ + "\5\0\1\25\2\0\1\25\1\54\4\0\1\25\2\0"+ + "\1\25\1\0\1\25\103\0\2\54\6\0\1\54\56\0"+ + "\1\54\3\0\1\54\2\0\1\54\3\0\1\54\5\0"+ + "\1\54\7\0\1\54\4\0\2\54\3\0\2\54\1\0"+ + "\1\54\4\0\1\54\1\0\1\54\2\0\2\54\1\0"+ + "\3\54\1\0\1\54\2\0\4\54\2\0\1\54\42\0"+ + "\1\64\37\0\1\64\1\0\2\64\16\0\1\64\4\0"+ + "\1\64\2\0\2\64\10\0\1\26\4\0\1\64\37\0"+ + "\1\26\102\0\1\26\147\0\2\26\134\0\1\64\153\0"+ + "\2\64\11\0\1\64\115\0\2\64\6\0\1\64\56\0"+ + "\1\64\3\0\1\64\2\0\1\64\3\0\1\64\5\0"+ + "\1\64\7\0\1\64\4\0\2\64\3\0\2\64\1\0"+ + "\1\64\4\0\1\64\1\0\1\64\2\0\2\64\1\0"+ + "\3\64\1\0\1\64\2\0\4\64\2\0\1\64\42\0"+ + "\1\65\11\0\3\25\5\0\1\25\1\0\1\25\1\0"+ + "\1\25\4\0\1\25\4\0\1\65\1\0\2\65\4\0"+ + "\1\25\5\0\1\25\3\0\1\65\4\0\1\65\2\25"+ + 
"\2\65\10\0\1\26\1\0\2\25\1\0\1\65\10\0"+ + "\1\25\24\0\1\25\3\0\1\25\6\0\2\25\5\0"+ "\1\25\1\0\1\25\1\0\1\25\1\0\11\25\2\0"+ "\1\25\4\0\1\25\4\0\6\25\2\0\1\25\1\0"+ - "\1\25\1\0\3\25\1\0\1\53\1\0\2\25\4\0"+ - "\3\25\1\0\1\25\10\0\1\25\1\0\2\25\20\0"+ + "\1\25\1\0\3\25\1\0\1\65\1\0\2\25\4\0"+ + "\3\25\1\0\1\25\10\0\1\25\1\0\2\25\21\0"+ "\1\25\3\0\1\25\5\0\1\25\32\0\15\25\5\0"+ - "\3\25\1\0\1\25\5\0\1\25\2\53\5\0\1\25"+ - "\2\0\1\25\1\53\4\0\1\25\2\0\1\25\1\0"+ - "\1\25\102\0\2\53\6\0\1\53\55\0\1\53\3\0"+ - "\1\53\2\0\1\53\3\0\1\53\5\0\1\53\7\0"+ - "\1\53\4\0\2\53\3\0\2\53\1\0\1\53\4\0"+ - "\1\53\1\0\1\53\2\0\2\53\1\0\3\53\1\0"+ - "\1\53\2\0\4\53\2\0\1\53\41\0\1\63\37\0"+ - "\1\63\1\0\2\63\16\0\1\63\4\0\1\63\2\0"+ - "\2\63\10\0\1\26\4\0\1\63\36\0\1\26\102\0"+ - "\1\26\146\0\2\26\133\0\1\63\152\0\2\63\11\0"+ - "\1\63\114\0\2\63\6\0\1\63\55\0\1\63\3\0"+ - "\1\63\2\0\1\63\3\0\1\63\5\0\1\63\7\0"+ - "\1\63\4\0\2\63\3\0\2\63\1\0\1\63\4\0"+ - "\1\63\1\0\1\63\2\0\2\63\1\0\3\63\1\0"+ - "\1\63\2\0\4\63\2\0\1\63\41\0\1\64\11\0"+ - "\3\25\5\0\1\25\1\0\1\25\1\0\1\25\4\0"+ - "\1\25\4\0\1\64\1\0\2\64\4\0\1\25\5\0"+ - "\1\25\3\0\1\64\4\0\1\64\2\25\2\64\10\0"+ - "\1\26\1\0\2\25\1\0\1\64\10\0\1\25\23\0"+ - "\1\25\3\0\1\25\6\0\2\25\5\0\1\25\1\0"+ - "\1\25\1\0\1\25\1\0\11\25\2\0\1\25\4\0"+ - "\1\25\4\0\6\25\2\0\1\25\1\0\1\25\1\0"+ - "\3\25\1\0\1\64\1\0\2\25\4\0\3\25\1\0"+ - "\1\25\10\0\1\25\1\0\2\25\20\0\1\25\3\0"+ - "\1\25\5\0\1\25\32\0\15\25\5\0\3\25\1\0"+ - "\1\25\5\0\1\25\2\64\5\0\1\25\2\0\1\25"+ - "\1\64\4\0\1\25\2\0\1\25\1\0\1\25\102\0"+ - "\2\64\6\0\1\64\55\0\1\64\3\0\1\64\2\0"+ - "\1\64\3\0\1\64\5\0\1\64\7\0\1\64\4\0"+ - "\2\64\3\0\2\64\1\0\1\64\4\0\1\64\1\0"+ - "\1\64\2\0\2\64\1\0\3\64\1\0\1\64\2\0"+ - "\4\64\2\0\1\64\41\0\1\106\37\0\1\106\1\0"+ - "\2\106\16\0\1\106\4\0\1\106\2\0\2\106\10\0"+ - "\1\51\4\0\1\106\36\0\1\51\102\0\1\51\146\0"+ - "\2\51\133\0\1\106\152\0\2\106\11\0\1\106\114\0"+ - "\2\106\6\0\1\106\55\0\1\106\3\0\1\106\2\0"+ - 
"\1\106\3\0\1\106\5\0\1\106\7\0\1\106\4\0"+ - "\2\106\3\0\2\106\1\0\1\106\4\0\1\106\1\0"+ - "\1\106\2\0\2\106\1\0\3\106\1\0\1\106\2\0"+ - "\4\106\2\0\1\106\37\0"; + "\3\25\1\0\1\25\5\0\1\25\2\65\5\0\1\25"+ + "\2\0\1\25\1\65\4\0\1\25\2\0\1\25\1\0"+ + "\1\25\103\0\2\65\6\0\1\65\56\0\1\65\3\0"+ + "\1\65\2\0\1\65\3\0\1\65\5\0\1\65\7\0"+ + "\1\65\4\0\2\65\3\0\2\65\1\0\1\65\4\0"+ + "\1\65\1\0\1\65\2\0\2\65\1\0\3\65\1\0"+ + "\1\65\2\0\4\65\2\0\1\65\42\0\1\103\37\0"+ + "\1\103\1\0\2\103\16\0\1\103\4\0\1\103\2\0"+ + "\2\103\15\0\1\103\132\0\1\103\153\0\2\103\11\0"+ + "\1\103\115\0\2\103\6\0\1\103\56\0\1\103\3\0"+ + "\1\103\2\0\1\103\3\0\1\103\5\0\1\103\7\0"+ + "\1\103\4\0\2\103\3\0\2\103\1\0\1\103\4\0"+ + "\1\103\1\0\1\103\2\0\2\103\1\0\3\103\1\0"+ + "\1\103\2\0\4\103\2\0\1\103\42\0\1\116\37\0"+ + "\1\116\1\0\2\116\16\0\1\116\4\0\1\116\2\0"+ + "\2\116\10\0\1\52\4\0\1\116\37\0\1\52\102\0"+ + "\1\52\147\0\2\52\134\0\1\116\153\0\2\116\11\0"+ + "\1\116\115\0\2\116\6\0\1\116\56\0\1\116\3\0"+ + "\1\116\2\0\1\116\3\0\1\116\5\0\1\116\7\0"+ + "\1\116\4\0\2\116\3\0\2\116\1\0\1\116\4\0"+ + "\1\116\1\0\1\116\2\0\2\116\1\0\3\116\1\0"+ + "\1\116\2\0\4\116\2\0\1\116\40\0"; private static int [] zzUnpackTrans() { - int [] result = new int[9180]; + int [] result = new int[10609]; int offset = 0; offset = zzUnpackTrans(ZZ_TRANS_PACKED_0, offset, result); return result; @@ -584,11 +621,11 @@ public final class StandardTokenizerImpl implements StandardTokenizerInterface { private static final int [] ZZ_ATTRIBUTE = zzUnpackAttribute(); private static final String ZZ_ATTRIBUTE_PACKED_0 = - "\1\0\1\11\27\1\2\11\15\0\1\1\1\0\1\1"+ - "\10\0\1\1\61\0"; + "\1\0\1\11\27\1\2\11\1\1\15\0\1\1\1\0"+ + "\1\1\10\0\1\1\15\0\1\1\57\0"; private static int [] zzUnpackAttribute() { - int [] result = new int[101]; + int [] result = new int[114]; int offset = 0; offset = zzUnpackAttribute(ZZ_ATTRIBUTE_PACKED_0, offset, result); return result; @@ -675,6 +712,10 @@ public final class StandardTokenizerImpl implements 
StandardTokenizerInterface { public static final int IDEOGRAPHIC_TYPE = StandardTokenizer.IDEOGRAPHIC; public static final int HIRAGANA_TYPE = StandardTokenizer.HIRAGANA; + + public static final int KATAKANA_TYPE = StandardTokenizer.KATAKANA; + + public static final int HANGUL_TYPE = StandardTokenizer.HANGUL; public final int yychar() { @@ -719,7 +760,7 @@ public final class StandardTokenizerImpl implements StandardTokenizerInterface { char [] map = new char[0x10000]; int i = 0; /* index in packed string */ int j = 0; /* index in unpacked array */ - while (i < 2640) { + while (i < 2650) { int count = packed.charAt(i++); char value = packed.charAt(i++); do map[j++] = value; while (--count > 0); @@ -1001,27 +1042,35 @@ public final class StandardTokenizerImpl implements StandardTokenizerInterface { case 2: { return WORD_TYPE; } - case 7: break; - case 4: + case 9: break; + case 5: { return SOUTH_EAST_ASIAN_TYPE; } - case 8: break; - case 5: - { return IDEOGRAPHIC_TYPE; - } - case 9: break; - case 1: - { /* Not numeric, word, ideographic, hiragana, or SE Asian -- ignore it. */ - } case 10: break; - case 3: - { return NUMERIC_TYPE; + case 4: + { return KATAKANA_TYPE; } case 11: break; case 6: - { return HIRAGANA_TYPE; + { return IDEOGRAPHIC_TYPE; } case 12: break; + case 1: + { /* Not numeric, word, ideographic, hiragana, or SE Asian -- ignore it. 
*/ + } + case 13: break; + case 8: + { return HANGUL_TYPE; + } + case 14: break; + case 3: + { return NUMERIC_TYPE; + } + case 15: break; + case 7: + { return HIRAGANA_TYPE; + } + case 16: break; default: if (zzInput == YYEOF && zzStartRead == zzCurrentPos) { zzAtEOF = true; diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex index 8c805923466..219488375f0 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex @@ -59,6 +59,8 @@ ComplexContext = ([\p{LB:Complex_Context}] | {ComplexContextSupp}) Han = ([\p{Script:Han}] | {HanSupp}) Hiragana = ([\p{Script:Hiragana}] | {HiraganaSupp}) +// Script=Hangul & Aletter +HangulEx = (!(!\p{Script:Hangul}|!\p{WB:ALetter})) ({Format} | {Extend})* // UAX#29 WB4. X (Extend | Format)* --> X // ALetterEx = {ALetter} ({Format} | {Extend})* @@ -90,6 +92,10 @@ ExtendNumLetEx = {ExtendNumLet} ({Format} | {Extend})* public static final int IDEOGRAPHIC_TYPE = StandardTokenizer.IDEOGRAPHIC; public static final int HIRAGANA_TYPE = StandardTokenizer.HIRAGANA; + + public static final int KATAKANA_TYPE = StandardTokenizer.KATAKANA; + + public static final int HANGUL_TYPE = StandardTokenizer.HANGUL; public final int yychar() { @@ -123,6 +129,12 @@ ExtendNumLetEx = {ExtendNumLet} ({Format} | {Extend})* {ExtendNumLetEx}* { return NUMERIC_TYPE; } +// subset of the below for typing purposes only! +{HangulEx}+ + { return HANGUL_TYPE; } + +{KatakanaEx}+ + { return KATAKANA_TYPE; } // UAX#29 WB5. ALetter × ALetter // WB6. 
ALetter × (MidLetter | MidNumLet) ALetter diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.java index f440611eeb6..d1835e3d57d 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.java +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.java @@ -1,4 +1,4 @@ -/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 1/6/11 12:09 AM */ +/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 2/9/11 11:45 AM */ package org.apache.lucene.analysis.standard; @@ -74,16 +74,16 @@ public final class UAX29URLEmailTokenizer extends Tokenizer { * Translates characters to character classes */ private static final String ZZ_CMAP_PACKED = - "\1\236\10\234\2\236\2\234\1\236\23\234\1\237\1\233\1\226\1\237"+ - "\1\217\1\215\1\222\2\220\2\237\1\221\1\201\1\146\1\225\1\202"+ - "\1\205\1\212\1\206\1\211\1\203\1\204\1\213\1\210\1\207\1\214"+ - "\1\231\1\234\1\232\1\234\1\224\1\223\1\147\1\173\1\150\1\151"+ - "\1\152\1\155\1\156\1\174\1\157\1\175\1\200\1\160\1\161\1\162"+ - "\1\154\1\164\1\163\1\153\1\165\1\166\1\167\1\176\1\170\1\171"+ - "\1\177\1\172\1\227\1\235\1\230\1\240\1\216\1\240\1\147\1\173"+ - "\1\150\1\151\1\152\1\155\1\156\1\174\1\157\1\175\1\200\1\160"+ - "\1\161\1\162\1\154\1\164\1\163\1\153\1\165\1\166\1\167\1\176"+ - "\1\170\1\171\1\177\1\172\3\240\1\215\1\241\52\0\1\132\2\0"+ + "\1\237\10\235\2\237\2\235\1\237\23\235\1\240\1\234\1\227\1\240"+ + "\1\220\1\216\1\223\2\221\2\240\1\222\1\202\1\147\1\226\1\203"+ + "\1\206\1\214\1\207\1\212\1\204\1\205\1\211\1\213\1\210\1\215"+ + "\1\232\1\235\1\233\1\235\1\225\1\224\1\150\1\174\1\151\1\152"+ + "\1\153\1\156\1\157\1\175\1\160\1\176\1\201\1\161\1\162\1\163"+ + "\1\155\1\165\1\164\1\154\1\166\1\167\1\170\1\177\1\171\1\172"+ + 
"\1\200\1\173\1\230\1\236\1\231\1\241\1\217\1\241\1\150\1\174"+ + "\1\151\1\152\1\153\1\156\1\157\1\175\1\160\1\176\1\201\1\161"+ + "\1\162\1\163\1\155\1\165\1\164\1\154\1\166\1\167\1\170\1\177"+ + "\1\171\1\172\1\200\1\173\3\241\1\216\1\242\52\0\1\132\2\0"+ "\1\133\7\0\1\132\1\0\1\136\2\0\1\132\5\0\27\132\1\0"+ "\37\132\1\0\u01ca\132\4\0\14\132\16\0\5\132\7\0\1\132\1\0"+ "\1\132\21\0\160\133\5\132\1\0\2\132\2\0\4\132\1\137\7\0"+ @@ -136,85 +136,85 @@ public final class UAX29URLEmailTokenizer extends Tokenizer { "\11\0\1\133\71\0\53\142\24\143\1\142\12\134\6\0\6\142\4\143"+ "\4\142\3\143\1\142\3\143\2\142\7\143\3\142\4\143\15\142\14\143"+ "\1\142\1\143\12\134\4\143\2\142\46\132\12\0\53\132\1\0\1\132"+ - "\3\0\u0149\132\1\0\4\132\2\0\7\132\1\0\1\132\1\0\4\132"+ - "\2\0\51\132\1\0\4\132\2\0\41\132\1\0\4\132\2\0\7\132"+ - "\1\0\1\132\1\0\4\132\2\0\17\132\1\0\71\132\1\0\4\132"+ - "\2\0\103\132\2\0\3\133\40\0\20\132\20\0\125\132\14\0\u026c\132"+ - "\2\0\21\132\1\0\32\132\5\0\113\132\3\0\3\132\17\0\15\132"+ - "\1\0\4\132\3\133\13\0\22\132\3\133\13\0\22\132\2\133\14\0"+ - "\15\132\1\0\3\132\1\0\2\133\14\0\64\142\2\143\36\143\3\0"+ - "\1\142\4\0\1\142\1\143\2\0\12\134\41\0\3\133\2\0\12\134"+ - "\6\0\130\132\10\0\51\132\1\133\1\132\5\0\106\132\12\0\35\132"+ - "\3\0\14\133\4\0\14\133\12\0\12\134\36\142\2\0\5\142\13\0"+ - "\54\142\4\0\21\143\7\142\2\143\6\0\12\134\1\142\3\0\2\142"+ - "\40\0\27\132\5\133\4\0\65\142\12\143\1\0\35\143\2\0\1\133"+ - "\12\134\6\0\12\134\6\0\16\142\122\0\5\133\57\132\21\133\7\132"+ - "\4\0\12\134\21\0\11\133\14\0\3\133\36\132\12\133\3\0\2\132"+ - "\12\134\6\0\46\132\16\133\14\0\44\132\24\133\10\0\12\134\3\0"+ - "\3\132\12\134\44\132\122\0\3\133\1\0\25\133\4\132\1\133\4\132"+ - "\1\133\15\0\300\132\47\133\25\0\4\133\u0116\132\2\0\6\132\2\0"+ - "\46\132\2\0\6\132\2\0\10\132\1\0\1\132\1\0\1\132\1\0"+ - "\1\132\1\0\37\132\2\0\65\132\1\0\7\132\1\0\1\132\3\0"+ - "\3\132\1\0\7\132\3\0\4\132\2\0\6\132\4\0\15\132\5\0"+ - 
"\3\132\1\0\7\132\17\0\2\133\2\133\10\0\2\140\12\0\1\140"+ - "\2\0\1\136\2\0\5\133\20\0\2\141\3\0\1\137\17\0\1\141"+ - "\13\0\5\133\5\0\6\133\1\0\1\132\15\0\1\132\20\0\15\132"+ - "\63\0\41\133\21\0\1\132\4\0\1\132\2\0\12\132\1\0\1\132"+ - "\3\0\5\132\6\0\1\132\1\0\1\132\1\0\1\132\1\0\4\132"+ - "\1\0\13\132\2\0\4\132\5\0\5\132\4\0\1\132\21\0\51\132"+ - "\u032d\0\64\132\u0716\0\57\132\1\0\57\132\1\0\205\132\6\0\4\132"+ - "\3\133\16\0\46\132\12\0\66\132\11\0\1\132\17\0\1\133\27\132"+ - "\11\0\7\132\1\0\7\132\1\0\7\132\1\0\7\132\1\0\7\132"+ - "\1\0\7\132\1\0\7\132\1\0\7\132\1\0\40\133\57\0\1\132"+ - "\120\0\32\144\1\0\131\144\14\0\326\144\57\0\1\132\1\0\1\144"+ - "\31\0\11\144\6\133\1\0\5\135\2\0\3\144\1\132\1\132\4\0"+ - "\126\145\2\0\2\133\2\135\3\145\133\135\1\0\4\135\5\0\51\132"+ - "\3\0\136\132\21\0\33\132\65\0\20\135\320\0\57\135\1\0\130\135"+ - "\250\0\u19b6\144\112\0\u51cc\144\64\0\u048d\132\103\0\56\132\2\0\u010d\132"+ - "\3\0\20\132\12\134\2\132\24\0\57\132\4\133\11\0\2\133\1\0"+ - "\31\132\10\0\120\132\2\133\45\0\11\132\2\0\147\132\2\0\4\132"+ - "\1\0\2\132\16\0\12\132\120\0\10\132\1\133\3\132\1\133\4\132"+ - "\1\133\27\132\5\133\30\0\64\132\14\0\2\133\62\132\21\133\13\0"+ - "\12\134\6\0\22\133\6\132\3\0\1\132\4\0\12\134\34\132\10\133"+ - "\2\0\27\132\15\133\14\0\35\132\3\0\4\133\57\132\16\133\16\0"+ - "\1\132\12\134\46\0\51\132\16\133\11\0\3\132\1\133\10\132\2\133"+ - "\2\0\12\134\6\0\33\142\1\143\4\0\60\142\1\143\1\142\3\143"+ - "\2\142\2\143\5\142\2\143\1\142\1\143\1\142\30\0\5\142\41\0"+ - "\6\132\2\0\6\132\2\0\6\132\11\0\7\132\1\0\7\132\221\0"+ - "\43\132\10\133\1\0\2\133\2\0\12\134\6\0\u2ba4\132\14\0\27\132"+ - "\4\0\61\132\4\0\1\31\1\25\1\46\1\43\1\13\3\0\1\7"+ - "\1\5\2\0\1\3\1\1\14\0\1\11\21\0\1\112\7\0\1\65"+ - "\1\17\6\0\1\130\3\0\1\120\1\120\1\120\1\120\1\120\1\120"+ + "\3\0\u0100\146\111\132\1\0\4\132\2\0\7\132\1\0\1\132\1\0"+ + "\4\132\2\0\51\132\1\0\4\132\2\0\41\132\1\0\4\132\2\0"+ + "\7\132\1\0\1\132\1\0\4\132\2\0\17\132\1\0\71\132\1\0"+ + 
"\4\132\2\0\103\132\2\0\3\133\40\0\20\132\20\0\125\132\14\0"+ + "\u026c\132\2\0\21\132\1\0\32\132\5\0\113\132\3\0\3\132\17\0"+ + "\15\132\1\0\4\132\3\133\13\0\22\132\3\133\13\0\22\132\2\133"+ + "\14\0\15\132\1\0\3\132\1\0\2\133\14\0\64\142\2\143\36\143"+ + "\3\0\1\142\4\0\1\142\1\143\2\0\12\134\41\0\3\133\2\0"+ + "\12\134\6\0\130\132\10\0\51\132\1\133\1\132\5\0\106\132\12\0"+ + "\35\132\3\0\14\133\4\0\14\133\12\0\12\134\36\142\2\0\5\142"+ + "\13\0\54\142\4\0\21\143\7\142\2\143\6\0\12\134\1\142\3\0"+ + "\2\142\40\0\27\132\5\133\4\0\65\142\12\143\1\0\35\143\2\0"+ + "\1\133\12\134\6\0\12\134\6\0\16\142\122\0\5\133\57\132\21\133"+ + "\7\132\4\0\12\134\21\0\11\133\14\0\3\133\36\132\12\133\3\0"+ + "\2\132\12\134\6\0\46\132\16\133\14\0\44\132\24\133\10\0\12\134"+ + "\3\0\3\132\12\134\44\132\122\0\3\133\1\0\25\133\4\132\1\133"+ + "\4\132\1\133\15\0\300\132\47\133\25\0\4\133\u0116\132\2\0\6\132"+ + "\2\0\46\132\2\0\6\132\2\0\10\132\1\0\1\132\1\0\1\132"+ + "\1\0\1\132\1\0\37\132\2\0\65\132\1\0\7\132\1\0\1\132"+ + "\3\0\3\132\1\0\7\132\3\0\4\132\2\0\6\132\4\0\15\132"+ + "\5\0\3\132\1\0\7\132\17\0\2\133\2\133\10\0\2\140\12\0"+ + "\1\140\2\0\1\136\2\0\5\133\20\0\2\141\3\0\1\137\17\0"+ + "\1\141\13\0\5\133\5\0\6\133\1\0\1\132\15\0\1\132\20\0"+ + "\15\132\63\0\41\133\21\0\1\132\4\0\1\132\2\0\12\132\1\0"+ + "\1\132\3\0\5\132\6\0\1\132\1\0\1\132\1\0\1\132\1\0"+ + "\4\132\1\0\13\132\2\0\4\132\5\0\5\132\4\0\1\132\21\0"+ + "\51\132\u032d\0\64\132\u0716\0\57\132\1\0\57\132\1\0\205\132\6\0"+ + "\4\132\3\133\16\0\46\132\12\0\66\132\11\0\1\132\17\0\1\133"+ + "\27\132\11\0\7\132\1\0\7\132\1\0\7\132\1\0\7\132\1\0"+ + "\7\132\1\0\7\132\1\0\7\132\1\0\7\132\1\0\40\133\57\0"+ + "\1\132\120\0\32\144\1\0\131\144\14\0\326\144\57\0\1\132\1\0"+ + "\1\144\31\0\11\144\4\133\2\133\1\0\5\135\2\0\3\144\1\132"+ + "\1\132\4\0\126\145\2\0\2\133\2\135\3\145\133\135\1\0\4\135"+ + "\5\0\51\132\3\0\136\146\21\0\33\132\65\0\20\135\37\0\101\0"+ + 
"\37\0\121\0\57\135\1\0\130\135\250\0\u19b6\144\112\0\u51cc\144\64\0"+ + "\u048d\132\103\0\56\132\2\0\u010d\132\3\0\20\132\12\134\2\132\24\0"+ + "\57\132\4\133\11\0\2\133\1\0\31\132\10\0\120\132\2\133\45\0"+ + "\11\132\2\0\147\132\2\0\4\132\1\0\2\132\16\0\12\132\120\0"+ + "\10\132\1\133\3\132\1\133\4\132\1\133\27\132\5\133\30\0\64\132"+ + "\14\0\2\133\62\132\21\133\13\0\12\134\6\0\22\133\6\132\3\0"+ + "\1\132\4\0\12\134\34\132\10\133\2\0\27\132\15\133\14\0\35\146"+ + "\3\0\4\133\57\132\16\133\16\0\1\132\12\134\46\0\51\132\16\133"+ + "\11\0\3\132\1\133\10\132\2\133\2\0\12\134\6\0\33\142\1\143"+ + "\4\0\60\142\1\143\1\142\3\143\2\142\2\143\5\142\2\143\1\142"+ + "\1\143\1\142\30\0\5\142\41\0\6\132\2\0\6\132\2\0\6\132"+ + "\11\0\7\132\1\0\7\132\221\0\43\132\10\133\1\0\2\133\2\0"+ + "\12\134\6\0\u2ba4\146\14\0\27\146\4\0\61\146\4\0\1\31\1\25"+ + "\1\46\1\43\1\13\3\0\1\7\1\5\2\0\1\3\1\1\14\0"+ + "\1\11\21\0\1\112\7\0\1\65\1\17\6\0\1\130\3\0\1\120"+ "\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\120"+ "\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\120"+ "\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\120"+ - "\1\120\1\120\1\120\1\120\1\120\1\121\1\120\1\120\1\120\1\125"+ - "\1\123\17\0\1\114\u02c1\0\1\70\277\0\1\113\1\71\1\2\3\124"+ - "\2\35\1\124\1\35\2\124\1\14\21\124\2\60\7\73\1\72\7\73"+ - "\7\52\1\15\1\52\1\75\2\45\1\44\1\75\1\45\1\44\10\75"+ - "\2\63\5\61\2\54\5\61\1\6\10\37\5\21\3\27\12\106\20\27"+ - "\3\42\32\30\1\26\2\24\2\110\1\111\2\110\2\111\2\110\1\111"+ - "\3\24\1\16\2\24\12\64\1\74\1\41\1\34\1\64\6\41\1\34"+ - "\66\41\5\115\6\103\1\51\4\103\2\51\10\103\1\51\7\100\1\12"+ - "\2\100\32\103\1\12\4\100\1\12\5\102\1\101\1\102\3\101\7\102"+ - "\1\101\23\102\5\67\3\102\6\67\2\67\6\66\10\66\2\100\7\66"+ - "\36\100\4\66\102\100\15\115\1\77\2\115\1\131\3\117\1\115\2\117"+ - "\5\115\4\117\4\116\1\115\3\116\1\115\5\116\26\56\4\23\1\105"+ - "\2\104\4\122\1\104\2\122\3\76\33\122\35\55\3\122\35\126\3\122"+ - 
"\6\126\2\33\31\126\1\33\17\126\6\122\4\22\1\10\37\22\1\10"+ - "\4\22\25\62\1\127\11\62\21\55\5\62\1\57\12\40\13\62\4\55"+ - "\1\50\6\55\12\122\17\55\1\47\3\53\15\20\11\36\1\32\24\36"+ - "\2\20\11\36\1\32\31\36\1\32\4\20\4\36\2\32\2\107\1\4"+ - "\5\107\52\4\u1900\0\u012e\144\2\0\76\144\2\0\152\144\46\0\7\132"+ - "\14\0\5\132\5\0\1\132\1\133\12\132\1\0\15\132\1\0\5\132"+ - "\1\0\1\132\1\0\2\132\1\0\2\132\1\0\154\132\41\0\u016b\132"+ - "\22\0\100\132\2\0\66\132\50\0\14\132\4\0\20\133\1\137\2\0"+ - "\1\136\1\137\13\0\7\133\14\0\2\141\30\0\3\141\1\137\1\0"+ - "\1\140\1\0\1\137\1\136\32\0\5\132\1\0\207\132\2\0\1\133"+ - "\7\0\1\140\4\0\1\137\1\0\1\140\1\0\12\134\1\136\1\137"+ - "\5\0\32\132\4\0\1\141\1\0\32\132\13\0\70\135\2\133\37\132"+ - "\3\0\6\132\2\0\6\132\2\0\6\132\2\0\3\132\34\0\3\133"+ - "\4\0"; + "\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\120\1\120"+ + "\1\121\1\120\1\120\1\120\1\125\1\123\17\0\1\114\u02c1\0\1\70"+ + "\277\0\1\113\1\71\1\2\3\124\2\35\1\124\1\35\2\124\1\14"+ + "\21\124\2\60\7\73\1\72\7\73\7\52\1\15\1\52\1\75\2\45"+ + "\1\44\1\75\1\45\1\44\10\75\2\63\5\61\2\54\5\61\1\6"+ + "\10\37\5\21\3\27\12\106\20\27\3\42\32\30\1\26\2\24\2\110"+ + "\1\111\2\110\2\111\2\110\1\111\3\24\1\16\2\24\12\64\1\74"+ + "\1\41\1\34\1\64\6\41\1\34\66\41\5\115\6\103\1\51\4\103"+ + "\2\51\10\103\1\51\7\100\1\12\2\100\32\103\1\12\4\100\1\12"+ + "\5\102\1\101\1\102\3\101\7\102\1\101\23\102\5\67\3\102\6\67"+ + "\2\67\6\66\10\66\2\100\7\66\36\100\4\66\102\100\15\115\1\77"+ + "\2\115\1\131\3\117\1\115\2\117\5\115\4\117\4\116\1\115\3\116"+ + "\1\115\5\116\26\56\4\23\1\105\2\104\4\122\1\104\2\122\3\76"+ + "\33\122\35\55\3\122\35\126\3\122\6\126\2\33\31\126\1\33\17\126"+ + "\6\122\4\22\1\10\37\22\1\10\4\22\25\62\1\127\11\62\21\55"+ + "\5\62\1\57\12\40\13\62\4\55\1\50\6\55\12\122\17\55\1\47"+ + "\3\53\15\20\11\36\1\32\24\36\2\20\11\36\1\32\31\36\1\32"+ + "\4\20\4\36\2\32\2\107\1\4\5\107\52\4\u1900\0\u012e\144\2\0"+ + 
"\76\144\2\0\152\144\46\0\7\132\14\0\5\132\5\0\1\132\1\133"+ + "\12\132\1\0\15\132\1\0\5\132\1\0\1\132\1\0\2\132\1\0"+ + "\2\132\1\0\154\132\41\0\u016b\132\22\0\100\132\2\0\66\132\50\0"+ + "\14\132\4\0\20\133\1\137\2\0\1\136\1\137\13\0\7\133\14\0"+ + "\2\141\30\0\3\141\1\137\1\0\1\140\1\0\1\137\1\136\32\0"+ + "\5\132\1\0\207\132\2\0\1\133\7\0\1\140\4\0\1\137\1\0"+ + "\1\140\1\0\12\134\1\136\1\137\5\0\32\132\4\0\1\141\1\0"+ + "\32\132\13\0\70\135\2\133\37\146\3\0\6\146\2\0\6\146\2\0"+ + "\6\146\2\0\3\146\34\0\3\133\4\0"; /** * Translates characters to character classes @@ -227,26 +227,26 @@ public final class UAX29URLEmailTokenizer extends Tokenizer { private static final int [] ZZ_ACTION = zzUnpackAction(); private static final String ZZ_ACTION_PACKED_0 = - "\1\0\23\1\1\2\1\3\1\2\1\1\1\4\1\5"+ - "\1\6\1\1\3\2\3\3\3\1\15\0\1\2\1\0"+ - "\1\2\10\0\1\3\21\0\2\2\1\0\3\2\1\0"+ - "\1\3\1\0\2\3\1\2\1\3\46\0\32\2\3\0"+ - "\4\2\32\0\4\3\17\0\1\7\1\0\6\10\3\2"+ - "\2\10\1\2\4\10\1\2\2\10\2\0\1\2\1\0"+ - "\1\2\6\10\3\0\2\10\1\0\4\10\1\0\2\10"+ - "\1\0\2\3\10\0\1\10\32\0\1\10\1\0\3\10"+ - "\6\2\1\0\1\2\2\0\1\2\1\0\1\10\10\0"+ - "\3\3\15\0\3\10\6\7\3\0\2\7\1\0\4\7"+ - "\1\0\2\7\2\10\1\0\2\10\1\0\2\10\1\0"+ - "\1\10\2\2\7\0\2\3\20\0\1\7\10\0\1\10"+ - "\3\0\1\2\32\0\3\10\23\0\1\10\27\0\1\10"+ - "\4\0\1\10\6\0\1\10\4\0\2\10\36\0\1\10"+ - "\51\0\1\10\42\0\1\10\51\0\1\10\122\0\1\10"+ - "\117\0\1\10\107\0\1\10\74\0\1\10\51\0\1\10"+ - "\333\0"; + "\1\0\23\1\1\2\1\3\1\4\1\1\1\5\1\6"+ + "\1\7\1\10\1\1\3\2\3\3\3\1\15\0\1\2"+ + "\1\0\1\2\10\0\1\3\15\0\1\2\12\0\2\2"+ + "\1\0\3\2\1\0\1\3\1\0\2\3\1\2\1\3"+ + "\53\0\32\2\3\0\4\2\32\0\4\3\17\0\1\11"+ + "\1\0\6\12\3\2\2\12\1\2\4\12\1\2\2\12"+ + "\2\0\1\2\1\0\1\2\6\12\3\0\2\12\1\0"+ + "\4\12\1\0\2\12\1\0\2\3\10\0\1\12\32\0"+ + "\1\12\1\0\3\12\6\2\1\0\1\2\2\0\1\2"+ + "\1\0\1\12\10\0\3\3\15\0\3\12\6\11\3\0"+ + "\2\11\1\0\4\11\1\0\2\11\2\12\1\0\2\12"+ + "\1\0\2\12\1\0\1\12\2\2\7\0\2\3\20\0"+ + "\1\11\10\0\1\12\3\0\1\2\36\0\3\12\23\0"+ + 
"\1\12\36\0\1\12\4\0\1\12\6\0\1\12\4\0"+ + "\2\12\42\0\1\12\57\0\1\12\51\0\1\12\60\0"+ + "\1\12\140\0\1\12\135\0\1\12\123\0\1\12\106\0"+ + "\1\12\57\0\1\12\362\0"; private static int [] zzUnpackAction() { - int [] result = new int[1204]; + int [] result = new int[1331]; int offset = 0; offset = zzUnpackAction(ZZ_ACTION_PACKED_0, offset, result); return result; @@ -271,160 +271,176 @@ public final class UAX29URLEmailTokenizer extends Tokenizer { private static final int [] ZZ_ROWMAP = zzUnpackRowMap(); private static final String ZZ_ROWMAP_PACKED_0 = - "\0\0\0\242\0\u0144\0\u01e6\0\u0288\0\u032a\0\u03cc\0\u046e"+ - "\0\u0510\0\u05b2\0\u0654\0\u06f6\0\u0798\0\u083a\0\u08dc\0\u097e"+ - "\0\u0a20\0\u0ac2\0\u0b64\0\u0c06\0\u0ca8\0\u0d4a\0\u0dec\0\u0e8e"+ - "\0\u0f30\0\242\0\242\0\u0fd2\0\u1074\0\u1116\0\u11b8\0\u125a"+ - "\0\u12fc\0\u139e\0\u1440\0\u14e2\0\u1584\0\u0144\0\u01e6\0\u0288"+ - "\0\u032a\0\u03cc\0\u1626\0\u16c8\0\u176a\0\u180c\0\u06f6\0\u18ae"+ - "\0\u1950\0\u19f2\0\u1a94\0\u1b36\0\u1bd8\0\u1c7a\0\u0510\0\u05b2"+ - "\0\u1d1c\0\u1dbe\0\u1e60\0\u1f02\0\u1fa4\0\u2046\0\u20e8\0\u218a"+ - "\0\u222c\0\u22ce\0\u2370\0\u2412\0\u24b4\0\u2556\0\u25f8\0\u269a"+ - "\0\u273c\0\u0e8e\0\u27de\0\u0fd2\0\u2880\0\u2922\0\u29c4\0\u2a66"+ - "\0\u2b08\0\u2baa\0\u2c4c\0\u2cee\0\u2d90\0\u2e32\0\u2ed4\0\u2f76"+ - "\0\u3018\0\u30ba\0\u315c\0\u31fe\0\u1440\0\u32a0\0\u3342\0\u1584"+ - "\0\u33e4\0\u3486\0\u3528\0\u35ca\0\u366c\0\u370e\0\u37b0\0\u3852"+ - "\0\u38f4\0\u3996\0\u3a38\0\u3ada\0\u3b7c\0\u3c1e\0\u3cc0\0\u3d62"+ - "\0\u3e04\0\u3ea6\0\u3f48\0\u3fea\0\u408c\0\u412e\0\u41d0\0\u4272"+ - "\0\u4314\0\u43b6\0\u4458\0\u44fa\0\u459c\0\u463e\0\u46e0\0\u4782"+ - "\0\u4824\0\u48c6\0\u4968\0\u4a0a\0\u4aac\0\u4b4e\0\u4bf0\0\u4c92"+ - "\0\u4d34\0\u4dd6\0\u4e78\0\u4f1a\0\u4fbc\0\u505e\0\u5100\0\u51a2"+ - "\0\u5244\0\u52e6\0\u5388\0\u542a\0\u54cc\0\u556e\0\u5610\0\u56b2"+ - "\0\u5754\0\u57f6\0\u5898\0\u593a\0\u59dc\0\u5a7e\0\u5b20\0\u5bc2"+ - 
"\0\u5c64\0\u5d06\0\u5da8\0\u5e4a\0\u5eec\0\u5f8e\0\u6030\0\u60d2"+ - "\0\u6174\0\u6216\0\u62b8\0\u635a\0\u63fc\0\u649e\0\u6540\0\u65e2"+ - "\0\u6684\0\u6726\0\u67c8\0\u686a\0\u690c\0\u69ae\0\u6a50\0\u6af2"+ - "\0\u6b94\0\u6c36\0\u6cd8\0\u6d7a\0\u6e1c\0\u6ebe\0\u6f60\0\u7002"+ - "\0\u70a4\0\u7146\0\u71e8\0\u728a\0\u732c\0\u73ce\0\u7470\0\u7512"+ - "\0\u75b4\0\u7656\0\u76f8\0\u779a\0\u783c\0\u78de\0\u7980\0\u7a22"+ - "\0\242\0\u7ac4\0\u7b66\0\u7c08\0\u7caa\0\u7d4c\0\u7dee\0\u7e90"+ - "\0\u7f32\0\u7fd4\0\u8076\0\u8118\0\u81ba\0\u825c\0\u82fe\0\u83a0"+ - "\0\u8442\0\u84e4\0\u8586\0\u8628\0\u86ca\0\u876c\0\u880e\0\u88b0"+ - "\0\u8952\0\u89f4\0\u8a96\0\u8b38\0\u8bda\0\u8c7c\0\u8d1e\0\u8dc0"+ - "\0\u8e62\0\u8f04\0\u8fa6\0\u9048\0\u90ea\0\u918c\0\u922e\0\u92d0"+ - "\0\u9372\0\u9414\0\u94b6\0\u9558\0\u95fa\0\u969c\0\u973e\0\u97e0"+ - "\0\u9882\0\u9924\0\u99c6\0\u9a68\0\u9b0a\0\u9bac\0\u9c4e\0\u9cf0"+ - "\0\u9d92\0\u9e34\0\u9ed6\0\u9f78\0\ua01a\0\ua0bc\0\ua15e\0\ua200"+ - "\0\ua2a2\0\ua344\0\ua3e6\0\ua488\0\ua52a\0\ua5cc\0\ua66e\0\ua710"+ - "\0\ua7b2\0\ua854\0\ua8f6\0\ua998\0\uaa3a\0\uaadc\0\uab7e\0\uac20"+ - "\0\uacc2\0\uad64\0\uae06\0\uaea8\0\uaf4a\0\uafec\0\ub08e\0\ub130"+ - "\0\ub1d2\0\ub274\0\ub316\0\ub3b8\0\ub45a\0\ub4fc\0\ub59e\0\ub640"+ - "\0\ub6e2\0\ub784\0\ub826\0\ub8c8\0\ub96a\0\uba0c\0\ubaae\0\ubb50"+ - "\0\ubbf2\0\ubc94\0\ubd36\0\ubdd8\0\ube7a\0\ubf1c\0\ubfbe\0\uc060"+ - "\0\uc102\0\uc1a4\0\uc246\0\uc2e8\0\uc38a\0\uc42c\0\uc4ce\0\uc570"+ - "\0\uc612\0\uc6b4\0\uc756\0\uc7f8\0\uc89a\0\uc93c\0\uc9de\0\uca80"+ - "\0\ucb22\0\ucbc4\0\ucc66\0\ucd08\0\ucdaa\0\uce4c\0\uceee\0\ucf90"+ - "\0\ud032\0\ud0d4\0\ud176\0\ud218\0\ud2ba\0\ud35c\0\ud3fe\0\ud4a0"+ - "\0\ud542\0\ud5e4\0\ud686\0\ud728\0\ud7ca\0\ud86c\0\ud90e\0\ud9b0"+ - "\0\uda52\0\udaf4\0\udb96\0\udc38\0\udcda\0\udd7c\0\ude1e\0\udec0"+ - "\0\udf62\0\ue004\0\ue0a6\0\ue148\0\ue1ea\0\ue28c\0\ue32e\0\ue3d0"+ - "\0\ue472\0\ue514\0\ue5b6\0\ue658\0\ue6fa\0\ue79c\0\ue83e\0\ue8e0"+ - 
"\0\ue982\0\uea24\0\ueac6\0\ueb68\0\uec0a\0\uecac\0\ued4e\0\uedf0"+ - "\0\u7980\0\uee92\0\uef34\0\uefd6\0\uf078\0\uf11a\0\uf1bc\0\uf25e"+ - "\0\uf300\0\uf3a2\0\uf444\0\uf4e6\0\uf588\0\uf62a\0\uf6cc\0\uf76e"+ - "\0\uf810\0\uf8b2\0\uf954\0\uf9f6\0\ufa98\0\ufb3a\0\ufbdc\0\ufc7e"+ - "\0\ufd20\0\ufdc2\0\ufe64\0\uff06\0\uffa8\1\112\1\354\1\u018e"+ - "\1\u0230\1\u02d2\1\u0374\1\u0416\1\u04b8\1\u055a\1\u05fc\1\u069e"+ - "\1\u0740\1\u07e2\1\u0884\1\u0926\1\u09c8\1\u0a6a\1\u0b0c\1\u0bae"+ - "\1\u0c50\1\u0cf2\1\u0d94\1\u0e36\1\u0ed8\1\u0f7a\1\u101c\1\u10be"+ - "\1\u1160\1\u1202\1\u12a4\1\u1346\1\u13e8\1\u148a\1\u152c\1\u15ce"+ - "\1\u1670\1\u1712\1\u17b4\1\u1856\1\u18f8\1\u199a\1\u1a3c\1\u1ade"+ - "\1\u1b80\1\u1c22\1\u1cc4\1\u1d66\1\u1e08\1\u1eaa\1\u1f4c\1\u1fee"+ - "\1\u2090\1\u2132\1\u21d4\1\u2276\1\u2318\1\u23ba\1\u245c\1\u24fe"+ - "\1\u25a0\1\u2642\1\u26e4\1\u2786\1\u2828\1\u28ca\1\u296c\1\u2a0e"+ - "\1\u2ab0\1\u2b52\1\u2bf4\1\u2c96\1\u2d38\1\u2dda\0\u14e2\1\u2e7c"+ - "\1\u2f1e\1\u2fc0\1\u3062\1\u3104\1\u31a6\1\u3248\1\u32ea\1\u338c"+ - "\1\u342e\1\u34d0\1\u3572\1\u3614\1\u36b6\1\u3758\1\u37fa\1\u389c"+ - "\1\u393e\1\u39e0\1\u3a82\1\u3b24\1\u3bc6\1\u3c68\1\u3d0a\1\u3dac"+ - "\1\u3e4e\1\u3ef0\1\u3f92\1\u4034\1\u40d6\1\u4178\1\u421a\1\u42bc"+ - "\1\u435e\1\u4400\1\u44a2\1\u4544\1\u45e6\1\u4688\1\u472a\1\u47cc"+ - "\1\u486e\1\u4910\1\u49b2\1\u4a54\1\u4af6\1\u4b98\1\u4c3a\1\u4cdc"+ - "\1\u4d7e\1\u4e20\1\u4ec2\1\u4f64\1\u5006\1\u50a8\1\u514a\1\u51ec"+ - "\1\u528e\1\u5330\1\u53d2\1\u5474\1\u5516\1\u55b8\1\u565a\1\u56fc"+ - "\1\u579e\1\u5840\1\u58e2\1\u5984\1\u5a26\1\u5ac8\1\u5b6a\1\u5c0c"+ - "\1\u5cae\1\u5d50\1\u5df2\1\u5e94\1\u5f36\1\u5fd8\1\u607a\1\u611c"+ - "\1\u61be\1\u6260\1\u6302\1\u63a4\1\u6446\1\u64e8\1\u658a\1\u662c"+ - "\1\u66ce\1\u6770\1\u6812\1\u68b4\1\u6956\1\u69f8\1\u6a9a\1\u6b3c"+ - "\1\u6bde\1\u6c80\1\u6d22\1\u6dc4\1\u6e66\1\u6f08\1\u6faa\1\u704c"+ - "\1\u70ee\1\u7190\1\u7232\1\u72d4\1\u7376\1\u7418\1\u74ba\1\u755c"+ - 
"\1\u75fe\1\u76a0\1\u7742\1\u77e4\1\u7886\1\u7928\1\u79ca\1\u7a6c"+ - "\1\u7b0e\1\u7bb0\1\u7c52\1\u7cf4\1\u7d96\1\u7e38\1\u7eda\1\u7f7c"+ - "\1\u801e\1\u80c0\1\u8162\1\u8204\1\u82a6\1\u8348\1\u83ea\1\u848c"+ - "\1\u852e\1\u85d0\1\u8672\1\u8714\1\u87b6\1\u8858\1\u88fa\1\u899c"+ - "\1\u8a3e\1\u8ae0\1\u8b82\1\u8c24\1\u8cc6\1\u8d68\1\u8e0a\1\u8eac"+ - "\1\u8f4e\1\u8ff0\1\u9092\1\u9134\1\u91d6\1\u9278\1\u931a\1\u93bc"+ - "\1\u945e\1\u9500\1\u95a2\1\u9644\1\u96e6\1\u9788\1\u982a\1\u98cc"+ - "\1\u996e\1\u9a10\1\u9ab2\1\u9b54\1\u9bf6\1\u9c98\1\u9d3a\1\u9ddc"+ - "\1\u9e7e\1\u9f20\1\u9fc2\1\ua064\1\ua106\1\ua1a8\1\ua24a\1\ua2ec"+ - "\1\ua38e\1\ua430\1\ua4d2\1\ua574\1\ua616\1\ua6b8\1\ua75a\1\ua7fc"+ - "\1\ua89e\1\ua940\1\ua9e2\1\uaa84\1\uab26\1\uabc8\1\uac6a\1\uad0c"+ - "\1\uadae\1\uae50\1\uaef2\1\uaf94\1\ub036\1\ub0d8\1\ub17a\1\ub21c"+ - "\1\ub2be\1\ub360\1\ub402\1\ub4a4\1\ub546\1\ub5e8\1\ub68a\1\ub72c"+ - "\1\ub7ce\1\ub870\1\ub912\1\ub9b4\1\uba56\1\ubaf8\1\ubb9a\1\ubc3c"+ - "\1\ubcde\1\ubd80\1\ube22\1\ubec4\1\ubf66\1\uc008\1\uc0aa\1\uc14c"+ - "\1\uc1ee\1\uc290\1\uc332\1\uc3d4\1\uc476\1\uc518\1\uc5ba\1\uc65c"+ - "\1\uc6fe\1\uc7a0\1\uc842\1\uc8e4\1\uc986\1\uca28\1\ucaca\1\ucb6c"+ - "\1\ucc0e\1\uccb0\1\ucd52\1\ucdf4\1\uce96\1\ucf38\1\ucfda\1\ud07c"+ - "\1\ud11e\1\ud1c0\1\ud262\1\ud304\1\ud3a6\1\ud448\1\ud4ea\1\ud58c"+ - "\1\ud62e\1\ud6d0\1\ud772\1\ud814\1\ud8b6\1\ud958\1\ud9fa\1\uda9c"+ - "\1\udb3e\1\udbe0\1\udc82\1\udd24\1\uddc6\1\ude68\1\udf0a\1\udfac"+ - "\1\ue04e\1\ue0f0\1\ue192\1\ue234\1\ue2d6\1\ue378\1\ue41a\1\ue4bc"+ - "\1\ue55e\1\ue600\1\ue6a2\1\ue744\1\ue7e6\1\ue888\1\ue92a\1\ue9cc"+ - "\1\uea6e\1\ueb10\1\uebb2\1\uec54\1\uecf6\1\ued98\1\uee3a\1\ueedc"+ - "\1\uef7e\1\uf020\1\uf0c2\1\uf164\1\uf206\1\uf2a8\1\uf34a\1\uf3ec"+ - "\1\uf48e\1\uf530\1\uf5d2\1\uf674\1\uf716\1\uf7b8\1\uf85a\1\uf8fc"+ - "\1\uf99e\1\ufa40\1\ufae2\1\ufb84\1\ufc26\1\ufcc8\1\ufd6a\1\ufe0c"+ - "\1\ufeae\1\uff50\1\ufff2\2\224\2\u0136\2\u01d8\2\u027a\2\u031c"+ - 
"\2\u03be\2\u0460\2\u0502\2\u05a4\2\u0646\2\u06e8\2\u078a\2\u082c"+ - "\2\u08ce\2\u0970\2\u0a12\2\u0ab4\2\u0b56\2\u0bf8\2\u0c9a\2\u0d3c"+ - "\2\u0dde\2\u0e80\2\u0f22\2\u0fc4\2\u1066\2\u1108\2\u11aa\2\u124c"+ - "\2\u12ee\2\u1390\2\u1432\2\u14d4\2\u1576\2\u1618\2\u16ba\2\u175c"+ - "\2\u17fe\2\u18a0\2\u1942\2\u19e4\2\u1a86\2\u1b28\2\u1bca\2\u1c6c"+ - "\2\u1d0e\2\u1db0\2\u1e52\2\u1ef4\2\u1f96\2\u2038\2\u20da\2\u217c"+ - "\2\u221e\2\u22c0\2\u2362\2\u2404\2\u24a6\2\u2548\2\u25ea\2\u268c"+ - "\2\u272e\2\u27d0\2\u2872\2\u2914\2\u29b6\2\u2a58\2\u2afa\2\u2b9c"+ - "\2\u2c3e\2\u2ce0\2\u2d82\2\u2e24\2\u2ec6\2\u2f68\2\u300a\2\u30ac"+ - "\2\u314e\2\u31f0\2\u3292\2\u3334\2\u33d6\2\u3478\2\u351a\2\u35bc"+ - "\2\u365e\2\u3700\2\u37a2\2\u3844\2\u38e6\2\u3988\2\u3a2a\2\u3acc"+ - "\2\u3b6e\2\u3c10\2\u3cb2\2\u3d54\2\u3df6\2\u3e98\2\u3f3a\2\u3fdc"+ - "\2\u407e\2\u4120\2\u41c2\2\u4264\2\u4306\2\u43a8\2\u444a\2\u44ec"+ - "\2\u458e\2\u4630\2\u46d2\2\u4774\2\u4816\2\u48b8\2\u495a\2\u49fc"+ - "\2\u4a9e\2\u4b40\2\u4be2\2\u4c84\2\u4d26\2\u4dc8\2\u4e6a\2\u4f0c"+ - "\2\u4fae\2\u5050\2\u50f2\2\u5194\2\u5236\2\u52d8\2\u537a\2\u541c"+ - "\2\u54be\2\u5560\2\u5602\2\u56a4\2\u5746\2\u57e8\2\u588a\2\u592c"+ - "\2\u59ce\2\u5a70\2\u5b12\2\u5bb4\2\u5c56\2\u5cf8\2\u5d9a\2\u5e3c"+ - "\2\u5ede\2\u5f80\2\u6022\2\u60c4\2\u6166\2\u6208\2\u62aa\2\u634c"+ - "\2\u63ee\2\u6490\2\u6532\2\u65d4\2\u6676\2\u6718\2\u67ba\2\u685c"+ - "\2\u68fe\2\u69a0\2\u6a42\2\u6ae4\2\u6b86\2\u6c28\2\u6cca\2\u6d6c"+ - "\2\u6e0e\2\u6eb0\2\u6f52\2\u6ff4\2\u7096\2\u7138\2\u71da\2\u727c"+ - "\2\u731e\2\u73c0\2\u7462\2\u7504\2\u75a6\2\u7648\2\u76ea\2\u778c"+ - "\2\u782e\2\u78d0\2\u7972\2\u7a14\2\u7ab6\2\u7b58\2\u7bfa\2\u7c9c"+ - "\2\u7d3e\2\u7de0\2\u7e82\2\u7f24\2\u7fc6\2\u8068\2\u810a\2\u81ac"+ - "\2\u824e\2\u82f0\2\u8392\2\u8434\2\u84d6\2\u8578\2\u861a\2\u86bc"+ - "\2\u875e\2\u8800\2\u88a2\2\u8944\2\u89e6\2\u8a88\2\u8b2a\2\u8bcc"+ - "\2\u8c6e\2\u8d10\2\u8db2\2\u8e54\2\u8ef6\2\u8f98\2\u903a\2\u90dc"+ - 
"\2\u917e\2\u9220\2\u92c2\2\u9364\2\u9406\2\u94a8\2\u954a\2\u95ec"+ - "\2\u968e\2\u9730\2\u97d2\2\u9874\2\u9916\2\u99b8\2\u9a5a\2\u9afc"+ - "\2\u9b9e\2\u9c40\2\u9ce2\2\u9d84\2\u9e26\2\u9ec8\2\u9f6a\2\ua00c"+ - "\2\ua0ae\2\ua150\2\ua1f2\2\ua294\2\ua336\2\ua3d8\2\ua47a\2\ua51c"+ - "\2\ua5be\2\ua660\2\ua702\2\ua7a4\2\ua846\2\ua8e8\2\ua98a\2\uaa2c"+ - "\2\uaace\2\uab70\2\uac12\2\uacb4\2\uad56\2\uadf8\2\uae9a\2\uaf3c"+ - "\2\uafde\2\ub080\2\ub122\2\ub1c4\2\ub266\2\ub308\2\ub3aa\2\ub44c"+ - "\2\ub4ee\2\ub590\2\ub632\2\ub6d4\2\ub776\2\ub818\2\ub8ba\2\ub95c"+ - "\2\ub9fe\2\ubaa0\2\ubb42\2\ubbe4\2\ubc86\2\ubd28\2\ubdca\2\ube6c"+ - "\2\ubf0e\2\ubfb0\2\uc052\2\uc0f4\2\uc196\2\uc238\2\uc2da\2\uc37c"+ - "\2\uc41e\2\uc4c0\2\uc562\2\uc604\2\uc6a6\2\uc748\2\uc7ea\2\uc88c"+ - "\2\uc92e\2\uc9d0\2\uca72\2\ucb14\2\ucbb6\2\ucc58\2\uccfa\2\ucd9c"+ - "\2\uce3e\2\ucee0\2\ucf82\2\ud024\2\ud0c6\2\ud168\2\ud20a\2\ud2ac"+ - "\2\ud34e\2\ud3f0\2\ud492\2\ud534\2\ud5d6\2\ud678\2\ud71a\2\ud7bc"+ - "\2\ud85e\2\ud900\2\ud9a2\2\uda44\2\udae6\2\udb88\2\udc2a\2\udccc"+ - "\2\udd6e\2\ude10\2\udeb2\2\udf54\2\udff6\2\ue098\2\ue13a\2\ue1dc"+ - "\2\ue27e\2\ue320\2\ue3c2\2\ue464\2\ue506\2\ue5a8\2\ue64a\2\ue6ec"+ - "\2\ue78e\2\ue830\2\ue8d2\2\ue974\2\uea16\2\ueab8\2\ueb5a\2\uebfc"+ - "\2\uec9e\2\ued40\2\uede2\2\uee84"; + "\0\0\0\243\0\u0146\0\u01e9\0\u028c\0\u032f\0\u03d2\0\u0475"+ + "\0\u0518\0\u05bb\0\u065e\0\u0701\0\u07a4\0\u0847\0\u08ea\0\u098d"+ + "\0\u0a30\0\u0ad3\0\u0b76\0\u0c19\0\u0cbc\0\u0d5f\0\u0e02\0\u0ea5"+ + "\0\u0f48\0\243\0\243\0\u0feb\0\u108e\0\u1131\0\u11d4\0\u1277"+ + "\0\u131a\0\u13bd\0\u1460\0\u1503\0\u15a6\0\u1649\0\u0146\0\u01e9"+ + "\0\u028c\0\u032f\0\u03d2\0\u16ec\0\u178f\0\u1832\0\u18d5\0\u0701"+ + "\0\u1978\0\u1a1b\0\u1abe\0\u1b61\0\u1c04\0\u1ca7\0\u1d4a\0\u0518"+ + "\0\u05bb\0\u1ded\0\u1e90\0\u1f33\0\u1fd6\0\u2079\0\u211c\0\u21bf"+ + "\0\u2262\0\u2305\0\u23a8\0\u244b\0\u24ee\0\u2591\0\u2634\0\u26d7"+ + "\0\u277a\0\u281d\0\u28c0\0\u0ea5\0\u2963\0\u2a06\0\u2aa9\0\u2b4c"+ + 
"\0\u2bef\0\u2c92\0\u2d35\0\u108e\0\u2dd8\0\u2e7b\0\u2f1e\0\u2fc1"+ + "\0\u3064\0\u3107\0\u31aa\0\u324d\0\u32f0\0\u3393\0\u3436\0\u34d9"+ + "\0\u357c\0\u361f\0\u36c2\0\u3765\0\u1503\0\u3808\0\u38ab\0\u1649"+ + "\0\u394e\0\u39f1\0\u3a94\0\u3b37\0\u3bda\0\u3c7d\0\u3d20\0\u3dc3"+ + "\0\u3e66\0\u3f09\0\u3fac\0\u404f\0\u40f2\0\u4195\0\u4238\0\u42db"+ + "\0\u437e\0\u4421\0\u44c4\0\u4567\0\u460a\0\u46ad\0\u4750\0\u47f3"+ + "\0\u4896\0\u4939\0\u49dc\0\u4a7f\0\u4b22\0\u4bc5\0\u4c68\0\u4d0b"+ + "\0\u4dae\0\u4e51\0\u4ef4\0\u4f97\0\u503a\0\u50dd\0\u5180\0\u5223"+ + "\0\u52c6\0\u5369\0\u540c\0\u54af\0\u5552\0\u55f5\0\u5698\0\u573b"+ + "\0\u57de\0\u5881\0\u5924\0\u59c7\0\u5a6a\0\u5b0d\0\u5bb0\0\u5c53"+ + "\0\u5cf6\0\u5d99\0\u5e3c\0\u5edf\0\u5f82\0\u6025\0\u60c8\0\u616b"+ + "\0\u620e\0\u62b1\0\u6354\0\u63f7\0\u649a\0\u653d\0\u65e0\0\u6683"+ + "\0\u6726\0\u67c9\0\u686c\0\u690f\0\u69b2\0\u6a55\0\u6af8\0\u6b9b"+ + "\0\u6c3e\0\u6ce1\0\u6d84\0\u6e27\0\u6eca\0\u6f6d\0\u7010\0\u70b3"+ + "\0\u7156\0\u71f9\0\u729c\0\u733f\0\u73e2\0\u7485\0\u7528\0\u75cb"+ + "\0\u766e\0\u7711\0\u77b4\0\u7857\0\u78fa\0\u799d\0\u7a40\0\u7ae3"+ + "\0\u7b86\0\u7c29\0\u7ccc\0\u7d6f\0\u7e12\0\u7eb5\0\u7f58\0\u7ffb"+ + "\0\u809e\0\u8141\0\u81e4\0\u8287\0\u832a\0\243\0\u83cd\0\u8470"+ + "\0\u8513\0\u85b6\0\u8659\0\u86fc\0\u879f\0\u8842\0\u88e5\0\u8988"+ + "\0\u8a2b\0\u8ace\0\u8b71\0\u8c14\0\u8cb7\0\u8d5a\0\u8dfd\0\u8ea0"+ + "\0\u8f43\0\u8fe6\0\u9089\0\u912c\0\u91cf\0\u9272\0\u9315\0\u93b8"+ + "\0\u945b\0\u94fe\0\u95a1\0\u9644\0\u96e7\0\u978a\0\u982d\0\u98d0"+ + "\0\u9973\0\u9a16\0\u9ab9\0\u9b5c\0\u9bff\0\u9ca2\0\u9d45\0\u9de8"+ + "\0\u9e8b\0\u9f2e\0\u9fd1\0\ua074\0\ua117\0\ua1ba\0\ua25d\0\ua300"+ + "\0\ua3a3\0\ua446\0\ua4e9\0\ua58c\0\ua62f\0\ua6d2\0\ua775\0\ua818"+ + "\0\ua8bb\0\ua95e\0\uaa01\0\uaaa4\0\uab47\0\uabea\0\uac8d\0\uad30"+ + "\0\uadd3\0\uae76\0\uaf19\0\uafbc\0\ub05f\0\ub102\0\ub1a5\0\ub248"+ + "\0\ub2eb\0\ub38e\0\ub431\0\ub4d4\0\ub577\0\ub61a\0\ub6bd\0\ub760"+ + 
"\0\ub803\0\ub8a6\0\ub949\0\ub9ec\0\uba8f\0\ubb32\0\ubbd5\0\ubc78"+ + "\0\ubd1b\0\ubdbe\0\ube61\0\ubf04\0\ubfa7\0\uc04a\0\uc0ed\0\uc190"+ + "\0\uc233\0\uc2d6\0\uc379\0\uc41c\0\uc4bf\0\uc562\0\uc605\0\uc6a8"+ + "\0\uc74b\0\uc7ee\0\uc891\0\uc934\0\uc9d7\0\uca7a\0\ucb1d\0\ucbc0"+ + "\0\ucc63\0\ucd06\0\ucda9\0\uce4c\0\uceef\0\ucf92\0\ud035\0\ud0d8"+ + "\0\ud17b\0\ud21e\0\ud2c1\0\ud364\0\ud407\0\ud4aa\0\ud54d\0\ud5f0"+ + "\0\ud693\0\ud736\0\ud7d9\0\ud87c\0\ud91f\0\ud9c2\0\uda65\0\udb08"+ + "\0\udbab\0\udc4e\0\udcf1\0\udd94\0\ude37\0\udeda\0\udf7d\0\ue020"+ + "\0\ue0c3\0\ue166\0\ue209\0\ue2ac\0\ue34f\0\ue3f2\0\ue495\0\ue538"+ + "\0\ue5db\0\ue67e\0\ue721\0\ue7c4\0\ue867\0\ue90a\0\ue9ad\0\uea50"+ + "\0\ueaf3\0\ueb96\0\uec39\0\uecdc\0\ued7f\0\uee22\0\ueec5\0\uef68"+ + "\0\uf00b\0\uf0ae\0\uf151\0\uf1f4\0\uf297\0\uf33a\0\uf3dd\0\uf480"+ + "\0\uf523\0\uf5c6\0\uf669\0\uf70c\0\uf7af\0\u8287\0\uf852\0\uf8f5"+ + "\0\uf998\0\ufa3b\0\ufade\0\ufb81\0\ufc24\0\ufcc7\0\ufd6a\0\ufe0d"+ + "\0\ufeb0\0\uff53\0\ufff6\1\231\1\u013c\1\u01df\1\u0282\1\u0325"+ + "\1\u03c8\1\u046b\1\u050e\1\u05b1\1\u0654\1\u06f7\1\u079a\1\u083d"+ + "\1\u08e0\1\u0983\1\u0a26\1\u0ac9\1\u0b6c\1\u0c0f\1\u0cb2\1\u0d55"+ + "\1\u0df8\1\u0e9b\1\u0f3e\1\u0fe1\1\u1084\1\u1127\1\u11ca\1\u126d"+ + "\1\u1310\1\u13b3\1\u1456\1\u14f9\1\u159c\1\u163f\1\u16e2\1\u1785"+ + "\1\u1828\1\u18cb\1\u196e\1\u1a11\1\u1ab4\1\u1b57\1\u1bfa\1\u1c9d"+ + "\1\u1d40\1\u1de3\1\u1e86\1\u1f29\1\u1fcc\1\u206f\1\u2112\1\u21b5"+ + "\1\u2258\1\u22fb\1\u239e\1\u2441\1\u24e4\1\u2587\1\u262a\1\u26cd"+ + "\1\u2770\1\u2813\1\u28b6\1\u2959\1\u29fc\1\u2a9f\1\u2b42\1\u2be5"+ + "\1\u2c88\1\u2d2b\1\u2dce\1\u2e71\1\u2f14\1\u2fb7\1\u305a\1\u30fd"+ + "\1\u31a0\1\u3243\1\u32e6\1\u3389\1\u342c\1\u34cf\1\u3572\1\u3615"+ + "\1\u36b8\1\u375b\1\u37fe\1\u38a1\1\u3944\1\u39e7\1\u3a8a\1\u3b2d"+ + "\1\u3bd0\1\u3c73\1\u3d16\1\u3db9\1\u3e5c\1\u3eff\0\u15a6\1\u3fa2"+ + "\1\u4045\1\u40e8\1\u418b\1\u422e\1\u42d1\1\u4374\1\u4417\1\u44ba"+ + 
"\1\u455d\1\u4600\1\u46a3\1\u4746\1\u47e9\1\u488c\1\u492f\1\u49d2"+ + "\1\u4a75\1\u4b18\1\u4bbb\1\u4c5e\1\u4d01\1\u4da4\1\u4e47\1\u4eea"+ + "\1\u4f8d\1\u5030\1\u50d3\1\u5176\1\u5219\1\u52bc\1\u535f\1\u5402"+ + "\1\u54a5\1\u5548\1\u55eb\1\u568e\1\u5731\1\u57d4\1\u5877\1\u591a"+ + "\1\u59bd\1\u5a60\1\u5b03\1\u5ba6\1\u5c49\1\u5cec\1\u5d8f\1\u5e32"+ + "\1\u5ed5\1\u5f78\1\u601b\1\u60be\1\u6161\1\u6204\1\u62a7\1\u634a"+ + "\1\u63ed\1\u6490\1\u6533\1\u65d6\1\u6679\1\u671c\1\u67bf\1\u6862"+ + "\1\u6905\1\u69a8\1\u6a4b\1\u6aee\1\u6b91\1\u6c34\1\u6cd7\1\u6d7a"+ + "\1\u6e1d\1\u6ec0\1\u6f63\1\u7006\1\u70a9\1\u714c\1\u71ef\1\u7292"+ + "\1\u7335\1\u73d8\1\u747b\1\u751e\1\u75c1\1\u7664\1\u7707\1\u77aa"+ + "\1\u784d\1\u78f0\1\u7993\1\u7a36\1\u7ad9\1\u7b7c\1\u7c1f\1\u7cc2"+ + "\1\u7d65\1\u7e08\1\u7eab\1\u7f4e\1\u7ff1\1\u8094\1\u8137\1\u81da"+ + "\1\u827d\1\u8320\1\u83c3\1\u8466\1\u8509\1\u85ac\1\u864f\1\u86f2"+ + "\1\u8795\1\u8838\1\u88db\1\u897e\1\u8a21\1\u8ac4\1\u8b67\1\u8c0a"+ + "\1\u8cad\1\u8d50\1\u8df3\1\u8e96\1\u8f39\1\u8fdc\1\u907f\1\u9122"+ + "\1\u91c5\1\u9268\1\u930b\1\u93ae\1\u9451\1\u94f4\1\u9597\1\u963a"+ + "\1\u96dd\1\u9780\1\u9823\1\u98c6\1\u9969\1\u9a0c\1\u9aaf\1\u9b52"+ + "\1\u9bf5\1\u9c98\1\u9d3b\1\u9dde\1\u9e81\1\u9f24\1\u9fc7\1\ua06a"+ + "\1\ua10d\1\ua1b0\1\ua253\1\ua2f6\1\ua399\1\ua43c\1\ua4df\1\ua582"+ + "\1\ua625\1\ua6c8\1\ua76b\1\ua80e\1\ua8b1\1\ua954\1\ua9f7\1\uaa9a"+ + "\1\uab3d\1\uabe0\1\uac83\1\uad26\1\uadc9\1\uae6c\1\uaf0f\1\uafb2"+ + "\1\ub055\1\ub0f8\1\ub19b\1\ub23e\1\ub2e1\1\ub384\1\ub427\1\ub4ca"+ + "\1\ub56d\1\ub610\1\ub6b3\1\ub756\1\ub7f9\1\ub89c\1\ub93f\1\ub9e2"+ + "\1\uba85\1\ubb28\1\ubbcb\1\ubc6e\1\ubd11\1\ubdb4\1\ube57\1\ubefa"+ + "\1\ubf9d\1\uc040\1\uc0e3\1\uc186\1\uc229\1\uc2cc\1\uc36f\1\uc412"+ + "\1\uc4b5\1\uc558\1\uc5fb\1\uc69e\1\uc741\1\uc7e4\1\uc887\1\uc92a"+ + "\1\uc9cd\1\uca70\1\ucb13\1\ucbb6\1\ucc59\1\uccfc\1\ucd9f\1\uce42"+ + "\1\ucee5\1\ucf88\1\ud02b\1\ud0ce\1\ud171\1\ud214\1\ud2b7\1\ud35a"+ + 
"\1\ud3fd\1\ud4a0\1\ud543\1\ud5e6\1\ud689\1\ud72c\1\ud7cf\1\ud872"+ + "\1\ud915\1\ud9b8\1\uda5b\1\udafe\1\udba1\1\udc44\1\udce7\1\udd8a"+ + "\1\ude2d\1\uded0\1\udf73\1\ue016\1\ue0b9\1\ue15c\1\ue1ff\1\ue2a2"+ + "\1\ue345\1\ue3e8\1\ue48b\1\ue52e\1\ue5d1\1\ue674\1\ue717\1\ue7ba"+ + "\1\ue85d\1\ue900\1\ue9a3\1\uea46\1\ueae9\1\ueb8c\1\uec2f\1\uecd2"+ + "\1\ued75\1\uee18\1\ueebb\1\uef5e\1\uf001\1\uf0a4\1\uf147\1\uf1ea"+ + "\1\uf28d\1\uf330\1\uf3d3\1\uf476\1\uf519\1\uf5bc\1\uf65f\1\uf702"+ + "\1\uf7a5\1\uf848\1\uf8eb\1\uf98e\1\ufa31\1\ufad4\1\ufb77\1\ufc1a"+ + "\1\ufcbd\1\ufd60\1\ufe03\1\ufea6\1\uff49\1\uffec\2\217\2\u0132"+ + "\2\u01d5\2\u0278\2\u031b\2\u03be\2\u0461\2\u0504\2\u05a7\2\u064a"+ + "\2\u06ed\2\u0790\2\u0833\2\u08d6\2\u0979\2\u0a1c\2\u0abf\2\u0b62"+ + "\2\u0c05\2\u0ca8\2\u0d4b\2\u0dee\2\u0e91\2\u0f34\2\u0fd7\2\u107a"+ + "\2\u111d\2\u11c0\2\u1263\2\u1306\2\u13a9\2\u144c\2\u14ef\2\u1592"+ + "\2\u1635\2\u16d8\2\u177b\2\u181e\2\u18c1\2\u1964\2\u1a07\2\u1aaa"+ + "\2\u1b4d\2\u1bf0\2\u1c93\2\u1d36\2\u1dd9\2\u1e7c\2\u1f1f\2\u1fc2"+ + "\2\u2065\2\u2108\2\u21ab\2\u224e\2\u22f1\2\u2394\2\u2437\2\u24da"+ + "\2\u257d\2\u2620\2\u26c3\2\u2766\2\u2809\2\u28ac\2\u294f\2\u29f2"+ + "\2\u2a95\2\u2b38\2\u2bdb\2\u2c7e\2\u2d21\2\u2dc4\2\u2e67\2\u2f0a"+ + "\2\u2fad\2\u3050\2\u30f3\2\u3196\2\u3239\2\u32dc\2\u337f\2\u3422"+ + "\2\u34c5\2\u3568\2\u360b\2\u36ae\2\u3751\2\u37f4\2\u3897\2\u393a"+ + "\2\u39dd\2\u3a80\2\u3b23\2\u3bc6\2\u3c69\2\u3d0c\2\u3daf\2\u3e52"+ + "\2\u3ef5\2\u3f98\2\u403b\2\u40de\2\u4181\2\u4224\2\u42c7\2\u436a"+ + "\2\u440d\2\u44b0\2\u4553\2\u45f6\2\u4699\2\u473c\2\u47df\2\u4882"+ + "\2\u4925\2\u49c8\2\u4a6b\2\u4b0e\2\u4bb1\2\u4c54\2\u4cf7\2\u4d9a"+ + "\2\u4e3d\2\u4ee0\2\u4f83\2\u5026\2\u50c9\2\u516c\2\u520f\2\u52b2"+ + "\2\u5355\2\u53f8\2\u549b\2\u553e\2\u55e1\2\u5684\2\u5727\2\u57ca"+ + "\2\u586d\2\u5910\2\u59b3\2\u5a56\2\u5af9\2\u5b9c\2\u5c3f\2\u5ce2"+ + "\2\u5d85\2\u5e28\2\u5ecb\2\u5f6e\2\u6011\2\u60b4\2\u6157\2\u61fa"+ + 
"\2\u629d\2\u6340\2\u63e3\2\u6486\2\u6529\2\u65cc\2\u666f\2\u6712"+ + "\2\u67b5\2\u6858\2\u68fb\2\u699e\2\u6a41\2\u6ae4\2\u6b87\2\u6c2a"+ + "\2\u6ccd\2\u6d70\2\u6e13\2\u6eb6\2\u6f59\2\u6ffc\2\u709f\2\u7142"+ + "\2\u71e5\2\u7288\2\u732b\2\u73ce\2\u7471\2\u7514\2\u75b7\2\u765a"+ + "\2\u76fd\2\u77a0\2\u7843\2\u78e6\2\u7989\2\u7a2c\2\u7acf\2\u7b72"+ + "\2\u7c15\2\u7cb8\2\u7d5b\2\u7dfe\2\u7ea1\2\u7f44\2\u7fe7\2\u808a"+ + "\2\u812d\2\u81d0\2\u8273\2\u8316\2\u83b9\2\u845c\2\u84ff\2\u85a2"+ + "\2\u8645\2\u86e8\2\u878b\2\u882e\2\u88d1\2\u8974\2\u8a17\2\u8aba"+ + "\2\u8b5d\2\u8c00\2\u8ca3\2\u8d46\2\u8de9\2\u8e8c\2\u8f2f\2\u8fd2"+ + "\2\u9075\2\u9118\2\u91bb\2\u925e\2\u9301\2\u93a4\2\u9447\2\u94ea"+ + "\2\u958d\2\u9630\2\u96d3\2\u9776\2\u9819\2\u98bc\2\u995f\2\u9a02"+ + "\2\u9aa5\2\u9b48\2\u9beb\2\u9c8e\2\u9d31\2\u9dd4\2\u9e77\2\u9f1a"+ + "\2\u9fbd\2\ua060\2\ua103\2\ua1a6\2\ua249\2\ua2ec\2\ua38f\2\ua432"+ + "\2\ua4d5\2\ua578\2\ua61b\2\ua6be\2\ua761\2\ua804\2\ua8a7\2\ua94a"+ + "\2\ua9ed\2\uaa90\2\uab33\2\uabd6\2\uac79\2\uad1c\2\uadbf\2\uae62"+ + "\2\uaf05\2\uafa8\2\ub04b\2\ub0ee\2\ub191\2\ub234\2\ub2d7\2\ub37a"+ + "\2\ub41d\2\ub4c0\2\ub563\2\ub606\2\ub6a9\2\ub74c\2\ub7ef\2\ub892"+ + "\2\ub935\2\ub9d8\2\uba7b\2\ubb1e\2\ubbc1\2\ubc64\2\ubd07\2\ubdaa"+ + "\2\ube4d\2\ubef0\2\ubf93\2\uc036\2\uc0d9\2\uc17c\2\uc21f\2\uc2c2"+ + "\2\uc365\2\uc408\2\uc4ab\2\uc54e\2\uc5f1\2\uc694\2\uc737\2\uc7da"+ + "\2\uc87d\2\uc920\2\uc9c3\2\uca66\2\ucb09\2\ucbac\2\ucc4f\2\uccf2"+ + "\2\ucd95\2\uce38\2\ucedb\2\ucf7e\2\ud021\2\ud0c4\2\ud167\2\ud20a"+ + "\2\ud2ad\2\ud350\2\ud3f3\2\ud496\2\ud539\2\ud5dc\2\ud67f\2\ud722"+ + "\2\ud7c5\2\ud868\2\ud90b\2\ud9ae\2\uda51\2\udaf4\2\udb97\2\udc3a"+ + "\2\udcdd\2\udd80\2\ude23\2\udec6\2\udf69\2\ue00c\2\ue0af\2\ue152"+ + "\2\ue1f5\2\ue298\2\ue33b\2\ue3de\2\ue481\2\ue524\2\ue5c7\2\ue66a"+ + "\2\ue70d\2\ue7b0\2\ue853\2\ue8f6\2\ue999\2\uea3c\2\ueadf\2\ueb82"+ + "\2\uec25\2\uecc8\2\ued6b\2\uee0e\2\ueeb1\2\uef54\2\ueff7\2\uf09a"+ + 
"\2\uf13d\2\uf1e0\2\uf283\2\uf326\2\uf3c9\2\uf46c\2\uf50f\2\uf5b2"+ + "\2\uf655\2\uf6f8\2\uf79b\2\uf83e\2\uf8e1\2\uf984\2\ufa27\2\ufaca"+ + "\2\ufb6d\2\ufc10\2\ufcb3\2\ufd56\2\ufdf9\2\ufe9c\2\uff3f\2\uffe2"+ + "\3\205\3\u0128\3\u01cb\3\u026e\3\u0311\3\u03b4\3\u0457\3\u04fa"+ + "\3\u059d\3\u0640\3\u06e3\3\u0786\3\u0829\3\u08cc\3\u096f\3\u0a12"+ + "\3\u0ab5\3\u0b58\3\u0bfb\3\u0c9e\3\u0d41\3\u0de4\3\u0e87\3\u0f2a"+ + "\3\u0fcd\3\u1070\3\u1113\3\u11b6\3\u1259\3\u12fc\3\u139f\3\u1442"+ + "\3\u14e5\3\u1588\3\u162b\3\u16ce\3\u1771\3\u1814\3\u18b7\3\u195a"+ + "\3\u19fd\3\u1aa0\3\u1b43\3\u1be6\3\u1c89\3\u1d2c\3\u1dcf\3\u1e72"+ + "\3\u1f15\3\u1fb8\3\u205b\3\u20fe\3\u21a1\3\u2244\3\u22e7\3\u238a"+ + "\3\u242d\3\u24d0\3\u2573\3\u2616\3\u26b9\3\u275c\3\u27ff\3\u28a2"+ + "\3\u2945\3\u29e8\3\u2a8b\3\u2b2e\3\u2bd1\3\u2c74\3\u2d17\3\u2dba"+ + "\3\u2e5d\3\u2f00\3\u2fa3\3\u3046\3\u30e9\3\u318c\3\u322f\3\u32d2"+ + "\3\u3375\3\u3418\3\u34bb\3\u355e\3\u3601\3\u36a4\3\u3747\3\u37ea"+ + "\3\u388d\3\u3930\3\u39d3\3\u3a76\3\u3b19\3\u3bbc\3\u3c5f\3\u3d02"+ + "\3\u3da5\3\u3e48\3\u3eeb\3\u3f8e\3\u4031\3\u40d4\3\u4177\3\u421a"+ + "\3\u42bd\3\u4360\3\u4403"; private static int [] zzUnpackRowMap() { - int [] result = new int[1204]; + int [] result = new int[1331]; int offset = 0; offset = zzUnpackRowMap(ZZ_ROWMAP_PACKED_0, offset, result); return result; @@ -452,2470 +468,2693 @@ public final class UAX29URLEmailTokenizer extends Tokenizer { "\3\2\1\13\11\2\1\14\2\2\1\15\43\2\1\16"+ "\1\2\1\17\3\2\1\20\1\21\1\2\1\22\1\2"+ "\1\23\2\2\1\24\1\2\1\25\1\2\1\26\1\27"+ - "\3\2\1\30\2\31\1\32\1\33\1\34\6\35\1\36"+ - "\16\35\1\37\4\35\1\34\1\40\2\41\1\40\4\41"+ - "\1\42\1\41\1\2\1\34\1\43\1\34\1\2\2\34"+ - "\1\2\3\34\1\44\2\2\1\34\1\45\3\2\2\34"+ - "\1\2\244\0\1\25\11\0\1\25\20\0\1\25\22\0"+ - "\1\25\10\0\3\25\17\0\1\25\10\0\1\25\117\0"+ + "\3\2\1\30\2\31\1\32\1\33\1\34\1\35\6\36"+ + "\1\37\16\36\1\40\4\36\1\35\1\41\2\42\1\41"+ + "\5\42\1\43\1\2\1\35\1\44\1\35\1\2\2\35"+ + 
"\1\2\3\35\1\45\2\2\1\35\1\46\3\2\2\35"+ + "\1\2\245\0\1\25\11\0\1\25\20\0\1\25\22\0"+ + "\1\25\10\0\3\25\17\0\1\25\10\0\1\25\120\0"+ "\1\25\1\0\1\25\1\0\1\25\1\0\1\25\1\0"+ "\1\25\1\0\3\25\1\0\5\25\1\0\3\25\1\0"+ "\11\25\1\0\2\25\1\0\16\25\1\0\2\25\1\0"+ "\21\25\1\0\1\25\1\0\3\25\2\0\1\25\1\0"+ - "\1\25\1\0\2\25\1\0\1\25\112\0\1\25\3\0"+ + "\1\25\1\0\2\25\1\0\1\25\113\0\1\25\3\0"+ "\1\25\5\0\2\25\3\0\1\25\13\0\1\25\1\0"+ "\1\25\4\0\2\25\4\0\1\25\1\0\1\25\3\0"+ "\2\25\1\0\1\25\5\0\3\25\1\0\1\25\15\0"+ - "\1\25\10\0\1\25\117\0\1\25\3\0\1\25\1\0"+ + "\1\25\10\0\1\25\120\0\1\25\3\0\1\25\1\0"+ "\1\25\1\0\1\25\1\0\3\25\2\0\4\25\1\0"+ "\3\25\2\0\3\25\1\0\4\25\1\0\2\25\2\0"+ "\3\25\1\0\11\25\1\0\2\25\1\0\16\25\1\0"+ "\2\25\1\0\1\25\1\0\3\25\2\0\1\25\1\0"+ - "\1\25\1\0\2\25\1\0\1\25\112\0\1\25\3\0"+ + "\1\25\1\0\2\25\1\0\1\25\113\0\1\25\3\0"+ "\1\25\3\0\1\25\1\0\3\25\2\0\1\25\1\0"+ "\2\25\1\0\3\25\3\0\2\25\1\0\1\25\1\0"+ "\2\25\1\0\2\25\3\0\2\25\1\0\1\25\1\0"+ "\1\25\1\0\2\25\1\0\2\25\1\0\2\25\1\0"+ "\5\25\1\0\5\25\1\0\2\25\1\0\2\25\1\0"+ - "\1\25\1\0\3\25\4\0\1\25\4\0\1\25\124\0"+ + "\1\25\1\0\3\25\4\0\1\25\4\0\1\25\125\0"+ "\3\25\5\0\1\25\1\0\1\25\1\0\1\25\4\0"+ "\1\25\14\0\1\25\5\0\1\25\11\0\2\25\12\0"+ - "\1\26\1\0\2\25\12\0\1\25\117\0\1\25\1\0"+ + "\1\26\1\0\2\25\12\0\1\25\120\0\1\25\1\0"+ "\1\26\7\0\2\25\2\0\5\25\2\0\2\25\4\0"+ "\6\25\1\0\2\25\4\0\5\25\1\0\5\25\1\0"+ "\2\25\1\0\3\25\1\0\4\25\1\0\5\25\1\26"+ "\1\0\1\25\1\0\1\25\1\0\3\25\2\0\1\25"+ - "\1\0\1\25\1\0\1\25\2\0\1\25\112\0\1\25"+ + "\1\0\1\25\1\0\1\25\2\0\1\25\113\0\1\25"+ "\3\0\1\25\5\0\2\25\3\0\1\25\4\0\3\25"+ "\4\0\1\25\1\0\1\25\2\0\1\25\1\0\2\25"+ "\4\0\1\25\1\0\1\25\3\0\2\25\1\0\1\25"+ "\5\0\3\25\1\0\1\25\10\0\1\25\1\0\2\26"+ - "\1\0\1\25\10\0\1\25\117\0\1\25\3\0\1\25"+ + "\1\0\1\25\10\0\1\25\120\0\1\25\3\0\1\25"+ "\6\0\2\25\5\0\1\25\1\0\1\25\1\0\1\25"+ "\1\0\11\25\2\0\1\25\4\0\1\25\4\0\6\25"+ "\2\0\1\25\1\0\1\25\1\0\3\25\3\0\2\25"+ "\4\0\3\25\1\0\1\25\10\0\1\25\1\0\2\25"+ - 
"\114\0\1\25\11\0\2\25\17\0\1\25\6\0\2\25"+ + "\115\0\1\25\11\0\2\25\17\0\1\25\6\0\2\25"+ "\4\0\1\25\5\0\1\25\2\0\1\25\5\0\3\25"+ - "\1\0\1\25\15\0\1\25\10\0\1\25\117\0\1\25"+ + "\1\0\1\25\15\0\1\25\10\0\1\25\120\0\1\25"+ "\3\0\1\25\5\0\1\25\32\0\15\25\5\0\3\25"+ "\1\0\1\25\5\0\1\25\7\0\1\25\2\0\1\25"+ - "\5\0\1\25\2\0\1\25\1\0\1\25\201\0\1\33"+ - "\21\0\1\27\130\0\1\32\3\0\1\32\3\0\1\32"+ + "\5\0\1\25\2\0\1\25\1\0\1\25\202\0\1\33"+ + "\21\0\1\27\131\0\1\32\3\0\1\32\3\0\1\32"+ "\1\0\3\32\2\0\1\32\2\0\1\32\1\0\3\32"+ "\3\0\2\32\1\0\1\32\1\0\2\32\1\0\2\32"+ "\3\0\2\32\1\0\1\32\3\0\2\32\1\0\2\32"+ "\1\0\2\32\1\0\5\32\1\0\5\32\2\0\1\32"+ "\1\0\2\32\1\0\1\32\1\0\3\32\4\0\1\32"+ - "\4\0\1\32\112\0\1\32\1\0\1\32\1\0\1\32"+ + "\4\0\1\32\113\0\1\32\1\0\1\32\1\0\1\32"+ "\1\0\1\32\1\0\1\32\1\0\3\32\1\0\5\32"+ "\1\0\3\32\1\0\11\32\1\0\2\32\1\0\16\32"+ "\1\0\2\32\1\0\21\32\1\0\1\32\1\0\3\32"+ "\2\0\1\32\1\0\1\32\1\0\2\32\1\0\1\32"+ - "\112\0\1\32\1\0\1\32\1\0\1\32\3\0\1\32"+ + "\113\0\1\32\1\0\1\32\1\0\1\32\3\0\1\32"+ "\1\0\3\32\1\0\2\32\1\0\2\32\1\0\3\32"+ "\1\0\11\32\1\0\2\32\1\0\16\32\1\0\2\32"+ "\1\0\21\32\1\0\1\32\1\0\3\32\2\0\1\32"+ - "\1\0\1\32\1\0\2\32\1\0\1\32\112\0\1\32"+ + "\1\0\1\32\1\0\2\32\1\0\1\32\113\0\1\32"+ "\11\0\1\32\20\0\1\32\33\0\1\32\21\0\1\32"+ - "\10\0\1\32\117\0\1\32\1\0\1\32\1\0\1\32"+ + "\10\0\1\32\120\0\1\32\1\0\1\32\1\0\1\32"+ "\1\0\1\32\1\0\1\32\1\0\3\32\1\0\5\32"+ "\1\0\3\32\1\0\6\32\1\0\2\32\1\0\2\32"+ "\1\0\10\32\1\0\5\32\1\0\2\32\1\0\21\32"+ "\1\0\1\32\1\0\3\32\2\0\1\32\1\0\1\32"+ - "\1\0\2\32\1\0\1\32\241\0\1\33\111\0\1\46"+ - "\1\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ - "\1\0\1\53\3\0\1\54\5\0\1\55\3\0\1\56"+ - "\11\0\1\57\2\0\1\60\16\0\1\61\2\0\1\62"+ - "\41\0\2\25\1\63\1\0\1\64\1\0\1\64\1\65"+ - "\1\0\1\25\2\0\1\64\32\25\1\0\12\63\1\64"+ - "\1\0\1\65\3\0\1\64\20\0\1\46\1\0\1\47"+ - "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\66"+ - "\3\0\1\67\5\0\1\70\3\0\1\71\11\0\1\57"+ - "\2\0\1\72\16\0\1\73\2\0\1\74\41\0\1\25"+ - 
"\2\26\2\0\2\75\1\76\1\0\1\26\2\0\1\75"+ - "\32\25\1\0\12\26\2\0\1\76\2\0\2\75\6\0"+ - "\1\75\23\0\1\77\15\0\1\100\14\0\1\101\16\0"+ - "\1\102\2\0\1\103\21\0\1\104\20\0\1\27\1\0"+ - "\1\27\3\0\1\65\1\0\1\27\52\0\1\65\24\0"+ - "\1\46\1\0\1\47\1\0\1\50\1\0\1\51\1\0"+ - "\1\52\1\0\1\105\3\0\1\67\5\0\1\70\3\0"+ - "\1\106\11\0\1\57\2\0\1\107\16\0\1\110\2\0"+ - "\1\111\21\0\1\104\17\0\1\25\1\112\1\26\1\27"+ - "\3\0\1\112\1\0\1\112\3\0\32\25\1\0\12\26"+ - "\2\0\1\112\165\0\2\31\244\0\1\113\45\114\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\2\0\1\46\1\0\1\47\1\0\1\50\1\0"+ - "\1\51\1\0\1\52\1\0\1\53\3\0\1\54\5\0"+ - "\1\55\3\0\1\56\11\0\1\57\2\0\1\60\16\0"+ - "\1\61\2\0\1\62\41\0\2\25\1\63\1\0\1\64"+ - "\1\0\1\64\1\65\1\0\1\25\2\0\1\116\32\35"+ - "\1\117\12\120\1\64\1\114\1\121\1\114\1\0\1\114"+ - "\1\122\1\115\3\114\3\0\1\114\4\0\2\114\2\0"+ - "\1\46\1\0\1\47\1\0\1\50\1\0\1\51\1\0"+ - "\1\52\1\0\1\53\3\0\1\54\5\0\1\55\3\0"+ - "\1\56\11\0\1\57\2\0\1\60\16\0\1\61\2\0"+ - "\1\62\41\0\2\25\1\63\1\0\1\64\1\0\1\64"+ - "\1\65\1\0\1\25\2\0\1\116\10\35\1\123\6\35"+ - "\1\124\12\35\1\117\12\120\1\64\1\114\1\121\1\114"+ - "\1\0\1\114\1\122\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\2\0\1\46\1\0\1\47\1\0\1\50\1\0"+ - "\1\51\1\0\1\52\1\0\1\53\3\0\1\54\5\0"+ - "\1\55\3\0\1\56\11\0\1\57\2\0\1\60\16\0"+ - "\1\61\2\0\1\62\41\0\2\25\1\63\1\0\1\64"+ - "\1\0\1\64\1\65\1\0\1\25\2\0\1\116\17\35"+ - "\1\125\12\35\1\117\12\120\1\64\1\114\1\121\1\114"+ - "\1\0\1\114\1\122\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\2\0\1\46\1\0\1\47\1\0\1\50\1\0"+ - "\1\51\1\0\1\52\1\0\1\66\3\0\1\67\5\0"+ - "\1\70\3\0\1\71\11\0\1\57\2\0\1\72\16\0"+ - "\1\73\2\0\1\74\41\0\1\25\2\26\2\0\2\75"+ - "\1\76\1\0\1\26\2\0\1\126\32\35\1\117\12\41"+ - "\1\0\1\114\1\127\1\114\1\0\2\130\1\115\3\114"+ - "\2\0\1\75\1\114\4\0\2\114\2\0\1\46\1\0"+ + "\1\0\2\32\1\0\1\32\242\0\1\33\112\0\1\47"+ + "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\53"+ + "\1\0\1\54\3\0\1\55\5\0\1\56\3\0\1\57"+ + "\11\0\1\60\2\0\1\61\16\0\1\62\2\0\1\63"+ + 
"\41\0\2\25\1\64\1\0\1\65\1\0\1\65\1\66"+ + "\1\0\1\25\2\0\1\25\1\65\32\25\1\0\12\64"+ + "\1\65\1\0\1\66\3\0\1\65\20\0\1\47\1\0"+ + "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\1\0"+ + "\1\67\3\0\1\70\5\0\1\71\3\0\1\72\11\0"+ + "\1\60\2\0\1\73\16\0\1\74\2\0\1\75\41\0"+ + "\1\25\2\26\2\0\2\76\1\77\1\0\1\26\2\0"+ + "\1\25\1\76\32\25\1\0\12\26\2\0\1\77\2\0"+ + "\2\76\6\0\1\76\23\0\1\100\15\0\1\101\14\0"+ + "\1\102\16\0\1\103\2\0\1\104\21\0\1\105\20\0"+ + "\1\27\1\0\1\27\3\0\1\66\1\0\1\27\53\0"+ + "\1\66\24\0\1\47\1\0\1\50\1\0\1\51\1\0"+ + "\1\52\1\0\1\53\1\0\1\106\3\0\1\70\5\0"+ + "\1\71\3\0\1\107\11\0\1\60\2\0\1\110\16\0"+ + "\1\111\2\0\1\112\21\0\1\113\17\0\1\25\1\114"+ + "\1\26\1\115\3\0\1\114\1\0\1\114\2\0\1\25"+ + "\1\0\32\25\1\0\12\26\2\0\1\114\165\0\2\31"+ + "\100\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ + "\1\0\1\53\1\0\1\116\3\0\1\55\5\0\1\56"+ + "\3\0\1\117\11\0\1\60\2\0\1\120\16\0\1\121"+ + "\2\0\1\122\41\0\1\25\1\34\1\64\1\0\1\65"+ + "\1\0\1\65\1\66\1\0\1\34\2\0\1\34\1\65"+ + "\32\25\1\0\12\64\1\65\1\0\1\66\3\0\1\65"+ + "\166\0\1\123\45\124\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\2\0\1\47\1\0"+ + "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\1\0"+ + "\1\54\3\0\1\55\5\0\1\56\3\0\1\57\11\0"+ + "\1\60\2\0\1\61\16\0\1\62\2\0\1\63\41\0"+ + "\2\25\1\64\1\0\1\65\1\0\1\65\1\66\1\0"+ + "\1\25\2\0\1\25\1\126\32\36\1\127\12\130\1\65"+ + "\1\124\1\131\1\124\1\0\1\124\1\132\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\2\0\1\47\1\0\1\50"+ + "\1\0\1\51\1\0\1\52\1\0\1\53\1\0\1\54"+ + "\3\0\1\55\5\0\1\56\3\0\1\57\11\0\1\60"+ + "\2\0\1\61\16\0\1\62\2\0\1\63\41\0\2\25"+ + "\1\64\1\0\1\65\1\0\1\65\1\66\1\0\1\25"+ + "\2\0\1\25\1\126\10\36\1\133\6\36\1\134\12\36"+ + "\1\127\12\130\1\65\1\124\1\131\1\124\1\0\1\124"+ + "\1\132\1\125\3\124\3\0\1\124\4\0\2\124\2\0"+ "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ - "\1\66\3\0\1\67\5\0\1\70\3\0\1\71\11\0"+ - "\1\57\2\0\1\72\16\0\1\73\2\0\1\74\41\0"+ - "\1\25\2\26\2\0\2\75\1\76\1\0\1\26\2\0"+ - "\1\126\32\35\1\117\12\131\1\0\1\114\1\127\1\114"+ - 
"\1\0\2\130\1\115\3\114\2\0\1\75\1\114\4\0"+ - "\2\114\2\0\1\46\1\0\1\47\1\0\1\50\1\0"+ - "\1\51\1\0\1\52\1\0\1\66\3\0\1\67\5\0"+ - "\1\70\3\0\1\71\11\0\1\57\2\0\1\72\16\0"+ - "\1\73\2\0\1\74\41\0\1\25\2\26\2\0\2\75"+ - "\1\76\1\0\1\26\2\0\1\126\32\35\1\117\1\41"+ - "\1\132\1\131\2\41\2\131\2\41\1\131\1\0\1\114"+ - "\1\127\1\114\1\0\2\130\1\115\3\114\2\0\1\75"+ - "\1\114\4\0\2\114\2\0\1\46\1\0\1\47\1\0"+ - "\1\50\1\0\1\51\1\0\1\52\1\0\1\105\3\0"+ - "\1\67\5\0\1\70\3\0\1\106\11\0\1\57\2\0"+ - "\1\107\16\0\1\110\2\0\1\111\21\0\1\104\17\0"+ - "\1\25\1\112\1\26\1\27\3\0\1\112\1\0\1\112"+ - "\2\0\1\113\32\133\1\114\12\134\1\0\1\114\1\135"+ - "\1\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\150\0\4\136\2\0\1\136\15\0\1\136\6\0"+ - "\12\136\1\137\173\0\65\140\1\141\1\140\1\142\1\0"+ - "\2\140\3\0\1\25\11\0\3\25\5\0\1\25\1\0"+ - "\1\25\1\0\1\25\4\0\1\25\4\0\1\25\1\0"+ - "\2\25\4\0\1\25\5\0\1\25\3\0\1\25\4\0"+ - "\5\25\10\0\1\63\1\0\2\25\1\0\1\25\10\0"+ - "\1\25\117\0\1\25\1\0\1\63\7\0\2\25\2\0"+ - "\5\25\2\0\2\25\4\0\6\25\1\0\2\25\4\0"+ - "\5\25\1\0\5\25\1\0\2\25\1\0\3\25\1\0"+ - "\4\25\1\0\5\25\1\63\1\0\1\25\1\0\1\25"+ - "\1\0\3\25\2\0\1\25\1\0\1\25\1\0\1\25"+ - "\2\0\1\25\112\0\1\25\3\0\1\25\5\0\2\25"+ - "\3\0\1\25\4\0\3\25\4\0\1\25\1\0\1\25"+ - "\2\0\1\25\1\0\2\25\4\0\1\25\1\0\1\25"+ - "\3\0\2\25\1\0\1\25\5\0\3\25\1\0\1\25"+ - "\10\0\1\25\1\0\2\63\1\0\1\25\10\0\1\25"+ - "\117\0\1\25\3\0\1\25\6\0\2\25\5\0\1\25"+ - "\1\0\1\25\1\0\1\25\1\0\11\25\2\0\1\25"+ - "\4\0\1\25\4\0\6\25\2\0\1\25\1\0\1\25"+ - "\1\0\3\25\1\0\1\25\1\0\2\25\4\0\3\25"+ - "\1\0\1\25\10\0\1\25\1\0\2\25\114\0\1\25"+ - "\3\0\1\25\5\0\1\25\32\0\15\25\5\0\3\25"+ - "\1\0\1\25\5\0\3\25\5\0\1\25\2\0\2\25"+ - "\4\0\1\25\2\0\1\25\1\0\1\25\176\0\2\25"+ - "\6\0\1\25\151\0\1\25\3\0\1\25\2\0\1\25"+ - "\3\0\1\25\5\0\1\25\7\0\1\25\4\0\2\25"+ - "\3\0\2\25\1\0\1\25\4\0\1\25\1\0\1\25"+ - "\2\0\2\25\1\0\3\25\1\0\1\25\2\0\4\25"+ - "\2\0\1\25\134\0\1\46\1\0\1\47\1\0\1\50"+ - "\1\0\1\51\1\0\1\52\1\0\1\143\3\0\1\54"+ - 
"\5\0\1\55\3\0\1\144\11\0\1\57\2\0\1\145"+ - "\16\0\1\146\2\0\1\147\41\0\1\25\2\63\2\0"+ - "\2\150\1\65\1\0\1\63\2\0\1\150\32\25\1\0"+ - "\12\63\2\0\1\65\2\0\2\150\6\0\1\150\11\0"+ - "\1\46\1\0\1\47\1\0\1\50\1\0\1\51\1\0"+ - "\1\52\1\0\1\151\3\0\1\152\5\0\1\153\3\0"+ - "\1\154\11\0\1\57\2\0\1\155\16\0\1\156\2\0"+ - "\1\157\41\0\1\25\1\64\7\0\1\64\3\0\32\25"+ - "\42\0\1\46\1\0\1\47\1\0\1\50\1\0\1\51"+ - "\1\0\1\52\1\0\1\160\3\0\1\54\5\0\1\55"+ - "\3\0\1\161\11\0\1\57\2\0\1\162\16\0\1\163"+ - "\2\0\1\164\21\0\1\104\17\0\1\25\1\65\1\63"+ - "\1\27\3\0\1\65\1\0\1\65\3\0\32\25\1\0"+ - "\12\63\2\0\1\65\25\0\1\26\11\0\3\25\5\0"+ - "\1\25\1\0\1\25\1\0\1\25\4\0\1\25\4\0"+ - "\1\26\1\0\2\26\4\0\1\25\5\0\1\25\3\0"+ - "\1\26\4\0\1\26\2\25\2\26\10\0\1\26\1\0"+ - "\2\25\1\0\1\26\10\0\1\25\117\0\1\25\3\0"+ - "\1\25\6\0\2\25\5\0\1\25\1\0\1\25\1\0"+ - "\1\25\1\0\11\25\2\0\1\25\4\0\1\25\4\0"+ - "\6\25\2\0\1\25\1\0\1\25\1\0\3\25\1\0"+ - "\1\26\1\0\2\25\4\0\3\25\1\0\1\25\10\0"+ - "\1\25\1\0\2\25\114\0\1\25\3\0\1\25\5\0"+ - "\1\25\32\0\15\25\5\0\3\25\1\0\1\25\5\0"+ - "\1\25\2\26\5\0\1\25\2\0\1\25\1\26\4\0"+ - "\1\25\2\0\1\25\1\0\1\25\176\0\2\26\6\0"+ - "\1\26\151\0\1\26\3\0\1\26\2\0\1\26\3\0"+ - "\1\26\5\0\1\26\7\0\1\26\4\0\2\26\3\0"+ - "\2\26\1\0\1\26\4\0\1\26\1\0\1\26\2\0"+ - "\2\26\1\0\3\26\1\0\1\26\2\0\4\26\2\0"+ - "\1\26\146\0\1\165\3\0\1\166\5\0\1\167\3\0"+ - "\1\170\14\0\1\171\16\0\1\172\2\0\1\173\42\0"+ - "\1\75\1\26\6\0\1\75\36\0\12\26\27\0\1\46"+ - "\1\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ - "\1\0\1\174\3\0\1\67\5\0\1\70\3\0\1\175"+ - "\11\0\1\57\2\0\1\176\16\0\1\177\2\0\1\200"+ - "\21\0\1\104\17\0\1\25\1\76\1\26\1\27\3\0"+ - "\1\76\1\0\1\76\3\0\32\25\1\0\12\26\2\0"+ - "\1\76\25\0\1\27\37\0\1\27\1\0\2\27\16\0"+ - "\1\27\4\0\1\27\2\0\2\27\15\0\1\27\225\0"+ - "\1\27\246\0\2\27\11\0\1\27\210\0\2\27\6\0"+ - "\1\27\151\0\1\27\3\0\1\27\2\0\1\27\3\0"+ - "\1\27\5\0\1\27\7\0\1\27\4\0\2\27\3\0"+ - "\2\27\1\0\1\27\4\0\1\27\1\0\1\27\2\0"+ - "\2\27\1\0\3\27\1\0\1\27\2\0\4\27\2\0"+ - 
"\1\27\246\0\1\27\130\0\1\112\11\0\3\25\5\0"+ - "\1\25\1\0\1\25\1\0\1\25\4\0\1\25\4\0"+ - "\1\112\1\0\2\112\4\0\1\25\5\0\1\25\3\0"+ - "\1\112\4\0\1\112\2\25\2\112\10\0\1\26\1\0"+ - "\2\25\1\0\1\112\10\0\1\25\117\0\1\25\3\0"+ - "\1\25\6\0\2\25\5\0\1\25\1\0\1\25\1\0"+ - "\1\25\1\0\11\25\2\0\1\25\4\0\1\25\4\0"+ - "\6\25\2\0\1\25\1\0\1\25\1\0\3\25\1\0"+ - "\1\112\1\0\2\25\4\0\3\25\1\0\1\25\10\0"+ - "\1\25\1\0\2\25\114\0\1\25\3\0\1\25\5\0"+ - "\1\25\32\0\15\25\5\0\3\25\1\0\1\25\5\0"+ - "\1\25\2\112\5\0\1\25\2\0\1\25\1\112\4\0"+ - "\1\25\2\0\1\25\1\0\1\25\176\0\2\112\6\0"+ - "\1\112\151\0\1\112\3\0\1\112\2\0\1\112\3\0"+ - "\1\112\5\0\1\112\7\0\1\112\4\0\2\112\3\0"+ - "\2\112\1\0\1\112\4\0\1\112\1\0\1\112\2\0"+ - "\2\112\1\0\3\112\1\0\1\112\2\0\4\112\2\0"+ - "\1\112\301\0\1\113\45\114\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\1\140\3\0\2\114\150\0"+ - "\32\201\1\0\12\201\13\0\1\202\13\0\1\46\1\0"+ + "\1\53\1\0\1\54\3\0\1\55\5\0\1\56\3\0"+ + "\1\57\11\0\1\60\2\0\1\61\16\0\1\62\2\0"+ + "\1\63\41\0\2\25\1\64\1\0\1\65\1\0\1\65"+ + "\1\66\1\0\1\25\2\0\1\25\1\126\17\36\1\135"+ + "\12\36\1\127\12\130\1\65\1\124\1\131\1\124\1\0"+ + "\1\124\1\132\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\2\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ + "\1\0\1\53\1\0\1\67\3\0\1\70\5\0\1\71"+ + "\3\0\1\72\11\0\1\60\2\0\1\73\16\0\1\74"+ + "\2\0\1\75\41\0\1\25\2\26\2\0\2\76\1\77"+ + "\1\0\1\26\2\0\1\25\1\136\32\36\1\127\12\42"+ + "\1\0\1\124\1\137\1\124\1\0\2\140\1\125\3\124"+ + "\2\0\1\76\1\124\4\0\2\124\2\0\1\47\1\0"+ + "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\1\0"+ + "\1\67\3\0\1\70\5\0\1\71\3\0\1\72\11\0"+ + "\1\60\2\0\1\73\16\0\1\74\2\0\1\75\41\0"+ + "\1\25\2\26\2\0\2\76\1\77\1\0\1\26\2\0"+ + "\1\25\1\136\32\36\1\127\12\141\1\0\1\124\1\137"+ + "\1\124\1\0\2\140\1\125\3\124\2\0\1\76\1\124"+ + "\4\0\2\124\2\0\1\47\1\0\1\50\1\0\1\51"+ + "\1\0\1\52\1\0\1\53\1\0\1\67\3\0\1\70"+ + "\5\0\1\71\3\0\1\72\11\0\1\60\2\0\1\73"+ + "\16\0\1\74\2\0\1\75\41\0\1\25\2\26\2\0"+ + "\2\76\1\77\1\0\1\26\2\0\1\25\1\136\32\36"+ + 
"\1\127\1\42\1\142\1\141\2\42\2\141\1\42\1\141"+ + "\1\42\1\0\1\124\1\137\1\124\1\0\2\140\1\125"+ + "\3\124\2\0\1\76\1\124\4\0\2\124\2\0\1\47"+ + "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\53"+ + "\1\0\1\106\3\0\1\70\5\0\1\71\3\0\1\107"+ + "\11\0\1\60\2\0\1\110\16\0\1\111\2\0\1\112"+ + "\21\0\1\113\17\0\1\25\1\114\1\26\1\115\3\0"+ + "\1\114\1\0\1\114\2\0\1\25\1\123\32\143\1\124"+ + "\12\144\1\0\1\124\1\145\1\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\151\0\4\146\2\0"+ + "\1\146\15\0\1\146\6\0\12\146\1\147\174\0\65\150"+ + "\1\151\1\150\1\152\1\0\2\150\3\0\1\25\11\0"+ + "\3\25\5\0\1\25\1\0\1\25\1\0\1\25\4\0"+ + "\1\25\4\0\1\25\1\0\2\25\4\0\1\25\5\0"+ + "\1\25\3\0\1\25\4\0\5\25\10\0\1\64\1\0"+ + "\2\25\1\0\1\25\10\0\1\25\120\0\1\25\1\0"+ + "\1\64\7\0\2\25\2\0\5\25\2\0\2\25\4\0"+ + "\6\25\1\0\2\25\4\0\5\25\1\0\5\25\1\0"+ + "\2\25\1\0\3\25\1\0\4\25\1\0\5\25\1\64"+ + "\1\0\1\25\1\0\1\25\1\0\3\25\2\0\1\25"+ + "\1\0\1\25\1\0\1\25\2\0\1\25\113\0\1\25"+ + "\3\0\1\25\5\0\2\25\3\0\1\25\4\0\3\25"+ + "\4\0\1\25\1\0\1\25\2\0\1\25\1\0\2\25"+ + "\4\0\1\25\1\0\1\25\3\0\2\25\1\0\1\25"+ + "\5\0\3\25\1\0\1\25\10\0\1\25\1\0\2\64"+ + "\1\0\1\25\10\0\1\25\120\0\1\25\3\0\1\25"+ + "\6\0\2\25\5\0\1\25\1\0\1\25\1\0\1\25"+ + "\1\0\11\25\2\0\1\25\4\0\1\25\4\0\6\25"+ + "\2\0\1\25\1\0\1\25\1\0\3\25\1\0\1\25"+ + "\1\0\2\25\4\0\3\25\1\0\1\25\10\0\1\25"+ + "\1\0\2\25\115\0\1\25\3\0\1\25\5\0\1\25"+ + "\32\0\15\25\5\0\3\25\1\0\1\25\5\0\3\25"+ + "\5\0\1\25\2\0\2\25\4\0\1\25\2\0\1\25"+ + "\1\0\1\25\177\0\2\25\6\0\1\25\152\0\1\25"+ + "\3\0\1\25\2\0\1\25\3\0\1\25\5\0\1\25"+ + "\7\0\1\25\4\0\2\25\3\0\2\25\1\0\1\25"+ + "\4\0\1\25\1\0\1\25\2\0\2\25\1\0\3\25"+ + "\1\0\1\25\2\0\4\25\2\0\1\25\135\0\1\47"+ + "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\53"+ + "\1\0\1\153\3\0\1\55\5\0\1\56\3\0\1\154"+ + "\11\0\1\60\2\0\1\155\16\0\1\156\2\0\1\157"+ + "\41\0\1\25\2\64\2\0\2\160\1\66\1\0\1\64"+ + "\2\0\1\25\1\160\32\25\1\0\12\64\2\0\1\66"+ + "\2\0\2\160\6\0\1\160\11\0\1\47\1\0\1\50"+ + 
"\1\0\1\51\1\0\1\52\1\0\1\53\1\0\1\161"+ + "\3\0\1\162\5\0\1\163\3\0\1\164\11\0\1\60"+ + "\2\0\1\165\16\0\1\166\2\0\1\167\41\0\1\25"+ + "\1\65\7\0\1\65\2\0\1\25\1\0\32\25\42\0"+ "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ - "\1\151\3\0\1\152\5\0\1\153\3\0\1\154\11\0"+ - "\1\57\2\0\1\155\16\0\1\156\2\0\1\157\41\0"+ - "\1\25\1\64\7\0\1\64\2\0\1\113\1\203\1\204"+ - "\1\205\1\206\1\207\1\210\1\211\1\212\1\213\1\214"+ - "\1\215\1\216\1\217\1\220\1\221\1\222\1\223\1\224"+ - "\1\225\1\226\1\227\1\230\1\231\1\232\1\233\1\234"+ - "\1\114\12\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\1\140\3\0\2\114\147\0\1\113\32\235"+ - "\1\117\12\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\2\0\1\46\1\0\1\47"+ - "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\143"+ - "\3\0\1\54\5\0\1\55\3\0\1\144\11\0\1\57"+ - "\2\0\1\145\16\0\1\146\2\0\1\147\41\0\1\25"+ - "\2\63\2\0\2\150\1\65\1\0\1\63\2\0\1\236"+ - "\32\35\1\117\12\120\1\0\1\114\1\121\1\114\1\0"+ - "\2\237\1\115\3\114\2\0\1\150\1\114\4\0\2\114"+ - "\2\0\1\46\1\0\1\47\1\0\1\50\1\0\1\51"+ - "\1\0\1\52\1\0\1\160\3\0\1\54\5\0\1\55"+ - "\3\0\1\161\11\0\1\57\2\0\1\162\16\0\1\163"+ - "\2\0\1\164\21\0\1\104\17\0\1\25\1\65\1\63"+ - "\1\27\3\0\1\65\1\0\1\65\2\0\1\113\32\133"+ - "\1\114\12\240\1\0\1\114\1\121\1\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\2\0\1\46"+ - "\1\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ - "\1\0\1\151\3\0\1\152\5\0\1\153\3\0\1\154"+ - "\11\0\1\57\2\0\1\155\16\0\1\156\2\0\1\157"+ - "\41\0\1\25\1\64\7\0\1\64\2\0\1\113\32\133"+ - "\13\114\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\2\0\1\46\1\0\1\47\1\0"+ - "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\3\0"+ - "\1\54\5\0\1\55\3\0\1\56\11\0\1\57\2\0"+ - "\1\60\16\0\1\61\2\0\1\62\41\0\2\25\1\63"+ - "\1\0\1\64\1\0\1\64\1\65\1\0\1\25\2\0"+ - "\1\116\11\35\1\241\20\35\1\117\12\120\1\64\1\114"+ - "\1\121\1\114\1\0\1\114\1\122\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\2\0\1\46\1\0\1\47\1\0"+ - "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\3\0"+ - 
"\1\54\5\0\1\55\3\0\1\56\11\0\1\57\2\0"+ - "\1\60\16\0\1\61\2\0\1\62\41\0\2\25\1\63"+ - "\1\0\1\64\1\0\1\64\1\65\1\0\1\25\2\0"+ - "\1\116\15\35\1\242\14\35\1\117\12\120\1\64\1\114"+ - "\1\121\1\114\1\0\1\114\1\122\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\2\0\1\46\1\0\1\47\1\0"+ - "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\3\0"+ - "\1\54\5\0\1\55\3\0\1\56\11\0\1\57\2\0"+ - "\1\60\16\0\1\61\2\0\1\62\41\0\2\25\1\63"+ - "\1\0\1\64\1\0\1\64\1\65\1\0\1\25\2\0"+ - "\1\116\17\35\1\243\12\35\1\117\12\120\1\64\1\114"+ - "\1\121\1\114\1\0\1\114\1\122\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\14\0\1\165\3\0\1\166\5\0"+ - "\1\167\3\0\1\170\14\0\1\171\16\0\1\172\2\0"+ - "\1\173\42\0\1\75\1\26\6\0\1\75\2\0\1\113"+ - "\1\244\1\245\1\246\1\247\1\250\1\251\1\252\1\253"+ - "\1\254\1\255\1\256\1\257\1\260\1\261\1\262\1\263"+ - "\1\264\1\265\1\266\1\267\1\270\1\271\1\272\1\273"+ - "\1\274\1\275\1\114\1\276\2\277\1\276\4\277\1\300"+ - "\1\277\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\1\140\3\0\2\114\2\0\1\46\1\0\1\47"+ - "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\174"+ - "\3\0\1\67\5\0\1\70\3\0\1\175\11\0\1\57"+ - "\2\0\1\176\16\0\1\177\2\0\1\200\21\0\1\104"+ - "\17\0\1\25\1\76\1\26\1\27\3\0\1\76\1\0"+ - "\1\76\2\0\1\113\32\133\1\114\12\134\1\0\1\114"+ - "\1\127\1\114\1\0\2\114\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\14\0\1\165\3\0\1\166\5\0\1\167"+ - "\3\0\1\170\14\0\1\171\16\0\1\172\2\0\1\173"+ - "\42\0\1\75\1\26\6\0\1\75\2\0\1\113\33\114"+ - "\12\134\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\2\0\1\46\1\0\1\47\1\0"+ - "\1\50\1\0\1\51\1\0\1\52\1\0\1\66\3\0"+ - "\1\67\5\0\1\70\3\0\1\71\11\0\1\57\2\0"+ - "\1\72\16\0\1\73\2\0\1\74\41\0\1\25\2\26"+ - "\2\0\2\75\1\76\1\0\1\26\2\0\1\126\32\35"+ - "\1\117\12\301\1\0\1\114\1\127\1\114\1\0\2\130"+ - "\1\115\3\114\2\0\1\75\1\114\4\0\2\114\2\0"+ - "\1\46\1\0\1\47\1\0\1\50\1\0\1\51\1\0"+ - "\1\52\1\0\1\66\3\0\1\67\5\0\1\70\3\0"+ - "\1\71\11\0\1\57\2\0\1\72\16\0\1\73\2\0"+ - "\1\74\41\0\1\25\2\26\2\0\2\75\1\76\1\0"+ - 
"\1\26\2\0\1\126\32\35\1\117\2\131\1\301\2\131"+ - "\2\301\2\131\1\301\1\0\1\114\1\127\1\114\1\0"+ - "\2\130\1\115\3\114\2\0\1\75\1\114\4\0\2\114"+ - "\2\0\1\46\1\0\1\47\1\0\1\50\1\0\1\51"+ - "\1\0\1\52\1\0\1\53\3\0\1\54\5\0\1\55"+ - "\3\0\1\56\11\0\1\57\2\0\1\60\16\0\1\61"+ - "\2\0\1\62\41\0\2\25\1\63\1\0\1\64\1\0"+ - "\1\64\1\65\1\0\1\25\2\0\1\302\32\133\1\114"+ - "\12\240\1\64\1\114\1\121\1\114\1\0\1\114\1\122"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\2\0\1\46"+ - "\1\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ - "\1\0\1\66\3\0\1\67\5\0\1\70\3\0\1\71"+ - "\11\0\1\57\2\0\1\72\16\0\1\73\2\0\1\74"+ - "\41\0\1\25\2\26\2\0\2\75\1\76\1\0\1\26"+ - "\2\0\1\303\32\133\1\114\12\134\1\0\1\114\1\127"+ - "\1\114\1\0\2\130\1\115\3\114\2\0\1\75\1\114"+ - "\4\0\2\114\150\0\4\304\2\0\1\304\15\0\1\304"+ - "\6\0\12\304\1\305\241\0\1\306\173\0\1\307\54\0"+ - "\1\115\164\0\74\140\2\0\1\63\11\0\3\25\5\0"+ - "\1\25\1\0\1\25\1\0\1\25\4\0\1\25\4\0"+ - "\1\63\1\0\2\63\4\0\1\25\5\0\1\25\3\0"+ - "\1\63\4\0\1\63\2\25\2\63\10\0\1\63\1\0"+ - "\2\25\1\0\1\63\10\0\1\25\117\0\1\25\3\0"+ - "\1\25\6\0\2\25\5\0\1\25\1\0\1\25\1\0"+ - "\1\25\1\0\11\25\2\0\1\25\4\0\1\25\4\0"+ - "\6\25\2\0\1\25\1\0\1\25\1\0\3\25\1\0"+ - "\1\63\1\0\2\25\4\0\3\25\1\0\1\25\10\0"+ - "\1\25\1\0\2\25\114\0\1\25\3\0\1\25\5\0"+ - "\1\25\32\0\15\25\5\0\3\25\1\0\1\25\5\0"+ - "\1\25\2\63\5\0\1\25\2\0\1\25\1\63\4\0"+ - "\1\25\2\0\1\25\1\0\1\25\176\0\2\63\6\0"+ - "\1\63\151\0\1\63\3\0\1\63\2\0\1\63\3\0"+ - "\1\63\5\0\1\63\7\0\1\63\4\0\2\63\3\0"+ - "\2\63\1\0\1\63\4\0\1\63\1\0\1\63\2\0"+ - "\2\63\1\0\3\63\1\0\1\63\2\0\4\63\2\0"+ - "\1\63\146\0\1\310\3\0\1\311\5\0\1\312\3\0"+ - "\1\313\14\0\1\314\16\0\1\315\2\0\1\316\42\0"+ - "\1\150\1\63\6\0\1\150\36\0\12\63\30\0\1\64"+ - "\11\0\3\25\5\0\1\25\1\0\1\25\1\0\1\25"+ - "\4\0\1\25\4\0\1\64\1\0\2\64\4\0\1\25"+ - "\5\0\1\25\3\0\1\64\4\0\1\64\2\25\2\64"+ - "\12\0\2\25\1\0\1\64\10\0\1\25\117\0\1\25"+ - "\11\0\2\25\2\0\5\25\2\0\2\25\4\0\6\25"+ - "\1\0\2\25\4\0\5\25\1\0\5\25\1\0\2\25"+ - 
"\1\0\3\25\1\0\4\25\1\0\5\25\2\0\1\25"+ - "\1\0\1\25\1\0\3\25\2\0\1\25\1\0\1\25"+ - "\1\0\1\25\2\0\1\25\112\0\1\25\3\0\1\25"+ - "\5\0\2\25\3\0\1\25\4\0\3\25\4\0\1\25"+ - "\1\0\1\25\2\0\1\25\1\0\2\25\4\0\1\25"+ - "\1\0\1\25\3\0\2\25\1\0\1\25\5\0\3\25"+ - "\1\0\1\25\10\0\1\25\4\0\1\25\10\0\1\25"+ - "\117\0\1\25\3\0\1\25\6\0\2\25\5\0\1\25"+ - "\1\0\1\25\1\0\1\25\1\0\11\25\2\0\1\25"+ - "\4\0\1\25\4\0\6\25\2\0\1\25\1\0\1\25"+ - "\1\0\3\25\1\0\1\64\1\0\2\25\4\0\3\25"+ - "\1\0\1\25\10\0\1\25\1\0\2\25\114\0\1\25"+ - "\3\0\1\25\5\0\1\25\32\0\15\25\5\0\3\25"+ - "\1\0\1\25\5\0\1\25\2\64\5\0\1\25\2\0"+ - "\1\25\1\64\4\0\1\25\2\0\1\25\1\0\1\25"+ - "\176\0\2\64\6\0\1\64\151\0\1\64\3\0\1\64"+ - "\2\0\1\64\3\0\1\64\5\0\1\64\7\0\1\64"+ - "\4\0\2\64\3\0\2\64\1\0\1\64\4\0\1\64"+ - "\1\0\1\64\2\0\2\64\1\0\3\64\1\0\1\64"+ - "\2\0\4\64\2\0\1\64\135\0\1\65\11\0\3\25"+ + "\1\53\1\0\1\170\3\0\1\55\5\0\1\56\3\0"+ + "\1\171\11\0\1\60\2\0\1\172\16\0\1\173\2\0"+ + "\1\174\21\0\1\113\17\0\1\25\1\66\1\64\1\115"+ + "\3\0\1\66\1\0\1\66\2\0\1\25\1\0\32\25"+ + "\1\0\12\64\2\0\1\66\25\0\1\26\11\0\3\25"+ "\5\0\1\25\1\0\1\25\1\0\1\25\4\0\1\25"+ - "\4\0\1\65\1\0\2\65\4\0\1\25\5\0\1\25"+ - "\3\0\1\65\4\0\1\65\2\25\2\65\10\0\1\63"+ - "\1\0\2\25\1\0\1\65\10\0\1\25\117\0\1\25"+ + "\4\0\1\26\1\0\2\26\4\0\1\25\5\0\1\25"+ + "\3\0\1\26\4\0\1\26\2\25\2\26\10\0\1\26"+ + "\1\0\2\25\1\0\1\26\10\0\1\25\120\0\1\25"+ "\3\0\1\25\6\0\2\25\5\0\1\25\1\0\1\25"+ "\1\0\1\25\1\0\11\25\2\0\1\25\4\0\1\25"+ "\4\0\6\25\2\0\1\25\1\0\1\25\1\0\3\25"+ - "\1\0\1\65\1\0\2\25\4\0\3\25\1\0\1\25"+ - "\10\0\1\25\1\0\2\25\114\0\1\25\3\0\1\25"+ + "\1\0\1\26\1\0\2\25\4\0\3\25\1\0\1\25"+ + "\10\0\1\25\1\0\2\25\115\0\1\25\3\0\1\25"+ "\5\0\1\25\32\0\15\25\5\0\3\25\1\0\1\25"+ - "\5\0\1\25\2\65\5\0\1\25\2\0\1\25\1\65"+ - "\4\0\1\25\2\0\1\25\1\0\1\25\176\0\2\65"+ - "\6\0\1\65\151\0\1\65\3\0\1\65\2\0\1\65"+ - "\3\0\1\65\5\0\1\65\7\0\1\65\4\0\2\65"+ - "\3\0\2\65\1\0\1\65\4\0\1\65\1\0\1\65"+ - "\2\0\2\65\1\0\3\65\1\0\1\65\2\0\4\65"+ - 
"\2\0\1\65\135\0\1\75\37\0\1\75\1\0\2\75"+ - "\16\0\1\75\4\0\1\75\2\0\2\75\10\0\1\26"+ - "\4\0\1\75\132\0\1\26\102\0\1\26\242\0\2\26"+ - "\227\0\1\75\246\0\2\75\11\0\1\75\210\0\2\75"+ - "\6\0\1\75\151\0\1\75\3\0\1\75\2\0\1\75"+ - "\3\0\1\75\5\0\1\75\7\0\1\75\4\0\2\75"+ - "\3\0\2\75\1\0\1\75\4\0\1\75\1\0\1\75"+ - "\2\0\2\75\1\0\3\75\1\0\1\75\2\0\4\75"+ - "\2\0\1\75\135\0\1\76\11\0\3\25\5\0\1\25"+ - "\1\0\1\25\1\0\1\25\4\0\1\25\4\0\1\76"+ - "\1\0\2\76\4\0\1\25\5\0\1\25\3\0\1\76"+ - "\4\0\1\76\2\25\2\76\10\0\1\26\1\0\2\25"+ - "\1\0\1\76\10\0\1\25\117\0\1\25\3\0\1\25"+ - "\6\0\2\25\5\0\1\25\1\0\1\25\1\0\1\25"+ - "\1\0\11\25\2\0\1\25\4\0\1\25\4\0\6\25"+ - "\2\0\1\25\1\0\1\25\1\0\3\25\1\0\1\76"+ - "\1\0\2\25\4\0\3\25\1\0\1\25\10\0\1\25"+ - "\1\0\2\25\114\0\1\25\3\0\1\25\5\0\1\25"+ - "\32\0\15\25\5\0\3\25\1\0\1\25\5\0\1\25"+ - "\2\76\5\0\1\25\2\0\1\25\1\76\4\0\1\25"+ - "\2\0\1\25\1\0\1\25\176\0\2\76\6\0\1\76"+ - "\151\0\1\76\3\0\1\76\2\0\1\76\3\0\1\76"+ - "\5\0\1\76\7\0\1\76\4\0\2\76\3\0\2\76"+ - "\1\0\1\76\4\0\1\76\1\0\1\76\2\0\2\76"+ - "\1\0\3\76\1\0\1\76\2\0\4\76\2\0\1\76"+ - "\301\0\1\317\32\201\1\320\12\201\174\0\61\202\1\0"+ - "\1\321\4\202\1\322\1\0\3\202\1\0\1\46\1\0"+ + "\5\0\1\25\2\26\5\0\1\25\2\0\1\25\1\26"+ + "\4\0\1\25\2\0\1\25\1\0\1\25\177\0\2\26"+ + "\6\0\1\26\152\0\1\26\3\0\1\26\2\0\1\26"+ + "\3\0\1\26\5\0\1\26\7\0\1\26\4\0\2\26"+ + "\3\0\2\26\1\0\1\26\4\0\1\26\1\0\1\26"+ + "\2\0\2\26\1\0\3\26\1\0\1\26\2\0\4\26"+ + "\2\0\1\26\147\0\1\175\3\0\1\176\5\0\1\177"+ + "\3\0\1\200\14\0\1\201\16\0\1\202\2\0\1\203"+ + "\42\0\1\76\1\26\6\0\1\76\37\0\12\26\27\0"+ "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ - "\1\53\3\0\1\54\5\0\1\55\3\0\1\56\11\0"+ - "\1\57\2\0\1\60\16\0\1\61\2\0\1\62\41\0"+ - "\2\25\1\63\1\0\1\64\1\0\1\64\1\65\1\0"+ - "\1\25\2\0\1\116\1\35\2\323\1\324\1\325\10\323"+ - "\1\35\1\326\5\323\6\35\1\117\12\120\1\64\1\114"+ - "\1\121\1\114\1\0\1\114\1\122\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\2\0\1\46\1\0\1\47\1\0"+ - "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\3\0"+ - 
"\1\54\5\0\1\55\3\0\1\56\11\0\1\57\2\0"+ - "\1\60\16\0\1\61\2\0\1\62\41\0\2\25\1\63"+ - "\1\0\1\64\1\0\1\64\1\65\1\0\1\25\2\0"+ - "\1\116\1\327\2\323\1\35\1\323\1\330\6\323\4\35"+ - "\1\323\1\35\2\323\1\35\1\323\1\35\3\323\1\117"+ - "\12\120\1\64\1\114\1\121\1\114\1\0\1\114\1\122"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\2\0\1\46"+ - "\1\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ - "\1\0\1\53\3\0\1\54\5\0\1\55\3\0\1\56"+ - "\11\0\1\57\2\0\1\60\16\0\1\61\2\0\1\62"+ - "\41\0\2\25\1\63\1\0\1\64\1\0\1\64\1\65"+ - "\1\0\1\25\2\0\1\116\3\35\1\323\1\35\1\323"+ - "\4\35\1\323\10\35\1\323\2\35\1\323\2\35\1\323"+ - "\1\117\12\120\1\64\1\114\1\121\1\114\1\0\1\114"+ - "\1\122\1\115\3\114\3\0\1\114\4\0\2\114\2\0"+ - "\1\46\1\0\1\47\1\0\1\50\1\0\1\51\1\0"+ - "\1\52\1\0\1\53\3\0\1\54\5\0\1\55\3\0"+ - "\1\56\11\0\1\57\2\0\1\60\16\0\1\61\2\0"+ - "\1\62\41\0\2\25\1\63\1\0\1\64\1\0\1\64"+ - "\1\65\1\0\1\25\2\0\1\116\1\35\1\323\1\331"+ - "\2\323\2\35\1\323\6\35\3\323\11\35\1\117\12\120"+ - "\1\64\1\114\1\121\1\114\1\0\1\114\1\122\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\2\0\1\46\1\0"+ - "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ - "\1\53\3\0\1\54\5\0\1\55\3\0\1\56\11\0"+ - "\1\57\2\0\1\60\16\0\1\61\2\0\1\62\41\0"+ - "\2\25\1\63\1\0\1\64\1\0\1\64\1\65\1\0"+ - "\1\25\2\0\1\116\3\35\1\323\1\35\1\323\10\35"+ - "\1\323\1\35\2\323\10\35\1\117\12\120\1\64\1\114"+ - "\1\121\1\114\1\0\1\114\1\122\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\2\0\1\46\1\0\1\47\1\0"+ - "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\3\0"+ - "\1\54\5\0\1\55\3\0\1\56\11\0\1\57\2\0"+ - "\1\60\16\0\1\61\2\0\1\62\41\0\2\25\1\63"+ - "\1\0\1\64\1\0\1\64\1\65\1\0\1\25\2\0"+ - "\1\116\4\35\1\332\5\35\1\323\17\35\1\117\12\120"+ - "\1\64\1\114\1\121\1\114\1\0\1\114\1\122\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\2\0\1\46\1\0"+ - "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ - "\1\53\3\0\1\54\5\0\1\55\3\0\1\56\11\0"+ - "\1\57\2\0\1\60\16\0\1\61\2\0\1\62\41\0"+ - "\2\25\1\63\1\0\1\64\1\0\1\64\1\65\1\0"+ - "\1\25\2\0\1\116\4\35\2\323\2\35\1\323\1\35"+ - 
"\1\323\13\35\1\323\2\35\1\323\1\117\12\120\1\64"+ - "\1\114\1\121\1\114\1\0\1\114\1\122\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\2\0\1\46\1\0\1\47"+ + "\1\53\1\0\1\204\3\0\1\70\5\0\1\71\3\0"+ + "\1\205\11\0\1\60\2\0\1\206\16\0\1\207\2\0"+ + "\1\210\21\0\1\113\17\0\1\25\1\77\1\26\1\115"+ + "\3\0\1\77\1\0\1\77\2\0\1\25\1\0\32\25"+ + "\1\0\12\26\2\0\1\77\25\0\1\27\37\0\1\27"+ + "\1\0\2\27\16\0\1\27\4\0\1\27\2\0\2\27"+ + "\15\0\1\27\226\0\1\27\247\0\2\27\11\0\1\27"+ + "\211\0\2\27\6\0\1\27\152\0\1\27\3\0\1\27"+ + "\2\0\1\27\3\0\1\27\5\0\1\27\7\0\1\27"+ + "\4\0\2\27\3\0\2\27\1\0\1\27\4\0\1\27"+ + "\1\0\1\27\2\0\2\27\1\0\3\27\1\0\1\27"+ + "\2\0\4\27\2\0\1\27\247\0\1\27\131\0\1\114"+ + "\11\0\3\25\5\0\1\25\1\0\1\25\1\0\1\25"+ + "\4\0\1\25\4\0\1\114\1\0\2\114\4\0\1\25"+ + "\5\0\1\25\3\0\1\114\4\0\1\114\2\25\2\114"+ + "\10\0\1\26\1\0\2\25\1\0\1\114\10\0\1\25"+ + "\120\0\1\25\3\0\1\25\6\0\2\25\5\0\1\25"+ + "\1\0\1\25\1\0\1\25\1\0\11\25\2\0\1\25"+ + "\4\0\1\25\4\0\6\25\2\0\1\25\1\0\1\25"+ + "\1\0\3\25\1\0\1\114\1\0\2\25\4\0\3\25"+ + "\1\0\1\25\10\0\1\25\1\0\2\25\115\0\1\25"+ + "\3\0\1\25\5\0\1\25\32\0\15\25\5\0\3\25"+ + "\1\0\1\25\5\0\1\25\2\114\5\0\1\25\2\0"+ + "\1\25\1\114\4\0\1\25\2\0\1\25\1\0\1\25"+ + "\177\0\2\114\6\0\1\114\152\0\1\114\3\0\1\114"+ + "\2\0\1\114\3\0\1\114\5\0\1\114\7\0\1\114"+ + "\4\0\2\114\3\0\2\114\1\0\1\114\4\0\1\114"+ + "\1\0\1\114\2\0\2\114\1\0\3\114\1\0\1\114"+ + "\2\0\4\114\2\0\1\114\247\0\1\115\142\0\1\211"+ + "\15\0\1\212\14\0\1\213\16\0\1\214\2\0\1\215"+ + "\21\0\1\113\20\0\1\115\1\0\1\115\3\0\1\66"+ + "\1\0\1\115\53\0\1\66\25\0\1\34\11\0\3\25"+ + "\5\0\1\25\1\0\1\25\1\0\1\25\4\0\1\25"+ + "\4\0\1\34\1\0\2\34\4\0\1\25\5\0\1\25"+ + "\3\0\1\34\4\0\1\34\2\25\2\34\10\0\1\64"+ + "\1\0\2\25\1\0\1\34\10\0\1\25\120\0\1\25"+ + "\3\0\1\25\6\0\2\25\5\0\1\25\1\0\1\25"+ + "\1\0\1\25\1\0\11\25\2\0\1\25\4\0\1\25"+ + "\4\0\6\25\2\0\1\25\1\0\1\25\1\0\3\25"+ + "\1\0\1\34\1\0\2\25\4\0\3\25\1\0\1\25"+ + "\10\0\1\25\1\0\2\25\115\0\1\25\3\0\1\25"+ + 
"\5\0\1\25\32\0\15\25\5\0\3\25\1\0\1\25"+ + "\5\0\1\25\2\34\5\0\1\25\2\0\1\25\1\34"+ + "\4\0\1\25\2\0\1\25\1\0\1\25\177\0\2\34"+ + "\6\0\1\34\152\0\1\34\3\0\1\34\2\0\1\34"+ + "\3\0\1\34\5\0\1\34\7\0\1\34\4\0\2\34"+ + "\3\0\2\34\1\0\1\34\4\0\1\34\1\0\1\34"+ + "\2\0\2\34\1\0\3\34\1\0\1\34\2\0\4\34"+ + "\2\0\1\34\303\0\1\123\45\124\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\1\150\3\0\2\124"+ + "\151\0\32\216\1\0\12\216\13\0\1\217\13\0\1\47"+ "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\53"+ - "\3\0\1\54\5\0\1\55\3\0\1\56\11\0\1\57"+ - "\2\0\1\60\16\0\1\61\2\0\1\62\41\0\2\25"+ - "\1\63\1\0\1\64\1\0\1\64\1\65\1\0\1\25"+ - "\2\0\1\116\1\323\1\35\3\323\1\333\14\323\2\35"+ - "\2\323\2\35\1\323\1\35\1\117\12\120\1\64\1\114"+ - "\1\121\1\114\1\0\1\114\1\122\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\2\0\1\46\1\0\1\47\1\0"+ - "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\3\0"+ - "\1\54\5\0\1\55\3\0\1\56\11\0\1\57\2\0"+ - "\1\60\16\0\1\61\2\0\1\62\41\0\2\25\1\63"+ - "\1\0\1\64\1\0\1\64\1\65\1\0\1\25\2\0"+ - "\1\116\2\35\4\323\3\35\2\323\1\334\1\323\1\35"+ - "\2\323\12\35\1\117\12\120\1\64\1\114\1\121\1\114"+ - "\1\0\1\114\1\122\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\2\0\1\46\1\0\1\47\1\0\1\50\1\0"+ - "\1\51\1\0\1\52\1\0\1\53\3\0\1\54\5\0"+ - "\1\55\3\0\1\56\11\0\1\57\2\0\1\60\16\0"+ - "\1\61\2\0\1\62\41\0\2\25\1\63\1\0\1\64"+ - "\1\0\1\64\1\65\1\0\1\25\2\0\1\116\2\323"+ - "\2\35\1\323\3\35\1\323\5\35\3\323\3\35\1\323"+ - "\2\35\3\323\1\117\12\120\1\64\1\114\1\121\1\114"+ - "\1\0\1\114\1\122\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\2\0\1\46\1\0\1\47\1\0\1\50\1\0"+ - "\1\51\1\0\1\52\1\0\1\53\3\0\1\54\5\0"+ - "\1\55\3\0\1\56\11\0\1\57\2\0\1\60\16\0"+ - "\1\61\2\0\1\62\41\0\2\25\1\63\1\0\1\64"+ - "\1\0\1\64\1\65\1\0\1\25\2\0\1\116\5\323"+ - "\1\335\1\35\1\323\1\336\7\323\1\337\3\323\1\35"+ - "\1\323\1\35\3\323\1\117\12\120\1\64\1\114\1\121"+ - "\1\114\1\0\1\114\1\122\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\2\0\1\46\1\0\1\47\1\0\1\50"+ - "\1\0\1\51\1\0\1\52\1\0\1\53\3\0\1\54"+ - 
"\5\0\1\55\3\0\1\56\11\0\1\57\2\0\1\60"+ - "\16\0\1\61\2\0\1\62\41\0\2\25\1\63\1\0"+ - "\1\64\1\0\1\64\1\65\1\0\1\25\2\0\1\116"+ - "\1\340\1\323\1\35\1\327\6\323\3\35\1\323\2\35"+ - "\1\323\2\35\1\323\6\35\1\117\12\120\1\64\1\114"+ - "\1\121\1\114\1\0\1\114\1\122\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\2\0\1\46\1\0\1\47\1\0"+ - "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\3\0"+ - "\1\54\5\0\1\55\3\0\1\56\11\0\1\57\2\0"+ - "\1\60\16\0\1\61\2\0\1\62\41\0\2\25\1\63"+ - "\1\0\1\64\1\0\1\64\1\65\1\0\1\25\2\0"+ - "\1\116\1\323\31\35\1\117\12\120\1\64\1\114\1\121"+ - "\1\114\1\0\1\114\1\122\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\2\0\1\46\1\0\1\47\1\0\1\50"+ - "\1\0\1\51\1\0\1\52\1\0\1\53\3\0\1\54"+ - "\5\0\1\55\3\0\1\56\11\0\1\57\2\0\1\60"+ - "\16\0\1\61\2\0\1\62\41\0\2\25\1\63\1\0"+ - "\1\64\1\0\1\64\1\65\1\0\1\25\2\0\1\116"+ - "\1\323\2\35\1\323\1\341\1\35\2\323\1\35\3\323"+ - "\2\35\2\323\1\35\1\323\3\35\1\323\2\35\2\323"+ - "\1\117\12\120\1\64\1\114\1\121\1\114\1\0\1\114"+ - "\1\122\1\115\3\114\3\0\1\114\4\0\2\114\2\0"+ - "\1\46\1\0\1\47\1\0\1\50\1\0\1\51\1\0"+ - "\1\52\1\0\1\53\3\0\1\54\5\0\1\55\3\0"+ - "\1\56\11\0\1\57\2\0\1\60\16\0\1\61\2\0"+ - "\1\62\41\0\2\25\1\63\1\0\1\64\1\0\1\64"+ - "\1\65\1\0\1\25\2\0\1\116\6\323\1\35\5\323"+ - "\3\35\2\323\2\35\7\323\1\117\12\120\1\64\1\114"+ - "\1\121\1\114\1\0\1\114\1\122\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\2\0\1\46\1\0\1\47\1\0"+ - "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\3\0"+ - "\1\54\5\0\1\55\3\0\1\56\11\0\1\57\2\0"+ - "\1\60\16\0\1\61\2\0\1\62\41\0\2\25\1\63"+ - "\1\0\1\64\1\0\1\64\1\65\1\0\1\25\2\0"+ - "\1\116\1\35\2\323\1\336\1\342\3\323\1\35\3\323"+ - "\1\35\1\323\1\35\1\323\1\35\1\323\1\35\1\323"+ - "\1\35\3\323\1\35\1\323\1\117\12\120\1\64\1\114"+ - "\1\121\1\114\1\0\1\114\1\122\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\2\0\1\46\1\0\1\47\1\0"+ - "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\3\0"+ - "\1\54\5\0\1\55\3\0\1\56\11\0\1\57\2\0"+ - "\1\60\16\0\1\61\2\0\1\62\41\0\2\25\1\63"+ - "\1\0\1\64\1\0\1\64\1\65\1\0\1\25\2\0"+ - 
"\1\116\1\323\6\35\1\323\6\35\1\323\4\35\1\323"+ - "\4\35\2\323\1\117\12\120\1\64\1\114\1\121\1\114"+ - "\1\0\1\114\1\122\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\2\0\1\46\1\0\1\47\1\0\1\50\1\0"+ - "\1\51\1\0\1\52\1\0\1\53\3\0\1\54\5\0"+ - "\1\55\3\0\1\56\11\0\1\57\2\0\1\60\16\0"+ - "\1\61\2\0\1\62\41\0\2\25\1\63\1\0\1\64"+ - "\1\0\1\64\1\65\1\0\1\25\2\0\1\116\6\35"+ - "\1\323\7\35\1\323\13\35\1\117\12\120\1\64\1\114"+ - "\1\121\1\114\1\0\1\114\1\122\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\2\0\1\46\1\0\1\47\1\0"+ - "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\3\0"+ - "\1\54\5\0\1\55\3\0\1\56\11\0\1\57\2\0"+ - "\1\60\16\0\1\61\2\0\1\62\41\0\2\25\1\63"+ - "\1\0\1\64\1\0\1\64\1\65\1\0\1\25\2\0"+ - "\1\116\13\35\1\343\16\35\1\117\12\120\1\64\1\114"+ - "\1\121\1\114\1\0\1\114\1\122\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\2\0\1\46\1\0\1\47\1\0"+ - "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\3\0"+ - "\1\54\5\0\1\55\3\0\1\56\11\0\1\57\2\0"+ - "\1\60\16\0\1\61\2\0\1\62\41\0\2\25\1\63"+ - "\1\0\1\64\1\0\1\64\1\65\1\0\1\25\2\0"+ - "\1\116\1\323\11\35\1\323\6\35\1\323\10\35\1\117"+ - "\12\120\1\64\1\114\1\121\1\114\1\0\1\114\1\122"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\2\0\1\46"+ - "\1\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ - "\1\0\1\53\3\0\1\54\5\0\1\55\3\0\1\56"+ - "\11\0\1\57\2\0\1\60\16\0\1\61\2\0\1\62"+ - "\41\0\2\25\1\63\1\0\1\64\1\0\1\64\1\65"+ - "\1\0\1\25\2\0\1\116\1\323\1\35\6\323\1\344"+ - "\1\35\2\323\2\35\2\323\1\35\1\323\1\35\6\323"+ - "\1\35\1\117\12\120\1\64\1\114\1\121\1\114\1\0"+ - "\1\114\1\122\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\2\0\1\46\1\0\1\47\1\0\1\50\1\0\1\51"+ - "\1\0\1\52\1\0\1\53\3\0\1\54\5\0\1\55"+ - "\3\0\1\56\11\0\1\57\2\0\1\60\16\0\1\61"+ - "\2\0\1\62\41\0\2\25\1\63\1\0\1\64\1\0"+ - "\1\64\1\65\1\0\1\25\2\0\1\116\4\35\1\323"+ - "\5\35\2\323\3\35\2\323\10\35\1\323\1\117\12\120"+ - "\1\64\1\114\1\121\1\114\1\0\1\114\1\122\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\2\0\1\46\1\0"+ - "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ - "\1\53\3\0\1\54\5\0\1\55\3\0\1\56\11\0"+ - 
"\1\57\2\0\1\60\16\0\1\61\2\0\1\62\41\0"+ - "\2\25\1\63\1\0\1\64\1\0\1\64\1\65\1\0"+ - "\1\25\2\0\1\116\3\35\1\323\1\35\1\345\4\35"+ - "\1\323\2\35\1\323\14\35\1\117\12\120\1\64\1\114"+ - "\1\121\1\114\1\0\1\114\1\122\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\2\0\1\46\1\0\1\47\1\0"+ - "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\3\0"+ - "\1\54\5\0\1\55\3\0\1\56\11\0\1\57\2\0"+ - "\1\60\16\0\1\61\2\0\1\62\41\0\2\25\1\63"+ - "\1\0\1\64\1\0\1\64\1\65\1\0\1\25\2\0"+ - "\1\116\2\323\1\35\1\323\3\35\2\323\2\35\1\323"+ - "\4\35\1\323\11\35\1\117\12\120\1\64\1\114\1\121"+ - "\1\114\1\0\1\114\1\122\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\2\0\1\46\1\0\1\47\1\0\1\50"+ - "\1\0\1\51\1\0\1\52\1\0\1\53\3\0\1\54"+ - "\5\0\1\55\3\0\1\56\11\0\1\57\2\0\1\60"+ - "\16\0\1\61\2\0\1\62\41\0\2\25\1\63\1\0"+ - "\1\64\1\0\1\64\1\65\1\0\1\25\2\0\1\116"+ - "\3\35\1\323\13\35\1\323\12\35\1\117\12\120\1\64"+ - "\1\114\1\121\1\114\1\0\1\114\1\122\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\2\0\1\46\1\0\1\47"+ + "\1\0\1\161\3\0\1\162\5\0\1\163\3\0\1\164"+ + "\11\0\1\60\2\0\1\165\16\0\1\166\2\0\1\167"+ + "\41\0\1\25\1\65\7\0\1\65\2\0\1\25\1\123"+ + "\1\220\1\221\1\222\1\223\1\224\1\225\1\226\1\227"+ + "\1\230\1\231\1\232\1\233\1\234\1\235\1\236\1\237"+ + "\1\240\1\241\1\242\1\243\1\244\1\245\1\246\1\247"+ + "\1\250\1\251\1\124\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\1\150\3\0\2\124\150\0"+ + "\1\123\32\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\2\0\1\47"+ "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\53"+ - "\3\0\1\54\5\0\1\55\3\0\1\56\11\0\1\57"+ - "\2\0\1\60\16\0\1\61\2\0\1\62\41\0\2\25"+ - "\1\63\1\0\1\64\1\0\1\64\1\65\1\0\1\25"+ - "\2\0\1\116\3\35\2\323\2\35\2\323\1\35\2\323"+ - "\1\35\1\323\3\35\1\323\1\35\1\323\1\35\1\323"+ - "\2\35\1\323\1\35\1\117\12\120\1\64\1\114\1\121"+ - "\1\114\1\0\1\114\1\122\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\147\0\1\346\32\235\1\117\12\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\14\0\1\310\3\0\1\311\5\0\1\312\3\0"+ - 
"\1\313\14\0\1\314\16\0\1\315\2\0\1\316\42\0"+ - "\1\150\1\63\6\0\1\150\2\0\1\113\1\244\1\245"+ - "\1\246\1\247\1\250\1\251\1\252\1\253\1\254\1\255"+ - "\1\256\1\257\1\260\1\261\1\262\1\263\1\264\1\265"+ - "\1\266\1\267\1\270\1\271\1\272\1\273\1\274\1\275"+ - "\1\114\12\120\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\1\140\3\0\2\114\14\0\1\310\3\0"+ - "\1\311\5\0\1\312\3\0\1\313\14\0\1\314\16\0"+ - "\1\315\2\0\1\316\42\0\1\150\1\63\6\0\1\150"+ - "\2\0\1\113\33\114\12\240\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\2\0\1\46"+ - "\1\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ - "\1\0\1\143\3\0\1\54\5\0\1\55\3\0\1\144"+ - "\11\0\1\57\2\0\1\145\16\0\1\146\2\0\1\147"+ - "\41\0\1\25\2\63\2\0\2\150\1\65\1\0\1\63"+ - "\2\0\1\347\32\133\1\114\12\240\1\0\1\114\1\121"+ - "\1\114\1\0\2\237\1\115\3\114\2\0\1\150\1\114"+ - "\4\0\2\114\2\0\1\46\1\0\1\47\1\0\1\50"+ - "\1\0\1\51\1\0\1\52\1\0\1\53\3\0\1\54"+ - "\5\0\1\55\3\0\1\56\11\0\1\57\2\0\1\60"+ - "\16\0\1\61\2\0\1\62\41\0\2\25\1\63\1\0"+ - "\1\64\1\0\1\64\1\65\1\0\1\25\2\0\1\116"+ - "\3\35\1\350\26\35\1\117\12\120\1\64\1\114\1\121"+ - "\1\114\1\0\1\114\1\122\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\2\0\1\46\1\0\1\47\1\0\1\50"+ - "\1\0\1\51\1\0\1\52\1\0\1\53\3\0\1\54"+ - "\5\0\1\55\3\0\1\56\11\0\1\57\2\0\1\60"+ - "\16\0\1\61\2\0\1\62\41\0\2\25\1\63\1\0"+ - "\1\64\1\0\1\64\1\65\1\0\1\25\2\0\1\116"+ - "\32\35\1\117\12\120\1\351\1\114\1\121\1\114\1\0"+ - "\1\114\1\122\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\2\0\1\46\1\0\1\47\1\0\1\50\1\0\1\51"+ - "\1\0\1\52\1\0\1\53\3\0\1\54\5\0\1\55"+ - "\3\0\1\56\11\0\1\57\2\0\1\60\16\0\1\61"+ - "\2\0\1\62\41\0\2\25\1\63\1\0\1\64\1\0"+ - "\1\64\1\65\1\0\1\25\2\0\1\116\15\35\1\352"+ - "\14\35\1\117\12\120\1\64\1\114\1\121\1\114\1\0"+ - "\1\114\1\122\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\1\235\2\353\1\354\1\355\10\353\1\235"+ - "\1\356\5\353\6\235\1\117\12\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\1\357\2\353\1\235\1\353\1\360\6\353\4\235"+ - 
"\1\353\1\235\2\353\1\235\1\353\1\235\3\353\1\117"+ - "\12\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\147\0\1\346\3\235\1\353\1\235"+ - "\1\353\4\235\1\353\10\235\1\353\2\235\1\353\2\235"+ - "\1\353\1\117\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\1\235"+ - "\1\353\1\361\2\353\2\235\1\353\6\235\3\353\11\235"+ - "\1\117\12\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\3\235\1\353"+ - "\1\235\1\353\10\235\1\353\1\235\2\353\10\235\1\117"+ - "\12\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\147\0\1\346\4\235\1\362\5\235"+ - "\1\353\17\235\1\117\12\235\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\4\235\2\353\2\235\1\353\1\235\1\353\13\235\1\353"+ - "\2\235\1\353\1\117\12\235\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\1\353\1\235\3\353\1\363\14\353\2\235\2\353\2\235"+ - "\1\353\1\235\1\117\12\235\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\2\235\4\353\3\235\2\353\1\364\1\353\1\235\2\353"+ - "\12\235\1\117\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\2\353"+ - "\2\235\1\353\3\235\1\353\5\235\3\353\3\235\1\353"+ - "\2\235\3\353\1\117\12\235\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\5\353\1\365\1\235\1\353\1\366\7\353\1\367\3\353"+ - "\1\235\1\353\1\235\3\353\1\117\12\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\1\370\1\353\1\235\1\357\6\353\3\235"+ - "\1\353\2\235\1\353\2\235\1\353\6\235\1\117\12\235"+ - "\1\0\3\114\1\0\2\114\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\147\0\1\346\1\353\31\235\1\117\12\235"+ - "\1\0\3\114\1\0\2\114\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\147\0\1\346\1\353\2\235\1\353\1\371"+ - "\1\235\2\353\1\235\3\353\2\235\2\353\1\235\1\353"+ - "\3\235\1\353\2\235\2\353\1\117\12\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - 
"\147\0\1\346\6\353\1\235\5\353\3\235\2\353\2\235"+ - "\7\353\1\117\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\1\235"+ - "\2\353\1\366\1\372\3\353\1\235\3\353\1\235\1\353"+ - "\1\235\1\353\1\235\1\353\1\235\1\353\1\235\3\353"+ - "\1\235\1\353\1\117\12\235\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\1\353\6\235\1\353\6\235\1\353\4\235\1\353\4\235"+ - "\2\353\1\117\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\6\235"+ - "\1\353\7\235\1\353\13\235\1\117\12\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\13\235\1\373\16\235\1\117\12\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\147\0\1\346\1\353\11\235\1\353\6\235\1\353"+ - "\10\235\1\117\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\1\353"+ - "\1\235\6\353\1\374\1\235\2\353\2\235\2\353\1\235"+ - "\1\353\1\235\6\353\1\235\1\117\12\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\4\235\1\353\5\235\2\353\3\235\2\353"+ - "\10\235\1\353\1\117\12\235\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\3\235\1\353\1\235\1\375\4\235\1\353\2\235\1\353"+ - "\14\235\1\117\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\2\353"+ - "\1\235\1\353\3\235\2\353\2\235\1\353\4\235\1\353"+ - "\11\235\1\117\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\3\235"+ - "\1\353\13\235\1\353\12\235\1\117\12\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\3\235\2\353\2\235\2\353\1\235\2\353"+ - "\1\235\1\353\3\235\1\353\1\235\1\353\1\235\1\353"+ - "\2\235\1\353\1\235\1\117\12\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\2\0"+ - "\1\46\1\0\1\47\1\0\1\50\1\0\1\51\1\0"+ - "\1\52\1\0\1\66\3\0\1\67\5\0\1\70\3\0"+ - "\1\71\11\0\1\57\2\0\1\72\16\0\1\73\2\0"+ - "\1\74\41\0\1\25\2\26\2\0\2\75\1\76\1\0"+ - 
"\1\26\2\0\1\376\32\35\1\117\12\277\1\0\1\114"+ - "\1\127\1\114\1\0\2\130\1\115\3\114\2\0\1\75"+ - "\1\114\4\0\2\114\2\0\1\46\1\0\1\47\1\0"+ - "\1\50\1\0\1\51\1\0\1\52\1\0\1\66\3\0"+ - "\1\67\5\0\1\70\3\0\1\71\11\0\1\57\2\0"+ - "\1\72\16\0\1\73\2\0\1\74\41\0\1\25\2\26"+ - "\2\0\2\75\1\76\1\0\1\26\2\0\1\376\32\35"+ - "\1\117\12\377\1\0\1\114\1\127\1\114\1\0\2\130"+ - "\1\115\3\114\2\0\1\75\1\114\4\0\2\114\2\0"+ - "\1\46\1\0\1\47\1\0\1\50\1\0\1\51\1\0"+ - "\1\52\1\0\1\66\3\0\1\67\5\0\1\70\3\0"+ - "\1\71\11\0\1\57\2\0\1\72\16\0\1\73\2\0"+ - "\1\74\41\0\1\25\2\26\2\0\2\75\1\76\1\0"+ - "\1\26\2\0\1\376\32\35\1\117\1\277\1\u0100\1\377"+ - "\2\277\2\377\2\277\1\377\1\0\1\114\1\127\1\114"+ - "\1\0\2\130\1\115\3\114\2\0\1\75\1\114\4\0"+ - "\2\114\2\0\1\46\1\0\1\47\1\0\1\50\1\0"+ - "\1\51\1\0\1\52\1\0\1\66\3\0\1\67\5\0"+ - "\1\70\3\0\1\71\11\0\1\57\2\0\1\72\16\0"+ - "\1\73\2\0\1\74\41\0\1\25\2\26\2\0\2\75"+ - "\1\76\1\0\1\26\2\0\1\u0101\32\35\1\117\12\301"+ - "\1\0\1\114\1\127\1\114\1\0\2\130\1\115\3\114"+ - "\2\0\1\75\1\114\4\0\2\114\2\0\1\46\1\0"+ - "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ - "\1\151\3\0\1\152\5\0\1\153\3\0\1\154\11\0"+ - "\1\57\2\0\1\155\16\0\1\156\2\0\1\157\41\0"+ - "\1\25\1\64\7\0\1\64\2\0\1\113\32\133\13\114"+ - "\1\0\3\114\1\0\2\114\1\115\3\114\3\0\1\114"+ - "\1\140\3\0\2\114\14\0\1\165\3\0\1\166\5\0"+ - "\1\167\3\0\1\170\14\0\1\171\16\0\1\172\2\0"+ - "\1\173\42\0\1\75\1\26\6\0\1\75\2\0\1\113"+ - "\33\114\12\134\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\1\140\3\0\2\114\150\0\4\u0102\2\0"+ - "\1\u0102\15\0\1\u0102\6\0\12\u0102\1\305\174\0\4\u0103"+ - "\2\0\1\u0103\15\0\1\u0103\6\0\12\u0103\1\u0104\174\0"+ - "\4\u0105\2\0\1\u0105\15\0\1\u0105\6\0\1\u0106\2\u0107"+ - "\1\u0106\4\u0107\1\u0108\1\u0107\14\0\1\u0109\157\0\46\114"+ - "\1\0\3\114\1\0\2\114\1\0\3\114\3\0\1\114"+ - "\1\140\3\0\2\114\3\0\1\150\37\0\1\150\1\0"+ - "\2\150\16\0\1\150\4\0\1\150\2\0\2\150\10\0"+ - "\1\63\4\0\1\150\132\0\1\63\102\0\1\63\242\0"+ - 
"\2\63\227\0\1\150\246\0\2\150\11\0\1\150\210\0"+ - "\2\150\6\0\1\150\151\0\1\150\3\0\1\150\2\0"+ - "\1\150\3\0\1\150\5\0\1\150\7\0\1\150\4\0"+ - "\2\150\3\0\2\150\1\0\1\150\4\0\1\150\1\0"+ - "\1\150\2\0\2\150\1\0\3\150\1\0\1\150\2\0"+ - "\4\150\2\0\1\150\302\0\1\u010a\1\u010b\1\u010c\1\u010d"+ - "\1\u010e\1\u010f\1\u0110\1\u0111\1\u0112\1\u0113\1\u0114\1\u0115"+ - "\1\u0116\1\u0117\1\u0118\1\u0119\1\u011a\1\u011b\1\u011c\1\u011d"+ - "\1\u011e\1\u011f\1\u0120\1\u0121\1\u0122\1\u0123\1\0\12\201"+ - "\175\0\32\201\1\320\12\201\174\0\74\202\1\0\1\46"+ - "\1\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ - "\1\0\1\53\3\0\1\54\5\0\1\55\3\0\1\56"+ - "\11\0\1\57\2\0\1\60\16\0\1\61\2\0\1\62"+ - "\41\0\2\25\1\63\1\0\1\64\1\0\1\64\1\65"+ - "\1\0\1\25\2\0\1\u0124\32\35\1\117\12\120\1\u0125"+ - "\1\114\1\121\1\114\1\0\1\114\1\122\1\115\1\u0126"+ - "\1\u0127\1\u0128\3\0\1\114\4\0\2\114\2\0\1\46"+ - "\1\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ - "\1\0\1\53\3\0\1\54\5\0\1\55\3\0\1\56"+ - "\11\0\1\57\2\0\1\60\16\0\1\61\2\0\1\62"+ - "\41\0\2\25\1\63\1\0\1\64\1\0\1\64\1\65"+ - "\1\0\1\25\2\0\1\u0124\4\35\1\u0129\25\35\1\117"+ - "\12\120\1\u0125\1\114\1\121\1\114\1\0\1\114\1\122"+ - "\1\115\1\u0126\1\u0127\1\u0128\3\0\1\114\4\0\2\114"+ - "\2\0\1\46\1\0\1\47\1\0\1\50\1\0\1\51"+ - "\1\0\1\52\1\0\1\53\3\0\1\54\5\0\1\55"+ - "\3\0\1\56\11\0\1\57\2\0\1\60\16\0\1\61"+ - "\2\0\1\62\41\0\2\25\1\63\1\0\1\64\1\0"+ - "\1\64\1\65\1\0\1\25\2\0\1\u0124\15\35\1\217"+ - "\14\35\1\117\12\120\1\u0125\1\114\1\121\1\114\1\0"+ - "\1\114\1\122\1\115\1\u0126\1\u0127\1\u0128\3\0\1\114"+ - "\4\0\2\114\2\0\1\46\1\0\1\47\1\0\1\50"+ - "\1\0\1\51\1\0\1\52\1\0\1\53\3\0\1\54"+ - "\5\0\1\55\3\0\1\56\11\0\1\57\2\0\1\60"+ - "\16\0\1\61\2\0\1\62\41\0\2\25\1\63\1\0"+ - "\1\64\1\0\1\64\1\65\1\0\1\25\2\0\1\u0124"+ - "\10\35\1\217\21\35\1\117\12\120\1\u0125\1\114\1\121"+ - "\1\114\1\0\1\114\1\122\1\115\1\u0126\1\u0127\1\u0128"+ - "\3\0\1\114\4\0\2\114\2\0\1\46\1\0\1\47"+ - "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\53"+ - 
"\3\0\1\54\5\0\1\55\3\0\1\56\11\0\1\57"+ - "\2\0\1\60\16\0\1\61\2\0\1\62\41\0\2\25"+ - "\1\63\1\0\1\64\1\0\1\64\1\65\1\0\1\25"+ - "\2\0\1\u0124\17\35\1\323\12\35\1\117\12\120\1\u0125"+ - "\1\114\1\121\1\114\1\0\1\114\1\122\1\115\1\u0126"+ - "\1\u0127\1\u0128\3\0\1\114\4\0\2\114\2\0\1\46"+ - "\1\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ - "\1\0\1\53\3\0\1\54\5\0\1\55\3\0\1\56"+ - "\11\0\1\57\2\0\1\60\16\0\1\61\2\0\1\62"+ - "\41\0\2\25\1\63\1\0\1\64\1\0\1\64\1\65"+ - "\1\0\1\25\2\0\1\u0124\5\35\1\u012a\4\35\1\323"+ - "\17\35\1\117\12\120\1\u0125\1\114\1\121\1\114\1\0"+ - "\1\114\1\122\1\115\1\u0126\1\u0127\1\u0128\3\0\1\114"+ - "\4\0\2\114\2\0\1\46\1\0\1\47\1\0\1\50"+ - "\1\0\1\51\1\0\1\52\1\0\1\53\3\0\1\54"+ - "\5\0\1\55\3\0\1\56\11\0\1\57\2\0\1\60"+ - "\16\0\1\61\2\0\1\62\41\0\2\25\1\63\1\0"+ - "\1\64\1\0\1\64\1\65\1\0\1\25\2\0\1\116"+ - "\20\35\1\323\11\35\1\117\12\120\1\64\1\114\1\121"+ - "\1\114\1\0\1\114\1\122\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\2\0\1\46\1\0\1\47\1\0\1\50"+ - "\1\0\1\51\1\0\1\52\1\0\1\53\3\0\1\54"+ - "\5\0\1\55\3\0\1\56\11\0\1\57\2\0\1\60"+ - "\16\0\1\61\2\0\1\62\41\0\2\25\1\63\1\0"+ - "\1\64\1\0\1\64\1\65\1\0\1\25\2\0\1\116"+ - "\7\35\1\323\22\35\1\117\12\120\1\64\1\114\1\121"+ - "\1\114\1\0\1\114\1\122\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\2\0\1\46\1\0\1\47\1\0\1\50"+ - "\1\0\1\51\1\0\1\52\1\0\1\53\3\0\1\54"+ - "\5\0\1\55\3\0\1\56\11\0\1\57\2\0\1\60"+ - "\16\0\1\61\2\0\1\62\41\0\2\25\1\63\1\0"+ - "\1\64\1\0\1\64\1\65\1\0\1\25\2\0\1\116"+ - "\27\35\1\323\2\35\1\117\12\120\1\64\1\114\1\121"+ - "\1\114\1\0\1\114\1\122\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\2\0\1\46\1\0\1\47\1\0\1\50"+ - "\1\0\1\51\1\0\1\52\1\0\1\53\3\0\1\54"+ - "\5\0\1\55\3\0\1\56\11\0\1\57\2\0\1\60"+ - "\16\0\1\61\2\0\1\62\41\0\2\25\1\63\1\0"+ - "\1\64\1\0\1\64\1\65\1\0\1\25\2\0\1\u0124"+ - "\6\35\1\u0129\10\35\1\323\12\35\1\117\12\120\1\u0125"+ - "\1\114\1\121\1\114\1\0\1\114\1\122\1\115\1\u0126"+ - "\1\u0127\1\u0128\3\0\1\114\4\0\2\114\2\0\1\46"+ - 
"\1\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ - "\1\0\1\53\3\0\1\54\5\0\1\55\3\0\1\56"+ - "\11\0\1\57\2\0\1\60\16\0\1\61\2\0\1\62"+ - "\41\0\2\25\1\63\1\0\1\64\1\0\1\64\1\65"+ - "\1\0\1\25\2\0\1\u0124\24\35\1\u012b\5\35\1\117"+ - "\12\120\1\u0125\1\114\1\121\1\114\1\0\1\114\1\122"+ - "\1\115\1\u0126\1\u0127\1\u0128\3\0\1\114\4\0\2\114"+ - "\2\0\1\46\1\0\1\47\1\0\1\50\1\0\1\51"+ - "\1\0\1\52\1\0\1\53\3\0\1\54\5\0\1\55"+ - "\3\0\1\56\11\0\1\57\2\0\1\60\16\0\1\61"+ - "\2\0\1\62\41\0\2\25\1\63\1\0\1\64\1\0"+ - "\1\64\1\65\1\0\1\25\2\0\1\116\11\35\1\323"+ - "\20\35\1\117\12\120\1\64\1\114\1\121\1\114\1\0"+ - "\1\114\1\122\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\2\0\1\46\1\0\1\47\1\0\1\50\1\0\1\51"+ - "\1\0\1\52\1\0\1\53\3\0\1\54\5\0\1\55"+ - "\3\0\1\56\11\0\1\57\2\0\1\60\16\0\1\61"+ - "\2\0\1\62\41\0\2\25\1\63\1\0\1\64\1\0"+ - "\1\64\1\65\1\0\1\25\2\0\1\u0124\16\35\1\u012c"+ - "\13\35\1\117\12\120\1\u0125\1\114\1\121\1\114\1\0"+ - "\1\114\1\122\1\115\1\u0126\1\u0127\1\u0128\3\0\1\114"+ - "\4\0\2\114\2\0\1\46\1\0\1\47\1\0\1\50"+ - "\1\0\1\51\1\0\1\52\1\0\1\53\3\0\1\54"+ - "\5\0\1\55\3\0\1\56\11\0\1\57\2\0\1\60"+ - "\16\0\1\61\2\0\1\62\41\0\2\25\1\63\1\0"+ - "\1\64\1\0\1\64\1\65\1\0\1\25\2\0\1\u0124"+ - "\12\35\1\u012d\17\35\1\117\12\120\1\u0125\1\114\1\121"+ - "\1\114\1\0\1\114\1\122\1\115\1\u0126\1\u0127\1\u0128"+ - "\3\0\1\114\4\0\2\114\2\0\1\46\1\0\1\47"+ - "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\53"+ - "\3\0\1\54\5\0\1\55\3\0\1\56\11\0\1\57"+ - "\2\0\1\60\16\0\1\61\2\0\1\62\41\0\2\25"+ - "\1\63\1\0\1\64\1\0\1\64\1\65\1\0\1\25"+ - "\2\0\1\u0124\5\35\1\323\24\35\1\117\12\120\1\u0125"+ - "\1\114\1\121\1\114\1\0\1\114\1\122\1\115\1\u0126"+ - "\1\u0127\1\u0128\3\0\1\114\4\0\2\114\2\0\1\46"+ - "\1\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ - "\1\0\1\53\3\0\1\54\5\0\1\55\3\0\1\56"+ - "\11\0\1\57\2\0\1\60\16\0\1\61\2\0\1\62"+ - "\41\0\2\25\1\63\1\0\1\64\1\0\1\64\1\65"+ - "\1\0\1\25\2\0\1\u0124\1\u012e\31\35\1\117\12\120"+ - "\1\u0125\1\114\1\121\1\114\1\0\1\114\1\122\1\115"+ - 
"\1\u0126\1\u0127\1\u0128\3\0\1\114\4\0\2\114\2\0"+ - "\1\46\1\0\1\47\1\0\1\50\1\0\1\51\1\0"+ - "\1\52\1\0\1\53\3\0\1\54\5\0\1\55\3\0"+ - "\1\56\11\0\1\57\2\0\1\60\16\0\1\61\2\0"+ - "\1\62\41\0\2\25\1\63\1\0\1\64\1\0\1\64"+ - "\1\65\1\0\1\25\2\0\1\116\32\35\1\u012f\12\120"+ - "\1\64\1\114\1\121\1\114\1\0\1\114\1\122\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\2\0\1\46\1\0"+ - "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ - "\1\53\3\0\1\54\5\0\1\55\3\0\1\56\11\0"+ - "\1\57\2\0\1\60\16\0\1\61\2\0\1\62\41\0"+ - "\2\25\1\63\1\0\1\64\1\0\1\64\1\65\1\0"+ - "\1\25\2\0\1\u0124\23\35\1\323\6\35\1\117\12\120"+ - "\1\u0125\1\114\1\121\1\114\1\0\1\114\1\122\1\115"+ - "\1\u0126\1\u0127\1\u0128\3\0\1\114\4\0\2\114\2\0"+ - "\1\46\1\0\1\47\1\0\1\50\1\0\1\51\1\0"+ - "\1\52\1\0\1\53\3\0\1\54\5\0\1\55\3\0"+ - "\1\56\11\0\1\57\2\0\1\60\16\0\1\61\2\0"+ - "\1\62\41\0\2\25\1\63\1\0\1\64\1\0\1\64"+ - "\1\65\1\0\1\25\2\0\1\u0124\24\35\1\u0130\5\35"+ - "\1\117\12\120\1\u0125\1\114\1\121\1\114\1\0\1\114"+ - "\1\122\1\115\1\u0126\1\u0127\1\u0128\3\0\1\114\4\0"+ - "\2\114\147\0\1\113\1\244\1\245\1\246\1\247\1\250"+ - "\1\251\1\252\1\253\1\254\1\255\1\256\1\257\1\260"+ + "\1\0\1\153\3\0\1\55\5\0\1\56\3\0\1\154"+ + "\11\0\1\60\2\0\1\155\16\0\1\156\2\0\1\157"+ + "\41\0\1\25\2\64\2\0\2\160\1\66\1\0\1\64"+ + "\2\0\1\25\1\253\32\36\1\127\12\130\1\0\1\124"+ + "\1\131\1\124\1\0\2\254\1\125\3\124\2\0\1\160"+ + "\1\124\4\0\2\124\2\0\1\47\1\0\1\50\1\0"+ + "\1\51\1\0\1\52\1\0\1\53\1\0\1\170\3\0"+ + "\1\55\5\0\1\56\3\0\1\171\11\0\1\60\2\0"+ + "\1\172\16\0\1\173\2\0\1\174\21\0\1\113\17\0"+ + "\1\25\1\66\1\64\1\115\3\0\1\66\1\0\1\66"+ + "\2\0\1\25\1\123\32\143\1\124\12\255\1\0\1\124"+ + "\1\131\1\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\2\0\1\47\1\0\1\50\1\0\1\51"+ + "\1\0\1\52\1\0\1\53\1\0\1\161\3\0\1\162"+ + "\5\0\1\163\3\0\1\164\11\0\1\60\2\0\1\165"+ + "\16\0\1\166\2\0\1\167\41\0\1\25\1\65\7\0"+ + "\1\65\2\0\1\25\1\123\32\143\13\124\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + 
"\2\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ + "\1\0\1\53\1\0\1\54\3\0\1\55\5\0\1\56"+ + "\3\0\1\57\11\0\1\60\2\0\1\61\16\0\1\62"+ + "\2\0\1\63\41\0\2\25\1\64\1\0\1\65\1\0"+ + "\1\65\1\66\1\0\1\25\2\0\1\25\1\126\11\36"+ + "\1\256\20\36\1\127\12\130\1\65\1\124\1\131\1\124"+ + "\1\0\1\124\1\132\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\2\0\1\47\1\0\1\50\1\0\1\51\1\0"+ + "\1\52\1\0\1\53\1\0\1\54\3\0\1\55\5\0"+ + "\1\56\3\0\1\57\11\0\1\60\2\0\1\61\16\0"+ + "\1\62\2\0\1\63\41\0\2\25\1\64\1\0\1\65"+ + "\1\0\1\65\1\66\1\0\1\25\2\0\1\25\1\126"+ + "\15\36\1\257\14\36\1\127\12\130\1\65\1\124\1\131"+ + "\1\124\1\0\1\124\1\132\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\2\0\1\47\1\0\1\50\1\0\1\51"+ + "\1\0\1\52\1\0\1\53\1\0\1\54\3\0\1\55"+ + "\5\0\1\56\3\0\1\57\11\0\1\60\2\0\1\61"+ + "\16\0\1\62\2\0\1\63\41\0\2\25\1\64\1\0"+ + "\1\65\1\0\1\65\1\66\1\0\1\25\2\0\1\25"+ + "\1\126\17\36\1\260\12\36\1\127\12\130\1\65\1\124"+ + "\1\131\1\124\1\0\1\124\1\132\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\14\0\1\175\3\0\1\176\5\0"+ + "\1\177\3\0\1\200\14\0\1\201\16\0\1\202\2\0"+ + "\1\203\42\0\1\76\1\26\6\0\1\76\3\0\1\123"+ "\1\261\1\262\1\263\1\264\1\265\1\266\1\267\1\270"+ - "\1\271\1\272\1\273\1\274\1\275\1\114\12\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\1\140"+ - "\3\0\2\114\14\0\1\310\3\0\1\311\5\0\1\312"+ - "\3\0\1\313\14\0\1\314\16\0\1\315\2\0\1\316"+ - "\42\0\1\150\1\63\6\0\1\150\2\0\1\113\33\114"+ - "\12\240\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\1\140\3\0\2\114\2\0\1\46\1\0\1\47"+ - "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\53"+ - "\3\0\1\54\5\0\1\55\3\0\1\56\11\0\1\57"+ - "\2\0\1\60\16\0\1\61\2\0\1\62\41\0\2\25"+ - "\1\63\1\0\1\64\1\0\1\64\1\65\1\0\1\25"+ - "\2\0\1\116\32\35\1\117\12\120\1\u0131\1\114\1\121"+ - "\1\114\1\0\1\114\1\122\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\2\0\1\46\1\0\1\47\1\0\1\50"+ - "\1\0\1\51\1\0\1\52\1\0\1\151\3\0\1\152"+ - "\5\0\1\153\3\0\1\154\11\0\1\57\2\0\1\155"+ - "\16\0\1\156\2\0\1\157\41\0\1\25\1\64\7\0"+ - "\1\64\3\0\32\25\24\0\1\u0132\15\0\1\46\1\0"+ + 
"\1\271\1\272\1\273\1\274\1\275\1\276\1\277\1\300"+ + "\1\301\1\302\1\303\1\304\1\305\1\306\1\307\1\310"+ + "\1\311\1\312\1\124\1\313\2\314\1\313\5\314\1\315"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\1\150\3\0\2\124\2\0\1\47\1\0\1\50\1\0"+ + "\1\51\1\0\1\52\1\0\1\53\1\0\1\204\3\0"+ + "\1\70\5\0\1\71\3\0\1\205\11\0\1\60\2\0"+ + "\1\206\16\0\1\207\2\0\1\210\21\0\1\113\17\0"+ + "\1\25\1\77\1\26\1\115\3\0\1\77\1\0\1\77"+ + "\2\0\1\25\1\123\32\143\1\124\12\144\1\0\1\124"+ + "\1\137\1\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\14\0\1\175\3\0\1\176\5\0\1\177"+ + "\3\0\1\200\14\0\1\201\16\0\1\202\2\0\1\203"+ + "\42\0\1\76\1\26\6\0\1\76\3\0\1\123\33\124"+ + "\12\144\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\2\0\1\47\1\0\1\50\1\0"+ + "\1\51\1\0\1\52\1\0\1\53\1\0\1\67\3\0"+ + "\1\70\5\0\1\71\3\0\1\72\11\0\1\60\2\0"+ + "\1\73\16\0\1\74\2\0\1\75\41\0\1\25\2\26"+ + "\2\0\2\76\1\77\1\0\1\26\2\0\1\25\1\136"+ + "\32\36\1\127\12\316\1\0\1\124\1\137\1\124\1\0"+ + "\2\140\1\125\3\124\2\0\1\76\1\124\4\0\2\124"+ + "\2\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ + "\1\0\1\53\1\0\1\67\3\0\1\70\5\0\1\71"+ + "\3\0\1\72\11\0\1\60\2\0\1\73\16\0\1\74"+ + "\2\0\1\75\41\0\1\25\2\26\2\0\2\76\1\77"+ + "\1\0\1\26\2\0\1\25\1\136\32\36\1\127\2\141"+ + "\1\316\2\141\2\316\1\141\1\316\1\141\1\0\1\124"+ + "\1\137\1\124\1\0\2\140\1\125\3\124\2\0\1\76"+ + "\1\124\4\0\2\124\2\0\1\47\1\0\1\50\1\0"+ + "\1\51\1\0\1\52\1\0\1\53\1\0\1\54\3\0"+ + "\1\55\5\0\1\56\3\0\1\57\11\0\1\60\2\0"+ + "\1\61\16\0\1\62\2\0\1\63\41\0\2\25\1\64"+ + "\1\0\1\65\1\0\1\65\1\66\1\0\1\25\2\0"+ + "\1\25\1\317\32\143\1\124\12\255\1\65\1\124\1\131"+ + "\1\124\1\0\1\124\1\132\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\2\0\1\47\1\0\1\50\1\0\1\51"+ + "\1\0\1\52\1\0\1\53\1\0\1\67\3\0\1\70"+ + "\5\0\1\71\3\0\1\72\11\0\1\60\2\0\1\73"+ + "\16\0\1\74\2\0\1\75\41\0\1\25\2\26\2\0"+ + "\2\76\1\77\1\0\1\26\2\0\1\25\1\320\32\143"+ + "\1\124\12\144\1\0\1\124\1\137\1\124\1\0\2\140"+ + 
"\1\125\3\124\2\0\1\76\1\124\4\0\2\124\151\0"+ + "\4\321\2\0\1\321\15\0\1\321\6\0\12\321\1\322"+ + "\242\0\1\323\174\0\1\324\54\0\1\125\165\0\74\150"+ + "\2\0\1\64\11\0\3\25\5\0\1\25\1\0\1\25"+ + "\1\0\1\25\4\0\1\25\4\0\1\64\1\0\2\64"+ + "\4\0\1\25\5\0\1\25\3\0\1\64\4\0\1\64"+ + "\2\25\2\64\10\0\1\64\1\0\2\25\1\0\1\64"+ + "\10\0\1\25\120\0\1\25\3\0\1\25\6\0\2\25"+ + "\5\0\1\25\1\0\1\25\1\0\1\25\1\0\11\25"+ + "\2\0\1\25\4\0\1\25\4\0\6\25\2\0\1\25"+ + "\1\0\1\25\1\0\3\25\1\0\1\64\1\0\2\25"+ + "\4\0\3\25\1\0\1\25\10\0\1\25\1\0\2\25"+ + "\115\0\1\25\3\0\1\25\5\0\1\25\32\0\15\25"+ + "\5\0\3\25\1\0\1\25\5\0\1\25\2\64\5\0"+ + "\1\25\2\0\1\25\1\64\4\0\1\25\2\0\1\25"+ + "\1\0\1\25\177\0\2\64\6\0\1\64\152\0\1\64"+ + "\3\0\1\64\2\0\1\64\3\0\1\64\5\0\1\64"+ + "\7\0\1\64\4\0\2\64\3\0\2\64\1\0\1\64"+ + "\4\0\1\64\1\0\1\64\2\0\2\64\1\0\3\64"+ + "\1\0\1\64\2\0\4\64\2\0\1\64\147\0\1\325"+ + "\3\0\1\326\5\0\1\327\3\0\1\330\14\0\1\331"+ + "\16\0\1\332\2\0\1\333\42\0\1\160\1\64\6\0"+ + "\1\160\37\0\12\64\30\0\1\65\11\0\3\25\5\0"+ + "\1\25\1\0\1\25\1\0\1\25\4\0\1\25\4\0"+ + "\1\65\1\0\2\65\4\0\1\25\5\0\1\25\3\0"+ + "\1\65\4\0\1\65\2\25\2\65\12\0\2\25\1\0"+ + "\1\65\10\0\1\25\120\0\1\25\11\0\2\25\2\0"+ + "\5\25\2\0\2\25\4\0\6\25\1\0\2\25\4\0"+ + "\5\25\1\0\5\25\1\0\2\25\1\0\3\25\1\0"+ + "\4\25\1\0\5\25\2\0\1\25\1\0\1\25\1\0"+ + "\3\25\2\0\1\25\1\0\1\25\1\0\1\25\2\0"+ + "\1\25\113\0\1\25\3\0\1\25\5\0\2\25\3\0"+ + "\1\25\4\0\3\25\4\0\1\25\1\0\1\25\2\0"+ + "\1\25\1\0\2\25\4\0\1\25\1\0\1\25\3\0"+ + "\2\25\1\0\1\25\5\0\3\25\1\0\1\25\10\0"+ + "\1\25\4\0\1\25\10\0\1\25\120\0\1\25\3\0"+ + "\1\25\6\0\2\25\5\0\1\25\1\0\1\25\1\0"+ + "\1\25\1\0\11\25\2\0\1\25\4\0\1\25\4\0"+ + "\6\25\2\0\1\25\1\0\1\25\1\0\3\25\1\0"+ + "\1\65\1\0\2\25\4\0\3\25\1\0\1\25\10\0"+ + "\1\25\1\0\2\25\115\0\1\25\3\0\1\25\5\0"+ + "\1\25\32\0\15\25\5\0\3\25\1\0\1\25\5\0"+ + "\1\25\2\65\5\0\1\25\2\0\1\25\1\65\4\0"+ + "\1\25\2\0\1\25\1\0\1\25\177\0\2\65\6\0"+ + "\1\65\152\0\1\65\3\0\1\65\2\0\1\65\3\0"+ + 
"\1\65\5\0\1\65\7\0\1\65\4\0\2\65\3\0"+ + "\2\65\1\0\1\65\4\0\1\65\1\0\1\65\2\0"+ + "\2\65\1\0\3\65\1\0\1\65\2\0\4\65\2\0"+ + "\1\65\136\0\1\66\11\0\3\25\5\0\1\25\1\0"+ + "\1\25\1\0\1\25\4\0\1\25\4\0\1\66\1\0"+ + "\2\66\4\0\1\25\5\0\1\25\3\0\1\66\4\0"+ + "\1\66\2\25\2\66\10\0\1\64\1\0\2\25\1\0"+ + "\1\66\10\0\1\25\120\0\1\25\3\0\1\25\6\0"+ + "\2\25\5\0\1\25\1\0\1\25\1\0\1\25\1\0"+ + "\11\25\2\0\1\25\4\0\1\25\4\0\6\25\2\0"+ + "\1\25\1\0\1\25\1\0\3\25\1\0\1\66\1\0"+ + "\2\25\4\0\3\25\1\0\1\25\10\0\1\25\1\0"+ + "\2\25\115\0\1\25\3\0\1\25\5\0\1\25\32\0"+ + "\15\25\5\0\3\25\1\0\1\25\5\0\1\25\2\66"+ + "\5\0\1\25\2\0\1\25\1\66\4\0\1\25\2\0"+ + "\1\25\1\0\1\25\177\0\2\66\6\0\1\66\152\0"+ + "\1\66\3\0\1\66\2\0\1\66\3\0\1\66\5\0"+ + "\1\66\7\0\1\66\4\0\2\66\3\0\2\66\1\0"+ + "\1\66\4\0\1\66\1\0\1\66\2\0\2\66\1\0"+ + "\3\66\1\0\1\66\2\0\4\66\2\0\1\66\136\0"+ + "\1\76\37\0\1\76\1\0\2\76\16\0\1\76\4\0"+ + "\1\76\2\0\2\76\10\0\1\26\4\0\1\76\133\0"+ + "\1\26\102\0\1\26\243\0\2\26\230\0\1\76\247\0"+ + "\2\76\11\0\1\76\211\0\2\76\6\0\1\76\152\0"+ + "\1\76\3\0\1\76\2\0\1\76\3\0\1\76\5\0"+ + "\1\76\7\0\1\76\4\0\2\76\3\0\2\76\1\0"+ + "\1\76\4\0\1\76\1\0\1\76\2\0\2\76\1\0"+ + "\3\76\1\0\1\76\2\0\4\76\2\0\1\76\136\0"+ + "\1\77\11\0\3\25\5\0\1\25\1\0\1\25\1\0"+ + "\1\25\4\0\1\25\4\0\1\77\1\0\2\77\4\0"+ + "\1\25\5\0\1\25\3\0\1\77\4\0\1\77\2\25"+ + "\2\77\10\0\1\26\1\0\2\25\1\0\1\77\10\0"+ + "\1\25\120\0\1\25\3\0\1\25\6\0\2\25\5\0"+ + "\1\25\1\0\1\25\1\0\1\25\1\0\11\25\2\0"+ + "\1\25\4\0\1\25\4\0\6\25\2\0\1\25\1\0"+ + "\1\25\1\0\3\25\1\0\1\77\1\0\2\25\4\0"+ + "\3\25\1\0\1\25\10\0\1\25\1\0\2\25\115\0"+ + "\1\25\3\0\1\25\5\0\1\25\32\0\15\25\5\0"+ + "\3\25\1\0\1\25\5\0\1\25\2\77\5\0\1\25"+ + "\2\0\1\25\1\77\4\0\1\25\2\0\1\25\1\0"+ + "\1\25\177\0\2\77\6\0\1\77\152\0\1\77\3\0"+ + "\1\77\2\0\1\77\3\0\1\77\5\0\1\77\7\0"+ + "\1\77\4\0\2\77\3\0\2\77\1\0\1\77\4\0"+ + "\1\77\1\0\1\77\2\0\2\77\1\0\3\77\1\0"+ + "\1\77\2\0\4\77\2\0\1\77\136\0\1\115\37\0"+ + "\1\115\1\0\2\115\16\0\1\115\4\0\1\115\2\0"+ + 
"\2\115\15\0\1\115\226\0\1\115\247\0\2\115\11\0"+ + "\1\115\211\0\2\115\6\0\1\115\152\0\1\115\3\0"+ + "\1\115\2\0\1\115\3\0\1\115\5\0\1\115\7\0"+ + "\1\115\4\0\2\115\3\0\2\115\1\0\1\115\4\0"+ + "\1\115\1\0\1\115\2\0\2\115\1\0\3\115\1\0"+ + "\1\115\2\0\4\115\2\0\1\115\303\0\1\334\32\216"+ + "\1\335\12\216\175\0\61\217\1\0\1\336\4\217\1\337"+ + "\1\0\3\217\1\0\1\47\1\0\1\50\1\0\1\51"+ + "\1\0\1\52\1\0\1\53\1\0\1\54\3\0\1\55"+ + "\5\0\1\56\3\0\1\57\11\0\1\60\2\0\1\61"+ + "\16\0\1\62\2\0\1\63\41\0\2\25\1\64\1\0"+ + "\1\65\1\0\1\65\1\66\1\0\1\25\2\0\1\25"+ + "\1\126\1\36\2\340\1\341\1\342\10\340\1\36\1\343"+ + "\5\340\6\36\1\127\12\130\1\65\1\124\1\131\1\124"+ + "\1\0\1\124\1\132\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\2\0\1\47\1\0\1\50\1\0\1\51\1\0"+ + "\1\52\1\0\1\53\1\0\1\54\3\0\1\55\5\0"+ + "\1\56\3\0\1\57\11\0\1\60\2\0\1\61\16\0"+ + "\1\62\2\0\1\63\41\0\2\25\1\64\1\0\1\65"+ + "\1\0\1\65\1\66\1\0\1\25\2\0\1\25\1\126"+ + "\1\344\2\340\1\36\1\340\1\345\6\340\4\36\1\340"+ + "\1\36\2\340\1\36\1\340\1\36\3\340\1\127\12\130"+ + "\1\65\1\124\1\131\1\124\1\0\1\124\1\132\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\2\0\1\47\1\0"+ + "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\1\0"+ + "\1\54\3\0\1\55\5\0\1\56\3\0\1\57\11\0"+ + "\1\60\2\0\1\61\16\0\1\62\2\0\1\63\41\0"+ + "\2\25\1\64\1\0\1\65\1\0\1\65\1\66\1\0"+ + "\1\25\2\0\1\25\1\126\3\36\1\340\1\36\1\340"+ + "\4\36\1\340\10\36\1\340\2\36\1\340\2\36\1\340"+ + "\1\127\12\130\1\65\1\124\1\131\1\124\1\0\1\124"+ + "\1\132\1\125\3\124\3\0\1\124\4\0\2\124\2\0"+ "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ - "\1\53\3\0\1\54\5\0\1\55\3\0\1\56\11\0"+ - "\1\57\2\0\1\60\16\0\1\61\2\0\1\62\41\0"+ - "\2\25\1\63\1\0\1\64\1\0\1\64\1\65\1\0"+ - "\1\25\2\0\1\116\16\35\1\u0133\13\35\1\117\12\120"+ - "\1\u0134\1\114\1\121\1\114\1\0\1\114\1\122\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\u0135\32\235"+ - "\1\117\12\235\1\u0136\3\114\1\0\2\114\1\115\1\u0126"+ - "\1\u0127\1\u0128\3\0\1\114\4\0\2\114\147\0\1\u0135"+ - 
"\4\235\1\u0137\25\235\1\117\12\235\1\u0136\3\114\1\0"+ - "\2\114\1\115\1\u0126\1\u0127\1\u0128\3\0\1\114\4\0"+ - "\2\114\147\0\1\u0135\15\235\1\260\14\235\1\117\12\235"+ - "\1\u0136\3\114\1\0\2\114\1\115\1\u0126\1\u0127\1\u0128"+ - "\3\0\1\114\4\0\2\114\147\0\1\u0135\10\235\1\260"+ - "\21\235\1\117\12\235\1\u0136\3\114\1\0\2\114\1\115"+ - "\1\u0126\1\u0127\1\u0128\3\0\1\114\4\0\2\114\147\0"+ - "\1\u0135\17\235\1\353\12\235\1\117\12\235\1\u0136\3\114"+ - "\1\0\2\114\1\115\1\u0126\1\u0127\1\u0128\3\0\1\114"+ - "\4\0\2\114\147\0\1\u0135\5\235\1\u0138\4\235\1\353"+ - "\17\235\1\117\12\235\1\u0136\3\114\1\0\2\114\1\115"+ - "\1\u0126\1\u0127\1\u0128\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\20\235\1\353\11\235\1\117\12\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\7\235\1\353\22\235\1\117\12\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\147\0\1\346\27\235\1\353\2\235\1\117\12\235"+ - "\1\0\3\114\1\0\2\114\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\147\0\1\u0135\6\235\1\u0137\10\235\1\353"+ - "\12\235\1\117\12\235\1\u0136\3\114\1\0\2\114\1\115"+ - "\1\u0126\1\u0127\1\u0128\3\0\1\114\4\0\2\114\147\0"+ - "\1\u0135\24\235\1\u0139\5\235\1\117\12\235\1\u0136\3\114"+ - "\1\0\2\114\1\115\1\u0126\1\u0127\1\u0128\3\0\1\114"+ - "\4\0\2\114\147\0\1\346\11\235\1\353\20\235\1\117"+ - "\12\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\147\0\1\u0135\16\235\1\u013a\13\235"+ - "\1\117\12\235\1\u0136\3\114\1\0\2\114\1\115\1\u0126"+ - "\1\u0127\1\u0128\3\0\1\114\4\0\2\114\147\0\1\u0135"+ - "\12\235\1\u013b\17\235\1\117\12\235\1\u0136\3\114\1\0"+ - "\2\114\1\115\1\u0126\1\u0127\1\u0128\3\0\1\114\4\0"+ - "\2\114\147\0\1\u0135\5\235\1\353\24\235\1\117\12\235"+ - "\1\u0136\3\114\1\0\2\114\1\115\1\u0126\1\u0127\1\u0128"+ - "\3\0\1\114\4\0\2\114\147\0\1\u0135\1\u013c\31\235"+ - "\1\117\12\235\1\u0136\3\114\1\0\2\114\1\115\1\u0126"+ - "\1\u0127\1\u0128\3\0\1\114\4\0\2\114\147\0\1\346"+ - 
"\32\235\1\u012f\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\u0135\23\235"+ - "\1\353\6\235\1\117\12\235\1\u0136\3\114\1\0\2\114"+ - "\1\115\1\u0126\1\u0127\1\u0128\3\0\1\114\4\0\2\114"+ - "\147\0\1\u0135\24\235\1\u013d\5\235\1\117\12\235\1\u0136"+ - "\3\114\1\0\2\114\1\115\1\u0126\1\u0127\1\u0128\3\0"+ - "\1\114\4\0\2\114\14\0\1\165\3\0\1\166\5\0"+ - "\1\167\3\0\1\170\14\0\1\171\16\0\1\172\2\0"+ - "\1\173\42\0\1\75\1\26\6\0\1\75\2\0\1\113"+ - "\1\244\1\245\1\246\1\247\1\250\1\251\1\252\1\253"+ - "\1\254\1\255\1\256\1\257\1\260\1\261\1\262\1\263"+ + "\1\53\1\0\1\54\3\0\1\55\5\0\1\56\3\0"+ + "\1\57\11\0\1\60\2\0\1\61\16\0\1\62\2\0"+ + "\1\63\41\0\2\25\1\64\1\0\1\65\1\0\1\65"+ + "\1\66\1\0\1\25\2\0\1\25\1\126\1\36\1\340"+ + "\1\346\2\340\2\36\1\340\6\36\3\340\11\36\1\127"+ + "\12\130\1\65\1\124\1\131\1\124\1\0\1\124\1\132"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\2\0\1\47"+ + "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\53"+ + "\1\0\1\54\3\0\1\55\5\0\1\56\3\0\1\57"+ + "\11\0\1\60\2\0\1\61\16\0\1\62\2\0\1\63"+ + "\41\0\2\25\1\64\1\0\1\65\1\0\1\65\1\66"+ + "\1\0\1\25\2\0\1\25\1\126\3\36\1\340\1\36"+ + "\1\340\10\36\1\340\1\36\2\340\10\36\1\127\12\130"+ + "\1\65\1\124\1\131\1\124\1\0\1\124\1\132\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\2\0\1\47\1\0"+ + "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\1\0"+ + "\1\54\3\0\1\55\5\0\1\56\3\0\1\57\11\0"+ + "\1\60\2\0\1\61\16\0\1\62\2\0\1\63\41\0"+ + "\2\25\1\64\1\0\1\65\1\0\1\65\1\66\1\0"+ + "\1\25\2\0\1\25\1\126\4\36\1\347\5\36\1\340"+ + "\17\36\1\127\12\130\1\65\1\124\1\131\1\124\1\0"+ + "\1\124\1\132\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\2\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ + "\1\0\1\53\1\0\1\54\3\0\1\55\5\0\1\56"+ + "\3\0\1\57\11\0\1\60\2\0\1\61\16\0\1\62"+ + "\2\0\1\63\41\0\2\25\1\64\1\0\1\65\1\0"+ + "\1\65\1\66\1\0\1\25\2\0\1\25\1\126\4\36"+ + "\2\340\2\36\1\340\1\36\1\340\13\36\1\340\2\36"+ + "\1\340\1\127\12\130\1\65\1\124\1\131\1\124\1\0"+ + "\1\124\1\132\1\125\3\124\3\0\1\124\4\0\2\124"+ + 
"\2\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ + "\1\0\1\53\1\0\1\54\3\0\1\55\5\0\1\56"+ + "\3\0\1\57\11\0\1\60\2\0\1\61\16\0\1\62"+ + "\2\0\1\63\41\0\2\25\1\64\1\0\1\65\1\0"+ + "\1\65\1\66\1\0\1\25\2\0\1\25\1\126\1\340"+ + "\1\36\3\340\1\350\14\340\2\36\2\340\2\36\1\340"+ + "\1\36\1\127\12\130\1\65\1\124\1\131\1\124\1\0"+ + "\1\124\1\132\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\2\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ + "\1\0\1\53\1\0\1\54\3\0\1\55\5\0\1\56"+ + "\3\0\1\57\11\0\1\60\2\0\1\61\16\0\1\62"+ + "\2\0\1\63\41\0\2\25\1\64\1\0\1\65\1\0"+ + "\1\65\1\66\1\0\1\25\2\0\1\25\1\126\2\36"+ + "\4\340\3\36\2\340\1\351\1\340\1\36\2\340\12\36"+ + "\1\127\12\130\1\65\1\124\1\131\1\124\1\0\1\124"+ + "\1\132\1\125\3\124\3\0\1\124\4\0\2\124\2\0"+ + "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ + "\1\53\1\0\1\54\3\0\1\55\5\0\1\56\3\0"+ + "\1\57\11\0\1\60\2\0\1\61\16\0\1\62\2\0"+ + "\1\63\41\0\2\25\1\64\1\0\1\65\1\0\1\65"+ + "\1\66\1\0\1\25\2\0\1\25\1\126\2\340\2\36"+ + "\1\340\3\36\1\340\5\36\3\340\3\36\1\340\2\36"+ + "\3\340\1\127\12\130\1\65\1\124\1\131\1\124\1\0"+ + "\1\124\1\132\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\2\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ + "\1\0\1\53\1\0\1\54\3\0\1\55\5\0\1\56"+ + "\3\0\1\57\11\0\1\60\2\0\1\61\16\0\1\62"+ + "\2\0\1\63\41\0\2\25\1\64\1\0\1\65\1\0"+ + "\1\65\1\66\1\0\1\25\2\0\1\25\1\126\5\340"+ + "\1\352\1\36\1\340\1\353\7\340\1\354\3\340\1\36"+ + "\1\340\1\36\3\340\1\127\12\130\1\65\1\124\1\131"+ + "\1\124\1\0\1\124\1\132\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\2\0\1\47\1\0\1\50\1\0\1\51"+ + "\1\0\1\52\1\0\1\53\1\0\1\54\3\0\1\55"+ + "\5\0\1\56\3\0\1\57\11\0\1\60\2\0\1\61"+ + "\16\0\1\62\2\0\1\63\41\0\2\25\1\64\1\0"+ + "\1\65\1\0\1\65\1\66\1\0\1\25\2\0\1\25"+ + "\1\126\1\355\1\340\1\36\1\344\6\340\3\36\1\340"+ + "\2\36\1\340\2\36\1\340\6\36\1\127\12\130\1\65"+ + "\1\124\1\131\1\124\1\0\1\124\1\132\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\2\0\1\47\1\0\1\50"+ + "\1\0\1\51\1\0\1\52\1\0\1\53\1\0\1\54"+ + "\3\0\1\55\5\0\1\56\3\0\1\57\11\0\1\60"+ + 
"\2\0\1\61\16\0\1\62\2\0\1\63\41\0\2\25"+ + "\1\64\1\0\1\65\1\0\1\65\1\66\1\0\1\25"+ + "\2\0\1\25\1\126\1\340\31\36\1\127\12\130\1\65"+ + "\1\124\1\131\1\124\1\0\1\124\1\132\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\2\0\1\47\1\0\1\50"+ + "\1\0\1\51\1\0\1\52\1\0\1\53\1\0\1\54"+ + "\3\0\1\55\5\0\1\56\3\0\1\57\11\0\1\60"+ + "\2\0\1\61\16\0\1\62\2\0\1\63\41\0\2\25"+ + "\1\64\1\0\1\65\1\0\1\65\1\66\1\0\1\25"+ + "\2\0\1\25\1\126\1\340\2\36\1\340\1\356\1\36"+ + "\2\340\1\36\3\340\2\36\2\340\1\36\1\340\3\36"+ + "\1\340\2\36\2\340\1\127\12\130\1\65\1\124\1\131"+ + "\1\124\1\0\1\124\1\132\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\2\0\1\47\1\0\1\50\1\0\1\51"+ + "\1\0\1\52\1\0\1\53\1\0\1\54\3\0\1\55"+ + "\5\0\1\56\3\0\1\57\11\0\1\60\2\0\1\61"+ + "\16\0\1\62\2\0\1\63\41\0\2\25\1\64\1\0"+ + "\1\65\1\0\1\65\1\66\1\0\1\25\2\0\1\25"+ + "\1\126\6\340\1\36\5\340\3\36\2\340\2\36\7\340"+ + "\1\127\12\130\1\65\1\124\1\131\1\124\1\0\1\124"+ + "\1\132\1\125\3\124\3\0\1\124\4\0\2\124\2\0"+ + "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ + "\1\53\1\0\1\54\3\0\1\55\5\0\1\56\3\0"+ + "\1\57\11\0\1\60\2\0\1\61\16\0\1\62\2\0"+ + "\1\63\41\0\2\25\1\64\1\0\1\65\1\0\1\65"+ + "\1\66\1\0\1\25\2\0\1\25\1\126\1\36\2\340"+ + "\1\353\1\357\3\340\1\36\3\340\1\36\1\340\1\36"+ + "\1\340\1\36\1\340\1\36\1\340\1\36\3\340\1\36"+ + "\1\340\1\127\12\130\1\65\1\124\1\131\1\124\1\0"+ + "\1\124\1\132\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\2\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ + "\1\0\1\53\1\0\1\54\3\0\1\55\5\0\1\56"+ + "\3\0\1\57\11\0\1\60\2\0\1\61\16\0\1\62"+ + "\2\0\1\63\41\0\2\25\1\64\1\0\1\65\1\0"+ + "\1\65\1\66\1\0\1\25\2\0\1\25\1\126\1\340"+ + "\6\36\1\340\6\36\1\340\4\36\1\340\4\36\2\340"+ + "\1\127\12\130\1\65\1\124\1\131\1\124\1\0\1\124"+ + "\1\132\1\125\3\124\3\0\1\124\4\0\2\124\2\0"+ + "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ + "\1\53\1\0\1\54\3\0\1\55\5\0\1\56\3\0"+ + "\1\57\11\0\1\60\2\0\1\61\16\0\1\62\2\0"+ + "\1\63\41\0\2\25\1\64\1\0\1\65\1\0\1\65"+ + "\1\66\1\0\1\25\2\0\1\25\1\126\6\36\1\340"+ + 
"\7\36\1\340\13\36\1\127\12\130\1\65\1\124\1\131"+ + "\1\124\1\0\1\124\1\132\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\2\0\1\47\1\0\1\50\1\0\1\51"+ + "\1\0\1\52\1\0\1\53\1\0\1\54\3\0\1\55"+ + "\5\0\1\56\3\0\1\57\11\0\1\60\2\0\1\61"+ + "\16\0\1\62\2\0\1\63\41\0\2\25\1\64\1\0"+ + "\1\65\1\0\1\65\1\66\1\0\1\25\2\0\1\25"+ + "\1\126\13\36\1\360\16\36\1\127\12\130\1\65\1\124"+ + "\1\131\1\124\1\0\1\124\1\132\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\2\0\1\47\1\0\1\50\1\0"+ + "\1\51\1\0\1\52\1\0\1\53\1\0\1\54\3\0"+ + "\1\55\5\0\1\56\3\0\1\57\11\0\1\60\2\0"+ + "\1\61\16\0\1\62\2\0\1\63\41\0\2\25\1\64"+ + "\1\0\1\65\1\0\1\65\1\66\1\0\1\25\2\0"+ + "\1\25\1\126\1\340\11\36\1\340\6\36\1\340\10\36"+ + "\1\127\12\130\1\65\1\124\1\131\1\124\1\0\1\124"+ + "\1\132\1\125\3\124\3\0\1\124\4\0\2\124\2\0"+ + "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ + "\1\53\1\0\1\54\3\0\1\55\5\0\1\56\3\0"+ + "\1\57\11\0\1\60\2\0\1\61\16\0\1\62\2\0"+ + "\1\63\41\0\2\25\1\64\1\0\1\65\1\0\1\65"+ + "\1\66\1\0\1\25\2\0\1\25\1\126\1\340\1\36"+ + "\6\340\1\361\1\36\2\340\2\36\2\340\1\36\1\340"+ + "\1\36\6\340\1\36\1\127\12\130\1\65\1\124\1\131"+ + "\1\124\1\0\1\124\1\132\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\2\0\1\47\1\0\1\50\1\0\1\51"+ + "\1\0\1\52\1\0\1\53\1\0\1\54\3\0\1\55"+ + "\5\0\1\56\3\0\1\57\11\0\1\60\2\0\1\61"+ + "\16\0\1\62\2\0\1\63\41\0\2\25\1\64\1\0"+ + "\1\65\1\0\1\65\1\66\1\0\1\25\2\0\1\25"+ + "\1\126\4\36\1\340\5\36\2\340\3\36\2\340\10\36"+ + "\1\340\1\127\12\130\1\65\1\124\1\131\1\124\1\0"+ + "\1\124\1\132\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\2\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ + "\1\0\1\53\1\0\1\54\3\0\1\55\5\0\1\56"+ + "\3\0\1\57\11\0\1\60\2\0\1\61\16\0\1\62"+ + "\2\0\1\63\41\0\2\25\1\64\1\0\1\65\1\0"+ + "\1\65\1\66\1\0\1\25\2\0\1\25\1\126\3\36"+ + "\1\340\1\36\1\362\4\36\1\340\2\36\1\340\14\36"+ + "\1\127\12\130\1\65\1\124\1\131\1\124\1\0\1\124"+ + "\1\132\1\125\3\124\3\0\1\124\4\0\2\124\2\0"+ + "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ + "\1\53\1\0\1\54\3\0\1\55\5\0\1\56\3\0"+ + 
"\1\57\11\0\1\60\2\0\1\61\16\0\1\62\2\0"+ + "\1\63\41\0\2\25\1\64\1\0\1\65\1\0\1\65"+ + "\1\66\1\0\1\25\2\0\1\25\1\126\2\340\1\36"+ + "\1\340\3\36\2\340\2\36\1\340\4\36\1\340\11\36"+ + "\1\127\12\130\1\65\1\124\1\131\1\124\1\0\1\124"+ + "\1\132\1\125\3\124\3\0\1\124\4\0\2\124\2\0"+ + "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ + "\1\53\1\0\1\54\3\0\1\55\5\0\1\56\3\0"+ + "\1\57\11\0\1\60\2\0\1\61\16\0\1\62\2\0"+ + "\1\63\41\0\2\25\1\64\1\0\1\65\1\0\1\65"+ + "\1\66\1\0\1\25\2\0\1\25\1\126\3\36\1\340"+ + "\13\36\1\340\12\36\1\127\12\130\1\65\1\124\1\131"+ + "\1\124\1\0\1\124\1\132\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\2\0\1\47\1\0\1\50\1\0\1\51"+ + "\1\0\1\52\1\0\1\53\1\0\1\54\3\0\1\55"+ + "\5\0\1\56\3\0\1\57\11\0\1\60\2\0\1\61"+ + "\16\0\1\62\2\0\1\63\41\0\2\25\1\64\1\0"+ + "\1\65\1\0\1\65\1\66\1\0\1\25\2\0\1\25"+ + "\1\126\3\36\2\340\2\36\2\340\1\36\2\340\1\36"+ + "\1\340\3\36\1\340\1\36\1\340\1\36\1\340\2\36"+ + "\1\340\1\36\1\127\12\130\1\65\1\124\1\131\1\124"+ + "\1\0\1\124\1\132\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\32\252\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\14\0\1\325\3\0\1\326\5\0\1\327\3\0\1\330"+ + "\14\0\1\331\16\0\1\332\2\0\1\333\42\0\1\160"+ + "\1\64\6\0\1\160\3\0\1\123\1\261\1\262\1\263"+ "\1\264\1\265\1\266\1\267\1\270\1\271\1\272\1\273"+ - "\1\274\1\275\1\114\1\u013e\2\u013f\1\u013e\4\u013f\1\u0140"+ - "\1\u013f\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\1\140\3\0\2\114\2\0\1\46\1\0\1\47"+ - "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\66"+ - "\3\0\1\67\5\0\1\70\3\0\1\71\11\0\1\57"+ - "\2\0\1\72\16\0\1\73\2\0\1\74\41\0\1\25"+ - "\2\26\2\0\2\75\1\76\1\0\1\26\2\0\1\376"+ - "\32\35\1\117\12\301\1\0\1\114\1\127\1\114\1\0"+ - "\2\130\1\115\3\114\2\0\1\75\1\114\4\0\2\114"+ - "\2\0\1\46\1\0\1\47\1\0\1\50\1\0\1\51"+ - "\1\0\1\52\1\0\1\66\3\0\1\67\5\0\1\70"+ - "\3\0\1\71\11\0\1\57\2\0\1\72\16\0\1\73"+ - "\2\0\1\74\41\0\1\25\2\26\2\0\2\75\1\76"+ - "\1\0\1\26\2\0\1\376\32\35\1\117\2\377\1\301"+ - 
"\2\377\2\301\2\377\1\301\1\0\1\114\1\127\1\114"+ - "\1\0\2\130\1\115\3\114\2\0\1\75\1\114\4\0"+ - "\2\114\14\0\1\165\3\0\1\166\5\0\1\167\3\0"+ - "\1\170\14\0\1\171\16\0\1\172\2\0\1\173\42\0"+ - "\1\75\1\26\6\0\1\75\2\0\1\113\1\244\1\245"+ - "\1\246\1\247\1\250\1\251\1\252\1\253\1\254\1\255"+ - "\1\256\1\257\1\260\1\261\1\262\1\263\1\264\1\265"+ + "\1\274\1\275\1\276\1\277\1\300\1\301\1\302\1\303"+ + "\1\304\1\305\1\306\1\307\1\310\1\311\1\312\1\124"+ + "\12\130\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\1\150\3\0\2\124\14\0\1\325\3\0\1\326"+ + "\5\0\1\327\3\0\1\330\14\0\1\331\16\0\1\332"+ + "\2\0\1\333\42\0\1\160\1\64\6\0\1\160\3\0"+ + "\1\123\33\124\12\255\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\2\0\1\47\1\0"+ + "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\1\0"+ + "\1\153\3\0\1\55\5\0\1\56\3\0\1\154\11\0"+ + "\1\60\2\0\1\155\16\0\1\156\2\0\1\157\41\0"+ + "\1\25\2\64\2\0\2\160\1\66\1\0\1\64\2\0"+ + "\1\25\1\364\32\143\1\124\12\255\1\0\1\124\1\131"+ + "\1\124\1\0\2\254\1\125\3\124\2\0\1\160\1\124"+ + "\4\0\2\124\2\0\1\47\1\0\1\50\1\0\1\51"+ + "\1\0\1\52\1\0\1\53\1\0\1\54\3\0\1\55"+ + "\5\0\1\56\3\0\1\57\11\0\1\60\2\0\1\61"+ + "\16\0\1\62\2\0\1\63\41\0\2\25\1\64\1\0"+ + "\1\65\1\0\1\65\1\66\1\0\1\25\2\0\1\25"+ + "\1\126\3\36\1\365\26\36\1\127\12\130\1\65\1\124"+ + "\1\131\1\124\1\0\1\124\1\132\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\2\0\1\47\1\0\1\50\1\0"+ + "\1\51\1\0\1\52\1\0\1\53\1\0\1\54\3\0"+ + "\1\55\5\0\1\56\3\0\1\57\11\0\1\60\2\0"+ + "\1\61\16\0\1\62\2\0\1\63\41\0\2\25\1\64"+ + "\1\0\1\65\1\0\1\65\1\66\1\0\1\25\2\0"+ + "\1\25\1\126\32\36\1\127\12\130\1\366\1\124\1\131"+ + "\1\124\1\0\1\124\1\132\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\2\0\1\47\1\0\1\50\1\0\1\51"+ + "\1\0\1\52\1\0\1\53\1\0\1\54\3\0\1\55"+ + "\5\0\1\56\3\0\1\57\11\0\1\60\2\0\1\61"+ + "\16\0\1\62\2\0\1\63\41\0\2\25\1\64\1\0"+ + "\1\65\1\0\1\65\1\66\1\0\1\25\2\0\1\25"+ + "\1\126\15\36\1\367\14\36\1\127\12\130\1\65\1\124"+ + "\1\131\1\124\1\0\1\124\1\132\1\125\3\124\3\0"+ + 
"\1\124\4\0\2\124\150\0\1\363\1\252\2\370\1\371"+ + "\1\372\10\370\1\252\1\373\5\370\6\252\1\127\12\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\1\374\2\370\1\252\1\370"+ + "\1\375\6\370\4\252\1\370\1\252\2\370\1\252\1\370"+ + "\1\252\3\370\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\3\252\1\370\1\252\1\370\4\252\1\370\10\252\1\370"+ + "\2\252\1\370\2\252\1\370\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\1\252\1\370\1\376\2\370\2\252\1\370"+ + "\6\252\3\370\11\252\1\127\12\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\3\252\1\370\1\252\1\370\10\252\1\370\1\252"+ + "\2\370\10\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\4\252\1\377\5\252\1\370\17\252\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\4\252\2\370\2\252\1\370\1\252"+ + "\1\370\13\252\1\370\2\252\1\370\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\1\370\1\252\3\370\1\u0100\14\370"+ + "\2\252\2\370\2\252\1\370\1\252\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\2\252\4\370\3\252\2\370\1\u0101"+ + "\1\370\1\252\2\370\12\252\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\2\370\2\252\1\370\3\252\1\370\5\252"+ + "\3\370\3\252\1\370\2\252\3\370\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\5\370\1\u0102\1\252\1\370\1\u0103"+ + "\7\370\1\u0104\3\370\1\252\1\370\1\252\3\370\1\127"+ + "\12\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\1\u0105\1\370\1\252"+ + "\1\374\6\370\3\252\1\370\2\252\1\370\2\252\1\370"+ + "\6\252\1\127\12\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\1\370"+ + "\31\252\1\127\12\252\1\0\3\124\1\0\2\124\1\125"+ + 
"\3\124\3\0\1\124\4\0\2\124\150\0\1\363\1\370"+ + "\2\252\1\370\1\u0106\1\252\2\370\1\252\3\370\2\252"+ + "\2\370\1\252\1\370\3\252\1\370\2\252\2\370\1\127"+ + "\12\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\6\370\1\252\5\370"+ + "\3\252\2\370\2\252\7\370\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\1\252\2\370\1\u0103\1\u0107\3\370\1\252"+ + "\3\370\1\252\1\370\1\252\1\370\1\252\1\370\1\252"+ + "\1\370\1\252\3\370\1\252\1\370\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\1\370\6\252\1\370\6\252\1\370"+ + "\4\252\1\370\4\252\2\370\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\6\252\1\370\7\252\1\370\13\252\1\127"+ + "\12\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\13\252\1\u0108\16\252"+ + "\1\127\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\1\370\11\252"+ + "\1\370\6\252\1\370\10\252\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\1\370\1\252\6\370\1\u0109\1\252\2\370"+ + "\2\252\2\370\1\252\1\370\1\252\6\370\1\252\1\127"+ + "\12\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\4\252\1\370\5\252"+ + "\2\370\3\252\2\370\10\252\1\370\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\3\252\1\370\1\252\1\u010a\4\252"+ + "\1\370\2\252\1\370\14\252\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\2\370\1\252\1\370\3\252\2\370\2\252"+ + "\1\370\4\252\1\370\11\252\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\3\252\1\370\13\252\1\370\12\252\1\127"+ + "\12\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\3\252\2\370\2\252"+ + "\2\370\1\252\2\370\1\252\1\370\3\252\1\370\1\252"+ + "\1\370\1\252\1\370\2\252\1\370\1\252\1\127\12\252"+ + 
"\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\2\0\1\47\1\0\1\50\1\0\1\51"+ + "\1\0\1\52\1\0\1\53\1\0\1\67\3\0\1\70"+ + "\5\0\1\71\3\0\1\72\11\0\1\60\2\0\1\73"+ + "\16\0\1\74\2\0\1\75\41\0\1\25\2\26\2\0"+ + "\2\76\1\77\1\0\1\26\2\0\1\25\1\u010b\32\36"+ + "\1\127\12\314\1\0\1\124\1\137\1\124\1\0\2\140"+ + "\1\125\3\124\2\0\1\76\1\124\4\0\2\124\2\0"+ + "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ + "\1\53\1\0\1\67\3\0\1\70\5\0\1\71\3\0"+ + "\1\72\11\0\1\60\2\0\1\73\16\0\1\74\2\0"+ + "\1\75\41\0\1\25\2\26\2\0\2\76\1\77\1\0"+ + "\1\26\2\0\1\25\1\u010b\32\36\1\127\12\u010c\1\0"+ + "\1\124\1\137\1\124\1\0\2\140\1\125\3\124\2\0"+ + "\1\76\1\124\4\0\2\124\2\0\1\47\1\0\1\50"+ + "\1\0\1\51\1\0\1\52\1\0\1\53\1\0\1\67"+ + "\3\0\1\70\5\0\1\71\3\0\1\72\11\0\1\60"+ + "\2\0\1\73\16\0\1\74\2\0\1\75\41\0\1\25"+ + "\2\26\2\0\2\76\1\77\1\0\1\26\2\0\1\25"+ + "\1\u010b\32\36\1\127\1\314\1\u010d\1\u010c\2\314\2\u010c"+ + "\1\314\1\u010c\1\314\1\0\1\124\1\137\1\124\1\0"+ + "\2\140\1\125\3\124\2\0\1\76\1\124\4\0\2\124"+ + "\2\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ + "\1\0\1\53\1\0\1\67\3\0\1\70\5\0\1\71"+ + "\3\0\1\72\11\0\1\60\2\0\1\73\16\0\1\74"+ + "\2\0\1\75\41\0\1\25\2\26\2\0\2\76\1\77"+ + "\1\0\1\26\2\0\1\25\1\u010e\32\36\1\127\12\316"+ + "\1\0\1\124\1\137\1\124\1\0\2\140\1\125\3\124"+ + "\2\0\1\76\1\124\4\0\2\124\2\0\1\47\1\0"+ + "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\1\0"+ + "\1\161\3\0\1\162\5\0\1\163\3\0\1\164\11\0"+ + "\1\60\2\0\1\165\16\0\1\166\2\0\1\167\41\0"+ + "\1\25\1\65\7\0\1\65\2\0\1\25\1\123\32\143"+ + "\13\124\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\1\150\3\0\2\124\14\0\1\175\3\0\1\176"+ + "\5\0\1\177\3\0\1\200\14\0\1\201\16\0\1\202"+ + "\2\0\1\203\42\0\1\76\1\26\6\0\1\76\3\0"+ + "\1\123\33\124\12\144\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\1\150\3\0\2\124\151\0\4\u010f"+ + "\2\0\1\u010f\15\0\1\u010f\6\0\12\u010f\1\322\175\0"+ + "\4\u0110\2\0\1\u0110\15\0\1\u0110\6\0\12\u0110\1\u0111"+ + "\175\0\4\u0112\2\0\1\u0112\15\0\1\u0112\6\0\1\u0113"+ + 
"\2\u0114\1\u0113\5\u0114\1\u0115\14\0\1\u0116\160\0\46\124"+ + "\1\0\3\124\1\0\2\124\1\0\3\124\3\0\1\124"+ + "\1\150\3\0\2\124\3\0\1\160\37\0\1\160\1\0"+ + "\2\160\16\0\1\160\4\0\1\160\2\0\2\160\10\0"+ + "\1\64\4\0\1\160\133\0\1\64\102\0\1\64\243\0"+ + "\2\64\230\0\1\160\247\0\2\160\11\0\1\160\211\0"+ + "\2\160\6\0\1\160\152\0\1\160\3\0\1\160\2\0"+ + "\1\160\3\0\1\160\5\0\1\160\7\0\1\160\4\0"+ + "\2\160\3\0\2\160\1\0\1\160\4\0\1\160\1\0"+ + "\1\160\2\0\2\160\1\0\3\160\1\0\1\160\2\0"+ + "\4\160\2\0\1\160\304\0\1\u0117\1\u0118\1\u0119\1\u011a"+ + "\1\u011b\1\u011c\1\u011d\1\u011e\1\u011f\1\u0120\1\u0121\1\u0122"+ + "\1\u0123\1\u0124\1\u0125\1\u0126\1\u0127\1\u0128\1\u0129\1\u012a"+ + "\1\u012b\1\u012c\1\u012d\1\u012e\1\u012f\1\u0130\1\0\12\216"+ + "\176\0\32\216\1\335\12\216\175\0\74\217\1\0\1\47"+ + "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\53"+ + "\1\0\1\54\3\0\1\55\5\0\1\56\3\0\1\57"+ + "\11\0\1\60\2\0\1\61\16\0\1\62\2\0\1\63"+ + "\41\0\2\25\1\64\1\0\1\65\1\0\1\65\1\66"+ + "\1\0\1\25\2\0\1\25\1\u0131\32\36\1\127\12\130"+ + "\1\u0132\1\124\1\131\1\124\1\0\1\124\1\132\1\125"+ + "\1\u0133\1\u0134\1\u0135\3\0\1\124\4\0\2\124\2\0"+ + "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ + "\1\53\1\0\1\54\3\0\1\55\5\0\1\56\3\0"+ + "\1\57\11\0\1\60\2\0\1\61\16\0\1\62\2\0"+ + "\1\63\41\0\2\25\1\64\1\0\1\65\1\0\1\65"+ + "\1\66\1\0\1\25\2\0\1\25\1\u0131\4\36\1\u0136"+ + "\25\36\1\127\12\130\1\u0132\1\124\1\131\1\124\1\0"+ + "\1\124\1\132\1\125\1\u0133\1\u0134\1\u0135\3\0\1\124"+ + "\4\0\2\124\2\0\1\47\1\0\1\50\1\0\1\51"+ + "\1\0\1\52\1\0\1\53\1\0\1\54\3\0\1\55"+ + "\5\0\1\56\3\0\1\57\11\0\1\60\2\0\1\61"+ + "\16\0\1\62\2\0\1\63\41\0\2\25\1\64\1\0"+ + "\1\65\1\0\1\65\1\66\1\0\1\25\2\0\1\25"+ + "\1\u0131\15\36\1\234\14\36\1\127\12\130\1\u0132\1\124"+ + "\1\131\1\124\1\0\1\124\1\132\1\125\1\u0133\1\u0134"+ + "\1\u0135\3\0\1\124\4\0\2\124\2\0\1\47\1\0"+ + "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\1\0"+ + "\1\54\3\0\1\55\5\0\1\56\3\0\1\57\11\0"+ + "\1\60\2\0\1\61\16\0\1\62\2\0\1\63\41\0"+ + 
"\2\25\1\64\1\0\1\65\1\0\1\65\1\66\1\0"+ + "\1\25\2\0\1\25\1\u0131\10\36\1\234\21\36\1\127"+ + "\12\130\1\u0132\1\124\1\131\1\124\1\0\1\124\1\132"+ + "\1\125\1\u0133\1\u0134\1\u0135\3\0\1\124\4\0\2\124"+ + "\2\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ + "\1\0\1\53\1\0\1\54\3\0\1\55\5\0\1\56"+ + "\3\0\1\57\11\0\1\60\2\0\1\61\16\0\1\62"+ + "\2\0\1\63\41\0\2\25\1\64\1\0\1\65\1\0"+ + "\1\65\1\66\1\0\1\25\2\0\1\25\1\u0131\17\36"+ + "\1\340\12\36\1\127\12\130\1\u0132\1\124\1\131\1\124"+ + "\1\0\1\124\1\132\1\125\1\u0133\1\u0134\1\u0135\3\0"+ + "\1\124\4\0\2\124\2\0\1\47\1\0\1\50\1\0"+ + "\1\51\1\0\1\52\1\0\1\53\1\0\1\54\3\0"+ + "\1\55\5\0\1\56\3\0\1\57\11\0\1\60\2\0"+ + "\1\61\16\0\1\62\2\0\1\63\41\0\2\25\1\64"+ + "\1\0\1\65\1\0\1\65\1\66\1\0\1\25\2\0"+ + "\1\25\1\u0131\5\36\1\u0137\4\36\1\340\17\36\1\127"+ + "\12\130\1\u0132\1\124\1\131\1\124\1\0\1\124\1\132"+ + "\1\125\1\u0133\1\u0134\1\u0135\3\0\1\124\4\0\2\124"+ + "\2\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ + "\1\0\1\53\1\0\1\54\3\0\1\55\5\0\1\56"+ + "\3\0\1\57\11\0\1\60\2\0\1\61\16\0\1\62"+ + "\2\0\1\63\41\0\2\25\1\64\1\0\1\65\1\0"+ + "\1\65\1\66\1\0\1\25\2\0\1\25\1\126\20\36"+ + "\1\340\11\36\1\127\12\130\1\65\1\124\1\131\1\124"+ + "\1\0\1\124\1\132\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\2\0\1\47\1\0\1\50\1\0\1\51\1\0"+ + "\1\52\1\0\1\53\1\0\1\54\3\0\1\55\5\0"+ + "\1\56\3\0\1\57\11\0\1\60\2\0\1\61\16\0"+ + "\1\62\2\0\1\63\41\0\2\25\1\64\1\0\1\65"+ + "\1\0\1\65\1\66\1\0\1\25\2\0\1\25\1\126"+ + "\7\36\1\340\22\36\1\127\12\130\1\65\1\124\1\131"+ + "\1\124\1\0\1\124\1\132\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\2\0\1\47\1\0\1\50\1\0\1\51"+ + "\1\0\1\52\1\0\1\53\1\0\1\54\3\0\1\55"+ + "\5\0\1\56\3\0\1\57\11\0\1\60\2\0\1\61"+ + "\16\0\1\62\2\0\1\63\41\0\2\25\1\64\1\0"+ + "\1\65\1\0\1\65\1\66\1\0\1\25\2\0\1\25"+ + "\1\126\27\36\1\340\2\36\1\127\12\130\1\65\1\124"+ + "\1\131\1\124\1\0\1\124\1\132\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\2\0\1\47\1\0\1\50\1\0"+ + "\1\51\1\0\1\52\1\0\1\53\1\0\1\54\3\0"+ + 
"\1\55\5\0\1\56\3\0\1\57\11\0\1\60\2\0"+ + "\1\61\16\0\1\62\2\0\1\63\41\0\2\25\1\64"+ + "\1\0\1\65\1\0\1\65\1\66\1\0\1\25\2\0"+ + "\1\25\1\u0131\6\36\1\u0136\10\36\1\340\12\36\1\127"+ + "\12\130\1\u0132\1\124\1\131\1\124\1\0\1\124\1\132"+ + "\1\125\1\u0133\1\u0134\1\u0135\3\0\1\124\4\0\2\124"+ + "\2\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ + "\1\0\1\53\1\0\1\54\3\0\1\55\5\0\1\56"+ + "\3\0\1\57\11\0\1\60\2\0\1\61\16\0\1\62"+ + "\2\0\1\63\41\0\2\25\1\64\1\0\1\65\1\0"+ + "\1\65\1\66\1\0\1\25\2\0\1\25\1\u0131\24\36"+ + "\1\u0138\5\36\1\127\12\130\1\u0132\1\124\1\131\1\124"+ + "\1\0\1\124\1\132\1\125\1\u0133\1\u0134\1\u0135\3\0"+ + "\1\124\4\0\2\124\2\0\1\47\1\0\1\50\1\0"+ + "\1\51\1\0\1\52\1\0\1\53\1\0\1\54\3\0"+ + "\1\55\5\0\1\56\3\0\1\57\11\0\1\60\2\0"+ + "\1\61\16\0\1\62\2\0\1\63\41\0\2\25\1\64"+ + "\1\0\1\65\1\0\1\65\1\66\1\0\1\25\2\0"+ + "\1\25\1\126\11\36\1\340\20\36\1\127\12\130\1\65"+ + "\1\124\1\131\1\124\1\0\1\124\1\132\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\2\0\1\47\1\0\1\50"+ + "\1\0\1\51\1\0\1\52\1\0\1\53\1\0\1\54"+ + "\3\0\1\55\5\0\1\56\3\0\1\57\11\0\1\60"+ + "\2\0\1\61\16\0\1\62\2\0\1\63\41\0\2\25"+ + "\1\64\1\0\1\65\1\0\1\65\1\66\1\0\1\25"+ + "\2\0\1\25\1\u0131\16\36\1\u0139\13\36\1\127\12\130"+ + "\1\u0132\1\124\1\131\1\124\1\0\1\124\1\132\1\125"+ + "\1\u0133\1\u0134\1\u0135\3\0\1\124\4\0\2\124\2\0"+ + "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ + "\1\53\1\0\1\54\3\0\1\55\5\0\1\56\3\0"+ + "\1\57\11\0\1\60\2\0\1\61\16\0\1\62\2\0"+ + "\1\63\41\0\2\25\1\64\1\0\1\65\1\0\1\65"+ + "\1\66\1\0\1\25\2\0\1\25\1\u0131\12\36\1\u013a"+ + "\17\36\1\127\12\130\1\u0132\1\124\1\131\1\124\1\0"+ + "\1\124\1\132\1\125\1\u0133\1\u0134\1\u0135\3\0\1\124"+ + "\4\0\2\124\2\0\1\47\1\0\1\50\1\0\1\51"+ + "\1\0\1\52\1\0\1\53\1\0\1\54\3\0\1\55"+ + "\5\0\1\56\3\0\1\57\11\0\1\60\2\0\1\61"+ + "\16\0\1\62\2\0\1\63\41\0\2\25\1\64\1\0"+ + "\1\65\1\0\1\65\1\66\1\0\1\25\2\0\1\25"+ + "\1\u0131\5\36\1\340\24\36\1\127\12\130\1\u0132\1\124"+ + "\1\131\1\124\1\0\1\124\1\132\1\125\1\u0133\1\u0134"+ + 
"\1\u0135\3\0\1\124\4\0\2\124\2\0\1\47\1\0"+ + "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\1\0"+ + "\1\54\3\0\1\55\5\0\1\56\3\0\1\57\11\0"+ + "\1\60\2\0\1\61\16\0\1\62\2\0\1\63\41\0"+ + "\2\25\1\64\1\0\1\65\1\0\1\65\1\66\1\0"+ + "\1\25\2\0\1\25\1\u0131\1\u013b\31\36\1\127\12\130"+ + "\1\u0132\1\124\1\131\1\124\1\0\1\124\1\132\1\125"+ + "\1\u0133\1\u0134\1\u0135\3\0\1\124\4\0\2\124\2\0"+ + "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ + "\1\53\1\0\1\54\3\0\1\55\5\0\1\56\3\0"+ + "\1\57\11\0\1\60\2\0\1\61\16\0\1\62\2\0"+ + "\1\63\41\0\2\25\1\64\1\0\1\65\1\0\1\65"+ + "\1\66\1\0\1\25\2\0\1\25\1\126\32\36\1\u013c"+ + "\12\130\1\65\1\124\1\131\1\124\1\0\1\124\1\132"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\2\0\1\47"+ + "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\53"+ + "\1\0\1\54\3\0\1\55\5\0\1\56\3\0\1\57"+ + "\11\0\1\60\2\0\1\61\16\0\1\62\2\0\1\63"+ + "\41\0\2\25\1\64\1\0\1\65\1\0\1\65\1\66"+ + "\1\0\1\25\2\0\1\25\1\u0131\23\36\1\340\6\36"+ + "\1\127\12\130\1\u0132\1\124\1\131\1\124\1\0\1\124"+ + "\1\132\1\125\1\u0133\1\u0134\1\u0135\3\0\1\124\4\0"+ + "\2\124\2\0\1\47\1\0\1\50\1\0\1\51\1\0"+ + "\1\52\1\0\1\53\1\0\1\54\3\0\1\55\5\0"+ + "\1\56\3\0\1\57\11\0\1\60\2\0\1\61\16\0"+ + "\1\62\2\0\1\63\41\0\2\25\1\64\1\0\1\65"+ + "\1\0\1\65\1\66\1\0\1\25\2\0\1\25\1\u0131"+ + "\24\36\1\u013d\5\36\1\127\12\130\1\u0132\1\124\1\131"+ + "\1\124\1\0\1\124\1\132\1\125\1\u0133\1\u0134\1\u0135"+ + "\3\0\1\124\4\0\2\124\150\0\1\123\1\261\1\262"+ + "\1\263\1\264\1\265\1\266\1\267\1\270\1\271\1\272"+ + "\1\273\1\274\1\275\1\276\1\277\1\300\1\301\1\302"+ + "\1\303\1\304\1\305\1\306\1\307\1\310\1\311\1\312"+ + "\1\124\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\1\150\3\0\2\124\14\0\1\325\3\0"+ + "\1\326\5\0\1\327\3\0\1\330\14\0\1\331\16\0"+ + "\1\332\2\0\1\333\42\0\1\160\1\64\6\0\1\160"+ + "\3\0\1\123\33\124\12\255\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\1\150\3\0\2\124\2\0"+ + "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ + "\1\53\1\0\1\54\3\0\1\55\5\0\1\56\3\0"+ + "\1\57\11\0\1\60\2\0\1\61\16\0\1\62\2\0"+ 
+ "\1\63\41\0\2\25\1\64\1\0\1\65\1\0\1\65"+ + "\1\66\1\0\1\25\2\0\1\25\1\126\32\36\1\127"+ + "\12\130\1\u013e\1\124\1\131\1\124\1\0\1\124\1\132"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\2\0\1\47"+ + "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\53"+ + "\1\0\1\161\3\0\1\162\5\0\1\163\3\0\1\164"+ + "\11\0\1\60\2\0\1\165\16\0\1\166\2\0\1\167"+ + "\41\0\1\25\1\65\7\0\1\65\2\0\1\25\1\0"+ + "\32\25\24\0\1\u013f\15\0\1\47\1\0\1\50\1\0"+ + "\1\51\1\0\1\52\1\0\1\53\1\0\1\54\3\0"+ + "\1\55\5\0\1\56\3\0\1\57\11\0\1\60\2\0"+ + "\1\61\16\0\1\62\2\0\1\63\41\0\2\25\1\64"+ + "\1\0\1\65\1\0\1\65\1\66\1\0\1\25\2\0"+ + "\1\25\1\126\16\36\1\u0140\13\36\1\127\12\130\1\u0141"+ + "\1\124\1\131\1\124\1\0\1\124\1\132\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\u0142\32\252\1\127"+ + "\12\252\1\u0143\3\124\1\0\2\124\1\125\1\u0133\1\u0134"+ + "\1\u0135\3\0\1\124\4\0\2\124\150\0\1\u0142\4\252"+ + "\1\u0144\25\252\1\127\12\252\1\u0143\3\124\1\0\2\124"+ + "\1\125\1\u0133\1\u0134\1\u0135\3\0\1\124\4\0\2\124"+ + "\150\0\1\u0142\15\252\1\275\14\252\1\127\12\252\1\u0143"+ + "\3\124\1\0\2\124\1\125\1\u0133\1\u0134\1\u0135\3\0"+ + "\1\124\4\0\2\124\150\0\1\u0142\10\252\1\275\21\252"+ + "\1\127\12\252\1\u0143\3\124\1\0\2\124\1\125\1\u0133"+ + "\1\u0134\1\u0135\3\0\1\124\4\0\2\124\150\0\1\u0142"+ + "\17\252\1\370\12\252\1\127\12\252\1\u0143\3\124\1\0"+ + "\2\124\1\125\1\u0133\1\u0134\1\u0135\3\0\1\124\4\0"+ + "\2\124\150\0\1\u0142\5\252\1\u0145\4\252\1\370\17\252"+ + "\1\127\12\252\1\u0143\3\124\1\0\2\124\1\125\1\u0133"+ + "\1\u0134\1\u0135\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\20\252\1\370\11\252\1\127\12\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\7\252\1\370\22\252\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\27\252\1\370\2\252\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\u0142\6\252\1\u0144\10\252\1\370\12\252"+ + "\1\127\12\252\1\u0143\3\124\1\0\2\124\1\125\1\u0133"+ + 
"\1\u0134\1\u0135\3\0\1\124\4\0\2\124\150\0\1\u0142"+ + "\24\252\1\u0146\5\252\1\127\12\252\1\u0143\3\124\1\0"+ + "\2\124\1\125\1\u0133\1\u0134\1\u0135\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\11\252\1\370\20\252\1\127\12\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\u0142\16\252\1\u0147\13\252\1\127"+ + "\12\252\1\u0143\3\124\1\0\2\124\1\125\1\u0133\1\u0134"+ + "\1\u0135\3\0\1\124\4\0\2\124\150\0\1\u0142\12\252"+ + "\1\u0148\17\252\1\127\12\252\1\u0143\3\124\1\0\2\124"+ + "\1\125\1\u0133\1\u0134\1\u0135\3\0\1\124\4\0\2\124"+ + "\150\0\1\u0142\5\252\1\370\24\252\1\127\12\252\1\u0143"+ + "\3\124\1\0\2\124\1\125\1\u0133\1\u0134\1\u0135\3\0"+ + "\1\124\4\0\2\124\150\0\1\u0142\1\u0149\31\252\1\127"+ + "\12\252\1\u0143\3\124\1\0\2\124\1\125\1\u0133\1\u0134"+ + "\1\u0135\3\0\1\124\4\0\2\124\150\0\1\363\32\252"+ + "\1\u013c\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\u0142\23\252\1\370"+ + "\6\252\1\127\12\252\1\u0143\3\124\1\0\2\124\1\125"+ + "\1\u0133\1\u0134\1\u0135\3\0\1\124\4\0\2\124\150\0"+ + "\1\u0142\24\252\1\u014a\5\252\1\127\12\252\1\u0143\3\124"+ + "\1\0\2\124\1\125\1\u0133\1\u0134\1\u0135\3\0\1\124"+ + "\4\0\2\124\14\0\1\175\3\0\1\176\5\0\1\177"+ + "\3\0\1\200\14\0\1\201\16\0\1\202\2\0\1\203"+ + "\42\0\1\76\1\26\6\0\1\76\3\0\1\123\1\261"+ + "\1\262\1\263\1\264\1\265\1\266\1\267\1\270\1\271"+ + "\1\272\1\273\1\274\1\275\1\276\1\277\1\300\1\301"+ + "\1\302\1\303\1\304\1\305\1\306\1\307\1\310\1\311"+ + "\1\312\1\124\1\u014b\2\u014c\1\u014b\5\u014c\1\u014d\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\1\150"+ + "\3\0\2\124\2\0\1\47\1\0\1\50\1\0\1\51"+ + "\1\0\1\52\1\0\1\53\1\0\1\67\3\0\1\70"+ + "\5\0\1\71\3\0\1\72\11\0\1\60\2\0\1\73"+ + "\16\0\1\74\2\0\1\75\41\0\1\25\2\26\2\0"+ + "\2\76\1\77\1\0\1\26\2\0\1\25\1\u010b\32\36"+ + "\1\127\12\316\1\0\1\124\1\137\1\124\1\0\2\140"+ + "\1\125\3\124\2\0\1\76\1\124\4\0\2\124\2\0"+ + "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ + "\1\53\1\0\1\67\3\0\1\70\5\0\1\71\3\0"+ + 
"\1\72\11\0\1\60\2\0\1\73\16\0\1\74\2\0"+ + "\1\75\41\0\1\25\2\26\2\0\2\76\1\77\1\0"+ + "\1\26\2\0\1\25\1\u010b\32\36\1\127\2\u010c\1\316"+ + "\2\u010c\2\316\1\u010c\1\316\1\u010c\1\0\1\124\1\137"+ + "\1\124\1\0\2\140\1\125\3\124\2\0\1\76\1\124"+ + "\4\0\2\124\14\0\1\175\3\0\1\176\5\0\1\177"+ + "\3\0\1\200\14\0\1\201\16\0\1\202\2\0\1\203"+ + "\42\0\1\76\1\26\6\0\1\76\3\0\1\123\1\261"+ + "\1\262\1\263\1\264\1\265\1\266\1\267\1\270\1\271"+ + "\1\272\1\273\1\274\1\275\1\276\1\277\1\300\1\301"+ + "\1\302\1\303\1\304\1\305\1\306\1\307\1\310\1\311"+ + "\1\312\1\124\12\316\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\1\150\3\0\2\124\151\0\4\u014e"+ + "\2\0\1\u014e\15\0\1\u014e\6\0\12\u014e\1\322\175\0"+ + "\4\u014f\2\0\1\u014f\15\0\1\u014f\6\0\12\u014f\1\u0150"+ + "\175\0\4\u0151\2\0\1\u0151\15\0\1\u0151\6\0\1\u0152"+ + "\2\u0153\1\u0152\5\u0153\1\u0154\14\0\1\u0116\161\0\4\u0155"+ + "\2\0\1\u0155\15\0\1\u0155\6\0\12\u0155\1\u0156\13\0"+ + "\1\u0116\160\0\1\u0157\4\u0155\2\0\1\u0155\15\0\1\u0155"+ + "\6\0\12\u0158\1\u0156\13\0\1\u0116\160\0\1\u0157\4\u0155"+ + "\2\0\1\u0155\15\0\1\u0155\6\0\12\u0159\1\u0156\13\0"+ + "\1\u0116\160\0\1\u0157\4\u0155\2\0\1\u0155\15\0\1\u0155"+ + "\6\0\1\u0158\1\u015a\1\u0159\2\u0158\2\u0159\1\u0158\1\u0159"+ + "\1\u0158\1\u0156\13\0\1\u0116\226\0\1\u0143\7\0\1\u015b"+ + "\1\u015c\1\u015d\162\0\1\334\1\216\2\u015e\1\u015f\1\u0160"+ + "\10\u015e\1\216\1\u0161\5\u015e\6\216\1\335\12\216\175\0"+ + "\1\334\1\u0162\2\u015e\1\216\1\u015e\1\u0163\6\u015e\4\216"+ + "\1\u015e\1\216\2\u015e\1\216\1\u015e\1\216\3\u015e\1\335"+ + "\12\216\175\0\1\334\3\216\1\u015e\1\216\1\u015e\4\216"+ + "\1\u015e\10\216\1\u015e\2\216\1\u015e\2\216\1\u015e\1\335"+ + "\12\216\175\0\1\334\1\216\1\u015e\1\u0164\2\u015e\2\216"+ + "\1\u015e\6\216\3\u015e\11\216\1\335\12\216\175\0\1\334"+ + "\3\216\1\u015e\1\216\1\u015e\10\216\1\u015e\1\216\2\u015e"+ + "\10\216\1\335\12\216\175\0\1\334\4\216\1\u0165\5\216"+ + "\1\u015e\17\216\1\335\12\216\175\0\1\334\4\216\2\u015e"+ + 
"\2\216\1\u015e\1\216\1\u015e\13\216\1\u015e\2\216\1\u015e"+ + "\1\335\12\216\175\0\1\334\1\u015e\1\216\3\u015e\1\u0166"+ + "\14\u015e\2\216\2\u015e\2\216\1\u015e\1\216\1\335\12\216"+ + "\175\0\1\334\2\216\4\u015e\3\216\2\u015e\1\u0167\1\u015e"+ + "\1\216\2\u015e\12\216\1\335\12\216\175\0\1\334\2\u015e"+ + "\2\216\1\u015e\3\216\1\u015e\5\216\3\u015e\3\216\1\u015e"+ + "\2\216\3\u015e\1\335\12\216\175\0\1\334\5\u015e\1\u0168"+ + "\1\216\1\u015e\1\u0169\7\u015e\1\u016a\3\u015e\1\216\1\u015e"+ + "\1\216\3\u015e\1\335\12\216\175\0\1\334\1\u016b\1\u015e"+ + "\1\216\1\u0162\6\u015e\3\216\1\u015e\2\216\1\u015e\2\216"+ + "\1\u015e\6\216\1\335\12\216\175\0\1\334\1\u015e\31\216"+ + "\1\335\12\216\175\0\1\334\1\u015e\2\216\1\u015e\1\u016c"+ + "\1\216\2\u015e\1\216\3\u015e\2\216\2\u015e\1\216\1\u015e"+ + "\3\216\1\u015e\2\216\2\u015e\1\335\12\216\175\0\1\334"+ + "\6\u015e\1\216\5\u015e\3\216\2\u015e\2\216\7\u015e\1\335"+ + "\12\216\175\0\1\334\1\216\2\u015e\1\u0169\1\u016d\3\u015e"+ + "\1\216\3\u015e\1\216\1\u015e\1\216\1\u015e\1\216\1\u015e"+ + "\1\216\1\u015e\1\216\3\u015e\1\216\1\u015e\1\335\12\216"+ + "\175\0\1\334\1\u015e\6\216\1\u015e\6\216\1\u015e\4\216"+ + "\1\u015e\4\216\2\u015e\1\335\12\216\175\0\1\334\6\216"+ + "\1\u015e\7\216\1\u015e\13\216\1\335\12\216\175\0\1\334"+ + "\13\216\1\u016e\16\216\1\335\12\216\175\0\1\334\1\u015e"+ + "\11\216\1\u015e\6\216\1\u015e\10\216\1\335\12\216\175\0"+ + "\1\334\1\u015e\1\216\6\u015e\1\u016f\1\216\2\u015e\2\216"+ + "\2\u015e\1\216\1\u015e\1\216\6\u015e\1\216\1\335\12\216"+ + "\175\0\1\334\4\216\1\u015e\5\216\2\u015e\3\216\2\u015e"+ + "\10\216\1\u015e\1\335\12\216\175\0\1\334\3\216\1\u015e"+ + "\1\216\1\u0170\4\216\1\u015e\2\216\1\u015e\14\216\1\335"+ + "\12\216\175\0\1\334\2\u015e\1\216\1\u015e\3\216\2\u015e"+ + "\2\216\1\u015e\4\216\1\u015e\11\216\1\335\12\216\175\0"+ + "\1\334\3\216\1\u015e\13\216\1\u015e\12\216\1\335\12\216"+ + "\175\0\1\334\3\216\2\u015e\2\216\2\u015e\1\216\2\u015e"+ + 
"\1\216\1\u015e\3\216\1\u015e\1\216\1\u015e\1\216\1\u015e"+ + "\2\216\1\u015e\1\216\1\335\12\216\27\0\1\47\1\0"+ + "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\1\0"+ + "\1\161\3\0\1\162\5\0\1\163\3\0\1\164\11\0"+ + "\1\60\2\0\1\165\16\0\1\166\2\0\1\167\41\0"+ + "\1\25\1\65\7\0\1\65\2\0\1\25\1\123\1\220"+ + "\1\221\1\222\1\223\1\224\1\225\1\226\1\227\1\230"+ + "\1\231\1\232\1\233\1\234\1\235\1\236\1\237\1\240"+ + "\1\241\1\242\1\243\1\244\1\245\1\246\1\247\1\250"+ + "\1\251\1\124\12\252\1\u0143\3\124\1\0\2\124\1\125"+ + "\1\u0133\1\u0134\1\u0135\3\0\1\124\1\150\3\0\2\124"+ + "\2\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ + "\1\0\1\53\1\0\1\161\3\0\1\162\5\0\1\163"+ + "\3\0\1\164\11\0\1\60\2\0\1\165\16\0\1\166"+ + "\2\0\1\167\41\0\1\25\1\65\7\0\1\65\2\0"+ + "\1\25\1\0\32\25\1\0\12\u0171\175\0\1\u0172\45\u0133"+ + "\1\u015b\2\u0133\1\u0173\1\u015b\2\u0133\1\u0174\2\u0133\1\u0135"+ + "\2\0\1\u015b\1\u0133\4\0\1\u0133\1\124\150\0\1\u0175"+ + "\45\u0134\1\u015c\2\u0134\1\u0176\1\0\2\124\1\u0177\1\u0133"+ + "\1\u0134\1\u0135\2\0\1\u015c\1\u0134\4\0\2\124\150\0"+ + "\1\u0178\45\u0135\1\u015d\2\u0135\1\u0179\1\u015d\2\u0135\1\u017a"+ + "\2\u0135\1\124\2\0\1\u015d\1\u0135\4\0\1\u0135\1\124"+ + "\2\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ + "\1\0\1\53\1\0\1\54\3\0\1\55\5\0\1\56"+ + "\3\0\1\57\11\0\1\60\2\0\1\61\16\0\1\62"+ + "\2\0\1\63\41\0\2\25\1\64\1\0\1\65\1\0"+ + "\1\65\1\66\1\0\1\25\2\0\1\25\1\126\5\36"+ + "\1\340\24\36\1\127\12\130\1\65\1\124\1\131\1\124"+ + "\1\0\1\124\1\132\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\2\0\1\47\1\0\1\50\1\0\1\51\1\0"+ + "\1\52\1\0\1\53\1\0\1\54\3\0\1\55\5\0"+ + "\1\56\3\0\1\57\11\0\1\60\2\0\1\61\16\0"+ + "\1\62\2\0\1\63\41\0\2\25\1\64\1\0\1\65"+ + "\1\0\1\65\1\66\1\0\1\25\2\0\1\25\1\126"+ + "\15\36\1\340\14\36\1\127\12\130\1\65\1\124\1\131"+ + "\1\124\1\0\1\124\1\132\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\2\0\1\47\1\0\1\50\1\0\1\51"+ + "\1\0\1\52\1\0\1\53\1\0\1\54\3\0\1\55"+ + "\5\0\1\56\3\0\1\57\11\0\1\60\2\0\1\61"+ + "\16\0\1\62\2\0\1\63\41\0\2\25\1\64\1\0"+ + 
"\1\65\1\0\1\65\1\66\1\0\1\25\2\0\1\25"+ + "\1\126\10\36\1\340\21\36\1\127\12\130\1\65\1\124"+ + "\1\131\1\124\1\0\1\124\1\132\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\2\0\1\47\1\0\1\50\1\0"+ + "\1\51\1\0\1\52\1\0\1\53\1\0\1\54\3\0"+ + "\1\55\5\0\1\56\3\0\1\57\11\0\1\60\2\0"+ + "\1\61\16\0\1\62\2\0\1\63\41\0\2\25\1\64"+ + "\1\0\1\65\1\0\1\65\1\66\1\0\1\25\2\0"+ + "\1\25\1\126\3\36\1\u017b\26\36\1\127\12\130\1\65"+ + "\1\124\1\131\1\124\1\0\1\124\1\132\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\2\0\1\47\1\0\1\50"+ + "\1\0\1\51\1\0\1\52\1\0\1\53\1\0\1\54"+ + "\3\0\1\55\5\0\1\56\3\0\1\57\11\0\1\60"+ + "\2\0\1\61\16\0\1\62\2\0\1\63\41\0\2\25"+ + "\1\64\1\0\1\65\1\0\1\65\1\66\1\0\1\25"+ + "\2\0\1\25\1\126\3\36\1\340\26\36\1\127\12\130"+ + "\1\65\1\124\1\131\1\124\1\0\1\124\1\132\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\2\0\1\47\1\0"+ + "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\1\0"+ + "\1\54\3\0\1\55\5\0\1\56\3\0\1\57\11\0"+ + "\1\60\2\0\1\61\16\0\1\62\2\0\1\63\41\0"+ + "\2\25\1\64\1\0\1\65\1\0\1\65\1\66\1\0"+ + "\1\25\2\0\1\25\1\126\27\36\1\u017c\2\36\1\127"+ + "\12\130\1\65\1\124\1\131\1\124\1\0\1\124\1\132"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\123"+ + "\32\252\1\u017d\12\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\2\0\1\47\1\0"+ + "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\1\0"+ + "\1\54\3\0\1\55\5\0\1\56\3\0\1\57\11\0"+ + "\1\60\2\0\1\61\16\0\1\62\2\0\1\63\41\0"+ + "\2\25\1\64\1\0\1\65\1\0\1\65\1\66\1\0"+ + "\1\25\2\0\1\25\1\126\16\36\1\340\13\36\1\127"+ + "\12\130\1\65\1\124\1\131\1\124\1\0\1\124\1\132"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\2\0\1\47"+ + "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\53"+ + "\1\0\1\161\3\0\1\162\5\0\1\163\3\0\1\164"+ + "\11\0\1\60\2\0\1\165\16\0\1\166\2\0\1\167"+ + "\41\0\1\25\1\65\7\0\1\65\2\0\1\25\1\0"+ + "\32\25\24\0\1\u017e\242\0\1\u017f\15\0\1\47\1\0"+ + "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\1\0"+ + "\1\54\3\0\1\55\5\0\1\56\3\0\1\57\11\0"+ + "\1\60\2\0\1\61\16\0\1\62\2\0\1\63\41\0"+ + "\2\25\1\64\1\0\1\65\1\0\1\65\1\66\1\0"+ + 
"\1\25\2\0\1\25\1\126\32\36\1\127\12\130\1\u0141"+ + "\1\124\1\131\1\124\1\0\1\124\1\132\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\2\0\1\47\1\0\1\50"+ + "\1\0\1\51\1\0\1\52\1\0\1\53\1\0\1\161"+ + "\3\0\1\162\5\0\1\163\3\0\1\164\11\0\1\60"+ + "\2\0\1\165\16\0\1\166\2\0\1\167\41\0\1\25"+ + "\1\65\7\0\1\65\2\0\1\25\1\0\32\25\24\0"+ + "\1\u0180\163\0\1\123\1\261\1\262\1\263\1\264\1\265"+ "\1\266\1\267\1\270\1\271\1\272\1\273\1\274\1\275"+ - "\1\114\12\301\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\1\140\3\0\2\114\150\0\4\u0141\2\0"+ - "\1\u0141\15\0\1\u0141\6\0\12\u0141\1\305\174\0\4\u0142"+ - "\2\0\1\u0142\15\0\1\u0142\6\0\12\u0142\1\u0143\174\0"+ - "\4\u0144\2\0\1\u0144\15\0\1\u0144\6\0\1\u0145\2\u0146"+ - "\1\u0145\4\u0146\1\u0147\1\u0146\14\0\1\u0109\160\0\4\u0148"+ - "\2\0\1\u0148\15\0\1\u0148\6\0\12\u0148\1\u0149\13\0"+ - "\1\u0109\157\0\1\u014a\4\u0148\2\0\1\u0148\15\0\1\u0148"+ - "\6\0\12\u014b\1\u0149\13\0\1\u0109\157\0\1\u014a\4\u0148"+ - "\2\0\1\u0148\15\0\1\u0148\6\0\12\u014c\1\u0149\13\0"+ - "\1\u0109\157\0\1\u014a\4\u0148\2\0\1\u0148\15\0\1\u0148"+ - "\6\0\1\u014b\1\u014d\1\u014c\2\u014b\2\u014c\2\u014b\1\u014c"+ - "\1\u0149\13\0\1\u0109\225\0\1\u0136\7\0\1\u014e\1\u014f"+ - "\1\u0150\161\0\1\317\1\201\2\u0151\1\u0152\1\u0153\10\u0151"+ - "\1\201\1\u0154\5\u0151\6\201\1\320\12\201\174\0\1\317"+ - "\1\u0155\2\u0151\1\201\1\u0151\1\u0156\6\u0151\4\201\1\u0151"+ - "\1\201\2\u0151\1\201\1\u0151\1\201\3\u0151\1\320\12\201"+ - "\174\0\1\317\3\201\1\u0151\1\201\1\u0151\4\201\1\u0151"+ - "\10\201\1\u0151\2\201\1\u0151\2\201\1\u0151\1\320\12\201"+ - "\174\0\1\317\1\201\1\u0151\1\u0157\2\u0151\2\201\1\u0151"+ - "\6\201\3\u0151\11\201\1\320\12\201\174\0\1\317\3\201"+ - "\1\u0151\1\201\1\u0151\10\201\1\u0151\1\201\2\u0151\10\201"+ - "\1\320\12\201\174\0\1\317\4\201\1\u0158\5\201\1\u0151"+ - "\17\201\1\320\12\201\174\0\1\317\4\201\2\u0151\2\201"+ - "\1\u0151\1\201\1\u0151\13\201\1\u0151\2\201\1\u0151\1\320"+ - 
"\12\201\174\0\1\317\1\u0151\1\201\3\u0151\1\u0159\14\u0151"+ - "\2\201\2\u0151\2\201\1\u0151\1\201\1\320\12\201\174\0"+ - "\1\317\2\201\4\u0151\3\201\2\u0151\1\u015a\1\u0151\1\201"+ - "\2\u0151\12\201\1\320\12\201\174\0\1\317\2\u0151\2\201"+ - "\1\u0151\3\201\1\u0151\5\201\3\u0151\3\201\1\u0151\2\201"+ - "\3\u0151\1\320\12\201\174\0\1\317\5\u0151\1\u015b\1\201"+ - "\1\u0151\1\u015c\7\u0151\1\u015d\3\u0151\1\201\1\u0151\1\201"+ - "\3\u0151\1\320\12\201\174\0\1\317\1\u015e\1\u0151\1\201"+ - "\1\u0155\6\u0151\3\201\1\u0151\2\201\1\u0151\2\201\1\u0151"+ - "\6\201\1\320\12\201\174\0\1\317\1\u0151\31\201\1\320"+ - "\12\201\174\0\1\317\1\u0151\2\201\1\u0151\1\u015f\1\201"+ - "\2\u0151\1\201\3\u0151\2\201\2\u0151\1\201\1\u0151\3\201"+ - "\1\u0151\2\201\2\u0151\1\320\12\201\174\0\1\317\6\u0151"+ - "\1\201\5\u0151\3\201\2\u0151\2\201\7\u0151\1\320\12\201"+ - "\174\0\1\317\1\201\2\u0151\1\u015c\1\u0160\3\u0151\1\201"+ - "\3\u0151\1\201\1\u0151\1\201\1\u0151\1\201\1\u0151\1\201"+ - "\1\u0151\1\201\3\u0151\1\201\1\u0151\1\320\12\201\174\0"+ - "\1\317\1\u0151\6\201\1\u0151\6\201\1\u0151\4\201\1\u0151"+ - "\4\201\2\u0151\1\320\12\201\174\0\1\317\6\201\1\u0151"+ - "\7\201\1\u0151\13\201\1\320\12\201\174\0\1\317\13\201"+ - "\1\u0161\16\201\1\320\12\201\174\0\1\317\1\u0151\11\201"+ - "\1\u0151\6\201\1\u0151\10\201\1\320\12\201\174\0\1\317"+ - "\1\u0151\1\201\6\u0151\1\u0162\1\201\2\u0151\2\201\2\u0151"+ - "\1\201\1\u0151\1\201\6\u0151\1\201\1\320\12\201\174\0"+ - "\1\317\4\201\1\u0151\5\201\2\u0151\3\201\2\u0151\10\201"+ - "\1\u0151\1\320\12\201\174\0\1\317\3\201\1\u0151\1\201"+ - "\1\u0163\4\201\1\u0151\2\201\1\u0151\14\201\1\320\12\201"+ - "\174\0\1\317\2\u0151\1\201\1\u0151\3\201\2\u0151\2\201"+ - "\1\u0151\4\201\1\u0151\11\201\1\320\12\201\174\0\1\317"+ - "\3\201\1\u0151\13\201\1\u0151\12\201\1\320\12\201\174\0"+ - "\1\317\3\201\2\u0151\2\201\2\u0151\1\201\2\u0151\1\201"+ - "\1\u0151\3\201\1\u0151\1\201\1\u0151\1\201\1\u0151\2\201"+ - 
"\1\u0151\1\201\1\320\12\201\27\0\1\46\1\0\1\47"+ - "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\151"+ - "\3\0\1\152\5\0\1\153\3\0\1\154\11\0\1\57"+ - "\2\0\1\155\16\0\1\156\2\0\1\157\41\0\1\25"+ - "\1\64\7\0\1\64\2\0\1\113\1\203\1\204\1\205"+ - "\1\206\1\207\1\210\1\211\1\212\1\213\1\214\1\215"+ - "\1\216\1\217\1\220\1\221\1\222\1\223\1\224\1\225"+ - "\1\226\1\227\1\230\1\231\1\232\1\233\1\234\1\114"+ - "\12\235\1\u0136\3\114\1\0\2\114\1\115\1\u0126\1\u0127"+ - "\1\u0128\3\0\1\114\1\140\3\0\2\114\2\0\1\46"+ - "\1\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ - "\1\0\1\151\3\0\1\152\5\0\1\153\3\0\1\154"+ - "\11\0\1\57\2\0\1\155\16\0\1\156\2\0\1\157"+ - "\41\0\1\25\1\64\7\0\1\64\3\0\32\25\1\0"+ - "\12\u0164\174\0\1\u0165\45\u0126\1\u014e\2\u0126\1\u0166\1\u014e"+ - "\2\u0126\1\u0167\2\u0126\1\u0128\2\0\1\u014e\1\u0126\4\0"+ - "\1\u0126\1\114\147\0\1\u0168\45\u0127\1\u014f\2\u0127\1\u0169"+ - "\1\0\2\114\1\u016a\1\u0126\1\u0127\1\u0128\2\0\1\u014f"+ - "\1\u0127\4\0\2\114\147\0\1\u016b\45\u0128\1\u0150\2\u0128"+ - "\1\u016c\1\u0150\2\u0128\1\u016d\2\u0128\1\114\2\0\1\u0150"+ - "\1\u0128\4\0\1\u0128\1\114\2\0\1\46\1\0\1\47"+ - "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\53"+ - "\3\0\1\54\5\0\1\55\3\0\1\56\11\0\1\57"+ - "\2\0\1\60\16\0\1\61\2\0\1\62\41\0\2\25"+ - "\1\63\1\0\1\64\1\0\1\64\1\65\1\0\1\25"+ - "\2\0\1\116\5\35\1\323\24\35\1\117\12\120\1\64"+ - "\1\114\1\121\1\114\1\0\1\114\1\122\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\2\0\1\46\1\0\1\47"+ - "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\53"+ - "\3\0\1\54\5\0\1\55\3\0\1\56\11\0\1\57"+ - "\2\0\1\60\16\0\1\61\2\0\1\62\41\0\2\25"+ - "\1\63\1\0\1\64\1\0\1\64\1\65\1\0\1\25"+ - "\2\0\1\116\15\35\1\323\14\35\1\117\12\120\1\64"+ - "\1\114\1\121\1\114\1\0\1\114\1\122\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\2\0\1\46\1\0\1\47"+ - "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\53"+ - "\3\0\1\54\5\0\1\55\3\0\1\56\11\0\1\57"+ - "\2\0\1\60\16\0\1\61\2\0\1\62\41\0\2\25"+ - "\1\63\1\0\1\64\1\0\1\64\1\65\1\0\1\25"+ - "\2\0\1\116\10\35\1\323\21\35\1\117\12\120\1\64"+ - 
"\1\114\1\121\1\114\1\0\1\114\1\122\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\2\0\1\46\1\0\1\47"+ - "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\53"+ - "\3\0\1\54\5\0\1\55\3\0\1\56\11\0\1\57"+ - "\2\0\1\60\16\0\1\61\2\0\1\62\41\0\2\25"+ - "\1\63\1\0\1\64\1\0\1\64\1\65\1\0\1\25"+ - "\2\0\1\116\3\35\1\u016e\26\35\1\117\12\120\1\64"+ - "\1\114\1\121\1\114\1\0\1\114\1\122\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\2\0\1\46\1\0\1\47"+ - "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\53"+ - "\3\0\1\54\5\0\1\55\3\0\1\56\11\0\1\57"+ - "\2\0\1\60\16\0\1\61\2\0\1\62\41\0\2\25"+ - "\1\63\1\0\1\64\1\0\1\64\1\65\1\0\1\25"+ - "\2\0\1\116\3\35\1\323\26\35\1\117\12\120\1\64"+ - "\1\114\1\121\1\114\1\0\1\114\1\122\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\2\0\1\46\1\0\1\47"+ - "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\53"+ - "\3\0\1\54\5\0\1\55\3\0\1\56\11\0\1\57"+ - "\2\0\1\60\16\0\1\61\2\0\1\62\41\0\2\25"+ - "\1\63\1\0\1\64\1\0\1\64\1\65\1\0\1\25"+ - "\2\0\1\116\27\35\1\u016f\2\35\1\117\12\120\1\64"+ - "\1\114\1\121\1\114\1\0\1\114\1\122\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\113\32\235\1\u0170"+ - "\12\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\2\0\1\46\1\0\1\47\1\0"+ - "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\3\0"+ - "\1\54\5\0\1\55\3\0\1\56\11\0\1\57\2\0"+ - "\1\60\16\0\1\61\2\0\1\62\41\0\2\25\1\63"+ - "\1\0\1\64\1\0\1\64\1\65\1\0\1\25\2\0"+ - "\1\116\16\35\1\323\13\35\1\117\12\120\1\64\1\114"+ - "\1\121\1\114\1\0\1\114\1\122\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\2\0\1\46\1\0\1\47\1\0"+ - "\1\50\1\0\1\51\1\0\1\52\1\0\1\151\3\0"+ - "\1\152\5\0\1\153\3\0\1\154\11\0\1\57\2\0"+ - "\1\155\16\0\1\156\2\0\1\157\41\0\1\25\1\64"+ - "\7\0\1\64\3\0\32\25\24\0\1\u0171\241\0\1\u0172"+ - "\15\0\1\46\1\0\1\47\1\0\1\50\1\0\1\51"+ - "\1\0\1\52\1\0\1\53\3\0\1\54\5\0\1\55"+ - "\3\0\1\56\11\0\1\57\2\0\1\60\16\0\1\61"+ - "\2\0\1\62\41\0\2\25\1\63\1\0\1\64\1\0"+ - "\1\64\1\65\1\0\1\25\2\0\1\116\32\35\1\117"+ - "\12\120\1\u0134\1\114\1\121\1\114\1\0\1\114\1\122"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\2\0\1\46"+ - 
"\1\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ - "\1\0\1\151\3\0\1\152\5\0\1\153\3\0\1\154"+ - "\11\0\1\57\2\0\1\155\16\0\1\156\2\0\1\157"+ - "\41\0\1\25\1\64\7\0\1\64\3\0\32\25\24\0"+ - "\1\u0173\162\0\1\113\1\244\1\245\1\246\1\247\1\250"+ - "\1\251\1\252\1\253\1\254\1\255\1\256\1\257\1\260"+ + "\1\276\1\277\1\300\1\301\1\302\1\303\1\304\1\305"+ + "\1\306\1\307\1\310\1\311\1\312\1\124\12\252\1\u0143"+ + "\3\124\1\0\2\124\1\125\1\u0133\1\u0134\1\u0135\3\0"+ + "\1\124\1\150\3\0\2\124\204\0\12\u0171\175\0\1\363"+ + "\5\252\1\370\24\252\1\127\12\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\15\252\1\370\14\252\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\10\252\1\370\21\252\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\3\252\1\u0181\26\252\1\127\12\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\3\252\1\370\26\252\1\127"+ + "\12\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\27\252\1\u0182\2\252"+ + "\1\127\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\16\252\1\370"+ + "\13\252\1\127\12\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\2\0\1\47\1\0"+ + "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\1\0"+ + "\1\67\3\0\1\70\5\0\1\71\3\0\1\72\11\0"+ + "\1\60\2\0\1\73\16\0\1\74\2\0\1\75\41\0"+ + "\1\25\2\26\2\0\2\76\1\77\1\0\1\26\2\0"+ + "\1\25\1\u0183\32\36\1\127\12\u014c\1\0\1\124\1\137"+ + "\1\124\1\0\2\140\1\125\3\124\2\0\1\76\1\124"+ + "\4\0\2\124\2\0\1\47\1\0\1\50\1\0\1\51"+ + "\1\0\1\52\1\0\1\53\1\0\1\67\3\0\1\70"+ + "\5\0\1\71\3\0\1\72\11\0\1\60\2\0\1\73"+ + "\16\0\1\74\2\0\1\75\41\0\1\25\2\26\2\0"+ + "\2\76\1\77\1\0\1\26\2\0\1\25\1\u0183\32\36"+ + "\1\127\12\u0184\1\0\1\124\1\137\1\124\1\0\2\140"+ + "\1\125\3\124\2\0\1\76\1\124\4\0\2\124\2\0"+ + "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ + "\1\53\1\0\1\67\3\0\1\70\5\0\1\71\3\0"+ + 
"\1\72\11\0\1\60\2\0\1\73\16\0\1\74\2\0"+ + "\1\75\41\0\1\25\2\26\2\0\2\76\1\77\1\0"+ + "\1\26\2\0\1\25\1\u0183\32\36\1\127\1\u014c\1\u0185"+ + "\1\u0184\2\u014c\2\u0184\1\u014c\1\u0184\1\u014c\1\0\1\124"+ + "\1\137\1\124\1\0\2\140\1\125\3\124\2\0\1\76"+ + "\1\124\4\0\2\124\216\0\1\322\175\0\4\u0186\2\0"+ + "\1\u0186\15\0\1\u0186\6\0\12\u0186\1\u0150\175\0\4\u0187"+ + "\2\0\1\u0187\15\0\1\u0187\6\0\12\u0187\1\u0188\175\0"+ + "\4\u0189\2\0\1\u0189\15\0\1\u0189\6\0\12\u0189\1\u018a"+ + "\13\0\1\u0116\160\0\1\u0157\4\u0189\2\0\1\u0189\15\0"+ + "\1\u0189\6\0\12\u018b\1\u018a\13\0\1\u0116\160\0\1\u0157"+ + "\4\u0189\2\0\1\u0189\15\0\1\u0189\6\0\12\u018c\1\u018a"+ + "\13\0\1\u0116\160\0\1\u0157\4\u0189\2\0\1\u0189\15\0"+ + "\1\u0189\6\0\1\u018b\1\u018d\1\u018c\2\u018b\2\u018c\1\u018b"+ + "\1\u018c\1\u018b\1\u018a\13\0\1\u0116\161\0\4\u018e\2\0"+ + "\1\u018e\15\0\1\u018e\6\0\12\u018e\1\u0156\13\0\1\u0116"+ + "\161\0\4\u0151\2\0\1\u0151\15\0\1\u0151\6\0\1\u0152"+ + "\2\u0153\1\u0152\5\u0153\1\u0154\231\0\1\u018f\2\u0190\1\u018f"+ + "\5\u0190\1\u0191\175\0\1\u0157\4\u018e\2\0\1\u018e\15\0"+ + "\1\u018e\6\0\12\u0192\1\u0156\13\0\1\u0116\160\0\1\u0157"+ + "\4\u018e\2\0\1\u018e\15\0\1\u018e\6\0\12\u018e\1\u0156"+ + "\13\0\1\u0116\160\0\1\u0157\4\u018e\2\0\1\u018e\15\0"+ + "\1\u018e\6\0\2\u0192\1\u018e\2\u0192\2\u018e\1\u0192\1\u018e"+ + "\1\u0192\1\u0156\13\0\1\u0116\160\0\51\u015b\1\u0193\6\u015b"+ + "\1\u015d\2\0\2\u015b\4\0\1\u015b\151\0\51\u015c\1\u0194"+ + "\3\0\1\u015c\1\u015b\1\u015c\1\u015d\2\0\2\u015c\156\0"+ + "\51\u015d\1\u0195\6\u015d\3\0\2\u015d\4\0\1\u015d\151\0"+ + "\1\u0196\32\216\1\335\12\216\175\0\1\u0196\4\216\1\u0197"+ + "\25\216\1\335\12\216\175\0\1\u0196\15\216\1\u0123\14\216"+ + "\1\335\12\216\175\0\1\u0196\10\216\1\u0123\21\216\1\335"+ + "\12\216\175\0\1\u0196\17\216\1\u015e\12\216\1\335\12\216"+ + "\175\0\1\u0196\5\216\1\u0198\4\216\1\u015e\17\216\1\335"+ + "\12\216\175\0\1\334\20\216\1\u015e\11\216\1\335\12\216"+ + 
"\175\0\1\334\7\216\1\u015e\22\216\1\335\12\216\175\0"+ + "\1\334\27\216\1\u015e\2\216\1\335\12\216\175\0\1\u0196"+ + "\6\216\1\u0197\10\216\1\u015e\12\216\1\335\12\216\175\0"+ + "\1\u0196\24\216\1\u0199\5\216\1\335\12\216\175\0\1\334"+ + "\11\216\1\u015e\20\216\1\335\12\216\175\0\1\u0196\16\216"+ + "\1\u019a\13\216\1\335\12\216\175\0\1\u0196\12\216\1\u019b"+ + "\17\216\1\335\12\216\175\0\1\u0196\5\216\1\u015e\24\216"+ + "\1\335\12\216\175\0\1\u0196\1\u019c\31\216\1\335\12\216"+ + "\175\0\1\334\32\216\1\u019d\12\216\175\0\1\u0196\23\216"+ + "\1\u015e\6\216\1\335\12\216\175\0\1\u0196\24\216\1\u019e"+ + "\5\216\1\335\12\216\231\0\12\u019f\10\0\1\u015b\1\u015c"+ + "\1\u015d\162\0\1\u0172\45\u0133\1\u015b\2\u0133\1\u0173\1\u015b"+ + "\2\u0133\1\u0174\2\u0133\1\u0135\2\0\1\u015b\1\u0133\1\150"+ + "\3\0\1\u0133\1\124\150\0\1\123\4\u01a0\2\124\1\u01a0"+ + "\15\124\1\u01a0\6\124\12\u01a0\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\51\u015b"+ + "\1\u0193\6\u015b\1\u015d\1\217\1\0\2\u015b\4\0\1\u015b"+ + "\151\0\1\u0175\45\u0134\1\u015c\2\u0134\1\u0176\1\0\2\124"+ + "\1\u0177\1\u0133\1\u0134\1\u0135\2\0\1\u015c\1\u0134\1\150"+ + "\3\0\2\124\150\0\1\123\4\u01a1\2\124\1\u01a1\15\124"+ + "\1\u01a1\6\124\12\u01a1\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\51\u015c\1\u0194"+ + "\3\0\1\u015c\1\u015b\1\u015c\1\u015d\1\217\1\0\2\u015c"+ + "\156\0\1\u0178\45\u0135\1\u015d\2\u0135\1\u0179\1\u015d\2\u0135"+ + "\1\u017a\2\u0135\1\124\2\0\1\u015d\1\u0135\1\150\3\0"+ + "\1\u0135\1\124\150\0\1\123\4\u01a2\2\124\1\u01a2\15\124"+ + "\1\u01a2\6\124\12\u01a2\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\51\u015d\1\u0195"+ + "\6\u015d\1\0\1\217\1\0\2\u015d\4\0\1\u015d\3\0"+ + "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ + "\1\53\1\0\1\54\3\0\1\55\5\0\1\56\3\0"+ + "\1\57\11\0\1\60\2\0\1\61\16\0\1\62\2\0"+ + "\1\63\41\0\2\25\1\64\1\0\1\65\1\0\1\65"+ + "\1\66\1\0\1\25\2\0\1\25\1\126\20\36\1\u01a3"+ + 
"\11\36\1\127\12\130\1\65\1\124\1\131\1\124\1\0"+ + "\1\124\1\132\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\2\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ + "\1\0\1\53\1\0\1\54\3\0\1\55\5\0\1\56"+ + "\3\0\1\57\11\0\1\60\2\0\1\61\16\0\1\62"+ + "\2\0\1\63\41\0\2\25\1\64\1\0\1\65\1\0"+ + "\1\65\1\66\1\0\1\25\2\0\1\25\1\126\3\36"+ + "\1\353\26\36\1\127\12\130\1\65\1\124\1\131\1\124"+ + "\1\0\1\124\1\132\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\123\1\252\1\u01a4\1\u01a5\2\252\1\u01a6"+ + "\1\u01a7\1\u01a8\2\252\1\u01a9\2\252\1\u01aa\1\u01ab\2\252"+ + "\1\u01ac\1\u01ad\1\u01ae\1\252\1\u01af\1\u01b0\1\252\1\u01b1"+ + "\1\u01b2\1\127\1\u01b3\2\252\1\u01b4\1\u01b5\1\u01b6\1\252"+ + "\1\u01b7\1\u01b8\1\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\227\0\1\u01b9\163\0"+ + "\1\u01ba\32\u01bb\1\u01ba\12\u01bb\1\u01bc\2\u01ba\1\u01bd\3\u01ba"+ + "\1\u01be\3\0\1\u01bf\1\0\2\u01ba\4\0\1\u01ba\230\0"+ + "\1\u01c0\163\0\1\363\20\252\1\u01c1\11\252\1\127\12\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\3\252\1\u0103\26\252\1\127"+ + "\12\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\14\0\1\175\3\0\1\176\5\0"+ + "\1\177\3\0\1\200\14\0\1\201\16\0\1\202\2\0"+ + "\1\203\42\0\1\76\1\26\6\0\1\76\3\0\1\123"+ "\1\261\1\262\1\263\1\264\1\265\1\266\1\267\1\270"+ - "\1\271\1\272\1\273\1\274\1\275\1\114\12\235\1\u0136"+ - "\3\114\1\0\2\114\1\115\1\u0126\1\u0127\1\u0128\3\0"+ - "\1\114\1\140\3\0\2\114\203\0\12\u0164\174\0\1\346"+ - "\5\235\1\353\24\235\1\117\12\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\15\235\1\353\14\235\1\117\12\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\10\235\1\353\21\235\1\117\12\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\147\0\1\346\3\235\1\u0174\26\235\1\117\12\235"+ - "\1\0\3\114\1\0\2\114\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\147\0\1\346\3\235\1\353\26\235\1\117"+ - "\12\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - 
"\1\114\4\0\2\114\147\0\1\346\27\235\1\u0175\2\235"+ - "\1\117\12\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\16\235\1\353"+ - "\13\235\1\117\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\2\0\1\46\1\0"+ + "\1\271\1\272\1\273\1\274\1\275\1\276\1\277\1\300"+ + "\1\301\1\302\1\303\1\304\1\305\1\306\1\307\1\310"+ + "\1\311\1\312\1\124\1\u01c2\2\u01c3\1\u01c2\5\u01c3\1\u01c4"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\1\150\3\0\2\124\2\0\1\47\1\0\1\50\1\0"+ + "\1\51\1\0\1\52\1\0\1\53\1\0\1\67\3\0"+ + "\1\70\5\0\1\71\3\0\1\72\11\0\1\60\2\0"+ + "\1\73\16\0\1\74\2\0\1\75\41\0\1\25\2\26"+ + "\2\0\2\76\1\77\1\0\1\26\2\0\1\25\1\u0183"+ + "\32\36\1\127\12\316\1\0\1\124\1\137\1\124\1\0"+ + "\2\140\1\125\3\124\2\0\1\76\1\124\4\0\2\124"+ + "\2\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ + "\1\0\1\53\1\0\1\67\3\0\1\70\5\0\1\71"+ + "\3\0\1\72\11\0\1\60\2\0\1\73\16\0\1\74"+ + "\2\0\1\75\41\0\1\25\2\26\2\0\2\76\1\77"+ + "\1\0\1\26\2\0\1\25\1\u0183\32\36\1\127\2\u0184"+ + "\1\316\2\u0184\2\316\1\u0184\1\316\1\u0184\1\0\1\124"+ + "\1\137\1\124\1\0\2\140\1\125\3\124\2\0\1\76"+ + "\1\124\4\0\2\124\151\0\4\u01c5\2\0\1\u01c5\15\0"+ + "\1\u01c5\6\0\12\u01c5\1\u0150\175\0\4\u01c6\2\0\1\u01c6"+ + "\15\0\1\u01c6\6\0\12\u01c6\1\u01c7\175\0\4\u01c8\2\0"+ + "\1\u01c8\15\0\1\u01c8\6\0\1\u01c9\2\u01ca\1\u01c9\5\u01ca"+ + "\1\u01cb\14\0\1\u0116\161\0\4\u01cc\2\0\1\u01cc\15\0"+ + "\1\u01cc\6\0\12\u01cc\1\u018a\13\0\1\u0116\161\0\4\u01c8"+ + "\2\0\1\u01c8\15\0\1\u01c8\6\0\1\u01c9\2\u01ca\1\u01c9"+ + "\5\u01ca\1\u01cb\175\0\1\u0157\4\u01cc\2\0\1\u01cc\15\0"+ + "\1\u01cc\6\0\12\u01cd\1\u018a\13\0\1\u0116\160\0\1\u0157"+ + "\4\u01cc\2\0\1\u01cc\15\0\1\u01cc\6\0\12\u01cc\1\u018a"+ + "\13\0\1\u0116\160\0\1\u0157\4\u01cc\2\0\1\u01cc\15\0"+ + "\1\u01cc\6\0\2\u01cd\1\u01cc\2\u01cd\2\u01cc\1\u01cd\1\u01cc"+ + "\1\u01cd\1\u018a\13\0\1\u0116\161\0\4\u01ce\2\0\1\u01ce"+ + "\15\0\1\u01ce\6\0\12\u01ce\1\u0156\13\0\1\u0116\160\0"+ + 
"\1\u01cf\33\0\12\u0190\175\0\1\u01cf\33\0\12\u01d0\175\0"+ + "\1\u01cf\33\0\1\u0190\1\u01d1\1\u01d0\2\u0190\2\u01d0\1\u0190"+ + "\1\u01d0\1\u0190\175\0\1\u0157\4\u01ce\2\0\1\u01ce\15\0"+ + "\1\u01ce\6\0\12\u01ce\1\u0156\13\0\1\u0116\161\0\4\u01d2"+ + "\2\0\1\u01d2\15\0\1\u01d2\6\0\12\u01d2\176\0\4\u01d3"+ + "\2\0\1\u01d3\15\0\1\u01d3\6\0\12\u01d3\176\0\4\u01d4"+ + "\2\0\1\u01d4\15\0\1\u01d4\6\0\12\u01d4\175\0\1\334"+ + "\5\216\1\u015e\24\216\1\335\12\216\175\0\1\334\15\216"+ + "\1\u015e\14\216\1\335\12\216\175\0\1\334\10\216\1\u015e"+ + "\21\216\1\335\12\216\175\0\1\334\3\216\1\u01d5\26\216"+ + "\1\335\12\216\175\0\1\334\3\216\1\u015e\26\216\1\335"+ + "\12\216\175\0\1\334\27\216\1\u01d6\2\216\1\335\12\216"+ + "\176\0\32\216\1\u01d7\12\216\175\0\1\334\16\216\1\u015e"+ + "\13\216\1\335\12\216\231\0\12\u01d8\10\0\1\u015b\1\u015c"+ + "\1\u015d\162\0\1\123\4\u0133\2\124\1\u0133\15\124\1\u0133"+ + "\6\124\12\u0133\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\123\4\u0134\2\124"+ + "\1\u0134\15\124\1\u0134\6\124\12\u0134\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\123\4\u0135\2\124\1\u0135\15\124\1\u0135\6\124\12\u0135"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\2\0\1\47\1\0\1\50\1\0\1\51"+ + "\1\0\1\52\1\0\1\53\1\0\1\54\3\0\1\55"+ + "\5\0\1\56\3\0\1\57\11\0\1\60\2\0\1\61"+ + "\16\0\1\62\2\0\1\63\41\0\2\25\1\64\1\0"+ + "\1\65\1\0\1\65\1\66\1\0\1\25\2\0\1\25"+ + "\1\126\12\36\1\340\17\36\1\127\12\130\1\65\1\124"+ + "\1\131\1\124\1\0\1\124\1\132\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\11\252\1\u01d9\20\252"+ + "\1\127\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\3\252\1\u01da"+ + "\26\252\1\127\12\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\7\252"+ + "\1\u01db\22\252\1\127\4\252\1\u01dc\5\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\10\252\1\u01dd\4\252\1\u01de\5\252\1\u01df"+ + 
"\6\252\1\127\12\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\3\252"+ + "\1\u01e0\26\252\1\127\2\252\1\u01e1\7\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\7\252\1\u01e2\22\252\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\7\252\1\u01e3\22\252\1\127\3\252"+ + "\1\u01e4\6\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\32\252\1\127"+ + "\5\252\1\u01e5\4\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\7\252"+ + "\1\u01e6\22\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\31\252\1\u01e7\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\1\252\1\u01e8\30\252\1\127\12\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\7\252\1\u01e9\1\252\1\u01ea\20\252\1\127\11\252"+ + "\1\u01e5\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\22\252\1\u01eb\7\252"+ + "\1\127\2\252\1\u01ec\7\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\6\252\1\u01ed\1\u01ee\22\252\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\7\252\1\u01ef\5\252\1\u01f0\14\252\1\127"+ + "\12\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\23\252\1\u01f1\6\252"+ + "\1\127\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\32\252\1\127"+ + "\3\252\1\u01f2\6\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\3\252"+ + "\1\u01f3\26\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\17\252\1\u01f4\12\252\1\127\12\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\32\252\1\127\1\252\1\u01e5\10\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\32\252\1\127\1\u01f5\11\252\1\0\3\124"+ + 
"\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\151\0\32\u01f6\1\0\12\u01f6\11\0\1\u01f7\1\0\1\u01f8"+ + "\161\0\46\u01ba\1\u01bc\2\u01ba\1\u01bd\3\u01ba\1\u01be\5\0"+ + "\2\u01ba\4\0\1\u01ba\151\0\1\u01f9\32\u01bb\1\u01fa\12\u01bb"+ + "\1\u01fb\2\u01ba\1\u01bd\3\u01ba\1\u01be\1\0\1\u01fc\3\0"+ + "\2\u01ba\4\0\1\u01ba\151\0\46\u01bc\1\0\2\u01bc\1\u01fd"+ + "\3\u01bc\1\u01be\5\0\2\u01bc\4\0\1\u01bc\152\0\4\u01fe"+ + "\2\0\1\u01fe\15\0\1\u01fe\6\0\12\u01fe\176\0\32\u01ff"+ + "\1\0\12\u01ff\13\0\1\u01bf\162\0\4\u0200\2\0\1\u0200"+ + "\15\0\1\u0200\6\0\12\u0200\1\u0201\174\0\1\u0202\32\u0203"+ + "\1\u0202\12\u0203\1\u0204\2\u0202\1\u0205\3\u0202\1\u0206\3\0"+ + "\1\u0207\1\0\2\u0202\4\0\1\u0202\151\0\1\363\12\252"+ + "\1\370\17\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\2\0\1\47"+ + "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\53"+ + "\1\0\1\67\3\0\1\70\5\0\1\71\3\0\1\72"+ + "\11\0\1\60\2\0\1\73\16\0\1\74\2\0\1\75"+ + "\41\0\1\25\2\26\2\0\2\76\1\77\1\0\1\26"+ + "\2\0\1\25\1\u010e\32\36\1\127\12\u01c3\1\u0143\1\124"+ + "\1\137\1\124\1\0\2\140\1\125\1\u0133\1\u0134\1\u0135"+ + "\2\0\1\76\1\124\4\0\2\124\2\0\1\47\1\0"+ + "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\1\0"+ + "\1\67\3\0\1\70\5\0\1\71\3\0\1\72\11\0"+ + "\1\60\2\0\1\73\16\0\1\74\2\0\1\75\41\0"+ + "\1\25\2\26\2\0\2\76\1\77\1\0\1\26\2\0"+ + "\1\25\1\u010e\32\36\1\127\12\u0208\1\u0143\1\124\1\137"+ + "\1\124\1\0\2\140\1\125\1\u0133\1\u0134\1\u0135\2\0"+ + "\1\76\1\124\4\0\2\124\2\0\1\47\1\0\1\50"+ + "\1\0\1\51\1\0\1\52\1\0\1\53\1\0\1\67"+ + "\3\0\1\70\5\0\1\71\3\0\1\72\11\0\1\60"+ + "\2\0\1\73\16\0\1\74\2\0\1\75\41\0\1\25"+ + "\2\26\2\0\2\76\1\77\1\0\1\26\2\0\1\25"+ + "\1\u010e\32\36\1\127\1\u01c3\1\u0209\1\u0208\2\u01c3\2\u0208"+ + "\1\u01c3\1\u0208\1\u01c3\1\u0143\1\124\1\137\1\124\1\0"+ + "\2\140\1\125\1\u0133\1\u0134\1\u0135\2\0\1\76\1\124"+ + "\4\0\2\124\216\0\1\u0150\175\0\4\u020a\2\0\1\u020a"+ + "\15\0\1\u020a\6\0\12\u020a\1\u01c7\175\0\4\u020b\2\0"+ + 
"\1\u020b\15\0\1\u020b\6\0\12\u020b\1\u020c\175\0\4\u020d"+ + "\2\0\1\u020d\15\0\1\u020d\6\0\12\u020d\1\u020e\13\0"+ + "\1\u0116\160\0\1\u0157\4\u020d\2\0\1\u020d\15\0\1\u020d"+ + "\6\0\12\u020f\1\u020e\13\0\1\u0116\160\0\1\u0157\4\u020d"+ + "\2\0\1\u020d\15\0\1\u020d\6\0\12\u0210\1\u020e\13\0"+ + "\1\u0116\160\0\1\u0157\4\u020d\2\0\1\u020d\15\0\1\u020d"+ + "\6\0\1\u020f\1\u0211\1\u0210\2\u020f\2\u0210\1\u020f\1\u0210"+ + "\1\u020f\1\u020e\13\0\1\u0116\161\0\4\u0212\2\0\1\u0212"+ + "\15\0\1\u0212\6\0\12\u0212\1\u018a\13\0\1\u0116\160\0"+ + "\1\u0157\4\u0212\2\0\1\u0212\15\0\1\u0212\6\0\12\u0212"+ + "\1\u018a\13\0\1\u0116\226\0\1\u0156\13\0\1\u0116\214\0"+ + "\1\u0213\2\u0214\1\u0213\5\u0214\1\u0215\175\0\1\u01cf\242\0"+ + "\1\u01cf\33\0\2\u01d0\1\0\2\u01d0\2\0\1\u01d0\1\0"+ + "\1\u01d0\176\0\4\u015b\2\0\1\u015b\15\0\1\u015b\6\0"+ + "\12\u015b\176\0\4\u015c\2\0\1\u015c\15\0\1\u015c\6\0"+ + "\12\u015c\176\0\4\u015d\2\0\1\u015d\15\0\1\u015d\6\0"+ + "\12\u015d\175\0\1\334\20\216\1\u0216\11\216\1\335\12\216"+ + "\175\0\1\334\3\216\1\u0169\26\216\1\335\12\216\176\0"+ + "\1\216\1\u0217\1\u0218\2\216\1\u0219\1\u021a\1\u021b\2\216"+ + "\1\u021c\2\216\1\u021d\1\u021e\2\216\1\u021f\1\u0220\1\u0221"+ + "\1\216\1\u0222\1\u0223\1\216\1\u0224\1\u0225\1\335\1\u0226"+ + "\2\216\1\u0227\1\u0228\1\u0229\1\216\1\u022a\1\u022b\1\216"+ + "\231\0\12\u022c\10\0\1\u015b\1\u015c\1\u015d\162\0\1\363"+ + "\1\252\1\u022d\30\252\1\127\12\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\24\252\1\u022e\5\252\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\24\252\1\u022f\5\252\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\1\252\1\u0230\30\252\1\127\12\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\14\252\1\u0231\15\252\1\127"+ + "\12\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\1\252\1\u0232\30\252"+ + 
"\1\127\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\1\252\1\u0233"+ + "\30\252\1\127\12\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\1\252"+ + "\1\u0234\30\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\21\252\1\u0235\10\252\1\127\12\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\24\252\1\u0236\5\252\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\24\252\1\u0237\5\252\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\1\u0146\31\252\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\24\252\1\u0234\5\252\1\127\12\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\24\252\1\u0238\5\252\1\127"+ + "\12\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\1\252\1\u0239\30\252"+ + "\1\127\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\31\252\1\u023a"+ + "\1\127\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\24\252\1\u023b"+ + "\5\252\1\127\12\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\1\252"+ + "\1\u023c\30\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\1\u023d\31\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\21\252\1\u023e\10\252\1\127\12\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\4\252\1\u023f\25\252\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\24\252\1\u0240\5\252\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\24\252\1\u0241\5\252\1\127\12\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\4\252\1\u0242\25\252\1\127"+ + 
"\12\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\21\252\1\u0243\10\252"+ + "\1\127\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\24\252\1\u0244"+ + "\5\252\1\127\12\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\32\252"+ + "\1\127\1\u0245\11\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\32\252"+ + "\1\127\7\252\1\u0246\2\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\1\u0247\31\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\u0248"+ + "\32\u01f6\1\u0249\12\u01f6\11\0\1\u01f7\163\0\51\u01f7\1\u024a"+ + "\3\0\3\u01f7\1\u015d\3\0\1\u01f7\157\0\4\u024b\2\0"+ + "\1\u024b\15\0\1\u024b\6\0\12\u024b\1\u024c\174\0\1\u01ba"+ + "\32\u01bb\1\u01ba\12\u01bb\1\u01bc\2\u01ba\1\u01bd\3\u01ba\1\u01be"+ + "\5\0\2\u01ba\4\0\1\u01ba\151\0\1\u01ba\32\u01bb\1\u01fa"+ + "\12\u01bb\1\u01bc\2\u01ba\1\u01bd\3\u01ba\1\u01be\5\0\2\u01ba"+ + "\4\0\1\u01ba\151\0\34\u01bc\12\u024d\1\0\2\u01bc\1\u01fd"+ + "\3\u01bc\1\u01be\5\0\2\u01bc\4\0\1\u01bc\151\0\51\u01fc"+ + "\1\u024e\3\0\3\u01fc\1\u015d\2\0\1\u024f\1\u01fc\157\0"+ + "\4\u0250\2\0\1\u0250\15\0\1\u0250\6\0\12\u0250\176\0"+ + "\4\u01ba\2\0\1\u01ba\15\0\1\u01ba\6\0\12\u01ba\175\0"+ + "\1\u0251\32\u01ff\1\u0252\12\u01ff\1\u0253\10\0\1\u01fc\164\0"+ + "\4\u0254\2\0\1\u0254\15\0\1\u0254\6\0\12\u0254\1\u0255"+ + "\242\0\1\u0256\174\0\46\u0202\1\u0204\2\u0202\1\u0205\3\u0202"+ + "\1\u0206\5\0\2\u0202\4\0\1\u0202\151\0\1\u0257\32\u0203"+ + "\1\u0258\12\u0203\1\u0259\2\u0202\1\u0205\3\u0202\1\u0206\1\u015b"+ + "\1\u015c\1\u015d\2\0\2\u0202\4\0\1\u0202\151\0\46\u0204"+ + "\1\0\2\u0204\1\u025a\3\u0204\1\u0206\5\0\2\u0204\4\0"+ + "\1\u0204\152\0\4\u025b\2\0\1\u025b\15\0\1\u025b\6\0"+ + "\12\u025b\176\0\32\u025c\1\0\12\u025c\13\0\1\u0207\13\0"+ "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ - "\1\66\3\0\1\67\5\0\1\70\3\0\1\71\11\0"+ - "\1\57\2\0\1\72\16\0\1\73\2\0\1\74\41\0"+ - 
"\1\25\2\26\2\0\2\75\1\76\1\0\1\26\2\0"+ - "\1\u0176\32\35\1\117\12\u013f\1\0\1\114\1\127\1\114"+ - "\1\0\2\130\1\115\3\114\2\0\1\75\1\114\4\0"+ - "\2\114\2\0\1\46\1\0\1\47\1\0\1\50\1\0"+ - "\1\51\1\0\1\52\1\0\1\66\3\0\1\67\5\0"+ - "\1\70\3\0\1\71\11\0\1\57\2\0\1\72\16\0"+ - "\1\73\2\0\1\74\41\0\1\25\2\26\2\0\2\75"+ - "\1\76\1\0\1\26\2\0\1\u0176\32\35\1\117\12\u0177"+ - "\1\0\1\114\1\127\1\114\1\0\2\130\1\115\3\114"+ - "\2\0\1\75\1\114\4\0\2\114\2\0\1\46\1\0"+ - "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ - "\1\66\3\0\1\67\5\0\1\70\3\0\1\71\11\0"+ - "\1\57\2\0\1\72\16\0\1\73\2\0\1\74\41\0"+ - "\1\25\2\26\2\0\2\75\1\76\1\0\1\26\2\0"+ - "\1\u0176\32\35\1\117\1\u013f\1\u0178\1\u0177\2\u013f\2\u0177"+ - "\2\u013f\1\u0177\1\0\1\114\1\127\1\114\1\0\2\130"+ - "\1\115\3\114\2\0\1\75\1\114\4\0\2\114\215\0"+ - "\1\305\174\0\4\u0179\2\0\1\u0179\15\0\1\u0179\6\0"+ - "\12\u0179\1\u0143\174\0\4\u017a\2\0\1\u017a\15\0\1\u017a"+ - "\6\0\12\u017a\1\u017b\174\0\4\u017c\2\0\1\u017c\15\0"+ - "\1\u017c\6\0\12\u017c\1\u017d\13\0\1\u0109\157\0\1\u014a"+ - "\4\u017c\2\0\1\u017c\15\0\1\u017c\6\0\12\u017e\1\u017d"+ - "\13\0\1\u0109\157\0\1\u014a\4\u017c\2\0\1\u017c\15\0"+ - "\1\u017c\6\0\12\u017f\1\u017d\13\0\1\u0109\157\0\1\u014a"+ - "\4\u017c\2\0\1\u017c\15\0\1\u017c\6\0\1\u017e\1\u0180"+ - "\1\u017f\2\u017e\2\u017f\2\u017e\1\u017f\1\u017d\13\0\1\u0109"+ - "\160\0\4\u0181\2\0\1\u0181\15\0\1\u0181\6\0\12\u0181"+ - "\1\u0149\13\0\1\u0109\160\0\4\u0144\2\0\1\u0144\15\0"+ - "\1\u0144\6\0\1\u0145\2\u0146\1\u0145\4\u0146\1\u0147\1\u0146"+ - "\230\0\1\u0182\2\u0183\1\u0182\4\u0183\1\u0184\1\u0183\174\0"+ - "\1\u014a\4\u0181\2\0\1\u0181\15\0\1\u0181\6\0\12\u0185"+ - "\1\u0149\13\0\1\u0109\157\0\1\u014a\4\u0181\2\0\1\u0181"+ - "\15\0\1\u0181\6\0\12\u0181\1\u0149\13\0\1\u0109\157\0"+ - "\1\u014a\4\u0181\2\0\1\u0181\15\0\1\u0181\6\0\2\u0185"+ - "\1\u0181\2\u0185\2\u0181\2\u0185\1\u0181\1\u0149\13\0\1\u0109"+ - "\157\0\51\u014e\1\u0186\6\u014e\1\u0150\2\0\2\u014e\4\0"+ - 
"\1\u014e\150\0\51\u014f\1\u0187\3\0\1\u014f\1\u014e\1\u014f"+ - "\1\u0150\2\0\2\u014f\155\0\51\u0150\1\u0188\6\u0150\3\0"+ - "\2\u0150\4\0\1\u0150\150\0\1\u0189\32\201\1\320\12\201"+ - "\174\0\1\u0189\4\201\1\u018a\25\201\1\320\12\201\174\0"+ - "\1\u0189\15\201\1\u0116\14\201\1\320\12\201\174\0\1\u0189"+ - "\10\201\1\u0116\21\201\1\320\12\201\174\0\1\u0189\17\201"+ - "\1\u0151\12\201\1\320\12\201\174\0\1\u0189\5\201\1\u018b"+ - "\4\201\1\u0151\17\201\1\320\12\201\174\0\1\317\20\201"+ - "\1\u0151\11\201\1\320\12\201\174\0\1\317\7\201\1\u0151"+ - "\22\201\1\320\12\201\174\0\1\317\27\201\1\u0151\2\201"+ - "\1\320\12\201\174\0\1\u0189\6\201\1\u018a\10\201\1\u0151"+ - "\12\201\1\320\12\201\174\0\1\u0189\24\201\1\u018c\5\201"+ - "\1\320\12\201\174\0\1\317\11\201\1\u0151\20\201\1\320"+ - "\12\201\174\0\1\u0189\16\201\1\u018d\13\201\1\320\12\201"+ - "\174\0\1\u0189\12\201\1\u018e\17\201\1\320\12\201\174\0"+ - "\1\u0189\5\201\1\u0151\24\201\1\320\12\201\174\0\1\u0189"+ - "\1\u018f\31\201\1\320\12\201\174\0\1\317\32\201\1\u0190"+ - "\12\201\174\0\1\u0189\23\201\1\u0151\6\201\1\320\12\201"+ - "\174\0\1\u0189\24\201\1\u0191\5\201\1\320\12\201\230\0"+ - "\12\u0192\10\0\1\u014e\1\u014f\1\u0150\161\0\1\u0165\45\u0126"+ - "\1\u014e\2\u0126\1\u0166\1\u014e\2\u0126\1\u0167\2\u0126\1\u0128"+ - "\2\0\1\u014e\1\u0126\1\140\3\0\1\u0126\1\114\147\0"+ - "\1\113\4\u0193\2\114\1\u0193\15\114\1\u0193\6\114\12\u0193"+ - "\1\0\3\114\1\0\2\114\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\147\0\51\u014e\1\u0186\6\u014e\1\u0150\1\202"+ - "\1\0\2\u014e\4\0\1\u014e\150\0\1\u0168\45\u0127\1\u014f"+ - "\2\u0127\1\u0169\1\0\2\114\1\u016a\1\u0126\1\u0127\1\u0128"+ - "\2\0\1\u014f\1\u0127\1\140\3\0\2\114\147\0\1\113"+ - "\4\u0194\2\114\1\u0194\15\114\1\u0194\6\114\12\u0194\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\147\0\51\u014f\1\u0187\3\0\1\u014f\1\u014e\1\u014f"+ - "\1\u0150\1\202\1\0\2\u014f\155\0\1\u016b\45\u0128\1\u0150"+ - 
"\2\u0128\1\u016c\1\u0150\2\u0128\1\u016d\2\u0128\1\114\2\0"+ - "\1\u0150\1\u0128\1\140\3\0\1\u0128\1\114\147\0\1\113"+ - "\4\u0195\2\114\1\u0195\15\114\1\u0195\6\114\12\u0195\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\147\0\51\u0150\1\u0188\6\u0150\1\0\1\202\1\0"+ - "\2\u0150\4\0\1\u0150\3\0\1\46\1\0\1\47\1\0"+ - "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\3\0"+ - "\1\54\5\0\1\55\3\0\1\56\11\0\1\57\2\0"+ - "\1\60\16\0\1\61\2\0\1\62\41\0\2\25\1\63"+ - "\1\0\1\64\1\0\1\64\1\65\1\0\1\25\2\0"+ - "\1\116\20\35\1\u0196\11\35\1\117\12\120\1\64\1\114"+ - "\1\121\1\114\1\0\1\114\1\122\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\2\0\1\46\1\0\1\47\1\0"+ - "\1\50\1\0\1\51\1\0\1\52\1\0\1\53\3\0"+ - "\1\54\5\0\1\55\3\0\1\56\11\0\1\57\2\0"+ - "\1\60\16\0\1\61\2\0\1\62\41\0\2\25\1\63"+ - "\1\0\1\64\1\0\1\64\1\65\1\0\1\25\2\0"+ - "\1\116\3\35\1\336\26\35\1\117\12\120\1\64\1\114"+ - "\1\121\1\114\1\0\1\114\1\122\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\147\0\1\113\2\235\1\u0197\2\235"+ - "\1\u0198\1\u0199\1\u019a\2\235\1\u019b\2\235\1\u019c\3\235"+ - "\1\u019d\1\u019e\1\u019f\1\235\1\u01a0\1\u01a1\1\235\1\u01a2"+ - "\1\u01a3\1\117\1\u01a4\2\235\1\u01a5\1\235\1\u01a6\1\u01a7"+ - "\3\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\226\0\1\u01a8\162\0\1\u01a9\32\u01aa"+ - "\1\u01a9\12\u01aa\1\u01ab\2\u01a9\1\u01ac\3\u01a9\1\u01ad\3\0"+ - "\1\u01ae\1\0\2\u01a9\4\0\1\u01a9\227\0\1\u01af\162\0"+ - "\1\346\20\235\1\u01b0\11\235\1\117\12\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\3\235\1\366\26\235\1\117\12\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\14\0\1\165\3\0\1\166\5\0\1\167\3\0"+ - "\1\170\14\0\1\171\16\0\1\172\2\0\1\173\42\0"+ - "\1\75\1\26\6\0\1\75\2\0\1\113\1\244\1\245"+ - "\1\246\1\247\1\250\1\251\1\252\1\253\1\254\1\255"+ - "\1\256\1\257\1\260\1\261\1\262\1\263\1\264\1\265"+ - "\1\266\1\267\1\270\1\271\1\272\1\273\1\274\1\275"+ - "\1\114\1\u01b1\2\u01b2\1\u01b1\4\u01b2\1\u01b3\1\u01b2\1\0"+ - 
"\3\114\1\0\2\114\1\115\3\114\3\0\1\114\1\140"+ - "\3\0\2\114\2\0\1\46\1\0\1\47\1\0\1\50"+ - "\1\0\1\51\1\0\1\52\1\0\1\66\3\0\1\67"+ - "\5\0\1\70\3\0\1\71\11\0\1\57\2\0\1\72"+ - "\16\0\1\73\2\0\1\74\41\0\1\25\2\26\2\0"+ - "\2\75\1\76\1\0\1\26\2\0\1\u0176\32\35\1\117"+ - "\12\301\1\0\1\114\1\127\1\114\1\0\2\130\1\115"+ - "\3\114\2\0\1\75\1\114\4\0\2\114\2\0\1\46"+ - "\1\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ - "\1\0\1\66\3\0\1\67\5\0\1\70\3\0\1\71"+ - "\11\0\1\57\2\0\1\72\16\0\1\73\2\0\1\74"+ - "\41\0\1\25\2\26\2\0\2\75\1\76\1\0\1\26"+ - "\2\0\1\u0176\32\35\1\117\2\u0177\1\301\2\u0177\2\301"+ - "\2\u0177\1\301\1\0\1\114\1\127\1\114\1\0\2\130"+ - "\1\115\3\114\2\0\1\75\1\114\4\0\2\114\150\0"+ - "\4\u01b4\2\0\1\u01b4\15\0\1\u01b4\6\0\12\u01b4\1\u0143"+ - "\174\0\4\u01b5\2\0\1\u01b5\15\0\1\u01b5\6\0\12\u01b5"+ - "\1\u01b6\174\0\4\u01b7\2\0\1\u01b7\15\0\1\u01b7\6\0"+ - "\1\u01b8\2\u01b9\1\u01b8\4\u01b9\1\u01ba\1\u01b9\14\0\1\u0109"+ - "\160\0\4\u01bb\2\0\1\u01bb\15\0\1\u01bb\6\0\12\u01bb"+ - "\1\u017d\13\0\1\u0109\160\0\4\u01b7\2\0\1\u01b7\15\0"+ - "\1\u01b7\6\0\1\u01b8\2\u01b9\1\u01b8\4\u01b9\1\u01ba\1\u01b9"+ - "\174\0\1\u014a\4\u01bb\2\0\1\u01bb\15\0\1\u01bb\6\0"+ - "\12\u01bc\1\u017d\13\0\1\u0109\157\0\1\u014a\4\u01bb\2\0"+ - "\1\u01bb\15\0\1\u01bb\6\0\12\u01bb\1\u017d\13\0\1\u0109"+ - "\157\0\1\u014a\4\u01bb\2\0\1\u01bb\15\0\1\u01bb\6\0"+ - "\2\u01bc\1\u01bb\2\u01bc\2\u01bb\2\u01bc\1\u01bb\1\u017d\13\0"+ - "\1\u0109\160\0\4\u01bd\2\0\1\u01bd\15\0\1\u01bd\6\0"+ - "\12\u01bd\1\u0149\13\0\1\u0109\157\0\1\u01be\33\0\12\u0183"+ - "\174\0\1\u01be\33\0\12\u01bf\174\0\1\u01be\33\0\1\u0183"+ - "\1\u01c0\1\u01bf\2\u0183\2\u01bf\2\u0183\1\u01bf\174\0\1\u014a"+ - "\4\u01bd\2\0\1\u01bd\15\0\1\u01bd\6\0\12\u01bd\1\u0149"+ - "\13\0\1\u0109\160\0\4\u01c1\2\0\1\u01c1\15\0\1\u01c1"+ - "\6\0\12\u01c1\175\0\4\u01c2\2\0\1\u01c2\15\0\1\u01c2"+ - "\6\0\12\u01c2\175\0\4\u01c3\2\0\1\u01c3\15\0\1\u01c3"+ - "\6\0\12\u01c3\174\0\1\317\5\201\1\u0151\24\201\1\320"+ - 
"\12\201\174\0\1\317\15\201\1\u0151\14\201\1\320\12\201"+ - "\174\0\1\317\10\201\1\u0151\21\201\1\320\12\201\174\0"+ - "\1\317\3\201\1\u01c4\26\201\1\320\12\201\174\0\1\317"+ - "\3\201\1\u0151\26\201\1\320\12\201\174\0\1\317\27\201"+ - "\1\u01c5\2\201\1\320\12\201\175\0\32\201\1\u01c6\12\201"+ - "\174\0\1\317\16\201\1\u0151\13\201\1\320\12\201\230\0"+ - "\12\u01c7\10\0\1\u014e\1\u014f\1\u0150\161\0\1\113\4\u0126"+ - "\2\114\1\u0126\15\114\1\u0126\6\114\12\u0126\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\113\4\u0127\2\114\1\u0127\15\114\1\u0127\6\114"+ - "\12\u0127\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\147\0\1\113\4\u0128\2\114\1\u0128"+ - "\15\114\1\u0128\6\114\12\u0128\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\2\0\1\46"+ - "\1\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ - "\1\0\1\53\3\0\1\54\5\0\1\55\3\0\1\56"+ - "\11\0\1\57\2\0\1\60\16\0\1\61\2\0\1\62"+ - "\41\0\2\25\1\63\1\0\1\64\1\0\1\64\1\65"+ - "\1\0\1\25\2\0\1\116\12\35\1\323\17\35\1\117"+ - "\12\120\1\64\1\114\1\121\1\114\1\0\1\114\1\122"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\3\235\1\u01c8\26\235\1\117\12\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\32\235\1\117\4\235\1\u01c9\5\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\10\235\1\u01ca\12\235\1\u01cb\6\235\1\117"+ - "\12\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\147\0\1\346\32\235\1\117\2\235"+ - "\1\u01cc\7\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\7\235\1\u01cd"+ - "\22\235\1\117\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\7\235"+ - "\1\u01ce\22\235\1\117\3\235\1\u01cf\6\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\7\235\1\u01d0\22\235\1\117\12\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\147\0\1\346\31\235\1\u01d1\1\117\12\235\1\0"+ - 
"\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\147\0\1\346\1\235\1\u01d2\30\235\1\117\12\235"+ - "\1\0\3\114\1\0\2\114\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\147\0\1\346\7\235\1\u01d3\1\235\1\u01d4"+ - "\20\235\1\117\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\22\235"+ - "\1\u01d5\7\235\1\117\2\235\1\u01d6\7\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\7\235\1\u01d7\22\235\1\117\12\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\147\0\1\346\7\235\1\u01d8\5\235\1\u01d9\14\235"+ - "\1\117\12\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\23\235\1\u01da"+ - "\6\235\1\117\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\32\235"+ - "\1\117\3\235\1\u01db\6\235\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\17\235\1\u01dc\12\235\1\117\12\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\32\235\1\117\1\u01dd\11\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\150\0"+ - "\32\u01de\1\0\12\u01de\11\0\1\u01df\1\0\1\u01e0\160\0"+ - "\46\u01a9\1\u01ab\2\u01a9\1\u01ac\3\u01a9\1\u01ad\5\0\2\u01a9"+ - "\4\0\1\u01a9\150\0\1\u01e1\32\u01aa\1\u01e2\12\u01aa\1\u01e3"+ - "\2\u01a9\1\u01ac\3\u01a9\1\u01ad\1\0\1\u01e4\3\0\2\u01a9"+ - "\4\0\1\u01a9\150\0\46\u01ab\1\0\2\u01ab\1\u01e5\3\u01ab"+ - "\1\u01ad\5\0\2\u01ab\4\0\1\u01ab\151\0\4\u01e6\2\0"+ - "\1\u01e6\15\0\1\u01e6\6\0\12\u01e6\175\0\32\u01e7\1\0"+ - "\12\u01e7\13\0\1\u01ae\161\0\4\u01e8\2\0\1\u01e8\15\0"+ - "\1\u01e8\6\0\12\u01e8\1\u01e9\173\0\1\u01ea\32\u01eb\1\u01ea"+ - "\12\u01eb\1\u01ec\2\u01ea\1\u01ed\3\u01ea\1\u01ee\3\0\1\u01ef"+ - "\1\0\2\u01ea\4\0\1\u01ea\150\0\1\346\12\235\1\353"+ - "\17\235\1\117\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\2\0\1\46\1\0"+ - "\1\47\1\0\1\50\1\0\1\51\1\0\1\52\1\0"+ - "\1\66\3\0\1\67\5\0\1\70\3\0\1\71\11\0"+ - "\1\57\2\0\1\72\16\0\1\73\2\0\1\74\41\0"+ - 
"\1\25\2\26\2\0\2\75\1\76\1\0\1\26\2\0"+ - "\1\u0101\32\35\1\117\12\u01b2\1\u0136\1\114\1\127\1\114"+ - "\1\0\2\130\1\115\1\u0126\1\u0127\1\u0128\2\0\1\75"+ - "\1\114\4\0\2\114\2\0\1\46\1\0\1\47\1\0"+ - "\1\50\1\0\1\51\1\0\1\52\1\0\1\66\3\0"+ - "\1\67\5\0\1\70\3\0\1\71\11\0\1\57\2\0"+ - "\1\72\16\0\1\73\2\0\1\74\41\0\1\25\2\26"+ - "\2\0\2\75\1\76\1\0\1\26\2\0\1\u0101\32\35"+ - "\1\117\12\u01f0\1\u0136\1\114\1\127\1\114\1\0\2\130"+ - "\1\115\1\u0126\1\u0127\1\u0128\2\0\1\75\1\114\4\0"+ - "\2\114\2\0\1\46\1\0\1\47\1\0\1\50\1\0"+ - "\1\51\1\0\1\52\1\0\1\66\3\0\1\67\5\0"+ - "\1\70\3\0\1\71\11\0\1\57\2\0\1\72\16\0"+ - "\1\73\2\0\1\74\41\0\1\25\2\26\2\0\2\75"+ - "\1\76\1\0\1\26\2\0\1\u0101\32\35\1\117\1\u01b2"+ - "\1\u01f1\1\u01f0\2\u01b2\2\u01f0\2\u01b2\1\u01f0\1\u0136\1\114"+ - "\1\127\1\114\1\0\2\130\1\115\1\u0126\1\u0127\1\u0128"+ - "\2\0\1\75\1\114\4\0\2\114\215\0\1\u0143\174\0"+ - "\4\u01f2\2\0\1\u01f2\15\0\1\u01f2\6\0\12\u01f2\1\u01b6"+ - "\174\0\4\u01f3\2\0\1\u01f3\15\0\1\u01f3\6\0\12\u01f3"+ - "\1\u01f4\174\0\4\u01f5\2\0\1\u01f5\15\0\1\u01f5\6\0"+ - "\12\u01f5\1\u01f6\13\0\1\u0109\157\0\1\u014a\4\u01f5\2\0"+ - "\1\u01f5\15\0\1\u01f5\6\0\12\u01f7\1\u01f6\13\0\1\u0109"+ - "\157\0\1\u014a\4\u01f5\2\0\1\u01f5\15\0\1\u01f5\6\0"+ - "\12\u01f8\1\u01f6\13\0\1\u0109\157\0\1\u014a\4\u01f5\2\0"+ - "\1\u01f5\15\0\1\u01f5\6\0\1\u01f7\1\u01f9\1\u01f8\2\u01f7"+ - "\2\u01f8\2\u01f7\1\u01f8\1\u01f6\13\0\1\u0109\160\0\4\u01fa"+ - "\2\0\1\u01fa\15\0\1\u01fa\6\0\12\u01fa\1\u017d\13\0"+ - "\1\u0109\157\0\1\u014a\4\u01fa\2\0\1\u01fa\15\0\1\u01fa"+ - "\6\0\12\u01fa\1\u017d\13\0\1\u0109\225\0\1\u0149\13\0"+ - "\1\u0109\213\0\1\u01fb\2\u01fc\1\u01fb\4\u01fc\1\u01fd\1\u01fc"+ - "\174\0\1\u01be\241\0\1\u01be\33\0\2\u01bf\1\0\2\u01bf"+ - "\2\0\2\u01bf\176\0\4\u014e\2\0\1\u014e\15\0\1\u014e"+ - "\6\0\12\u014e\175\0\4\u014f\2\0\1\u014f\15\0\1\u014f"+ - "\6\0\12\u014f\175\0\4\u0150\2\0\1\u0150\15\0\1\u0150"+ - "\6\0\12\u0150\174\0\1\317\20\201\1\u01fe\11\201\1\320"+ - 
"\12\201\174\0\1\317\3\201\1\u015c\26\201\1\320\12\201"+ - "\175\0\2\201\1\u01ff\2\201\1\u0200\1\u0201\1\u0202\2\201"+ - "\1\u0203\2\201\1\u0204\3\201\1\u0205\1\u0206\1\u0207\1\201"+ - "\1\u0208\1\u0209\1\201\1\u020a\1\u020b\1\320\1\u020c\2\201"+ - "\1\u020d\1\201\1\u020e\1\u020f\3\201\230\0\12\u0210\10\0"+ - "\1\u014e\1\u014f\1\u0150\161\0\1\346\24\235\1\u0211\5\235"+ - "\1\117\12\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\1\235\1\u0212"+ - "\30\235\1\117\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\14\235"+ - "\1\u0213\15\235\1\117\12\235\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\1\235\1\u0214\30\235\1\117\12\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\21\235\1\u0215\10\235\1\117\12\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\24\235\1\u0216\5\235\1\117\12\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\147\0\1\346\24\235\1\u0217\5\235\1\117\12\235"+ - "\1\0\3\114\1\0\2\114\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\147\0\1\346\1\u0139\31\235\1\117\12\235"+ - "\1\0\3\114\1\0\2\114\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\147\0\1\346\24\235\1\u0218\5\235\1\117"+ - "\12\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\147\0\1\346\1\235\1\u0219\30\235"+ - "\1\117\12\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\31\235\1\u021a"+ - "\1\117\12\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\24\235\1\u021b"+ - "\5\235\1\117\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\1\235"+ - "\1\u021c\30\235\1\117\12\235\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\1\u021d\31\235\1\117\12\235\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\21\235\1\u021e\10\235\1\117\12\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - 
"\1\346\24\235\1\u021f\5\235\1\117\12\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\24\235\1\u0220\5\235\1\117\12\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\147\0\1\346\4\235\1\u0221\25\235\1\117\12\235"+ - "\1\0\3\114\1\0\2\114\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\147\0\1\346\21\235\1\u0222\10\235\1\117"+ - "\12\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\147\0\1\346\24\235\1\u0223\5\235"+ - "\1\117\12\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\32\235\1\117"+ - "\7\235\1\u0224\2\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\1\u0225"+ - "\31\235\1\117\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\u0226\32\u01de"+ - "\1\u0227\12\u01de\11\0\1\u01df\162\0\51\u01df\1\u0228\3\0"+ - "\3\u01df\1\u0150\3\0\1\u01df\156\0\4\u0229\2\0\1\u0229"+ - "\15\0\1\u0229\6\0\12\u0229\1\u022a\173\0\1\u01a9\32\u01aa"+ - "\1\u01a9\12\u01aa\1\u01ab\2\u01a9\1\u01ac\3\u01a9\1\u01ad\5\0"+ - "\2\u01a9\4\0\1\u01a9\150\0\1\u01a9\32\u01aa\1\u01e2\12\u01aa"+ - "\1\u01ab\2\u01a9\1\u01ac\3\u01a9\1\u01ad\5\0\2\u01a9\4\0"+ - "\1\u01a9\150\0\34\u01ab\12\u022b\1\0\2\u01ab\1\u01e5\3\u01ab"+ - "\1\u01ad\5\0\2\u01ab\4\0\1\u01ab\150\0\51\u01e4\1\u022c"+ - "\3\0\3\u01e4\1\u0150\2\0\1\u022d\1\u01e4\156\0\4\u022e"+ - "\2\0\1\u022e\15\0\1\u022e\6\0\12\u022e\175\0\4\u01a9"+ - "\2\0\1\u01a9\15\0\1\u01a9\6\0\12\u01a9\174\0\1\u022f"+ - "\32\u01e7\1\u0230\12\u01e7\1\u0231\10\0\1\u01e4\163\0\4\u0232"+ - "\2\0\1\u0232\15\0\1\u0232\6\0\12\u0232\1\u0233\241\0"+ - "\1\u0234\173\0\46\u01ea\1\u01ec\2\u01ea\1\u01ed\3\u01ea\1\u01ee"+ - "\5\0\2\u01ea\4\0\1\u01ea\150\0\1\u0235\32\u01eb\1\u0236"+ - "\12\u01eb\1\u0237\2\u01ea\1\u01ed\3\u01ea\1\u01ee\1\u014e\1\u014f"+ - "\1\u0150\2\0\2\u01ea\4\0\1\u01ea\150\0\46\u01ec\1\0"+ - "\2\u01ec\1\u0238\3\u01ec\1\u01ee\5\0\2\u01ec\4\0\1\u01ec"+ - "\151\0\4\u0239\2\0\1\u0239\15\0\1\u0239\6\0\12\u0239"+ - 
"\175\0\32\u023a\1\0\12\u023a\13\0\1\u01ef\13\0\1\46"+ - "\1\0\1\47\1\0\1\50\1\0\1\51\1\0\1\52"+ - "\1\0\1\66\3\0\1\67\5\0\1\70\3\0\1\71"+ - "\11\0\1\57\2\0\1\72\16\0\1\73\2\0\1\74"+ - "\41\0\1\25\2\26\2\0\2\75\1\76\1\0\1\26"+ - "\2\0\1\u0101\32\35\1\117\12\301\1\u0136\1\114\1\127"+ - "\1\114\1\0\2\130\1\115\1\u0126\1\u0127\1\u0128\2\0"+ - "\1\75\1\114\4\0\2\114\2\0\1\46\1\0\1\47"+ - "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\66"+ - "\3\0\1\67\5\0\1\70\3\0\1\71\11\0\1\57"+ - "\2\0\1\72\16\0\1\73\2\0\1\74\41\0\1\25"+ - "\2\26\2\0\2\75\1\76\1\0\1\26\2\0\1\u0101"+ - "\32\35\1\117\2\u01f0\1\301\2\u01f0\2\301\2\u01f0\1\301"+ - "\1\u0136\1\114\1\127\1\114\1\0\2\130\1\115\1\u0126"+ - "\1\u0127\1\u0128\2\0\1\75\1\114\4\0\2\114\150\0"+ - "\4\u023b\2\0\1\u023b\15\0\1\u023b\6\0\12\u023b\1\u01b6"+ - "\174\0\4\u023c\2\0\1\u023c\15\0\1\u023c\6\0\12\u023c"+ - "\1\u023d\174\0\4\u023e\2\0\1\u023e\15\0\1\u023e\6\0"+ - "\1\u023f\2\u0240\1\u023f\4\u0240\1\u0241\1\u0240\14\0\1\u0109"+ - "\160\0\4\u0242\2\0\1\u0242\15\0\1\u0242\6\0\12\u0242"+ - "\1\u01f6\13\0\1\u0109\160\0\4\u023e\2\0\1\u023e\15\0"+ - "\1\u023e\6\0\1\u023f\2\u0240\1\u023f\4\u0240\1\u0241\1\u0240"+ - "\174\0\1\u014a\4\u0242\2\0\1\u0242\15\0\1\u0242\6\0"+ - "\12\u0243\1\u01f6\13\0\1\u0109\157\0\1\u014a\4\u0242\2\0"+ - "\1\u0242\15\0\1\u0242\6\0\12\u0242\1\u01f6\13\0\1\u0109"+ - "\157\0\1\u014a\4\u0242\2\0\1\u0242\15\0\1\u0242\6\0"+ - "\2\u0243\1\u0242\2\u0243\2\u0242\2\u0243\1\u0242\1\u01f6\13\0"+ - "\1\u0109\225\0\1\u017d\13\0\1\u0109\157\0\1\u0244\33\0"+ - "\12\u01fc\174\0\1\u0244\33\0\12\u0245\174\0\1\u0244\33\0"+ - "\1\u01fc\1\u0246\1\u0245\2\u01fc\2\u0245\2\u01fc\1\u0245\174\0"+ - "\1\317\12\201\1\u0151\17\201\1\320\12\201\174\0\1\317"+ - "\3\201\1\u0247\26\201\1\320\12\201\174\0\1\317\32\201"+ - "\1\320\4\201\1\u0248\5\201\174\0\1\317\10\201\1\u0249"+ - "\12\201\1\u024a\6\201\1\320\12\201\174\0\1\317\32\201"+ - "\1\320\2\201\1\u024b\7\201\174\0\1\317\7\201\1\u024c"+ - "\22\201\1\320\12\201\174\0\1\317\7\201\1\u024d\22\201"+ 
- "\1\320\3\201\1\u024e\6\201\174\0\1\317\7\201\1\u024f"+ - "\22\201\1\320\12\201\174\0\1\317\31\201\1\u0250\1\320"+ - "\12\201\174\0\1\317\1\201\1\u0251\30\201\1\320\12\201"+ - "\174\0\1\317\7\201\1\u0252\1\201\1\u0253\20\201\1\320"+ - "\12\201\174\0\1\317\22\201\1\u0254\7\201\1\320\2\201"+ - "\1\u0255\7\201\174\0\1\317\7\201\1\u0256\22\201\1\320"+ - "\12\201\174\0\1\317\7\201\1\u0257\5\201\1\u0258\14\201"+ - "\1\320\12\201\174\0\1\317\23\201\1\u0259\6\201\1\320"+ - "\12\201\174\0\1\317\32\201\1\320\3\201\1\u025a\6\201"+ - "\174\0\1\317\17\201\1\u025b\12\201\1\320\12\201\174\0"+ - "\1\317\32\201\1\320\1\u025c\11\201\230\0\12\u025d\10\0"+ - "\1\u014e\1\u014f\1\u0150\161\0\1\346\1\u025e\31\235\1\117"+ - "\12\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\147\0\1\346\21\235\1\u025f\10\235"+ - "\1\117\12\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\16\235\1\u0260"+ - "\4\235\1\u0261\6\235\1\117\12\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\32\235\1\117\10\235\1\u0262\1\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\32\235\1\117\10\235\1\u0263\1\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\147\0\1\346\1\u0264\2\235\1\u0265\26\235\1\117"+ - "\12\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\147\0\1\346\16\235\1\u0266\13\235"+ - "\1\117\12\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\11\235\1\u0267"+ - "\13\235\1\u0268\4\235\1\117\12\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\32\235\1\117\10\235\1\u0269\1\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\23\235\1\u026a\6\235\1\117\12\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\147\0\1\346\31\235\1\u026b\1\117\12\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\147\0\1\346\26\235\1\u026c\3\235\1\117\12\235"+ - 
"\1\0\3\114\1\0\2\114\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\147\0\1\346\11\235\1\u026d\20\235\1\117"+ - "\12\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\147\0\1\346\32\235\1\117\3\235"+ - "\1\u026e\6\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\10\235\1\u026f"+ - "\21\235\1\117\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\3\235"+ - "\1\u0270\26\235\1\117\12\235\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\21\235\1\u0271\6\235\1\u0272\1\235\1\117\12\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\147\0\1\346\12\235\1\u0273\17\235\1\117\12\235"+ - "\1\0\3\114\1\0\2\114\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\147\0\1\346\32\235\1\117\1\235\1\u0274"+ - "\10\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\147\0\1\346\24\235\1\u0275\5\235"+ - "\1\117\12\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\31\235\1\u0276"+ - "\1\117\12\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\150\0\32\u01de\1\0\12\u01de"+ - "\175\0\32\u01de\1\u0227\12\u01de\175\0\4\u0277\2\0\1\u0277"+ - "\15\0\1\u0277\6\0\12\u0277\175\0\4\u0278\2\0\1\u0278"+ - "\15\0\1\u0278\6\0\12\u0278\1\u0279\241\0\1\u027a\173\0"+ - "\34\u01ab\12\u027b\1\0\2\u01ab\1\u01e5\3\u01ab\1\u01ad\1\0"+ - "\1\u01e4\3\0\2\u01ab\4\0\1\u01ab\151\0\4\u027c\2\0"+ - "\1\u027c\15\0\1\u027c\6\0\12\u027c\214\0\1\u027d\222\0"+ - "\4\u01ab\2\0\1\u01ab\15\0\1\u01ab\6\0\12\u01ab\175\0"+ - "\32\u01e7\1\0\12\u01e7\175\0\32\u01e7\1\u0230\12\u01e7\230\0"+ - "\12\u027e\175\0\4\u027f\2\0\1\u027f\15\0\1\u027f\6\0"+ - "\12\u027f\1\u0233\174\0\4\u0280\2\0\1\u0280\15\0\1\u0280"+ - "\6\0\12\u0280\1\u0281\174\0\4\u0282\2\0\1\u0282\15\0"+ - "\1\u0282\6\0\1\u0283\2\u0284\1\u0283\4\u0284\1\u0285\1\u0284"+ - "\14\0\1\u0286\157\0\1\u01ea\32\u01eb\1\u01ea\12\u01eb\1\u01ec"+ - "\2\u01ea\1\u01ed\3\u01ea\1\u01ee\5\0\2\u01ea\4\0\1\u01ea"+ - 
"\150\0\1\u01ea\32\u01eb\1\u0236\12\u01eb\1\u01ec\2\u01ea\1\u01ed"+ - "\3\u01ea\1\u01ee\5\0\2\u01ea\4\0\1\u01ea\150\0\34\u01ec"+ - "\12\u0287\1\0\2\u01ec\1\u0238\3\u01ec\1\u01ee\5\0\2\u01ec"+ - "\4\0\1\u01ec\151\0\4\u0288\2\0\1\u0288\15\0\1\u0288"+ - "\6\0\12\u0288\175\0\4\u01ea\2\0\1\u01ea\15\0\1\u01ea"+ - "\6\0\12\u01ea\174\0\1\u0289\32\u023a\1\u028a\12\u023a\1\u0136"+ - "\7\0\1\u014e\1\u014f\1\u0150\227\0\1\u01b6\174\0\4\u028b"+ - "\2\0\1\u028b\15\0\1\u028b\6\0\12\u028b\1\u023d\174\0"+ - "\4\u028c\2\0\1\u028c\15\0\1\u028c\6\0\12\u028c\1\u028d"+ - "\174\0\4\u028e\2\0\1\u028e\15\0\1\u028e\6\0\12\u028e"+ - "\1\u028f\13\0\1\u0109\157\0\1\u014a\4\u028e\2\0\1\u028e"+ - "\15\0\1\u028e\6\0\12\u0290\1\u028f\13\0\1\u0109\157\0"+ - "\1\u014a\4\u028e\2\0\1\u028e\15\0\1\u028e\6\0\12\u0291"+ - "\1\u028f\13\0\1\u0109\157\0\1\u014a\4\u028e\2\0\1\u028e"+ - "\15\0\1\u028e\6\0\1\u0290\1\u0292\1\u0291\2\u0290\2\u0291"+ - "\2\u0290\1\u0291\1\u028f\13\0\1\u0109\160\0\4\u0293\2\0"+ - "\1\u0293\15\0\1\u0293\6\0\12\u0293\1\u01f6\13\0\1\u0109"+ - "\157\0\1\u014a\4\u0293\2\0\1\u0293\15\0\1\u0293\6\0"+ - "\12\u0293\1\u01f6\13\0\1\u0109\213\0\1\u0294\2\u0295\1\u0294"+ - "\4\u0295\1\u0296\1\u0295\174\0\1\u0244\241\0\1\u0244\33\0"+ - "\2\u0245\1\0\2\u0245\2\0\2\u0245\175\0\1\317\24\201"+ - "\1\u0297\5\201\1\320\12\201\174\0\1\317\1\201\1\u0298"+ - "\30\201\1\320\12\201\174\0\1\317\14\201\1\u0299\15\201"+ - "\1\320\12\201\174\0\1\317\1\201\1\u029a\30\201\1\320"+ - "\12\201\174\0\1\317\21\201\1\u029b\10\201\1\320\12\201"+ - "\174\0\1\317\24\201\1\u029c\5\201\1\320\12\201\174\0"+ - "\1\317\24\201\1\u029d\5\201\1\320\12\201\174\0\1\317"+ - "\1\u018c\31\201\1\320\12\201\174\0\1\317\24\201\1\u029e"+ - "\5\201\1\320\12\201\174\0\1\317\1\201\1\u029f\30\201"+ - "\1\320\12\201\174\0\1\317\31\201\1\u02a0\1\320\12\201"+ - "\174\0\1\317\24\201\1\u02a1\5\201\1\320\12\201\174\0"+ - "\1\317\1\201\1\u02a2\30\201\1\320\12\201\174\0\1\317"+ - "\1\u02a3\31\201\1\320\12\201\174\0\1\317\21\201\1\u02a4"+ - 
"\10\201\1\320\12\201\174\0\1\317\24\201\1\u02a5\5\201"+ - "\1\320\12\201\174\0\1\317\24\201\1\u02a6\5\201\1\320"+ - "\12\201\174\0\1\317\4\201\1\u02a7\25\201\1\320\12\201"+ - "\174\0\1\317\21\201\1\u02a8\10\201\1\320\12\201\174\0"+ - "\1\317\24\201\1\u02a9\5\201\1\320\12\201\174\0\1\317"+ - "\32\201\1\320\7\201\1\u02aa\2\201\174\0\1\317\1\u02ab"+ - "\31\201\1\320\12\201\252\0\1\u014e\1\u014f\1\u0150\161\0"+ - "\1\346\32\235\1\117\1\u02ac\11\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\32\235\1\117\7\235\1\u02ad\2\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\32\235\1\117\6\235\1\u013d\3\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\147\0\1\346\32\235\1\117\5\235\1\u013d\4\235"+ - "\1\0\3\114\1\0\2\114\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\147\0\1\346\1\235\1\u02ae\30\235\1\117"+ - "\12\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\147\0\1\346\32\235\1\117\1\235"+ - "\1\u02af\10\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\1\u02b0\27\235"+ - "\1\u02b1\1\235\1\117\12\235\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\4\235\1\u02b2\25\235\1\117\12\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\32\235\1\117\1\u02b3\11\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\32\235\1\117\2\235\1\260\7\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\32\235\1\117\3\235\1\u02b4\6\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\147\0\1\346\1\u02b5\31\235\1\117\12\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\147\0\1\346\1\u02ad\31\235\1\117\12\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\147\0\1\346\32\235\1\117\2\235\1\u02b6\7\235"+ - "\1\0\3\114\1\0\2\114\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\147\0\1\346\32\235\1\117\2\235\1\u02b7"+ - 
"\7\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\147\0\1\346\15\235\1\u02b8\14\235"+ - "\1\117\12\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\32\235\1\117"+ - "\5\235\1\u02b9\4\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\32\235"+ - "\1\117\10\235\1\u02ba\1\235\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\1\235\1\u02bb\30\235\1\117\12\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\32\235\1\117\3\235\1\u02bc\6\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\32\235\1\117\1\235\1\u02bd\10\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\147\0\1\346\32\235\1\117\1\235\1\u02be\10\235"+ - "\1\0\3\114\1\0\2\114\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\147\0\1\346\24\235\1\u02bf\5\235\1\117"+ - "\12\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\147\0\1\346\32\235\1\117\3\235"+ - "\1\u02c0\6\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\25\235\1\u02c1"+ - "\4\235\1\117\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\150\0\4\u01df\2\0"+ - "\1\u01df\15\0\1\u01df\6\0\12\u01df\175\0\4\u02c2\2\0"+ - "\1\u02c2\15\0\1\u02c2\6\0\12\u02c2\1\u0279\174\0\4\u02c3"+ - "\2\0\1\u02c3\15\0\1\u02c3\6\0\12\u02c3\1\u02c4\174\0"+ - "\4\u02c5\2\0\1\u02c5\15\0\1\u02c5\6\0\1\u02c6\2\u02c7"+ - "\1\u02c6\4\u02c7\1\u02c8\1\u02c7\14\0\1\u02c9\157\0\34\u01ab"+ - "\12\u02ca\1\0\2\u01ab\1\u01e5\3\u01ab\1\u01ad\1\0\1\u01e4"+ - "\3\0\2\u01ab\4\0\1\u01ab\151\0\4\u01e4\2\0\1\u01e4"+ - "\15\0\1\u01e4\6\0\12\u01e4\225\0\1\u02cb\244\0\12\u02cc"+ - "\11\0\1\u01e4\163\0\4\u02cd\2\0\1\u02cd\15\0\1\u02cd"+ - "\6\0\12\u02cd\1\u0233\174\0\4\u02ce\2\0\1\u02ce\15\0"+ - "\1\u02ce\6\0\12\u02ce\1\u02cf\174\0\4\u02d0\2\0\1\u02d0"+ - "\15\0\1\u02d0\6\0\1\u02d1\2\u02d2\1\u02d1\4\u02d2\1\u02d3"+ - "\1\u02d2\14\0\1\u0286\160\0\4\u02d4\2\0\1\u02d4\15\0"+ - 
"\1\u02d4\6\0\12\u02d4\1\u02d5\13\0\1\u0286\157\0\1\u02d6"+ - "\4\u02d4\2\0\1\u02d4\15\0\1\u02d4\6\0\12\u02d7\1\u02d5"+ - "\13\0\1\u0286\157\0\1\u02d6\4\u02d4\2\0\1\u02d4\15\0"+ - "\1\u02d4\6\0\12\u02d8\1\u02d5\13\0\1\u0286\157\0\1\u02d6"+ - "\4\u02d4\2\0\1\u02d4\15\0\1\u02d4\6\0\1\u02d7\1\u02d9"+ - "\1\u02d8\2\u02d7\2\u02d8\2\u02d7\1\u02d8\1\u02d5\13\0\1\u0286"+ - "\225\0\1\u0231\10\0\1\u01e4\162\0\34\u01ec\12\u02da\1\0"+ - "\2\u01ec\1\u0238\3\u01ec\1\u01ee\1\u014e\1\u014f\1\u0150\2\0"+ - "\2\u01ec\4\0\1\u01ec\151\0\4\u01ec\2\0\1\u01ec\15\0"+ - "\1\u01ec\6\0\12\u01ec\175\0\32\u023a\1\0\12\u023a\175\0"+ - "\32\u023a\1\u028a\12\u023a\175\0\4\u02db\2\0\1\u02db\15\0"+ - "\1\u02db\6\0\12\u02db\1\u023d\174\0\4\u02dc\2\0\1\u02dc"+ - "\15\0\1\u02dc\6\0\12\u02dc\1\u02dd\174\0\4\u02de\2\0"+ - "\1\u02de\15\0\1\u02de\6\0\1\u02df\2\u02e0\1\u02df\4\u02e0"+ - "\1\u02e1\1\u02e0\14\0\1\u0109\160\0\4\u02e2\2\0\1\u02e2"+ - "\15\0\1\u02e2\6\0\12\u02e2\1\u028f\13\0\1\u0109\160\0"+ - "\4\u02de\2\0\1\u02de\15\0\1\u02de\6\0\1\u02df\2\u02e0"+ - "\1\u02df\4\u02e0\1\u02e1\1\u02e0\174\0\1\u014a\4\u02e2\2\0"+ - "\1\u02e2\15\0\1\u02e2\6\0\12\u02e3\1\u028f\13\0\1\u0109"+ - "\157\0\1\u014a\4\u02e2\2\0\1\u02e2\15\0\1\u02e2\6\0"+ - "\12\u02e2\1\u028f\13\0\1\u0109\157\0\1\u014a\4\u02e2\2\0"+ - "\1\u02e2\15\0\1\u02e2\6\0\2\u02e3\1\u02e2\2\u02e3\2\u02e2"+ - "\2\u02e3\1\u02e2\1\u028f\13\0\1\u0109\225\0\1\u01f6\13\0"+ - "\1\u0109\213\0\12\u0295\14\0\1\u0109\213\0\12\u02e4\14\0"+ - "\1\u0109\213\0\1\u0295\1\u02e5\1\u02e4\2\u0295\2\u02e4\2\u0295"+ - "\1\u02e4\14\0\1\u0109\157\0\1\317\1\u02e6\31\201\1\320"+ - "\12\201\174\0\1\317\21\201\1\u02e7\10\201\1\320\12\201"+ - "\174\0\1\317\16\201\1\u02e8\4\201\1\u02e9\6\201\1\320"+ - "\12\201\174\0\1\317\32\201\1\320\10\201\1\u02ea\1\201"+ - "\174\0\1\317\32\201\1\320\10\201\1\u02eb\1\201\174\0"+ - "\1\317\1\u02ec\2\201\1\u02ed\26\201\1\320\12\201\174\0"+ - "\1\317\16\201\1\u02ee\13\201\1\320\12\201\174\0\1\317"+ - 
"\11\201\1\u02ef\13\201\1\u02f0\4\201\1\320\12\201\174\0"+ - "\1\317\32\201\1\320\10\201\1\u02f1\1\201\174\0\1\317"+ - "\23\201\1\u02f2\6\201\1\320\12\201\174\0\1\317\31\201"+ - "\1\u02f3\1\320\12\201\174\0\1\317\26\201\1\u02f4\3\201"+ - "\1\320\12\201\174\0\1\317\11\201\1\u02f5\20\201\1\320"+ - "\12\201\174\0\1\317\32\201\1\320\3\201\1\u02f6\6\201"+ - "\174\0\1\317\10\201\1\u02f7\21\201\1\320\12\201\174\0"+ - "\1\317\3\201\1\u02f8\26\201\1\320\12\201\174\0\1\317"+ - "\21\201\1\u02f9\6\201\1\u02fa\1\201\1\320\12\201\174\0"+ - "\1\317\12\201\1\u02fb\17\201\1\320\12\201\174\0\1\317"+ - "\32\201\1\320\1\201\1\u02fc\10\201\174\0\1\317\24\201"+ - "\1\u02fd\5\201\1\320\12\201\174\0\1\317\31\201\1\u02fe"+ - "\1\320\12\201\174\0\1\346\1\u02ff\31\235\1\117\12\235"+ - "\1\0\3\114\1\0\2\114\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\147\0\1\346\25\235\1\353\4\235\1\117"+ - "\12\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\147\0\1\346\32\235\1\117\5\235"+ - "\1\u0300\4\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\32\235\1\117"+ - "\3\235\1\u02ff\6\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\12\235"+ - "\1\u0301\17\235\1\117\12\235\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\25\235\1\u0302\4\235\1\117\12\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\15\235\1\u0303\14\235\1\117\12\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\2\235\1\u02ad\27\235\1\117\12\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\147\0\1\346\1\235\1\353\30\235\1\117\12\235"+ - "\1\0\3\114\1\0\2\114\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\147\0\1\346\11\235\1\u0304\20\235\1\117"+ - "\12\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\147\0\1\346\1\u0305\31\235\1\117"+ - "\12\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\147\0\1\346\1\u0306\31\235\1\117"+ - 
"\12\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\147\0\1\346\2\235\1\u0307\27\235"+ - "\1\117\12\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\32\235\1\117"+ - "\4\235\1\362\5\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\1\u0308"+ - "\31\235\1\117\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\25\235"+ - "\1\u0309\4\235\1\117\12\235\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\32\235\1\117\4\235\1\u02ff\5\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\32\235\1\117\11\235\1\u02ff\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\32\235\1\117\2\235\1\u02ff\7\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\16\235\1\u030a\13\235\1\117\12\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\147\0\1\346\32\235\1\117\3\235\1\u030b\6\235"+ - "\1\0\3\114\1\0\2\114\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\147\0\1\346\24\235\1\u030c\5\235\1\117"+ - "\12\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\150\0\4\u030d\2\0\1\u030d\15\0"+ - "\1\u030d\6\0\12\u030d\1\u0279\174\0\4\u030e\2\0\1\u030e"+ - "\15\0\1\u030e\6\0\12\u030e\1\u030f\174\0\4\u0310\2\0"+ - "\1\u0310\15\0\1\u0310\6\0\1\u0311\2\u0312\1\u0311\4\u0312"+ - "\1\u0313\1\u0312\14\0\1\u02c9\160\0\4\u0314\2\0\1\u0314"+ - "\15\0\1\u0314\6\0\12\u0314\1\u0315\13\0\1\u02c9\157\0"+ - "\1\u0316\4\u0314\2\0\1\u0314\15\0\1\u0314\6\0\12\u0317"+ - "\1\u0315\13\0\1\u02c9\157\0\1\u0316\4\u0314\2\0\1\u0314"+ - "\15\0\1\u0314\6\0\12\u0318\1\u0315\13\0\1\u02c9\157\0"+ - "\1\u0316\4\u0314\2\0\1\u0314\15\0\1\u0314\6\0\1\u0317"+ - "\1\u0319\1\u0318\2\u0317\2\u0318\2\u0317\1\u0318\1\u0315\13\0"+ - "\1\u02c9\236\0\1\u01df\162\0\34\u01ab\12\u031a\1\0\2\u01ab"+ - "\1\u01e5\3\u01ab\1\u01ad\1\0\1\u01e4\3\0\2\u01ab\4\0"+ - "\1\u01ab\166\0\1\u031b\257\0\12\u031c\11\0\1\u01e4\230\0"+ - 
"\1\u0233\174\0\4\u031d\2\0\1\u031d\15\0\1\u031d\6\0"+ - "\12\u031d\1\u02cf\174\0\4\u031e\2\0\1\u031e\15\0\1\u031e"+ - "\6\0\12\u031e\1\u031f\174\0\4\u0320\2\0\1\u0320\15\0"+ - "\1\u0320\6\0\12\u0320\1\u0321\13\0\1\u0286\157\0\1\u02d6"+ - "\4\u0320\2\0\1\u0320\15\0\1\u0320\6\0\12\u0322\1\u0321"+ - "\13\0\1\u0286\157\0\1\u02d6\4\u0320\2\0\1\u0320\15\0"+ - "\1\u0320\6\0\12\u0323\1\u0321\13\0\1\u0286\157\0\1\u02d6"+ - "\4\u0320\2\0\1\u0320\15\0\1\u0320\6\0\1\u0322\1\u0324"+ - "\1\u0323\2\u0322\2\u0323\2\u0322\1\u0323\1\u0321\13\0\1\u0286"+ - "\160\0\4\u0325\2\0\1\u0325\15\0\1\u0325\6\0\12\u0325"+ - "\1\u02d5\13\0\1\u0286\160\0\4\u02d0\2\0\1\u02d0\15\0"+ - "\1\u02d0\6\0\1\u02d1\2\u02d2\1\u02d1\4\u02d2\1\u02d3\1\u02d2"+ - "\230\0\1\u0326\2\u0327\1\u0326\4\u0327\1\u0328\1\u0327\174\0"+ - "\1\u02d6\4\u0325\2\0\1\u0325\15\0\1\u0325\6\0\12\u0329"+ - "\1\u02d5\13\0\1\u0286\157\0\1\u02d6\4\u0325\2\0\1\u0325"+ - "\15\0\1\u0325\6\0\12\u0325\1\u02d5\13\0\1\u0286\157\0"+ - "\1\u02d6\4\u0325\2\0\1\u0325\15\0\1\u0325\6\0\2\u0329"+ - "\1\u0325\2\u0329\2\u0325\2\u0329\1\u0325\1\u02d5\13\0\1\u0286"+ - "\157\0\34\u01ec\12\u032a\1\0\2\u01ec\1\u0238\3\u01ec\1\u01ee"+ - "\1\u014e\1\u014f\1\u0150\2\0\2\u01ec\4\0\1\u01ec\216\0"+ - "\1\u023d\174\0\4\u032b\2\0\1\u032b\15\0\1\u032b\6\0"+ - "\12\u032b\1\u02dd\174\0\4\u032c\2\0\1\u032c\15\0\1\u032c"+ - "\6\0\12\u032c\1\u032d\174\0\4\u032e\2\0\1\u032e\15\0"+ - "\1\u032e\6\0\12\u032e\1\u032f\13\0\1\u0109\157\0\1\u014a"+ - "\4\u032e\2\0\1\u032e\15\0\1\u032e\6\0\12\u0330\1\u032f"+ - "\13\0\1\u0109\157\0\1\u014a\4\u032e\2\0\1\u032e\15\0"+ - "\1\u032e\6\0\12\u0331\1\u032f\13\0\1\u0109\157\0\1\u014a"+ - "\4\u032e\2\0\1\u032e\15\0\1\u032e\6\0\1\u0330\1\u0332"+ - "\1\u0331\2\u0330\2\u0331\2\u0330\1\u0331\1\u032f\13\0\1\u0109"+ - "\160\0\4\u0333\2\0\1\u0333\15\0\1\u0333\6\0\12\u0333"+ - "\1\u028f\13\0\1\u0109\157\0\1\u014a\4\u0333\2\0\1\u0333"+ - "\15\0\1\u0333\6\0\12\u0333\1\u028f\13\0\1\u0109\241\0"+ - 
"\1\u0109\213\0\2\u02e4\1\0\2\u02e4\2\0\2\u02e4\15\0"+ - "\1\u0109\157\0\1\317\32\201\1\320\1\u0334\11\201\174\0"+ - "\1\317\32\201\1\320\7\201\1\u0335\2\201\174\0\1\317"+ - "\32\201\1\320\6\201\1\u0191\3\201\174\0\1\317\32\201"+ - "\1\320\5\201\1\u0191\4\201\174\0\1\317\1\201\1\u0336"+ - "\30\201\1\320\12\201\174\0\1\317\32\201\1\320\1\201"+ - "\1\u0337\10\201\174\0\1\317\1\u0338\27\201\1\u0339\1\201"+ - "\1\320\12\201\174\0\1\317\4\201\1\u033a\25\201\1\320"+ - "\12\201\174\0\1\317\32\201\1\320\1\u033b\11\201\174\0"+ - "\1\317\32\201\1\320\2\201\1\u0116\7\201\174\0\1\317"+ - "\32\201\1\320\3\201\1\u033c\6\201\174\0\1\317\1\u033d"+ - "\31\201\1\320\12\201\174\0\1\317\1\u0335\31\201\1\320"+ - "\12\201\174\0\1\317\32\201\1\320\2\201\1\u033e\7\201"+ - "\174\0\1\317\32\201\1\320\2\201\1\u033f\7\201\174\0"+ - "\1\317\15\201\1\u0340\14\201\1\320\12\201\174\0\1\317"+ - "\32\201\1\320\5\201\1\u0341\4\201\174\0\1\317\32\201"+ - "\1\320\10\201\1\u0342\1\201\174\0\1\317\1\201\1\u0343"+ - "\30\201\1\320\12\201\174\0\1\317\32\201\1\320\3\201"+ - "\1\u0344\6\201\174\0\1\317\32\201\1\320\1\201\1\u0345"+ - "\10\201\174\0\1\317\32\201\1\320\1\201\1\u0346\10\201"+ - "\174\0\1\317\24\201\1\u0347\5\201\1\320\12\201\174\0"+ - "\1\317\32\201\1\320\3\201\1\u0348\6\201\174\0\1\317"+ - "\25\201\1\u0349\4\201\1\320\12\201\174\0\1\346\2\235"+ - "\1\353\27\235\1\117\12\235\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\3\235\1\u034a\26\235\1\117\12\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\32\235\1\117\11\235\1\u034b\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\32\235\1\117\11\235\1\u034c\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\32\235\1\117\7\235\1\u034d\2\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\32\235\1\117\4\235\1\u034e\5\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - 
"\2\114\147\0\1\346\26\235\1\u034f\3\235\1\117\12\235"+ - "\1\0\3\114\1\0\2\114\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\147\0\1\346\30\235\1\u0350\1\235\1\117"+ - "\12\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\147\0\1\346\11\235\1\u0138\20\235"+ - "\1\117\12\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\12\235\1\u0351"+ - "\17\235\1\117\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\17\235"+ - "\1\363\12\235\1\117\12\235\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\32\235\1\117\4\235\1\u0352\5\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\30\235\1\u0353\1\235\1\117\12\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\30\235\1\u0354\1\235\1\117\12\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\215\0\1\u0279\174\0\4\u0355\2\0\1\u0355\15\0"+ - "\1\u0355\6\0\12\u0355\1\u030f\174\0\4\u0356\2\0\1\u0356"+ - "\15\0\1\u0356\6\0\12\u0356\1\u0357\174\0\4\u0358\2\0"+ - "\1\u0358\15\0\1\u0358\6\0\12\u0358\1\u0359\13\0\1\u02c9"+ - "\157\0\1\u0316\4\u0358\2\0\1\u0358\15\0\1\u0358\6\0"+ - "\12\u035a\1\u0359\13\0\1\u02c9\157\0\1\u0316\4\u0358\2\0"+ - "\1\u0358\15\0\1\u0358\6\0\12\u035b\1\u0359\13\0\1\u02c9"+ - "\157\0\1\u0316\4\u0358\2\0\1\u0358\15\0\1\u0358\6\0"+ - "\1\u035a\1\u035c\1\u035b\2\u035a\2\u035b\2\u035a\1\u035b\1\u0359"+ - "\13\0\1\u02c9\160\0\4\u035d\2\0\1\u035d\15\0\1\u035d"+ - "\6\0\12\u035d\1\u0315\13\0\1\u02c9\160\0\4\u0310\2\0"+ - "\1\u0310\15\0\1\u0310\6\0\1\u0311\2\u0312\1\u0311\4\u0312"+ - "\1\u0313\1\u0312\230\0\1\u035e\2\u035f\1\u035e\4\u035f\1\u0360"+ - "\1\u035f\174\0\1\u0316\4\u035d\2\0\1\u035d\15\0\1\u035d"+ - "\6\0\12\u0361\1\u0315\13\0\1\u02c9\157\0\1\u0316\4\u035d"+ - "\2\0\1\u035d\15\0\1\u035d\6\0\12\u035d\1\u0315\13\0"+ - "\1\u02c9\157\0\1\u0316\4\u035d\2\0\1\u035d\15\0\1\u035d"+ - "\6\0\2\u0361\1\u035d\2\u0361\2\u035d\2\u0361\1\u035d\1\u0315"+ - 
"\13\0\1\u02c9\157\0\34\u01ab\12\u0362\1\0\2\u01ab\1\u01e5"+ - "\3\u01ab\1\u01ad\1\0\1\u01e4\3\0\2\u01ab\4\0\1\u01ab"+ - "\154\0\1\u0363\271\0\12\u0364\11\0\1\u01e4\163\0\4\u0365"+ - "\2\0\1\u0365\15\0\1\u0365\6\0\12\u0365\1\u02cf\174\0"+ - "\4\u0366\2\0\1\u0366\15\0\1\u0366\6\0\12\u0366\1\u0367"+ - "\174\0\4\u0368\2\0\1\u0368\15\0\1\u0368\6\0\1\u0369"+ - "\2\u036a\1\u0369\4\u036a\1\u036b\1\u036a\14\0\1\u0286\160\0"+ - "\4\u036c\2\0\1\u036c\15\0\1\u036c\6\0\12\u036c\1\u0321"+ - "\13\0\1\u0286\160\0\4\u0368\2\0\1\u0368\15\0\1\u0368"+ - "\6\0\1\u0369\2\u036a\1\u0369\4\u036a\1\u036b\1\u036a\174\0"+ - "\1\u02d6\4\u036c\2\0\1\u036c\15\0\1\u036c\6\0\12\u036d"+ - "\1\u0321\13\0\1\u0286\157\0\1\u02d6\4\u036c\2\0\1\u036c"+ - "\15\0\1\u036c\6\0\12\u036c\1\u0321\13\0\1\u0286\157\0"+ - "\1\u02d6\4\u036c\2\0\1\u036c\15\0\1\u036c\6\0\2\u036d"+ - "\1\u036c\2\u036d\2\u036c\2\u036d\1\u036c\1\u0321\13\0\1\u0286"+ - "\160\0\4\u036e\2\0\1\u036e\15\0\1\u036e\6\0\12\u036e"+ - "\1\u02d5\13\0\1\u0286\157\0\1\u036f\33\0\12\u0327\174\0"+ - "\1\u036f\33\0\12\u0370\174\0\1\u036f\33\0\1\u0327\1\u0371"+ - "\1\u0370\2\u0327\2\u0370\2\u0327\1\u0370\174\0\1\u02d6\4\u036e"+ - "\2\0\1\u036e\15\0\1\u036e\6\0\12\u036e\1\u02d5\13\0"+ - "\1\u0286\157\0\34\u01ec\12\u0372\1\0\2\u01ec\1\u0238\3\u01ec"+ - "\1\u01ee\1\u014e\1\u014f\1\u0150\2\0\2\u01ec\4\0\1\u01ec"+ - "\151\0\4\u0373\2\0\1\u0373\15\0\1\u0373\6\0\12\u0373"+ - "\1\u02dd\174\0\4\u0374\2\0\1\u0374\15\0\1\u0374\6\0"+ - "\12\u0374\1\u0375\174\0\4\u0376\2\0\1\u0376\15\0\1\u0376"+ - "\6\0\1\u0377\2\u0378\1\u0377\4\u0378\1\u0379\1\u0378\14\0"+ - "\1\u0109\160\0\4\u037a\2\0\1\u037a\15\0\1\u037a\6\0"+ - "\12\u037a\1\u032f\13\0\1\u0109\160\0\4\u0376\2\0\1\u0376"+ - "\15\0\1\u0376\6\0\1\u0377\2\u0378\1\u0377\4\u0378\1\u0379"+ - "\1\u0378\174\0\1\u014a\4\u037a\2\0\1\u037a\15\0\1\u037a"+ - "\6\0\12\u037b\1\u032f\13\0\1\u0109\157\0\1\u014a\4\u037a"+ - "\2\0\1\u037a\15\0\1\u037a\6\0\12\u037a\1\u032f\13\0"+ - 
"\1\u0109\157\0\1\u014a\4\u037a\2\0\1\u037a\15\0\1\u037a"+ - "\6\0\2\u037b\1\u037a\2\u037b\2\u037a\2\u037b\1\u037a\1\u032f"+ - "\13\0\1\u0109\225\0\1\u028f\13\0\1\u0109\157\0\1\317"+ - "\1\u037c\31\201\1\320\12\201\174\0\1\317\25\201\1\u0151"+ - "\4\201\1\320\12\201\174\0\1\317\32\201\1\320\5\201"+ - "\1\u037d\4\201\174\0\1\317\32\201\1\320\3\201\1\u037c"+ - "\6\201\174\0\1\317\12\201\1\u037e\17\201\1\320\12\201"+ - "\174\0\1\317\25\201\1\u037f\4\201\1\320\12\201\174\0"+ - "\1\317\15\201\1\u0380\14\201\1\320\12\201\174\0\1\317"+ - "\2\201\1\u0335\27\201\1\320\12\201\174\0\1\317\1\201"+ - "\1\u0151\30\201\1\320\12\201\174\0\1\317\11\201\1\u0381"+ - "\20\201\1\320\12\201\174\0\1\317\1\u0382\31\201\1\320"+ - "\12\201\174\0\1\317\1\u0383\31\201\1\320\12\201\174\0"+ - "\1\317\2\201\1\u0384\27\201\1\320\12\201\174\0\1\317"+ - "\32\201\1\320\4\201\1\u0158\5\201\174\0\1\317\1\u0385"+ - "\31\201\1\320\12\201\174\0\1\317\25\201\1\u0386\4\201"+ - "\1\320\12\201\174\0\1\317\32\201\1\320\4\201\1\u037c"+ - "\5\201\174\0\1\317\32\201\1\320\11\201\1\u037c\174\0"+ - "\1\317\32\201\1\320\2\201\1\u037c\7\201\174\0\1\317"+ - "\16\201\1\u0387\13\201\1\320\12\201\174\0\1\317\32\201"+ - "\1\320\3\201\1\u0388\6\201\174\0\1\317\24\201\1\u0389"+ - "\5\201\1\320\12\201\174\0\1\346\32\235\1\117\10\235"+ - "\1\u02b4\1\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\1\u038a\31\235"+ - "\1\117\12\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\7\235\1\u038b"+ - "\22\235\1\117\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\1\u038c"+ - "\31\235\1\117\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\25\235"+ - "\1\u038d\4\235\1\117\12\235\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\32\235\1\117\11\235\1\u038e\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\1\u038f\31\235\1\117\12\235\1\0\3\114\1\0\2\114"+ - 
"\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\12\235\1\u0390\17\235\1\117\12\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\1\u0391\31\235\1\117\12\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\10\235\1\u0392\21\235\1\117\12\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\31\235\1\u0393\1\117\12\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\150\0\4\u0394\2\0\1\u0394\15\0\1\u0394\6\0\12\u0394"+ - "\1\u030f\174\0\4\u0395\2\0\1\u0395\15\0\1\u0395\6\0"+ - "\12\u0395\1\u0396\174\0\4\u0397\2\0\1\u0397\15\0\1\u0397"+ - "\6\0\1\u0398\2\u0399\1\u0398\4\u0399\1\u039a\1\u0399\14\0"+ - "\1\u02c9\160\0\4\u039b\2\0\1\u039b\15\0\1\u039b\6\0"+ - "\12\u039b\1\u0359\13\0\1\u02c9\160\0\4\u0397\2\0\1\u0397"+ - "\15\0\1\u0397\6\0\1\u0398\2\u0399\1\u0398\4\u0399\1\u039a"+ - "\1\u0399\174\0\1\u0316\4\u039b\2\0\1\u039b\15\0\1\u039b"+ - "\6\0\12\u039c\1\u0359\13\0\1\u02c9\157\0\1\u0316\4\u039b"+ - "\2\0\1\u039b\15\0\1\u039b\6\0\12\u039b\1\u0359\13\0"+ - "\1\u02c9\157\0\1\u0316\4\u039b\2\0\1\u039b\15\0\1\u039b"+ - "\6\0\2\u039c\1\u039b\2\u039c\2\u039b\2\u039c\1\u039b\1\u0359"+ - "\13\0\1\u02c9\160\0\4\u039d\2\0\1\u039d\15\0\1\u039d"+ - "\6\0\12\u039d\1\u0315\13\0\1\u02c9\157\0\1\u039e\33\0"+ - "\12\u035f\174\0\1\u039e\33\0\12\u039f\174\0\1\u039e\33\0"+ - "\1\u035f\1\u03a0\1\u039f\2\u035f\2\u039f\2\u035f\1\u039f\174\0"+ - "\1\u0316\4\u039d\2\0\1\u039d\15\0\1\u039d\6\0\12\u039d"+ - "\1\u0315\13\0\1\u02c9\157\0\46\u01ab\1\0\2\u01ab\1\u01e5"+ - "\3\u01ab\1\u01ad\1\0\1\u01e4\3\0\2\u01ab\4\0\1\u01ab"+ - "\234\0\1\u03a1\211\0\12\u03a2\11\0\1\u01e4\230\0\1\u02cf"+ - "\174\0\4\u03a3\2\0\1\u03a3\15\0\1\u03a3\6\0\12\u03a3"+ - "\1\u0367\174\0\4\u03a4\2\0\1\u03a4\15\0\1\u03a4\6\0"+ - "\12\u03a4\1\u03a5\174\0\4\u03a6\2\0\1\u03a6\15\0\1\u03a6"+ - "\6\0\12\u03a6\1\u03a7\13\0\1\u0286\157\0\1\u02d6\4\u03a6"+ - "\2\0\1\u03a6\15\0\1\u03a6\6\0\12\u03a8\1\u03a7\13\0"+ - 
"\1\u0286\157\0\1\u02d6\4\u03a6\2\0\1\u03a6\15\0\1\u03a6"+ - "\6\0\12\u03a9\1\u03a7\13\0\1\u0286\157\0\1\u02d6\4\u03a6"+ - "\2\0\1\u03a6\15\0\1\u03a6\6\0\1\u03a8\1\u03aa\1\u03a9"+ - "\2\u03a8\2\u03a9\2\u03a8\1\u03a9\1\u03a7\13\0\1\u0286\160\0"+ - "\4\u03ab\2\0\1\u03ab\15\0\1\u03ab\6\0\12\u03ab\1\u0321"+ - "\13\0\1\u0286\157\0\1\u02d6\4\u03ab\2\0\1\u03ab\15\0"+ - "\1\u03ab\6\0\12\u03ab\1\u0321\13\0\1\u0286\225\0\1\u02d5"+ - "\13\0\1\u0286\213\0\1\u03ac\2\u03ad\1\u03ac\4\u03ad\1\u03ae"+ - "\1\u03ad\174\0\1\u036f\241\0\1\u036f\33\0\2\u0370\1\0"+ - "\2\u0370\2\0\2\u0370\175\0\34\u01ec\12\u03af\1\0\2\u01ec"+ - "\1\u0238\3\u01ec\1\u01ee\1\u014e\1\u014f\1\u0150\2\0\2\u01ec"+ - "\4\0\1\u01ec\216\0\1\u02dd\174\0\4\u03b0\2\0\1\u03b0"+ - "\15\0\1\u03b0\6\0\12\u03b0\1\u0375\174\0\4\u03b1\2\0"+ - "\1\u03b1\15\0\1\u03b1\6\0\1\u03b2\2\u03b3\1\u03b2\4\u03b3"+ - "\1\u03b4\1\u03b3\1\u03b5\174\0\4\u03b6\2\0\1\u03b6\15\0"+ - "\1\u03b6\6\0\12\u03b6\1\u03b7\13\0\1\u0109\157\0\1\u014a"+ - "\4\u03b6\2\0\1\u03b6\15\0\1\u03b6\6\0\12\u03b8\1\u03b7"+ - "\13\0\1\u0109\157\0\1\u014a\4\u03b6\2\0\1\u03b6\15\0"+ - "\1\u03b6\6\0\12\u03b9\1\u03b7\13\0\1\u0109\157\0\1\u014a"+ - "\4\u03b6\2\0\1\u03b6\15\0\1\u03b6\6\0\1\u03b8\1\u03ba"+ - "\1\u03b9\2\u03b8\2\u03b9\2\u03b8\1\u03b9\1\u03b7\13\0\1\u0109"+ - "\160\0\4\u03bb\2\0\1\u03bb\15\0\1\u03bb\6\0\12\u03bb"+ - "\1\u032f\13\0\1\u0109\157\0\1\u014a\4\u03bb\2\0\1\u03bb"+ - "\15\0\1\u03bb\6\0\12\u03bb\1\u032f\13\0\1\u0109\157\0"+ - "\1\317\2\201\1\u0151\27\201\1\320\12\201\174\0\1\317"+ - "\3\201\1\u03bc\26\201\1\320\12\201\174\0\1\317\32\201"+ - "\1\320\11\201\1\u03bd\174\0\1\317\32\201\1\320\11\201"+ - "\1\u03be\174\0\1\317\32\201\1\320\7\201\1\u03bf\2\201"+ - "\174\0\1\317\32\201\1\320\4\201\1\u03c0\5\201\174\0"+ - "\1\317\26\201\1\u03c1\3\201\1\320\12\201\174\0\1\317"+ - "\30\201\1\u03c2\1\201\1\320\12\201\174\0\1\317\11\201"+ - "\1\u018b\20\201\1\320\12\201\174\0\1\317\12\201\1\u03c3"+ - "\17\201\1\320\12\201\174\0\1\317\17\201\1\u0159\12\201"+ - 
"\1\320\12\201\174\0\1\317\32\201\1\320\4\201\1\u03c4"+ - "\5\201\174\0\1\317\30\201\1\u03c5\1\201\1\320\12\201"+ - "\174\0\1\317\30\201\1\u03c6\1\201\1\320\12\201\174\0"+ - "\1\346\32\235\1\117\6\235\1\u02ad\3\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\15\235\1\260\14\235\1\117\12\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\147\0\1\346\32\235\1\117\1\235\1\u03c7\10\235"+ - "\1\0\3\114\1\0\2\114\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\147\0\1\346\30\235\1\u03c8\1\235\1\117"+ - "\12\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\147\0\1\346\6\235\1\u03c9\23\235"+ - "\1\117\12\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\32\235\1\117"+ - "\5\235\1\u03ca\4\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\22\235"+ - "\1\353\7\235\1\117\12\235\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\32\235\1\117\5\235\1\u03cb\4\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\32\235\1\117\1\235\1\260\10\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\13\235\1\u03cc\16\235\1\117\12\235\1\0"+ - "\3\114\1\0\2\114\1\115\3\114\3\0\1\114\4\0"+ - "\2\114\215\0\1\u030f\174\0\4\u03cd\2\0\1\u03cd\15\0"+ - "\1\u03cd\6\0\12\u03cd\1\u0396\174\0\4\u03ce\2\0\1\u03ce"+ - "\15\0\1\u03ce\6\0\12\u03ce\1\u03cf\174\0\4\u03d0\2\0"+ - "\1\u03d0\15\0\1\u03d0\6\0\12\u03d0\1\u03d1\13\0\1\u02c9"+ - "\157\0\1\u0316\4\u03d0\2\0\1\u03d0\15\0\1\u03d0\6\0"+ - "\12\u03d2\1\u03d1\13\0\1\u02c9\157\0\1\u0316\4\u03d0\2\0"+ - "\1\u03d0\15\0\1\u03d0\6\0\12\u03d3\1\u03d1\13\0\1\u02c9"+ - "\157\0\1\u0316\4\u03d0\2\0\1\u03d0\15\0\1\u03d0\6\0"+ - "\1\u03d2\1\u03d4\1\u03d3\2\u03d2\2\u03d3\2\u03d2\1\u03d3\1\u03d1"+ - "\13\0\1\u02c9\160\0\4\u03d5\2\0\1\u03d5\15\0\1\u03d5"+ - "\6\0\12\u03d5\1\u0359\13\0\1\u02c9\157\0\1\u0316\4\u03d5"+ - "\2\0\1\u03d5\15\0\1\u03d5\6\0\12\u03d5\1\u0359\13\0"+ - 
"\1\u02c9\225\0\1\u0315\13\0\1\u02c9\213\0\1\u03d6\2\u03d7"+ - "\1\u03d6\4\u03d7\1\u03d8\1\u03d7\174\0\1\u039e\241\0\1\u039e"+ - "\33\0\2\u039f\1\0\2\u039f\2\0\2\u039f\176\0\1\u03d9"+ - "\1\0\1\u03d9\5\0\1\u03d9\307\0\1\u01e4\163\0\4\u03da"+ - "\2\0\1\u03da\15\0\1\u03da\6\0\12\u03da\1\u0367\174\0"+ - "\4\u03db\2\0\1\u03db\15\0\1\u03db\6\0\12\u03db\1\u03dc"+ - "\174\0\4\u03dd\2\0\1\u03dd\15\0\1\u03dd\6\0\1\u03de"+ - "\2\u03df\1\u03de\4\u03df\1\u03e0\1\u03df\14\0\1\u0286\160\0"+ - "\4\u03e1\2\0\1\u03e1\15\0\1\u03e1\6\0\12\u03e1\1\u03a7"+ - "\13\0\1\u0286\160\0\4\u03dd\2\0\1\u03dd\15\0\1\u03dd"+ - "\6\0\1\u03de\2\u03df\1\u03de\4\u03df\1\u03e0\1\u03df\174\0"+ - "\1\u02d6\4\u03e1\2\0\1\u03e1\15\0\1\u03e1\6\0\12\u03e2"+ - "\1\u03a7\13\0\1\u0286\157\0\1\u02d6\4\u03e1\2\0\1\u03e1"+ - "\15\0\1\u03e1\6\0\12\u03e1\1\u03a7\13\0\1\u0286\157\0"+ - "\1\u02d6\4\u03e1\2\0\1\u03e1\15\0\1\u03e1\6\0\2\u03e2"+ - "\1\u03e1\2\u03e2\2\u03e1\2\u03e2\1\u03e1\1\u03a7\13\0\1\u0286"+ - "\225\0\1\u0321\13\0\1\u0286\157\0\1\u03e3\33\0\12\u03ad"+ - "\174\0\1\u03e3\33\0\12\u03e4\174\0\1\u03e3\33\0\1\u03ad"+ - "\1\u03e5\1\u03e4\2\u03ad\2\u03e4\2\u03ad\1\u03e4\174\0\46\u01ec"+ - "\1\0\2\u01ec\1\u0238\3\u01ec\1\u01ee\1\u014e\1\u014f\1\u0150"+ - "\2\0\2\u01ec\4\0\1\u01ec\151\0\4\u03e6\2\0\1\u03e6"+ - "\15\0\1\u03e6\6\0\12\u03e6\1\u0375\174\0\4\u03e7\2\0"+ - "\1\u03e7\15\0\1\u03e7\6\0\12\u03e7\1\u03e8\173\0\1\u014a"+ - "\4\u03e7\2\0\1\u03e7\15\0\1\u03e7\6\0\12\u03e9\1\u03e8"+ - "\173\0\1\u014a\4\u03e7\2\0\1\u03e7\15\0\1\u03e7\6\0"+ - "\12\u03ea\1\u03e8\173\0\1\u014a\4\u03e7\2\0\1\u03e7\15\0"+ - "\1\u03e7\6\0\1\u03e9\1\u03eb\1\u03ea\2\u03e9\2\u03ea\2\u03e9"+ - "\1\u03ea\1\u03e8\174\0\4\u03ec\2\0\1\u03ec\15\0\1\u03ec"+ - "\6\0\12\u03ec\14\0\1\u0109\160\0\4\u03ed\2\0\1\u03ed"+ - "\15\0\1\u03ed\6\0\12\u03ed\1\u03b7\13\0\1\u0109\160\0"+ - "\4\u03ec\2\0\1\u03ec\15\0\1\u03ec\6\0\12\u03ec\174\0"+ - "\1\u014a\4\u03ed\2\0\1\u03ed\15\0\1\u03ed\6\0\12\u03ee"+ - 
"\1\u03b7\13\0\1\u0109\157\0\1\u014a\4\u03ed\2\0\1\u03ed"+ - "\15\0\1\u03ed\6\0\12\u03ed\1\u03b7\13\0\1\u0109\157\0"+ - "\1\u014a\4\u03ed\2\0\1\u03ed\15\0\1\u03ed\6\0\2\u03ee"+ - "\1\u03ed\2\u03ee\2\u03ed\2\u03ee\1\u03ed\1\u03b7\13\0\1\u0109"+ - "\225\0\1\u032f\13\0\1\u0109\157\0\1\317\32\201\1\320"+ - "\10\201\1\u033c\1\201\174\0\1\317\1\u03ef\31\201\1\320"+ - "\12\201\174\0\1\317\7\201\1\u03f0\22\201\1\320\12\201"+ - "\174\0\1\317\1\u03f1\31\201\1\320\12\201\174\0\1\317"+ - "\25\201\1\u03f2\4\201\1\320\12\201\174\0\1\317\32\201"+ - "\1\320\11\201\1\u03f3\174\0\1\317\1\u03f4\31\201\1\320"+ - "\12\201\174\0\1\317\12\201\1\u03f5\17\201\1\320\12\201"+ - "\174\0\1\317\1\u03f6\31\201\1\320\12\201\174\0\1\317"+ - "\10\201\1\u03f7\21\201\1\320\12\201\174\0\1\317\31\201"+ - "\1\u03f8\1\320\12\201\174\0\1\346\2\235\1\u03f9\27\235"+ - "\1\117\12\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\3\235\1\u03fa"+ - "\26\235\1\117\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\32\235"+ - "\1\117\1\235\1\u03fb\10\235\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\3\235\1\u03fc\26\235\1\117\12\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\1\u03fd\31\235\1\117\12\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\26\235\1\u03fe\3\235\1\117\12\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\150\0\4\u03ff\2\0\1\u03ff\15\0\1\u03ff\6\0\12\u03ff"+ - "\1\u0396\174\0\4\u0400\2\0\1\u0400\15\0\1\u0400\6\0"+ - "\12\u0400\1\u0401\174\0\4\u0402\2\0\1\u0402\15\0\1\u0402"+ - "\6\0\1\u0403\2\u0404\1\u0403\4\u0404\1\u0405\1\u0404\14\0"+ - "\1\u02c9\160\0\4\u0406\2\0\1\u0406\15\0\1\u0406\6\0"+ - "\12\u0406\1\u03d1\13\0\1\u02c9\160\0\4\u0402\2\0\1\u0402"+ - "\15\0\1\u0402\6\0\1\u0403\2\u0404\1\u0403\4\u0404\1\u0405"+ - "\1\u0404\174\0\1\u0316\4\u0406\2\0\1\u0406\15\0\1\u0406"+ - 
"\6\0\12\u0407\1\u03d1\13\0\1\u02c9\157\0\1\u0316\4\u0406"+ - "\2\0\1\u0406\15\0\1\u0406\6\0\12\u0406\1\u03d1\13\0"+ - "\1\u02c9\157\0\1\u0316\4\u0406\2\0\1\u0406\15\0\1\u0406"+ - "\6\0\2\u0407\1\u0406\2\u0407\2\u0406\2\u0407\1\u0406\1\u03d1"+ - "\13\0\1\u02c9\225\0\1\u0359\13\0\1\u02c9\157\0\1\u0408"+ - "\33\0\12\u03d7\174\0\1\u0408\33\0\12\u0409\174\0\1\u0408"+ - "\33\0\1\u03d7\1\u040a\1\u0409\2\u03d7\2\u0409\2\u03d7\1\u0409"+ - "\254\0\1\u0150\227\0\1\u0367\174\0\4\u040b\2\0\1\u040b"+ - "\15\0\1\u040b\6\0\12\u040b\1\u03dc\174\0\4\u040c\2\0"+ - "\1\u040c\15\0\1\u040c\6\0\12\u040c\1\u040d\174\0\4\u040e"+ - "\2\0\1\u040e\15\0\1\u040e\6\0\12\u040e\1\u040f\13\0"+ - "\1\u0286\157\0\1\u02d6\4\u040e\2\0\1\u040e\15\0\1\u040e"+ - "\6\0\12\u0410\1\u040f\13\0\1\u0286\157\0\1\u02d6\4\u040e"+ - "\2\0\1\u040e\15\0\1\u040e\6\0\12\u0411\1\u040f\13\0"+ - "\1\u0286\157\0\1\u02d6\4\u040e\2\0\1\u040e\15\0\1\u040e"+ - "\6\0\1\u0410\1\u0412\1\u0411\2\u0410\2\u0411\2\u0410\1\u0411"+ - "\1\u040f\13\0\1\u0286\160\0\4\u0413\2\0\1\u0413\15\0"+ - "\1\u0413\6\0\12\u0413\1\u03a7\13\0\1\u0286\157\0\1\u02d6"+ - "\4\u0413\2\0\1\u0413\15\0\1\u0413\6\0\12\u0413\1\u03a7"+ - "\13\0\1\u0286\213\0\1\u0414\2\u0415\1\u0414\4\u0415\1\u0416"+ - "\1\u0415\174\0\1\u03e3\241\0\1\u03e3\33\0\2\u03e4\1\0"+ - "\2\u03e4\2\0\2\u03e4\243\0\1\u0375\174\0\4\u0417\2\0"+ - "\1\u0417\15\0\1\u0417\6\0\12\u0417\1\u03e8\174\0\4\u03ec"+ - "\2\0\1\u03ec\15\0\1\u03ec\6\0\12\u03ec\1\u02e4\173\0"+ - "\1\u014a\4\u0417\2\0\1\u0417\15\0\1\u0417\6\0\12\u0418"+ - "\1\u03e8\173\0\1\u014a\4\u0417\2\0\1\u0417\15\0\1\u0417"+ - "\6\0\12\u0417\1\u03e8\173\0\1\u014a\4\u0417\2\0\1\u0417"+ - "\15\0\1\u0417\6\0\2\u0418\1\u0417\2\u0418\2\u0417\2\u0418"+ - "\1\u0417\1\u03e8\174\0\4\u0419\2\0\1\u0419\15\0\1\u0419"+ - "\6\0\12\u0419\14\0\1\u0109\160\0\4\u041a\2\0\1\u041a"+ - "\15\0\1\u041a\6\0\12\u041a\1\u03b7\13\0\1\u0109\157\0"+ - "\1\u014a\4\u041a\2\0\1\u041a\15\0\1\u041a\6\0\12\u041a"+ - "\1\u03b7\13\0\1\u0109\157\0\1\317\32\201\1\320\6\201"+ 
- "\1\u0335\3\201\174\0\1\317\15\201\1\u0116\14\201\1\320"+ - "\12\201\174\0\1\317\32\201\1\320\1\201\1\u041b\10\201"+ - "\174\0\1\317\30\201\1\u041c\1\201\1\320\12\201\174\0"+ - "\1\317\6\201\1\u041d\23\201\1\320\12\201\174\0\1\317"+ - "\32\201\1\320\5\201\1\u041e\4\201\174\0\1\317\22\201"+ - "\1\u0151\7\201\1\320\12\201\174\0\1\317\32\201\1\320"+ - "\5\201\1\u041f\4\201\174\0\1\317\32\201\1\320\1\201"+ - "\1\u0116\10\201\174\0\1\317\13\201\1\u0420\16\201\1\320"+ - "\12\201\174\0\1\346\32\235\1\117\7\235\1\u0421\2\235"+ - "\1\0\3\114\1\0\2\114\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\147\0\1\346\32\235\1\117\10\235\1\260"+ - "\1\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\147\0\1\346\32\235\1\117\4\235"+ - "\1\u0422\5\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\16\235\1\u0423"+ - "\13\235\1\117\12\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\26\235"+ - "\1\u0424\3\235\1\117\12\235\1\0\3\114\1\0\2\114"+ - "\1\115\3\114\3\0\1\114\4\0\2\114\147\0\1\346"+ - "\32\235\1\117\7\235\1\u0425\2\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\215\0"+ - "\1\u0396\174\0\4\u0426\2\0\1\u0426\15\0\1\u0426\6\0"+ - "\12\u0426\1\u0401\174\0\4\u0427\2\0\1\u0427\15\0\1\u0427"+ - "\6\0\12\u0427\1\u0428\174\0\4\u0429\2\0\1\u0429\15\0"+ - "\1\u0429\6\0\12\u0429\1\u042a\13\0\1\u02c9\157\0\1\u0316"+ - "\4\u0429\2\0\1\u0429\15\0\1\u0429\6\0\12\u042b\1\u042a"+ - "\13\0\1\u02c9\157\0\1\u0316\4\u0429\2\0\1\u0429\15\0"+ - "\1\u0429\6\0\12\u042c\1\u042a\13\0\1\u02c9\157\0\1\u0316"+ - "\4\u0429\2\0\1\u0429\15\0\1\u0429\6\0\1\u042b\1\u042d"+ - "\1\u042c\2\u042b\2\u042c\2\u042b\1\u042c\1\u042a\13\0\1\u02c9"+ - "\160\0\4\u042e\2\0\1\u042e\15\0\1\u042e\6\0\12\u042e"+ - "\1\u03d1\13\0\1\u02c9\157\0\1\u0316\4\u042e\2\0\1\u042e"+ - "\15\0\1\u042e\6\0\12\u042e\1\u03d1\13\0\1\u02c9\213\0"+ - "\1\u042f\2\u0430\1\u042f\4\u0430\1\u0431\1\u0430\174\0\1\u0408"+ - "\241\0\1\u0408\33\0\2\u0409\1\0\2\u0409\2\0\2\u0409"+ - 
"\176\0\4\u0432\2\0\1\u0432\15\0\1\u0432\6\0\12\u0432"+ - "\1\u03dc\174\0\4\u0433\2\0\1\u0433\15\0\1\u0433\6\0"+ - "\12\u0433\1\u0434\174\0\4\u0435\2\0\1\u0435\15\0\1\u0435"+ - "\6\0\1\u0436\2\u0437\1\u0436\4\u0437\1\u0438\1\u0437\14\0"+ - "\1\u0286\160\0\4\u0439\2\0\1\u0439\15\0\1\u0439\6\0"+ - "\12\u0439\1\u040f\13\0\1\u0286\160\0\4\u0435\2\0\1\u0435"+ - "\15\0\1\u0435\6\0\1\u0436\2\u0437\1\u0436\4\u0437\1\u0438"+ - "\1\u0437\174\0\1\u02d6\4\u0439\2\0\1\u0439\15\0\1\u0439"+ - "\6\0\12\u043a\1\u040f\13\0\1\u0286\157\0\1\u02d6\4\u0439"+ - "\2\0\1\u0439\15\0\1\u0439\6\0\12\u0439\1\u040f\13\0"+ - "\1\u0286\157\0\1\u02d6\4\u0439\2\0\1\u0439\15\0\1\u0439"+ - "\6\0\2\u043a\1\u0439\2\u043a\2\u0439\2\u043a\1\u0439\1\u040f"+ - "\13\0\1\u0286\225\0\1\u03a7\13\0\1\u0286\213\0\12\u0415"+ - "\14\0\1\u0286\213\0\12\u043b\14\0\1\u0286\213\0\1\u0415"+ - "\1\u043c\1\u043b\2\u0415\2\u043b\2\u0415\1\u043b\14\0\1\u0286"+ - "\160\0\4\u043d\2\0\1\u043d\15\0\1\u043d\6\0\12\u043d"+ - "\1\u03e8\173\0\1\u014a\4\u043d\2\0\1\u043d\15\0\1\u043d"+ - "\6\0\12\u043d\1\u03e8\174\0\4\u043e\2\0\1\u043e\15\0"+ - "\1\u043e\6\0\12\u043e\14\0\1\u0109\225\0\1\u03b7\13\0"+ - "\1\u0109\157\0\1\317\2\201\1\u043f\27\201\1\320\12\201"+ - "\174\0\1\317\3\201\1\u0440\26\201\1\320\12\201\174\0"+ - "\1\317\32\201\1\320\1\201\1\u0441\10\201\174\0\1\317"+ - "\3\201\1\u0442\26\201\1\320\12\201\174\0\1\317\1\u0443"+ - "\31\201\1\320\12\201\174\0\1\317\26\201\1\u0444\3\201"+ - "\1\320\12\201\174\0\1\346\1\u0445\31\235\1\117\12\235"+ - "\1\0\3\114\1\0\2\114\1\115\3\114\3\0\1\114"+ - "\4\0\2\114\147\0\1\346\24\235\1\u0446\5\235\1\117"+ - "\12\235\1\0\3\114\1\0\2\114\1\115\3\114\3\0"+ - "\1\114\4\0\2\114\147\0\1\346\1\235\1\u0447\30\235"+ - "\1\117\12\235\1\0\3\114\1\0\2\114\1\115\3\114"+ - "\3\0\1\114\4\0\2\114\147\0\1\346\32\235\1\117"+ - "\2\235\1\362\7\235\1\0\3\114\1\0\2\114\1\115"+ - "\3\114\3\0\1\114\4\0\2\114\147\0\1\346\6\235"+ - "\1\353\23\235\1\117\12\235\1\0\3\114\1\0\2\114"+ - 
"\1\115\3\114\3\0\1\114\4\0\2\114\150\0\4\u0448"+ - "\2\0\1\u0448\15\0\1\u0448\6\0\12\u0448\1\u0401\174\0"+ - "\4\u0449\2\0\1\u0449\15\0\1\u0449\6\0\12\u0449\1\u044a"+ - "\174\0\4\u044b\2\0\1\u044b\15\0\1\u044b\6\0\1\u044c"+ - "\2\u044d\1\u044c\4\u044d\1\u044e\1\u044d\14\0\1\u02c9\160\0"+ - "\4\u044f\2\0\1\u044f\15\0\1\u044f\6\0\12\u044f\1\u042a"+ - "\13\0\1\u02c9\160\0\4\u044b\2\0\1\u044b\15\0\1\u044b"+ - "\6\0\1\u044c\2\u044d\1\u044c\4\u044d\1\u044e\1\u044d\174\0"+ - "\1\u0316\4\u044f\2\0\1\u044f\15\0\1\u044f\6\0\12\u0450"+ - "\1\u042a\13\0\1\u02c9\157\0\1\u0316\4\u044f\2\0\1\u044f"+ - "\15\0\1\u044f\6\0\12\u044f\1\u042a\13\0\1\u02c9\157\0"+ - "\1\u0316\4\u044f\2\0\1\u044f\15\0\1\u044f\6\0\2\u0450"+ - "\1\u044f\2\u0450\2\u044f\2\u0450\1\u044f\1\u042a\13\0\1\u02c9"+ - "\225\0\1\u03d1\13\0\1\u02c9\213\0\12\u0430\14\0\1\u02c9"+ - "\213\0\12\u0451\14\0\1\u02c9\213\0\1\u0430\1\u0452\1\u0451"+ - "\2\u0430\2\u0451\2\u0430\1\u0451\14\0\1\u02c9\225\0\1\u03dc"+ - "\174\0\4\u0453\2\0\1\u0453\15\0\1\u0453\6\0\12\u0453"+ - "\1\u0434\174\0\4\u0454\2\0\1\u0454\15\0\1\u0454\6\0"+ - "\12\u0454\1\u0455\174\0\4\u0456\2\0\1\u0456\15\0\1\u0456"+ - "\6\0\12\u0456\1\u0457\13\0\1\u0286\157\0\1\u02d6\4\u0456"+ - "\2\0\1\u0456\15\0\1\u0456\6\0\12\u0458\1\u0457\13\0"+ - "\1\u0286\157\0\1\u02d6\4\u0456\2\0\1\u0456\15\0\1\u0456"+ - "\6\0\12\u0459\1\u0457\13\0\1\u0286\157\0\1\u02d6\4\u0456"+ - "\2\0\1\u0456\15\0\1\u0456\6\0\1\u0458\1\u045a\1\u0459"+ - "\2\u0458\2\u0459\2\u0458\1\u0459\1\u0457\13\0\1\u0286\160\0"+ - "\4\u045b\2\0\1\u045b\15\0\1\u045b\6\0\12\u045b\1\u040f"+ - "\13\0\1\u0286\157\0\1\u02d6\4\u045b\2\0\1\u045b\15\0"+ - "\1\u045b\6\0\12\u045b\1\u040f\13\0\1\u0286\241\0\1\u0286"+ - "\213\0\2\u043b\1\0\2\u043b\2\0\2\u043b\15\0\1\u0286"+ - "\225\0\1\u03e8\174\0\4\u02e4\2\0\1\u02e4\15\0\1\u02e4"+ - "\6\0\12\u02e4\14\0\1\u0109\157\0\1\317\32\201\1\320"+ - "\7\201\1\u045c\2\201\174\0\1\317\32\201\1\320\10\201"+ - "\1\u0116\1\201\174\0\1\317\32\201\1\320\4\201\1\u045d"+ - 
"\5\201\174\0\1\317\16\201\1\u045e\13\201\1\320\12\201"+ - "\174\0\1\317\26\201\1\u045f\3\201\1\320\12\201\174\0"+ - "\1\317\32\201\1\320\7\201\1\u0460\2\201\174\0\1\346"+ - "\4\235\1\353\25\235\1\117\12\235\1\0\3\114\1\0"+ - "\2\114\1\115\3\114\3\0\1\114\4\0\2\114\147\0"+ - "\1\346\24\235\1\260\5\235\1\117\12\235\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\147\0\1\346\32\235\1\117\11\235\1\260\1\0\3\114"+ - "\1\0\2\114\1\115\3\114\3\0\1\114\4\0\2\114"+ - "\215\0\1\u0401\174\0\4\u0461\2\0\1\u0461\15\0\1\u0461"+ - "\6\0\12\u0461\1\u044a\174\0\4\u0462\2\0\1\u0462\15\0"+ - "\1\u0462\6\0\12\u0462\1\u0463\174\0\4\u0464\2\0\1\u0464"+ - "\15\0\1\u0464\6\0\12\u0464\1\u0465\13\0\1\u02c9\157\0"+ - "\1\u0316\4\u0464\2\0\1\u0464\15\0\1\u0464\6\0\12\u0466"+ - "\1\u0465\13\0\1\u02c9\157\0\1\u0316\4\u0464\2\0\1\u0464"+ - "\15\0\1\u0464\6\0\12\u0467\1\u0465\13\0\1\u02c9\157\0"+ - "\1\u0316\4\u0464\2\0\1\u0464\15\0\1\u0464\6\0\1\u0466"+ - "\1\u0468\1\u0467\2\u0466\2\u0467\2\u0466\1\u0467\1\u0465\13\0"+ - "\1\u02c9\160\0\4\u0469\2\0\1\u0469\15\0\1\u0469\6\0"+ - "\12\u0469\1\u042a\13\0\1\u02c9\157\0\1\u0316\4\u0469\2\0"+ - "\1\u0469\15\0\1\u0469\6\0\12\u0469\1\u042a\13\0\1\u02c9"+ - "\241\0\1\u02c9\213\0\2\u0451\1\0\2\u0451\2\0\2\u0451"+ - "\15\0\1\u02c9\160\0\4\u046a\2\0\1\u046a\15\0\1\u046a"+ - "\6\0\12\u046a\1\u0434\174\0\4\u046b\2\0\1\u046b\15\0"+ - "\1\u046b\6\0\12\u046b\1\u046c\174\0\4\u046d\2\0\1\u046d"+ - "\15\0\1\u046d\6\0\1\u046e\2\u046f\1\u046e\4\u046f\1\u0470"+ - "\1\u046f\14\0\1\u0286\160\0\4\u0471\2\0\1\u0471\15\0"+ - "\1\u0471\6\0\12\u0471\1\u0457\13\0\1\u0286\160\0\4\u046d"+ - "\2\0\1\u046d\15\0\1\u046d\6\0\1\u046e\2\u046f\1\u046e"+ - "\4\u046f\1\u0470\1\u046f\174\0\1\u02d6\4\u0471\2\0\1\u0471"+ - "\15\0\1\u0471\6\0\12\u0472\1\u0457\13\0\1\u0286\157\0"+ - "\1\u02d6\4\u0471\2\0\1\u0471\15\0\1\u0471\6\0\12\u0471"+ - "\1\u0457\13\0\1\u0286\157\0\1\u02d6\4\u0471\2\0\1\u0471"+ - "\15\0\1\u0471\6\0\2\u0472\1\u0471\2\u0472\2\u0471\2\u0472"+ - 
"\1\u0471\1\u0457\13\0\1\u0286\225\0\1\u040f\13\0\1\u0286"+ - "\157\0\1\317\1\u0473\31\201\1\320\12\201\174\0\1\317"+ - "\24\201\1\u0474\5\201\1\320\12\201\174\0\1\317\1\201"+ - "\1\u0475\30\201\1\320\12\201\174\0\1\317\32\201\1\320"+ - "\2\201\1\u0158\7\201\174\0\1\317\6\201\1\u0151\23\201"+ - "\1\320\12\201\175\0\4\u0476\2\0\1\u0476\15\0\1\u0476"+ - "\6\0\12\u0476\1\u044a\174\0\4\u0477\2\0\1\u0477\15\0"+ - "\1\u0477\6\0\12\u0477\1\u0478\174\0\4\u0479\2\0\1\u0479"+ - "\15\0\1\u0479\6\0\1\u047a\2\u047b\1\u047a\4\u047b\1\u047c"+ - "\1\u047b\14\0\1\u02c9\160\0\4\u047d\2\0\1\u047d\15\0"+ - "\1\u047d\6\0\12\u047d\1\u0465\13\0\1\u02c9\160\0\4\u0479"+ - "\2\0\1\u0479\15\0\1\u0479\6\0\1\u047a\2\u047b\1\u047a"+ - "\4\u047b\1\u047c\1\u047b\174\0\1\u0316\4\u047d\2\0\1\u047d"+ - "\15\0\1\u047d\6\0\12\u047e\1\u0465\13\0\1\u02c9\157\0"+ - "\1\u0316\4\u047d\2\0\1\u047d\15\0\1\u047d\6\0\12\u047d"+ - "\1\u0465\13\0\1\u02c9\157\0\1\u0316\4\u047d\2\0\1\u047d"+ - "\15\0\1\u047d\6\0\2\u047e\1\u047d\2\u047e\2\u047d\2\u047e"+ - "\1\u047d\1\u0465\13\0\1\u02c9\225\0\1\u042a\13\0\1\u02c9"+ - "\225\0\1\u0434\174\0\4\u047f\2\0\1\u047f\15\0\1\u047f"+ - "\6\0\12\u047f\1\u046c\174\0\4\u0480\2\0\1\u0480\15\0"+ - "\1\u0480\6\0\1\u0481\2\u0482\1\u0481\4\u0482\1\u0483\1\u0482"+ - "\1\u0484\174\0\4\u0485\2\0\1\u0485\15\0\1\u0485\6\0"+ - "\12\u0485\1\u0486\13\0\1\u0286\157\0\1\u02d6\4\u0485\2\0"+ - "\1\u0485\15\0\1\u0485\6\0\12\u0487\1\u0486\13\0\1\u0286"+ - "\157\0\1\u02d6\4\u0485\2\0\1\u0485\15\0\1\u0485\6\0"+ - "\12\u0488\1\u0486\13\0\1\u0286\157\0\1\u02d6\4\u0485\2\0"+ - "\1\u0485\15\0\1\u0485\6\0\1\u0487\1\u0489\1\u0488\2\u0487"+ - "\2\u0488\2\u0487\1\u0488\1\u0486\13\0\1\u0286\160\0\4\u048a"+ - "\2\0\1\u048a\15\0\1\u048a\6\0\12\u048a\1\u0457\13\0"+ - "\1\u0286\157\0\1\u02d6\4\u048a\2\0\1\u048a\15\0\1\u048a"+ - "\6\0\12\u048a\1\u0457\13\0\1\u0286\157\0\1\317\4\201"+ - "\1\u0151\25\201\1\320\12\201\174\0\1\317\24\201\1\u0116"+ - "\5\201\1\320\12\201\174\0\1\317\32\201\1\320\11\201"+ - 
"\1\u0116\242\0\1\u044a\174\0\4\u048b\2\0\1\u048b\15\0"+ - "\1\u048b\6\0\12\u048b\1\u0478\174\0\4\u048c\2\0\1\u048c"+ - "\15\0\1\u048c\6\0\1\u048d\2\u048e\1\u048d\4\u048e\1\u048f"+ - "\1\u048e\1\u0490\174\0\4\u0491\2\0\1\u0491\15\0\1\u0491"+ - "\6\0\12\u0491\1\u0492\13\0\1\u02c9\157\0\1\u0316\4\u0491"+ - "\2\0\1\u0491\15\0\1\u0491\6\0\12\u0493\1\u0492\13\0"+ - "\1\u02c9\157\0\1\u0316\4\u0491\2\0\1\u0491\15\0\1\u0491"+ - "\6\0\12\u0494\1\u0492\13\0\1\u02c9\157\0\1\u0316\4\u0491"+ - "\2\0\1\u0491\15\0\1\u0491\6\0\1\u0493\1\u0495\1\u0494"+ - "\2\u0493\2\u0494\2\u0493\1\u0494\1\u0492\13\0\1\u02c9\160\0"+ - "\4\u0496\2\0\1\u0496\15\0\1\u0496\6\0\12\u0496\1\u0465"+ - "\13\0\1\u02c9\157\0\1\u0316\4\u0496\2\0\1\u0496\15\0"+ - "\1\u0496\6\0\12\u0496\1\u0465\13\0\1\u02c9\160\0\4\u0497"+ - "\2\0\1\u0497\15\0\1\u0497\6\0\12\u0497\1\u046c\174\0"+ - "\4\u0498\2\0\1\u0498\15\0\1\u0498\6\0\12\u0498\1\u0499"+ - "\173\0\1\u02d6\4\u0498\2\0\1\u0498\15\0\1\u0498\6\0"+ - "\12\u049a\1\u0499\173\0\1\u02d6\4\u0498\2\0\1\u0498\15\0"+ - "\1\u0498\6\0\12\u049b\1\u0499\173\0\1\u02d6\4\u0498\2\0"+ - "\1\u0498\15\0\1\u0498\6\0\1\u049a\1\u049c\1\u049b\2\u049a"+ - "\2\u049b\2\u049a\1\u049b\1\u0499\174\0\4\u049d\2\0\1\u049d"+ - "\15\0\1\u049d\6\0\12\u049d\14\0\1\u0286\160\0\4\u049e"+ - "\2\0\1\u049e\15\0\1\u049e\6\0\12\u049e\1\u0486\13\0"+ - "\1\u0286\160\0\4\u049d\2\0\1\u049d\15\0\1\u049d\6\0"+ - "\12\u049d\174\0\1\u02d6\4\u049e\2\0\1\u049e\15\0\1\u049e"+ - "\6\0\12\u049f\1\u0486\13\0\1\u0286\157\0\1\u02d6\4\u049e"+ - "\2\0\1\u049e\15\0\1\u049e\6\0\12\u049e\1\u0486\13\0"+ - "\1\u0286\157\0\1\u02d6\4\u049e\2\0\1\u049e\15\0\1\u049e"+ - "\6\0\2\u049f\1\u049e\2\u049f\2\u049e\2\u049f\1\u049e\1\u0486"+ - "\13\0\1\u0286\225\0\1\u0457\13\0\1\u0286\160\0\4\u04a0"+ - "\2\0\1\u04a0\15\0\1\u04a0\6\0\12\u04a0\1\u0478\174\0"+ - "\4\u04a1\2\0\1\u04a1\15\0\1\u04a1\6\0\12\u04a1\1\u04a2"+ - "\173\0\1\u0316\4\u04a1\2\0\1\u04a1\15\0\1\u04a1\6\0"+ - "\12\u04a3\1\u04a2\173\0\1\u0316\4\u04a1\2\0\1\u04a1\15\0"+ - 
"\1\u04a1\6\0\12\u04a4\1\u04a2\173\0\1\u0316\4\u04a1\2\0"+ - "\1\u04a1\15\0\1\u04a1\6\0\1\u04a3\1\u04a5\1\u04a4\2\u04a3"+ - "\2\u04a4\2\u04a3\1\u04a4\1\u04a2\174\0\4\u04a6\2\0\1\u04a6"+ - "\15\0\1\u04a6\6\0\12\u04a6\14\0\1\u02c9\160\0\4\u04a7"+ - "\2\0\1\u04a7\15\0\1\u04a7\6\0\12\u04a7\1\u0492\13\0"+ - "\1\u02c9\160\0\4\u04a6\2\0\1\u04a6\15\0\1\u04a6\6\0"+ - "\12\u04a6\174\0\1\u0316\4\u04a7\2\0\1\u04a7\15\0\1\u04a7"+ - "\6\0\12\u04a8\1\u0492\13\0\1\u02c9\157\0\1\u0316\4\u04a7"+ - "\2\0\1\u04a7\15\0\1\u04a7\6\0\12\u04a7\1\u0492\13\0"+ - "\1\u02c9\157\0\1\u0316\4\u04a7\2\0\1\u04a7\15\0\1\u04a7"+ - "\6\0\2\u04a8\1\u04a7\2\u04a8\2\u04a7\2\u04a8\1\u04a7\1\u0492"+ - "\13\0\1\u02c9\225\0\1\u0465\13\0\1\u02c9\225\0\1\u046c"+ - "\174\0\4\u04a9\2\0\1\u04a9\15\0\1\u04a9\6\0\12\u04a9"+ - "\1\u0499\174\0\4\u049d\2\0\1\u049d\15\0\1\u049d\6\0"+ - "\12\u049d\1\u043b\173\0\1\u02d6\4\u04a9\2\0\1\u04a9\15\0"+ - "\1\u04a9\6\0\12\u04aa\1\u0499\173\0\1\u02d6\4\u04a9\2\0"+ - "\1\u04a9\15\0\1\u04a9\6\0\12\u04a9\1\u0499\173\0\1\u02d6"+ - "\4\u04a9\2\0\1\u04a9\15\0\1\u04a9\6\0\2\u04aa\1\u04a9"+ - "\2\u04aa\2\u04a9\2\u04aa\1\u04a9\1\u0499\174\0\4\u04ab\2\0"+ - "\1\u04ab\15\0\1\u04ab\6\0\12\u04ab\14\0\1\u0286\160\0"+ - "\4\u04ac\2\0\1\u04ac\15\0\1\u04ac\6\0\12\u04ac\1\u0486"+ - "\13\0\1\u0286\157\0\1\u02d6\4\u04ac\2\0\1\u04ac\15\0"+ - "\1\u04ac\6\0\12\u04ac\1\u0486\13\0\1\u0286\225\0\1\u0478"+ - "\174\0\4\u04ad\2\0\1\u04ad\15\0\1\u04ad\6\0\12\u04ad"+ - "\1\u04a2\174\0\4\u04a6\2\0\1\u04a6\15\0\1\u04a6\6\0"+ - "\12\u04a6\1\u0451\173\0\1\u0316\4\u04ad\2\0\1\u04ad\15\0"+ - "\1\u04ad\6\0\12\u04ae\1\u04a2\173\0\1\u0316\4\u04ad\2\0"+ - "\1\u04ad\15\0\1\u04ad\6\0\12\u04ad\1\u04a2\173\0\1\u0316"+ - "\4\u04ad\2\0\1\u04ad\15\0\1\u04ad\6\0\2\u04ae\1\u04ad"+ - "\2\u04ae\2\u04ad\2\u04ae\1\u04ad\1\u04a2\174\0\4\u04af\2\0"+ - "\1\u04af\15\0\1\u04af\6\0\12\u04af\14\0\1\u02c9\160\0"+ - "\4\u04b0\2\0\1\u04b0\15\0\1\u04b0\6\0\12\u04b0\1\u0492"+ - "\13\0\1\u02c9\157\0\1\u0316\4\u04b0\2\0\1\u04b0\15\0"+ - 
"\1\u04b0\6\0\12\u04b0\1\u0492\13\0\1\u02c9\160\0\4\u04b1"+ - "\2\0\1\u04b1\15\0\1\u04b1\6\0\12\u04b1\1\u0499\173\0"+ - "\1\u02d6\4\u04b1\2\0\1\u04b1\15\0\1\u04b1\6\0\12\u04b1"+ - "\1\u0499\174\0\4\u04b2\2\0\1\u04b2\15\0\1\u04b2\6\0"+ - "\12\u04b2\14\0\1\u0286\225\0\1\u0486\13\0\1\u0286\160\0"+ - "\4\u04b3\2\0\1\u04b3\15\0\1\u04b3\6\0\12\u04b3\1\u04a2"+ - "\173\0\1\u0316\4\u04b3\2\0\1\u04b3\15\0\1\u04b3\6\0"+ - "\12\u04b3\1\u04a2\174\0\4\u04b4\2\0\1\u04b4\15\0\1\u04b4"+ - "\6\0\12\u04b4\14\0\1\u02c9\225\0\1\u0492\13\0\1\u02c9"+ - "\225\0\1\u0499\174\0\4\u043b\2\0\1\u043b\15\0\1\u043b"+ - "\6\0\12\u043b\14\0\1\u0286\225\0\1\u04a2\174\0\4\u0451"+ - "\2\0\1\u0451\15\0\1\u0451\6\0\12\u0451\14\0\1\u02c9"+ - "\11\0"; + "\1\53\1\0\1\67\3\0\1\70\5\0\1\71\3\0"+ + "\1\72\11\0\1\60\2\0\1\73\16\0\1\74\2\0"+ + "\1\75\41\0\1\25\2\26\2\0\2\76\1\77\1\0"+ + "\1\26\2\0\1\25\1\u010e\32\36\1\127\12\316\1\u0143"+ + "\1\124\1\137\1\124\1\0\2\140\1\125\1\u0133\1\u0134"+ + "\1\u0135\2\0\1\76\1\124\4\0\2\124\2\0\1\47"+ + "\1\0\1\50\1\0\1\51\1\0\1\52\1\0\1\53"+ + "\1\0\1\67\3\0\1\70\5\0\1\71\3\0\1\72"+ + "\11\0\1\60\2\0\1\73\16\0\1\74\2\0\1\75"+ + "\41\0\1\25\2\26\2\0\2\76\1\77\1\0\1\26"+ + "\2\0\1\25\1\u010e\32\36\1\127\2\u0208\1\316\2\u0208"+ + "\2\316\1\u0208\1\316\1\u0208\1\u0143\1\124\1\137\1\124"+ + "\1\0\2\140\1\125\1\u0133\1\u0134\1\u0135\2\0\1\76"+ + "\1\124\4\0\2\124\151\0\4\u025d\2\0\1\u025d\15\0"+ + "\1\u025d\6\0\12\u025d\1\u01c7\175\0\4\u025e\2\0\1\u025e"+ + "\15\0\1\u025e\6\0\12\u025e\1\u025f\175\0\4\u0260\2\0"+ + "\1\u0260\15\0\1\u0260\6\0\1\u0261\2\u0262\1\u0261\5\u0262"+ + "\1\u0263\14\0\1\u0116\161\0\4\u0264\2\0\1\u0264\15\0"+ + "\1\u0264\6\0\12\u0264\1\u020e\13\0\1\u0116\161\0\4\u0260"+ + "\2\0\1\u0260\15\0\1\u0260\6\0\1\u0261\2\u0262\1\u0261"+ + "\5\u0262\1\u0263\175\0\1\u0157\4\u0264\2\0\1\u0264\15\0"+ + "\1\u0264\6\0\12\u0265\1\u020e\13\0\1\u0116\160\0\1\u0157"+ + "\4\u0264\2\0\1\u0264\15\0\1\u0264\6\0\12\u0264\1\u020e"+ + 
"\13\0\1\u0116\160\0\1\u0157\4\u0264\2\0\1\u0264\15\0"+ + "\1\u0264\6\0\2\u0265\1\u0264\2\u0265\2\u0264\1\u0265\1\u0264"+ + "\1\u0265\1\u020e\13\0\1\u0116\226\0\1\u018a\13\0\1\u0116"+ + "\160\0\1\u0266\33\0\12\u0214\175\0\1\u0266\33\0\12\u0267"+ + "\175\0\1\u0266\33\0\1\u0214\1\u0268\1\u0267\2\u0214\2\u0267"+ + "\1\u0214\1\u0267\1\u0214\175\0\1\334\12\216\1\u015e\17\216"+ + "\1\335\12\216\175\0\1\334\11\216\1\u0269\20\216\1\335"+ + "\12\216\175\0\1\334\3\216\1\u026a\26\216\1\335\12\216"+ + "\175\0\1\334\7\216\1\u026b\22\216\1\335\4\216\1\u026c"+ + "\5\216\175\0\1\334\10\216\1\u026d\4\216\1\u026e\5\216"+ + "\1\u026f\6\216\1\335\12\216\175\0\1\334\3\216\1\u0270"+ + "\26\216\1\335\2\216\1\u0271\7\216\175\0\1\334\7\216"+ + "\1\u0272\22\216\1\335\12\216\175\0\1\334\7\216\1\u0273"+ + "\22\216\1\335\3\216\1\u0274\6\216\175\0\1\334\32\216"+ + "\1\335\5\216\1\u0275\4\216\175\0\1\334\7\216\1\u0276"+ + "\22\216\1\335\12\216\175\0\1\334\31\216\1\u0277\1\335"+ + "\12\216\175\0\1\334\1\216\1\u0278\30\216\1\335\12\216"+ + "\175\0\1\334\7\216\1\u0279\1\216\1\u027a\20\216\1\335"+ + "\11\216\1\u0275\175\0\1\334\22\216\1\u027b\7\216\1\335"+ + "\2\216\1\u027c\7\216\175\0\1\334\6\216\1\u027d\1\u027e"+ + "\22\216\1\335\12\216\175\0\1\334\7\216\1\u027f\5\216"+ + "\1\u0280\14\216\1\335\12\216\175\0\1\334\23\216\1\u0281"+ + "\6\216\1\335\12\216\175\0\1\334\32\216\1\335\3\216"+ + "\1\u0282\6\216\175\0\1\334\3\216\1\u0283\26\216\1\335"+ + "\12\216\175\0\1\334\17\216\1\u0284\12\216\1\335\12\216"+ + "\175\0\1\334\32\216\1\335\1\216\1\u0275\10\216\175\0"+ + "\1\334\32\216\1\335\1\u0285\11\216\231\0\12\u0286\10\0"+ + "\1\u015b\1\u015c\1\u015d\162\0\1\363\25\252\1\u0287\4\252"+ + "\1\127\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\1\u0288\31\252"+ + "\1\127\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\15\252\1\u0289"+ + "\14\252\1\127\12\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\21\252"+ + 
"\1\u028a\10\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\16\252\1\u028b\4\252\1\u028c\6\252\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\4\252\1\u028d\25\252\1\127\12\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\32\252\1\127\11\252\1\u028e"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\4\252\1\u028f\25\252\1\127"+ + "\12\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\32\252\1\127\11\252"+ + "\1\u0290\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\1\u0291\2\252\1\u0292"+ + "\20\252\1\u0293\5\252\1\127\12\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\16\252\1\u0294\13\252\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\11\252\1\u0295\13\252\1\u0296\4\252\1\127"+ + "\12\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\32\252\1\127\11\252"+ + "\1\u0297\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\23\252\1\u0298\6\252"+ + "\1\127\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\31\252\1\u0299"+ + "\1\127\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\26\252\1\u029a"+ + "\3\252\1\127\12\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\11\252"+ + "\1\u029b\20\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\32\252\1\127\3\252\1\u029c\6\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\5\252\1\u029d\24\252\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\10\252\1\u029e\21\252\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\3\252\1\u029f\26\252\1\127\12\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + 
"\4\0\2\124\150\0\1\363\21\252\1\u02a0\6\252\1\u02a1"+ + "\1\252\1\127\12\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\12\252"+ + "\1\u02a2\17\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\32\252\1\127\1\252\1\u02a3\10\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\24\252\1\u02a4\5\252\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\24\252\1\u02a5\5\252\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\31\252\1\u02a6\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\151\0\32\u01f6\1\0\12\u01f6\176\0\32\u01f6\1\u0249"+ + "\12\u01f6\176\0\4\u02a7\2\0\1\u02a7\15\0\1\u02a7\6\0"+ + "\12\u02a7\176\0\4\u02a8\2\0\1\u02a8\15\0\1\u02a8\6\0"+ + "\12\u02a8\1\u02a9\242\0\1\u02aa\174\0\34\u01bc\12\u02ab\1\0"+ + "\2\u01bc\1\u01fd\3\u01bc\1\u01be\1\0\1\u01fc\3\0\2\u01bc"+ + "\4\0\1\u01bc\152\0\4\u02ac\2\0\1\u02ac\15\0\1\u02ac"+ + "\6\0\12\u02ac\215\0\1\u02ad\223\0\4\u01bc\2\0\1\u01bc"+ + "\15\0\1\u01bc\6\0\12\u01bc\176\0\32\u01ff\1\0\12\u01ff"+ + "\176\0\32\u01ff\1\u0252\12\u01ff\231\0\12\u02ae\176\0\4\u02af"+ + "\2\0\1\u02af\15\0\1\u02af\6\0\12\u02af\1\u0255\175\0"+ + "\4\u02b0\2\0\1\u02b0\15\0\1\u02b0\6\0\12\u02b0\1\u02b1"+ + "\175\0\4\u02b2\2\0\1\u02b2\15\0\1\u02b2\6\0\1\u02b3"+ + "\2\u02b4\1\u02b3\5\u02b4\1\u02b5\14\0\1\u02b6\160\0\1\u0202"+ + "\32\u0203\1\u0202\12\u0203\1\u0204\2\u0202\1\u0205\3\u0202\1\u0206"+ + "\5\0\2\u0202\4\0\1\u0202\151\0\1\u0202\32\u0203\1\u0258"+ + "\12\u0203\1\u0204\2\u0202\1\u0205\3\u0202\1\u0206\5\0\2\u0202"+ + "\4\0\1\u0202\151\0\34\u0204\12\u02b7\1\0\2\u0204\1\u025a"+ + "\3\u0204\1\u0206\5\0\2\u0204\4\0\1\u0204\152\0\4\u02b8"+ + "\2\0\1\u02b8\15\0\1\u02b8\6\0\12\u02b8\176\0\4\u0202"+ + "\2\0\1\u0202\15\0\1\u0202\6\0\12\u0202\175\0\1\u02b9"+ + "\32\u025c\1\u02ba\12\u025c\1\u0143\7\0\1\u015b\1\u015c\1\u015d"+ + 
"\230\0\1\u01c7\175\0\4\u02bb\2\0\1\u02bb\15\0\1\u02bb"+ + "\6\0\12\u02bb\1\u025f\175\0\4\u02bc\2\0\1\u02bc\15\0"+ + "\1\u02bc\6\0\12\u02bc\1\u02bd\175\0\4\u02be\2\0\1\u02be"+ + "\15\0\1\u02be\6\0\12\u02be\1\u02bf\13\0\1\u0116\160\0"+ + "\1\u0157\4\u02be\2\0\1\u02be\15\0\1\u02be\6\0\12\u02c0"+ + "\1\u02bf\13\0\1\u0116\160\0\1\u0157\4\u02be\2\0\1\u02be"+ + "\15\0\1\u02be\6\0\12\u02c1\1\u02bf\13\0\1\u0116\160\0"+ + "\1\u0157\4\u02be\2\0\1\u02be\15\0\1\u02be\6\0\1\u02c0"+ + "\1\u02c2\1\u02c1\2\u02c0\2\u02c1\1\u02c0\1\u02c1\1\u02c0\1\u02bf"+ + "\13\0\1\u0116\161\0\4\u02c3\2\0\1\u02c3\15\0\1\u02c3"+ + "\6\0\12\u02c3\1\u020e\13\0\1\u0116\160\0\1\u0157\4\u02c3"+ + "\2\0\1\u02c3\15\0\1\u02c3\6\0\12\u02c3\1\u020e\13\0"+ + "\1\u0116\214\0\1\u02c4\2\u02c5\1\u02c4\5\u02c5\1\u02c6\175\0"+ + "\1\u0266\242\0\1\u0266\33\0\2\u0267\1\0\2\u0267\2\0"+ + "\1\u0267\1\0\1\u0267\175\0\1\334\1\216\1\u02c7\30\216"+ + "\1\335\12\216\175\0\1\334\24\216\1\u02c8\5\216\1\335"+ + "\12\216\175\0\1\334\24\216\1\u02c9\5\216\1\335\12\216"+ + "\175\0\1\334\1\216\1\u02ca\30\216\1\335\12\216\175\0"+ + "\1\334\14\216\1\u02cb\15\216\1\335\12\216\175\0\1\334"+ + "\1\216\1\u02cc\30\216\1\335\12\216\175\0\1\334\1\216"+ + "\1\u02cd\30\216\1\335\12\216\175\0\1\334\1\216\1\u02ce"+ + "\30\216\1\335\12\216\175\0\1\334\21\216\1\u02cf\10\216"+ + "\1\335\12\216\175\0\1\334\24\216\1\u02d0\5\216\1\335"+ + "\12\216\175\0\1\334\24\216\1\u02d1\5\216\1\335\12\216"+ + "\175\0\1\334\1\u0199\31\216\1\335\12\216\175\0\1\334"+ + "\24\216\1\u02ce\5\216\1\335\12\216\175\0\1\334\24\216"+ + "\1\u02d2\5\216\1\335\12\216\175\0\1\334\1\216\1\u02d3"+ + "\30\216\1\335\12\216\175\0\1\334\31\216\1\u02d4\1\335"+ + "\12\216\175\0\1\334\24\216\1\u02d5\5\216\1\335\12\216"+ + "\175\0\1\334\1\216\1\u02d6\30\216\1\335\12\216\175\0"+ + "\1\334\1\u02d7\31\216\1\335\12\216\175\0\1\334\21\216"+ + "\1\u02d8\10\216\1\335\12\216\175\0\1\334\4\216\1\u02d9"+ + "\25\216\1\335\12\216\175\0\1\334\24\216\1\u02da\5\216"+ + 
"\1\335\12\216\175\0\1\334\24\216\1\u02db\5\216\1\335"+ + "\12\216\175\0\1\334\4\216\1\u02dc\25\216\1\335\12\216"+ + "\175\0\1\334\21\216\1\u02dd\10\216\1\335\12\216\175\0"+ + "\1\334\24\216\1\u02de\5\216\1\335\12\216\175\0\1\334"+ + "\32\216\1\335\1\u02df\11\216\175\0\1\334\32\216\1\335"+ + "\7\216\1\u02e0\2\216\175\0\1\334\1\u02e1\31\216\1\335"+ + "\12\216\253\0\1\u015b\1\u015c\1\u015d\162\0\1\363\1\252"+ + "\1\u02e2\30\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\32\252\1\127\1\u02e3\11\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\6\252\1\u02e4\23\252\1\127\12\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\32\252\1\127\7\252\1\u02e5\2\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\32\252\1\127\10\252\1\u014a\1\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\32\252\1\127\5\252\1\u014a\4\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\26\252\1\u02e6\3\252\1\127"+ + "\12\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\1\252\1\u02e7\30\252"+ + "\1\127\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\26\252\1\u02e8"+ + "\3\252\1\127\12\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\32\252"+ + "\1\127\1\252\1\u02e9\10\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\1\u02ea\27\252\1\u02eb\1\252\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\4\252\1\u02ec\25\252\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\25\252\1\u02ed\4\252\1\127\12\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\32\252\1\127\1\u02ee\11\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\32\252\1\127\2\252\1\275"+ + 
"\7\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\32\252\1\127\3\252"+ + "\1\u02ef\6\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\1\u02f0\1\252"+ + "\1\u02f1\27\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\1\u02e5\31\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\32\252\1\127\2\252\1\u02f2\7\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\32\252\1\127\2\252\1\u02f3\7\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\15\252\1\u02f4\14\252\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\32\252\1\127\5\252\1\u02f5\4\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\32\252\1\127\7\252\1\u02f6"+ + "\2\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\32\252\1\127\11\252"+ + "\1\u02f7\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\1\252\1\u02f8\30\252"+ + "\1\127\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\32\252\1\127"+ + "\3\252\1\u02f9\6\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\32\252"+ + "\1\127\1\252\1\u02fa\10\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\32\252\1\127\1\252\1\u02fb\10\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\24\252\1\u02fc\5\252\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\32\252\1\127\6\252\1\u02fd\3\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\32\252\1\127\3\252\1\u02fe\6\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\25\252\1\u02ff\4\252\1\127"+ + "\12\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\151\0\4\u01f7\2\0\1\u01f7\15\0"+ + 
"\1\u01f7\6\0\12\u01f7\176\0\4\u0300\2\0\1\u0300\15\0"+ + "\1\u0300\6\0\12\u0300\1\u02a9\175\0\4\u0301\2\0\1\u0301"+ + "\15\0\1\u0301\6\0\12\u0301\1\u0302\175\0\4\u0303\2\0"+ + "\1\u0303\15\0\1\u0303\6\0\1\u0304\2\u0305\1\u0304\5\u0305"+ + "\1\u0306\14\0\1\u0307\160\0\34\u01bc\12\u0308\1\0\2\u01bc"+ + "\1\u01fd\3\u01bc\1\u01be\1\0\1\u01fc\3\0\2\u01bc\4\0"+ + "\1\u01bc\152\0\4\u01fc\2\0\1\u01fc\15\0\1\u01fc\6\0"+ + "\12\u01fc\226\0\1\u0309\245\0\12\u030a\11\0\1\u01fc\164\0"+ + "\4\u030b\2\0\1\u030b\15\0\1\u030b\6\0\12\u030b\1\u0255"+ + "\175\0\4\u030c\2\0\1\u030c\15\0\1\u030c\6\0\12\u030c"+ + "\1\u030d\175\0\4\u030e\2\0\1\u030e\15\0\1\u030e\6\0"+ + "\1\u030f\2\u0310\1\u030f\5\u0310\1\u0311\14\0\1\u02b6\161\0"+ + "\4\u0312\2\0\1\u0312\15\0\1\u0312\6\0\12\u0312\1\u0313"+ + "\13\0\1\u02b6\160\0\1\u0314\4\u0312\2\0\1\u0312\15\0"+ + "\1\u0312\6\0\12\u0315\1\u0313\13\0\1\u02b6\160\0\1\u0314"+ + "\4\u0312\2\0\1\u0312\15\0\1\u0312\6\0\12\u0316\1\u0313"+ + "\13\0\1\u02b6\160\0\1\u0314\4\u0312\2\0\1\u0312\15\0"+ + "\1\u0312\6\0\1\u0315\1\u0317\1\u0316\2\u0315\2\u0316\1\u0315"+ + "\1\u0316\1\u0315\1\u0313\13\0\1\u02b6\226\0\1\u0253\10\0"+ + "\1\u01fc\163\0\34\u0204\12\u0318\1\0\2\u0204\1\u025a\3\u0204"+ + "\1\u0206\1\u015b\1\u015c\1\u015d\2\0\2\u0204\4\0\1\u0204"+ + "\152\0\4\u0204\2\0\1\u0204\15\0\1\u0204\6\0\12\u0204"+ + "\176\0\32\u025c\1\0\12\u025c\176\0\32\u025c\1\u02ba\12\u025c"+ + "\176\0\4\u0319\2\0\1\u0319\15\0\1\u0319\6\0\12\u0319"+ + "\1\u025f\175\0\4\u031a\2\0\1\u031a\15\0\1\u031a\6\0"+ + "\12\u031a\1\u031b\175\0\4\u031c\2\0\1\u031c\15\0\1\u031c"+ + "\6\0\1\u031d\2\u031e\1\u031d\5\u031e\1\u031f\14\0\1\u0116"+ + "\161\0\4\u0320\2\0\1\u0320\15\0\1\u0320\6\0\12\u0320"+ + "\1\u02bf\13\0\1\u0116\161\0\4\u031c\2\0\1\u031c\15\0"+ + "\1\u031c\6\0\1\u031d\2\u031e\1\u031d\5\u031e\1\u031f\175\0"+ + "\1\u0157\4\u0320\2\0\1\u0320\15\0\1\u0320\6\0\12\u0321"+ + "\1\u02bf\13\0\1\u0116\160\0\1\u0157\4\u0320\2\0\1\u0320"+ + "\15\0\1\u0320\6\0\12\u0320\1\u02bf\13\0\1\u0116\160\0"+ + 
"\1\u0157\4\u0320\2\0\1\u0320\15\0\1\u0320\6\0\2\u0321"+ + "\1\u0320\2\u0321\2\u0320\1\u0321\1\u0320\1\u0321\1\u02bf\13\0"+ + "\1\u0116\226\0\1\u020e\13\0\1\u0116\214\0\12\u02c5\14\0"+ + "\1\u0116\214\0\12\u0322\14\0\1\u0116\214\0\1\u02c5\1\u0323"+ + "\1\u0322\2\u02c5\2\u0322\1\u02c5\1\u0322\1\u02c5\14\0\1\u0116"+ + "\160\0\1\334\25\216\1\u0324\4\216\1\335\12\216\175\0"+ + "\1\334\1\u0325\31\216\1\335\12\216\175\0\1\334\15\216"+ + "\1\u0326\14\216\1\335\12\216\175\0\1\334\21\216\1\u0327"+ + "\10\216\1\335\12\216\175\0\1\334\16\216\1\u0328\4\216"+ + "\1\u0329\6\216\1\335\12\216\175\0\1\334\4\216\1\u032a"+ + "\25\216\1\335\12\216\175\0\1\334\32\216\1\335\11\216"+ + "\1\u032b\175\0\1\334\4\216\1\u032c\25\216\1\335\12\216"+ + "\175\0\1\334\32\216\1\335\11\216\1\u032d\175\0\1\334"+ + "\1\u032e\2\216\1\u032f\20\216\1\u0330\5\216\1\335\12\216"+ + "\175\0\1\334\16\216\1\u0331\13\216\1\335\12\216\175\0"+ + "\1\334\11\216\1\u0332\13\216\1\u0333\4\216\1\335\12\216"+ + "\175\0\1\334\32\216\1\335\11\216\1\u0334\175\0\1\334"+ + "\23\216\1\u0335\6\216\1\335\12\216\175\0\1\334\31\216"+ + "\1\u0336\1\335\12\216\175\0\1\334\26\216\1\u0337\3\216"+ + "\1\335\12\216\175\0\1\334\11\216\1\u0338\20\216\1\335"+ + "\12\216\175\0\1\334\32\216\1\335\3\216\1\u0339\6\216"+ + "\175\0\1\334\5\216\1\u033a\24\216\1\335\12\216\175\0"+ + "\1\334\10\216\1\u033b\21\216\1\335\12\216\175\0\1\334"+ + "\3\216\1\u033c\26\216\1\335\12\216\175\0\1\334\21\216"+ + "\1\u033d\6\216\1\u033e\1\216\1\335\12\216\175\0\1\334"+ + "\12\216\1\u033f\17\216\1\335\12\216\175\0\1\334\32\216"+ + "\1\335\1\216\1\u0340\10\216\175\0\1\334\24\216\1\u0341"+ + "\5\216\1\335\12\216\175\0\1\334\24\216\1\u0342\5\216"+ + "\1\335\12\216\175\0\1\334\31\216\1\u0343\1\335\12\216"+ + "\175\0\1\363\32\252\1\127\1\u0344\11\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\1\u0345\31\252\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\32\252\1\127\10\252\1\u0346\1\252\1\0"+ 
+ "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\25\252\1\370\4\252\1\127\12\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\32\252\1\127\5\252\1\u0347"+ + "\4\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\32\252\1\127\5\252"+ + "\1\u0348\4\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\32\252\1\127"+ + "\5\252\1\u02ef\4\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\32\252"+ + "\1\127\3\252\1\u0345\6\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\12\252\1\u0349\17\252\1\127\12\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\25\252\1\u034a\4\252\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\15\252\1\u034b\14\252\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\32\252\1\127\3\252\1\u034c\6\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\2\252\1\u02e5\27\252\1\127"+ + "\12\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\1\252\1\370\30\252"+ + "\1\127\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\11\252\1\u034d"+ + "\20\252\1\127\12\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\11\252"+ + "\1\u034e\20\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\1\u034f\31\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\1\u0350\31\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\2\252\1\u0351\27\252\1\127\12\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\32\252\1\127\4\252\1\377\5\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\10\252\1\u0352\21\252\1\127\12\252\1\0"+ + 
"\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\1\u0353\31\252\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\25\252\1\u0354\4\252\1\127\12\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\32\252\1\127\4\252\1\u0345"+ + "\5\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\32\252\1\127\6\252"+ + "\1\u0345\3\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\32\252\1\127"+ + "\2\252\1\u0345\7\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\16\252"+ + "\1\u0355\13\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\32\252\1\127\1\u0356\11\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\32\252\1\127\3\252\1\u0357\6\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\24\252\1\u0358\5\252\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\151\0\4\u0359\2\0\1\u0359\15\0\1\u0359\6\0\12\u0359"+ + "\1\u02a9\175\0\4\u035a\2\0\1\u035a\15\0\1\u035a\6\0"+ + "\12\u035a\1\u035b\175\0\4\u035c\2\0\1\u035c\15\0\1\u035c"+ + "\6\0\1\u035d\2\u035e\1\u035d\5\u035e\1\u035f\14\0\1\u0307"+ + "\161\0\4\u0360\2\0\1\u0360\15\0\1\u0360\6\0\12\u0360"+ + "\1\u0361\13\0\1\u0307\160\0\1\u0362\4\u0360\2\0\1\u0360"+ + "\15\0\1\u0360\6\0\12\u0363\1\u0361\13\0\1\u0307\160\0"+ + "\1\u0362\4\u0360\2\0\1\u0360\15\0\1\u0360\6\0\12\u0364"+ + "\1\u0361\13\0\1\u0307\160\0\1\u0362\4\u0360\2\0\1\u0360"+ + "\15\0\1\u0360\6\0\1\u0363\1\u0365\1\u0364\2\u0363\2\u0364"+ + "\1\u0363\1\u0364\1\u0363\1\u0361\13\0\1\u0307\237\0\1\u01f7"+ + "\163\0\34\u01bc\12\u0366\1\0\2\u01bc\1\u01fd\3\u01bc\1\u01be"+ + "\1\0\1\u01fc\3\0\2\u01bc\4\0\1\u01bc\167\0\1\u0367"+ + "\260\0\12\u0368\11\0\1\u01fc\231\0\1\u0255\175\0\4\u0369"+ + "\2\0\1\u0369\15\0\1\u0369\6\0\12\u0369\1\u030d\175\0"+ + 
"\4\u036a\2\0\1\u036a\15\0\1\u036a\6\0\12\u036a\1\u036b"+ + "\175\0\4\u036c\2\0\1\u036c\15\0\1\u036c\6\0\12\u036c"+ + "\1\u036d\13\0\1\u02b6\160\0\1\u0314\4\u036c\2\0\1\u036c"+ + "\15\0\1\u036c\6\0\12\u036e\1\u036d\13\0\1\u02b6\160\0"+ + "\1\u0314\4\u036c\2\0\1\u036c\15\0\1\u036c\6\0\12\u036f"+ + "\1\u036d\13\0\1\u02b6\160\0\1\u0314\4\u036c\2\0\1\u036c"+ + "\15\0\1\u036c\6\0\1\u036e\1\u0370\1\u036f\2\u036e\2\u036f"+ + "\1\u036e\1\u036f\1\u036e\1\u036d\13\0\1\u02b6\161\0\4\u0371"+ + "\2\0\1\u0371\15\0\1\u0371\6\0\12\u0371\1\u0313\13\0"+ + "\1\u02b6\161\0\4\u030e\2\0\1\u030e\15\0\1\u030e\6\0"+ + "\1\u030f\2\u0310\1\u030f\5\u0310\1\u0311\231\0\1\u0372\2\u0373"+ + "\1\u0372\5\u0373\1\u0374\175\0\1\u0314\4\u0371\2\0\1\u0371"+ + "\15\0\1\u0371\6\0\12\u0375\1\u0313\13\0\1\u02b6\160\0"+ + "\1\u0314\4\u0371\2\0\1\u0371\15\0\1\u0371\6\0\12\u0371"+ + "\1\u0313\13\0\1\u02b6\160\0\1\u0314\4\u0371\2\0\1\u0371"+ + "\15\0\1\u0371\6\0\2\u0375\1\u0371\2\u0375\2\u0371\1\u0375"+ + "\1\u0371\1\u0375\1\u0313\13\0\1\u02b6\160\0\34\u0204\12\u0376"+ + "\1\0\2\u0204\1\u025a\3\u0204\1\u0206\1\u015b\1\u015c\1\u015d"+ + "\2\0\2\u0204\4\0\1\u0204\217\0\1\u025f\175\0\4\u0377"+ + "\2\0\1\u0377\15\0\1\u0377\6\0\12\u0377\1\u031b\175\0"+ + "\4\u0378\2\0\1\u0378\15\0\1\u0378\6\0\12\u0378\1\u0379"+ + "\175\0\4\u037a\2\0\1\u037a\15\0\1\u037a\6\0\12\u037a"+ + "\1\u037b\13\0\1\u0116\160\0\1\u0157\4\u037a\2\0\1\u037a"+ + "\15\0\1\u037a\6\0\12\u037c\1\u037b\13\0\1\u0116\160\0"+ + "\1\u0157\4\u037a\2\0\1\u037a\15\0\1\u037a\6\0\12\u037d"+ + "\1\u037b\13\0\1\u0116\160\0\1\u0157\4\u037a\2\0\1\u037a"+ + "\15\0\1\u037a\6\0\1\u037c\1\u037e\1\u037d\2\u037c\2\u037d"+ + "\1\u037c\1\u037d\1\u037c\1\u037b\13\0\1\u0116\161\0\4\u037f"+ + "\2\0\1\u037f\15\0\1\u037f\6\0\12\u037f\1\u02bf\13\0"+ + "\1\u0116\160\0\1\u0157\4\u037f\2\0\1\u037f\15\0\1\u037f"+ + "\6\0\12\u037f\1\u02bf\13\0\1\u0116\242\0\1\u0116\214\0"+ + "\2\u0322\1\0\2\u0322\2\0\1\u0322\1\0\1\u0322\14\0"+ + 
"\1\u0116\160\0\1\334\1\216\1\u0380\30\216\1\335\12\216"+ + "\175\0\1\334\32\216\1\335\1\u0381\11\216\175\0\1\334"+ + "\6\216\1\u0382\23\216\1\335\12\216\175\0\1\334\32\216"+ + "\1\335\7\216\1\u0383\2\216\175\0\1\334\32\216\1\335"+ + "\10\216\1\u019e\1\216\175\0\1\334\32\216\1\335\5\216"+ + "\1\u019e\4\216\175\0\1\334\26\216\1\u0384\3\216\1\335"+ + "\12\216\175\0\1\334\1\216\1\u0385\30\216\1\335\12\216"+ + "\175\0\1\334\26\216\1\u0386\3\216\1\335\12\216\175\0"+ + "\1\334\32\216\1\335\1\216\1\u0387\10\216\175\0\1\334"+ + "\1\u0388\27\216\1\u0389\1\216\1\335\12\216\175\0\1\334"+ + "\4\216\1\u038a\25\216\1\335\12\216\175\0\1\334\25\216"+ + "\1\u038b\4\216\1\335\12\216\175\0\1\334\32\216\1\335"+ + "\1\u038c\11\216\175\0\1\334\32\216\1\335\2\216\1\u0123"+ + "\7\216\175\0\1\334\32\216\1\335\3\216\1\u038d\6\216"+ + "\175\0\1\334\1\u038e\1\216\1\u038f\27\216\1\335\12\216"+ + "\175\0\1\334\1\u0383\31\216\1\335\12\216\175\0\1\334"+ + "\32\216\1\335\2\216\1\u0390\7\216\175\0\1\334\32\216"+ + "\1\335\2\216\1\u0391\7\216\175\0\1\334\15\216\1\u0392"+ + "\14\216\1\335\12\216\175\0\1\334\32\216\1\335\5\216"+ + "\1\u0393\4\216\175\0\1\334\32\216\1\335\7\216\1\u0394"+ + "\2\216\175\0\1\334\32\216\1\335\11\216\1\u0395\175\0"+ + "\1\334\1\216\1\u0396\30\216\1\335\12\216\175\0\1\334"+ + "\32\216\1\335\3\216\1\u0397\6\216\175\0\1\334\32\216"+ + "\1\335\1\216\1\u0398\10\216\175\0\1\334\32\216\1\335"+ + "\1\216\1\u0399\10\216\175\0\1\334\24\216\1\u039a\5\216"+ + "\1\335\12\216\175\0\1\334\32\216\1\335\6\216\1\u039b"+ + "\3\216\175\0\1\334\32\216\1\335\3\216\1\u039c\6\216"+ + "\175\0\1\334\25\216\1\u039d\4\216\1\335\12\216\175\0"+ + "\1\363\3\252\1\u039e\26\252\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\2\252\1\370\27\252\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\6\252\1\u0103\23\252\1\127\12\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + 
"\4\0\2\124\150\0\1\363\1\252\1\u02f9\30\252\1\127"+ + "\12\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\3\252\1\u039f\26\252"+ + "\1\127\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\32\252\1\127"+ + "\6\252\1\u03a0\3\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\32\252"+ + "\1\127\6\252\1\u03a1\3\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\32\252\1\127\7\252\1\u03a2\2\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\1\u03a3\31\252\1\127\12\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\32\252\1\127\4\252\1\u03a4\5\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\32\252\1\127\4\252\1\u03a5\5\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\26\252\1\u03a6\3\252\1\127\12\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\30\252\1\u03a7\1\252\1\127"+ + "\12\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\11\252\1\u0145\20\252"+ + "\1\127\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\32\252\1\127"+ + "\2\252\1\u03a8\7\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\12\252"+ + "\1\u03a9\17\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\17\252\1\u0100\12\252\1\127\12\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\32\252\1\127\4\252\1\u03aa\5\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\32\252\1\127\6\252\1\u0148\3\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\30\252\1\u03ab\1\252\1\127\12\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\30\252\1\u03ac\1\252\1\127"+ + "\12\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + 
"\1\124\4\0\2\124\216\0\1\u02a9\175\0\4\u03ad\2\0"+ + "\1\u03ad\15\0\1\u03ad\6\0\12\u03ad\1\u035b\175\0\4\u03ae"+ + "\2\0\1\u03ae\15\0\1\u03ae\6\0\12\u03ae\1\u03af\175\0"+ + "\4\u03b0\2\0\1\u03b0\15\0\1\u03b0\6\0\12\u03b0\1\u03b1"+ + "\13\0\1\u0307\160\0\1\u0362\4\u03b0\2\0\1\u03b0\15\0"+ + "\1\u03b0\6\0\12\u03b2\1\u03b1\13\0\1\u0307\160\0\1\u0362"+ + "\4\u03b0\2\0\1\u03b0\15\0\1\u03b0\6\0\12\u03b3\1\u03b1"+ + "\13\0\1\u0307\160\0\1\u0362\4\u03b0\2\0\1\u03b0\15\0"+ + "\1\u03b0\6\0\1\u03b2\1\u03b4\1\u03b3\2\u03b2\2\u03b3\1\u03b2"+ + "\1\u03b3\1\u03b2\1\u03b1\13\0\1\u0307\161\0\4\u03b5\2\0"+ + "\1\u03b5\15\0\1\u03b5\6\0\12\u03b5\1\u0361\13\0\1\u0307"+ + "\161\0\4\u035c\2\0\1\u035c\15\0\1\u035c\6\0\1\u035d"+ + "\2\u035e\1\u035d\5\u035e\1\u035f\231\0\1\u03b6\2\u03b7\1\u03b6"+ + "\5\u03b7\1\u03b8\175\0\1\u0362\4\u03b5\2\0\1\u03b5\15\0"+ + "\1\u03b5\6\0\12\u03b9\1\u0361\13\0\1\u0307\160\0\1\u0362"+ + "\4\u03b5\2\0\1\u03b5\15\0\1\u03b5\6\0\12\u03b5\1\u0361"+ + "\13\0\1\u0307\160\0\1\u0362\4\u03b5\2\0\1\u03b5\15\0"+ + "\1\u03b5\6\0\2\u03b9\1\u03b5\2\u03b9\2\u03b5\1\u03b9\1\u03b5"+ + "\1\u03b9\1\u0361\13\0\1\u0307\160\0\34\u01bc\12\u03ba\1\0"+ + "\2\u01bc\1\u01fd\3\u01bc\1\u01be\1\0\1\u01fc\3\0\2\u01bc"+ + "\4\0\1\u01bc\155\0\1\u03bb\272\0\12\u03bc\11\0\1\u01fc"+ + "\164\0\4\u03bd\2\0\1\u03bd\15\0\1\u03bd\6\0\12\u03bd"+ + "\1\u030d\175\0\4\u03be\2\0\1\u03be\15\0\1\u03be\6\0"+ + "\12\u03be\1\u03bf\175\0\4\u03c0\2\0\1\u03c0\15\0\1\u03c0"+ + "\6\0\1\u03c1\2\u03c2\1\u03c1\5\u03c2\1\u03c3\14\0\1\u02b6"+ + "\161\0\4\u03c4\2\0\1\u03c4\15\0\1\u03c4\6\0\12\u03c4"+ + "\1\u036d\13\0\1\u02b6\161\0\4\u03c0\2\0\1\u03c0\15\0"+ + "\1\u03c0\6\0\1\u03c1\2\u03c2\1\u03c1\5\u03c2\1\u03c3\175\0"+ + "\1\u0314\4\u03c4\2\0\1\u03c4\15\0\1\u03c4\6\0\12\u03c5"+ + "\1\u036d\13\0\1\u02b6\160\0\1\u0314\4\u03c4\2\0\1\u03c4"+ + "\15\0\1\u03c4\6\0\12\u03c4\1\u036d\13\0\1\u02b6\160\0"+ + "\1\u0314\4\u03c4\2\0\1\u03c4\15\0\1\u03c4\6\0\2\u03c5"+ + 
"\1\u03c4\2\u03c5\2\u03c4\1\u03c5\1\u03c4\1\u03c5\1\u036d\13\0"+ + "\1\u02b6\161\0\4\u03c6\2\0\1\u03c6\15\0\1\u03c6\6\0"+ + "\12\u03c6\1\u0313\13\0\1\u02b6\160\0\1\u03c7\33\0\12\u0373"+ + "\175\0\1\u03c7\33\0\12\u03c8\175\0\1\u03c7\33\0\1\u0373"+ + "\1\u03c9\1\u03c8\2\u0373\2\u03c8\1\u0373\1\u03c8\1\u0373\175\0"+ + "\1\u0314\4\u03c6\2\0\1\u03c6\15\0\1\u03c6\6\0\12\u03c6"+ + "\1\u0313\13\0\1\u02b6\160\0\34\u0204\12\u03ca\1\0\2\u0204"+ + "\1\u025a\3\u0204\1\u0206\1\u015b\1\u015c\1\u015d\2\0\2\u0204"+ + "\4\0\1\u0204\152\0\4\u03cb\2\0\1\u03cb\15\0\1\u03cb"+ + "\6\0\12\u03cb\1\u031b\175\0\4\u03cc\2\0\1\u03cc\15\0"+ + "\1\u03cc\6\0\12\u03cc\1\u03cd\175\0\4\u03ce\2\0\1\u03ce"+ + "\15\0\1\u03ce\6\0\1\u03cf\2\u03d0\1\u03cf\5\u03d0\1\u03d1"+ + "\14\0\1\u0116\161\0\4\u03d2\2\0\1\u03d2\15\0\1\u03d2"+ + "\6\0\12\u03d2\1\u037b\13\0\1\u0116\161\0\4\u03ce\2\0"+ + "\1\u03ce\15\0\1\u03ce\6\0\1\u03cf\2\u03d0\1\u03cf\5\u03d0"+ + "\1\u03d1\175\0\1\u0157\4\u03d2\2\0\1\u03d2\15\0\1\u03d2"+ + "\6\0\12\u03d3\1\u037b\13\0\1\u0116\160\0\1\u0157\4\u03d2"+ + "\2\0\1\u03d2\15\0\1\u03d2\6\0\12\u03d2\1\u037b\13\0"+ + "\1\u0116\160\0\1\u0157\4\u03d2\2\0\1\u03d2\15\0\1\u03d2"+ + "\6\0\2\u03d3\1\u03d2\2\u03d3\2\u03d2\1\u03d3\1\u03d2\1\u03d3"+ + "\1\u037b\13\0\1\u0116\226\0\1\u02bf\13\0\1\u0116\160\0"+ + "\1\334\32\216\1\335\1\u03d4\11\216\175\0\1\334\1\u03d5"+ + "\31\216\1\335\12\216\175\0\1\334\32\216\1\335\10\216"+ + "\1\u03d6\1\216\175\0\1\334\25\216\1\u015e\4\216\1\335"+ + "\12\216\175\0\1\334\32\216\1\335\5\216\1\u03d7\4\216"+ + "\175\0\1\334\32\216\1\335\5\216\1\u03d8\4\216\175\0"+ + "\1\334\32\216\1\335\5\216\1\u038d\4\216\175\0\1\334"+ + "\32\216\1\335\3\216\1\u03d5\6\216\175\0\1\334\12\216"+ + "\1\u03d9\17\216\1\335\12\216\175\0\1\334\25\216\1\u03da"+ + "\4\216\1\335\12\216\175\0\1\334\15\216\1\u03db\14\216"+ + "\1\335\12\216\175\0\1\334\32\216\1\335\3\216\1\u03dc"+ + "\6\216\175\0\1\334\2\216\1\u0383\27\216\1\335\12\216"+ + "\175\0\1\334\1\216\1\u015e\30\216\1\335\12\216\175\0"+ + 
"\1\334\11\216\1\u03dd\20\216\1\335\12\216\175\0\1\334"+ + "\11\216\1\u03de\20\216\1\335\12\216\175\0\1\334\1\u03df"+ + "\31\216\1\335\12\216\175\0\1\334\1\u03e0\31\216\1\335"+ + "\12\216\175\0\1\334\2\216\1\u03e1\27\216\1\335\12\216"+ + "\175\0\1\334\32\216\1\335\4\216\1\u0165\5\216\175\0"+ + "\1\334\10\216\1\u03e2\21\216\1\335\12\216\175\0\1\334"+ + "\1\u03e3\31\216\1\335\12\216\175\0\1\334\25\216\1\u03e4"+ + "\4\216\1\335\12\216\175\0\1\334\32\216\1\335\4\216"+ + "\1\u03d5\5\216\175\0\1\334\32\216\1\335\6\216\1\u03d5"+ + "\3\216\175\0\1\334\32\216\1\335\2\216\1\u03d5\7\216"+ + "\175\0\1\334\16\216\1\u03e5\13\216\1\335\12\216\175\0"+ + "\1\334\32\216\1\335\1\u03e6\11\216\175\0\1\334\32\216"+ + "\1\335\3\216\1\u03e7\6\216\175\0\1\334\24\216\1\u03e8"+ + "\5\216\1\335\12\216\175\0\1\363\1\u03e9\31\252\1\127"+ + "\12\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\32\252\1\127\11\252"+ + "\1\u02ef\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\1\u03ea\31\252\1\127"+ + "\12\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\7\252\1\u03eb\22\252"+ + "\1\127\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\1\u03ec\31\252"+ + "\1\127\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\32\252\1\127"+ + "\6\252\1\u03ed\3\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\25\252"+ + "\1\u03ee\4\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\1\u03ef\31\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\32\252\1\127\6\252\1\u03f0\3\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\1\u03f1\31\252\1\127\12\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\32\252\1\127\6\252\1\u0144\3\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + 
"\150\0\1\363\12\252\1\u03f2\17\252\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\1\u03f3\31\252\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\10\252\1\u03f4\21\252\1\127\12\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\31\252\1\u03f5\1\127\12\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\151\0\4\u03f6\2\0\1\u03f6\15\0\1\u03f6"+ + "\6\0\12\u03f6\1\u035b\175\0\4\u03f7\2\0\1\u03f7\15\0"+ + "\1\u03f7\6\0\12\u03f7\1\u03f8\175\0\4\u03f9\2\0\1\u03f9"+ + "\15\0\1\u03f9\6\0\1\u03fa\2\u03fb\1\u03fa\5\u03fb\1\u03fc"+ + "\14\0\1\u0307\161\0\4\u03fd\2\0\1\u03fd\15\0\1\u03fd"+ + "\6\0\12\u03fd\1\u03b1\13\0\1\u0307\161\0\4\u03f9\2\0"+ + "\1\u03f9\15\0\1\u03f9\6\0\1\u03fa\2\u03fb\1\u03fa\5\u03fb"+ + "\1\u03fc\175\0\1\u0362\4\u03fd\2\0\1\u03fd\15\0\1\u03fd"+ + "\6\0\12\u03fe\1\u03b1\13\0\1\u0307\160\0\1\u0362\4\u03fd"+ + "\2\0\1\u03fd\15\0\1\u03fd\6\0\12\u03fd\1\u03b1\13\0"+ + "\1\u0307\160\0\1\u0362\4\u03fd\2\0\1\u03fd\15\0\1\u03fd"+ + "\6\0\2\u03fe\1\u03fd\2\u03fe\2\u03fd\1\u03fe\1\u03fd\1\u03fe"+ + "\1\u03b1\13\0\1\u0307\161\0\4\u03ff\2\0\1\u03ff\15\0"+ + "\1\u03ff\6\0\12\u03ff\1\u0361\13\0\1\u0307\160\0\1\u0400"+ + "\33\0\12\u03b7\175\0\1\u0400\33\0\12\u0401\175\0\1\u0400"+ + "\33\0\1\u03b7\1\u0402\1\u0401\2\u03b7\2\u0401\1\u03b7\1\u0401"+ + "\1\u03b7\175\0\1\u0362\4\u03ff\2\0\1\u03ff\15\0\1\u03ff"+ + "\6\0\12\u03ff\1\u0361\13\0\1\u0307\160\0\46\u01bc\1\0"+ + "\2\u01bc\1\u01fd\3\u01bc\1\u01be\1\0\1\u01fc\3\0\2\u01bc"+ + "\4\0\1\u01bc\235\0\1\u0403\212\0\12\u0404\11\0\1\u01fc"+ + "\231\0\1\u030d\175\0\4\u0405\2\0\1\u0405\15\0\1\u0405"+ + "\6\0\12\u0405\1\u03bf\175\0\4\u0406\2\0\1\u0406\15\0"+ + "\1\u0406\6\0\12\u0406\1\u0407\175\0\4\u0408\2\0\1\u0408"+ + "\15\0\1\u0408\6\0\12\u0408\1\u0409\13\0\1\u02b6\160\0"+ + "\1\u0314\4\u0408\2\0\1\u0408\15\0\1\u0408\6\0\12\u040a"+ + "\1\u0409\13\0\1\u02b6\160\0\1\u0314\4\u0408\2\0\1\u0408"+ + 
"\15\0\1\u0408\6\0\12\u040b\1\u0409\13\0\1\u02b6\160\0"+ + "\1\u0314\4\u0408\2\0\1\u0408\15\0\1\u0408\6\0\1\u040a"+ + "\1\u040c\1\u040b\2\u040a\2\u040b\1\u040a\1\u040b\1\u040a\1\u0409"+ + "\13\0\1\u02b6\161\0\4\u040d\2\0\1\u040d\15\0\1\u040d"+ + "\6\0\12\u040d\1\u036d\13\0\1\u02b6\160\0\1\u0314\4\u040d"+ + "\2\0\1\u040d\15\0\1\u040d\6\0\12\u040d\1\u036d\13\0"+ + "\1\u02b6\226\0\1\u0313\13\0\1\u02b6\214\0\1\u040e\2\u040f"+ + "\1\u040e\5\u040f\1\u0410\175\0\1\u03c7\242\0\1\u03c7\33\0"+ + "\2\u03c8\1\0\2\u03c8\2\0\1\u03c8\1\0\1\u03c8\175\0"+ + "\34\u0204\12\u0411\1\0\2\u0204\1\u025a\3\u0204\1\u0206\1\u015b"+ + "\1\u015c\1\u015d\2\0\2\u0204\4\0\1\u0204\217\0\1\u031b"+ + "\175\0\4\u0412\2\0\1\u0412\15\0\1\u0412\6\0\12\u0412"+ + "\1\u03cd\175\0\4\u0413\2\0\1\u0413\15\0\1\u0413\6\0"+ + "\1\u0414\2\u0415\1\u0414\5\u0415\1\u0416\1\u0417\175\0\4\u0418"+ + "\2\0\1\u0418\15\0\1\u0418\6\0\12\u0418\1\u0419\13\0"+ + "\1\u0116\160\0\1\u0157\4\u0418\2\0\1\u0418\15\0\1\u0418"+ + "\6\0\12\u041a\1\u0419\13\0\1\u0116\160\0\1\u0157\4\u0418"+ + "\2\0\1\u0418\15\0\1\u0418\6\0\12\u041b\1\u0419\13\0"+ + "\1\u0116\160\0\1\u0157\4\u0418\2\0\1\u0418\15\0\1\u0418"+ + "\6\0\1\u041a\1\u041c\1\u041b\2\u041a\2\u041b\1\u041a\1\u041b"+ + "\1\u041a\1\u0419\13\0\1\u0116\161\0\4\u041d\2\0\1\u041d"+ + "\15\0\1\u041d\6\0\12\u041d\1\u037b\13\0\1\u0116\160\0"+ + "\1\u0157\4\u041d\2\0\1\u041d\15\0\1\u041d\6\0\12\u041d"+ + "\1\u037b\13\0\1\u0116\160\0\1\334\3\216\1\u041e\26\216"+ + "\1\335\12\216\175\0\1\334\2\216\1\u015e\27\216\1\335"+ + "\12\216\175\0\1\334\6\216\1\u0169\23\216\1\335\12\216"+ + "\175\0\1\334\1\216\1\u0397\30\216\1\335\12\216\175\0"+ + "\1\334\3\216\1\u041f\26\216\1\335\12\216\175\0\1\334"+ + "\32\216\1\335\6\216\1\u0420\3\216\175\0\1\334\32\216"+ + "\1\335\6\216\1\u0421\3\216\175\0\1\334\32\216\1\335"+ + "\7\216\1\u0422\2\216\175\0\1\334\1\u0423\31\216\1\335"+ + "\12\216\175\0\1\334\32\216\1\335\4\216\1\u0424\5\216"+ + "\175\0\1\334\32\216\1\335\4\216\1\u0425\5\216\175\0"+ + 
"\1\334\26\216\1\u0426\3\216\1\335\12\216\175\0\1\334"+ + "\30\216\1\u0427\1\216\1\335\12\216\175\0\1\334\11\216"+ + "\1\u0198\20\216\1\335\12\216\175\0\1\334\32\216\1\335"+ + "\2\216\1\u0428\7\216\175\0\1\334\12\216\1\u0429\17\216"+ + "\1\335\12\216\175\0\1\334\17\216\1\u0166\12\216\1\335"+ + "\12\216\175\0\1\334\32\216\1\335\4\216\1\u042a\5\216"+ + "\175\0\1\334\32\216\1\335\6\216\1\u019b\3\216\175\0"+ + "\1\334\30\216\1\u042b\1\216\1\335\12\216\175\0\1\334"+ + "\30\216\1\u042c\1\216\1\335\12\216\175\0\1\363\32\252"+ + "\1\127\1\u042d\11\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\32\252"+ + "\1\127\10\252\1\u02e5\1\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\15\252\1\275\14\252\1\127\12\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\32\252\1\127\1\252\1\u042e\10\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\32\252\1\127\3\252\1\u0148\6\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\30\252\1\u042f\1\252\1\127\12\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\32\252\1\127\1\252\1\u0430"+ + "\10\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\6\252\1\u0431\23\252"+ + "\1\127\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\32\252\1\127"+ + "\5\252\1\u0432\4\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\22\252"+ + "\1\370\7\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\32\252\1\127\5\252\1\u0433\4\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\32\252\1\127\1\252\1\275\10\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\13\252\1\u0434\16\252\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\216\0\1\u035b\175\0\4\u0435\2\0\1\u0435\15\0"+ + 
"\1\u0435\6\0\12\u0435\1\u03f8\175\0\4\u0436\2\0\1\u0436"+ + "\15\0\1\u0436\6\0\12\u0436\1\u0437\175\0\4\u0438\2\0"+ + "\1\u0438\15\0\1\u0438\6\0\12\u0438\1\u0439\13\0\1\u0307"+ + "\160\0\1\u0362\4\u0438\2\0\1\u0438\15\0\1\u0438\6\0"+ + "\12\u043a\1\u0439\13\0\1\u0307\160\0\1\u0362\4\u0438\2\0"+ + "\1\u0438\15\0\1\u0438\6\0\12\u043b\1\u0439\13\0\1\u0307"+ + "\160\0\1\u0362\4\u0438\2\0\1\u0438\15\0\1\u0438\6\0"+ + "\1\u043a\1\u043c\1\u043b\2\u043a\2\u043b\1\u043a\1\u043b\1\u043a"+ + "\1\u0439\13\0\1\u0307\161\0\4\u043d\2\0\1\u043d\15\0"+ + "\1\u043d\6\0\12\u043d\1\u03b1\13\0\1\u0307\160\0\1\u0362"+ + "\4\u043d\2\0\1\u043d\15\0\1\u043d\6\0\12\u043d\1\u03b1"+ + "\13\0\1\u0307\226\0\1\u0361\13\0\1\u0307\214\0\1\u043e"+ + "\2\u043f\1\u043e\5\u043f\1\u0440\175\0\1\u0400\242\0\1\u0400"+ + "\33\0\2\u0401\1\0\2\u0401\2\0\1\u0401\1\0\1\u0401"+ + "\176\0\1\u0441\1\0\1\u0441\5\0\1\u0441\310\0\1\u01fc"+ + "\164\0\4\u0442\2\0\1\u0442\15\0\1\u0442\6\0\12\u0442"+ + "\1\u03bf\175\0\4\u0443\2\0\1\u0443\15\0\1\u0443\6\0"+ + "\12\u0443\1\u0444\175\0\4\u0445\2\0\1\u0445\15\0\1\u0445"+ + "\6\0\1\u0446\2\u0447\1\u0446\5\u0447\1\u0448\14\0\1\u02b6"+ + "\161\0\4\u0449\2\0\1\u0449\15\0\1\u0449\6\0\12\u0449"+ + "\1\u0409\13\0\1\u02b6\161\0\4\u0445\2\0\1\u0445\15\0"+ + "\1\u0445\6\0\1\u0446\2\u0447\1\u0446\5\u0447\1\u0448\175\0"+ + "\1\u0314\4\u0449\2\0\1\u0449\15\0\1\u0449\6\0\12\u044a"+ + "\1\u0409\13\0\1\u02b6\160\0\1\u0314\4\u0449\2\0\1\u0449"+ + "\15\0\1\u0449\6\0\12\u0449\1\u0409\13\0\1\u02b6\160\0"+ + "\1\u0314\4\u0449\2\0\1\u0449\15\0\1\u0449\6\0\2\u044a"+ + "\1\u0449\2\u044a\2\u0449\1\u044a\1\u0449\1\u044a\1\u0409\13\0"+ + "\1\u02b6\226\0\1\u036d\13\0\1\u02b6\160\0\1\u044b\33\0"+ + "\12\u040f\175\0\1\u044b\33\0\12\u044c\175\0\1\u044b\33\0"+ + "\1\u040f\1\u044d\1\u044c\2\u040f\2\u044c\1\u040f\1\u044c\1\u040f"+ + "\175\0\46\u0204\1\0\2\u0204\1\u025a\3\u0204\1\u0206\1\u015b"+ + "\1\u015c\1\u015d\2\0\2\u0204\4\0\1\u0204\152\0\4\u044e"+ + 
"\2\0\1\u044e\15\0\1\u044e\6\0\12\u044e\1\u03cd\175\0"+ + "\4\u044f\2\0\1\u044f\15\0\1\u044f\6\0\12\u044f\1\u0450"+ + "\174\0\1\u0157\4\u044f\2\0\1\u044f\15\0\1\u044f\6\0"+ + "\12\u0451\1\u0450\174\0\1\u0157\4\u044f\2\0\1\u044f\15\0"+ + "\1\u044f\6\0\12\u0452\1\u0450\174\0\1\u0157\4\u044f\2\0"+ + "\1\u044f\15\0\1\u044f\6\0\1\u0451\1\u0453\1\u0452\2\u0451"+ + "\2\u0452\1\u0451\1\u0452\1\u0451\1\u0450\175\0\4\u0454\2\0"+ + "\1\u0454\15\0\1\u0454\6\0\12\u0454\14\0\1\u0116\161\0"+ + "\4\u0455\2\0\1\u0455\15\0\1\u0455\6\0\12\u0455\1\u0419"+ + "\13\0\1\u0116\161\0\4\u0454\2\0\1\u0454\15\0\1\u0454"+ + "\6\0\12\u0454\175\0\1\u0157\4\u0455\2\0\1\u0455\15\0"+ + "\1\u0455\6\0\12\u0456\1\u0419\13\0\1\u0116\160\0\1\u0157"+ + "\4\u0455\2\0\1\u0455\15\0\1\u0455\6\0\12\u0455\1\u0419"+ + "\13\0\1\u0116\160\0\1\u0157\4\u0455\2\0\1\u0455\15\0"+ + "\1\u0455\6\0\2\u0456\1\u0455\2\u0456\2\u0455\1\u0456\1\u0455"+ + "\1\u0456\1\u0419\13\0\1\u0116\226\0\1\u037b\13\0\1\u0116"+ + "\160\0\1\334\1\u0457\31\216\1\335\12\216\175\0\1\334"+ + "\32\216\1\335\11\216\1\u038d\175\0\1\334\1\u0458\31\216"+ + "\1\335\12\216\175\0\1\334\7\216\1\u0459\22\216\1\335"+ + "\12\216\175\0\1\334\1\u045a\31\216\1\335\12\216\175\0"+ + "\1\334\32\216\1\335\6\216\1\u045b\3\216\175\0\1\334"+ + "\25\216\1\u045c\4\216\1\335\12\216\175\0\1\334\1\u045d"+ + "\31\216\1\335\12\216\175\0\1\334\32\216\1\335\6\216"+ + "\1\u045e\3\216\175\0\1\334\1\u045f\31\216\1\335\12\216"+ + "\175\0\1\334\32\216\1\335\6\216\1\u0197\3\216\175\0"+ + "\1\334\12\216\1\u0460\17\216\1\335\12\216\175\0\1\334"+ + "\1\u0461\31\216\1\335\12\216\175\0\1\334\10\216\1\u0462"+ + "\21\216\1\335\12\216\175\0\1\334\31\216\1\u0463\1\335"+ + "\12\216\175\0\1\363\24\252\1\u0464\5\252\1\127\12\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\2\252\1\u0465\27\252\1\127"+ + "\12\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\3\252\1\u0466\26\252"+ + "\1\127\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + 
"\3\0\1\124\4\0\2\124\150\0\1\363\3\252\1\u0467"+ + "\26\252\1\127\12\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\32\252"+ + "\1\127\1\252\1\u0468\10\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\3\252\1\u0469\26\252\1\127\12\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\1\u046a\31\252\1\127\12\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\26\252\1\u046b\3\252\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\151\0\4\u046c\2\0\1\u046c\15\0\1\u046c\6\0\12\u046c"+ + "\1\u03f8\175\0\4\u046d\2\0\1\u046d\15\0\1\u046d\6\0"+ + "\12\u046d\1\u046e\175\0\4\u046f\2\0\1\u046f\15\0\1\u046f"+ + "\6\0\1\u0470\2\u0471\1\u0470\5\u0471\1\u0472\14\0\1\u0307"+ + "\161\0\4\u0473\2\0\1\u0473\15\0\1\u0473\6\0\12\u0473"+ + "\1\u0439\13\0\1\u0307\161\0\4\u046f\2\0\1\u046f\15\0"+ + "\1\u046f\6\0\1\u0470\2\u0471\1\u0470\5\u0471\1\u0472\175\0"+ + "\1\u0362\4\u0473\2\0\1\u0473\15\0\1\u0473\6\0\12\u0474"+ + "\1\u0439\13\0\1\u0307\160\0\1\u0362\4\u0473\2\0\1\u0473"+ + "\15\0\1\u0473\6\0\12\u0473\1\u0439\13\0\1\u0307\160\0"+ + "\1\u0362\4\u0473\2\0\1\u0473\15\0\1\u0473\6\0\2\u0474"+ + "\1\u0473\2\u0474\2\u0473\1\u0474\1\u0473\1\u0474\1\u0439\13\0"+ + "\1\u0307\226\0\1\u03b1\13\0\1\u0307\160\0\1\u0475\33\0"+ + "\12\u043f\175\0\1\u0475\33\0\12\u0476\175\0\1\u0475\33\0"+ + "\1\u043f\1\u0477\1\u0476\2\u043f\2\u0476\1\u043f\1\u0476\1\u043f"+ + "\255\0\1\u015d\230\0\1\u03bf\175\0\4\u0478\2\0\1\u0478"+ + "\15\0\1\u0478\6\0\12\u0478\1\u0444\175\0\4\u0479\2\0"+ + "\1\u0479\15\0\1\u0479\6\0\12\u0479\1\u047a\175\0\4\u047b"+ + "\2\0\1\u047b\15\0\1\u047b\6\0\12\u047b\1\u047c\13\0"+ + "\1\u02b6\160\0\1\u0314\4\u047b\2\0\1\u047b\15\0\1\u047b"+ + "\6\0\12\u047d\1\u047c\13\0\1\u02b6\160\0\1\u0314\4\u047b"+ + "\2\0\1\u047b\15\0\1\u047b\6\0\12\u047e\1\u047c\13\0"+ + "\1\u02b6\160\0\1\u0314\4\u047b\2\0\1\u047b\15\0\1\u047b"+ + 
"\6\0\1\u047d\1\u047f\1\u047e\2\u047d\2\u047e\1\u047d\1\u047e"+ + "\1\u047d\1\u047c\13\0\1\u02b6\161\0\4\u0480\2\0\1\u0480"+ + "\15\0\1\u0480\6\0\12\u0480\1\u0409\13\0\1\u02b6\160\0"+ + "\1\u0314\4\u0480\2\0\1\u0480\15\0\1\u0480\6\0\12\u0480"+ + "\1\u0409\13\0\1\u02b6\214\0\1\u0481\2\u0482\1\u0481\5\u0482"+ + "\1\u0483\175\0\1\u044b\242\0\1\u044b\33\0\2\u044c\1\0"+ + "\2\u044c\2\0\1\u044c\1\0\1\u044c\243\0\1\u03cd\175\0"+ + "\4\u0484\2\0\1\u0484\15\0\1\u0484\6\0\12\u0484\1\u0450"+ + "\175\0\4\u0454\2\0\1\u0454\15\0\1\u0454\6\0\12\u0454"+ + "\1\u0322\174\0\1\u0157\4\u0484\2\0\1\u0484\15\0\1\u0484"+ + "\6\0\12\u0485\1\u0450\174\0\1\u0157\4\u0484\2\0\1\u0484"+ + "\15\0\1\u0484\6\0\12\u0484\1\u0450\174\0\1\u0157\4\u0484"+ + "\2\0\1\u0484\15\0\1\u0484\6\0\2\u0485\1\u0484\2\u0485"+ + "\2\u0484\1\u0485\1\u0484\1\u0485\1\u0450\175\0\4\u0486\2\0"+ + "\1\u0486\15\0\1\u0486\6\0\12\u0486\14\0\1\u0116\161\0"+ + "\4\u0487\2\0\1\u0487\15\0\1\u0487\6\0\12\u0487\1\u0419"+ + "\13\0\1\u0116\160\0\1\u0157\4\u0487\2\0\1\u0487\15\0"+ + "\1\u0487\6\0\12\u0487\1\u0419\13\0\1\u0116\160\0\1\334"+ + "\32\216\1\335\1\u0488\11\216\175\0\1\334\32\216\1\335"+ + "\10\216\1\u0383\1\216\175\0\1\334\15\216\1\u0123\14\216"+ + "\1\335\12\216\175\0\1\334\32\216\1\335\1\216\1\u0489"+ + "\10\216\175\0\1\334\32\216\1\335\3\216\1\u019b\6\216"+ + "\175\0\1\334\30\216\1\u048a\1\216\1\335\12\216\175\0"+ + "\1\334\32\216\1\335\1\216\1\u048b\10\216\175\0\1\334"+ + "\6\216\1\u048c\23\216\1\335\12\216\175\0\1\334\32\216"+ + "\1\335\5\216\1\u048d\4\216\175\0\1\334\22\216\1\u015e"+ + "\7\216\1\335\12\216\175\0\1\334\32\216\1\335\5\216"+ + "\1\u048e\4\216\175\0\1\334\32\216\1\335\1\216\1\u0123"+ + "\10\216\175\0\1\334\13\216\1\u048f\16\216\1\335\12\216"+ + "\175\0\1\363\32\252\1\127\11\252\1\u0490\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\150\0\1\363\32\252\1\127\7\252\1\u0491\2\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\32\252\1\127\11\252\1\275\1\0"+ + 
"\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\150\0\1\363\3\252\1\u0492\26\252\1\127\12\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\32\252\1\127\4\252\1\u0493"+ + "\5\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\16\252\1\u0494\13\252"+ + "\1\127\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\26\252\1\u0495"+ + "\3\252\1\127\12\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\32\252"+ + "\1\127\7\252\1\u0496\2\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\216\0\1\u03f8"+ + "\175\0\4\u0497\2\0\1\u0497\15\0\1\u0497\6\0\12\u0497"+ + "\1\u046e\175\0\4\u0498\2\0\1\u0498\15\0\1\u0498\6\0"+ + "\12\u0498\1\u0499\175\0\4\u049a\2\0\1\u049a\15\0\1\u049a"+ + "\6\0\12\u049a\1\u049b\13\0\1\u0307\160\0\1\u0362\4\u049a"+ + "\2\0\1\u049a\15\0\1\u049a\6\0\12\u049c\1\u049b\13\0"+ + "\1\u0307\160\0\1\u0362\4\u049a\2\0\1\u049a\15\0\1\u049a"+ + "\6\0\12\u049d\1\u049b\13\0\1\u0307\160\0\1\u0362\4\u049a"+ + "\2\0\1\u049a\15\0\1\u049a\6\0\1\u049c\1\u049e\1\u049d"+ + "\2\u049c\2\u049d\1\u049c\1\u049d\1\u049c\1\u049b\13\0\1\u0307"+ + "\161\0\4\u049f\2\0\1\u049f\15\0\1\u049f\6\0\12\u049f"+ + "\1\u0439\13\0\1\u0307\160\0\1\u0362\4\u049f\2\0\1\u049f"+ + "\15\0\1\u049f\6\0\12\u049f\1\u0439\13\0\1\u0307\214\0"+ + "\1\u04a0\2\u04a1\1\u04a0\5\u04a1\1\u04a2\175\0\1\u0475\242\0"+ + "\1\u0475\33\0\2\u0476\1\0\2\u0476\2\0\1\u0476\1\0"+ + "\1\u0476\176\0\4\u04a3\2\0\1\u04a3\15\0\1\u04a3\6\0"+ + "\12\u04a3\1\u0444\175\0\4\u04a4\2\0\1\u04a4\15\0\1\u04a4"+ + "\6\0\12\u04a4\1\u04a5\175\0\4\u04a6\2\0\1\u04a6\15\0"+ + "\1\u04a6\6\0\1\u04a7\2\u04a8\1\u04a7\5\u04a8\1\u04a9\14\0"+ + "\1\u02b6\161\0\4\u04aa\2\0\1\u04aa\15\0\1\u04aa\6\0"+ + "\12\u04aa\1\u047c\13\0\1\u02b6\161\0\4\u04a6\2\0\1\u04a6"+ + "\15\0\1\u04a6\6\0\1\u04a7\2\u04a8\1\u04a7\5\u04a8\1\u04a9"+ + "\175\0\1\u0314\4\u04aa\2\0\1\u04aa\15\0\1\u04aa\6\0"+ + 
"\12\u04ab\1\u047c\13\0\1\u02b6\160\0\1\u0314\4\u04aa\2\0"+ + "\1\u04aa\15\0\1\u04aa\6\0\12\u04aa\1\u047c\13\0\1\u02b6"+ + "\160\0\1\u0314\4\u04aa\2\0\1\u04aa\15\0\1\u04aa\6\0"+ + "\2\u04ab\1\u04aa\2\u04ab\2\u04aa\1\u04ab\1\u04aa\1\u04ab\1\u047c"+ + "\13\0\1\u02b6\226\0\1\u0409\13\0\1\u02b6\214\0\12\u0482"+ + "\14\0\1\u02b6\214\0\12\u04ac\14\0\1\u02b6\214\0\1\u0482"+ + "\1\u04ad\1\u04ac\2\u0482\2\u04ac\1\u0482\1\u04ac\1\u0482\14\0"+ + "\1\u02b6\161\0\4\u04ae\2\0\1\u04ae\15\0\1\u04ae\6\0"+ + "\12\u04ae\1\u0450\174\0\1\u0157\4\u04ae\2\0\1\u04ae\15\0"+ + "\1\u04ae\6\0\12\u04ae\1\u0450\175\0\4\u04af\2\0\1\u04af"+ + "\15\0\1\u04af\6\0\12\u04af\14\0\1\u0116\226\0\1\u0419"+ + "\13\0\1\u0116\160\0\1\334\24\216\1\u04b0\5\216\1\335"+ + "\12\216\175\0\1\334\2\216\1\u04b1\27\216\1\335\12\216"+ + "\175\0\1\334\3\216\1\u04b2\26\216\1\335\12\216\175\0"+ + "\1\334\3\216\1\u04b3\26\216\1\335\12\216\175\0\1\334"+ + "\32\216\1\335\1\216\1\u04b4\10\216\175\0\1\334\3\216"+ + "\1\u04b5\26\216\1\335\12\216\175\0\1\334\1\u04b6\31\216"+ + "\1\335\12\216\175\0\1\334\26\216\1\u04b7\3\216\1\335"+ + "\12\216\175\0\1\363\7\252\1\u04b8\22\252\1\127\12\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\1\u04b9\31\252\1\127\12\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\32\252\1\127\1\u02e5\11\252"+ + "\1\0\3\124\1\0\2\124\1\125\3\124\3\0\1\124"+ + "\4\0\2\124\150\0\1\363\24\252\1\u04ba\5\252\1\127"+ + "\12\252\1\0\3\124\1\0\2\124\1\125\3\124\3\0"+ + "\1\124\4\0\2\124\150\0\1\363\1\252\1\u04bb\30\252"+ + "\1\127\12\252\1\0\3\124\1\0\2\124\1\125\3\124"+ + "\3\0\1\124\4\0\2\124\150\0\1\363\32\252\1\127"+ + "\2\252\1\377\7\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\6\252"+ + "\1\370\23\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\151\0\4\u04bc"+ + "\2\0\1\u04bc\15\0\1\u04bc\6\0\12\u04bc\1\u046e\175\0"+ + "\4\u04bd\2\0\1\u04bd\15\0\1\u04bd\6\0\12\u04bd\1\u04be"+ + 
"\175\0\4\u04bf\2\0\1\u04bf\15\0\1\u04bf\6\0\1\u04c0"+ + "\2\u04c1\1\u04c0\5\u04c1\1\u04c2\14\0\1\u0307\161\0\4\u04c3"+ + "\2\0\1\u04c3\15\0\1\u04c3\6\0\12\u04c3\1\u049b\13\0"+ + "\1\u0307\161\0\4\u04bf\2\0\1\u04bf\15\0\1\u04bf\6\0"+ + "\1\u04c0\2\u04c1\1\u04c0\5\u04c1\1\u04c2\175\0\1\u0362\4\u04c3"+ + "\2\0\1\u04c3\15\0\1\u04c3\6\0\12\u04c4\1\u049b\13\0"+ + "\1\u0307\160\0\1\u0362\4\u04c3\2\0\1\u04c3\15\0\1\u04c3"+ + "\6\0\12\u04c3\1\u049b\13\0\1\u0307\160\0\1\u0362\4\u04c3"+ + "\2\0\1\u04c3\15\0\1\u04c3\6\0\2\u04c4\1\u04c3\2\u04c4"+ + "\2\u04c3\1\u04c4\1\u04c3\1\u04c4\1\u049b\13\0\1\u0307\226\0"+ + "\1\u0439\13\0\1\u0307\214\0\12\u04a1\14\0\1\u0307\214\0"+ + "\12\u04c5\14\0\1\u0307\214\0\1\u04a1\1\u04c6\1\u04c5\2\u04a1"+ + "\2\u04c5\1\u04a1\1\u04c5\1\u04a1\14\0\1\u0307\226\0\1\u0444"+ + "\175\0\4\u04c7\2\0\1\u04c7\15\0\1\u04c7\6\0\12\u04c7"+ + "\1\u04a5\175\0\4\u04c8\2\0\1\u04c8\15\0\1\u04c8\6\0"+ + "\12\u04c8\1\u04c9\175\0\4\u04ca\2\0\1\u04ca\15\0\1\u04ca"+ + "\6\0\12\u04ca\1\u04cb\13\0\1\u02b6\160\0\1\u0314\4\u04ca"+ + "\2\0\1\u04ca\15\0\1\u04ca\6\0\12\u04cc\1\u04cb\13\0"+ + "\1\u02b6\160\0\1\u0314\4\u04ca\2\0\1\u04ca\15\0\1\u04ca"+ + "\6\0\12\u04cd\1\u04cb\13\0\1\u02b6\160\0\1\u0314\4\u04ca"+ + "\2\0\1\u04ca\15\0\1\u04ca\6\0\1\u04cc\1\u04ce\1\u04cd"+ + "\2\u04cc\2\u04cd\1\u04cc\1\u04cd\1\u04cc\1\u04cb\13\0\1\u02b6"+ + "\161\0\4\u04cf\2\0\1\u04cf\15\0\1\u04cf\6\0\12\u04cf"+ + "\1\u047c\13\0\1\u02b6\160\0\1\u0314\4\u04cf\2\0\1\u04cf"+ + "\15\0\1\u04cf\6\0\12\u04cf\1\u047c\13\0\1\u02b6\242\0"+ + "\1\u02b6\214\0\2\u04ac\1\0\2\u04ac\2\0\1\u04ac\1\0"+ + "\1\u04ac\14\0\1\u02b6\226\0\1\u0450\175\0\4\u0322\2\0"+ + "\1\u0322\15\0\1\u0322\6\0\12\u0322\14\0\1\u0116\160\0"+ + "\1\334\32\216\1\335\11\216\1\u04d0\175\0\1\334\32\216"+ + "\1\335\7\216\1\u04d1\2\216\175\0\1\334\32\216\1\335"+ + "\11\216\1\u0123\175\0\1\334\3\216\1\u04d2\26\216\1\335"+ + "\12\216\175\0\1\334\32\216\1\335\4\216\1\u04d3\5\216"+ + "\175\0\1\334\16\216\1\u04d4\13\216\1\335\12\216\175\0"+ + 
"\1\334\26\216\1\u04d5\3\216\1\335\12\216\175\0\1\334"+ + "\32\216\1\335\7\216\1\u04d6\2\216\175\0\1\363\32\252"+ + "\1\127\11\252\1\u04d7\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\150\0\1\363\4\252"+ + "\1\370\25\252\1\127\12\252\1\0\3\124\1\0\2\124"+ + "\1\125\3\124\3\0\1\124\4\0\2\124\150\0\1\363"+ + "\24\252\1\275\5\252\1\127\12\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\150\0"+ + "\1\363\32\252\1\127\6\252\1\275\3\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\216\0\1\u046e\175\0\4\u04d8\2\0\1\u04d8\15\0\1\u04d8"+ + "\6\0\12\u04d8\1\u04be\175\0\4\u04d9\2\0\1\u04d9\15\0"+ + "\1\u04d9\6\0\12\u04d9\1\u04da\175\0\4\u04db\2\0\1\u04db"+ + "\15\0\1\u04db\6\0\12\u04db\1\u04dc\13\0\1\u0307\160\0"+ + "\1\u0362\4\u04db\2\0\1\u04db\15\0\1\u04db\6\0\12\u04dd"+ + "\1\u04dc\13\0\1\u0307\160\0\1\u0362\4\u04db\2\0\1\u04db"+ + "\15\0\1\u04db\6\0\12\u04de\1\u04dc\13\0\1\u0307\160\0"+ + "\1\u0362\4\u04db\2\0\1\u04db\15\0\1\u04db\6\0\1\u04dd"+ + "\1\u04df\1\u04de\2\u04dd\2\u04de\1\u04dd\1\u04de\1\u04dd\1\u04dc"+ + "\13\0\1\u0307\161\0\4\u04e0\2\0\1\u04e0\15\0\1\u04e0"+ + "\6\0\12\u04e0\1\u049b\13\0\1\u0307\160\0\1\u0362\4\u04e0"+ + "\2\0\1\u04e0\15\0\1\u04e0\6\0\12\u04e0\1\u049b\13\0"+ + "\1\u0307\242\0\1\u0307\214\0\2\u04c5\1\0\2\u04c5\2\0"+ + "\1\u04c5\1\0\1\u04c5\14\0\1\u0307\161\0\4\u04e1\2\0"+ + "\1\u04e1\15\0\1\u04e1\6\0\12\u04e1\1\u04a5\175\0\4\u04e2"+ + "\2\0\1\u04e2\15\0\1\u04e2\6\0\12\u04e2\1\u04e3\175\0"+ + "\4\u04e4\2\0\1\u04e4\15\0\1\u04e4\6\0\1\u04e5\2\u04e6"+ + "\1\u04e5\5\u04e6\1\u04e7\14\0\1\u02b6\161\0\4\u04e8\2\0"+ + "\1\u04e8\15\0\1\u04e8\6\0\12\u04e8\1\u04cb\13\0\1\u02b6"+ + "\161\0\4\u04e4\2\0\1\u04e4\15\0\1\u04e4\6\0\1\u04e5"+ + "\2\u04e6\1\u04e5\5\u04e6\1\u04e7\175\0\1\u0314\4\u04e8\2\0"+ + "\1\u04e8\15\0\1\u04e8\6\0\12\u04e9\1\u04cb\13\0\1\u02b6"+ + "\160\0\1\u0314\4\u04e8\2\0\1\u04e8\15\0\1\u04e8\6\0"+ + "\12\u04e8\1\u04cb\13\0\1\u02b6\160\0\1\u0314\4\u04e8\2\0"+ + 
"\1\u04e8\15\0\1\u04e8\6\0\2\u04e9\1\u04e8\2\u04e9\2\u04e8"+ + "\1\u04e9\1\u04e8\1\u04e9\1\u04cb\13\0\1\u02b6\226\0\1\u047c"+ + "\13\0\1\u02b6\160\0\1\334\7\216\1\u04ea\22\216\1\335"+ + "\12\216\175\0\1\334\1\u04eb\31\216\1\335\12\216\175\0"+ + "\1\334\32\216\1\335\1\u0383\11\216\175\0\1\334\24\216"+ + "\1\u04ec\5\216\1\335\12\216\175\0\1\334\1\216\1\u04ed"+ + "\30\216\1\335\12\216\175\0\1\334\32\216\1\335\2\216"+ + "\1\u0165\7\216\175\0\1\334\6\216\1\u015e\23\216\1\335"+ + "\12\216\175\0\1\363\1\u04ee\31\252\1\127\12\252\1\0"+ + "\3\124\1\0\2\124\1\125\3\124\3\0\1\124\4\0"+ + "\2\124\151\0\4\u04ef\2\0\1\u04ef\15\0\1\u04ef\6\0"+ + "\12\u04ef\1\u04be\175\0\4\u04f0\2\0\1\u04f0\15\0\1\u04f0"+ + "\6\0\12\u04f0\1\u04f1\175\0\4\u04f2\2\0\1\u04f2\15\0"+ + "\1\u04f2\6\0\1\u04f3\2\u04f4\1\u04f3\5\u04f4\1\u04f5\14\0"+ + "\1\u0307\161\0\4\u04f6\2\0\1\u04f6\15\0\1\u04f6\6\0"+ + "\12\u04f6\1\u04dc\13\0\1\u0307\161\0\4\u04f2\2\0\1\u04f2"+ + "\15\0\1\u04f2\6\0\1\u04f3\2\u04f4\1\u04f3\5\u04f4\1\u04f5"+ + "\175\0\1\u0362\4\u04f6\2\0\1\u04f6\15\0\1\u04f6\6\0"+ + "\12\u04f7\1\u04dc\13\0\1\u0307\160\0\1\u0362\4\u04f6\2\0"+ + "\1\u04f6\15\0\1\u04f6\6\0\12\u04f6\1\u04dc\13\0\1\u0307"+ + "\160\0\1\u0362\4\u04f6\2\0\1\u04f6\15\0\1\u04f6\6\0"+ + "\2\u04f7\1\u04f6\2\u04f7\2\u04f6\1\u04f7\1\u04f6\1\u04f7\1\u04dc"+ + "\13\0\1\u0307\226\0\1\u049b\13\0\1\u0307\226\0\1\u04a5"+ + "\175\0\4\u04f8\2\0\1\u04f8\15\0\1\u04f8\6\0\12\u04f8"+ + "\1\u04e3\175\0\4\u04f9\2\0\1\u04f9\15\0\1\u04f9\6\0"+ + "\1\u04fa\2\u04fb\1\u04fa\5\u04fb\1\u04fc\1\u04fd\175\0\4\u04fe"+ + "\2\0\1\u04fe\15\0\1\u04fe\6\0\12\u04fe\1\u04ff\13\0"+ + "\1\u02b6\160\0\1\u0314\4\u04fe\2\0\1\u04fe\15\0\1\u04fe"+ + "\6\0\12\u0500\1\u04ff\13\0\1\u02b6\160\0\1\u0314\4\u04fe"+ + "\2\0\1\u04fe\15\0\1\u04fe\6\0\12\u0501\1\u04ff\13\0"+ + "\1\u02b6\160\0\1\u0314\4\u04fe\2\0\1\u04fe\15\0\1\u04fe"+ + "\6\0\1\u0500\1\u0502\1\u0501\2\u0500\2\u0501\1\u0500\1\u0501"+ + "\1\u0500\1\u04ff\13\0\1\u02b6\161\0\4\u0503\2\0\1\u0503"+ + 
"\15\0\1\u0503\6\0\12\u0503\1\u04cb\13\0\1\u02b6\160\0"+ + "\1\u0314\4\u0503\2\0\1\u0503\15\0\1\u0503\6\0\12\u0503"+ + "\1\u04cb\13\0\1\u02b6\160\0\1\334\32\216\1\335\11\216"+ + "\1\u0504\175\0\1\334\4\216\1\u015e\25\216\1\335\12\216"+ + "\175\0\1\334\24\216\1\u0123\5\216\1\335\12\216\175\0"+ + "\1\334\32\216\1\335\6\216\1\u0123\3\216\175\0\1\363"+ + "\32\252\1\127\5\252\1\u0505\4\252\1\0\3\124\1\0"+ + "\2\124\1\125\3\124\3\0\1\124\4\0\2\124\216\0"+ + "\1\u04be\175\0\4\u0506\2\0\1\u0506\15\0\1\u0506\6\0"+ + "\12\u0506\1\u04f1\175\0\4\u0507\2\0\1\u0507\15\0\1\u0507"+ + "\6\0\1\u0508\2\u0509\1\u0508\5\u0509\1\u050a\1\u050b\175\0"+ + "\4\u050c\2\0\1\u050c\15\0\1\u050c\6\0\12\u050c\1\u050d"+ + "\13\0\1\u0307\160\0\1\u0362\4\u050c\2\0\1\u050c\15\0"+ + "\1\u050c\6\0\12\u050e\1\u050d\13\0\1\u0307\160\0\1\u0362"+ + "\4\u050c\2\0\1\u050c\15\0\1\u050c\6\0\12\u050f\1\u050d"+ + "\13\0\1\u0307\160\0\1\u0362\4\u050c\2\0\1\u050c\15\0"+ + "\1\u050c\6\0\1\u050e\1\u0510\1\u050f\2\u050e\2\u050f\1\u050e"+ + "\1\u050f\1\u050e\1\u050d\13\0\1\u0307\161\0\4\u0511\2\0"+ + "\1\u0511\15\0\1\u0511\6\0\12\u0511\1\u04dc\13\0\1\u0307"+ + "\160\0\1\u0362\4\u0511\2\0\1\u0511\15\0\1\u0511\6\0"+ + "\12\u0511\1\u04dc\13\0\1\u0307\161\0\4\u0512\2\0\1\u0512"+ + "\15\0\1\u0512\6\0\12\u0512\1\u04e3\175\0\4\u0513\2\0"+ + "\1\u0513\15\0\1\u0513\6\0\12\u0513\1\u0514\174\0\1\u0314"+ + "\4\u0513\2\0\1\u0513\15\0\1\u0513\6\0\12\u0515\1\u0514"+ + "\174\0\1\u0314\4\u0513\2\0\1\u0513\15\0\1\u0513\6\0"+ + "\12\u0516\1\u0514\174\0\1\u0314\4\u0513\2\0\1\u0513\15\0"+ + "\1\u0513\6\0\1\u0515\1\u0517\1\u0516\2\u0515\2\u0516\1\u0515"+ + "\1\u0516\1\u0515\1\u0514\175\0\4\u0518\2\0\1\u0518\15\0"+ + "\1\u0518\6\0\12\u0518\14\0\1\u02b6\161\0\4\u0519\2\0"+ + "\1\u0519\15\0\1\u0519\6\0\12\u0519\1\u04ff\13\0\1\u02b6"+ + "\161\0\4\u0518\2\0\1\u0518\15\0\1\u0518\6\0\12\u0518"+ + "\175\0\1\u0314\4\u0519\2\0\1\u0519\15\0\1\u0519\6\0"+ + "\12\u051a\1\u04ff\13\0\1\u02b6\160\0\1\u0314\4\u0519\2\0"+ + 
"\1\u0519\15\0\1\u0519\6\0\12\u0519\1\u04ff\13\0\1\u02b6"+ + "\160\0\1\u0314\4\u0519\2\0\1\u0519\15\0\1\u0519\6\0"+ + "\2\u051a\1\u0519\2\u051a\2\u0519\1\u051a\1\u0519\1\u051a\1\u04ff"+ + "\13\0\1\u02b6\226\0\1\u04cb\13\0\1\u02b6\160\0\1\334"+ + "\1\u051b\31\216\1\335\12\216\175\0\1\363\7\252\1\u051c"+ + "\22\252\1\127\12\252\1\0\3\124\1\0\2\124\1\125"+ + "\3\124\3\0\1\124\4\0\2\124\151\0\4\u051d\2\0"+ + "\1\u051d\15\0\1\u051d\6\0\12\u051d\1\u04f1\175\0\4\u051e"+ + "\2\0\1\u051e\15\0\1\u051e\6\0\12\u051e\1\u051f\174\0"+ + "\1\u0362\4\u051e\2\0\1\u051e\15\0\1\u051e\6\0\12\u0520"+ + "\1\u051f\174\0\1\u0362\4\u051e\2\0\1\u051e\15\0\1\u051e"+ + "\6\0\12\u0521\1\u051f\174\0\1\u0362\4\u051e\2\0\1\u051e"+ + "\15\0\1\u051e\6\0\1\u0520\1\u0522\1\u0521\2\u0520\2\u0521"+ + "\1\u0520\1\u0521\1\u0520\1\u051f\175\0\4\u0523\2\0\1\u0523"+ + "\15\0\1\u0523\6\0\12\u0523\14\0\1\u0307\161\0\4\u0524"+ + "\2\0\1\u0524\15\0\1\u0524\6\0\12\u0524\1\u050d\13\0"+ + "\1\u0307\161\0\4\u0523\2\0\1\u0523\15\0\1\u0523\6\0"+ + "\12\u0523\175\0\1\u0362\4\u0524\2\0\1\u0524\15\0\1\u0524"+ + "\6\0\12\u0525\1\u050d\13\0\1\u0307\160\0\1\u0362\4\u0524"+ + "\2\0\1\u0524\15\0\1\u0524\6\0\12\u0524\1\u050d\13\0"+ + "\1\u0307\160\0\1\u0362\4\u0524\2\0\1\u0524\15\0\1\u0524"+ + "\6\0\2\u0525\1\u0524\2\u0525\2\u0524\1\u0525\1\u0524\1\u0525"+ + "\1\u050d\13\0\1\u0307\226\0\1\u04dc\13\0\1\u0307\226\0"+ + "\1\u04e3\175\0\4\u0526\2\0\1\u0526\15\0\1\u0526\6\0"+ + "\12\u0526\1\u0514\175\0\4\u0518\2\0\1\u0518\15\0\1\u0518"+ + "\6\0\12\u0518\1\u04ac\174\0\1\u0314\4\u0526\2\0\1\u0526"+ + "\15\0\1\u0526\6\0\12\u0527\1\u0514\174\0\1\u0314\4\u0526"+ + "\2\0\1\u0526\15\0\1\u0526\6\0\12\u0526\1\u0514\174\0"+ + "\1\u0314\4\u0526\2\0\1\u0526\15\0\1\u0526\6\0\2\u0527"+ + "\1\u0526\2\u0527\2\u0526\1\u0527\1\u0526\1\u0527\1\u0514\175\0"+ + "\4\u0528\2\0\1\u0528\15\0\1\u0528\6\0\12\u0528\14\0"+ + "\1\u02b6\161\0\4\u0529\2\0\1\u0529\15\0\1\u0529\6\0"+ + "\12\u0529\1\u04ff\13\0\1\u02b6\160\0\1\u0314\4\u0529\2\0"+ + 
"\1\u0529\15\0\1\u0529\6\0\12\u0529\1\u04ff\13\0\1\u02b6"+ + "\160\0\1\334\32\216\1\335\5\216\1\u052a\4\216\175\0"+ + "\1\363\1\252\1\u0345\30\252\1\127\12\252\1\0\3\124"+ + "\1\0\2\124\1\125\3\124\3\0\1\124\4\0\2\124"+ + "\216\0\1\u04f1\175\0\4\u052b\2\0\1\u052b\15\0\1\u052b"+ + "\6\0\12\u052b\1\u051f\175\0\4\u0523\2\0\1\u0523\15\0"+ + "\1\u0523\6\0\12\u0523\1\u04c5\174\0\1\u0362\4\u052b\2\0"+ + "\1\u052b\15\0\1\u052b\6\0\12\u052c\1\u051f\174\0\1\u0362"+ + "\4\u052b\2\0\1\u052b\15\0\1\u052b\6\0\12\u052b\1\u051f"+ + "\174\0\1\u0362\4\u052b\2\0\1\u052b\15\0\1\u052b\6\0"+ + "\2\u052c\1\u052b\2\u052c\2\u052b\1\u052c\1\u052b\1\u052c\1\u051f"+ + "\175\0\4\u052d\2\0\1\u052d\15\0\1\u052d\6\0\12\u052d"+ + "\14\0\1\u0307\161\0\4\u052e\2\0\1\u052e\15\0\1\u052e"+ + "\6\0\12\u052e\1\u050d\13\0\1\u0307\160\0\1\u0362\4\u052e"+ + "\2\0\1\u052e\15\0\1\u052e\6\0\12\u052e\1\u050d\13\0"+ + "\1\u0307\161\0\4\u052f\2\0\1\u052f\15\0\1\u052f\6\0"+ + "\12\u052f\1\u0514\174\0\1\u0314\4\u052f\2\0\1\u052f\15\0"+ + "\1\u052f\6\0\12\u052f\1\u0514\175\0\4\u0530\2\0\1\u0530"+ + "\15\0\1\u0530\6\0\12\u0530\14\0\1\u02b6\226\0\1\u04ff"+ + "\13\0\1\u02b6\160\0\1\334\7\216\1\u0531\22\216\1\335"+ + "\12\216\176\0\4\u0532\2\0\1\u0532\15\0\1\u0532\6\0"+ + "\12\u0532\1\u051f\174\0\1\u0362\4\u0532\2\0\1\u0532\15\0"+ + "\1\u0532\6\0\12\u0532\1\u051f\175\0\4\u0533\2\0\1\u0533"+ + "\15\0\1\u0533\6\0\12\u0533\14\0\1\u0307\226\0\1\u050d"+ + "\13\0\1\u0307\226\0\1\u0514\175\0\4\u04ac\2\0\1\u04ac"+ + "\15\0\1\u04ac\6\0\12\u04ac\14\0\1\u02b6\160\0\1\334"+ + "\1\216\1\u03d5\30\216\1\335\12\216\243\0\1\u051f\175\0"+ + "\4\u04c5\2\0\1\u04c5\15\0\1\u04c5\6\0\12\u04c5\14\0"+ + "\1\u0307\11\0"; private static int [] zzUnpackTrans() { - int [] result = new int[192294]; + int [] result = new int[214182]; int offset = 0; offset = zzUnpackTrans(ZZ_TRANS_PACKED_0, offset, result); return result; @@ -2953,23 +3192,23 @@ public final class UAX29URLEmailTokenizer extends Tokenizer { private static final int [] ZZ_ATTRIBUTE = 
zzUnpackAttribute(); private static final String ZZ_ATTRIBUTE_PACKED_0 = - "\1\0\1\11\27\1\2\11\12\1\15\0\1\1\1\0"+ - "\1\1\10\0\1\1\21\0\2\1\1\0\3\1\1\0"+ - "\1\1\1\0\4\1\46\0\32\1\3\0\4\1\32\0"+ - "\4\1\17\0\1\11\1\0\23\1\2\0\1\1\1\0"+ - "\7\1\3\0\2\1\1\0\4\1\1\0\2\1\1\0"+ - "\2\1\10\0\1\1\32\0\1\1\1\0\11\1\1\0"+ - "\1\1\2\0\1\1\1\0\1\1\10\0\3\1\15\0"+ - "\11\1\3\0\2\1\1\0\4\1\1\0\4\1\1\0"+ - "\2\1\1\0\2\1\1\0\3\1\7\0\2\1\20\0"+ - "\1\1\10\0\1\1\3\0\1\1\32\0\3\1\23\0"+ - "\1\1\27\0\1\1\4\0\1\1\6\0\1\1\4\0"+ - "\2\1\36\0\1\1\51\0\1\1\42\0\1\1\51\0"+ - "\1\1\122\0\1\1\117\0\1\1\107\0\1\1\74\0"+ - "\1\1\51\0\1\1\333\0"; + "\1\0\1\11\27\1\2\11\13\1\15\0\1\1\1\0"+ + "\1\1\10\0\1\1\15\0\1\1\12\0\2\1\1\0"+ + "\3\1\1\0\1\1\1\0\4\1\53\0\32\1\3\0"+ + "\4\1\32\0\4\1\17\0\1\11\1\0\23\1\2\0"+ + "\1\1\1\0\7\1\3\0\2\1\1\0\4\1\1\0"+ + "\2\1\1\0\2\1\10\0\1\1\32\0\1\1\1\0"+ + "\11\1\1\0\1\1\2\0\1\1\1\0\1\1\10\0"+ + "\3\1\15\0\11\1\3\0\2\1\1\0\4\1\1\0"+ + "\4\1\1\0\2\1\1\0\2\1\1\0\3\1\7\0"+ + "\2\1\20\0\1\1\10\0\1\1\3\0\1\1\36\0"+ + "\3\1\23\0\1\1\36\0\1\1\4\0\1\1\6\0"+ + "\1\1\4\0\2\1\42\0\1\1\57\0\1\1\51\0"+ + "\1\1\60\0\1\1\140\0\1\1\135\0\1\1\123\0"+ + "\1\1\106\0\1\1\57\0\1\1\362\0"; private static int [] zzUnpackAttribute() { - int [] result = new int[1204]; + int [] result = new int[1331]; int offset = 0; offset = zzUnpackAttribute(ZZ_ATTRIBUTE_PACKED_0, offset, result); return result; @@ -3038,16 +3277,16 @@ public final class UAX29URLEmailTokenizer extends Tokenizer { /* user code: */ /** Alphanumeric sequences */ - public static final String WORD_TYPE = ""; + public static final String WORD_TYPE = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.ALPHANUM]; /** Numbers */ - public static final String NUMERIC_TYPE = ""; + public static final String NUMERIC_TYPE = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.NUM]; /** URLs with scheme: HTTP(S), FTP, or FILE; no-scheme URLs match HTTP syntax */ public static final String URL_TYPE = ""; /** E-mail addresses */ - public 
static final String EMAIL_TYPE = " * See Unicode Line Breaking Algorithm: http://www.unicode.org/reports/tr14/#SA */ - public static final String SOUTH_EAST_ASIAN_TYPE = ""; + public static final String SOUTH_EAST_ASIAN_TYPE = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.SOUTHEAST_ASIAN]; - public static final String IDEOGRAPHIC_TYPE = ""; + public static final String IDEOGRAPHIC_TYPE = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.IDEOGRAPHIC]; - public static final String HIRAGANA_TYPE = ""; + public static final String HIRAGANA_TYPE = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.HIRAGANA]; + public static final String KATAKANA_TYPE = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.KATAKANA]; + + public static final String HANGUL_TYPE = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.HANGUL]; + private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); private final PositionIncrementAttribute posIncrAtt @@ -3195,7 +3438,7 @@ public final class UAX29URLEmailTokenizer extends Tokenizer { char [] map = new char[0x10000]; int i = 0; /* index in packed string */ int j = 0; /* index in unpacked array */ - while (i < 2802) { + while (i < 2812) { int count = packed.charAt(i++); char value = packed.charAt(i++); do map[j++] = value; while (--count > 0); @@ -3474,38 +3717,46 @@ public final class UAX29URLEmailTokenizer extends Tokenizer { zzMarkedPos = zzMarkedPosL; switch (zzAction < 0 ? 
zzAction : ZZ_ACTION[zzAction]) { - case 7: + case 9: { if (populateAttributes(EMAIL_TYPE)) return true; } - case 9: break; + case 11: break; case 2: { if (populateAttributes(WORD_TYPE)) return true; } - case 10: break; - case 6: + case 12: break; + case 7: { if (populateAttributes(HIRAGANA_TYPE)) return true; } - case 11: break; - case 5: + case 13: break; + case 6: { if (populateAttributes(IDEOGRAPHIC_TYPE)) return true; } - case 12: break; - case 4: + case 14: break; + case 5: { if (populateAttributes(SOUTH_EAST_ASIAN_TYPE)) return true; } - case 13: break; + case 15: break; + case 4: + { if (populateAttributes(KATAKANA_TYPE)) return true; + } + case 16: break; case 3: { if (populateAttributes(NUMERIC_TYPE)) return true; } - case 14: break; + case 17: break; case 1: { /* Not numeric, word, ideographic, hiragana, or SE Asian -- ignore it. */ } - case 15: break; - case 8: + case 18: break; + case 10: { if (populateAttributes(URL_TYPE)) return true; } - case 16: break; + case 19: break; + case 8: + { if (populateAttributes(HANGUL_TYPE)) return true; + } + case 20: break; default: if (zzInput == YYEOF && zzStartRead == zzCurrentPos) { zzAtEOF = true; diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.jflex b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.jflex index 7d9dc405c37..680378e94f5 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.jflex +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.jflex @@ -77,6 +77,8 @@ ComplexContext = ([\p{LB:Complex_Context}] | {ComplexContextSupp}) Han = ([\p{Script:Han}] | {HanSupp}) Hiragana = ([\p{Script:Hiragana}] | {HiraganaSupp}) +// Script=Hangul & Aletter +HangulEx = (!(!\p{Script:Hangul}|!\p{WB:ALetter})) ({Format} | {Extend})* // UAX#29 WB4. 
X (Extend | Format)* --> X // ALetterEx = {ALetter} ({Format} | {Extend})* @@ -168,16 +170,16 @@ EMAIL = {EMAILlocalPart} "@" ({DomainNameStrict} | {EMAILbracketedHost}) %{ /** Alphanumeric sequences */ - public static final String WORD_TYPE = ""; + public static final String WORD_TYPE = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.ALPHANUM]; /** Numbers */ - public static final String NUMERIC_TYPE = ""; + public static final String NUMERIC_TYPE = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.NUM]; /** URLs with scheme: HTTP(S), FTP, or FILE; no-scheme URLs match HTTP syntax */ public static final String URL_TYPE = ""; /** E-mail addresses */ - public static final String EMAIL_TYPE = " * See Unicode Line Breaking Algorithm: http://www.unicode.org/reports/tr14/#SA */ - public static final String SOUTH_EAST_ASIAN_TYPE = ""; + public static final String SOUTH_EAST_ASIAN_TYPE = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.SOUTHEAST_ASIAN]; - public static final String IDEOGRAPHIC_TYPE = ""; + public static final String IDEOGRAPHIC_TYPE = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.IDEOGRAPHIC]; - public static final String HIRAGANA_TYPE = ""; + public static final String HIRAGANA_TYPE = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.HIRAGANA]; + public static final String KATAKANA_TYPE = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.KATAKANA]; + + public static final String HANGUL_TYPE = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.HANGUL]; + private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); private final PositionIncrementAttribute posIncrAtt @@ -316,6 +322,12 @@ EMAIL = {EMAILlocalPart} "@" ({DomainNameStrict} | {EMAILbracketedHost}) {ExtendNumLetEx}* { if (populateAttributes(NUMERIC_TYPE)) return true; } +// subset of the below for typing purposes only! 
+{HangulEx}+ + { if (populateAttributes(HANGUL_TYPE)) return true; } + +{KatakanaEx}+ + { if (populateAttributes(KATAKANA_TYPE)) return true; } // UAX#29 WB5. ALetter × ALetter // WB6. ALetter × (MidLetter | MidNumLet) ALetter diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java index 7927d3d59bb..b9254c1d44c 100644 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java @@ -1,4 +1,4 @@ -/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 10/3/10 9:07 AM */ +/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 2/9/11 11:45 AM */ package org.apache.lucene.analysis.wikipedia; @@ -25,8 +25,8 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; /** * This class is a scanner generated by * JFlex 1.5.0-SNAPSHOT - * on 10/3/10 9:07 AM from the specification file - * C:/Users/rmuir/workspace/lucene-clean/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.jflex + * on 2/9/11 11:45 AM from the specification file + * C:/Users/rmuir/workspace/lucene-2911/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.jflex */ class WikipediaTokenizerImpl { @@ -757,6 +757,12 @@ final int setText(StringBuilder buffer){ zzState = ZZ_LEXSTATE[zzLexicalState]; + // set up zzAction for empty match case: + int zzAttributes = zzAttrL[zzState]; + if ( (zzAttributes & 1) == 1 ) { + zzAction = zzState; + } + zzForAction: { while (true) { @@ -789,7 +795,7 @@ final int setText(StringBuilder buffer){ if (zzNext == -1) break zzForAction; zzState = zzNext; - int zzAttributes = zzAttrL[zzState]; + zzAttributes = zzAttrL[zzState]; if ( (zzAttributes & 1) == 1 ) { zzAction = 
zzState; zzMarkedPosL = zzCurrentPosL; diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestStandardAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestStandardAnalyzer.java index 5c7a46f0461..53b61c7c639 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestStandardAnalyzer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestStandardAnalyzer.java @@ -207,4 +207,16 @@ public class TestStandardAnalyzer extends BaseTokenStreamTestCase { new String[] {"𩬅", "艱", "éŸ", "䇹", "愯", "瀛"}, new String[] { "", "", "", "", "", "" }); } + + public void testKorean() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, "훈민정ìŒ", + new String[] { "훈민정ìŒ" }, + new String[] { "" }); + } + + public void testJapanese() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, "ä»®åé£ã„ カタカナ", + new String[] { "ä»®", "å", "é£", "ã„", "カタカナ" }, + new String[] { "", "", "", "", "" }); + } } diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java index 71b37361789..e33af62fc0a 100644 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java @@ -406,4 +406,16 @@ public class TestUAX29URLEmailTokenizer extends BaseTokenStreamTestCase { new String[] {"𩬅", "艱", "éŸ", "䇹", "愯", "瀛"}, new String[] { "", "", "", "", "", "" }); } + + public void testKorean() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, "훈민정ìŒ", + new String[] { "훈민정ìŒ" }, + new String[] { "" }); + } + + public void testJapanese() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, "ä»®åé£ã„ カタカナ", + new String[] { "ä»®", "å", "é£", "ã„", "カタカナ" }, + new String[] { "", 
"", "", "", "" }); + } } diff --git a/modules/analysis/icu/src/data/uax29/Default.rbbi b/modules/analysis/icu/src/data/uax29/Default.rbbi new file mode 100644 index 00000000000..9dbab966632 --- /dev/null +++ b/modules/analysis/icu/src/data/uax29/Default.rbbi @@ -0,0 +1,127 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Default RBBI rules, based on UAX#29. +# + +!!chain; + +# +# Character Class Definitions. +# + +$CR = [\p{Word_Break = CR}]; +$LF = [\p{Word_Break = LF}]; +$Newline = [\p{Word_Break = Newline}]; +$Extend = [\p{Word_Break = Extend}]; +$Format = [\p{Word_Break = Format}]; +$Katakana = [\p{Word_Break = Katakana}]; +$ALetter = [\p{Word_Break = ALetter}]; +$MidNumLet = [\p{Word_Break = MidNumLet}]; +$MidLetter = [\p{Word_Break = MidLetter}]; +$MidNum = [\p{Word_Break = MidNum}]; +$Numeric = [\p{Word_Break = Numeric}[[:Decomposition_Type=Wide:]&[:General_Category=Decimal_Number:]]]; +$ExtendNumLet = [\p{Word_Break = ExtendNumLet}]; + + +# Dictionary character set, for triggering language-based break engines. Currently +# limited to LineBreak=Complex_Context. 
Note that this set only works in Unicode +# 5.0 or later as the definition of Complex_Context was corrected to include all +# characters requiring dictionary break. + +$dictionary = [:LineBreak = Complex_Context:]; +$Control = [\p{Grapheme_Cluster_Break = Control}]; +$ALetterPlus = [$ALetter [$dictionary-$Extend-$Control]]; # Note: default ALetter does not + # include the dictionary characters. + +# +# Rules 4 Ignore Format and Extend characters, +# except when they appear at the beginning of a region of text. +# +$KatakanaEx = $Katakana ($Extend | $Format)*; +$ALetterEx = $ALetterPlus ($Extend | $Format)*; +$MidNumLetEx = $MidNumLet ($Extend | $Format)*; +$MidLetterEx = $MidLetter ($Extend | $Format)*; +$MidNumEx = $MidNum ($Extend | $Format)*; +$NumericEx = $Numeric ($Extend | $Format)*; +$ExtendNumLetEx = $ExtendNumLet ($Extend | $Format)*; + +$Hiragana = [\p{script=Hiragana}]; +$Ideographic = [\p{Ideographic}]; +$HiraganaEx = $Hiragana ($Extend | $Format)*; +$IdeographicEx = $Ideographic ($Extend | $Format)*; + +## ------------------------------------------------- + +!!forward; + + +# Rule 3 - CR x LF +# +$CR $LF; + +# Rule 4 - ignore Format and Extend characters, except when they appear at the beginning +# of a region of Text. The rule here comes into play when the start of text +# begins with a group of Format chars, or with a "word" consisting of a single +# char that is not in any of the listed word break categories followed by +# format char(s). +[^$CR $LF $Newline]? ($Extend | $Format)+; + +$NumericEx {100}; +$ALetterEx {200}; +$KatakanaEx {300}; # note: these status values override those from rule 5 +$HiraganaEx {300}; # by virtual of being numerically larger. +$IdeographicEx {400}; # + +# +# rule 5 +# Do not break between most letters. 
+# +$ALetterEx $ALetterEx {200}; + +# rule 6 and 7 +$ALetterEx ($MidLetterEx | $MidNumLetEx) $ALetterEx {200}; + +# rule 8 + +$NumericEx $NumericEx {100}; + +# rule 9 + +$ALetterEx $NumericEx {200}; + +# rule 10 + +$NumericEx $ALetterEx {200}; + +# rule 11 and 12 + +$NumericEx ($MidNumEx | $MidNumLetEx) $NumericEx {100}; + +# rule 13 + +$KatakanaEx $KatakanaEx {300}; + +# rule 13a/b + +$ALetterEx $ExtendNumLetEx {200}; # (13a) +$NumericEx $ExtendNumLetEx {100}; # (13a) +$KatakanaEx $ExtendNumLetEx {300}; # (13a) +$ExtendNumLetEx $ExtendNumLetEx {200}; # (13a) + +$ExtendNumLetEx $ALetterEx {200}; # (13b) +$ExtendNumLetEx $NumericEx {100}; # (13b) +$ExtendNumLetEx $KatakanaEx {300}; # (13b) diff --git a/modules/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/DefaultICUTokenizerConfig.java b/modules/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/DefaultICUTokenizerConfig.java index 130516fea30..ecff8241049 100644 --- a/modules/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/DefaultICUTokenizerConfig.java +++ b/modules/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/DefaultICUTokenizerConfig.java @@ -20,6 +20,8 @@ package org.apache.lucene.analysis.icu.segmentation; import java.io.IOException; import java.io.InputStream; +import org.apache.lucene.analysis.standard.StandardTokenizer; + import com.ibm.icu.lang.UScript; import com.ibm.icu.text.BreakIterator; import com.ibm.icu.text.RuleBasedBreakIterator; @@ -44,20 +46,24 @@ import com.ibm.icu.util.ULocale; */ public class DefaultICUTokenizerConfig extends ICUTokenizerConfig { /** Token type for words containing ideographic characters */ - public static final String WORD_IDEO = ""; - /** Token type for words containing Japanese kana */ - public static final String WORD_KANA = ""; + public static final String WORD_IDEO = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.IDEOGRAPHIC]; + /** Token type for words containing Japanese hiragana */ + public 
static final String WORD_HIRAGANA = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.HIRAGANA]; + /** Token type for words containing Japanese katakana */ + public static final String WORD_KATAKANA = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.KATAKANA]; + /** Token type for words containing Korean hangul */ + public static final String WORD_HANGUL = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.HANGUL]; /** Token type for words that contain letters */ - public static final String WORD_LETTER = ""; + public static final String WORD_LETTER = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.ALPHANUM]; /** Token type for words that appear to be numbers */ - public static final String WORD_NUMBER = ""; + public static final String WORD_NUMBER = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.NUM]; /* * the default breakiterators in use. these can be expensive to * instantiate, cheap to clone. */ private static final BreakIterator rootBreakIterator = - BreakIterator.getWordInstance(ULocale.ROOT); + readBreakIterator("Default.brk"); private static final BreakIterator thaiBreakIterator = BreakIterator.getWordInstance(new ULocale("th_TH")); private static final BreakIterator hebrewBreakIterator = @@ -87,9 +93,9 @@ public class DefaultICUTokenizerConfig extends ICUTokenizerConfig { case RuleBasedBreakIterator.WORD_IDEO: return WORD_IDEO; case RuleBasedBreakIterator.WORD_KANA: - return WORD_KANA; + return script == UScript.HIRAGANA ? WORD_HIRAGANA : WORD_KATAKANA; case RuleBasedBreakIterator.WORD_LETTER: - return WORD_LETTER; + return script == UScript.HANGUL ? 
WORD_HANGUL : WORD_LETTER; case RuleBasedBreakIterator.WORD_NUMBER: return WORD_NUMBER; default: /* some other custom code */ diff --git a/modules/analysis/icu/src/resources/org/apache/lucene/analysis/icu/segmentation/Default.brk b/modules/analysis/icu/src/resources/org/apache/lucene/analysis/icu/segmentation/Default.brk new file mode 100644 index 0000000000000000000000000000000000000000..0e303d0e2ddb318144f1e59df1d40ed173553808 GIT binary patch literal 27088 zcmeHPd5m6F6~F!7KGSx(L7|0Gnz9rnY$mofqyuWpCYvOs!P*Wm){&Ohw$L`U*hC<0 zF$7W*Nibm)MPm&K5*HvQP3a#5h(R=BP!SSAX-H8+rqK}UIrn^L?zzj`W>C;{Uf#Rs zp5=FzyS(?luV0n=p#L5ljwWCXLs!G)csN%pmEyTyDYaFpY$3GpC);ZtP-;c4)Raq< zy8Zsg6EkxF#x9861he=BiLVjquS164Td^L2rm@^Nt>c zK1E~;CaOtlGO+s86cxkKr>|N^+r+lI?oEa3G&Nn#P{%q(7l6pSK{BVB=Ll#l1Sf^l+J zHc?FkimnyvTEsS!V0;yvc!fV5TiraE3^Sq+PMjBPQ#TL#z+wiReZnT>2gN)Orofz= z3Fj2xHCcIcKyNA>bMzIFD4)9r)%AsV?VRiY={l7Cbl|1Cn-ROldjEbeX=?kYK>R@*QnNWVCKw#6X%in z!`0AamKzI)(!LFEf@*MFw{d6?HvZFKd)lwmoVfGz&i>9bofkU4>%7|eYv*sBk&5HJ zD2*oTV_GyTn%5bLPGjxp>{051XkpYJEsZXZJ{zrw20&{l`Z7wR+oL<8yQ6!e`*rVu zXgGQ#dNle;^o!`==<$5?%jmb!E72R#ThTkwyKx-%#M9zg@x1uV_}qA5^aiy3@g?!q z@%8cQcul-MzAfGnFO2Vs?}_)u4?_EJyf1z{ekwi?KO4UozZ}03zY!gb562_%d(pup zO(rMDC$p1z$%5oO=r2l^B+HVklI5L|WL0t#XCd3<7LaX9wj^JL_AWT@0lU4Zkvtgn zOU*Jmc{tgp^&d~3N}f)hOMVT0f1kXTyqUbM`9}cjXlEpi(%y7>Ix9UTS(KiUo)aBR zKMvHAbXj^;x*Thiu1arAhtdt{)^sQ6?}GEb^nuP1EgMcBkr70b(??|=>GY>?KG^|( z>4Egw^u_e$^tJTO^zZ3E0ih?GkQLd~?8MGUcIxPen6as(viV?jR$Hs_>O)S4I)^#S z?0l%>Du`ooc5!xDb{WjogW0uJKC%_rKsJ*2iv{UHrt!s zuC=~txYpTOq|`^cWm z4#3^?*+fgS7qgeLKcPnUIzB68hq8ZU|LTn7oxCT#F`t&tiiY!f`GT}JdOklrhvzep zEy^#-FHIKZ*W@drgZV%{#CKni>E}w@VvO)E37A zqswdISo{A`0snb5zbAVLPdL5}``_OD!Te#I-}(3R9|h<65!i=E^2gx(S^iZ1bpBla ztNbO>%>RgQ`^moi_53fL!};5}es3I4<&q!dM~jT_BlymQG`ttlAUUaZnTwDZi&ArTn_g?rABfHo1 z;a=4GU6-u^d22dQTpF)Qw-(nJ?thO8&H(2g?CWHAa)cx+;LbX>!^dvRF|mv#f_iVo zKoxE=`ZQ6c)mBz)D7F@NuvKw4Q+|u_7x%*XjpAFyccI-^JW@O!%Bpy>IM8{!xEHh! 
zbq*Ex!|_bnkM=yLIW^C~Lr?v-M zPXe5_wy4kcGM}VTzS06|Mv^VzPj+O;?bSTvzisqw_1)Tx*Pie=R~Mc8EDyD;BWr+q z*mrp$d;CUlJcsm;H2Eg3oR!4y1r5@o_^4NR9dDJ(>@z3xx;UiVuceeWrKu`5R#V%s zUS{nDh&-OrC0oQG9CNi}E~~H`qp@NMtzg4v3cuaPgZtd#H~&b7vtq5Xc*0g8uDi!# zA9r8$jMM(44E;b`mL)K zLPjf$J~a03O7{EY)8*4>qc=njd1am}#Xi;2Xy8h9GeI4WoMK=*vL-px6bT`+RVZK5 zW2;ab%9r)nn*4;Xp?ooNvSkFh51ARwk#h{_*GMS}_G;?L(S`CwB(iXE*ou8Iu4y?F zZjD4zu9W(cOIf(%YbwC|H0t?d%yCkSp1De)d~I#NN7*uhZ2kJM6j9JG=fwQ5|GErl zt}_-@(&T2mFb;d>7)&%wNln7DuY}H3=IFW4E#=QxWaHNa`Qg0TOIeG45zmgz@1;qjdGW_(2!r z9Q8amPwN{PL-|M)wH;Y{nFHo7mE9i)_^5ukf1(zthNUi14}NyHk(j7S}x z8HHw)>(#fyd#lm%<>ee>WvFuPItgKv-7ncgogDQ7HEc33unI;`)P#Loa-)e9Yd4rv zW)30+GmdR_8;qSaw4b^v+HzB^ZVqMVFXmOuyvJ;EVxR3OLCrH;$VPw5Jws!YrmQmlX8Zj*Fk zJ%u^d_`ITgTa>uFLkNrXeEVvSkA(U@w+%-k5LR^zTCoQf{b)V~8^&}dO zryAEt!`8HG8LjqIUF4zpDPuAA<&=zyY|VW|%3K80mGW}hwKRPhtE8((s@;1S#btc@ zl=MtA+qgNQEo#)%z3P3uLN@xDM}-c3PJ-WR(dP}_PfLUa@=-ea>5Goo&(Y%;3}<9f zQ&u9>x6!0F&L+2SYx_OYcSnlW6KdDgf7G9kOdLC&vL0KP$?N%yP<9_gKI&A-QD%E$ zRJS!+MpCw0x?^H&?eRiA(v?@K-IO*l)lv0`h)|_f=fRbiVX>Y&6Wu<&=i0vnU|Vc$ zeJPQ&d2g0nn?{{U`rlFFyLr+5H~6-W-!u7D*UjjaQHX7S0?_u^)78h?Agc7(yzl4Q zj6<1GY*`OmRwdGYkX(eg<*+EL0&~nv}h32(g*4D(ZYwoW8U8{HP+%>#wzdl~wt+3racXxl6 zLws@Fy}C>Sb~|m^?&00S66CLve2*IUF}I7*zh|}W)9vlq>2lil_YAkUkrwdlU;V!s zciWr0?K9L)xUbuO6k7a$BTJw_d`lPy6%?%zG0`~^>IkiD; zSIgCUwGnVusf(c<1p8Y+iW+FYL#@dO8LG5r%%!?E>!2o^$Al^E8S`?DhE*PbSO)}dvp8LQ#=S^g0oiZX zHP|Rnn~soe�e)QMy@Oscuv2y7*k%X3_fgHgYKDHptT6My8lKT58)MlRaZnZxB|< z5UetZ+6>dS-vO)d2DMmy3P$WEm}Bc;C9YSS0DCi>>tSB3QrD?%z#W8@xLl9HAn31D zr)#@qdY)r61F()(!Aiv$I;hs@`MM2k2zdyQNrU{~2s#@z&dTn(mb0F8sG?10O_@E{ z?Pgss&T0U>Z|hc%>b4kW74BDqf!&AdvKC}pb&gmU%o5i*)`Ipp;%Etj)Xmuudoh!Y~u@`U%#MtgDp^R zd?Gc^u=C+Au@w3@sL$&i5}#{{hpqiQxKb@?;rS!li#cbuJt`O82Dg>U^OQ z-XXWE3r2O!7$7y@ndp$=xSOGt9NK3GKl+nzR?n{fRJfKWC}CE*EH7 z2+{T#8U-9(eD>rgFV3}YY|8B9h=q`w)FxzSYD3bdRS99)GcP=^u-ajXoN?i7%sjN8 zJ}4F%TduRI7h8oZ%TkOBZOiv)nH^h|", "", "", "" }); } + + public void testKorean() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, 
"훈민정ìŒ", + new String[] { "훈민정ìŒ" }, + new String[] { "" }); + } + + public void testJapanese() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, "ä»®åé£ã„ カタカナ", + new String[] { "ä»®", "å", "é£", "ã„", "カタカナ" }, + new String[] { "", "", "", "", "" }); + } } From 68380e413ddc1b5eba8d52554df475225d9fe93c Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Thu, 10 Feb 2011 10:01:02 +0000 Subject: [PATCH 116/185] fix silly repro issue with MockRandomCodec git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1069315 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/lucene/index/codecs/mockrandom/MockRandomCodec.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lucene/src/test-framework/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java b/lucene/src/test-framework/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java index 745c619cb87..bfc9fc81423 100644 --- a/lucene/src/test-framework/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java +++ b/lucene/src/test-framework/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java @@ -151,7 +151,7 @@ public class MockRandomCodec extends Codec { @Override public boolean isIndexTerm(BytesRef term, TermStats stats) { - return random.nextInt(gap) == 17; + return rand.nextInt(gap) == 17; } @Override From ecb292eebb78d4deb6c577e1c58f4e4a49e83ce3 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Thu, 10 Feb 2011 10:01:23 +0000 Subject: [PATCH 117/185] LUCENE-1076: fix AIOOBE when infoStream is on git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1069316 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/src/java/org/apache/lucene/index/LogMergePolicy.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java b/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java index 1925a78d74d..669d3b0d901 100644 --- 
a/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java +++ b/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java @@ -546,9 +546,10 @@ public abstract class LogMergePolicy extends MergePolicy { if (size < 1) { size = 1; } - levels.add(new SegmentInfoAndLevel(info, (float) Math.log(size)/norm, i)); + final SegmentInfoAndLevel infoLevel = new SegmentInfoAndLevel(info, (float) Math.log(size)/norm, i); + levels.add(infoLevel); if (verbose()) { - message("seg " + info.name + " level=" + levels.get(i).level + " size=" + size); + message("seg " + info.name + " level=" + infoLevel.level + " size=" + size); } } From f4e977bb26143619a034574ef4a61e1dd136a3f4 Mon Sep 17 00:00:00 2001 From: Uwe Schindler Date: Thu, 10 Feb 2011 11:50:37 +0000 Subject: [PATCH 118/185] LUCENE-2913: Add missing getters to Numeric* classes git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1069341 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/CHANGES.txt | 4 +++- .../java/org/apache/lucene/analysis/NumericTokenStream.java | 5 +++++ lucene/src/java/org/apache/lucene/document/NumericField.java | 5 +++++ .../java/org/apache/lucene/search/NumericRangeFilter.java | 3 +++ .../src/java/org/apache/lucene/search/NumericRangeQuery.java | 3 +++ 5 files changed, 19 insertions(+), 1 deletion(-) diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index 402bb3dcc17..a9a51677f4f 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -856,7 +856,9 @@ New features CJK types are explicitly marked to allow for custom downstream handling: , , , and . (Robert Muir, Steven Rowe) - + +* LUCENE-2913: Add missing getters to Numeric* classes. 
(Uwe Schindler) + Optimizations * LUCENE-2494: Use CompletionService in ParallelMultiSearcher instead of diff --git a/lucene/src/java/org/apache/lucene/analysis/NumericTokenStream.java b/lucene/src/java/org/apache/lucene/analysis/NumericTokenStream.java index b98a24646ca..4dc63e9441f 100644 --- a/lucene/src/java/org/apache/lucene/analysis/NumericTokenStream.java +++ b/lucene/src/java/org/apache/lucene/analysis/NumericTokenStream.java @@ -293,6 +293,11 @@ public final class NumericTokenStream extends TokenStream { return (shift < valSize); } + /** Returns the precision step. */ + public int getPrecisionStep() { + return precisionStep; + } + // members private final NumericTermAttribute numericAtt = addAttribute(NumericTermAttribute.class); private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class); diff --git a/lucene/src/java/org/apache/lucene/document/NumericField.java b/lucene/src/java/org/apache/lucene/document/NumericField.java index 4d008e0169f..6cae722a1d9 100644 --- a/lucene/src/java/org/apache/lucene/document/NumericField.java +++ b/lucene/src/java/org/apache/lucene/document/NumericField.java @@ -222,6 +222,11 @@ public final class NumericField extends AbstractField { return (Number) fieldsData; } + /** Returns the precision step. */ + public int getPrecisionStep() { + return numericTS.getPrecisionStep(); + } + /** * Initializes the field with the supplied long value. * @param value the numeric value diff --git a/lucene/src/java/org/apache/lucene/search/NumericRangeFilter.java b/lucene/src/java/org/apache/lucene/search/NumericRangeFilter.java index f3a2dc1a66c..116972a0fbe 100644 --- a/lucene/src/java/org/apache/lucene/search/NumericRangeFilter.java +++ b/lucene/src/java/org/apache/lucene/search/NumericRangeFilter.java @@ -179,4 +179,7 @@ public final class NumericRangeFilter extends MultiTermQueryWr /** Returns the upper value of this range filter */ public T getMax() { return query.getMax(); } + /** Returns the precision step. 
*/ + public int getPrecisionStep() { return query.getPrecisionStep(); } + } diff --git a/lucene/src/java/org/apache/lucene/search/NumericRangeQuery.java b/lucene/src/java/org/apache/lucene/search/NumericRangeQuery.java index 1daa453383c..1d0b6628972 100644 --- a/lucene/src/java/org/apache/lucene/search/NumericRangeQuery.java +++ b/lucene/src/java/org/apache/lucene/search/NumericRangeQuery.java @@ -319,6 +319,9 @@ public final class NumericRangeQuery extends MultiTermQuery { /** Returns the upper value of this range query */ public T getMax() { return max; } + /** Returns the precision step. */ + public int getPrecisionStep() { return precisionStep; } + @Override public String toString(final String field) { final StringBuilder sb = new StringBuilder(); From 990ba390aecc6aaebd947b37d31a468296af5b3c Mon Sep 17 00:00:00 2001 From: Grant Ingersoll Date: Thu, 10 Feb 2011 12:00:29 +0000 Subject: [PATCH 119/185] clean up some urls git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1069345 13f79535-47bb-0310-9956-ffa450edef68 --- solr/example/solr/conf/velocity/VM_global_library.vm | 2 +- solr/example/solr/conf/velocity/tabs.vm | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/solr/example/solr/conf/velocity/VM_global_library.vm b/solr/example/solr/conf/velocity/VM_global_library.vm index 5d30e360915..d7cbce4ec25 100644 --- a/solr/example/solr/conf/velocity/VM_global_library.vm +++ b/solr/example/solr/conf/velocity/VM_global_library.vm @@ -2,7 +2,7 @@ #macro(param $key)$request.params.get($key)#end #macro(url_for_solr)/solr#if($request.core.name != "")/$request.core.name#end#end -#macro(url_for_home)#url_for_solr/browse#end +#macro(url_for_home)#url_for_solr/browse?#end #macro(q)&q=$!{esc.url($params.get('q'))}#end diff --git a/solr/example/solr/conf/velocity/tabs.vm b/solr/example/solr/conf/velocity/tabs.vm index b897c7ab09e..dd5471adbb0 100644 --- a/solr/example/solr/conf/velocity/tabs.vm +++ b/solr/example/solr/conf/velocity/tabs.vm @@ 
-1,6 +1,6 @@ ##TODO: Make some nice tabs here #set($queryOpts = $params.get("queryOpts")) -Examples: #if($queryOpts && $queryOpts != "")Simple#{else}Simple#end -#if($queryOpts == "spatial")Spatial#elseSpatial#end -#if($queryOpts == "group")Group By#elseGroup By#end +Examples: #if($queryOpts && $queryOpts != "")Simple#{else}Simple#end +#if($queryOpts == "spatial")Spatial#elseSpatial#end +#if($queryOpts == "group")Group By#elseGroup By#end
      \ No newline at end of file From fbc4d10df47f4a739d29419f6a39a90d073d16ea Mon Sep 17 00:00:00 2001 From: Grant Ingersoll Date: Thu, 10 Feb 2011 12:01:22 +0000 Subject: [PATCH 120/185] clean up some urls git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1069346 13f79535-47bb-0310-9956-ffa450edef68 --- solr/example/solr/conf/velocity/VM_global_library.vm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/solr/example/solr/conf/velocity/VM_global_library.vm b/solr/example/solr/conf/velocity/VM_global_library.vm index d7cbce4ec25..5d30e360915 100644 --- a/solr/example/solr/conf/velocity/VM_global_library.vm +++ b/solr/example/solr/conf/velocity/VM_global_library.vm @@ -2,7 +2,7 @@ #macro(param $key)$request.params.get($key)#end #macro(url_for_solr)/solr#if($request.core.name != "")/$request.core.name#end#end -#macro(url_for_home)#url_for_solr/browse?#end +#macro(url_for_home)#url_for_solr/browse#end #macro(q)&q=$!{esc.url($params.get('q'))}#end From 224d7b4ed6b720271cc891cfe094ca5990ee73a5 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Thu, 10 Feb 2011 14:19:47 +0000 Subject: [PATCH 121/185] LUCENE-2915: add CoreCodecProvider git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1069402 13f79535-47bb-0310-9956-ffa450edef68 --- .../lucene/index/codecs/CodecProvider.java | 16 +----- .../index/codecs/CoreCodecProvider.java | 49 +++++++++++++++++++ 2 files changed, 50 insertions(+), 15 deletions(-) create mode 100644 lucene/src/java/org/apache/lucene/index/codecs/CoreCodecProvider.java diff --git a/lucene/src/java/org/apache/lucene/index/codecs/CodecProvider.java b/lucene/src/java/org/apache/lucene/index/codecs/CodecProvider.java index c249116eb09..921a8e6db62 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/CodecProvider.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/CodecProvider.java @@ -23,11 +23,6 @@ import java.util.HashSet; import java.util.Map; import java.util.Set; -import 
org.apache.lucene.index.codecs.preflex.PreFlexCodec; -import org.apache.lucene.index.codecs.pulsing.PulsingCodec; -import org.apache.lucene.index.codecs.simpletext.SimpleTextCodec; -import org.apache.lucene.index.codecs.standard.StandardCodec; - /** Holds a set of codecs, keyed by name. You subclass * this, instantiate it, and register your codecs, then * pass this instance to IndexReader/IndexWriter (via @@ -96,7 +91,7 @@ public class CodecProvider { return infosReader; } - static private CodecProvider defaultCodecs = new DefaultCodecProvider(); + static private CodecProvider defaultCodecs = new CoreCodecProvider(); public static CodecProvider getDefault() { return defaultCodecs; @@ -164,12 +159,3 @@ public class CodecProvider { defaultFieldCodec = codec; } } - -class DefaultCodecProvider extends CodecProvider { - DefaultCodecProvider() { - register(new StandardCodec()); - register(new PreFlexCodec()); - register(new PulsingCodec(1)); - register(new SimpleTextCodec()); - } -} diff --git a/lucene/src/java/org/apache/lucene/index/codecs/CoreCodecProvider.java b/lucene/src/java/org/apache/lucene/index/codecs/CoreCodecProvider.java new file mode 100644 index 00000000000..6a502f31c5e --- /dev/null +++ b/lucene/src/java/org/apache/lucene/index/codecs/CoreCodecProvider.java @@ -0,0 +1,49 @@ +package org.apache.lucene.index.codecs; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import org.apache.lucene.index.codecs.preflex.PreFlexCodec; +import org.apache.lucene.index.codecs.pulsing.PulsingCodec; +import org.apache.lucene.index.codecs.simpletext.SimpleTextCodec; +import org.apache.lucene.index.codecs.standard.StandardCodec; + +/** + * A CodecProvider that registers all core codecs that ship + * with Lucene. This will not register any user codecs, but + * you can easily instantiate this class and register them + * yourself and specify per-field codecs: + * + *
      + *   CodecProvider cp = new CoreCodecProvider();
      + *   cp.register(new MyFastCodec());
      + *   cp.setDefaultFieldCodec("Standard");
      + *   cp.setFieldCodec("id", "Pulsing");
      + *   cp.setFieldCodec("body", "MyFastCodec");
      + *   IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
      + *   iwc.setCodecProvider(cp);
      + * 
      + */ + +class CoreCodecProvider extends CodecProvider { + CoreCodecProvider() { + register(new StandardCodec()); + register(new PreFlexCodec()); + register(new PulsingCodec(1)); + register(new SimpleTextCodec()); + } +} From 7972ac83ad4ba8f1e7534af6f43ac68e55a887fa Mon Sep 17 00:00:00 2001 From: Koji Sekiguchi Date: Fri, 11 Feb 2011 01:29:01 +0000 Subject: [PATCH 122/185] SOLR-1449: output error log in addToClassLoader(String,FileFilter), same as addToClassLoader(String) does, when baseDir is not correct git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1069656 13f79535-47bb-0310-9956-ffa450edef68 --- solr/src/java/org/apache/solr/core/SolrResourceLoader.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/solr/src/java/org/apache/solr/core/SolrResourceLoader.java b/solr/src/java/org/apache/solr/core/SolrResourceLoader.java index 7d0d10d5d7d..f359b4d085b 100644 --- a/solr/src/java/org/apache/solr/core/SolrResourceLoader.java +++ b/solr/src/java/org/apache/solr/core/SolrResourceLoader.java @@ -131,7 +131,12 @@ public class SolrResourceLoader implements ResourceLoader */ void addToClassLoader(final String baseDir, final FileFilter filter) { File base = FileUtils.resolvePath(new File(getInstanceDir()), baseDir); - this.classLoader = replaceClassLoader(classLoader, base, filter); + if(base != null && base.canRead() && base.isDirectory()){ + this.classLoader = replaceClassLoader(classLoader, base, filter); + } + else{ + log.error("Can't find (or read) file to add to classloader: " + base); + } } /** From 896e9c7d64a690e694c494990109490940f3f41d Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Fri, 11 Feb 2011 11:32:29 +0000 Subject: [PATCH 123/185] don't use MockRandomMergePolicy for this test git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1069757 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/src/test/org/apache/lucene/index/TestIndexWriter.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java index 298fdcf9e8d..1b5a1ffed69 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java @@ -286,7 +286,7 @@ public class TestIndexWriter extends LuceneTestCase { // Import to use same term index interval else a // smaller one here could increase the disk usage and // cause a false failure: - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND).setTermIndexInterval(termIndexInterval)); + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND).setTermIndexInterval(termIndexInterval).setMergePolicy(newLogMergePolicy())); writer.setInfoStream(VERBOSE ? System.out : null); writer.optimize(); writer.close(); From e14219e78d101039d3f2214cb012b82a1c7cf324 Mon Sep 17 00:00:00 2001 From: Steven Rowe Date: Fri, 11 Feb 2011 14:06:31 +0000 Subject: [PATCH 124/185] fix broken test-framework artifact deployment from generate-maven-artifacts git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1069800 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/build.xml | 32 +++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/lucene/build.xml b/lucene/build.xml index 4cb7a82a532..1e7accc92b4 100644 --- a/lucene/build.xml +++ b/lucene/build.xml @@ -391,7 +391,8 @@ - + @@ -403,22 +404,35 @@ classifier="sources"/> - - - - - + + + + + + + + + - + From faf8d1308665a83d57d363fa2d9975322503c940 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Fri, 11 Feb 2011 15:10:50 +0000 Subject: [PATCH 125/185] LUCENE-2905: make skip variables private to codec, separate skipMinimum from skipInterval, don't skip when close in preflex and sep git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1069829 
13f79535-47bb-0310-9956-ffa450edef68 --- .../lucene/index/SegmentWriteState.java | 14 --------- .../index/codecs/preflex/SegmentTermDocs.java | 3 +- .../codecs/sep/SepPostingsReaderImpl.java | 20 +++++------- .../codecs/sep/SepPostingsWriterImpl.java | 31 +++++++++++++------ .../standard/StandardPostingsReader.java | 10 +++--- .../standard/StandardPostingsWriter.java | 29 ++++++++++++----- 6 files changed, 59 insertions(+), 48 deletions(-) diff --git a/lucene/src/java/org/apache/lucene/index/SegmentWriteState.java b/lucene/src/java/org/apache/lucene/index/SegmentWriteState.java index 288c75097a0..e44462cc72d 100644 --- a/lucene/src/java/org/apache/lucene/index/SegmentWriteState.java +++ b/lucene/src/java/org/apache/lucene/index/SegmentWriteState.java @@ -53,20 +53,6 @@ public class SegmentWriteState { * tweaking this is rarely useful.*/ public int termIndexInterval; // TODO: this should be private to the codec, not settable here or in IWC - /** Expert: The fraction of TermDocs entries stored in skip tables, - * used to accelerate {@link DocsEnum#advance(int)}. Larger values result in - * smaller indexes, greater acceleration, but fewer accelerable cases, while - * smaller values result in bigger indexes, less acceleration and more - * accelerable cases. More detailed experiments would be useful here. */ - public final int skipInterval = 16; - - /** Expert: The maximum number of skip levels. Smaller values result in - * slightly smaller indexes, but slower skipping in big posting lists. 
- */ - public final int maxSkipLevels = 10; - - - public SegmentWriteState(PrintStream infoStream, Directory directory, String segmentName, FieldInfos fieldInfos, int numDocs, int termIndexInterval, SegmentCodecs segmentCodecs, BufferedDeletes segDeletes) { this.infoStream = infoStream; diff --git a/lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermDocs.java b/lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermDocs.java index e4ef40ae286..ac483f93e4f 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermDocs.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermDocs.java @@ -209,7 +209,8 @@ public class SegmentTermDocs { /** Optimized implementation. */ public boolean skipTo(int target) throws IOException { - if (df >= skipInterval) { // optimized case + // don't skip if the target is close (within skipInterval docs away) + if ((target - skipInterval) >= doc && df >= skipInterval) { // optimized case if (skipListReader == null) skipListReader = new DefaultSkipListReader((IndexInput) freqStream.clone(), maxSkipLevels, skipInterval); // lazily clone diff --git a/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsReaderImpl.java b/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsReaderImpl.java index b693db361c9..7b400ae96be 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsReaderImpl.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsReaderImpl.java @@ -56,6 +56,7 @@ public class SepPostingsReaderImpl extends PostingsReaderBase { int skipInterval; int maxSkipLevels; + int skipMinimum; public SepPostingsReaderImpl(Directory dir, SegmentInfo segmentInfo, int readBufferSize, IntStreamFactory intFactory, String codecId) throws IOException { @@ -102,6 +103,7 @@ public class SepPostingsReaderImpl extends PostingsReaderBase { SepPostingsWriterImpl.VERSION_START, SepPostingsWriterImpl.VERSION_START); skipInterval = 
termsIn.readInt(); maxSkipLevels = termsIn.readInt(); + skipMinimum = termsIn.readInt(); } @Override @@ -231,7 +233,7 @@ public class SepPostingsReaderImpl extends PostingsReaderBase { //System.out.println(" payloadFP=" + termState.payloadFP); } } - if (termState.docFreq >= skipInterval) { + if (termState.docFreq >= skipMinimum) { //System.out.println(" readSkip @ " + termState.bytesReader.pos); if (isFirstTerm) { termState.skipFP = termState.bytesReader.readVLong(); @@ -344,7 +346,7 @@ public class SepPostingsReaderImpl extends PostingsReaderBase { } docFreq = termState.docFreq; - // NOTE: unused if docFreq < skipInterval: + // NOTE: unused if docFreq < skipMinimum: skipFP = termState.skipFP; count = 0; doc = 0; @@ -420,13 +422,10 @@ public class SepPostingsReaderImpl extends PostingsReaderBase { @Override public int advance(int target) throws IOException { - // TODO: jump right to next() if target is < X away - // from where we are now? - - if (docFreq >= skipInterval) { + if ((target - skipInterval) >= doc && docFreq >= skipMinimum) { // There are enough docs in the posting to have - // skip data + // skip data, and its not too close if (skipper == null) { // This DocsEnum has never done any skipping @@ -599,13 +598,10 @@ public class SepPostingsReaderImpl extends PostingsReaderBase { public int advance(int target) throws IOException { //System.out.println("SepD&P advance target=" + target + " vs current=" + doc + " this=" + this); - // TODO: jump right to next() if target is < X away - // from where we are now? 
- - if (docFreq >= skipInterval) { + if ((target - skipInterval) >= doc && docFreq >= skipMinimum) { // There are enough docs in the posting to have - // skip data + // skip data, and its not too close if (skipper == null) { //System.out.println(" create skipper"); diff --git a/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsWriterImpl.java b/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsWriterImpl.java index 9e9b9966808..2f84da6cb95 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsWriterImpl.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsWriterImpl.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.util.Set; import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.SegmentWriteState; @@ -63,8 +64,23 @@ public final class SepPostingsWriterImpl extends PostingsWriterBase { IndexOutput termsOut; final SepSkipListWriter skipListWriter; - final int skipInterval; - final int maxSkipLevels; + /** Expert: The fraction of TermDocs entries stored in skip tables, + * used to accelerate {@link DocsEnum#advance(int)}. Larger values result in + * smaller indexes, greater acceleration, but fewer accelerable cases, while + * smaller values result in bigger indexes, less acceleration and more + * accelerable cases. More detailed experiments would be useful here. */ + final int skipInterval = 16; + + /** + * Expert: minimum docFreq to write any skip data at all + */ + final int skipMinimum = skipInterval; + + /** Expert: The maximum number of skip levels. Smaller values result in + * slightly smaller indexes, but slower skipping in big posting lists. 
+ */ + final int maxSkipLevels = 10; + final int totalNumDocs; boolean storePayloads; @@ -118,15 +134,11 @@ public final class SepPostingsWriterImpl extends PostingsWriterBase { totalNumDocs = state.numDocs; - // TODO: -- abstraction violation - skipListWriter = new SepSkipListWriter(state.skipInterval, - state.maxSkipLevels, + skipListWriter = new SepSkipListWriter(skipInterval, + maxSkipLevels, state.numDocs, freqOut, docOut, posOut, payloadOut); - - skipInterval = state.skipInterval; - maxSkipLevels = state.maxSkipLevels; } @Override @@ -136,6 +148,7 @@ public final class SepPostingsWriterImpl extends PostingsWriterBase { // TODO: -- just ask skipper to "start" here termsOut.writeInt(skipInterval); // write skipInterval termsOut.writeInt(maxSkipLevels); // write maxSkipLevels + termsOut.writeInt(skipMinimum); // write skipMinimum } @Override @@ -264,7 +277,7 @@ public final class SepPostingsWriterImpl extends PostingsWriterBase { } } - if (df >= skipInterval) { + if (df >= skipMinimum) { //System.out.println(" skipFP=" + skipStart); final long skipFP = skipOut.getFilePointer(); skipListWriter.writeSkip(skipOut); diff --git a/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReader.java b/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReader.java index 0c9dd4f5c86..a75dffe00cd 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReader.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReader.java @@ -47,6 +47,7 @@ public class StandardPostingsReader extends PostingsReaderBase { int skipInterval; int maxSkipLevels; + int skipMinimum; //private String segment; @@ -86,6 +87,7 @@ public class StandardPostingsReader extends PostingsReaderBase { skipInterval = termsIn.readInt(); maxSkipLevels = termsIn.readInt(); + skipMinimum = termsIn.readInt(); } // Must keep final because we do non-standard clone @@ -179,7 +181,7 @@ public class StandardPostingsReader 
extends PostingsReaderBase { //System.out.println(" freqFP=" + termState.freqOffset); assert termState.freqOffset < freqIn.length(); - if (termState.docFreq >= skipInterval) { + if (termState.docFreq >= skipMinimum) { termState.skipOffset = termState.bytesReader.readVInt(); //System.out.println(" skipOffset=" + termState.skipOffset + " vs freqIn.length=" + freqIn.length()); assert termState.freqOffset + termState.skipOffset < freqIn.length(); @@ -378,7 +380,7 @@ public class StandardPostingsReader extends PostingsReaderBase { @Override public int advance(int target) throws IOException { - if ((target - skipInterval) >= doc && limit >= skipInterval) { + if ((target - skipInterval) >= doc && limit >= skipMinimum) { // There are enough docs in the posting to have // skip data, and it isn't too close. @@ -528,7 +530,7 @@ public class StandardPostingsReader extends PostingsReaderBase { //System.out.println("StandardR.D&PE advance target=" + target); - if ((target - skipInterval) >= doc && limit >= skipInterval) { + if ((target - skipInterval) >= doc && limit >= skipMinimum) { // There are enough docs in the posting to have // skip data, and it isn't too close @@ -725,7 +727,7 @@ public class StandardPostingsReader extends PostingsReaderBase { //System.out.println("StandardR.D&PE advance seg=" + segment + " target=" + target + " this=" + this); - if ((target - skipInterval) >= doc && limit >= skipInterval) { + if ((target - skipInterval) >= doc && limit >= skipMinimum) { // There are enough docs in the posting to have // skip data, and it isn't too close diff --git a/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsWriter.java b/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsWriter.java index 22e923f2273..aa3dd05b582 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsWriter.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsWriter.java @@ -23,6 +23,7 @@ 
package org.apache.lucene.index.codecs.standard; import java.io.IOException; import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.SegmentWriteState; @@ -44,8 +45,22 @@ public final class StandardPostingsWriter extends PostingsWriterBase { final IndexOutput freqOut; final IndexOutput proxOut; final DefaultSkipListWriter skipListWriter; - final int skipInterval; - final int maxSkipLevels; + /** Expert: The fraction of TermDocs entries stored in skip tables, + * used to accelerate {@link DocsEnum#advance(int)}. Larger values result in + * smaller indexes, greater acceleration, but fewer accelerable cases, while + * smaller values result in bigger indexes, less acceleration and more + * accelerable cases. More detailed experiments would be useful here. */ + final int skipInterval = 16; + + /** + * Expert: minimum docFreq to write any skip data at all + */ + final int skipMinimum = skipInterval; + + /** Expert: The maximum number of skip levels. Smaller values result in + * slightly smaller indexes, but slower skipping in big posting lists. 
+ */ + final int maxSkipLevels = 10; final int totalNumDocs; IndexOutput termsOut; @@ -84,14 +99,11 @@ public final class StandardPostingsWriter extends PostingsWriterBase { totalNumDocs = state.numDocs; - skipListWriter = new DefaultSkipListWriter(state.skipInterval, - state.maxSkipLevels, + skipListWriter = new DefaultSkipListWriter(skipInterval, + maxSkipLevels, state.numDocs, freqOut, proxOut); - - skipInterval = state.skipInterval; - maxSkipLevels = state.maxSkipLevels; } @Override @@ -100,6 +112,7 @@ public final class StandardPostingsWriter extends PostingsWriterBase { CodecUtil.writeHeader(termsOut, CODEC, VERSION_CURRENT); termsOut.writeInt(skipInterval); // write skipInterval termsOut.writeInt(maxSkipLevels); // write maxSkipLevels + termsOut.writeInt(skipMinimum); // write skipMinimum } @Override @@ -218,7 +231,7 @@ public final class StandardPostingsWriter extends PostingsWriterBase { } lastFreqStart = freqStart; - if (df >= skipInterval) { + if (df >= skipMinimum) { bytesWriter.writeVInt((int) (skipListWriter.writeSkip(freqOut)-freqStart)); } From 866f01e2adfb05eac6b4056795d74973689725b5 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Fri, 11 Feb 2011 18:08:08 +0000 Subject: [PATCH 126/185] LUCENE-2915: make CoreCodecProvider public git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1069909 13f79535-47bb-0310-9956-ffa450edef68 --- .../java/org/apache/lucene/index/codecs/CoreCodecProvider.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lucene/src/java/org/apache/lucene/index/codecs/CoreCodecProvider.java b/lucene/src/java/org/apache/lucene/index/codecs/CoreCodecProvider.java index 6a502f31c5e..4947c749781 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/CoreCodecProvider.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/CoreCodecProvider.java @@ -39,7 +39,7 @@ import org.apache.lucene.index.codecs.standard.StandardCodec; *
    */ -class CoreCodecProvider extends CodecProvider { +public class CoreCodecProvider extends CodecProvider { CoreCodecProvider() { register(new StandardCodec()); register(new PreFlexCodec()); From d733c07a567d9941ea2d30152d1d05546170dac8 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Fri, 11 Feb 2011 18:11:37 +0000 Subject: [PATCH 127/185] LUCENE-2915: ... and its ctor too git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1069911 13f79535-47bb-0310-9956-ffa450edef68 --- .../java/org/apache/lucene/index/codecs/CoreCodecProvider.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lucene/src/java/org/apache/lucene/index/codecs/CoreCodecProvider.java b/lucene/src/java/org/apache/lucene/index/codecs/CoreCodecProvider.java index 4947c749781..d6bb79b5ac0 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/CoreCodecProvider.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/CoreCodecProvider.java @@ -40,7 +40,7 @@ import org.apache.lucene.index.codecs.standard.StandardCodec; */ public class CoreCodecProvider extends CodecProvider { - CoreCodecProvider() { + public CoreCodecProvider() { register(new StandardCodec()); register(new PreFlexCodec()); register(new PulsingCodec(1)); From 00ad6999ca385832a49be60e46da24427774f365 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Fri, 11 Feb 2011 18:16:43 +0000 Subject: [PATCH 128/185] remove dead code git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1069912 13f79535-47bb-0310-9956-ffa450edef68 --- .../intblock/FixedIntBlockIndexInput.java | 19 ------------------ .../intblock/FixedIntBlockIndexOutput.java | 19 ------------------ .../intblock/VariableIntBlockIndexInput.java | 18 ----------------- .../intblock/VariableIntBlockIndexOutput.java | 20 ------------------- .../index/codecs/sep/IntIndexInput.java | 14 ------------- .../index/codecs/sep/IntIndexOutput.java | 19 ------------------ .../mocksep/MockSingleIntIndexInput.java | 10 ---------- 
.../mocksep/MockSingleIntIndexOutput.java | 11 ---------- 8 files changed, 130 deletions(-) diff --git a/lucene/src/java/org/apache/lucene/index/codecs/intblock/FixedIntBlockIndexInput.java b/lucene/src/java/org/apache/lucene/index/codecs/intblock/FixedIntBlockIndexInput.java index 1b6829dc28d..652fdea1a70 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/intblock/FixedIntBlockIndexInput.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/intblock/FixedIntBlockIndexInput.java @@ -168,25 +168,6 @@ public abstract class FixedIntBlockIndexInput extends IntIndexInput { assert upto < blockSize; } - @Override - public void read(final IntIndexInput.Reader indexIn, final boolean absolute) throws IOException { - if (absolute) { - fp = indexIn.readVLong(); - upto = indexIn.next(); - } else { - final long delta = indexIn.readVLong(); - if (delta == 0) { - // same block - upto += indexIn.next(); - } else { - // new block - fp += delta; - upto = indexIn.next(); - } - } - assert upto < blockSize; - } - @Override public void seek(final IntIndexInput.Reader other) throws IOException { ((Reader) other).seek(fp, upto); diff --git a/lucene/src/java/org/apache/lucene/index/codecs/intblock/FixedIntBlockIndexOutput.java b/lucene/src/java/org/apache/lucene/index/codecs/intblock/FixedIntBlockIndexOutput.java index 8b5e4988fcd..1c5f757108c 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/intblock/FixedIntBlockIndexOutput.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/intblock/FixedIntBlockIndexOutput.java @@ -93,25 +93,6 @@ public abstract class FixedIntBlockIndexOutput extends IntIndexOutput { lastFP = fp; } - @Override - public void write(IntIndexOutput indexOut, boolean absolute) throws IOException { - if (absolute) { - indexOut.writeVLong(fp); - indexOut.write(upto); - } else if (fp == lastFP) { - // same block - indexOut.writeVLong(0); - assert upto >= lastUpto; - indexOut.write(upto - lastUpto); - } else { - // new block - indexOut.writeVLong(fp 
- lastFP); - indexOut.write(upto); - } - lastUpto = upto; - lastFP = fp; - } - @Override public String toString() { return "fp=" + fp + " upto=" + upto; diff --git a/lucene/src/java/org/apache/lucene/index/codecs/intblock/VariableIntBlockIndexInput.java b/lucene/src/java/org/apache/lucene/index/codecs/intblock/VariableIntBlockIndexInput.java index 0881587d041..cbdb45271ba 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/intblock/VariableIntBlockIndexInput.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/intblock/VariableIntBlockIndexInput.java @@ -189,24 +189,6 @@ public abstract class VariableIntBlockIndexInput extends IntIndexInput { //assert upto < maxBlockSize: "upto=" + upto + " max=" + maxBlockSize; } - @Override - public void read(final IntIndexInput.Reader indexIn, final boolean absolute) throws IOException { - if (absolute) { - fp = indexIn.readVLong(); - upto = indexIn.next()&0xFF; - } else { - final long delta = indexIn.readVLong(); - if (delta == 0) { - // same block - upto = indexIn.next()&0xFF; - } else { - // new block - fp += delta; - upto = indexIn.next()&0xFF; - } - } - } - @Override public String toString() { return "VarIntBlock.Index fp=" + fp + " upto=" + upto + " maxBlock=" + maxBlockSize; diff --git a/lucene/src/java/org/apache/lucene/index/codecs/intblock/VariableIntBlockIndexOutput.java b/lucene/src/java/org/apache/lucene/index/codecs/intblock/VariableIntBlockIndexOutput.java index f32b0d47c77..1dfbf32b3c2 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/intblock/VariableIntBlockIndexOutput.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/intblock/VariableIntBlockIndexOutput.java @@ -103,26 +103,6 @@ public abstract class VariableIntBlockIndexOutput extends IntIndexOutput { lastUpto = upto; lastFP = fp; } - - @Override - public void write(IntIndexOutput indexOut, boolean absolute) throws IOException { - assert upto >= 0; - if (absolute) { - indexOut.writeVLong(fp); - indexOut.write(upto); - } else if 
(fp == lastFP) { - // same block - indexOut.writeVLong(0); - assert upto >= lastUpto; - indexOut.write(upto); - } else { - // new block - indexOut.writeVLong(fp - lastFP); - indexOut.write(upto); - } - lastUpto = upto; - lastFP = fp; - } } @Override diff --git a/lucene/src/java/org/apache/lucene/index/codecs/sep/IntIndexInput.java b/lucene/src/java/org/apache/lucene/index/codecs/sep/IntIndexInput.java index 631476df0ba..9faef714a82 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/sep/IntIndexInput.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/sep/IntIndexInput.java @@ -41,8 +41,6 @@ public abstract class IntIndexInput implements Closeable { public abstract void read(DataInput indexIn, boolean absolute) throws IOException; - public abstract void read(IntIndexInput.Reader indexIn, boolean absolute) throws IOException; - /** Seeks primary stream to the last read offset */ public abstract void seek(IntIndexInput.Reader stream) throws IOException; @@ -57,18 +55,6 @@ public abstract class IntIndexInput implements Closeable { /** Reads next single int */ public abstract int next() throws IOException; - /** Encodes as 1 or 2 ints, and can only use 61 of the 64 - * long bits. */ - public long readVLong() throws IOException { - final int v = next(); - if ((v & 1) == 0) { - return v >> 1; - } else { - final long v2 = next(); - return (v2 << 30) | (v >> 1); - } - } - /** Reads next chunk of ints */ private IntsRef bulkResult; diff --git a/lucene/src/java/org/apache/lucene/index/codecs/sep/IntIndexOutput.java b/lucene/src/java/org/apache/lucene/index/codecs/sep/IntIndexOutput.java index 141cc6382e7..7b3a9201ce4 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/sep/IntIndexOutput.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/sep/IntIndexOutput.java @@ -38,23 +38,6 @@ public abstract class IntIndexOutput implements Closeable { * >= 0. 
*/ public abstract void write(int v) throws IOException; - public static final long MAX_SINGLE_INT_VLONG = Integer.MAX_VALUE - (1<<30); - public static final long MAX_VLONG = Long.MAX_VALUE - (1L<<62) - (1L<<61); - - /** Encodes as 1 or 2 ints, and can only use 61 of the 64 - * long bits. */ - public void writeVLong(long v) throws IOException { - assert v >= 0: "v=" + v; - assert v < MAX_VLONG: "v=" + v; - // we cannot pass a negative int - if (v <= MAX_SINGLE_INT_VLONG) { - write(((int) v)<<1); - } else { - write(((int) ((v & MAX_SINGLE_INT_VLONG))<<1) | 1); - write(((int) (v >> 30))); - } - } - public abstract static class Index { /** Internally records the current location */ @@ -66,8 +49,6 @@ public abstract class IntIndexOutput implements Closeable { /** Writes "location" of current output pointer of primary * output to different output (out) */ public abstract void write(IndexOutput indexOut, boolean absolute) throws IOException; - - public abstract void write(IntIndexOutput indexOut, boolean absolute) throws IOException; } /** If you are indexing the primary output file, call diff --git a/lucene/src/test-framework/org/apache/lucene/index/codecs/mocksep/MockSingleIntIndexInput.java b/lucene/src/test-framework/org/apache/lucene/index/codecs/mocksep/MockSingleIntIndexInput.java index 031794dd3ca..242c244c442 100644 --- a/lucene/src/test-framework/org/apache/lucene/index/codecs/mocksep/MockSingleIntIndexInput.java +++ b/lucene/src/test-framework/org/apache/lucene/index/codecs/mocksep/MockSingleIntIndexInput.java @@ -82,16 +82,6 @@ public class MockSingleIntIndexInput extends IntIndexInput { } } - @Override - public void read(IntIndexInput.Reader indexIn, boolean absolute) - throws IOException { - if (absolute) { - fp = indexIn.readVLong(); - } else { - fp += indexIn.readVLong(); - } - } - @Override public void set(IntIndexInput.Index other) { fp = ((Index) other).fp; diff --git 
a/lucene/src/test-framework/org/apache/lucene/index/codecs/mocksep/MockSingleIntIndexOutput.java b/lucene/src/test-framework/org/apache/lucene/index/codecs/mocksep/MockSingleIntIndexOutput.java index 98ba2b4bc9d..46c4cf26028 100644 --- a/lucene/src/test-framework/org/apache/lucene/index/codecs/mocksep/MockSingleIntIndexOutput.java +++ b/lucene/src/test-framework/org/apache/lucene/index/codecs/mocksep/MockSingleIntIndexOutput.java @@ -77,17 +77,6 @@ public class MockSingleIntIndexOutput extends IntIndexOutput { } lastFP = fp; } - - @Override - public void write(IntIndexOutput indexOut, boolean absolute) - throws IOException { - if (absolute) { - indexOut.writeVLong(fp); - } else { - indexOut.writeVLong(fp - lastFP); - } - lastFP = fp; - } @Override public String toString() { From f7a130e393ee5dbe18772e5df50ddb89571d6be8 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Fri, 11 Feb 2011 19:41:55 +0000 Subject: [PATCH 129/185] LUCENE-2905: write pointers and skip data more efficiently for fixed and variable intblock git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1069930 13f79535-47bb-0310-9956-ffa450edef68 --- .../intblock/FixedIntBlockIndexInput.java | 12 ++++++------ .../intblock/FixedIntBlockIndexOutput.java | 8 ++++---- .../intblock/VariableIntBlockIndexInput.java | 12 ++++++------ .../intblock/VariableIntBlockIndexOutput.java | 18 ++++++++---------- 4 files changed, 24 insertions(+), 26 deletions(-) diff --git a/lucene/src/java/org/apache/lucene/index/codecs/intblock/FixedIntBlockIndexInput.java b/lucene/src/java/org/apache/lucene/index/codecs/intblock/FixedIntBlockIndexInput.java index 652fdea1a70..0d3ef2ec465 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/intblock/FixedIntBlockIndexInput.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/intblock/FixedIntBlockIndexInput.java @@ -152,17 +152,17 @@ public abstract class FixedIntBlockIndexInput extends IntIndexInput { @Override public void read(final DataInput indexIn, final 
boolean absolute) throws IOException { if (absolute) { - fp = indexIn.readVLong(); upto = indexIn.readVInt(); + fp = indexIn.readVLong(); } else { - final long delta = indexIn.readVLong(); - if (delta == 0) { + final int uptoDelta = indexIn.readVInt(); + if ((uptoDelta & 1) == 1) { // same block - upto += indexIn.readVInt(); + upto += uptoDelta >>> 1; } else { // new block - fp += delta; - upto = indexIn.readVInt(); + upto = uptoDelta >>> 1; + fp += indexIn.readVLong(); } } assert upto < blockSize; diff --git a/lucene/src/java/org/apache/lucene/index/codecs/intblock/FixedIntBlockIndexOutput.java b/lucene/src/java/org/apache/lucene/index/codecs/intblock/FixedIntBlockIndexOutput.java index 1c5f757108c..ffbce61c493 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/intblock/FixedIntBlockIndexOutput.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/intblock/FixedIntBlockIndexOutput.java @@ -77,17 +77,17 @@ public abstract class FixedIntBlockIndexOutput extends IntIndexOutput { @Override public void write(IndexOutput indexOut, boolean absolute) throws IOException { if (absolute) { - indexOut.writeVLong(fp); indexOut.writeVInt(upto); + indexOut.writeVLong(fp); } else if (fp == lastFP) { // same block - indexOut.writeVLong(0); assert upto >= lastUpto; - indexOut.writeVInt(upto - lastUpto); + int uptoDelta = upto - lastUpto; + indexOut.writeVInt(uptoDelta << 1 | 1); } else { // new block + indexOut.writeVInt(upto << 1); indexOut.writeVLong(fp - lastFP); - indexOut.writeVInt(upto); } lastUpto = upto; lastFP = fp; diff --git a/lucene/src/java/org/apache/lucene/index/codecs/intblock/VariableIntBlockIndexInput.java b/lucene/src/java/org/apache/lucene/index/codecs/intblock/VariableIntBlockIndexInput.java index cbdb45271ba..d4b7fcb41ba 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/intblock/VariableIntBlockIndexInput.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/intblock/VariableIntBlockIndexInput.java @@ -171,17 +171,17 @@ public 
abstract class VariableIntBlockIndexInput extends IntIndexInput { @Override public void read(final DataInput indexIn, final boolean absolute) throws IOException { if (absolute) { + upto = indexIn.readVInt(); fp = indexIn.readVLong(); - upto = indexIn.readByte()&0xFF; } else { - final long delta = indexIn.readVLong(); - if (delta == 0) { + final int uptoDelta = indexIn.readVInt(); + if ((uptoDelta & 1) == 1) { // same block - upto = indexIn.readByte()&0xFF; + upto += uptoDelta >>> 1; } else { // new block - fp += delta; - upto = indexIn.readByte()&0xFF; + upto = uptoDelta >>> 1; + fp += indexIn.readVLong(); } } // TODO: we can't do this assert because non-causal diff --git a/lucene/src/java/org/apache/lucene/index/codecs/intblock/VariableIntBlockIndexOutput.java b/lucene/src/java/org/apache/lucene/index/codecs/intblock/VariableIntBlockIndexOutput.java index 1dfbf32b3c2..d39db6cd750 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/intblock/VariableIntBlockIndexOutput.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/intblock/VariableIntBlockIndexOutput.java @@ -42,16 +42,14 @@ public abstract class VariableIntBlockIndexOutput extends IntIndexOutput { private int upto; - private static final int MAX_BLOCK_SIZE = 1 << 8; + // TODO what Var-Var codecs exist in practice... and what are there blocksizes like? + // if its less than 128 we should set that as max and use byte? - /** NOTE: maxBlockSize plus the max non-causal lookahead - * of your codec must be less than 256. EG Simple9 + /** NOTE: maxBlockSize must be the maximum block size + * plus the max non-causal lookahead of your codec. EG Simple9 * requires lookahead=1 because on seeing the Nth value * it knows it must now encode the N-1 values before it. 
*/ protected VariableIntBlockIndexOutput(IndexOutput out, int maxBlockSize) throws IOException { - if (maxBlockSize > MAX_BLOCK_SIZE) { - throw new IllegalArgumentException("maxBlockSize must be <= " + MAX_BLOCK_SIZE + "; got " + maxBlockSize); - } this.out = out; out.writeInt(maxBlockSize); } @@ -88,17 +86,17 @@ public abstract class VariableIntBlockIndexOutput extends IntIndexOutput { public void write(IndexOutput indexOut, boolean absolute) throws IOException { assert upto >= 0; if (absolute) { + indexOut.writeVInt(upto); indexOut.writeVLong(fp); - indexOut.writeByte((byte) upto); } else if (fp == lastFP) { // same block - indexOut.writeVLong(0); assert upto >= lastUpto; - indexOut.writeByte((byte) upto); + int uptoDelta = upto - lastUpto; + indexOut.writeVInt(uptoDelta << 1 | 1); } else { // new block + indexOut.writeVInt(upto << 1); indexOut.writeVLong(fp - lastFP); - indexOut.writeByte((byte) upto); } lastUpto = upto; lastFP = fp; From 4953202fbade58e6a8a1c78b47e5ccd23ea973da Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Fri, 11 Feb 2011 22:51:53 +0000 Subject: [PATCH 130/185] LUCENE-2892: Add QueryParser.newFieldQuery git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1069977 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/CHANGES.txt | 4 + .../lucene/queryParser/QueryParserBase.java | 9 +- .../lucene/queryParser/TestQueryParser.java | 84 +++++++++++++++++++ 3 files changed, 96 insertions(+), 1 deletion(-) diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index a9a51677f4f..0f41674c542 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -195,6 +195,10 @@ API Changes for building top-level norms. If you really need a top-level norms, use MultiNorms or SlowMultiReaderWrapper. (Robert Muir, Mike Mccandless) +* LUCENE-2892: Add QueryParser.newFieldQuery (called by getFieldQuery by default) + which takes Analyzer as a parameter, for easier customization by subclasses. 
+ (Robert Muir) + New features * LUCENE-2604: Added RegexpQuery support to QueryParser. Regular expressions diff --git a/lucene/src/java/org/apache/lucene/queryParser/QueryParserBase.java b/lucene/src/java/org/apache/lucene/queryParser/QueryParserBase.java index 41ad00987ed..f9668e03271 100644 --- a/lucene/src/java/org/apache/lucene/queryParser/QueryParserBase.java +++ b/lucene/src/java/org/apache/lucene/queryParser/QueryParserBase.java @@ -467,7 +467,14 @@ public abstract class QueryParserBase { /** * @exception org.apache.lucene.queryParser.ParseException throw in overridden method to disallow */ - protected Query getFieldQuery(String field, String queryText, boolean quoted) throws ParseException { + protected Query getFieldQuery(String field, String queryText, boolean quoted) throws ParseException { + return newFieldQuery(analyzer, field, queryText, quoted); + } + + /** + * @exception org.apache.lucene.queryParser.ParseException throw in overridden method to disallow + */ + protected Query newFieldQuery(Analyzer analyzer, String field, String queryText, boolean quoted) throws ParseException { // Use the analyzer to get all the tokens, and then build a TermQuery, // PhraseQuery, or nothing based on the term count diff --git a/lucene/src/test/org/apache/lucene/queryParser/TestQueryParser.java b/lucene/src/test/org/apache/lucene/queryParser/TestQueryParser.java index 4803a58a339..e2c433de55c 100644 --- a/lucene/src/test/org/apache/lucene/queryParser/TestQueryParser.java +++ b/lucene/src/test/org/apache/lucene/queryParser/TestQueryParser.java @@ -35,6 +35,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.document.DateTools; import org.apache.lucene.document.Document; import 
org.apache.lucene.document.Field; @@ -58,6 +59,7 @@ import org.apache.lucene.search.WildcardQuery; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.Version; import org.apache.lucene.util.automaton.BasicAutomata; import org.apache.lucene.util.automaton.CharacterRunAutomaton; import org.apache.lucene.util.automaton.RegExp; @@ -1175,5 +1177,87 @@ public class TestQueryParser extends LuceneTestCase { // expected } } + + /** + * adds synonym of "dog" for "dogs". + */ + private class MockSynonymFilter extends TokenFilter { + CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); + PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class); + boolean addSynonym = false; + + public MockSynonymFilter(TokenStream input) { + super(input); + } + @Override + public final boolean incrementToken() throws IOException { + if (addSynonym) { // inject our synonym + clearAttributes(); + termAtt.setEmpty().append("dog"); + posIncAtt.setPositionIncrement(0); + addSynonym = false; + return true; + } + + if (input.incrementToken()) { + addSynonym = termAtt.toString().equals("dogs"); + return true; + } else { + return false; + } + } + } + + /** whitespace+lowercase analyzer with synonyms */ + private class Analyzer1 extends Analyzer { + @Override + public TokenStream tokenStream(String fieldName, Reader reader) { + return new MockSynonymFilter(new MockTokenizer(reader, MockTokenizer.WHITESPACE, true)); + } + } + + /** whitespace+lowercase analyzer without synonyms */ + private class Analyzer2 extends Analyzer { + @Override + public TokenStream tokenStream(String fieldName, Reader reader) { + return new MockTokenizer(reader, MockTokenizer.WHITESPACE, true); + } + } + + /** query parser that doesn't expand synonyms when users use double quotes */ + private class SmartQueryParser extends QueryParser { + Analyzer morePrecise = new 
Analyzer2(); + + public SmartQueryParser() { + super(TEST_VERSION_CURRENT, "field", new Analyzer1()); + } + + @Override + protected Query getFieldQuery(String field, String queryText, boolean quoted) + throws ParseException { + if (quoted) + return newFieldQuery(morePrecise, field, queryText, quoted); + else + return super.getFieldQuery(field, queryText, quoted); + } + } + + public void testNewFieldQuery() throws Exception { + /** ordinary behavior, synonyms form uncoordinated boolean query */ + QueryParser dumb = new QueryParser(TEST_VERSION_CURRENT, "field", new Analyzer1()); + BooleanQuery expanded = new BooleanQuery(true); + expanded.add(new TermQuery(new Term("field", "dogs")), BooleanClause.Occur.SHOULD); + expanded.add(new TermQuery(new Term("field", "dog")), BooleanClause.Occur.SHOULD); + assertEquals(expanded, dumb.parse("\"dogs\"")); + /** even with the phrase operator the behavior is the same */ + assertEquals(expanded, dumb.parse("dogs")); + + /** custom behavior, the synonyms are expanded, unless you use quote operator */ + QueryParser smart = new SmartQueryParser(); + assertEquals(expanded, smart.parse("dogs")); + + Query unexpanded = new TermQuery(new Term("field", "dogs")); + assertEquals(unexpanded, smart.parse("\"dogs\"")); + } } From a01e9cbb861b650a226ed16d149147a00cc556fe Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Fri, 11 Feb 2011 23:16:14 +0000 Subject: [PATCH 131/185] LUCENE-2912: remove field param from computeNorm, scorePayload ; remove UOE'd lengthNorm, switch SweetSpot to per-field git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1069980 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/MIGRATE.txt | 6 +- lucene/contrib/CHANGES.txt | 5 ++ .../instantiated/InstantiatedIndexWriter.java | 2 +- .../lucene/index/memory/MemoryIndex.java | 2 +- .../lucene/index/FieldNormModifier.java | 2 +- .../lucene/misc/SweetSpotSimilarity.java | 66 ++++-------------- .../lucene/index/TestFieldNormModifier.java | 2 +- 
.../lucene/misc/SweetSpotSimilarityTest.java | 68 ++++++++++++------- .../lucene/misc/TestLengthNormModifier.java | 4 +- .../apache/lucene/document/AbstractField.java | 9 +-- .../org/apache/lucene/document/Fieldable.java | 10 ++- .../org/apache/lucene/index/IndexReader.java | 4 +- .../lucene/index/NormsWriterPerField.java | 2 +- .../lucene/search/DefaultSimilarity.java | 2 +- .../org/apache/lucene/search/Similarity.java | 41 +---------- .../search/payloads/PayloadNearQuery.java | 2 +- .../search/payloads/PayloadTermQuery.java | 6 +- .../index/TestIndexReaderCloneNorms.java | 2 +- .../lucene/index/TestMaxTermFrequency.java | 2 +- .../org/apache/lucene/index/TestNorms.java | 4 +- .../org/apache/lucene/index/TestOmitTf.java | 2 +- .../lucene/search/JustCompileSearch.java | 2 +- .../search/TestDisjunctionMaxQuery.java | 2 +- .../apache/lucene/search/TestSimilarity.java | 2 +- .../lucene/search/TestSimilarityProvider.java | 4 +- .../search/payloads/TestPayloadNearQuery.java | 4 +- .../search/payloads/TestPayloadTermQuery.java | 4 +- .../search/function/TestFunctionQuery.java | 2 +- 28 files changed, 103 insertions(+), 160 deletions(-) diff --git a/lucene/MIGRATE.txt b/lucene/MIGRATE.txt index c5e85969e03..2e2adf25fa4 100644 --- a/lucene/MIGRATE.txt +++ b/lucene/MIGRATE.txt @@ -332,8 +332,12 @@ LUCENE-1458, LUCENE-2111: Flexible Indexing toString(), port your customization over to reflectWith(). reflectAsString() would then return what toString() did before. -* LUCENE-2236: DefaultSimilarity can no longer be set statically (and dangerously) for the entire JVM. +* LUCENE-2236, LUCENE-2912: DefaultSimilarity can no longer be set statically + (and dangerously) for the entire JVM. Instead, IndexWriterConfig and IndexSearcher now take a SimilarityProvider. Similarity can now be configured on a per-field basis. Similarity retains only the field-specific relevance methods such as tf() and idf(). 
+ Previously some (but not all) of these methods, such as computeNorm and scorePayload took + field as a parameter, this is removed due to the fact the entire Similarity (all methods) + can now be configured per-field. Methods that apply to the entire query such as coord() and queryNorm() exist in SimilarityProvider. diff --git a/lucene/contrib/CHANGES.txt b/lucene/contrib/CHANGES.txt index 7f2fe40dffc..7615463e391 100644 --- a/lucene/contrib/CHANGES.txt +++ b/lucene/contrib/CHANGES.txt @@ -38,6 +38,11 @@ API Changes * LUCENE-2638 MakeHighFreqTerms.TermStats public to make it more useful for API use. (Andrzej Bialecki) + * LUCENE-2912: The field-specific hashmaps in SweetSpotSimilarity were removed. + Instead, use SimilarityProvider to return different SweetSpotSimilaritys + for different fields, this way all parameters (such as TF factors) can be + customized on a per-field basis. (Robert Muir) + ======================= Lucene 3.x (not yet released) ======================= Changes in backwards compatibility policy diff --git a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java index 6114e9fff9e..f55cb573b01 100644 --- a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java +++ b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java @@ -241,7 +241,7 @@ public class InstantiatedIndexWriter implements Closeable { final FieldInvertState invertState = new FieldInvertState(); invertState.setBoost(eFieldTermDocInfoFactoriesByTermText.getKey().boost * document.getDocument().getBoost()); invertState.setLength(eFieldTermDocInfoFactoriesByTermText.getKey().fieldLength); - final float norm = similarityProvider.get(fieldName).computeNorm(fieldName, invertState); + final float norm = 
similarityProvider.get(fieldName).computeNorm(invertState); normsByFieldNameAndDocumentNumber.get(fieldName)[document.getDocumentNumber()] = similarityProvider.get(fieldName).encodeNormValue(norm); } else { System.currentTimeMillis(); diff --git a/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java b/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java index 4ffac05bcf4..0e8c1f42c05 100644 --- a/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java +++ b/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java @@ -1190,7 +1190,7 @@ public class MemoryIndex { int numOverlapTokens = info != null ? info.numOverlapTokens : 0; float boost = info != null ? info.getBoost() : 1.0f; FieldInvertState invertState = new FieldInvertState(0, numTokens, numOverlapTokens, 0, boost); - float n = fieldSim.computeNorm(fieldName, invertState); + float n = fieldSim.computeNorm(invertState); byte norm = fieldSim.encodeNormValue(n); norms = new byte[] {norm}; diff --git a/lucene/contrib/misc/src/java/org/apache/lucene/index/FieldNormModifier.java b/lucene/contrib/misc/src/java/org/apache/lucene/index/FieldNormModifier.java index acebe221a9a..b630f45bf9b 100644 --- a/lucene/contrib/misc/src/java/org/apache/lucene/index/FieldNormModifier.java +++ b/lucene/contrib/misc/src/java/org/apache/lucene/index/FieldNormModifier.java @@ -149,7 +149,7 @@ public class FieldNormModifier { for (int d = 0; d < termCounts.length; d++) { if (delDocs == null || !delDocs.get(d)) { invertState.setLength(termCounts[d]); - subReader.setNorm(d, fieldName, fieldSim.encodeNormValue(fieldSim.computeNorm(fieldName, invertState))); + subReader.setNorm(d, fieldName, fieldSim.encodeNormValue(fieldSim.computeNorm(invertState))); } } } diff --git a/lucene/contrib/misc/src/java/org/apache/lucene/misc/SweetSpotSimilarity.java b/lucene/contrib/misc/src/java/org/apache/lucene/misc/SweetSpotSimilarity.java index 
cda2f0790bf..c5c454a14bb 100644 --- a/lucene/contrib/misc/src/java/org/apache/lucene/misc/SweetSpotSimilarity.java +++ b/lucene/contrib/misc/src/java/org/apache/lucene/misc/SweetSpotSimilarity.java @@ -20,9 +20,6 @@ package org.apache.lucene.misc; import org.apache.lucene.search.DefaultSimilarity; import org.apache.lucene.index.FieldInvertState; -import java.util.Map; -import java.util.HashMap; - /** * A similarity with a lengthNorm that provides for a "plateau" of * equally good lengths, and tf helper functions. @@ -50,11 +47,6 @@ public class SweetSpotSimilarity extends DefaultSimilarity { private int ln_max = 1; private float ln_steep = 0.5f; - private Map ln_maxs = new HashMap(7); - private Map ln_mins = new HashMap(7); - private Map ln_steeps = new HashMap(7); - private Map ln_overlaps = new HashMap(7); - private float tf_base = 0.0f; private float tf_min = 0.0f; @@ -98,55 +90,31 @@ public class SweetSpotSimilarity extends DefaultSimilarity { * Sets the default function variables used by lengthNorm when no field * specific variables have been set. * - * @see #lengthNorm + * @see #computeLengthNorm */ - public void setLengthNormFactors(int min, int max, float steepness) { + public void setLengthNormFactors(int min, int max, float steepness, boolean discountOverlaps) { this.ln_min = min; this.ln_max = max; this.ln_steep = steepness; - } - - /** - * Sets the function variables used by lengthNorm for a specific named field. - * - * @param field field name - * @param min minimum value - * @param max maximum value - * @param steepness steepness of the curve - * @param discountOverlaps if true, numOverlapTokens will be - * subtracted from numTokens; if false then - * numOverlapTokens will be assumed to be 0 (see - * {@link DefaultSimilarity#computeNorm(String, FieldInvertState)} for details). 
- * - * @see #lengthNorm - */ - public void setLengthNormFactors(String field, int min, int max, - float steepness, boolean discountOverlaps) { - ln_mins.put(field, Integer.valueOf(min)); - ln_maxs.put(field, Integer.valueOf(max)); - ln_steeps.put(field, Float.valueOf(steepness)); - ln_overlaps.put(field, new Boolean(discountOverlaps)); + this.discountOverlaps = discountOverlaps; } /** * Implemented as state.getBoost() * - * lengthNorm(fieldName, numTokens) where + * computeLengthNorm(numTokens) where * numTokens does not count overlap tokens if * discountOverlaps is true by default or true for this * specific field. */ @Override - public float computeNorm(String fieldName, FieldInvertState state) { + public float computeNorm(FieldInvertState state) { final int numTokens; - boolean overlaps = discountOverlaps; - if (ln_overlaps.containsKey(fieldName)) { - overlaps = ln_overlaps.get(fieldName).booleanValue(); - } - if (overlaps) + + if (discountOverlaps) numTokens = state.getLength() - state.getNumOverlap(); else numTokens = state.getLength(); - return state.getBoost() * computeLengthNorm(fieldName, numTokens); + return state.getBoost() * computeLengthNorm(numTokens); } /** @@ -167,20 +135,10 @@ public class SweetSpotSimilarity extends DefaultSimilarity { * * @see #setLengthNormFactors */ - public float computeLengthNorm(String fieldName, int numTerms) { - int l = ln_min; - int h = ln_max; - float s = ln_steep; - - if (ln_mins.containsKey(fieldName)) { - l = ln_mins.get(fieldName).intValue(); - } - if (ln_maxs.containsKey(fieldName)) { - h = ln_maxs.get(fieldName).intValue(); - } - if (ln_steeps.containsKey(fieldName)) { - s = ln_steeps.get(fieldName).floatValue(); - } + public float computeLengthNorm(int numTerms) { + final int l = ln_min; + final int h = ln_max; + final float s = ln_steep; return (float) (1.0f / diff --git a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java 
b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java index 48bb42dfcf5..33e97be4405 100644 --- a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java +++ b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java @@ -44,7 +44,7 @@ public class TestFieldNormModifier extends LuceneTestCase { /** inverts the normal notion of lengthNorm */ public static SimilarityProvider s = new DefaultSimilarity() { @Override - public float computeNorm(String fieldName, FieldInvertState state) { + public float computeNorm(FieldInvertState state) { return state.getBoost() * (discountOverlaps ? state.getLength() - state.getNumOverlap() : state.getLength()); } }; diff --git a/lucene/contrib/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java b/lucene/contrib/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java index 170ef247842..dbc76dd3b90 100644 --- a/lucene/contrib/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java +++ b/lucene/contrib/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java @@ -20,6 +20,7 @@ package org.apache.lucene.misc; import org.apache.lucene.search.DefaultSimilarity; import org.apache.lucene.search.Similarity; +import org.apache.lucene.search.SimilarityProvider; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.index.FieldInvertState; @@ -30,8 +31,8 @@ public class SweetSpotSimilarityTest extends LuceneTestCase { public void testSweetSpotComputeNorm() { - SweetSpotSimilarity ss = new SweetSpotSimilarity(); - ss.setLengthNormFactors(1,1,0.5f); + final SweetSpotSimilarity ss = new SweetSpotSimilarity(); + ss.setLengthNormFactors(1,1,0.5f,true); Similarity d = new DefaultSimilarity(); Similarity s = ss; @@ -43,28 +44,28 @@ public class SweetSpotSimilarityTest extends LuceneTestCase { for (int i = 1; i < 1000; i++) { invertState.setLength(i); assertEquals("base case: i="+i, - d.computeNorm("foo", invertState), - 
s.computeNorm("foo", invertState), + d.computeNorm(invertState), + s.computeNorm(invertState), 0.0f); } // make a sweet spot - ss.setLengthNormFactors(3,10,0.5f); + ss.setLengthNormFactors(3,10,0.5f,true); for (int i = 3; i <=10; i++) { invertState.setLength(i); assertEquals("3,10: spot i="+i, 1.0f, - s.computeNorm("foo", invertState), + s.computeNorm(invertState), 0.0f); } for (int i = 10; i < 1000; i++) { invertState.setLength(i-9); - final float normD = d.computeNorm("foo", invertState); + final float normD = d.computeNorm(invertState); invertState.setLength(i); - final float normS = s.computeNorm("foo", invertState); + final float normS = s.computeNorm(invertState); assertEquals("3,10: 10The encoding uses a three-bit mantissa, a five-bit exponent, and @@ -781,7 +745,6 @@ public abstract class Similarity { * The default implementation returns 1. * * @param docId The docId currently being scored. If this value is {@link #NO_DOC_ID_PROVIDED}, then it should be assumed that the PayloadQuery implementation does not provide document information - * @param fieldName The fieldName of the term this payload belongs to * @param start The start position of the payload * @param end The end position of the payload * @param payload The payload byte array to be scored @@ -791,7 +754,7 @@ public abstract class Similarity { * */ // TODO: maybe switch this API to BytesRef? 
- public float scorePayload(int docId, String fieldName, int start, int end, byte [] payload, int offset, int length) + public float scorePayload(int docId, int start, int end, byte [] payload, int offset, int length) { return 1; } diff --git a/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java b/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java index 35356f30f7d..da91ef59f9d 100644 --- a/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java +++ b/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java @@ -192,7 +192,7 @@ public class PayloadNearQuery extends SpanNearQuery { protected void processPayloads(Collection payLoads, int start, int end) { for (final byte[] thePayload : payLoads) { payloadScore = function.currentScore(doc, fieldName, start, end, - payloadsSeen, payloadScore, similarity.scorePayload(doc, fieldName, + payloadsSeen, payloadScore, similarity.scorePayload(doc, spans.start(), spans.end(), thePayload, 0, thePayload.length)); ++payloadsSeen; } diff --git a/lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java b/lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java index 81da6a4adf0..b3415a7b42c 100644 --- a/lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java +++ b/lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java @@ -41,7 +41,7 @@ import java.io.IOException; * {@link org.apache.lucene.index.Term} occurs. *

    * In order to take advantage of this, you must override - * {@link org.apache.lucene.search.Similarity#scorePayload(int, String, int, int, byte[],int,int)} + * {@link org.apache.lucene.search.Similarity#scorePayload(int, int, int, byte[],int,int)} * which returns 1 by default. *

    * Payload scores are aggregated using a pluggable {@link PayloadFunction}. @@ -119,14 +119,14 @@ public class PayloadTermQuery extends SpanTermQuery { if (payload != null) { payloadScore = function.currentScore(doc, term.field(), spans.start(), spans.end(), payloadsSeen, payloadScore, - similarity.scorePayload(doc, term.field(), spans.start(), + similarity.scorePayload(doc, spans.start(), spans.end(), payload.bytes, payload.offset, payload.length)); } else { payloadScore = function.currentScore(doc, term.field(), spans.start(), spans.end(), payloadsSeen, payloadScore, - similarity.scorePayload(doc, term.field(), spans.start(), + similarity.scorePayload(doc, spans.start(), spans.end(), null, 0, 0)); diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java b/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java index e2c4c017a95..4f49cfb8dfb 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java @@ -43,7 +43,7 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase { private class SimilarityOne extends DefaultSimilarity { @Override - public float computeNorm(String fieldName, FieldInvertState state) { + public float computeNorm(FieldInvertState state) { // diable length norm return state.getBoost(); } diff --git a/lucene/src/test/org/apache/lucene/index/TestMaxTermFrequency.java b/lucene/src/test/org/apache/lucene/index/TestMaxTermFrequency.java index fe1f29be001..c6b9be60031 100644 --- a/lucene/src/test/org/apache/lucene/index/TestMaxTermFrequency.java +++ b/lucene/src/test/org/apache/lucene/index/TestMaxTermFrequency.java @@ -109,7 +109,7 @@ public class TestMaxTermFrequency extends LuceneTestCase { } @Override - public float computeNorm(String field, FieldInvertState state) { + public float computeNorm(FieldInvertState state) { return (float) state.getMaxTermFrequency(); } } diff --git 
a/lucene/src/test/org/apache/lucene/index/TestNorms.java b/lucene/src/test/org/apache/lucene/index/TestNorms.java index af6e7248024..2951ef8a30b 100755 --- a/lucene/src/test/org/apache/lucene/index/TestNorms.java +++ b/lucene/src/test/org/apache/lucene/index/TestNorms.java @@ -42,7 +42,7 @@ public class TestNorms extends LuceneTestCase { private class SimilarityOne extends DefaultSimilarity { @Override - public float computeNorm(String fieldName, FieldInvertState state) { + public float computeNorm(FieldInvertState state) { // Disable length norm return state.getBoost(); } @@ -252,7 +252,7 @@ public class TestNorms extends LuceneTestCase { } @Override - public float computeNorm(String field, FieldInvertState state) { + public float computeNorm(FieldInvertState state) { return (float) state.getLength(); } } diff --git a/lucene/src/test/org/apache/lucene/index/TestOmitTf.java b/lucene/src/test/org/apache/lucene/index/TestOmitTf.java index 3b26e4e7cd9..d760018a6e3 100644 --- a/lucene/src/test/org/apache/lucene/index/TestOmitTf.java +++ b/lucene/src/test/org/apache/lucene/index/TestOmitTf.java @@ -36,7 +36,7 @@ import org.apache.lucene.search.Explanation.IDFExplanation; public class TestOmitTf extends LuceneTestCase { public static class SimpleSimilarity extends Similarity implements SimilarityProvider { - @Override public float computeNorm(String field, FieldInvertState state) { return state.getBoost(); } + @Override public float computeNorm(FieldInvertState state) { return state.getBoost(); } @Override public float tf(float freq) { return freq; } @Override public float sloppyFreq(int distance) { return 2.0f; } @Override public float idf(int docFreq, int numDocs) { return 1.0f; } diff --git a/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java b/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java index db54970334d..e61563648e6 100644 --- a/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java +++ 
b/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java @@ -248,7 +248,7 @@ final class JustCompileSearch { } @Override - public float computeNorm(String fieldName, FieldInvertState state) { + public float computeNorm(FieldInvertState state) { throw new UnsupportedOperationException(UNSUPPORTED_MSG); } diff --git a/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java b/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java index 012e95eb98f..7541dafe94c 100644 --- a/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java +++ b/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java @@ -62,7 +62,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { } @Override - public float computeNorm(String fieldName, FieldInvertState state) { + public float computeNorm(FieldInvertState state) { // Disable length norm return state.getBoost(); } diff --git a/lucene/src/test/org/apache/lucene/search/TestSimilarity.java b/lucene/src/test/org/apache/lucene/search/TestSimilarity.java index d788799db9d..1e4bc503840 100644 --- a/lucene/src/test/org/apache/lucene/search/TestSimilarity.java +++ b/lucene/src/test/org/apache/lucene/search/TestSimilarity.java @@ -40,7 +40,7 @@ import org.apache.lucene.search.Explanation.IDFExplanation; public class TestSimilarity extends LuceneTestCase { public static class SimpleSimilarity extends Similarity implements SimilarityProvider { - @Override public float computeNorm(String field, FieldInvertState state) { return state.getBoost(); } + @Override public float computeNorm(FieldInvertState state) { return state.getBoost(); } @Override public float tf(float freq) { return freq; } @Override public float sloppyFreq(int distance) { return 2.0f; } @Override public float idf(int docFreq, int numDocs) { return 1.0f; } diff --git a/lucene/src/test/org/apache/lucene/search/TestSimilarityProvider.java b/lucene/src/test/org/apache/lucene/search/TestSimilarityProvider.java index 
7a8f123be71..d7350d3246f 100644 --- a/lucene/src/test/org/apache/lucene/search/TestSimilarityProvider.java +++ b/lucene/src/test/org/apache/lucene/search/TestSimilarityProvider.java @@ -107,7 +107,7 @@ public class TestSimilarityProvider extends LuceneTestCase { private class Sim1 extends Similarity { @Override - public float computeNorm(String field, FieldInvertState state) { + public float computeNorm(FieldInvertState state) { return 1f; } @@ -129,7 +129,7 @@ public class TestSimilarityProvider extends LuceneTestCase { private class Sim2 extends Similarity { @Override - public float computeNorm(String field, FieldInvertState state) { + public float computeNorm(FieldInvertState state) { return 10f; } diff --git a/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java b/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java index 4bd8a6cb6a4..522642bccac 100644 --- a/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java +++ b/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java @@ -299,14 +299,14 @@ public class TestPayloadNearQuery extends LuceneTestCase { // must be static for weight serialization tests static class BoostingSimilarity extends DefaultSimilarity { - @Override public float scorePayload(int docId, String fieldName, int start, int end, byte[] payload, int offset, int length) { + @Override public float scorePayload(int docId, int start, int end, byte[] payload, int offset, int length) { //we know it is size 4 here, so ignore the offset/length return payload[offset]; } //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! //Make everything else 1 so we see the effect of the payload //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
- @Override public float computeNorm(String fieldName, FieldInvertState state) { + @Override public float computeNorm(FieldInvertState state) { return state.getBoost(); } diff --git a/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java b/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java index 51bb7385c23..c82c07907ec 100644 --- a/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java +++ b/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java @@ -287,7 +287,7 @@ public class TestPayloadTermQuery extends LuceneTestCase { // TODO: Remove warning after API has been finalized @Override - public float scorePayload(int docId, String fieldName, int start, int end, byte[] payload, int offset, int length) { + public float scorePayload(int docId, int start, int end, byte[] payload, int offset, int length) { //we know it is size 4 here, so ignore the offset/length return payload[offset]; } @@ -296,7 +296,7 @@ public class TestPayloadTermQuery extends LuceneTestCase { //Make everything else 1 so we see the effect of the payload //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
@Override - public float computeNorm(String fieldName, FieldInvertState state) { + public float computeNorm(FieldInvertState state) { return state.getBoost(); } diff --git a/solr/src/test/org/apache/solr/search/function/TestFunctionQuery.java b/solr/src/test/org/apache/solr/search/function/TestFunctionQuery.java index 300acd34b8a..ceba5e6b109 100755 --- a/solr/src/test/org/apache/solr/search/function/TestFunctionQuery.java +++ b/solr/src/test/org/apache/solr/search/function/TestFunctionQuery.java @@ -299,7 +299,7 @@ public class TestFunctionQuery extends SolrTestCaseJ4 { state.setBoost(1.0f); state.setLength(4); assertQ(req("fl","*,score","q", "{!func}norm(a_t)", "fq","id:2"), - "//float[@name='score']='" + similarity.computeNorm("a_t",state) + "'"); // sqrt(4)==2 and is exactly representable when quantized to a byte + "//float[@name='score']='" + similarity.computeNorm(state) + "'"); // sqrt(4)==2 and is exactly representable when quantized to a byte // test that ord and rord are working on a global index basis, not just // at the segment level (since Lucene 2.9 has switched to per-segment searching) From 449f739498f107b7ae89608305ec4eabc596b7d3 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Sat, 12 Feb 2011 16:21:03 +0000 Subject: [PATCH 132/185] make MockRandomCodec more evil: now it randomly swaps different int codecs per-file git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1070106 13f79535-47bb-0310-9956-ffa450edef68 --- .../codecs/mockrandom/MockRandomCodec.java | 98 +++++++++++-------- .../test/org/apache/lucene/index/TestDoc.java | 3 + .../index/TestIndexReaderCloneNorms.java | 17 +++- 3 files changed, 78 insertions(+), 40 deletions(-) diff --git a/lucene/src/test-framework/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java b/lucene/src/test-framework/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java index bfc9fc81423..7399fde92b4 100644 --- 
a/lucene/src/test-framework/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java +++ b/lucene/src/test-framework/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java @@ -18,15 +18,17 @@ package org.apache.lucene.index.codecs.mockrandom; */ import java.io.IOException; +import java.util.ArrayList; import java.util.Iterator; +import java.util.List; import java.util.Random; import java.util.Set; +import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.SegmentInfo; import org.apache.lucene.index.SegmentReadState; import org.apache.lucene.index.SegmentWriteState; -import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.codecs.BlockTermsReader; import org.apache.lucene.index.codecs.BlockTermsWriter; import org.apache.lucene.index.codecs.Codec; @@ -46,6 +48,9 @@ import org.apache.lucene.index.codecs.mockintblock.MockVariableIntBlockCodec; import org.apache.lucene.index.codecs.mocksep.MockSingleIntFactory; import org.apache.lucene.index.codecs.pulsing.PulsingPostingsReaderImpl; import org.apache.lucene.index.codecs.pulsing.PulsingPostingsWriterImpl; +import org.apache.lucene.index.codecs.sep.IntIndexInput; +import org.apache.lucene.index.codecs.sep.IntIndexOutput; +import org.apache.lucene.index.codecs.sep.IntStreamFactory; import org.apache.lucene.index.codecs.sep.SepPostingsReaderImpl; import org.apache.lucene.index.codecs.sep.SepPostingsWriterImpl; import org.apache.lucene.index.codecs.standard.StandardPostingsReader; @@ -71,11 +76,57 @@ public class MockRandomCodec extends Codec { this.seedRandom = new Random(random.nextLong()); } + // Chooses random IntStreamFactory depending on file's extension + private static class MockIntStreamFactory extends IntStreamFactory { + private final int salt; + private final List delegates = new ArrayList(); + + public MockIntStreamFactory(Random random) { + salt = random.nextInt(); + delegates.add(new MockSingleIntFactory()); + final int 
blockSize = _TestUtil.nextInt(random, 1, 2000); + delegates.add(new MockFixedIntBlockCodec.MockIntFactory(blockSize)); + final int baseBlockSize = _TestUtil.nextInt(random, 1, 127); + delegates.add(new MockVariableIntBlockCodec.MockIntFactory(baseBlockSize)); + // TODO: others + } + + private static String getExtension(String fileName) { + final int idx = fileName.indexOf('.'); + assert idx != -1; + return fileName.substring(idx); + } + + @Override + public IntIndexInput openInput(Directory dir, String fileName, int readBufferSize) throws IOException { + // Must only use extension, because IW.addIndexes can + // rename segment! + final IntStreamFactory f = delegates.get((Math.abs(salt ^ getExtension(fileName).hashCode())) % delegates.size()); + if (LuceneTestCase.VERBOSE) { + System.out.println("MockRandomCodec: read using int factory " + f + " from fileName=" + fileName); + } + return f.openInput(dir, fileName, readBufferSize); + } + + @Override + public IntIndexOutput createOutput(Directory dir, String fileName) throws IOException { + final IntStreamFactory f = delegates.get((Math.abs(salt ^ getExtension(fileName).hashCode())) % delegates.size()); + if (LuceneTestCase.VERBOSE) { + System.out.println("MockRandomCodec: write using int factory " + f + " to fileName=" + fileName); + } + return f.createOutput(dir, fileName); + } + } + @Override public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException { final long seed = seedRandom.nextLong(); + if (LuceneTestCase.VERBOSE) { + System.out.println("MockRandomCodec: writing to seg=" + state.segmentName + " seed=" + seed); + } + final String seedFileName = IndexFileNames.segmentFileName(state.segmentName, state.codecId, SEED_EXT); final IndexOutput out = state.directory.createOutput(seedFileName); out.writeLong(seed); @@ -83,25 +134,9 @@ public class MockRandomCodec extends Codec { final Random random = new Random(seed); PostingsWriterBase postingsWriter; - final int n = random.nextInt(4); - if (n 
== 0) { - if (LuceneTestCase.VERBOSE) { - System.out.println("MockRandomCodec: writing MockSep postings"); - } - postingsWriter = new SepPostingsWriterImpl(state, new MockSingleIntFactory()); - } else if (n == 1) { - final int blockSize = _TestUtil.nextInt(random, 1, 2000); - if (LuceneTestCase.VERBOSE) { - System.out.println("MockRandomCodec: writing MockFixedIntBlock(" + blockSize + ") postings"); - } - postingsWriter = new SepPostingsWriterImpl(state, new MockFixedIntBlockCodec.MockIntFactory(blockSize)); - } else if (n == 2) { - final int baseBlockSize = _TestUtil.nextInt(random, 1, 127); - if (LuceneTestCase.VERBOSE) { - System.out.println("MockRandomCodec: writing MockVariableIntBlock(" + baseBlockSize + ") postings"); - } - postingsWriter = new SepPostingsWriterImpl(state, new MockVariableIntBlockCodec.MockIntFactory(baseBlockSize)); + if (random.nextBoolean()) { + postingsWriter = new SepPostingsWriterImpl(state, new MockIntStreamFactory(random)); } else { if (LuceneTestCase.VERBOSE) { System.out.println("MockRandomCodec: writing Standard postings"); @@ -190,32 +225,17 @@ public class MockRandomCodec extends Codec { final String seedFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.codecId, SEED_EXT); final IndexInput in = state.dir.openInput(seedFileName); final long seed = in.readLong(); + if (LuceneTestCase.VERBOSE) { + System.out.println("MockRandomCodec: reading from seg=" + state.segmentInfo.name + " seed=" + seed); + } in.close(); final Random random = new Random(seed); PostingsReaderBase postingsReader; - final int n = random.nextInt(4); - if (n == 0) { - if (LuceneTestCase.VERBOSE) { - System.out.println("MockRandomCodec: reading MockSep postings"); - } + if (random.nextBoolean()) { postingsReader = new SepPostingsReaderImpl(state.dir, state.segmentInfo, - state.readBufferSize, new MockSingleIntFactory(), state.codecId); - } else if (n == 1) { - final int blockSize = _TestUtil.nextInt(random, 1, 2000); - if 
(LuceneTestCase.VERBOSE) { - System.out.println("MockRandomCodec: reading MockFixedIntBlock(" + blockSize + ") postings"); - } - postingsReader = new SepPostingsReaderImpl(state.dir, state.segmentInfo, - state.readBufferSize, new MockFixedIntBlockCodec.MockIntFactory(blockSize), state.codecId); - } else if (n == 2) { - final int baseBlockSize = _TestUtil.nextInt(random, 1, 127); - if (LuceneTestCase.VERBOSE) { - System.out.println("MockRandomCodec: reading MockVariableIntBlock(" + baseBlockSize + ") postings"); - } - postingsReader = new SepPostingsReaderImpl(state.dir, state.segmentInfo, - state.readBufferSize, new MockVariableIntBlockCodec.MockIntFactory(baseBlockSize), state.codecId); + state.readBufferSize, new MockIntStreamFactory(random), state.codecId); } else { if (LuceneTestCase.VERBOSE) { System.out.println("MockRandomCodec: reading Standard postings"); diff --git a/lucene/src/test/org/apache/lucene/index/TestDoc.java b/lucene/src/test/org/apache/lucene/index/TestDoc.java index 774581c8ae1..e15fc5174bb 100644 --- a/lucene/src/test/org/apache/lucene/index/TestDoc.java +++ b/lucene/src/test/org/apache/lucene/index/TestDoc.java @@ -57,6 +57,9 @@ public class TestDoc extends LuceneTestCase { @Override public void setUp() throws Exception { super.setUp(); + if (VERBOSE) { + System.out.println("TEST: setUp"); + } workDir = new File(TEMP_DIR,"TestDoc"); workDir.mkdirs(); diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java b/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java index 4f49cfb8dfb..22a0736728c 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java @@ -106,13 +106,17 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase { Directory dir3 = newDirectory(); createIndex(random, dir3); + if (VERBOSE) { + System.out.println("TEST: now addIndexes/optimize"); + } IndexWriter iw = new 
IndexWriter( dir3, newIndexWriterConfig(TEST_VERSION_CURRENT, anlzr). setOpenMode(OpenMode.APPEND). setMaxBufferedDocs(5). - setMergePolicy(newLogMergePolicy(3)) + setMergePolicy(newLogMergePolicy(3)) ); + iw.setInfoStream(VERBOSE ? System.out : null); iw.addIndexes(dir1, dir2); iw.optimize(); iw.close(); @@ -146,6 +150,9 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase { // try cloning and reopening the norms private void doTestNorms(Random random, Directory dir) throws IOException { + if (VERBOSE) { + System.out.println("TEST: now doTestNorms"); + } addDocs(random, dir, 12, true); IndexReader ir = IndexReader.open(dir, false); verifyIndex(ir); @@ -237,13 +244,20 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase { } private void createIndex(Random random, Directory dir) throws IOException { + if (VERBOSE) { + System.out.println("TEST: createIndex"); + } IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, anlzr).setOpenMode(OpenMode.CREATE) .setMaxBufferedDocs(5).setSimilarityProvider(similarityOne).setMergePolicy(newLogMergePolicy())); + LogMergePolicy lmp = (LogMergePolicy) iw.getConfig().getMergePolicy(); lmp.setMergeFactor(3); lmp.setUseCompoundFile(true); iw.close(); + if (VERBOSE) { + System.out.println("TEST: done createIndex"); + } } private void modifyNormsForF1(IndexReader ir) throws IOException { @@ -298,6 +312,7 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase { lmp.setMergeFactor(3); lmp.setUseCompoundFile(compound); IndexWriter iw = new IndexWriter(dir, conf); + iw.setInfoStream(VERBOSE ? 
System.out : null); for (int i = 0; i < ndocs; i++) { iw.addDocument(newDoc()); } From fa38446aedc7223cce4285505c7973dac66864a7 Mon Sep 17 00:00:00 2001 From: Grant Ingersoll Date: Sat, 12 Feb 2011 20:17:56 +0000 Subject: [PATCH 133/185] missed the example config when merging in cloud to trunk git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1070145 13f79535-47bb-0310-9956-ffa450edef68 --- solr/example/solr/zoo.cfg | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 solr/example/solr/zoo.cfg diff --git a/solr/example/solr/zoo.cfg b/solr/example/solr/zoo.cfg new file mode 100644 index 00000000000..aea451885e4 --- /dev/null +++ b/solr/example/solr/zoo.cfg @@ -0,0 +1,17 @@ +# The number of milliseconds of each tick +tickTime=2000 +# The number of ticks that the initial +# synchronization phase can take +initLimit=10 +# The number of ticks that can pass between +# sending a request and getting an acknowledgement +syncLimit=5 + +# the directory where the snapshot is stored. 
+# dataDir=/opt/zookeeper/data +# NOTE: Solr defaults the dataDir to /zoo_data + +# the port at which the clients will connect +# clientPort=2181 +# NOTE: Solr sets this based on zkRun / zkHost params + From cccf7ab9ac7ab451aa8b363c7d6b8b9d4ac5f94e Mon Sep 17 00:00:00 2001 From: Grant Ingersoll Date: Sat, 12 Feb 2011 20:22:09 +0000 Subject: [PATCH 134/185] add in missing shard declaration from the cloud example git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1070146 13f79535-47bb-0310-9956-ffa450edef68 --- solr/example/solr/solr.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/solr/example/solr/solr.xml b/solr/example/solr/solr.xml index 4d1a84e7531..f43e6e5349a 100644 --- a/solr/example/solr/solr.xml +++ b/solr/example/solr/solr.xml @@ -29,6 +29,6 @@ If 'null' (or absent), cores will not be manageable via request handler --> - + From 7caa47e48ba34fe0e23fdb913db468e4f21516d7 Mon Sep 17 00:00:00 2001 From: Koji Sekiguchi Date: Sun, 13 Feb 2011 02:45:11 +0000 Subject: [PATCH 135/185] SOLR-2129: add note for AlchemyAPI and Calais to uima README.txt git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1070183 13f79535-47bb-0310-9956-ffa450edef68 --- solr/contrib/uima/README.txt | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/solr/contrib/uima/README.txt b/solr/contrib/uima/README.txt index b2b97293dac..d450e3ff72d 100644 --- a/solr/contrib/uima/README.txt +++ b/solr/contrib/uima/README.txt @@ -35,6 +35,12 @@ To start using Solr UIMA Metadata Extraction Library you should go through the f + + where VALID_ALCHEMYAPI_KEY is your AlchemyAPI Access Key. You need to register AlchemyAPI Access + key to exploit the AlchemyAPI services: http://www.alchemyapi.com/api/register.html + + where VALID_OPENCALAIS_KEY is your Calais Service Key. You need to register Calais Service + key to exploit the Calais services: http://www.opencalais.com/apikey 5. 
the analysisEngine tag must contain an AE descriptor inside the specified path in the classpath From 4eb631938263a140ccff8f7f15a4b190ac58bc84 Mon Sep 17 00:00:00 2001 From: Koji Sekiguchi Date: Sun, 13 Feb 2011 03:29:09 +0000 Subject: [PATCH 136/185] SOLR-2129: fix the name of uima update processor git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1070185 13f79535-47bb-0310-9956-ffa450edef68 --- solr/contrib/uima/README.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/solr/contrib/uima/README.txt b/solr/contrib/uima/README.txt index d450e3ff72d..e9a03eec648 100644 --- a/solr/contrib/uima/README.txt +++ b/solr/contrib/uima/README.txt @@ -51,7 +51,7 @@ To start using Solr UIMA Metadata Extraction Library you should go through the f 8. define in your solrconfig.xml an UpdateRequestProcessorChain as following: - + From ec9c231f5ab35255d79c9156d31e2f4b28a5cbfb Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Sun, 13 Feb 2011 10:31:44 +0000 Subject: [PATCH 137/185] SOLR-1989: recompilation of source needs to be possible in release artifacts git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1070206 13f79535-47bb-0310-9956-ffa450edef68 --- solr/build.xml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/solr/build.xml b/solr/build.xml index d8021276039..78ecb8d6618 100644 --- a/solr/build.xml +++ b/solr/build.xml @@ -756,6 +756,12 @@ excludes="*.tgz *.zip *.md5 **/*src*.jar **/*docs*.jar" /> + + @@ -775,6 +781,12 @@ prefix="${fullnamever}" includes="**/*.sh **/bin/ src/scripts/" filemode="755" /> + + From 64b1c3ef1f7d5f824f0aa0167daa146984c36156 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Sun, 13 Feb 2011 14:08:49 +0000 Subject: [PATCH 138/185] add test case to stress advance/next git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1070240 13f79535-47bb-0310-9956-ffa450edef68 --- .../lucene/index/TestStressAdvance.java | 131 ++++++++++++++++++ 1 file changed, 131 insertions(+) create mode 
100644 lucene/src/test/org/apache/lucene/index/TestStressAdvance.java diff --git a/lucene/src/test/org/apache/lucene/index/TestStressAdvance.java b/lucene/src/test/org/apache/lucene/index/TestStressAdvance.java new file mode 100644 index 00000000000..8735fe77ded --- /dev/null +++ b/lucene/src/test/org/apache/lucene/index/TestStressAdvance.java @@ -0,0 +1,131 @@ +package org.apache.lucene.index; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.apache.lucene.util.*; +import org.apache.lucene.store.*; +import org.apache.lucene.document.*; + +public class TestStressAdvance extends LuceneTestCase { + + public void testStressAdvance() throws Exception { + for(int iter=0;iter<3;iter++) { + if (VERBOSE) { + System.out.println("\nTEST: iter=" + iter); + } + Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random, dir); + final Set aDocs = new HashSet(); + final Document doc = new Document(); + final Field f = newField("field", "", Field.Index.NOT_ANALYZED_NO_NORMS); + doc.add(f); + final Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS); + doc.add(idField); + for(int id=0;id<5000*RANDOM_MULTIPLIER;id++) { + if (random.nextInt(4) == 3) { + f.setValue("a"); + aDocs.add(id); + } else { + f.setValue("b"); + } + idField.setValue(""+id); + w.addDocument(doc); + } + + w.optimize(); + + final List aDocIDs = new ArrayList(); + final List bDocIDs = new ArrayList(); + + final IndexReader r = w.getReader(); + final int[] idToDocID = new int[r.maxDoc()]; + for(int docID=0;docID expected) throws Exception { + if (VERBOSE) { + System.out.println("test"); + } + int upto = -1; + while(upto < expected.size()) { + if (VERBOSE) { + System.out.println(" cycle upto=" + upto + " of " + expected.size()); + } + final int docID; + if (random.nextInt(4) == 1 || upto == expected.size()-1) { + // test nextDoc() + if (VERBOSE) { + System.out.println(" do nextDoc"); + } + upto++; + docID = docs.nextDoc(); + } else { + // test advance() + final int inc = _TestUtil.nextInt(random, 1, expected.size()-1-upto); + if (VERBOSE) { + System.out.println(" do advance inc=" + inc); + } + upto += inc; + docID = docs.advance(expected.get(upto)); + } + if (upto == expected.size()) { + if (VERBOSE) { + System.out.println(" expect docID=" + DocsEnum.NO_MORE_DOCS + " actual=" 
+ docID); + } + assertEquals(DocsEnum.NO_MORE_DOCS, docID); + } else { + if (VERBOSE) { + System.out.println(" expect docID=" + expected.get(upto) + " actual=" + docID); + } + assertTrue(docID != DocsEnum.NO_MORE_DOCS); + assertEquals(expected.get(upto).intValue(), docID); + } + } + } +} From 23e9af6057320f3a7b0de0a7e330264a233e0970 Mon Sep 17 00:00:00 2001 From: Mark Robert Miller Date: Sun, 13 Feb 2011 23:09:50 +0000 Subject: [PATCH 139/185] SOLR-2353: SpellCheckCollator uses org.mortbay.log.Log for logging - via Sami Siren git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1070321 13f79535-47bb-0310-9956-ffa450edef68 --- solr/src/java/org/apache/solr/spelling/SpellCheckCollator.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/solr/src/java/org/apache/solr/spelling/SpellCheckCollator.java b/solr/src/java/org/apache/solr/spelling/SpellCheckCollator.java index aa1c377524f..6b5c37b0ef1 100644 --- a/solr/src/java/org/apache/solr/spelling/SpellCheckCollator.java +++ b/solr/src/java/org/apache/solr/spelling/SpellCheckCollator.java @@ -29,7 +29,6 @@ import org.apache.solr.handler.component.ResponseBuilder; import org.apache.solr.handler.component.SearchComponent; import org.apache.solr.request.LocalSolrQueryRequest; import org.apache.solr.response.SolrQueryResponse; -import org.mortbay.log.Log; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -91,7 +90,7 @@ public class SpellCheckCollator { queryComponent.process(checkResponse); hits = (Integer) checkResponse.rsp.getToLog().get("hits"); } catch (Exception e) { - Log.warn("Exception trying to re-query to check if a spell check possibility would return any hits.", e); + LOG.warn("Exception trying to re-query to check if a spell check possibility would return any hits.", e); } finally { checkResponse.req.close(); } From 18944a28b5b4aa20c629d6306320cfeee30d3fa9 Mon Sep 17 00:00:00 2001 From: Steven Rowe Date: Mon, 14 Feb 2011 14:04:35 +0000 Subject: [PATCH 140/185] javac should 
always use UTF-8 encoding git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1070488 13f79535-47bb-0310-9956-ffa450edef68 --- dev-tools/idea/.idea/compiler.xml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/dev-tools/idea/.idea/compiler.xml b/dev-tools/idea/.idea/compiler.xml index e893261eeb9..b031758b4fb 100644 --- a/dev-tools/idea/.idea/compiler.xml +++ b/dev-tools/idea/.idea/compiler.xml @@ -33,5 +33,8 @@ + + From 8c6f28451a89fb93bbd3c828e3c41e4f215dd392 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Mon, 14 Feb 2011 14:14:10 +0000 Subject: [PATCH 141/185] fix false fail git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1070494 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/lucene/search/TestMultiTermConstantScore.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lucene/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java b/lucene/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java index 5b52755f3fe..dcf14976e6f 100644 --- a/lucene/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java +++ b/lucene/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java @@ -23,6 +23,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.AtomicReaderContext; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; @@ -59,7 +60,9 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { "X 4 5 6" }; small = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, small, new MockAnalyzer(MockTokenizer.WHITESPACE, false)); + RandomIndexWriter writer = new RandomIndexWriter(random, small, + newIndexWriterConfig(TEST_VERSION_CURRENT, + new MockAnalyzer(MockTokenizer.WHITESPACE, 
false)).setMergePolicy(newInOrderLogMergePolicy())); for (int i = 0; i < data.length; i++) { Document doc = new Document(); From 573ecf2e16d618785d04871c5b61e29073eef5d5 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Mon, 14 Feb 2011 22:56:35 +0000 Subject: [PATCH 142/185] SOLR-2342: don't let hot add/updates starve commit git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1070691 13f79535-47bb-0310-9956-ffa450edef68 --- .../src/java/org/apache/solr/update/DirectUpdateHandler2.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/solr/src/java/org/apache/solr/update/DirectUpdateHandler2.java b/solr/src/java/org/apache/solr/update/DirectUpdateHandler2.java index 55f9ac77fc0..5106b1cb0aa 100644 --- a/solr/src/java/org/apache/solr/update/DirectUpdateHandler2.java +++ b/solr/src/java/org/apache/solr/update/DirectUpdateHandler2.java @@ -91,7 +91,9 @@ public class DirectUpdateHandler2 extends UpdateHandler { public DirectUpdateHandler2(SolrCore core) throws IOException { super(core); - ReadWriteLock rwl = new ReentrantReadWriteLock(); + // Pass fairness=true so commit request is not starved + // when add/updates are running hot (SOLR-2342): + ReadWriteLock rwl = new ReentrantReadWriteLock(true); iwAccess = rwl.readLock(); iwCommit = rwl.writeLock(); From d3852002c22b22a055472c6f2ffe130c084bc7e0 Mon Sep 17 00:00:00 2001 From: Koji Sekiguchi Date: Tue, 15 Feb 2011 02:19:49 +0000 Subject: [PATCH 143/185] LUCENE-2894: fix the prettify path git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1070760 13f79535-47bb-0310-9956-ffa450edef68 --- solr/common-build.xml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/solr/common-build.xml b/solr/common-build.xml index 966607530b1..e03f35a88c4 100644 --- a/solr/common-build.xml +++ b/solr/common-build.xml @@ -347,7 +347,8 @@ - + + @@ -376,7 +377,7 @@

    + ]]>
    From 5691bea096b0b070f0d90558c4656cf84314e7bc Mon Sep 17 00:00:00 2001 From: Uwe Schindler Date: Tue, 15 Feb 2011 09:24:06 +0000 Subject: [PATCH 144/185] LUCENE-2920: Removed ShingleMatrixFilter as it is unmaintained and does not work with custom Attributes or custom payload encoders git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1070821 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/contrib/CHANGES.txt | 3 + .../analysis/shingle/ShingleMatrixFilter.java | 1055 ----------------- .../shingle/TestShingleMatrixFilter.java | 530 --------- 3 files changed, 3 insertions(+), 1585 deletions(-) delete mode 100644 modules/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleMatrixFilter.java delete mode 100644 modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/TestShingleMatrixFilter.java diff --git a/lucene/contrib/CHANGES.txt b/lucene/contrib/CHANGES.txt index 7615463e391..7251f49788c 100644 --- a/lucene/contrib/CHANGES.txt +++ b/lucene/contrib/CHANGES.txt @@ -193,6 +193,9 @@ API Changes * LUCENE-2830: Use StringBuilder instead of StringBuffer across Benchmark, and remove the StringBuffer HtmlParser.parse() variant. (Shai Erera) + * LUCENE-2920: Deprecated ShingleMatrixFilter as it is unmaintained and does + not work with custom Attributes or custom payload encoders. (Uwe Schindler) + New features * LUCENE-2500: Added DirectIOLinuxDirectory, a Linux-specific diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleMatrixFilter.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleMatrixFilter.java deleted file mode 100644 index a21ff3711e2..00000000000 --- a/modules/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleMatrixFilter.java +++ /dev/null @@ -1,1055 +0,0 @@ -package org.apache.lucene.analysis.shingle; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.NoSuchElementException; -import java.util.Set; - -import org.apache.lucene.analysis.Token; -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.miscellaneous.EmptyTokenStream; -import org.apache.lucene.analysis.payloads.PayloadHelper; -import org.apache.lucene.analysis.shingle.ShingleMatrixFilter.Matrix.Column.Row; -import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.analysis.tokenattributes.FlagsAttribute; -import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; -import org.apache.lucene.analysis.tokenattributes.PayloadAttribute; -import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.analysis.tokenattributes.TypeAttribute; -import org.apache.lucene.index.Payload; - - -/** - *

    A ShingleMatrixFilter constructs shingles (token n-grams) from a token stream. - * In other words, it creates combinations of tokens as a single token. - * - *

    For example, the sentence "please divide this sentence into shingles" - * might be tokenized into shingles "please divide", "divide this", - * "this sentence", "sentence into", and "into shingles". - * - *

    Using a shingle filter at index and query time can in some instances - * be used to replace phrase queries, especially them with 0 slop. - * - *

    Without a spacer character - * it can be used to handle composition and decomposition of words - * such as searching for "multi dimensional" instead of "multidimensional". - * It is a rather common human problem at query time - * in several languages, notably the northern Germanic branch. - * - *

    Shingles are amongst many things also known to solve problems - * in spell checking, language detection and document clustering. - * - *

    This filter is backed by a three dimensional column oriented matrix - * used to create permutations of the second dimension, the rows, - * and leaves the third, the z-axis, for for multi token synonyms. - * - *

    In order to use this filter you need to define a way of positioning - * the input stream tokens in the matrix. This is done using a - * {@link org.apache.lucene.analysis.shingle.ShingleMatrixFilter.TokenSettingsCodec}. - * There are three simple implementations for demonstrational purposes, - * see {@link org.apache.lucene.analysis.shingle.ShingleMatrixFilter.OneDimensionalNonWeightedTokenSettingsCodec}, - * {@link org.apache.lucene.analysis.shingle.ShingleMatrixFilter.TwoDimensionalNonWeightedSynonymTokenSettingsCodec} - * and {@link org.apache.lucene.analysis.shingle.ShingleMatrixFilter.SimpleThreeDimensionalTokenSettingsCodec}. - * - *

    Consider this token matrix: - *

    - *  Token[column][row][z-axis]{
    - *    {{hello}, {greetings, and, salutations}},
    - *    {{world}, {earth}, {tellus}}
    - *  };
    - * 
    - * - * It would produce the following 2-3 gram sized shingles: - * - *
    - * "hello_world"
    - * "greetings_and"
    - * "greetings_and_salutations"
    - * "and_salutations"
    - * "and_salutations_world"
    - * "salutations_world"
    - * "hello_earth"
    - * "and_salutations_earth"
    - * "salutations_earth"
    - * "hello_tellus"
    - * "and_salutations_tellus"
    - * "salutations_tellus"
    - *  
    - * - *

    This implementation can be rather heap demanding - * if (maximum shingle size - minimum shingle size) is a great number and the stream contains many columns, - * or if each column contains a great number of rows. - * - *

    The problem is that in order avoid producing duplicates - * the filter needs to keep track of any shingle already produced and returned to the consumer. - * - * There is a bit of resource management to handle this - * but it would of course be much better if the filter was written - * so it never created the same shingle more than once in the first place. - * - *

    The filter also has basic support for calculating weights for the shingles - * based on the weights of the tokens from the input stream, output shingle size, etc. - * See {@link #calculateShingleWeight(org.apache.lucene.analysis.Token, java.util.List, int, java.util.List, java.util.List)}. - *

    - * NOTE: This filter might not behave correctly if used with custom Attributes, i.e. Attributes other than - * the ones located in org.apache.lucene.analysis.tokenattributes. - */ -public final class ShingleMatrixFilter extends TokenStream { - - public static Character defaultSpacerCharacter = Character.valueOf('_'); - public static TokenSettingsCodec defaultSettingsCodec = new OneDimensionalNonWeightedTokenSettingsCodec(); - public static boolean ignoringSinglePrefixOrSuffixShingleByDefault = false; - - /** - * Strategy used to code and decode meta data of the tokens from the input stream - * regarding how to position the tokens in the matrix, set and retreive weight, et c. - */ - public static abstract class TokenSettingsCodec { - - /** - * Retrieves information on how a {@link org.apache.lucene.analysis.Token} is to be inserted to a {@link org.apache.lucene.analysis.shingle.ShingleMatrixFilter.Matrix}. - * @param token - * @return {@link ShingleMatrixFilter.TokenPositioner} - * @throws IOException - */ - public abstract TokenPositioner getTokenPositioner(Token token) throws IOException; - - /** - * Sets information on how a {@link org.apache.lucene.analysis.Token} is to be inserted to a {@link org.apache.lucene.analysis.shingle.ShingleMatrixFilter.Matrix}. - * - * @param token - * @param tokenPositioner - */ - public abstract void setTokenPositioner(Token token, ShingleMatrixFilter.TokenPositioner tokenPositioner); - - /** - * Have this method return 1f in order to 'disable' weights. - * @param token - * @return the weight of parameter token - */ - public abstract float getWeight(Token token); - - /** - * Have this method do nothing in order to 'disable' weights. - * @param token - * @param weight - */ - public abstract void setWeight(Token token, float weight); - } - - - /** - * Used to describe how a {@link org.apache.lucene.analysis.Token} is to be inserted to a {@link org.apache.lucene.analysis.shingle.ShingleMatrixFilter.Matrix}. 
- * @see org.apache.lucene.analysis.shingle.ShingleMatrixFilter.TokenSettingsCodec#getTokenPositioner(org.apache.lucene.analysis.Token) - * @see org.apache.lucene.analysis.shingle.ShingleMatrixFilter.TokenSettingsCodec#setTokenPositioner(org.apache.lucene.analysis.Token,org.apache.lucene.analysis.shingle.ShingleMatrixFilter.TokenPositioner) - */ - public static class TokenPositioner { - public static final TokenPositioner newColumn = new TokenPositioner(0); - public static final TokenPositioner newRow = new TokenPositioner(1); - public static final TokenPositioner sameRow = new TokenPositioner(2); - - private final int index; - - private TokenPositioner(int index) { - this.index = index; - } - - public int getIndex() { - return index; - } - } - - // filter instance settings variables - - private TokenSettingsCodec settingsCodec; - - private int minimumShingleSize; - private int maximumShingleSize; - - private boolean ignoringSinglePrefixOrSuffixShingle = false; - - private Character spacerCharacter = defaultSpacerCharacter; - - private TokenStream input; - - private CharTermAttribute termAtt; - private PositionIncrementAttribute posIncrAtt; - private PayloadAttribute payloadAtt; - private OffsetAttribute offsetAtt; - private TypeAttribute typeAtt; - private FlagsAttribute flagsAtt; - - private CharTermAttribute in_termAtt; - private PositionIncrementAttribute in_posIncrAtt; - private PayloadAttribute in_payloadAtt; - private OffsetAttribute in_offsetAtt; - private TypeAttribute in_typeAtt; - private FlagsAttribute in_flagsAtt; - - - /** - * Creates a shingle filter based on a user defined matrix. - * - * The filter /will/ delete columns from the input matrix! You will not be able to reset the filter if you used this constructor. - * todo: don't touch the matrix! use a boolean, set the input stream to null or something, and keep track of where in the matrix we are at. - * - * @param matrix the input based for creating shingles. 
Does not need to contain any information until {@link #incrementToken()} is called the first time. - * @param minimumShingleSize minimum number of tokens in any shingle. - * @param maximumShingleSize maximum number of tokens in any shingle. - * @param spacerCharacter character to use between texts of the token parts in a shingle. null for none. - * @param ignoringSinglePrefixOrSuffixShingle if true, shingles that only contains permutation of the first of the last column will not be produced as shingles. Useful when adding boundary marker tokens such as '^' and '$'. - * @param settingsCodec codec used to read input token weight and matrix positioning. - */ - public ShingleMatrixFilter(Matrix matrix, int minimumShingleSize, int maximumShingleSize, Character spacerCharacter, boolean ignoringSinglePrefixOrSuffixShingle, TokenSettingsCodec settingsCodec) { - this.matrix = matrix; - this.minimumShingleSize = minimumShingleSize; - this.maximumShingleSize = maximumShingleSize; - this.spacerCharacter = spacerCharacter; - this.ignoringSinglePrefixOrSuffixShingle = ignoringSinglePrefixOrSuffixShingle; - this.settingsCodec = settingsCodec; - - termAtt = addAttribute(CharTermAttribute.class); - posIncrAtt = addAttribute(PositionIncrementAttribute.class); - payloadAtt = addAttribute(PayloadAttribute.class); - offsetAtt = addAttribute(OffsetAttribute.class); - typeAtt = addAttribute(TypeAttribute.class); - flagsAtt = addAttribute(FlagsAttribute.class); - - // set the input to be an empty token stream, we already have the data. 
- this.input = new EmptyTokenStream(); - - in_termAtt = input.addAttribute(CharTermAttribute.class); - in_posIncrAtt = input.addAttribute(PositionIncrementAttribute.class); - in_payloadAtt = input.addAttribute(PayloadAttribute.class); - in_offsetAtt = input.addAttribute(OffsetAttribute.class); - in_typeAtt = input.addAttribute(TypeAttribute.class); - in_flagsAtt = input.addAttribute(FlagsAttribute.class); - } - - /** - * Creates a shingle filter using default settings. - * - * @see #defaultSpacerCharacter - * @see #ignoringSinglePrefixOrSuffixShingleByDefault - * @see #defaultSettingsCodec - * - * @param input stream from which to construct the matrix - * @param minimumShingleSize minimum number of tokens in any shingle. - * @param maximumShingleSize maximum number of tokens in any shingle. - */ - public ShingleMatrixFilter(TokenStream input, int minimumShingleSize, int maximumShingleSize) { - this(input, minimumShingleSize, maximumShingleSize, defaultSpacerCharacter); - } - - - /** - * Creates a shingle filter using default settings. - * - * @see #ignoringSinglePrefixOrSuffixShingleByDefault - * @see #defaultSettingsCodec - * - * @param input stream from which to construct the matrix - * @param minimumShingleSize minimum number of tokens in any shingle. - * @param maximumShingleSize maximum number of tokens in any shingle. - * @param spacerCharacter character to use between texts of the token parts in a shingle. null for none. - */ - public ShingleMatrixFilter(TokenStream input, int minimumShingleSize, int maximumShingleSize, Character spacerCharacter) { - this(input, minimumShingleSize, maximumShingleSize, spacerCharacter, ignoringSinglePrefixOrSuffixShingleByDefault); - } - - /** - * Creates a shingle filter using the default {@link TokenSettingsCodec}. - * - * @see #defaultSettingsCodec - * - * @param input stream from which to construct the matrix - * @param minimumShingleSize minimum number of tokens in any shingle. 
- * @param maximumShingleSize maximum number of tokens in any shingle. - * @param spacerCharacter character to use between texts of the token parts in a shingle. null for none. - * @param ignoringSinglePrefixOrSuffixShingle if true, shingles that only contains permutation of the first of the last column will not be produced as shingles. Useful when adding boundary marker tokens such as '^' and '$'. - */ - public ShingleMatrixFilter(TokenStream input, int minimumShingleSize, int maximumShingleSize, Character spacerCharacter, boolean ignoringSinglePrefixOrSuffixShingle) { - this(input, minimumShingleSize, maximumShingleSize, spacerCharacter, ignoringSinglePrefixOrSuffixShingle, defaultSettingsCodec); - } - - - /** - * Creates a shingle filter with ad hoc parameter settings. - * - * @param input stream from which to construct the matrix - * @param minimumShingleSize minimum number of tokens in any shingle. - * @param maximumShingleSize maximum number of tokens in any shingle. - * @param spacerCharacter character to use between texts of the token parts in a shingle. null for none. - * @param ignoringSinglePrefixOrSuffixShingle if true, shingles that only contains permutation of the first of the last column will not be produced as shingles. Useful when adding boundary marker tokens such as '^' and '$'. - * @param settingsCodec codec used to read input token weight and matrix positioning. 
- */ - public ShingleMatrixFilter(TokenStream input, int minimumShingleSize, int maximumShingleSize, Character spacerCharacter, boolean ignoringSinglePrefixOrSuffixShingle, TokenSettingsCodec settingsCodec) { - this.input = input; - this.minimumShingleSize = minimumShingleSize; - this.maximumShingleSize = maximumShingleSize; - this.spacerCharacter = spacerCharacter; - this.ignoringSinglePrefixOrSuffixShingle = ignoringSinglePrefixOrSuffixShingle; - this.settingsCodec = settingsCodec; - termAtt = addAttribute(CharTermAttribute.class); - posIncrAtt = addAttribute(PositionIncrementAttribute.class); - payloadAtt = addAttribute(PayloadAttribute.class); - offsetAtt = addAttribute(OffsetAttribute.class); - typeAtt = addAttribute(TypeAttribute.class); - flagsAtt = addAttribute(FlagsAttribute.class); - - in_termAtt = input.addAttribute(CharTermAttribute.class); - in_posIncrAtt = input.addAttribute(PositionIncrementAttribute.class); - in_payloadAtt = input.addAttribute(PayloadAttribute.class); - in_offsetAtt = input.addAttribute(OffsetAttribute.class); - in_typeAtt = input.addAttribute(TypeAttribute.class); - in_flagsAtt = input.addAttribute(FlagsAttribute.class); - } - - // internal filter instance variables - - /** iterator over the current matrix row permutations */ - private Iterator permutations; - - /** the current permutation of tokens used to produce shingles */ - private List currentPermuationTokens; - /** index to what row a token in currentShingleTokens represents*/ - private List currentPermutationRows; - - private int currentPermutationTokensStartOffset; - private int currentShingleLength; - - /** - * a set containing shingles that has been the result of a call to {@link #incrementToken()}, - * used to avoid producing the same shingle more than once. 
- */ - private Set> shinglesSeen = new HashSet>(); - - - @Override - public void reset() throws IOException { - permutations = null; - shinglesSeen.clear(); - input.reset(); - } - - private Matrix matrix; - - private Token reusableToken = new Token(); - - @Override - public final boolean incrementToken() throws IOException { - if (matrix == null) { - matrix = new Matrix(); - // fill matrix with maximumShingleSize columns - while (matrix.columns.size() < maximumShingleSize && readColumn()) { - // this loop looks ugly - } - } - - // this loop exists in order to avoid recursive calls to the next method - // as the complexity of a large matrix - // then would require a multi gigabyte sized stack. - Token token; - do { - token = produceNextToken(reusableToken); - } while (token == request_next_token); - if (token == null) return false; - - clearAttributes(); - termAtt.copyBuffer(token.buffer(), 0, token.length()); - posIncrAtt.setPositionIncrement(token.getPositionIncrement()); - flagsAtt.setFlags(token.getFlags()); - offsetAtt.setOffset(token.startOffset(), token.endOffset()); - typeAtt.setType(token.type()); - payloadAtt.setPayload(token.getPayload()); - return true; - } - - private Token getNextInputToken(Token token) throws IOException { - if (!input.incrementToken()) return null; - token.copyBuffer(in_termAtt.buffer(), 0, in_termAtt.length()); - token.setPositionIncrement(in_posIncrAtt.getPositionIncrement()); - token.setFlags(in_flagsAtt.getFlags()); - token.setOffset(in_offsetAtt.startOffset(), in_offsetAtt.endOffset()); - token.setType(in_typeAtt.type()); - token.setPayload(in_payloadAtt.getPayload()); - return token; - } - - private Token getNextToken(Token token) throws IOException { - if (!this.incrementToken()) return null; - token.copyBuffer(termAtt.buffer(), 0, termAtt.length()); - token.setPositionIncrement(posIncrAtt.getPositionIncrement()); - token.setFlags(flagsAtt.getFlags()); - token.setOffset(offsetAtt.startOffset(), offsetAtt.endOffset()); - 
token.setType(typeAtt.type()); - token.setPayload(payloadAtt.getPayload()); - return token; - } - - private static final Token request_next_token = new Token(); - - /** - * This method exists in order to avoid recursive calls to the method - * as the complexity of a fairly small matrix then easily would require - * a gigabyte sized stack per thread. - * - * @param reusableToken - * @return null if exhausted, instance request_next_token if one more call is required for an answer, or instance parameter resuableToken. - * @throws IOException - */ - private Token produceNextToken(final Token reusableToken) throws IOException { - - if (currentPermuationTokens != null) { - currentShingleLength++; - - if (currentShingleLength + currentPermutationTokensStartOffset <= currentPermuationTokens.size() - && currentShingleLength <= maximumShingleSize) { - - // it is possible to create at least one more shingle of the current matrix permutation - - if (ignoringSinglePrefixOrSuffixShingle - && currentShingleLength == 1 - && ((currentPermutationRows.get(currentPermutationTokensStartOffset)).getColumn().isFirst() || (currentPermutationRows.get(currentPermutationTokensStartOffset)).getColumn().isLast())) { - return getNextToken(reusableToken); - } - - int termLength = 0; - - List shingle = new ArrayList(currentShingleLength); - - for (int i = 0; i < currentShingleLength; i++) { - Token shingleToken = currentPermuationTokens.get(i + currentPermutationTokensStartOffset); - termLength += shingleToken.length(); - shingle.add(shingleToken); - } - if (spacerCharacter != null) { - termLength += currentShingleLength - 1; - } - - // only produce shingles that not already has been created - if (!shinglesSeen.add(shingle)) { - return request_next_token; - } - - // shingle token factory - StringBuilder sb = new StringBuilder(termLength + 10); // paranormal ability to foresee the future. 
- for (Token shingleToken : shingle) { - if (spacerCharacter != null && sb.length() > 0) { - sb.append(spacerCharacter); - } - sb.append(shingleToken.buffer(), 0, shingleToken.length()); - } - reusableToken.setEmpty().append(sb); - updateToken(reusableToken, shingle, currentPermutationTokensStartOffset, currentPermutationRows, currentPermuationTokens); - - return reusableToken; - - } else { - - // it is NOT possible to create one more shingles of the current matrix permutation - - if (currentPermutationTokensStartOffset < currentPermuationTokens.size() - 1) { - // reset shingle size and move one step to the right in the current tokens permutation - currentPermutationTokensStartOffset++; - currentShingleLength = minimumShingleSize - 1; - return request_next_token; - } - - - if (permutations == null) { - // todo does this ever occur? - return null; - } - - - if (!permutations.hasNext()) { - - // load more data (if available) to the matrix - - if (input != null && readColumn()) { - // don't really care, we just read it. - } - - // get rid of resources - - // delete the first column in the matrix - Matrix.Column deletedColumn = matrix.columns.remove(0); - - // remove all shingles seen that include any of the tokens from the deleted column. 
- List deletedColumnTokens = new ArrayList(); - for (Matrix.Column.Row row : deletedColumn.getRows()) { - for (Token token : row.getTokens()) { - deletedColumnTokens.add(token); - } - - } - for (Iterator> shinglesSeenIterator = shinglesSeen.iterator(); shinglesSeenIterator.hasNext();) { - List shingle = shinglesSeenIterator.next(); - for (Token deletedColumnToken : deletedColumnTokens) { - if (shingle.contains(deletedColumnToken)) { - shinglesSeenIterator.remove(); - break; - } - } - } - - - if (matrix.columns.size() < minimumShingleSize) { - // exhausted - return null; - } - - // create permutations of the matrix it now looks - permutations = matrix.permutationIterator(); - } - - nextTokensPermutation(); - return request_next_token; - - } - } - - if (permutations == null) { - permutations = matrix.permutationIterator(); - } - - if (!permutations.hasNext()) { - return null; - } - - nextTokensPermutation(); - - return request_next_token; - } - - /** - * get next permutation of row combinations, - * creates list of all tokens in the row and - * an index from each such token to what row they exist in. - * finally resets the current (next) shingle size and offset. - */ - private void nextTokensPermutation() { - Matrix.Column.Row[] rowsPermutation = permutations.next(); - List currentPermutationRows = new ArrayList(); - List currentPermuationTokens = new ArrayList(); - for (Matrix.Column.Row row : rowsPermutation) { - for (Token token : row.getTokens()) { - currentPermuationTokens.add(token); - currentPermutationRows.add(row); - } - } - this.currentPermuationTokens = currentPermuationTokens; - this.currentPermutationRows = currentPermutationRows; - - currentPermutationTokensStartOffset = 0; - currentShingleLength = minimumShingleSize - 1; - - } - - /** - * Final touch of a shingle token before it is passed on to the consumer from method {@link #incrementToken()}. - * - * Calculates and sets type, flags, position increment, start/end offsets and weight. 
- * - * @param token Shingle token - * @param shingle Tokens used to produce the shingle token. - * @param currentPermutationStartOffset Start offset in parameter currentPermutationTokens - * @param currentPermutationRows index to Matrix.Column.Row from the position of tokens in parameter currentPermutationTokens - * @param currentPermuationTokens tokens of the current permutation of rows in the matrix. - */ - public void updateToken(Token token, List shingle, int currentPermutationStartOffset, List currentPermutationRows, List currentPermuationTokens) { - token.setType(ShingleMatrixFilter.class.getName()); - token.setFlags(0); - token.setPositionIncrement(1); - token.setStartOffset(shingle.get(0).startOffset()); - token.setEndOffset(shingle.get(shingle.size() - 1).endOffset()); - settingsCodec.setWeight(token, calculateShingleWeight(token, shingle, currentPermutationStartOffset, currentPermutationRows, currentPermuationTokens)); - } - - /** - * Evaluates the new shingle token weight. - * - * for (shingle part token in shingle) - * weight += shingle part token weight * (1 / sqrt(all shingle part token weights summed)) - * - * This algorithm gives a slightly greater score for longer shingles - * and is rather penalising to great shingle token part weights. - * - * @param shingleToken token returned to consumer - * @param shingle tokens the tokens used to produce the shingle token. - * @param currentPermutationStartOffset start offset in parameter currentPermutationRows and currentPermutationTokens. - * @param currentPermutationRows an index to what matrix row a token in parameter currentPermutationTokens exist. - * @param currentPermuationTokens all tokens in the current row permutation of the matrix. A sub list (parameter offset, parameter shingle.size) equals parameter shingle. 
- * @return weight to be set for parameter shingleToken - */ - public float calculateShingleWeight(Token shingleToken, List shingle, int currentPermutationStartOffset, List currentPermutationRows, List currentPermuationTokens) { - double[] weights = new double[shingle.size()]; - - double total = 0f; - double top = 0d; - - - for (int i=0; i top) { - top = tmp; - } - total += tmp; - } - - double factor = 1d / Math.sqrt(total); - - double weight = 0d; - for (double partWeight : weights) { - weight += partWeight * factor; - } - - return (float) weight; - } - - - private Token readColumnBuf; - - /** - * Loads one column from the token stream. - * - * When the last token is read from the token stream it will column.setLast(true); - * - * @return true if it manage to read one more column from the input token stream - * @throws IOException if the matrix source input stream throws an exception - */ - private boolean readColumn() throws IOException { - - Token token; - if (readColumnBuf != null) { - token = readColumnBuf; - readColumnBuf = null; - } else { - token = getNextInputToken(new Token()); - } - - if (token == null) { - return false; - } - - Matrix.Column currentReaderColumn = matrix.new Column(); - Matrix.Column.Row currentReaderRow = currentReaderColumn.new Row(); - - currentReaderRow.getTokens().add(token); - TokenPositioner tokenPositioner; - while ((readColumnBuf = getNextInputToken(new Token())) != null - && (tokenPositioner = settingsCodec.getTokenPositioner(readColumnBuf)) != TokenPositioner.newColumn) { - - if (tokenPositioner == TokenPositioner.sameRow) { - currentReaderRow.getTokens().add(readColumnBuf); - } else /*if (tokenPositioner == TokenPositioner.newRow)*/ { - currentReaderRow = currentReaderColumn.new Row(); - currentReaderRow.getTokens().add(readColumnBuf); - } - readColumnBuf = null; - - } - - if (readColumnBuf == null) { - readColumnBuf = getNextInputToken(new Token()); - if (readColumnBuf == null) { - currentReaderColumn.setLast(true); - } - } 
- - - return true; - - } - - - /** - * A column focused matrix in three dimensions: - * - *

    -   * Token[column][row][z-axis] {
    -   *     {{hello}, {greetings, and, salutations}},
    -   *     {{world}, {earth}, {tellus}}
    -   * };
    -   * 
    - * - * todo consider row groups - * to indicate that shingles is only to contain permutations with texts in that same row group. - * - */ - public static class Matrix { - - private boolean columnsHasBeenCreated = false; - - private List columns = new ArrayList(); - - public List getColumns() { - return columns; - } - - public class Column { - - private boolean last; - private boolean first; - - public Matrix getMatrix() { - return Matrix.this; - } - - public Column(Token token) { - this(); - Row row = new Row(); - row.getTokens().add(token); - } - - public Column() { - synchronized (Matrix.this) { - if (!columnsHasBeenCreated) { - this.setFirst(true); - columnsHasBeenCreated = true; - } - } - Matrix.this.columns.add(this); - } - - private List rows = new ArrayList(); - - public List getRows() { - return rows; - } - - - public int getIndex() { - return Matrix.this.columns.indexOf(this); - } - - @Override - public String toString() { - return "Column{" + - "first=" + first + - ", last=" + last + - ", rows=" + rows + - '}'; - } - - public boolean isFirst() { - return first; - } - - public void setFirst(boolean first) { - this.first = first; - } - - public void setLast(boolean last) { - this.last = last; - } - - public boolean isLast() { - return last; - } - - public class Row { - - public Column getColumn() { - return Column.this; - } - - private List tokens = new LinkedList(); - - public Row() { - Column.this.rows.add(this); - } - - public int getIndex() { - return Column.this.rows.indexOf(this); - } - - public List getTokens() { - return tokens; - } - - public void setTokens(List tokens) { - this.tokens = tokens; - } - -// public int getStartOffset() { -// int ret = tokens[0].startOffset(); -// if (getIndex() > 0 && ret == 0) { -// ret = Column.this.rows.get(0).getStartOffset(); -// } -// return ret; -// } -// -// public int getEndOffset() { -// int ret = tokens[tokens.length - 1].endOffset(); -// if (getIndex() > 0 && ret == 0) { -// ret = 
Column.this.rows.get(0).getEndOffset(); -// } -// return ret; -// } - - @Override - public String toString() { - return "Row{" + - "index=" + getIndex() + - ", tokens=" + (tokens == null ? null : tokens) + - '}'; - } - } - - } - - - public Iterator permutationIterator() { - - return new Iterator() { - - private int[] columnRowCounters = new int[columns.size()]; - - public void remove() { - throw new IllegalStateException("not implemented"); - } - - public boolean hasNext() { - int s = columnRowCounters.length; - int n = columns.size(); - return s != 0 && n >= s && columnRowCounters[s - 1] < (columns.get(s - 1)).getRows().size(); - } - - public Column.Row[] next() { - if (!hasNext()) { - throw new NoSuchElementException("no more elements"); - } - - Column.Row[] rows = new Column.Row[columnRowCounters.length]; - - for (int i = 0; i < columnRowCounters.length; i++) { - rows[i] = columns.get(i).rows.get(columnRowCounters[i]); - } - incrementColumnRowCounters(); - - return rows; - } - - private void incrementColumnRowCounters() { - for (int i = 0; i < columnRowCounters.length; i++) { - columnRowCounters[i]++; - if (columnRowCounters[i] == columns.get(i).rows.size() && - i < columnRowCounters.length - 1) { - columnRowCounters[i] = 0; - } else { - break; - } - } - } - }; - } - - @Override - public String toString() { - return "Matrix{" + - "columns=" + columns + - '}'; - } - } - - - public int getMinimumShingleSize() { - return minimumShingleSize; - } - - public void setMinimumShingleSize(int minimumShingleSize) { - this.minimumShingleSize = minimumShingleSize; - } - - public int getMaximumShingleSize() { - return maximumShingleSize; - } - - public void setMaximumShingleSize(int maximumShingleSize) { - this.maximumShingleSize = maximumShingleSize; - } - - - public Matrix getMatrix() { - return matrix; - } - - public void setMatrix(Matrix matrix) { - this.matrix = matrix; - } - - public Character getSpacerCharacter() { - return spacerCharacter; - } - - public void 
setSpacerCharacter(Character spacerCharacter) { - this.spacerCharacter = spacerCharacter; - } - - public boolean isIgnoringSinglePrefixOrSuffixShingle() { - return ignoringSinglePrefixOrSuffixShingle; - } - - public void setIgnoringSinglePrefixOrSuffixShingle(boolean ignoringSinglePrefixOrSuffixShingle) { - this.ignoringSinglePrefixOrSuffixShingle = ignoringSinglePrefixOrSuffixShingle; - } - - /** - * Using this codec makes a {@link ShingleMatrixFilter} act like {@link org.apache.lucene.analysis.shingle.ShingleFilter}. - * It produces the most simple sort of shingles, ignoring token position increments, et c. - * - * It adds each token as a new column. - */ - public static class OneDimensionalNonWeightedTokenSettingsCodec extends TokenSettingsCodec { - - @Override - public TokenPositioner getTokenPositioner(Token token) throws IOException { - return TokenPositioner.newColumn; - } - - @Override - public void setTokenPositioner(Token token, TokenPositioner tokenPositioner) { - } - - @Override - public float getWeight(Token token) { - return 1f; - } - - @Override - public void setWeight(Token token, float weight) { - } - - } - - - /** - * A codec that creates a two dimensional matrix - * by treating tokens from the input stream with 0 position increment - * as new rows to the current column. 
- */ - public static class TwoDimensionalNonWeightedSynonymTokenSettingsCodec extends TokenSettingsCodec { - - @Override - public TokenPositioner getTokenPositioner(Token token) throws IOException { - if (token.getPositionIncrement() == 0) { - return TokenPositioner.newRow; - } else { - return TokenPositioner.newColumn; - } - } - - @Override - public void setTokenPositioner(Token token, TokenPositioner tokenPositioner) { - throw new UnsupportedOperationException(); - } - - @Override - public float getWeight(Token token) { - return 1f; - } - - @Override - public void setWeight(Token token, float weight) { - } - - } - - /** - * A full featured codec not to be used for something serious. - * - * It takes complete control of - * payload for weight - * and the bit flags for positioning in the matrix. - * - * Mainly exist for demonstrational purposes. - */ - public static class SimpleThreeDimensionalTokenSettingsCodec extends TokenSettingsCodec { - - /** - * @param token - * @return the token flags int value as TokenPosition - * @throws IOException - */ - @Override - public TokenPositioner getTokenPositioner(Token token) throws IOException { - switch (token.getFlags()) { - case 0: - return TokenPositioner.newColumn; - case 1: - return TokenPositioner.newRow; - case 2: - return TokenPositioner.sameRow; - } - throw new IOException("Unknown matrix positioning of token " + token); - } - - /** - * Sets the TokenPositioner as token flags int value. - * - * @param token - * @param tokenPositioner - */ - @Override - public void setTokenPositioner(Token token, TokenPositioner tokenPositioner) { - token.setFlags(tokenPositioner.getIndex()); - } - - /** - * Returns a 32 bit float from the payload, or 1f it null. 
- * - * @param token - * @return 32 bit float - */ - @Override - public float getWeight(Token token) { - if (token.getPayload() == null || token.getPayload().getData() == null) { - return 1f; - } else { - return PayloadHelper.decodeFloat(token.getPayload().getData()); - } - } - - /** - * Stores a 32 bit float in the payload, or set it to null if 1f; - * @param token - * @param weight - */ - @Override - public void setWeight(Token token, float weight) { - if (weight == 1f) { - token.setPayload(null); - } else { - token.setPayload(new Payload(PayloadHelper.encodeFloat(weight))); - } - } - - } - - -} diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/TestShingleMatrixFilter.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/TestShingleMatrixFilter.java deleted file mode 100644 index 323811ac164..00000000000 --- a/modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/TestShingleMatrixFilter.java +++ /dev/null @@ -1,530 +0,0 @@ -package org.apache.lucene.analysis.shingle; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -import java.io.IOException; -import java.io.StringReader; -import java.util.Collection; -import java.util.Iterator; -import java.util.LinkedList; - -import org.apache.lucene.analysis.*; -import org.apache.lucene.analysis.core.WhitespaceTokenizer; -import org.apache.lucene.analysis.miscellaneous.EmptyTokenStream; -import org.apache.lucene.analysis.miscellaneous.PrefixAndSuffixAwareTokenFilter; -import org.apache.lucene.analysis.miscellaneous.SingleTokenTokenStream; -import org.apache.lucene.analysis.payloads.PayloadHelper; -import org.apache.lucene.analysis.shingle.ShingleMatrixFilter.Matrix; -import org.apache.lucene.analysis.shingle.ShingleMatrixFilter.Matrix.Column; -import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.analysis.tokenattributes.FlagsAttribute; -import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; -import org.apache.lucene.analysis.tokenattributes.PayloadAttribute; -import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.analysis.tokenattributes.TypeAttribute; - -public class TestShingleMatrixFilter extends BaseTokenStreamTestCase { - - public void testIterator() throws IOException { - - WhitespaceTokenizer wst = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("one two three four five")); - ShingleMatrixFilter smf = new ShingleMatrixFilter(wst, 2, 2, '_', false, new ShingleMatrixFilter.OneDimensionalNonWeightedTokenSettingsCodec()); - - int i; - for(i=0; smf.incrementToken(); i++) {} - assertEquals(4, i); - - // call next once more. 
this should return false again rather than throwing an exception (LUCENE-1939) - assertFalse(smf.incrementToken()); - - System.currentTimeMillis(); - - } - - public void testBehavingAsShingleFilter() throws IOException { - - ShingleMatrixFilter.defaultSettingsCodec = null; - - TokenStream ts; - - ts = new ShingleMatrixFilter(new EmptyTokenStream(), 1, 2, new Character(' '), false, new ShingleMatrixFilter.OneDimensionalNonWeightedTokenSettingsCodec()); - assertFalse(ts.incrementToken()); - - TokenListStream tls; - LinkedList tokens; - - // test a plain old token stream with synonyms translated to rows. - - tokens = new LinkedList(); - tokens.add(createToken("please", 0, 6)); - tokens.add(createToken("divide", 7, 13)); - tokens.add(createToken("this", 14, 18)); - tokens.add(createToken("sentence", 19, 27)); - tokens.add(createToken("into", 28, 32)); - tokens.add(createToken("shingles", 33, 39)); - - tls = new TokenListStream(tokens); - - // bi-grams - - ts = new ShingleMatrixFilter(tls, 1, 2, new Character(' '), false, new ShingleMatrixFilter.OneDimensionalNonWeightedTokenSettingsCodec()); - - assertTokenStreamContents(ts, - new String[] { "please", "please divide", "divide", "divide this", - "this", "this sentence", "sentence", "sentence into", "into", - "into shingles", "shingles" }, - new int[] { 0, 0, 7, 7, 14, 14, 19, 19, 28, 28, 33 }, - new int[] { 6, 13, 13, 18, 18, 27, 27, 32, 32, 39, 39 }); - } - - /** - * Extracts a matrix from a token stream. - * @throws IOException - */ - public void testTokenStream() throws IOException { - - ShingleMatrixFilter.defaultSettingsCodec = null;//new ShingleMatrixFilter.SimpleThreeDimensionalTokenSettingsCodec(); - - TokenStream ts; - TokenStream tls; - LinkedList tokens; - - // test a plain old token stream with synonyms tranlated to rows. 
- - tokens = new LinkedList(); - tokens.add(tokenFactory("hello", 1, 0, 4)); - tokens.add(tokenFactory("greetings", 0, 0, 4)); - tokens.add(tokenFactory("world", 1, 5, 10)); - tokens.add(tokenFactory("earth", 0, 5, 10)); - tokens.add(tokenFactory("tellus", 0, 5, 10)); - - tls = new TokenListStream(tokens); - - // bi-grams - - ts = new ShingleMatrixFilter(tls, 2, 2, new Character('_'), false, new ShingleMatrixFilter.TwoDimensionalNonWeightedSynonymTokenSettingsCodec()); - - assertNext(ts, "hello_world"); - assertNext(ts, "greetings_world"); - assertNext(ts, "hello_earth"); - assertNext(ts, "greetings_earth"); - assertNext(ts, "hello_tellus"); - assertNext(ts, "greetings_tellus"); - assertFalse(ts.incrementToken()); - - // bi-grams with no spacer character, start offset, end offset - - tls.reset(); - ts = new ShingleMatrixFilter(tls, 2, 2, null, false, new ShingleMatrixFilter.TwoDimensionalNonWeightedSynonymTokenSettingsCodec()); - assertNext(ts, "helloworld", 0, 10); - assertNext(ts, "greetingsworld", 0, 10); - assertNext(ts, "helloearth", 0, 10); - assertNext(ts, "greetingsearth", 0, 10); - assertNext(ts, "hellotellus", 0, 10); - assertNext(ts, "greetingstellus", 0, 10); - assertFalse(ts.incrementToken()); - - - // add ^_prefix_and_suffix_$ - // - // using 3d codec as it supports weights - - ShingleMatrixFilter.defaultSettingsCodec = new ShingleMatrixFilter.SimpleThreeDimensionalTokenSettingsCodec(); - - tokens = new LinkedList(); - tokens.add(tokenFactory("hello", 1, 1f, 0, 4, ShingleMatrixFilter.TokenPositioner.newColumn)); - tokens.add(tokenFactory("greetings", 0, 1f, 0, 4, ShingleMatrixFilter.TokenPositioner.newRow)); - tokens.add(tokenFactory("world", 1, 1f, 5, 10, ShingleMatrixFilter.TokenPositioner.newColumn)); - tokens.add(tokenFactory("earth", 0, 1f, 5, 10, ShingleMatrixFilter.TokenPositioner.newRow)); - tokens.add(tokenFactory("tellus", 0, 1f, 5, 10, ShingleMatrixFilter.TokenPositioner.newRow)); - - tls = new TokenListStream(tokens); - - ts = new 
PrefixAndSuffixAwareTokenFilter(new SingleTokenTokenStream(tokenFactory("^", 1, 100f, 0, 0)), tls, new SingleTokenTokenStream(tokenFactory("$", 1, 50f, 0, 0))); - tls = new CachingTokenFilter(ts); - - // bi-grams, position incrememnt, weight, start offset, end offset - - ts = new ShingleMatrixFilter(tls, 2, 2, new Character('_'), false); -// -// for (Token token = ts.next(new Token()); token != null; token = ts.next(token)) { -// System.out.println("assertNext(ts, \"" + token.term() + "\", " + token.getPositionIncrement() + ", " + (token.getPayload() == null ? "1.0" : PayloadHelper.decodeFloat(token.getPayload().getData())) + "f, " + token.startOffset() + ", " + token.endOffset() + ");"); -// token.clear(); -// } - - assertNext(ts, "^_hello", 1, 10.049875f, 0, 4); - assertNext(ts, "^_greetings", 1, 10.049875f, 0, 4); - assertNext(ts, "hello_world", 1, 1.4142135f, 0, 10); - assertNext(ts, "greetings_world", 1, 1.4142135f, 0, 10); - assertNext(ts, "hello_earth", 1, 1.4142135f, 0, 10); - assertNext(ts, "greetings_earth", 1, 1.4142135f, 0, 10); - assertNext(ts, "hello_tellus", 1, 1.4142135f, 0, 10); - assertNext(ts, "greetings_tellus", 1, 1.4142135f, 0, 10); - assertNext(ts, "world_$", 1, 7.1414285f, 5, 10); - assertNext(ts, "earth_$", 1, 7.1414285f, 5, 10); - assertNext(ts, "tellus_$", 1, 7.1414285f, 5, 10); - assertFalse(ts.incrementToken()); - - // test unlimited size and allow single boundary token as shingle - tls.reset(); - ts = new ShingleMatrixFilter(tls, 1, Integer.MAX_VALUE, new Character('_'), false); - -// -// for (Token token = ts.next(new Token()); token != null; token = ts.next(token)) { -// System.out.println("assertNext(ts, \"" + token.term() + "\", " + token.getPositionIncrement() + ", " + (token.getPayload() == null ? 
"1.0" : PayloadHelper.decodeFloat(token.getPayload().getData())) + "f, " + token.startOffset() + ", " + token.endOffset() + ");"); -// token.clear(); -// } - - assertNext(ts, "^", 1, 10.0f, 0, 0); - assertNext(ts, "^_hello", 1, 10.049875f, 0, 4); - assertNext(ts, "^_hello_world", 1, 10.099504f, 0, 10); - assertNext(ts, "^_hello_world_$", 1, 12.328828f, 0, 10); - assertNext(ts, "hello", 1, 1.0f, 0, 4); - assertNext(ts, "hello_world", 1, 1.4142135f, 0, 10); - assertNext(ts, "hello_world_$", 1, 7.2111025f, 0, 10); - assertNext(ts, "world", 1, 1.0f, 5, 10); - assertNext(ts, "world_$", 1, 7.1414285f, 5, 10); - assertNext(ts, "$", 1, 7.071068f, 10, 10); - assertNext(ts, "^_greetings", 1, 10.049875f, 0, 4); - assertNext(ts, "^_greetings_world", 1, 10.099504f, 0, 10); - assertNext(ts, "^_greetings_world_$", 1, 12.328828f, 0, 10); - assertNext(ts, "greetings", 1, 1.0f, 0, 4); - assertNext(ts, "greetings_world", 1, 1.4142135f, 0, 10); - assertNext(ts, "greetings_world_$", 1, 7.2111025f, 0, 10); - assertNext(ts, "^_hello_earth", 1, 10.099504f, 0, 10); - assertNext(ts, "^_hello_earth_$", 1, 12.328828f, 0, 10); - assertNext(ts, "hello_earth", 1, 1.4142135f, 0, 10); - assertNext(ts, "hello_earth_$", 1, 7.2111025f, 0, 10); - assertNext(ts, "earth", 1, 1.0f, 5, 10); - assertNext(ts, "earth_$", 1, 7.1414285f, 5, 10); - assertNext(ts, "^_greetings_earth", 1, 10.099504f, 0, 10); - assertNext(ts, "^_greetings_earth_$", 1, 12.328828f, 0, 10); - assertNext(ts, "greetings_earth", 1, 1.4142135f, 0, 10); - assertNext(ts, "greetings_earth_$", 1, 7.2111025f, 0, 10); - assertNext(ts, "^_hello_tellus", 1, 10.099504f, 0, 10); - assertNext(ts, "^_hello_tellus_$", 1, 12.328828f, 0, 10); - assertNext(ts, "hello_tellus", 1, 1.4142135f, 0, 10); - assertNext(ts, "hello_tellus_$", 1, 7.2111025f, 0, 10); - assertNext(ts, "tellus", 1, 1.0f, 5, 10); - assertNext(ts, "tellus_$", 1, 7.1414285f, 5, 10); - assertNext(ts, "^_greetings_tellus", 1, 10.099504f, 0, 10); - assertNext(ts, "^_greetings_tellus_$", 1, 
12.328828f, 0, 10); - assertNext(ts, "greetings_tellus", 1, 1.4142135f, 0, 10); - assertNext(ts, "greetings_tellus_$", 1, 7.2111025f, 0, 10); - - assertFalse(ts.incrementToken()); - - // test unlimited size but don't allow single boundary token as shingle - - tls.reset(); - ts = new ShingleMatrixFilter(tls, 1, Integer.MAX_VALUE, new Character('_'), true); -// for (Token token = ts.next(new Token()); token != null; token = ts.next(token)) { -// System.out.println("assertNext(ts, \"" + token.term() + "\", " + token.getPositionIncrement() + ", " + (token.getPayload() == null ? "1.0" : PayloadHelper.decodeFloat(token.getPayload().getData())) + "f, " + token.startOffset() + ", " + token.endOffset() + ");"); -// token.clear(); -// } - - assertNext(ts, "^_hello", 1, 10.049875f, 0, 4); - assertNext(ts, "^_hello_world", 1, 10.099504f, 0, 10); - assertNext(ts, "^_hello_world_$", 1, 12.328828f, 0, 10); - assertNext(ts, "hello", 1, 1.0f, 0, 4); - assertNext(ts, "hello_world", 1, 1.4142135f, 0, 10); - assertNext(ts, "hello_world_$", 1, 7.2111025f, 0, 10); - assertNext(ts, "world", 1, 1.0f, 5, 10); - assertNext(ts, "world_$", 1, 7.1414285f, 5, 10); - assertNext(ts, "^_greetings", 1, 10.049875f, 0, 4); - assertNext(ts, "^_greetings_world", 1, 10.099504f, 0, 10); - assertNext(ts, "^_greetings_world_$", 1, 12.328828f, 0, 10); - assertNext(ts, "greetings", 1, 1.0f, 0, 4); - assertNext(ts, "greetings_world", 1, 1.4142135f, 0, 10); - assertNext(ts, "greetings_world_$", 1, 7.2111025f, 0, 10); - assertNext(ts, "^_hello_earth", 1, 10.099504f, 0, 10); - assertNext(ts, "^_hello_earth_$", 1, 12.328828f, 0, 10); - assertNext(ts, "hello_earth", 1, 1.4142135f, 0, 10); - assertNext(ts, "hello_earth_$", 1, 7.2111025f, 0, 10); - assertNext(ts, "earth", 1, 1.0f, 5, 10); - assertNext(ts, "earth_$", 1, 7.1414285f, 5, 10); - assertNext(ts, "^_greetings_earth", 1, 10.099504f, 0, 10); - assertNext(ts, "^_greetings_earth_$", 1, 12.328828f, 0, 10); - assertNext(ts, "greetings_earth", 1, 1.4142135f, 0, 
10); - assertNext(ts, "greetings_earth_$", 1, 7.2111025f, 0, 10); - assertNext(ts, "^_hello_tellus", 1, 10.099504f, 0, 10); - assertNext(ts, "^_hello_tellus_$", 1, 12.328828f, 0, 10); - assertNext(ts, "hello_tellus", 1, 1.4142135f, 0, 10); - assertNext(ts, "hello_tellus_$", 1, 7.2111025f, 0, 10); - assertNext(ts, "tellus", 1, 1.0f, 5, 10); - assertNext(ts, "tellus_$", 1, 7.1414285f, 5, 10); - assertNext(ts, "^_greetings_tellus", 1, 10.099504f, 0, 10); - assertNext(ts, "^_greetings_tellus_$", 1, 12.328828f, 0, 10); - assertNext(ts, "greetings_tellus", 1, 1.4142135f, 0, 10); - assertNext(ts, "greetings_tellus_$", 1, 7.2111025f, 0, 10); - - - assertFalse(ts.incrementToken()); - - System.currentTimeMillis(); - - // multi-token synonyms - // - // Token[][][] { - // {{hello}, {greetings, and, salutations}, - // {{world}, {earth}, {tellus}} - // } - // - - - tokens = new LinkedList(); - tokens.add(tokenFactory("hello", 1, 1f, 0, 4, ShingleMatrixFilter.TokenPositioner.newColumn)); - tokens.add(tokenFactory("greetings", 1, 1f, 0, 4, ShingleMatrixFilter.TokenPositioner.newRow)); - tokens.add(tokenFactory("and", 1, 1f, 0, 4, ShingleMatrixFilter.TokenPositioner.sameRow)); - tokens.add(tokenFactory("salutations", 1, 1f, 0, 4, ShingleMatrixFilter.TokenPositioner.sameRow)); - tokens.add(tokenFactory("world", 1, 1f, 5, 10, ShingleMatrixFilter.TokenPositioner.newColumn)); - tokens.add(tokenFactory("earth", 1, 1f, 5, 10, ShingleMatrixFilter.TokenPositioner.newRow)); - tokens.add(tokenFactory("tellus", 1, 1f, 5, 10, ShingleMatrixFilter.TokenPositioner.newRow)); - - tls = new TokenListStream(tokens); - - // 2-3 grams - - ts = new ShingleMatrixFilter(tls, 2, 3, new Character('_'), false); - -// for (Token token = ts.next(new Token()); token != null; token = ts.next(token)) { -// System.out.println("assertNext(ts, \"" + token.term() + "\", " + token.getPositionIncrement() + ", " + (token.getPayload() == null ? 
"1.0" : PayloadHelper.decodeFloat(token.getPayload().getData())) + "f, " + token.startOffset() + ", " + token.endOffset() + ");"); -// token.clear(); -// } - - // shingle, position increment, weight, start offset, end offset - - assertNext(ts, "hello_world", 1, 1.4142135f, 0, 10); - assertNext(ts, "greetings_and", 1, 1.4142135f, 0, 4); - assertNext(ts, "greetings_and_salutations", 1, 1.7320508f, 0, 4); - assertNext(ts, "and_salutations", 1, 1.4142135f, 0, 4); - assertNext(ts, "and_salutations_world", 1, 1.7320508f, 0, 10); - assertNext(ts, "salutations_world", 1, 1.4142135f, 0, 10); - assertNext(ts, "hello_earth", 1, 1.4142135f, 0, 10); - assertNext(ts, "and_salutations_earth", 1, 1.7320508f, 0, 10); - assertNext(ts, "salutations_earth", 1, 1.4142135f, 0, 10); - assertNext(ts, "hello_tellus", 1, 1.4142135f, 0, 10); - assertNext(ts, "and_salutations_tellus", 1, 1.7320508f, 0, 10); - assertNext(ts, "salutations_tellus", 1, 1.4142135f, 0, 10); - - assertFalse(ts.incrementToken()); - - System.currentTimeMillis(); - - - } - - /** - * Tests creat shingles from a pre-assembled matrix - * - * Tests the row token z-axis, multi token synonyms. - * - * @throws IOException - */ - public void testMatrix() throws IOException { - // some other tests set this to null. - // set it here in case tests are run out of the usual order. 
- ShingleMatrixFilter.defaultSettingsCodec = new ShingleMatrixFilter.SimpleThreeDimensionalTokenSettingsCodec(); - Matrix matrix = new Matrix(); - - matrix.new Column(tokenFactory("no", 1)); - matrix.new Column(tokenFactory("surprise", 1)); - matrix.new Column(tokenFactory("to", 1)); - matrix.new Column(tokenFactory("see", 1)); - matrix.new Column(tokenFactory("england", 1)); - matrix.new Column(tokenFactory("manager", 1)); - - Column col = matrix.new Column(); - - // sven göran eriksson is a multi token synonym to svennis - col.new Row().getTokens().add(tokenFactory("svennis", 1)); - - Column.Row row = col.new Row(); - row.getTokens().add(tokenFactory("sven", 1)); - row.getTokens().add(tokenFactory("göran", 1)); - row.getTokens().add(tokenFactory("eriksson", 1)); - - matrix.new Column(tokenFactory("in", 1)); - matrix.new Column(tokenFactory("the", 1)); - matrix.new Column(tokenFactory("croud", 1)); - - TokenStream ts = new ShingleMatrixFilter(matrix, 2, 4, new Character('_'), true, new ShingleMatrixFilter.SimpleThreeDimensionalTokenSettingsCodec()); - -// for (Token token = ts.next(new Token()); token != null; token = ts.next(token)) { -// System.out.println("assertNext(ts, \"" + token.term() + "\", " + token.getPositionIncrement() + ", " + (token.getPayload() == null ? 
"1.0" : PayloadHelper.decodeFloat(token.getPayload().getData())) + "f, " + token.startOffset() + ", " + token.endOffset() + ");"); -// token.clear(); -// } - - assertNext(ts, "no_surprise", 1, 1.4142135f, 0, 0); - assertNext(ts, "no_surprise_to", 1, 1.7320508f, 0, 0); - assertNext(ts, "no_surprise_to_see", 1, 2.0f, 0, 0); - assertNext(ts, "surprise_to", 1, 1.4142135f, 0, 0); - assertNext(ts, "surprise_to_see", 1, 1.7320508f, 0, 0); - assertNext(ts, "surprise_to_see_england", 1, 2.0f, 0, 0); - assertNext(ts, "to_see", 1, 1.4142135f, 0, 0); - assertNext(ts, "to_see_england", 1, 1.7320508f, 0, 0); - assertNext(ts, "to_see_england_manager", 1, 2.0f, 0, 0); - assertNext(ts, "see_england", 1, 1.4142135f, 0, 0); - assertNext(ts, "see_england_manager", 1, 1.7320508f, 0, 0); - assertNext(ts, "see_england_manager_svennis", 1, 2.0f, 0, 0); - assertNext(ts, "england_manager", 1, 1.4142135f, 0, 0); - assertNext(ts, "england_manager_svennis", 1, 1.7320508f, 0, 0); - assertNext(ts, "england_manager_svennis_in", 1, 2.0f, 0, 0); - assertNext(ts, "manager_svennis", 1, 1.4142135f, 0, 0); - assertNext(ts, "manager_svennis_in", 1, 1.7320508f, 0, 0); - assertNext(ts, "manager_svennis_in_the", 1, 2.0f, 0, 0); - assertNext(ts, "svennis_in", 1, 1.4142135f, 0, 0); - assertNext(ts, "svennis_in_the", 1, 1.7320508f, 0, 0); - assertNext(ts, "svennis_in_the_croud", 1, 2.0f, 0, 0); - assertNext(ts, "in_the", 1, 1.4142135f, 0, 0); - assertNext(ts, "in_the_croud", 1, 1.7320508f, 0, 0); - assertNext(ts, "the_croud", 1, 1.4142135f, 0, 0); - assertNext(ts, "see_england_manager_sven", 1, 2.0f, 0, 0); - assertNext(ts, "england_manager_sven", 1, 1.7320508f, 0, 0); - assertNext(ts, "england_manager_sven_göran", 1, 2.0f, 0, 0); - assertNext(ts, "manager_sven", 1, 1.4142135f, 0, 0); - assertNext(ts, "manager_sven_göran", 1, 1.7320508f, 0, 0); - assertNext(ts, "manager_sven_göran_eriksson", 1, 2.0f, 0, 0); - assertNext(ts, "sven_göran", 1, 1.4142135f, 0, 0); - assertNext(ts, "sven_göran_eriksson", 1, 
1.7320508f, 0, 0); - assertNext(ts, "sven_göran_eriksson_in", 1, 2.0f, 0, 0); - assertNext(ts, "göran_eriksson", 1, 1.4142135f, 0, 0); - assertNext(ts, "göran_eriksson_in", 1, 1.7320508f, 0, 0); - assertNext(ts, "göran_eriksson_in_the", 1, 2.0f, 0, 0); - assertNext(ts, "eriksson_in", 1, 1.4142135f, 0, 0); - assertNext(ts, "eriksson_in_the", 1, 1.7320508f, 0, 0); - assertNext(ts, "eriksson_in_the_croud", 1, 2.0f, 0, 0); - - assertFalse(ts.incrementToken()); - - } - - private Token tokenFactory(String text, int posIncr, int startOffset, int endOffset) { - Token token = new Token(startOffset, endOffset); - token.setEmpty().append(text); - token.setPositionIncrement(posIncr); - return token; - } - - - private Token tokenFactory(String text, int posIncr) { - return tokenFactory(text, posIncr, 1f, 0, 0); - } - - private Token tokenFactory(String text, int posIncr, float weight, int startOffset, int endOffset) { - Token token = new Token(startOffset, endOffset); - token.setEmpty().append(text); - token.setPositionIncrement(posIncr); - ShingleMatrixFilter.defaultSettingsCodec.setWeight(token, weight); - return token; - } - - private Token tokenFactory(String text, int posIncr, float weight, int startOffset, int endOffset, ShingleMatrixFilter.TokenPositioner positioner) { - Token token = new Token(startOffset, endOffset); - token.setEmpty().append(text); - token.setPositionIncrement(posIncr); - ShingleMatrixFilter.defaultSettingsCodec.setWeight(token, weight); - ShingleMatrixFilter.defaultSettingsCodec.setTokenPositioner(token, positioner); - return token; - } - - // assert-methods start here - - private void assertNext(TokenStream ts, String text) throws IOException { - CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class); - - assertTrue(ts.incrementToken()); - assertEquals(text, termAtt.toString()); - } - - private void assertNext(TokenStream ts, String text, int positionIncrement, float boost, int startOffset, int endOffset) throws IOException { - 
CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class); - PositionIncrementAttribute posIncrAtt = ts.addAttribute(PositionIncrementAttribute.class); - PayloadAttribute payloadAtt = ts.addAttribute(PayloadAttribute.class); - OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class); - - assertTrue(ts.incrementToken()); - assertEquals(text, termAtt.toString()); - assertEquals(positionIncrement, posIncrAtt.getPositionIncrement()); - assertEquals(boost, payloadAtt.getPayload() == null ? 1f : PayloadHelper.decodeFloat(payloadAtt.getPayload().getData()), 0); - assertEquals(startOffset, offsetAtt.startOffset()); - assertEquals(endOffset, offsetAtt.endOffset()); - } - - private void assertNext(TokenStream ts, String text, int startOffset, int endOffset) throws IOException { - CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class); - OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class); - - assertTrue(ts.incrementToken()); - assertEquals(text, termAtt.toString()); - assertEquals(startOffset, offsetAtt.startOffset()); - assertEquals(endOffset, offsetAtt.endOffset()); - } - - private static Token createToken(String term, int start, int offset) - { - Token token = new Token(start, offset); - token.setEmpty().append(term); - return token; - } - - - public final static class TokenListStream extends TokenStream { - - private Collection tokens; - private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); - private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class); - private final PayloadAttribute payloadAtt = addAttribute(PayloadAttribute.class); - private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); - private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class); - private final FlagsAttribute flagsAtt = addAttribute(FlagsAttribute.class); - - public TokenListStream(Collection tokens) { - this.tokens = tokens; - } - - private 
Iterator iterator; - - @Override - public boolean incrementToken() throws IOException { - if (iterator == null) { - iterator = tokens.iterator(); - } - if (!iterator.hasNext()) { - return false; - } - Token prototype = iterator.next(); - clearAttributes(); - termAtt.copyBuffer(prototype.buffer(), 0, prototype.length()); - posIncrAtt.setPositionIncrement(prototype.getPositionIncrement()); - flagsAtt.setFlags(prototype.getFlags()); - offsetAtt.setOffset(prototype.startOffset(), prototype.endOffset()); - typeAtt.setType(prototype.type()); - payloadAtt.setPayload(prototype.getPayload()); - - return true; - } - - - @Override - public void reset() throws IOException { - iterator = null; - } - } - -} From 39de6aabc8b3930762735ca3b0e39dc811b3d601 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Tue, 15 Feb 2011 13:10:29 +0000 Subject: [PATCH 145/185] SOLR-1553: mark experimental git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1070879 13f79535-47bb-0310-9956-ffa450edef68 --- solr/CHANGES.txt | 2 +- .../org/apache/solr/search/ExtendedDismaxQParserPlugin.java | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index 7beb1d35d61..c3aebc03ddd 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -279,7 +279,7 @@ New Features * SOLR-1553: New dismax parser implementation (accessible as "edismax") that supports full lucene syntax, improved reserved char escaping, fielded queries, improved proximity boosting, and improved stopword - handling. (yonik) + handling. Note: status is experimental for now. (yonik) * SOLR-1574: Add many new functions from java Math (e.g. 
sin, cos) (yonik) diff --git a/solr/src/java/org/apache/solr/search/ExtendedDismaxQParserPlugin.java b/solr/src/java/org/apache/solr/search/ExtendedDismaxQParserPlugin.java index daeab8f73ae..e64476364b9 100755 --- a/solr/src/java/org/apache/solr/search/ExtendedDismaxQParserPlugin.java +++ b/solr/src/java/org/apache/solr/search/ExtendedDismaxQParserPlugin.java @@ -48,6 +48,7 @@ import java.io.IOException; /** * An advanced multi-field query parser. + * @lucene.experimental */ public class ExtendedDismaxQParserPlugin extends QParserPlugin { public static final String NAME = "edismax"; From a82fa91d419f8ba7a7807822ba13a27b0c882fbc Mon Sep 17 00:00:00 2001 From: Steven Rowe Date: Tue, 15 Feb 2011 21:14:18 +0000 Subject: [PATCH 146/185] removed (non-existent) solrj dep git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1071066 13f79535-47bb-0310-9956-ffa450edef68 --- dev-tools/idea/solr/contrib/uima/uima.iml | 1 - 1 file changed, 1 deletion(-) diff --git a/dev-tools/idea/solr/contrib/uima/uima.iml b/dev-tools/idea/solr/contrib/uima/uima.iml index 9eca88b7752..b1aafa79912 100644 --- a/dev-tools/idea/solr/contrib/uima/uima.iml +++ b/dev-tools/idea/solr/contrib/uima/uima.iml @@ -12,7 +12,6 @@ - From 57956f0948abf111acf7836c827385a3d8e72656 Mon Sep 17 00:00:00 2001 From: Steven Rowe Date: Tue, 15 Feb 2011 21:14:54 +0000 Subject: [PATCH 147/185] removed dependency on (no-longer-present) remote module git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1071067 13f79535-47bb-0310-9956-ffa450edef68 --- dev-tools/idea/lucene/contrib/ant/ant.iml | 1 - dev-tools/idea/lucene/contrib/demo/demo.iml | 1 - dev-tools/idea/lucene/contrib/highlighter/highlighter.iml | 1 - dev-tools/idea/lucene/contrib/instantiated/instantiated.iml | 1 - dev-tools/idea/lucene/contrib/lucli/lucli.iml | 1 - dev-tools/idea/lucene/contrib/memory/memory.iml | 1 - dev-tools/idea/lucene/contrib/queryparser/queryparser.iml | 1 - dev-tools/idea/lucene/contrib/spatial/spatial.iml | 1 - 
dev-tools/idea/lucene/contrib/spellchecker/spellchecker.iml | 1 - dev-tools/idea/lucene/contrib/swing/swing.iml | 1 - dev-tools/idea/lucene/contrib/wordnet/wordnet.iml | 1 - .../idea/lucene/contrib/xml-query-parser/xml-query-parser.iml | 1 - dev-tools/idea/modules/benchmark/benchmark.iml | 1 - dev-tools/idea/solr/contrib/clustering/clustering.iml | 1 - dev-tools/idea/solr/solr.iml | 1 - 15 files changed, 15 deletions(-) diff --git a/dev-tools/idea/lucene/contrib/ant/ant.iml b/dev-tools/idea/lucene/contrib/ant/ant.iml index fbe88c99764..7cabce8a0ad 100644 --- a/dev-tools/idea/lucene/contrib/ant/ant.iml +++ b/dev-tools/idea/lucene/contrib/ant/ant.iml @@ -10,7 +10,6 @@ - diff --git a/dev-tools/idea/lucene/contrib/demo/demo.iml b/dev-tools/idea/lucene/contrib/demo/demo.iml index 0ee8feea4e6..adbd3ad69f3 100644 --- a/dev-tools/idea/lucene/contrib/demo/demo.iml +++ b/dev-tools/idea/lucene/contrib/demo/demo.iml @@ -10,7 +10,6 @@ - diff --git a/dev-tools/idea/lucene/contrib/highlighter/highlighter.iml b/dev-tools/idea/lucene/contrib/highlighter/highlighter.iml index 7b86e777dc5..b14b49f4cca 100644 --- a/dev-tools/idea/lucene/contrib/highlighter/highlighter.iml +++ b/dev-tools/idea/lucene/contrib/highlighter/highlighter.iml @@ -8,7 +8,6 @@ - diff --git a/dev-tools/idea/lucene/contrib/instantiated/instantiated.iml b/dev-tools/idea/lucene/contrib/instantiated/instantiated.iml index d339e518fb4..25e1b2e0865 100644 --- a/dev-tools/idea/lucene/contrib/instantiated/instantiated.iml +++ b/dev-tools/idea/lucene/contrib/instantiated/instantiated.iml @@ -8,7 +8,6 @@ - diff --git a/dev-tools/idea/lucene/contrib/lucli/lucli.iml b/dev-tools/idea/lucene/contrib/lucli/lucli.iml index 9158d7a059b..23d11788ba8 100644 --- a/dev-tools/idea/lucene/contrib/lucli/lucli.iml +++ b/dev-tools/idea/lucene/contrib/lucli/lucli.iml @@ -9,7 +9,6 @@ - diff --git a/dev-tools/idea/lucene/contrib/memory/memory.iml b/dev-tools/idea/lucene/contrib/memory/memory.iml index 761d7722010..f92c9c012a3 100644 --- 
a/dev-tools/idea/lucene/contrib/memory/memory.iml +++ b/dev-tools/idea/lucene/contrib/memory/memory.iml @@ -8,7 +8,6 @@ - diff --git a/dev-tools/idea/lucene/contrib/queryparser/queryparser.iml b/dev-tools/idea/lucene/contrib/queryparser/queryparser.iml index f4a112719e5..33297bbd24e 100644 --- a/dev-tools/idea/lucene/contrib/queryparser/queryparser.iml +++ b/dev-tools/idea/lucene/contrib/queryparser/queryparser.iml @@ -9,7 +9,6 @@ - diff --git a/dev-tools/idea/lucene/contrib/spatial/spatial.iml b/dev-tools/idea/lucene/contrib/spatial/spatial.iml index 99e10a0fe54..84f9bf02d5d 100644 --- a/dev-tools/idea/lucene/contrib/spatial/spatial.iml +++ b/dev-tools/idea/lucene/contrib/spatial/spatial.iml @@ -8,7 +8,6 @@ - diff --git a/dev-tools/idea/lucene/contrib/spellchecker/spellchecker.iml b/dev-tools/idea/lucene/contrib/spellchecker/spellchecker.iml index aa57c8f113a..4656346a309 100644 --- a/dev-tools/idea/lucene/contrib/spellchecker/spellchecker.iml +++ b/dev-tools/idea/lucene/contrib/spellchecker/spellchecker.iml @@ -9,7 +9,6 @@ - diff --git a/dev-tools/idea/lucene/contrib/swing/swing.iml b/dev-tools/idea/lucene/contrib/swing/swing.iml index d000b56bb8c..df1ade3afe3 100644 --- a/dev-tools/idea/lucene/contrib/swing/swing.iml +++ b/dev-tools/idea/lucene/contrib/swing/swing.iml @@ -9,7 +9,6 @@ - diff --git a/dev-tools/idea/lucene/contrib/wordnet/wordnet.iml b/dev-tools/idea/lucene/contrib/wordnet/wordnet.iml index 0c194b86bef..3a4710441f6 100644 --- a/dev-tools/idea/lucene/contrib/wordnet/wordnet.iml +++ b/dev-tools/idea/lucene/contrib/wordnet/wordnet.iml @@ -9,7 +9,6 @@ - diff --git a/dev-tools/idea/lucene/contrib/xml-query-parser/xml-query-parser.iml b/dev-tools/idea/lucene/contrib/xml-query-parser/xml-query-parser.iml index 0b9e00a43e4..6150f8af9c4 100644 --- a/dev-tools/idea/lucene/contrib/xml-query-parser/xml-query-parser.iml +++ b/dev-tools/idea/lucene/contrib/xml-query-parser/xml-query-parser.iml @@ -11,7 +11,6 @@ - diff --git 
a/dev-tools/idea/modules/benchmark/benchmark.iml b/dev-tools/idea/modules/benchmark/benchmark.iml index fcd77da13e1..d838caa0190 100644 --- a/dev-tools/idea/modules/benchmark/benchmark.iml +++ b/dev-tools/idea/modules/benchmark/benchmark.iml @@ -12,7 +12,6 @@ - diff --git a/dev-tools/idea/solr/contrib/clustering/clustering.iml b/dev-tools/idea/solr/contrib/clustering/clustering.iml index 325cde256ce..a3d785e2be0 100644 --- a/dev-tools/idea/solr/contrib/clustering/clustering.iml +++ b/dev-tools/idea/solr/contrib/clustering/clustering.iml @@ -16,7 +16,6 @@ - diff --git a/dev-tools/idea/solr/solr.iml b/dev-tools/idea/solr/solr.iml index 218fd039de9..0c1af3aa00e 100644 --- a/dev-tools/idea/solr/solr.iml +++ b/dev-tools/idea/solr/solr.iml @@ -22,7 +22,6 @@ - From 8bff9e1ff5bbc7310df92c53a71e0fb91f999e8d Mon Sep 17 00:00:00 2001 From: Yonik Seeley Date: Tue, 15 Feb 2011 21:29:01 +0000 Subject: [PATCH 148/185] SOLR-1711: fix SUSS deadlock git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1071074 13f79535-47bb-0310-9956-ffa450edef68 --- .../solrj/impl/StreamingUpdateSolrServer.java | 42 +++++++++++++++---- 1 file changed, 33 insertions(+), 9 deletions(-) diff --git a/solr/src/solrj/org/apache/solr/client/solrj/impl/StreamingUpdateSolrServer.java b/solr/src/solrj/org/apache/solr/client/solrj/impl/StreamingUpdateSolrServer.java index c47f4a09957..607480ed69f 100644 --- a/solr/src/solrj/org/apache/solr/client/solrj/impl/StreamingUpdateSolrServer.java +++ b/solr/src/solrj/org/apache/solr/client/solrj/impl/StreamingUpdateSolrServer.java @@ -178,6 +178,8 @@ public class StreamingUpdateSolrServer extends CommonsHttpSolrServer // remove it from the list of running things unless we are the last runner and the queue is full... // in which case, the next queue.put() would block and there would be no runners to handle it. + // This case has been further handled by using offer instead of put, and using a retry loop + // to avoid blocking forever (see request()). 
synchronized (runners) { if (runners.size() == 1 && queue.remainingCapacity() == 0) { // keep this runner alive @@ -223,18 +225,40 @@ public class StreamingUpdateSolrServer extends CommonsHttpSolrServer tmpLock.await(); } - queue.put( req ); + boolean success = queue.offer(req); - synchronized( runners ) { - if( runners.isEmpty() - || (queue.remainingCapacity() < queue.size() - && runners.size() < threadCount) ) - { - Runner r = new Runner(); - scheduler.execute( r ); - runners.add( r ); + for(;;) { + synchronized( runners ) { + if( runners.isEmpty() + || (queue.remainingCapacity() < queue.size() // queue is half full and we can add more runners + && runners.size() < threadCount) ) + { + // We need more runners, so start a new one. + Runner r = new Runner(); + runners.add( r ); + scheduler.execute( r ); + } else { + // break out of the retry loop if we added the element to the queue successfully, *and* + // while we are still holding the runners lock to prevent race conditions. + // race conditions. + if (success) break; + } } + + // Retry to add to the queue w/o the runners lock held (else we risk temporary deadlock) + // This retry could also fail because + // 1) existing runners were not able to take off any new elements in the queue + // 2) the queue was filled back up since our last try + // If we succeed, the queue may have been completely emptied, and all runners stopped. + // In all cases, we should loop back to the top to see if we need to start more runners. 
+ // + if (!success) { + success = queue.offer(req, 100, TimeUnit.MILLISECONDS); + } + } + + } catch (InterruptedException e) { log.error( "interrupted", e ); From 59b0e98cfb328445d0cdc7b168a3421428ffd783 Mon Sep 17 00:00:00 2001 From: Koji Sekiguchi Date: Wed, 16 Feb 2011 01:44:33 +0000 Subject: [PATCH 149/185] SOLR-1449, SOLR-2364: revert r1069656 change git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1071121 13f79535-47bb-0310-9956-ffa450edef68 --- solr/src/java/org/apache/solr/core/SolrResourceLoader.java | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/solr/src/java/org/apache/solr/core/SolrResourceLoader.java b/solr/src/java/org/apache/solr/core/SolrResourceLoader.java index f359b4d085b..7d0d10d5d7d 100644 --- a/solr/src/java/org/apache/solr/core/SolrResourceLoader.java +++ b/solr/src/java/org/apache/solr/core/SolrResourceLoader.java @@ -131,12 +131,7 @@ public class SolrResourceLoader implements ResourceLoader */ void addToClassLoader(final String baseDir, final FileFilter filter) { File base = FileUtils.resolvePath(new File(getInstanceDir()), baseDir); - if(base != null && base.canRead() && base.isDirectory()){ - this.classLoader = replaceClassLoader(classLoader, base, filter); - } - else{ - log.error("Can't find (or read) file to add to classloader: " + base); - } + this.classLoader = replaceClassLoader(classLoader, base, filter); } /** From 0da6f25e6b901c3d0529b08e2a974ef9c01c47d0 Mon Sep 17 00:00:00 2001 From: Mark Robert Miller Date: Wed, 16 Feb 2011 13:39:48 +0000 Subject: [PATCH 150/185] SOLR-1992: Remove abortOnConfigurationError from example solrconfig.xml as it no longer has any affect git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1071242 13f79535-47bb-0310-9956-ffa450edef68 --- solr/client/ruby/solr-ruby/solr/conf/solrconfig.xml | 8 -------- solr/client/ruby/solr-ruby/test/conf/solrconfig.xml | 8 -------- .../test/resources/solr-clustering/conf/solrconfig.xml | 8 -------- 
.../solr-dihextras/conf/dataimport-solrconfig.xml | 8 -------- .../resources/solr-dih/conf/contentstream-solrconfig.xml | 8 -------- .../solr-dih/conf/dataimport-nodatasource-solrconfig.xml | 8 -------- .../resources/solr-dih/conf/dataimport-solrconfig.xml | 8 -------- .../src/test/resources/solr-uima/conf/solrconfig.xml | 9 +-------- 8 files changed, 1 insertion(+), 64 deletions(-) diff --git a/solr/client/ruby/solr-ruby/solr/conf/solrconfig.xml b/solr/client/ruby/solr-ruby/solr/conf/solrconfig.xml index dca160ba7c5..3f29388e9c2 100755 --- a/solr/client/ruby/solr-ruby/solr/conf/solrconfig.xml +++ b/solr/client/ruby/solr-ruby/solr/conf/solrconfig.xml @@ -17,14 +17,6 @@ --> - - ${solr.abortOnConfigurationError:true} - - ${solr.abortOnConfigurationError:true} - ${solr.abortOnConfigurationError:true} - ${solr.abortOnConfigurationError:true} - ${solr.abortOnConfigurationError:true} - ${solr.abortOnConfigurationError:true} - ${solr.abortOnConfigurationError:true} - + LUCENE_40 - ${solr.abortOnConfigurationError:true} - + @@ -533,6 +533,7 @@ + diff --git a/solr/src/test/org/apache/solr/search/QueryParsingTest.java b/solr/src/test/org/apache/solr/search/QueryParsingTest.java index e1bf9c5c2aa..6601c210d7b 100644 --- a/solr/src/test/org/apache/solr/search/QueryParsingTest.java +++ b/solr/src/test/org/apache/solr/search/QueryParsingTest.java @@ -102,10 +102,10 @@ public class QueryParsingTest extends SolrTestCaseJ4 { assertEquals(flds[0].getField(), "pow(float(weight),const(2.0))"); //test functions (more deep) - sort = QueryParsing.parseSort("sum(product(r_f,sum(d_f,t_f,1)),a_f) asc", req); + sort = QueryParsing.parseSort("sum(product(r_f1,sum(d_f1,t_f1,1)),a_f1) asc", req); flds = sort.getSort(); assertEquals(flds[0].getType(), SortField.CUSTOM); - assertEquals(flds[0].getField(), "sum(product(float(r_f),sum(float(d_f),float(t_f),const(1.0))),float(a_f))"); + assertEquals(flds[0].getField(), "sum(product(float(r_f1),sum(float(d_f1),float(t_f1),const(1.0))),float(a_f1))"); 
sort = QueryParsing.parseSort("pow(weight, 2) desc", req); flds = sort.getSort(); @@ -135,11 +135,11 @@ public class QueryParsingTest extends SolrTestCaseJ4 { assertEquals(flds[0].getField(), "weight"); //Test literals in functions - sort = QueryParsing.parseSort("strdist(foo_s, \"junk\", jw) desc", req); + sort = QueryParsing.parseSort("strdist(foo_s1, \"junk\", jw) desc", req); flds = sort.getSort(); assertEquals(flds[0].getType(), SortField.CUSTOM); //the value sources get wrapped, so the out field is different than the input - assertEquals(flds[0].getField(), "strdist(str(foo_s),literal(junk), dist=org.apache.lucene.search.spell.JaroWinklerDistance)"); + assertEquals(flds[0].getField(), "strdist(str(foo_s1),literal(junk), dist=org.apache.lucene.search.spell.JaroWinklerDistance)"); sort = QueryParsing.parseSort("", req); assertNull(sort); diff --git a/solr/src/test/org/apache/solr/search/TestIndexSearcher.java b/solr/src/test/org/apache/solr/search/TestIndexSearcher.java index 7299390ed94..dc809ecd850 100755 --- a/solr/src/test/org/apache/solr/search/TestIndexSearcher.java +++ b/solr/src/test/org/apache/solr/search/TestIndexSearcher.java @@ -59,18 +59,18 @@ public class TestIndexSearcher extends SolrTestCaseJ4 { public void testReopen() throws Exception { - assertU(adoc("id","1", "v_t","Hello Dude", "v_s","string1")); - assertU(adoc("id","2", "v_t","Hello Yonik", "v_s","string2")); + assertU(adoc("id","1", "v_t","Hello Dude", "v_s1","string1")); + assertU(adoc("id","2", "v_t","Hello Yonik", "v_s1","string2")); assertU(commit()); SolrQueryRequest sr1 = req("q","foo"); ReaderContext rCtx1 = sr1.getSearcher().getTopReaderContext(); - String sval1 = getStringVal(sr1, "v_s",0); + String sval1 = getStringVal(sr1, "v_s1",0); assertEquals("string1", sval1); - assertU(adoc("id","3", "v_s","{!literal}")); - assertU(adoc("id","4", "v_s","other stuff")); + assertU(adoc("id","3", "v_s1","{!literal}")); + assertU(adoc("id","4", "v_s1","other stuff")); assertU(commit()); 
SolrQueryRequest sr2 = req("q","foo"); @@ -81,7 +81,7 @@ public class TestIndexSearcher extends SolrTestCaseJ4 { assertEquals(ReaderUtil.leaves(rCtx1)[0].reader, ReaderUtil.leaves(rCtx2)[0].reader); assertU(adoc("id","5", "v_f","3.14159")); - assertU(adoc("id","6", "v_f","8983", "v_s","string6")); + assertU(adoc("id","6", "v_f","8983", "v_s1","string6")); assertU(commit()); SolrQueryRequest sr3 = req("q","foo"); @@ -129,4 +129,4 @@ public class TestIndexSearcher extends SolrTestCaseJ4 { sr5.close(); sr6.close(); } -} \ No newline at end of file +} diff --git a/solr/src/test/org/apache/solr/search/TestQueryTypes.java b/solr/src/test/org/apache/solr/search/TestQueryTypes.java index 53f46213aa8..d053921bed4 100755 --- a/solr/src/test/org/apache/solr/search/TestQueryTypes.java +++ b/solr/src/test/org/apache/solr/search/TestQueryTypes.java @@ -87,7 +87,6 @@ public class TestQueryTypes extends AbstractSolrTestCase { ,"//*[@name='id'][.='999.0']" ,"//*[@name='" + f + "'][.='" + v + "']" ); - // System.out.println("#########################################" + f + "=" + v); // field qparser assertQ(req( "q", "{!field f="+f+"}"+v) @@ -98,20 +97,34 @@ public class TestQueryTypes extends AbstractSolrTestCase { assertQ(req( "q", f + ":[\"" + v + "\" TO \"" + v + "\"]" ) ,"//result[@numFound='1']" ); + } + // frange and function query only work on single valued field types + Object[] fc_vals = new Object[] { + "id",999.0 + ,"v_s","wow dude" + ,"v_ti",-1 + ,"v_tl",-1234567891234567890L + ,"v_tf",-2.0f + ,"v_td",-2.0 + ,"v_tdt","2000-05-10T01:01:01Z" + }; + + for (int i=0; i0931.0442muLti-Default2009-12-12T12:59:46.412Z4.02.01.0342muLti-Default2009-12-12T12:59:46.409Z3.02.01.0242muLti-Default2009-12-12T12:59:46.406Z2.02.01.0142muLti-Default2009-12-12T12:59:46.361Z0.02.0 -*/ diff --git a/solr/src/test/org/apache/solr/search/function/distance/DistanceFunctionTest.java b/solr/src/test/org/apache/solr/search/function/distance/DistanceFunctionTest.java index 17b68934a1c..f2324c32d09 
100644 --- a/solr/src/test/org/apache/solr/search/function/distance/DistanceFunctionTest.java +++ b/solr/src/test/org/apache/solr/search/function/distance/DistanceFunctionTest.java @@ -36,12 +36,12 @@ public class DistanceFunctionTest extends SolrTestCaseJ4 { @Test public void testHaversine() throws Exception { clearIndex(); - assertU(adoc("id", "1", "x_td", "0", "y_td", "0", "gh_s", GeoHashUtils.encode(32.7693246, -79.9289094))); - assertU(adoc("id", "2", "x_td", "0", "y_td", String.valueOf(Math.PI / 2), "gh_s", GeoHashUtils.encode(32.7693246, -78.9289094))); - assertU(adoc("id", "3", "x_td", String.valueOf(Math.PI / 2), "y_td", String.valueOf(Math.PI / 2), "gh_s", GeoHashUtils.encode(32.7693246, -80.9289094))); - assertU(adoc("id", "4", "x_td", String.valueOf(Math.PI / 4), "y_td", String.valueOf(Math.PI / 4), "gh_s", GeoHashUtils.encode(32.7693246, -81.9289094))); + assertU(adoc("id", "1", "x_td", "0", "y_td", "0", "gh_s1", GeoHashUtils.encode(32.7693246, -79.9289094))); + assertU(adoc("id", "2", "x_td", "0", "y_td", String.valueOf(Math.PI / 2), "gh_s1", GeoHashUtils.encode(32.7693246, -78.9289094))); + assertU(adoc("id", "3", "x_td", String.valueOf(Math.PI / 2), "y_td", String.valueOf(Math.PI / 2), "gh_s1", GeoHashUtils.encode(32.7693246, -80.9289094))); + assertU(adoc("id", "4", "x_td", String.valueOf(Math.PI / 4), "y_td", String.valueOf(Math.PI / 4), "gh_s1", GeoHashUtils.encode(32.7693246, -81.9289094))); assertU(adoc("id", "5", "x_td", "45.0", "y_td", "45.0", - "gh_s", GeoHashUtils.encode(32.7693246, -81.9289094))); + "gh_s1", GeoHashUtils.encode(32.7693246, -81.9289094))); assertU(adoc("id", "6", "point_hash", "32.5, -79.0", "point", "32.5, -79.0")); assertU(adoc("id", "7", "point_hash", "32.6, -78.0", "point", "32.6, -78.0")); assertU(commit()); @@ -56,7 +56,7 @@ public class DistanceFunctionTest extends SolrTestCaseJ4 { //Geo Hash Haversine //Can verify here: http://www.movable-type.co.uk/scripts/latlong.html, but they use a slightly different radius for 
the earth, so just be close - assertQ(req("fl", "*,score", "q", "{!func}ghhsin(" + DistanceUtils.EARTH_MEAN_RADIUS_KM + ", gh_s, \"" + GeoHashUtils.encode(32, -79) + + assertQ(req("fl", "*,score", "q", "{!func}ghhsin(" + DistanceUtils.EARTH_MEAN_RADIUS_KM + ", gh_s1, \"" + GeoHashUtils.encode(32, -79) + "\",)", "fq", "id:1"), "//float[@name='score']='122.171875'"); assertQ(req("fl", "id,point_hash,score", "q", "{!func}recip(ghhsin(" + DistanceUtils.EARTH_MEAN_RADIUS_KM + ", point_hash, \"" + GeoHashUtils.encode(32, -79) + "\"), 1, 1, 0)"), @@ -66,7 +66,7 @@ public class DistanceFunctionTest extends SolrTestCaseJ4 { ); - assertQ(req("fl", "*,score", "q", "{!func}ghhsin(" + DistanceUtils.EARTH_MEAN_RADIUS_KM + ", gh_s, geohash(32, -79))", "fq", "id:1"), "//float[@name='score']='122.171875'"); + assertQ(req("fl", "*,score", "q", "{!func}ghhsin(" + DistanceUtils.EARTH_MEAN_RADIUS_KM + ", gh_s1, geohash(32, -79))", "fq", "id:1"), "//float[@name='score']='122.171875'"); } From 3f8c9b5cfcf9b58a301a1fb87fc8e087117fb8e0 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Thu, 17 Feb 2011 10:31:59 +0000 Subject: [PATCH 155/185] LUCENE-2922: optimize the scan-within-block step of BlockTermsReader.seek git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1071564 13f79535-47bb-0310-9956-ffa450edef68 --- .../codecs/appending/AppendingCodec.java | 3 +- .../appending/AppendingTermsDictReader.java | 6 +- .../appending/AppendingTermsDictWriter.java | 8 +- .../org/apache/lucene/index/OrdTermState.java | 5 + .../org/apache/lucene/index/TermState.java | 7 +- .../lucene/index/codecs/BlockTermState.java | 2 +- .../lucene/index/codecs/BlockTermsReader.java | 236 ++++++++++++++---- .../lucene/index/codecs/BlockTermsWriter.java | 21 +- .../index/codecs/pulsing/PulsingCodec.java | 3 +- .../pulsing/PulsingPostingsReaderImpl.java | 2 +- .../index/codecs/standard/StandardCodec.java | 4 +- .../mockintblock/MockFixedIntBlockCodec.java | 3 +- .../MockVariableIntBlockCodec.java | 3 +- 
.../codecs/mockrandom/MockRandomCodec.java | 3 +- .../index/codecs/mocksep/MockSepCodec.java | 3 +- .../org/apache/lucene/TestExternalCodecs.java | 167 +------------ .../apache/lucene/index/TestIndexWriter.java | 8 +- .../lucene/index/TestIndexWriterReader.java | 7 + .../lucene/search/TestAutomatonQuery.java | 3 + 19 files changed, 246 insertions(+), 248 deletions(-) diff --git a/lucene/contrib/misc/src/java/org/apache/lucene/index/codecs/appending/AppendingCodec.java b/lucene/contrib/misc/src/java/org/apache/lucene/index/codecs/appending/AppendingCodec.java index cb0dde07e76..31f3e08e590 100644 --- a/lucene/contrib/misc/src/java/org/apache/lucene/index/codecs/appending/AppendingCodec.java +++ b/lucene/contrib/misc/src/java/org/apache/lucene/index/codecs/appending/AppendingCodec.java @@ -71,7 +71,7 @@ public class AppendingCodec extends Codec { } success = false; try { - FieldsConsumer ret = new AppendingTermsDictWriter(indexWriter, state, docsWriter, BytesRef.getUTF8SortedAsUnicodeComparator()); + FieldsConsumer ret = new AppendingTermsDictWriter(indexWriter, state, docsWriter); success = true; return ret; } finally { @@ -111,7 +111,6 @@ public class AppendingCodec extends Codec { state.dir, state.fieldInfos, state.segmentInfo.name, docsReader, state.readBufferSize, - BytesRef.getUTF8SortedAsUnicodeComparator(), StandardCodec.TERMS_CACHE_SIZE, state.codecId); success = true; diff --git a/lucene/contrib/misc/src/java/org/apache/lucene/index/codecs/appending/AppendingTermsDictReader.java b/lucene/contrib/misc/src/java/org/apache/lucene/index/codecs/appending/AppendingTermsDictReader.java index 8a1d9b80e78..f930b2c4695 100644 --- a/lucene/contrib/misc/src/java/org/apache/lucene/index/codecs/appending/AppendingTermsDictReader.java +++ b/lucene/contrib/misc/src/java/org/apache/lucene/index/codecs/appending/AppendingTermsDictReader.java @@ -18,7 +18,6 @@ package org.apache.lucene.index.codecs.appending; */ import java.io.IOException; -import java.util.Comparator; import 
org.apache.lucene.index.FieldInfos; import org.apache.lucene.index.codecs.PostingsReaderBase; @@ -27,7 +26,6 @@ import org.apache.lucene.index.codecs.BlockTermsWriter; import org.apache.lucene.index.codecs.TermsIndexReaderBase; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IndexInput; -import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CodecUtil; public class AppendingTermsDictReader extends BlockTermsReader { @@ -35,9 +33,9 @@ public class AppendingTermsDictReader extends BlockTermsReader { public AppendingTermsDictReader(TermsIndexReaderBase indexReader, Directory dir, FieldInfos fieldInfos, String segment, PostingsReaderBase postingsReader, int readBufferSize, - Comparator termComp, int termsCacheSize, String codecId) throws IOException { + int termsCacheSize, String codecId) throws IOException { super(indexReader, dir, fieldInfos, segment, postingsReader, readBufferSize, - termComp, termsCacheSize, codecId); + termsCacheSize, codecId); } @Override diff --git a/lucene/contrib/misc/src/java/org/apache/lucene/index/codecs/appending/AppendingTermsDictWriter.java b/lucene/contrib/misc/src/java/org/apache/lucene/index/codecs/appending/AppendingTermsDictWriter.java index 46362386afe..1e595cdd5ab 100644 --- a/lucene/contrib/misc/src/java/org/apache/lucene/index/codecs/appending/AppendingTermsDictWriter.java +++ b/lucene/contrib/misc/src/java/org/apache/lucene/index/codecs/appending/AppendingTermsDictWriter.java @@ -18,23 +18,21 @@ package org.apache.lucene.index.codecs.appending; */ import java.io.IOException; -import java.util.Comparator; import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.index.codecs.PostingsWriterBase; import org.apache.lucene.index.codecs.BlockTermsWriter; import org.apache.lucene.index.codecs.TermsIndexWriterBase; import org.apache.lucene.store.IndexOutput; -import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CodecUtil; public class AppendingTermsDictWriter 
extends BlockTermsWriter { final static String CODEC_NAME = "APPENDING_TERMS_DICT"; public AppendingTermsDictWriter(TermsIndexWriterBase indexWriter, - SegmentWriteState state, PostingsWriterBase postingsWriter, - Comparator termComp) throws IOException { - super(indexWriter, state, postingsWriter, termComp); + SegmentWriteState state, PostingsWriterBase postingsWriter) + throws IOException { + super(indexWriter, state, postingsWriter); } @Override diff --git a/lucene/src/java/org/apache/lucene/index/OrdTermState.java b/lucene/src/java/org/apache/lucene/index/OrdTermState.java index 57c965796f0..20e8a8433f4 100644 --- a/lucene/src/java/org/apache/lucene/index/OrdTermState.java +++ b/lucene/src/java/org/apache/lucene/index/OrdTermState.java @@ -30,4 +30,9 @@ public class OrdTermState extends TermState { assert other instanceof OrdTermState : "can not copy from " + other.getClass().getName(); this.ord = ((OrdTermState) other).ord; } + + @Override + public String toString() { + return "OrdTermState ord=" + ord; + } } diff --git a/lucene/src/java/org/apache/lucene/index/TermState.java b/lucene/src/java/org/apache/lucene/index/TermState.java index 3279366b589..12251751dd1 100644 --- a/lucene/src/java/org/apache/lucene/index/TermState.java +++ b/lucene/src/java/org/apache/lucene/index/TermState.java @@ -44,4 +44,9 @@ public abstract class TermState implements Cloneable { throw new RuntimeException(cnse); } } -} \ No newline at end of file + + @Override + public String toString() { + return "TermState"; + } +} diff --git a/lucene/src/java/org/apache/lucene/index/codecs/BlockTermState.java b/lucene/src/java/org/apache/lucene/index/codecs/BlockTermState.java index 40bf8e95e11..36e24c2ec25 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/BlockTermState.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/BlockTermState.java @@ -51,6 +51,6 @@ public class BlockTermState extends OrdTermState { @Override public String toString() { - return super.toString() + 
"ord=" + ord + " docFreq=" + docFreq + " totalTermFreq=" + totalTermFreq + " termCount=" + termCount + " blockFP=" + blockFilePointer; + return "ord=" + ord + " docFreq=" + docFreq + " totalTermFreq=" + totalTermFreq + " termCount=" + termCount + " blockFP=" + blockFilePointer; } } diff --git a/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsReader.java b/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsReader.java index 2e0e9cd3959..a296a5ed5f9 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsReader.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsReader.java @@ -66,9 +66,6 @@ public class BlockTermsReader extends FieldsProducer { private final TreeMap fields = new TreeMap(); - // Comparator that orders our terms - private final Comparator termComp; - // Caches the most recently looked-up field + terms: private final DoubleBarrelLRUCache termsCache; @@ -111,13 +108,12 @@ public class BlockTermsReader extends FieldsProducer { //private String segment; public BlockTermsReader(TermsIndexReaderBase indexReader, Directory dir, FieldInfos fieldInfos, String segment, PostingsReaderBase postingsReader, int readBufferSize, - Comparator termComp, int termsCacheSize, String codecId) + int termsCacheSize, String codecId) throws IOException { this.postingsReader = postingsReader; termsCache = new DoubleBarrelLRUCache(termsCacheSize); - this.termComp = termComp; //this.segment = segment; in = dir.openInput(IndexFileNames.segmentFileName(segment, codecId, BlockTermsWriter.TERMS_EXTENSION), readBufferSize); @@ -260,7 +256,7 @@ public class BlockTermsReader extends FieldsProducer { @Override public Comparator getComparator() { - return termComp; + return BytesRef.getUTF8SortedAsUnicodeComparator(); } @Override @@ -342,23 +338,29 @@ public class BlockTermsReader extends FieldsProducer { @Override public Comparator getComparator() { - return termComp; + return BytesRef.getUTF8SortedAsUnicodeComparator(); } + // TODO: we may 
want an alternate mode here which is + // "if you are about to return NOT_FOUND I won't use + // the terms data from that"; eg FuzzyTermsEnum will + // (usually) just immediately call seek again if we + // return NOT_FOUND so it's a waste for us to fill in + // the term that was actually NOT_FOUND @Override public SeekStatus seek(final BytesRef target, final boolean useCache) throws IOException { if (indexEnum == null) { throw new IllegalStateException("terms index was not loaded"); } - - //System.out.println("BTR.seek seg=" + segment + " target=" + fieldInfo.name + ":" + target.utf8ToString() + " " + target + " current=" + term().utf8ToString() + " " + term() + " useCache=" + useCache + " indexIsCurrent=" + indexIsCurrent + " didIndexNext=" + didIndexNext + " seekPending=" + seekPending + " divisor=" + indexReader.getDivisor() + " this=" + this); + /* + System.out.println("BTR.seek seg=" + segment + " target=" + fieldInfo.name + ":" + target.utf8ToString() + " " + target + " current=" + term().utf8ToString() + " " + term() + " useCache=" + useCache + " indexIsCurrent=" + indexIsCurrent + " didIndexNext=" + didIndexNext + " seekPending=" + seekPending + " divisor=" + indexReader.getDivisor() + " this=" + this); if (didIndexNext) { if (nextIndexTerm == null) { - //System.out.println(" nextIndexTerm=null"); + System.out.println(" nextIndexTerm=null"); } else { - //System.out.println(" nextIndexTerm=" + nextIndexTerm.utf8ToString()); + System.out.println(" nextIndexTerm=" + nextIndexTerm.utf8ToString()); } } */ @@ -386,7 +388,7 @@ public class BlockTermsReader extends FieldsProducer { // is after current term but before next index term: if (indexIsCurrent) { - final int cmp = termComp.compare(term, target); + final int cmp = BytesRef.getUTF8SortedAsUnicodeComparator().compare(term, target); if (cmp == 0) { // Already at the requested term @@ -404,7 +406,7 @@ public class BlockTermsReader extends FieldsProducer { didIndexNext = true; } - if (nextIndexTerm == null || 
termComp.compare(target, nextIndexTerm) < 0) { + if (nextIndexTerm == null || BytesRef.getUTF8SortedAsUnicodeComparator().compare(target, nextIndexTerm) < 0) { // Optimization: requested term is within the // same term block we are now in; skip seeking // (but do scanning): @@ -434,48 +436,175 @@ public class BlockTermsReader extends FieldsProducer { state.ord = indexEnum.ord()-1; } - // NOTE: the first _next() after an index seek is - // a bit wasteful, since it redundantly reads some - // suffix bytes into the buffer. We could avoid storing - // those bytes in the primary file, but then when - // next()ing over an index term we'd have to - // special case it: term.copy(indexEnum.term()); //System.out.println(" seek: term=" + term.utf8ToString()); } else { - ////System.out.println(" skip seek"); + //System.out.println(" skip seek"); + if (state.termCount == state.blockTermCount && !nextBlock()) { + indexIsCurrent = false; + return SeekStatus.END; + } } seekPending = false; - // Now scan: - while (_next() != null) { - final int cmp = termComp.compare(term, target); - if (cmp == 0) { - // Match! - if (useCache) { - // Store in cache - decodeMetaData(); - termsCache.put(new FieldAndTerm(fieldTerm), (BlockTermState) state.clone()); + int common = 0; + + // Scan within block. We could do this by calling + // _next() and testing the resulting term, but this + // is wasteful. Instead, we first confirm the + // target matches the common prefix of this block, + // and then we scan the term bytes directly from the + // termSuffixesreader's byte[], saving a copy into + // the BytesRef term per term. Only when we return + // do we then copy the bytes into the term. + + while(true) { + + // First, see if target term matches common prefix + // in this block: + if (common < termBlockPrefix) { + final int cmp = (term.bytes[common]&0xFF) - (target.bytes[target.offset + common]&0xFF); + if (cmp < 0) { + + // TODO: maybe we should store common prefix + // in block header? 
(instead of relying on + // last term of previous block) + + // Target's prefix is after the common block + // prefix, so term cannot be in this block + // but it could be in next block. We + // must scan to end-of-block to set common + // prefix for next block: + if (state.termCount < state.blockTermCount) { + while(state.termCount < state.blockTermCount-1) { + state.termCount++; + state.ord++; + termSuffixesReader.skipBytes(termSuffixesReader.readVInt()); + } + final int suffix = termSuffixesReader.readVInt(); + term.length = termBlockPrefix + suffix; + if (term.bytes.length < term.length) { + term.grow(term.length); + } + termSuffixesReader.readBytes(term.bytes, termBlockPrefix, suffix); + } + state.ord++; + + if (!nextBlock()) { + indexIsCurrent = false; + return SeekStatus.END; + } + common = 0; + + } else if (cmp > 0) { + // Target's prefix is before the common prefix + // of this block, so we position to start of + // block and return NOT_FOUND: + assert state.termCount == 0; + + final int suffix = termSuffixesReader.readVInt(); + term.length = termBlockPrefix + suffix; + if (term.bytes.length < term.length) { + term.grow(term.length); + } + termSuffixesReader.readBytes(term.bytes, termBlockPrefix, suffix); + return SeekStatus.NOT_FOUND; + } else { + common++; } - //System.out.println(" FOUND"); - return SeekStatus.FOUND; - } else if (cmp > 0) { - //System.out.println(" NOT_FOUND term=" + term.utf8ToString()); - return SeekStatus.NOT_FOUND; + + continue; } - + + // Test every term in this block + while (true) { + state.termCount++; + state.ord++; + + final int suffix = termSuffixesReader.readVInt(); + + // We know the prefix matches, so just compare the new suffix: + final int termLen = termBlockPrefix + suffix; + int bytePos = termSuffixesReader.getPosition(); + + boolean next = false; + final int limit = target.offset + (termLen < target.length ? 
termLen : target.length); + int targetPos = target.offset + termBlockPrefix; + while(targetPos < limit) { + final int cmp = (termSuffixes[bytePos++]&0xFF) - (target.bytes[targetPos++]&0xFF); + if (cmp < 0) { + // Current term is still before the target; + // keep scanning + next = true; + break; + } else if (cmp > 0) { + // Done! Current term is after target. Stop + // here, fill in real term, return NOT_FOUND. + term.length = termBlockPrefix + suffix; + if (term.bytes.length < term.length) { + term.grow(term.length); + } + termSuffixesReader.readBytes(term.bytes, termBlockPrefix, suffix); + //System.out.println(" NOT_FOUND"); + return SeekStatus.NOT_FOUND; + } + } + + if (!next && target.length <= termLen) { + term.length = termBlockPrefix + suffix; + if (term.bytes.length < term.length) { + term.grow(term.length); + } + termSuffixesReader.readBytes(term.bytes, termBlockPrefix, suffix); + + if (target.length == termLen) { + // Done! Exact match. Stop here, fill in + // real term, return FOUND. + //System.out.println(" FOUND"); + + if (useCache) { + // Store in cache + decodeMetaData(); + //System.out.println(" cache! state=" + state); + termsCache.put(new FieldAndTerm(fieldTerm), (BlockTermState) state.clone()); + } + + return SeekStatus.FOUND; + } else { + //System.out.println(" NOT_FOUND"); + return SeekStatus.NOT_FOUND; + } + } + + if (state.termCount == state.blockTermCount) { + // Must pre-fill term for next block's common prefix + term.length = termBlockPrefix + suffix; + if (term.bytes.length < term.length) { + term.grow(term.length); + } + termSuffixesReader.readBytes(term.bytes, termBlockPrefix, suffix); + break; + } else { + termSuffixesReader.skipBytes(suffix); + } + } + // The purpose of the terms dict index is to seek // the enum to the closest index term before the // term we are looking for. 
So, we should never // cross another index term (besides the first // one) while we are scanning: - assert indexIsCurrent; - } - indexIsCurrent = false; - //System.out.println(" END"); - return SeekStatus.END; + assert indexIsCurrent; + + if (!nextBlock()) { + //System.out.println(" END"); + indexIsCurrent = false; + return SeekStatus.END; + } + common = 0; + } } @Override @@ -515,12 +644,10 @@ public class BlockTermsReader extends FieldsProducer { decode all metadata up to the current term. */ private BytesRef _next() throws IOException { //System.out.println("BTR._next seg=" + segment + " this=" + this + " termCount=" + state.termCount + " (vs " + state.blockTermCount + ")"); - if (state.termCount == state.blockTermCount) { - if (!nextBlock()) { - //System.out.println(" eof"); - indexIsCurrent = false; - return null; - } + if (state.termCount == state.blockTermCount && !nextBlock()) { + //System.out.println(" eof"); + indexIsCurrent = false; + return null; } // TODO: cutover to something better for these ints! simple64? @@ -689,7 +816,7 @@ public class BlockTermsReader extends FieldsProducer { } //System.out.println(" termSuffixes len=" + len); in.readBytes(termSuffixes, 0, len); - termSuffixesReader.reset(termSuffixes); + termSuffixesReader.reset(termSuffixes, 0, len); // docFreq, totalTermFreq len = in.readVInt(); @@ -698,7 +825,7 @@ public class BlockTermsReader extends FieldsProducer { } //System.out.println(" freq bytes len=" + len); in.readBytes(docFreqBytes, 0, len); - freqReader.reset(docFreqBytes); + freqReader.reset(docFreqBytes, 0, len); metaDataUpto = 0; state.termCount = 0; @@ -717,23 +844,32 @@ public class BlockTermsReader extends FieldsProducer { if (!seekPending) { // lazily catch up on metadata decode: final int limit = state.termCount; + // We must set/incr state.termCount because + // postings impl can look at this state.termCount = metaDataUpto; + // TODO: better API would be "jump straight to term=N"??? 
while (metaDataUpto < limit) { - //System.out.println(" decode"); + //System.out.println(" decode mdUpto=" + metaDataUpto); // TODO: we could make "tiers" of metadata, ie, // decode docFreq/totalTF but don't decode postings // metadata; this way caller could get // docFreq/totalTF w/o paying decode cost for // postings + + // TODO: if docFreq were bulk decoded we could + // just skipN here: state.docFreq = freqReader.readVInt(); + //System.out.println(" dF=" + state.docFreq); if (!fieldInfo.omitTermFreqAndPositions) { state.totalTermFreq = state.docFreq + freqReader.readVLong(); + //System.out.println(" totTF=" + state.totalTermFreq); } + postingsReader.nextTerm(fieldInfo, state); metaDataUpto++; state.termCount++; } - } else { + //} else { //System.out.println(" skip! seekPending"); } } diff --git a/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsWriter.java b/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsWriter.java index c60b42506ed..2734f56ba56 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsWriter.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsWriter.java @@ -63,24 +63,23 @@ public class BlockTermsWriter extends FieldsConsumer { FieldInfo currentField; private final TermsIndexWriterBase termsIndexWriter; private final List fields = new ArrayList(); - private final Comparator termComp; - private final String segment; + + //private final String segment; public BlockTermsWriter( TermsIndexWriterBase termsIndexWriter, SegmentWriteState state, - PostingsWriterBase postingsWriter, - Comparator termComp) throws IOException + PostingsWriterBase postingsWriter) + throws IOException { final String termsFileName = IndexFileNames.segmentFileName(state.segmentName, state.codecId, TERMS_EXTENSION); this.termsIndexWriter = termsIndexWriter; - this.termComp = termComp; out = state.directory.createOutput(termsFileName); fieldInfos = state.fieldInfos; writeHeader(out); currentField = null; this.postingsWriter = 
postingsWriter; - segment = state.segmentName; + //segment = state.segmentName; //System.out.println("BTW.init seg=" + state.segmentName); @@ -161,7 +160,6 @@ public class BlockTermsWriter extends FieldsConsumer { private long numTerms; private final TermsIndexWriterBase.FieldWriter fieldIndexWriter; long sumTotalTermFreq; - private final BytesRef lastTerm = new BytesRef(); private TermEntry[] pendingTerms; @@ -185,12 +183,12 @@ public class BlockTermsWriter extends FieldsConsumer { @Override public Comparator getComparator() { - return termComp; + return BytesRef.getUTF8SortedAsUnicodeComparator(); } @Override public PostingsConsumer startTerm(BytesRef text) throws IOException { - //System.out.println("BTW.startTerm seg=" + segment + " term=" + fieldInfo.name + ":" + text.utf8ToString() + " " + text); + //System.out.println("BTW.startTerm term=" + fieldInfo.name + ":" + text.utf8ToString() + " " + text + " seg=" + segment); postingsWriter.startTerm(); return postingsWriter; } @@ -201,7 +199,7 @@ public class BlockTermsWriter extends FieldsConsumer { public void finishTerm(BytesRef text, TermStats stats) throws IOException { assert stats.docFreq > 0; - //System.out.println("BTW.finishTerm seg=" + segment + " term=" + fieldInfo.name + ":" + text.utf8ToString() + " " + text + " df=" + stats.docFreq); + //System.out.println("BTW.finishTerm term=" + fieldInfo.name + ":" + text.utf8ToString() + " " + text + " seg=" + segment + " df=" + stats.docFreq); final boolean isIndexTerm = fieldIndexWriter.checkIndexTerm(text, stats); @@ -213,6 +211,7 @@ public class BlockTermsWriter extends FieldsConsumer { flushBlock(); } fieldIndexWriter.add(text, stats, out.getFilePointer()); + //System.out.println(" index term!"); } if (pendingTerms.length == pendingCount) { @@ -265,7 +264,7 @@ public class BlockTermsWriter extends FieldsConsumer { private final RAMOutputStream bytesWriter = new RAMOutputStream(); private void flushBlock() throws IOException { - 
//System.out.println("BTW.flushBlock pendingCount=" + pendingCount); + //System.out.println("BTW.flushBlock seg=" + segment + " pendingCount=" + pendingCount + " fp=" + out.getFilePointer()); // First pass: compute common prefix for all terms // in the block, against term before first term in diff --git a/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingCodec.java b/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingCodec.java index 0867425baa5..78ad8836d3a 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingCodec.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingCodec.java @@ -89,7 +89,7 @@ public class PulsingCodec extends Codec { // Terms dict success = false; try { - FieldsConsumer ret = new BlockTermsWriter(indexWriter, state, pulsingWriter, BytesRef.getUTF8SortedAsUnicodeComparator()); + FieldsConsumer ret = new BlockTermsWriter(indexWriter, state, pulsingWriter); success = true; return ret; } finally { @@ -136,7 +136,6 @@ public class PulsingCodec extends Codec { state.dir, state.fieldInfos, state.segmentInfo.name, pulsingReader, state.readBufferSize, - BytesRef.getUTF8SortedAsUnicodeComparator(), StandardCodec.TERMS_CACHE_SIZE, state.codecId); success = true; diff --git a/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingPostingsReaderImpl.java b/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingPostingsReaderImpl.java index 6adab4d9f19..f5d6aba7bf5 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingPostingsReaderImpl.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingPostingsReaderImpl.java @@ -144,7 +144,7 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase { //System.out.println(" count=" + count + " threshold=" + maxPositions); if (count <= maxPositions) { - //System.out.println(" inlined"); + //System.out.println(" inlined pos=" + termState.inlinedBytesReader.getPosition()); // Inlined into terms 
dict -- just read the byte[] blob in, // but don't decode it now (we only decode when a DocsEnum diff --git a/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardCodec.java b/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardCodec.java index f0af9ca2507..3ef0c464037 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardCodec.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardCodec.java @@ -23,7 +23,6 @@ import java.util.Set; import org.apache.lucene.index.SegmentInfo; import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.index.SegmentReadState; -import org.apache.lucene.util.BytesRef; import org.apache.lucene.index.codecs.Codec; import org.apache.lucene.index.codecs.FieldsConsumer; import org.apache.lucene.index.codecs.FieldsProducer; @@ -66,7 +65,7 @@ public class StandardCodec extends Codec { success = false; try { - FieldsConsumer ret = new BlockTermsWriter(indexWriter, state, docs, BytesRef.getUTF8SortedAsUnicodeComparator()); + FieldsConsumer ret = new BlockTermsWriter(indexWriter, state, docs); success = true; return ret; } finally { @@ -109,7 +108,6 @@ public class StandardCodec extends Codec { state.segmentInfo.name, postings, state.readBufferSize, - BytesRef.getUTF8SortedAsUnicodeComparator(), TERMS_CACHE_SIZE, state.codecId); success = true; diff --git a/lucene/src/test-framework/org/apache/lucene/index/codecs/mockintblock/MockFixedIntBlockCodec.java b/lucene/src/test-framework/org/apache/lucene/index/codecs/mockintblock/MockFixedIntBlockCodec.java index fc50b4a817b..b6534718b56 100644 --- a/lucene/src/test-framework/org/apache/lucene/index/codecs/mockintblock/MockFixedIntBlockCodec.java +++ b/lucene/src/test-framework/org/apache/lucene/index/codecs/mockintblock/MockFixedIntBlockCodec.java @@ -126,7 +126,7 @@ public class MockFixedIntBlockCodec extends Codec { success = false; try { - FieldsConsumer ret = new BlockTermsWriter(indexWriter, state, postingsWriter, 
BytesRef.getUTF8SortedAsUnicodeComparator()); + FieldsConsumer ret = new BlockTermsWriter(indexWriter, state, postingsWriter); success = true; return ret; } finally { @@ -170,7 +170,6 @@ public class MockFixedIntBlockCodec extends Codec { state.segmentInfo.name, postingsReader, state.readBufferSize, - BytesRef.getUTF8SortedAsUnicodeComparator(), StandardCodec.TERMS_CACHE_SIZE, state.codecId); success = true; diff --git a/lucene/src/test-framework/org/apache/lucene/index/codecs/mockintblock/MockVariableIntBlockCodec.java b/lucene/src/test-framework/org/apache/lucene/index/codecs/mockintblock/MockVariableIntBlockCodec.java index 82b8615f433..63a6eb40308 100644 --- a/lucene/src/test-framework/org/apache/lucene/index/codecs/mockintblock/MockVariableIntBlockCodec.java +++ b/lucene/src/test-framework/org/apache/lucene/index/codecs/mockintblock/MockVariableIntBlockCodec.java @@ -150,7 +150,7 @@ public class MockVariableIntBlockCodec extends Codec { success = false; try { - FieldsConsumer ret = new BlockTermsWriter(indexWriter, state, postingsWriter, BytesRef.getUTF8SortedAsUnicodeComparator()); + FieldsConsumer ret = new BlockTermsWriter(indexWriter, state, postingsWriter); success = true; return ret; } finally { @@ -195,7 +195,6 @@ public class MockVariableIntBlockCodec extends Codec { state.segmentInfo.name, postingsReader, state.readBufferSize, - BytesRef.getUTF8SortedAsUnicodeComparator(), StandardCodec.TERMS_CACHE_SIZE, state.codecId); success = true; diff --git a/lucene/src/test-framework/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java b/lucene/src/test-framework/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java index 7399fde92b4..05fd72b39aa 100644 --- a/lucene/src/test-framework/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java +++ b/lucene/src/test-framework/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java @@ -205,7 +205,7 @@ public class MockRandomCodec extends Codec { success = false; try { - FieldsConsumer 
ret = new BlockTermsWriter(indexWriter, state, postingsWriter, BytesRef.getUTF8SortedAsUnicodeComparator()); + FieldsConsumer ret = new BlockTermsWriter(indexWriter, state, postingsWriter); success = true; return ret; } finally { @@ -306,7 +306,6 @@ public class MockRandomCodec extends Codec { state.segmentInfo.name, postingsReader, state.readBufferSize, - BytesRef.getUTF8SortedAsUnicodeComparator(), termsCacheSize, state.codecId); success = true; diff --git a/lucene/src/test-framework/org/apache/lucene/index/codecs/mocksep/MockSepCodec.java b/lucene/src/test-framework/org/apache/lucene/index/codecs/mocksep/MockSepCodec.java index e1e93587abe..6a059d8a619 100644 --- a/lucene/src/test-framework/org/apache/lucene/index/codecs/mocksep/MockSepCodec.java +++ b/lucene/src/test-framework/org/apache/lucene/index/codecs/mocksep/MockSepCodec.java @@ -70,7 +70,7 @@ public class MockSepCodec extends Codec { success = false; try { - FieldsConsumer ret = new BlockTermsWriter(indexWriter, state, postingsWriter, BytesRef.getUTF8SortedAsUnicodeComparator()); + FieldsConsumer ret = new BlockTermsWriter(indexWriter, state, postingsWriter); success = true; return ret; } finally { @@ -114,7 +114,6 @@ public class MockSepCodec extends Codec { state.segmentInfo.name, postingsReader, state.readBufferSize, - BytesRef.getUTF8SortedAsUnicodeComparator(), StandardCodec.TERMS_CACHE_SIZE, state.codecId); success = true; diff --git a/lucene/src/test/org/apache/lucene/TestExternalCodecs.java b/lucene/src/test/org/apache/lucene/TestExternalCodecs.java index 7cf57a96393..039eff4c3bc 100644 --- a/lucene/src/test/org/apache/lucene/TestExternalCodecs.java +++ b/lucene/src/test/org/apache/lucene/TestExternalCodecs.java @@ -496,139 +496,13 @@ public class TestExternalCodecs extends LuceneTestCase { } } - public static class MyCodecs extends CodecProvider { - MyCodecs() { - Codec ram = new RAMOnlyCodec(); - register(ram); - setDefaultFieldCodec(ram.name); - } - } - - // copied from PulsingCodec, just 
changing the terms - // comparator - private static class PulsingReverseTermsCodec extends Codec { - - public PulsingReverseTermsCodec() { - name = "PulsingReverseTerms"; - } - - @Override - public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException { - PostingsWriterBase docsWriter = new StandardPostingsWriter(state); - - // Terms that have <= freqCutoff number of docs are - // "pulsed" (inlined): - final int freqCutoff = 1; - PostingsWriterBase pulsingWriter = new PulsingPostingsWriterImpl(freqCutoff, docsWriter); - - // Terms dict index - TermsIndexWriterBase indexWriter; - boolean success = false; - try { - indexWriter = new FixedGapTermsIndexWriter(state) { - // We sort in reverse unicode order, so, we must - // disable the suffix-stripping opto that - // FixedGapTermsIndexWriter does by default! - @Override - protected int indexedTermPrefixLength(BytesRef priorTerm, BytesRef indexedTerm) { - return indexedTerm.length; - } - }; - success = true; - } finally { - if (!success) { - pulsingWriter.close(); - } - } - - // Terms dict - success = false; - try { - FieldsConsumer ret = new BlockTermsWriter(indexWriter, state, pulsingWriter, reverseUnicodeComparator); - success = true; - return ret; - } finally { - if (!success) { - try { - pulsingWriter.close(); - } finally { - indexWriter.close(); - } - } - } - } - - @Override - public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { - - PostingsReaderBase docsReader = new StandardPostingsReader(state.dir, state.segmentInfo, state.readBufferSize, state.codecId); - PostingsReaderBase pulsingReader = new PulsingPostingsReaderImpl(docsReader); - - // Terms dict index reader - TermsIndexReaderBase indexReader; - - boolean success = false; - try { - indexReader = new FixedGapTermsIndexReader(state.dir, - state.fieldInfos, - state.segmentInfo.name, - state.termsIndexDivisor, - reverseUnicodeComparator, - state.codecId); - success = true; - } finally { - if (!success) { - 
pulsingReader.close(); - } - } - - // Terms dict reader - success = false; - try { - FieldsProducer ret = new BlockTermsReader(indexReader, - state.dir, - state.fieldInfos, - state.segmentInfo.name, - pulsingReader, - state.readBufferSize, - reverseUnicodeComparator, - StandardCodec.TERMS_CACHE_SIZE, - state.codecId); - success = true; - return ret; - } finally { - if (!success) { - try { - pulsingReader.close(); - } finally { - indexReader.close(); - } - } - } - } - - @Override - public void files(Directory dir, SegmentInfo segmentInfo, String codecId, Set files) throws IOException { - StandardPostingsReader.files(dir, segmentInfo, codecId, files); - BlockTermsReader.files(dir, segmentInfo, codecId, files); - FixedGapTermsIndexReader.files(dir, segmentInfo, codecId, files); - } - - @Override - public void getExtensions(Set extensions) { - StandardCodec.getStandardExtensions(extensions); - } - } - - // tests storing "id" and "field2" fields as pulsing codec, // whose term sort is backwards unicode code point, and // storing "field1" as a custom entirely-in-RAM codec public void testPerFieldCodec() throws Exception { - CodecProvider provider = new MyCodecs(); - Codec pulsing = new PulsingReverseTermsCodec(); - provider.register(pulsing); - + CodecProvider provider = new CoreCodecProvider(); + provider.register(new RAMOnlyCodec()); + provider.setDefaultFieldCodec("RamOnly"); final int NUM_DOCS = 173; MockDirectoryWrapper dir = newDirectory(); @@ -645,11 +519,11 @@ public class TestExternalCodecs extends LuceneTestCase { doc.add(newField("field1", "this field uses the standard codec as the test", Field.Store.NO, Field.Index.ANALYZED)); // uses pulsing codec: Field field2 = newField("field2", "this field uses the pulsing codec as the test", Field.Store.NO, Field.Index.ANALYZED); - provider.setFieldCodec(field2.name(), pulsing.name); + provider.setFieldCodec(field2.name(), "Pulsing"); doc.add(field2); Field idField = newField("id", "", Field.Store.NO, 
Field.Index.NOT_ANALYZED); - provider.setFieldCodec(idField.name(), pulsing.name); + provider.setFieldCodec(idField.name(), "Pulsing"); doc.add(idField); for(int i=0;i= 0) { //System.out.println("BD: cycle delIDX=" + delIDX + " infoIDX=" + infosIDX); @@ -199,6 +205,7 @@ class BufferedDeletesStream { assert readerPool.infoIsLive(info); SegmentReader reader = readerPool.get(info, false); int delCount = 0; + final boolean segAllDeletes; try { if (coalescedDeletes != null) { //System.out.println(" del coalesced"); @@ -209,13 +216,21 @@ class BufferedDeletesStream { // Don't delete by Term here; DocumentsWriter // already did that on flush: delCount += applyQueryDeletes(packet.queriesIterable(), reader); + segAllDeletes = reader.numDocs() == 0; } finally { readerPool.release(reader); } anyNewDeletes |= delCount > 0; + if (segAllDeletes) { + if (allDeleted == null) { + allDeleted = new SegmentInfos(); + } + allDeleted.add(info); + } + if (infoStream != null) { - message("seg=" + info + " segGen=" + segGen + " segDeletes=[" + packet + "]; coalesced deletes=[" + (coalescedDeletes == null ? "null" : coalescedDeletes) + "] delCount=" + delCount); + message("seg=" + info + " segGen=" + segGen + " segDeletes=[" + packet + "]; coalesced deletes=[" + (coalescedDeletes == null ? "null" : coalescedDeletes) + "] delCount=" + delCount + (segAllDeletes ? 
" 100% deleted" : "")); } if (coalescedDeletes == null) { @@ -234,16 +249,25 @@ class BufferedDeletesStream { assert readerPool.infoIsLive(info); SegmentReader reader = readerPool.get(info, false); int delCount = 0; + final boolean segAllDeletes; try { delCount += applyTermDeletes(coalescedDeletes.termsIterable(), reader); delCount += applyQueryDeletes(coalescedDeletes.queriesIterable(), reader); + segAllDeletes = reader.numDocs() == 0; } finally { readerPool.release(reader); } anyNewDeletes |= delCount > 0; + if (segAllDeletes) { + if (allDeleted == null) { + allDeleted = new SegmentInfos(); + } + allDeleted.add(info); + } + if (infoStream != null) { - message("seg=" + info + " segGen=" + segGen + " coalesced deletes=[" + (coalescedDeletes == null ? "null" : coalescedDeletes) + "] delCount=" + delCount); + message("seg=" + info + " segGen=" + segGen + " coalesced deletes=[" + (coalescedDeletes == null ? "null" : coalescedDeletes) + "] delCount=" + delCount + (segAllDeletes ? " 100% deleted" : "")); } } info.setBufferedDeletesGen(nextGen); @@ -258,7 +282,7 @@ class BufferedDeletesStream { } // assert infos != segmentInfos || !any() : "infos=" + infos + " segmentInfos=" + segmentInfos + " any=" + any; - return new ApplyDeletesResult(anyNewDeletes, nextGen++); + return new ApplyDeletesResult(anyNewDeletes, nextGen++, allDeleted); } public synchronized long getNextGen() { diff --git a/lucene/src/java/org/apache/lucene/index/DirectoryReader.java b/lucene/src/java/org/apache/lucene/index/DirectoryReader.java index 344a91159c7..78174cdf3b6 100644 --- a/lucene/src/java/org/apache/lucene/index/DirectoryReader.java +++ b/lucene/src/java/org/apache/lucene/index/DirectoryReader.java @@ -146,7 +146,6 @@ class DirectoryReader extends IndexReader implements Cloneable { this.readOnly = true; this.applyAllDeletes = applyAllDeletes; // saved for reopen - segmentInfos = (SegmentInfos) infos.clone();// make sure we clone otherwise we share mutable state with IW 
this.termInfosIndexDivisor = termInfosIndexDivisor; if (codecs == null) { this.codecs = CodecProvider.getDefault(); @@ -159,23 +158,33 @@ class DirectoryReader extends IndexReader implements Cloneable { // us, which ensures infos will not change; so there's // no need to process segments in reverse order final int numSegments = infos.size(); - SegmentReader[] readers = new SegmentReader[numSegments]; + + List readers = new ArrayList(); final Directory dir = writer.getDirectory(); + segmentInfos = (SegmentInfos) infos.clone(); + int infosUpto = 0; for (int i=0;i 0 || writer.getKeepFullyDeletedSegments()) { + reader.readerFinishedListeners = readerFinishedListeners; + readers.add(reader); + infosUpto++; + } else { + reader.close(); + segmentInfos.remove(infosUpto); + } success = true; } finally { if (!success) { // Close all readers we had opened: - for(i--;i>=0;i--) { + for(SegmentReader reader : readers) { try { - readers[i].close(); + reader.close(); } catch (Throwable ignore) { // keep going - we want to clean up as much as possible } @@ -186,7 +195,7 @@ class DirectoryReader extends IndexReader implements Cloneable { this.writer = writer; - initialize(readers); + initialize(readers.toArray(new SegmentReader[readers.size()])); } /** This constructor is only used for {@link #reopen()} */ diff --git a/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java b/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java index bb5304371fc..341891ab09b 100644 --- a/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java +++ b/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java @@ -648,8 +648,16 @@ final class DocumentsWriter { newSegment.setDelCount(delCount); newSegment.advanceDelGen(); final String delFileName = newSegment.getDelFileName(); + if (infoStream != null) { + message("flush: write " + delCount + " deletes to " + delFileName); + } boolean success2 = false; try { + // TODO: in the NRT case it'd be better to hand + // this del vector over 
to the + // shortly-to-be-opened SegmentReader and let it + // carry the changes; there's no reason to use + // filesystem as intermediary here. flushState.deletedDocs.write(directory, delFileName); success2 = true; } finally { diff --git a/lucene/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/src/java/org/apache/lucene/index/IndexWriter.java index 44d909265b3..c5e3e5776e7 100644 --- a/lucene/src/java/org/apache/lucene/index/IndexWriter.java +++ b/lucene/src/java/org/apache/lucene/index/IndexWriter.java @@ -388,8 +388,7 @@ public class IndexWriter implements Closeable { private final Map readerMap = new HashMap(); - /** Forcefully clear changes for the specified segments, - * and remove from the pool. This is called on successful merge. */ + /** Forcefully clear changes for the specified segments. This is called on successful merge. */ synchronized void clear(SegmentInfos infos) throws IOException { if (infos == null) { for (Map.Entry ent: readerMap.entrySet()) { @@ -397,8 +396,9 @@ public class IndexWriter implements Closeable { } } else { for (final SegmentInfo info: infos) { - if (readerMap.containsKey(info)) { - readerMap.get(info).hasChanges = false; + final SegmentReader r = readerMap.get(info); + if (r != null) { + r.hasChanges = false; } } } @@ -407,8 +407,8 @@ public class IndexWriter implements Closeable { // used only by asserts public synchronized boolean infoIsLive(SegmentInfo info) { int idx = segmentInfos.indexOf(info); - assert idx != -1; - assert segmentInfos.get(idx) == info; + assert idx != -1: "info=" + info + " isn't in pool"; + assert segmentInfos.get(idx) == info: "info=" + info + " doesn't match live info in segmentInfos"; return true; } @@ -478,6 +478,21 @@ public class IndexWriter implements Closeable { return false; } + + public synchronized void drop(SegmentInfos infos) throws IOException { + for(SegmentInfo info : infos) { + drop(info); + } + } + + public synchronized void drop(SegmentInfo info) throws IOException { + final 
SegmentReader sr = readerMap.get(info); + if (sr != null) { + sr.hasChanges = false; + readerMap.remove(info); + sr.close(); + } + } /** Remove all our references to readers, and commits * any pending changes. */ @@ -516,19 +531,18 @@ public class IndexWriter implements Closeable { * Commit all segment reader in the pool. * @throws IOException */ - synchronized void commit() throws IOException { + synchronized void commit(SegmentInfos infos) throws IOException { // We invoke deleter.checkpoint below, so we must be // sync'd on IW: assert Thread.holdsLock(IndexWriter.this); - for (Map.Entry ent : readerMap.entrySet()) { + for (SegmentInfo info : infos) { - SegmentReader sr = ent.getValue(); - if (sr.hasChanges) { - assert infoIsLive(sr.getSegmentInfo()); + final SegmentReader sr = readerMap.get(info); + if (sr != null && sr.hasChanges) { + assert infoIsLive(info); sr.doCommit(null); - // Must checkpoint w/ deleter, because this // segment reader will have created new _X_N.del // file. @@ -2558,6 +2572,24 @@ public class IndexWriter implements Closeable { if (result.anyDeletes) { checkpoint(); } + if (!keepFullyDeletedSegments && result.allDeleted != null) { + if (infoStream != null) { + message("drop 100% deleted segments: " + result.allDeleted); + } + for(SegmentInfo info : result.allDeleted) { + // If a merge has already registered for this + // segment, we leave it in the readerPool; the + // merge will skip merging it and will then drop + // it once it's done: + if (!mergingSegments.contains(info)) { + segmentInfos.remove(info); + if (readerPool != null) { + readerPool.drop(info); + } + } + } + checkpoint(); + } bufferedDeletesStream.prune(segmentInfos); assert !bufferedDeletesStream.any(); flushControl.clearDeletes(); @@ -2634,9 +2666,13 @@ public class IndexWriter implements Closeable { SegmentInfo info = sourceSegments.info(i); minGen = Math.min(info.getBufferedDeletesGen(), minGen); int docCount = info.docCount; - SegmentReader previousReader = 
merge.readersClone[i]; + final SegmentReader previousReader = merge.readerClones.get(i); + if (previousReader == null) { + // Reader was skipped because it was 100% deletions + continue; + } final Bits prevDelDocs = previousReader.getDeletedDocs(); - SegmentReader currentReader = merge.readers[i]; + final SegmentReader currentReader = merge.readers.get(i); final Bits currentDelDocs = currentReader.getDeletedDocs(); if (previousReader.hasDeletions()) { @@ -2719,18 +2755,21 @@ public class IndexWriter implements Closeable { return false; } - ensureValidMerge(merge); - commitMergedDeletes(merge, mergedReader); // If the doc store we are using has been closed and // is in now compound format (but wasn't when we // started), then we will switch to the compound // format as well: - setMergeDocStoreIsCompoundFile(merge); assert !segmentInfos.contains(merge.info); + final boolean allDeleted = mergedReader.numDocs() == 0; + + if (infoStream != null && allDeleted) { + message("merged segment " + merge.info + " is 100% deleted" + (keepFullyDeletedSegments ? 
"" : "; skipping insert")); + } + final Set mergedAway = new HashSet(merge.segments); int segIdx = 0; int newSegIdx = 0; @@ -2739,7 +2778,7 @@ public class IndexWriter implements Closeable { while(segIdx < curSegCount) { final SegmentInfo info = segmentInfos.info(segIdx++); if (mergedAway.contains(info)) { - if (!inserted) { + if (!inserted && (!allDeleted || keepFullyDeletedSegments)) { segmentInfos.set(segIdx-1, merge.info); inserted = true; newSegIdx++; @@ -2748,7 +2787,20 @@ public class IndexWriter implements Closeable { segmentInfos.set(newSegIdx++, info); } } - assert newSegIdx == curSegCount - merge.segments.size() + 1; + + // Either we found place to insert segment, or, we did + // not, but only because all segments we merged became + // deleted while we are merging, in which case it should + // be the case that the new segment is also all deleted: + if (!inserted) { + assert allDeleted; + if (keepFullyDeletedSegments) { + segmentInfos.add(0, merge.info); + } else { + readerPool.drop(merge.info); + } + } + segmentInfos.subList(newSegIdx, segmentInfos.size()).clear(); if (infoStream != null) { @@ -2770,7 +2822,6 @@ public class IndexWriter implements Closeable { // cascade the optimize: segmentsToOptimize.add(merge.info); } - return true; } @@ -2913,8 +2964,9 @@ public class IndexWriter implements Closeable { // is running (while synchronized) to avoid race // condition where two conflicting merges from different // threads, start - for(int i=0;i BD final BufferedDeletesStream.ApplyDeletesResult result = bufferedDeletesStream.applyDeletes(readerPool, merge.segments); + if (result.anyDeletes) { checkpoint(); } + if (!keepFullyDeletedSegments && result.allDeleted != null) { + if (infoStream != null) { + message("drop 100% deleted segments: " + result.allDeleted); + } + for(SegmentInfo info : result.allDeleted) { + segmentInfos.remove(info); + if (merge.segments.contains(info)) { + mergingSegments.remove(info); + merge.segments.remove(info); + } + } + if 
(readerPool != null) { + readerPool.drop(result.allDeleted); + } + checkpoint(); + } + merge.info.setBufferedDeletesGen(result.gen); // Lock order: IW -> BD @@ -3023,8 +3093,9 @@ public class IndexWriter implements Closeable { if (merge.registerDone) { final SegmentInfos sourceSegments = merge.segments; final int end = sourceSegments.size(); - for(int i=0;i 0) { + merger.add(clone); + } totDocCount += clone.numDocs(); + segUpto++; } if (infoStream != null) { - message("merge: total "+totDocCount+" docs"); + message("merge: total " + totDocCount + " docs"); } merge.checkAborted(directory); @@ -3160,11 +3220,11 @@ public class IndexWriter implements Closeable { if (infoStream != null) { message("merge segmentCodecs=" + merger.getSegmentCodecs()); - message("merge store matchedCount=" + merger.getMatchedSubReaderCount() + " vs " + numSegments); + message("merge store matchedCount=" + merger.getMatchedSubReaderCount() + " vs " + merge.readers.size()); } - anyNonBulkMerges |= merger.getMatchedSubReaderCount() != numSegments; + anyNonBulkMerges |= merger.getMatchedSubReaderCount() != merge.readers.size(); - assert mergedDocCount == totDocCount; + assert mergedDocCount == totDocCount: "mergedDocCount=" + mergedDocCount + " vs " + totDocCount; // Very important to do this before opening the reader // because codec must know if prox was written for @@ -3347,6 +3407,10 @@ public class IndexWriter implements Closeable { keepFullyDeletedSegments = true; } + boolean getKeepFullyDeletedSegments() { + return keepFullyDeletedSegments; + } + // called only from assert private boolean filesExist(SegmentInfos toSync) throws IOException { Collection files = toSync.files(directory, false); @@ -3402,12 +3466,8 @@ public class IndexWriter implements Closeable { if (infoStream != null) message("startCommit index=" + segString(segmentInfos) + " changeCount=" + changeCount); - readerPool.commit(); - + readerPool.commit(segmentInfos); toSync = (SegmentInfos) segmentInfos.clone(); - if 
(!keepFullyDeletedSegments) { - toSync.pruneDeletedSegments(); - } assert filesExist(toSync); diff --git a/lucene/src/java/org/apache/lucene/index/MergePolicy.java b/lucene/src/java/org/apache/lucene/index/MergePolicy.java index 704161b09bd..088065293f7 100644 --- a/lucene/src/java/org/apache/lucene/index/MergePolicy.java +++ b/lucene/src/java/org/apache/lucene/index/MergePolicy.java @@ -72,8 +72,8 @@ public abstract class MergePolicy implements java.io.Closeable { long mergeGen; // used by IndexWriter boolean isExternal; // used by IndexWriter int maxNumSegmentsOptimize; // used by IndexWriter - SegmentReader[] readers; // used by IndexWriter - SegmentReader[] readersClone; // used by IndexWriter + List readers; // used by IndexWriter + List readerClones; // used by IndexWriter public final SegmentInfos segments; boolean aborted; Throwable error; diff --git a/lucene/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java b/lucene/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java index 2faf22bb011..d0afe3f262f 100644 --- a/lucene/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java +++ b/lucene/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java @@ -102,6 +102,17 @@ public class RandomIndexWriter implements Closeable { } } + public void updateDocument(Term t, Document doc) throws IOException { + w.updateDocument(t, doc); + if (docCount++ == flushAt) { + if (LuceneTestCase.VERBOSE) { + System.out.println("RIW.updateDocument: now doing a commit"); + } + w.commit(); + flushAt += _TestUtil.nextInt(r, 10, 1000); + } + } + public void addIndexes(Directory... 
dirs) throws CorruptIndexException, IOException { w.addIndexes(dirs); } @@ -127,17 +138,21 @@ public class RandomIndexWriter implements Closeable { } public IndexReader getReader() throws IOException { + return getReader(true); + } + + public IndexReader getReader(boolean applyDeletions) throws IOException { getReaderCalled = true; if (r.nextInt(4) == 2) w.optimize(); // If we are writing with PreFlexRW, force a full // IndexReader.open so terms are sorted in codepoint // order during searching: - if (!w.codecs.getDefaultFieldCodec().equals("PreFlex") && r.nextBoolean()) { + if (!applyDeletions || !w.codecs.getDefaultFieldCodec().equals("PreFlex") && r.nextBoolean()) { if (LuceneTestCase.VERBOSE) { System.out.println("RIW.getReader: use NRT reader"); } - return w.getReader(); + return w.getReader(applyDeletions); } else { if (LuceneTestCase.VERBOSE) { System.out.println("RIW.getReader: open new reader"); diff --git a/lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java b/lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java index 483106d9fef..7ccd225e113 100644 --- a/lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java +++ b/lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java @@ -2,14 +2,13 @@ package org.apache.lucene.search; import java.io.IOException; import java.util.Random; -import java.lang.reflect.Method; import junit.framework.Assert; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.AtomicReaderContext; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.MultiReader; @@ -19,6 +18,7 @@ import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.LuceneTestCase; import 
org.apache.lucene.util.ReaderUtil; +import org.apache.lucene.util._TestUtil; import static org.apache.lucene.util.LuceneTestCase.TEST_VERSION_CURRENT; @@ -172,16 +172,7 @@ public class QueryUtils { } w.commit(); w.deleteDocuments( new MatchAllDocsQuery() ); - try { - // Carefully invoke what is a package-private (test - // only, internal) method on IndexWriter: - Method m = IndexWriter.class.getDeclaredMethod("keepFullyDeletedSegments"); - m.setAccessible(true); - m.invoke(w); - } catch (Exception e) { - // Should not happen? - throw new RuntimeException(e); - } + _TestUtil.keepFullyDeletedSegments(w); w.commit(); if (0 < numDeletedDocs) diff --git a/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java b/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java index a10689c98be..6469ca9302b 100644 --- a/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java +++ b/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java @@ -1243,7 +1243,7 @@ public abstract class LuceneTestCase extends Assert { } @Override - public String toString() { + public synchronized String toString() { return "RandomCodecProvider: " + previousMappings.toString(); } } diff --git a/lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java b/lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java index 1ad038040a2..f16a971020c 100644 --- a/lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java +++ b/lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.io.PrintStream; +import java.lang.reflect.Method; import java.util.Enumeration; import java.util.Random; import java.util.Map; @@ -305,4 +306,17 @@ public class _TestUtil { }); Assert.assertEquals("Reflection does not produce same map", reflectedValues, map); } + + public static void keepFullyDeletedSegments(IndexWriter w) { + try { + // Carefully 
invoke what is a package-private (test + // only, internal) method on IndexWriter: + Method m = IndexWriter.class.getDeclaredMethod("keepFullyDeletedSegments"); + m.setAccessible(true); + m.invoke(w); + } catch (Exception e) { + // Should not happen? + throw new RuntimeException(e); + } + } } diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java index f0b3fd6d8b0..0acc750376a 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java @@ -81,7 +81,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2) .setMaxBufferedDeleteTerms(2)); - + modifier.setInfoStream(VERBOSE ? System.out : null); int id = 0; int value = 100; diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java index 5fc03471ecf..929bebc145f 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java @@ -464,11 +464,11 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase { setReaderPooling(true). 
setMergePolicy(newLogMergePolicy(2)) ); + _TestUtil.keepFullyDeletedSegments(w); Document doc = new Document(); doc.add(newField("f", "doctor who", Field.Store.YES, Field.Index.ANALYZED)); w.addDocument(doc); - w.commit(); w.deleteDocuments(new Term("f", "who")); diff --git a/lucene/src/test/org/apache/lucene/index/TestIsCurrent.java b/lucene/src/test/org/apache/lucene/index/TestIsCurrent.java index 8e41d522607..524108d3524 100644 --- a/lucene/src/test/org/apache/lucene/index/TestIsCurrent.java +++ b/lucene/src/test/org/apache/lucene/index/TestIsCurrent.java @@ -68,7 +68,7 @@ public class TestIsCurrent extends LuceneTestCase { // assert index has a document and reader is up2date assertEquals("One document should be in the index", 1, writer.numDocs()); - assertTrue("Document added, reader should be stale ", reader.isCurrent()); + assertTrue("One document added, reader should be current", reader.isCurrent()); // remove document Term idTerm = new Term("UUID", "1"); diff --git a/lucene/src/test/org/apache/lucene/index/TestMultiFields.java b/lucene/src/test/org/apache/lucene/index/TestMultiFields.java index f1337e95191..30cb118f2f7 100644 --- a/lucene/src/test/org/apache/lucene/index/TestMultiFields.java +++ b/lucene/src/test/org/apache/lucene/index/TestMultiFields.java @@ -32,6 +32,7 @@ public class TestMultiFields extends LuceneTestCase { Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(NoMergePolicy.COMPOUND_FILES)); + _TestUtil.keepFullyDeletedSegments(w); Map> docs = new HashMap>(); Set deleted = new HashSet(); diff --git a/lucene/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java b/lucene/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java index b544f027126..c71448dbb03 100644 --- a/lucene/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java +++ b/lucene/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java @@ -36,6 +36,7 @@ 
public class TestNRTReaderWithThreads extends LuceneTestCase { setMaxBufferedDocs(10). setMergePolicy(newLogMergePolicy(false,2)) ); + writer.setInfoStream(VERBOSE ? System.out : null); IndexReader reader = writer.getReader(); // start pooling readers reader.close(); RunThread[] indexThreads = new RunThread[4]; diff --git a/lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java b/lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java new file mode 100644 index 00000000000..c38fd2d4b2c --- /dev/null +++ b/lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java @@ -0,0 +1,75 @@ +package org.apache.lucene.index; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import org.apache.lucene.analysis.MockAnalyzer; +import org.apache.lucene.document.*; +import org.apache.lucene.store.*; +import org.apache.lucene.util.*; +import org.junit.Test; + +public class TestRollingUpdates extends LuceneTestCase { + + // Just updates the same set of N docs over and over, to + // stress out deletions + + @Test + public void testRollingUpdates() throws Exception { + final Directory dir = newDirectory(); + + final LineFileDocs docs = new LineFileDocs(random); + + final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); + final int SIZE = 200 * RANDOM_MULTIPLIER; + int id = 0; + IndexReader r = null; + final int numUpdates = (int) (SIZE * (2+random.nextDouble())); + for(int docIter=0;docIter= SIZE && random.nextInt(50) == 17) { + if (r != null) { + r.close(); + } + final boolean applyDeletions = random.nextBoolean(); + r = w.getReader(applyDeletions); + assertTrue("applyDeletions=" + applyDeletions + " r.numDocs()=" + r.numDocs() + " vs SIZE=" + SIZE, !applyDeletions || r.numDocs() == SIZE); + } + } + + if (r != null) { + r.close(); + } + + w.commit(); + assertEquals(SIZE, w.numDocs()); + + w.close(); + docs.close(); + + dir.close(); + } +} diff --git a/lucene/src/test/org/apache/lucene/index/TestThreadedOptimize.java b/lucene/src/test/org/apache/lucene/index/TestThreadedOptimize.java index c2f988422d3..3516079d208 100644 --- a/lucene/src/test/org/apache/lucene/index/TestThreadedOptimize.java +++ b/lucene/src/test/org/apache/lucene/index/TestThreadedOptimize.java @@ -71,7 +71,7 @@ public class TestThreadedOptimize extends LuceneTestCase { } ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4); - //writer.setInfoStream(System.out); + writer.setInfoStream(VERBOSE ? 
System.out : null); Thread[] threads = new Thread[NUM_THREADS]; diff --git a/lucene/src/test/org/apache/lucene/search/TestCachingSpanFilter.java b/lucene/src/test/org/apache/lucene/search/TestCachingSpanFilter.java index 3424658ed27..5d8aa209107 100644 --- a/lucene/src/test/org/apache/lucene/search/TestCachingSpanFilter.java +++ b/lucene/src/test/org/apache/lucene/search/TestCachingSpanFilter.java @@ -29,6 +29,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.spans.SpanTermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util._TestUtil; public class TestCachingSpanFilter extends LuceneTestCase { @@ -73,7 +74,9 @@ public class TestCachingSpanFilter extends LuceneTestCase { docs = searcher.search(constantScore, 1); assertEquals("[just filter] Should find a hit...", 1, docs.totalHits); - // now delete the doc, refresh the reader, and see that it's not there + // now delete the doc, refresh the reader, and see that + // it's not there + _TestUtil.keepFullyDeletedSegments(writer.w); writer.deleteDocuments(new Term("id", "1")); reader = refreshReader(reader); diff --git a/lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java b/lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java index 357b3df1017..6c7f7af1165 100644 --- a/lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java +++ b/lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java @@ -22,8 +22,8 @@ import java.io.IOException; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.AtomicReaderContext; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.SerialMergeScheduler; import 
org.apache.lucene.index.SlowMultiReaderWrapper; @@ -32,6 +32,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.OpenBitSet; import org.apache.lucene.util.OpenBitSetDISI; +import org.apache.lucene.util._TestUtil; public class TestCachingWrapperFilter extends LuceneTestCase { @@ -196,6 +197,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase { assertEquals("[just filter] Should find a hit...", 1, docs.totalHits); // now delete the doc, refresh the reader, and see that it's not there + _TestUtil.keepFullyDeletedSegments(writer.w); writer.deleteDocuments(new Term("id", "1")); reader = refreshReader(reader); From 28cf54a4cce9850c015a36a3d6349f097881f5bc Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Thu, 17 Feb 2011 12:22:49 +0000 Subject: [PATCH 157/185] LUCENE-2905: don't write abs skip pointer until we have to, the block might be all low-freq terms git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1071593 13f79535-47bb-0310-9956-ffa450edef68 --- .../lucene/index/codecs/sep/SepPostingsReaderImpl.java | 2 +- .../lucene/index/codecs/sep/SepPostingsWriterImpl.java | 8 ++------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsReaderImpl.java b/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsReaderImpl.java index 7b400ae96be..4380003b754 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsReaderImpl.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsReaderImpl.java @@ -242,7 +242,7 @@ public class SepPostingsReaderImpl extends PostingsReaderBase { } //System.out.println(" skipFP=" + termState.skipFP); } else if (isFirstTerm) { - termState.skipFP = termState.bytesReader.readVLong(); + termState.skipFP = 0; } } diff --git a/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsWriterImpl.java 
b/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsWriterImpl.java index 2f84da6cb95..e158714b604 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsWriterImpl.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsWriterImpl.java @@ -289,12 +289,8 @@ public final class SepPostingsWriterImpl extends PostingsWriterBase { } lastSkipFP = skipFP; } else if (isFirstTerm) { - // TODO: this is somewhat wasteful; eg if no terms in - // this block will use skip data, we don't need to - // write this: - final long skipFP = skipOut.getFilePointer(); - indexBytesWriter.writeVLong(skipFP); - lastSkipFP = skipFP; + // lazily write an absolute delta if a term in this block requires skip data. + lastSkipFP = 0; } lastDocID = 0; From d964adafe93b034f4cfbb80e2dbc34373d559459 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Thu, 17 Feb 2011 12:26:11 +0000 Subject: [PATCH 158/185] SOLR-2367: use ignoreException for DIH tests that expect exceptions git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1071594 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/solr/handler/dataimport/DataImporter.java | 3 ++- .../java/org/apache/solr/handler/dataimport/DocBuilder.java | 5 +++-- .../apache/solr/handler/dataimport/EntityProcessorBase.java | 3 ++- .../solr/handler/dataimport/EntityProcessorWrapper.java | 3 ++- .../apache/solr/handler/dataimport/TestErrorHandling.java | 1 + 5 files changed, 10 insertions(+), 5 deletions(-) diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DataImporter.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DataImporter.java index 45f8fcf1598..84cb0514820 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DataImporter.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DataImporter.java @@ -17,6 +17,7 @@ package 
org.apache.solr.handler.dataimport; +import org.apache.solr.common.SolrException; import org.apache.solr.core.SolrConfig; import org.apache.solr.core.SolrCore; import org.apache.solr.schema.IndexSchema; @@ -336,7 +337,7 @@ public class DataImporter { if (!requestParams.debug) cumulativeStatistics.add(docBuilder.importStatistics); } catch (Throwable t) { - LOG.error("Full Import failed", t); + SolrException.log(LOG, "Full Import failed", t); docBuilder.rollback(); } finally { setStatus(Status.IDLE); diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DocBuilder.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DocBuilder.java index 858d688976e..974fe214480 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DocBuilder.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/DocBuilder.java @@ -17,6 +17,7 @@ package org.apache.solr.handler.dataimport; +import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrInputDocument; import org.apache.solr.core.SolrCore; import static org.apache.solr.handler.dataimport.SolrWriter.LAST_INDEX_KEY; @@ -490,7 +491,7 @@ public class DocBuilder { importStatistics.skipDocCount.getAndIncrement(); exception = null;//should not propogate up } else { - LOG.error("Exception while processing: " + SolrException.log(LOG, "Exception while processing: " + entity.name + " document : " + docWrapper, dihe); } if (dihe.getErrCode() == DataImportHandlerException.SEVERE) @@ -649,7 +650,7 @@ public class DocBuilder { importStatistics.skipDocCount.getAndIncrement(); doc = null; } else { - LOG.error("Exception while processing: " + SolrException.log(LOG, "Exception while processing: " + entity.name + " document : " + doc, e); } if (e.getErrCode() == DataImportHandlerException.SEVERE) diff --git 
a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorBase.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorBase.java index 5d761194440..10d8c147d6c 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorBase.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorBase.java @@ -16,6 +16,7 @@ */ package org.apache.solr.handler.dataimport; +import org.apache.solr.common.SolrException; import static org.apache.solr.handler.dataimport.DataImportHandlerException.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -79,7 +80,7 @@ public class EntityProcessorBase extends EntityProcessor { rowIterator = null; return null; } catch (Exception e) { - log.error("getNext() failed for query '" + query + "'", e); + SolrException.log(log, "getNext() failed for query '" + query + "'", e); query = null; rowIterator = null; wrapAndThrow(DataImportHandlerException.WARN, e); diff --git a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java index 432e64ac767..92fc9d48b62 100644 --- a/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java +++ b/solr/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java @@ -16,6 +16,7 @@ */ package org.apache.solr.handler.dataimport; +import org.apache.solr.common.SolrException; import static org.apache.solr.handler.dataimport.DataImportHandlerException.*; import static org.apache.solr.handler.dataimport.EntityProcessorBase.*; import static org.apache.solr.handler.dataimport.EntityProcessorBase.SKIP; @@ -240,7 +241,7 @@ public class EntityProcessorWrapper extends EntityProcessor { 
wrapAndThrow(SEVERE, e); } else { //SKIP is not really possible. If this calls the nextRow() again the Entityprocessor would be in an inconisttent state - log.error("Exception in entity : "+ entityName, e); + SolrException.log(log, "Exception in entity : "+ entityName, e); return null; } } diff --git a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestErrorHandling.java b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestErrorHandling.java index b8e285dffe1..64f58df4ff4 100644 --- a/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestErrorHandling.java +++ b/solr/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestErrorHandling.java @@ -37,6 +37,7 @@ public class TestErrorHandling extends AbstractDataImportHandlerTestCase { @BeforeClass public static void beforeClass() throws Exception { initCore("dataimport-solrconfig.xml", "dataimport-schema.xml"); + ignoreException("Unexpected close tag"); } @Before @Override From 68f9ccffe79bda8749e7ccca49f278207b414dd3 Mon Sep 17 00:00:00 2001 From: Grant Ingersoll Date: Thu, 17 Feb 2011 15:44:42 +0000 Subject: [PATCH 159/185] SOLR-2371: adds min function, makes max take two value sources git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1071654 13f79535-47bb-0310-9956-ffa450edef68 --- .../test/org/apache/solr/search/function/TestFunctionQuery.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/solr/src/test/org/apache/solr/search/function/TestFunctionQuery.java b/solr/src/test/org/apache/solr/search/function/TestFunctionQuery.java index ceba5e6b109..edb36a2833a 100755 --- a/solr/src/test/org/apache/solr/search/function/TestFunctionQuery.java +++ b/solr/src/test/org/apache/solr/search/function/TestFunctionQuery.java @@ -498,6 +498,8 @@ public class TestFunctionQuery extends SolrTestCaseJ4 { dofunc("deg(.5)", Math.toDegrees(.5)); dofunc("sqrt(9)", Math.sqrt(9)); dofunc("cbrt(8)", 
Math.cbrt(8)); + dofunc("max(0,1)", Math.max(0,1)); + dofunc("min(0,1)", Math.min(0,1)); dofunc("log(100)", Math.log10(100)); dofunc("ln(3)", Math.log(3)); dofunc("exp(1)", Math.exp(1)); From ff77defb92a20a2a9fc108ef9207602d08a07454 Mon Sep 17 00:00:00 2001 From: Grant Ingersoll Date: Thu, 17 Feb 2011 15:44:59 +0000 Subject: [PATCH 160/185] SOLR-2371: adds min function, makes max take two value sources git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1071655 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/solr/search/ValueSourceParser.java | 20 +++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/solr/src/java/org/apache/solr/search/ValueSourceParser.java b/solr/src/java/org/apache/solr/search/ValueSourceParser.java index 867ec18d067..75badb4e03d 100755 --- a/solr/src/java/org/apache/solr/search/ValueSourceParser.java +++ b/solr/src/java/org/apache/solr/search/ValueSourceParser.java @@ -117,14 +117,6 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { return new LinearFloatFunction(source, slope, intercept); } }); - addParser("max", new ValueSourceParser() { - @Override - public ValueSource parse(FunctionQParser fp) throws ParseException { - ValueSource source = fp.parseValueSource(); - float val = fp.parseFloat(); - return new MaxFloatFunction(source, val); - } - }); addParser("recip", new ValueSourceParser() { @Override public ValueSource parse(FunctionQParser fp) throws ParseException { @@ -476,6 +468,18 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { return Math.atan2(a.doubleVal(doc), b.doubleVal(doc)); } }); + addParser(new Double2Parser("max") { + @Override + public double func(int doc, DocValues a, DocValues b) { + return Math.max(a.doubleVal(doc), b.doubleVal(doc)); + } + }); + addParser(new Double2Parser("min") { + @Override + public double func(int doc, DocValues a, DocValues b) { + return Math.min(a.doubleVal(doc), b.doubleVal(doc)); + } + }); 
addParser("sqedist", new ValueSourceParser() { @Override From 545ea4021b9485fe40989a36cb2f07e291deca8e Mon Sep 17 00:00:00 2001 From: Grant Ingersoll Date: Thu, 17 Feb 2011 15:52:33 +0000 Subject: [PATCH 161/185] SOLR-2371: adds min function, makes max take two value sources git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1071658 13f79535-47bb-0310-9956-ffa450edef68 --- .../search/function/MaxFloatFunction.java | 100 ------------------ 1 file changed, 100 deletions(-) delete mode 100644 solr/src/java/org/apache/solr/search/function/MaxFloatFunction.java diff --git a/solr/src/java/org/apache/solr/search/function/MaxFloatFunction.java b/solr/src/java/org/apache/solr/search/function/MaxFloatFunction.java deleted file mode 100644 index f7678f3b53a..00000000000 --- a/solr/src/java/org/apache/solr/search/function/MaxFloatFunction.java +++ /dev/null @@ -1,100 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.solr.search.function; - -import org.apache.lucene.index.IndexReader.AtomicReaderContext; -import org.apache.lucene.search.IndexSearcher; - -import java.io.IOException; -import java.util.Map; - -/** - * Returns the max of a ValueSource and a float - * (which is useful for "bottoming out" another function at 0.0, - * or some positive number). - *
    - * Normally Used as an argument to a {@link FunctionQuery} - * - * @version $Id$ - */ -public class MaxFloatFunction extends ValueSource { - protected final ValueSource source; - protected final float fval; - - public MaxFloatFunction(ValueSource source, float fval) { - this.source = source; - this.fval = fval; - } - - @Override - public String description() { - return "max(" + source.description() + "," + fval + ")"; - } - - @Override - public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { - final DocValues vals = source.getValues(context, readerContext); - return new DocValues() { - @Override - public float floatVal(int doc) { - float v = vals.floatVal(doc); - return v < fval ? fval : v; - } - @Override - public int intVal(int doc) { - return (int)floatVal(doc); - } - @Override - public long longVal(int doc) { - return (long)floatVal(doc); - } - @Override - public double doubleVal(int doc) { - return (double)floatVal(doc); - } - @Override - public String strVal(int doc) { - return Float.toString(floatVal(doc)); - } - @Override - public String toString(int doc) { - return "max(" + vals.toString(doc) + "," + fval + ")"; - } - }; - } - - @Override - public void createWeight(Map context, IndexSearcher searcher) throws IOException { - source.createWeight(context, searcher); - } - - @Override - public int hashCode() { - int h = Float.floatToIntBits(fval); - h = (h >>> 2) | (h << 30); - return h + source.hashCode(); - } - - @Override - public boolean equals(Object o) { - if (MaxFloatFunction.class != o.getClass()) return false; - MaxFloatFunction other = (MaxFloatFunction)o; - return this.fval == other.fval - && this.source.equals(other.source); - } -} From e4d29a1da2fe26601cd6c45e1ecb29f10e664cc6 Mon Sep 17 00:00:00 2001 From: Grant Ingersoll Date: Thu, 17 Feb 2011 16:08:40 +0000 Subject: [PATCH 162/185] remove javadoc warning about functions not being weighted. 
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1071673 13f79535-47bb-0310-9956-ffa450edef68 --- solr/src/java/org/apache/solr/search/function/ValueSource.java | 1 - 1 file changed, 1 deletion(-) diff --git a/solr/src/java/org/apache/solr/search/function/ValueSource.java b/solr/src/java/org/apache/solr/search/function/ValueSource.java index 8f62760f5e4..5b4bd29c9d7 100644 --- a/solr/src/java/org/apache/solr/search/function/ValueSource.java +++ b/solr/src/java/org/apache/solr/search/function/ValueSource.java @@ -90,7 +90,6 @@ public abstract class ValueSource implements Serializable { /** * EXPERIMENTAL: This method is subject to change. - *
    WARNING: Sorted function queries are not currently weighted. *

    * Get the SortField for this ValueSource. Uses the {@link #getValues(java.util.Map, IndexReader.AtomicReaderContext)} * to populate the SortField. From 8396617fb42f8b514c822ea51bbab5f77c3b484d Mon Sep 17 00:00:00 2001 From: Steven Rowe Date: Thu, 17 Feb 2011 18:52:56 +0000 Subject: [PATCH 163/185] include RSLP stemmer grammar files in the build classpath, for the Galician and Portuguese stemmers in modules/analysis/common/ git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1071747 13f79535-47bb-0310-9956-ffa450edef68 --- dev-tools/idea/.idea/compiler.xml | 1 + 1 file changed, 1 insertion(+) diff --git a/dev-tools/idea/.idea/compiler.xml b/dev-tools/idea/.idea/compiler.xml index b031758b4fb..2e36fba99a6 100644 --- a/dev-tools/idea/.idea/compiler.xml +++ b/dev-tools/idea/.idea/compiler.xml @@ -23,6 +23,7 @@ + From 2493780acbfe7c49bd1feac908e9071f3c99d9b5 Mon Sep 17 00:00:00 2001 From: Yonik Seeley Date: Thu, 17 Feb 2011 22:14:08 +0000 Subject: [PATCH 164/185] SOLR-2373: update to slf4j 1.6.1 git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1071812 13f79535-47bb-0310-9956-ffa450edef68 --- dev-tools/eclipse/dot.classpath | 8 ++++---- dev-tools/maven/pom.xml.template | 2 +- solr/lib/jcl-over-slf4j-1.5.5.jar | 2 -- solr/lib/jcl-over-slf4j-1.6.1.jar | 2 ++ solr/lib/log4j-over-slf4j-1.5.5.jar | 2 -- solr/lib/log4j-over-slf4j-1.6.1.jar | 2 ++ solr/lib/slf4j-api-1.5.5.jar | 2 -- solr/lib/slf4j-api-1.6.1.jar | 2 ++ solr/lib/slf4j-jdk14-1.5.5.jar | 2 -- solr/lib/slf4j-jdk14-1.6.1.jar | 2 ++ 10 files changed, 13 insertions(+), 13 deletions(-) delete mode 100644 solr/lib/jcl-over-slf4j-1.5.5.jar create mode 100644 solr/lib/jcl-over-slf4j-1.6.1.jar delete mode 100644 solr/lib/log4j-over-slf4j-1.5.5.jar create mode 100644 solr/lib/log4j-over-slf4j-1.6.1.jar delete mode 100644 solr/lib/slf4j-api-1.5.5.jar create mode 100644 solr/lib/slf4j-api-1.6.1.jar delete mode 100644 solr/lib/slf4j-jdk14-1.5.5.jar create mode 100644 solr/lib/slf4j-jdk14-1.6.1.jar 
diff --git a/dev-tools/eclipse/dot.classpath b/dev-tools/eclipse/dot.classpath index 9f2c3286d05..5e86db866cc 100644 --- a/dev-tools/eclipse/dot.classpath +++ b/dev-tools/eclipse/dot.classpath @@ -107,12 +107,12 @@ - + - + - - + + diff --git a/dev-tools/maven/pom.xml.template b/dev-tools/maven/pom.xml.template index 9bf98955a61..b47726ba880 100644 --- a/dev-tools/maven/pom.xml.template +++ b/dev-tools/maven/pom.xml.template @@ -42,7 +42,7 @@ yyyy-MM-dd HH:mm:ss 1.5 6.1.26 - 1.5.5 + 1.6.1 0.8 ${project.version} diff --git a/solr/lib/jcl-over-slf4j-1.5.5.jar b/solr/lib/jcl-over-slf4j-1.5.5.jar deleted file mode 100644 index 9a5e656a289..00000000000 --- a/solr/lib/jcl-over-slf4j-1.5.5.jar +++ /dev/null @@ -1,2 +0,0 @@ -AnyObjectId[6b8ddd6339582fa3e6bce99a74f4186918f3f4d8] was removed in git history. -Apache SVN contains full history. \ No newline at end of file diff --git a/solr/lib/jcl-over-slf4j-1.6.1.jar b/solr/lib/jcl-over-slf4j-1.6.1.jar new file mode 100644 index 00000000000..052c22acc57 --- /dev/null +++ b/solr/lib/jcl-over-slf4j-1.6.1.jar @@ -0,0 +1,2 @@ +AnyObjectId[79e1ec2ca7179a2be9ef42db978285c6da4d9cb4] was removed in git history. +Apache SVN contains full history. \ No newline at end of file diff --git a/solr/lib/log4j-over-slf4j-1.5.5.jar b/solr/lib/log4j-over-slf4j-1.5.5.jar deleted file mode 100644 index cfc2b224028..00000000000 --- a/solr/lib/log4j-over-slf4j-1.5.5.jar +++ /dev/null @@ -1,2 +0,0 @@ -AnyObjectId[d32987769b4827a9c539009e804415b09b39e3ed] was removed in git history. -Apache SVN contains full history. \ No newline at end of file diff --git a/solr/lib/log4j-over-slf4j-1.6.1.jar b/solr/lib/log4j-over-slf4j-1.6.1.jar new file mode 100644 index 00000000000..1b45587b92c --- /dev/null +++ b/solr/lib/log4j-over-slf4j-1.6.1.jar @@ -0,0 +1,2 @@ +AnyObjectId[504541e9cd627b8ae616e37442958713a192e1a0] was removed in git history. +Apache SVN contains full history. 
\ No newline at end of file diff --git a/solr/lib/slf4j-api-1.5.5.jar b/solr/lib/slf4j-api-1.5.5.jar deleted file mode 100644 index ffc1fa34ed4..00000000000 --- a/solr/lib/slf4j-api-1.5.5.jar +++ /dev/null @@ -1,2 +0,0 @@ -AnyObjectId[4bb4abbb0f75c1bdfb81a44c3a2dd078c1656832] was removed in git history. -Apache SVN contains full history. \ No newline at end of file diff --git a/solr/lib/slf4j-api-1.6.1.jar b/solr/lib/slf4j-api-1.6.1.jar new file mode 100644 index 00000000000..46d734aa603 --- /dev/null +++ b/solr/lib/slf4j-api-1.6.1.jar @@ -0,0 +1,2 @@ +AnyObjectId[42e0ad0de7773da9b94b12f503deda7f5a506015] was removed in git history. +Apache SVN contains full history. \ No newline at end of file diff --git a/solr/lib/slf4j-jdk14-1.5.5.jar b/solr/lib/slf4j-jdk14-1.5.5.jar deleted file mode 100644 index 03386fff862..00000000000 --- a/solr/lib/slf4j-jdk14-1.5.5.jar +++ /dev/null @@ -1,2 +0,0 @@ -AnyObjectId[d58ef5a16b4ae2aa82cf725d510bd0c8c8d4097f] was removed in git history. -Apache SVN contains full history. \ No newline at end of file diff --git a/solr/lib/slf4j-jdk14-1.6.1.jar b/solr/lib/slf4j-jdk14-1.6.1.jar new file mode 100644 index 00000000000..6ac0b5c5c15 --- /dev/null +++ b/solr/lib/slf4j-jdk14-1.6.1.jar @@ -0,0 +1,2 @@ +AnyObjectId[f4eb2f8afafda6920828ac6bdac5b14c22f0fa11] was removed in git history. +Apache SVN contains full history. 
\ No newline at end of file From 742f95598ff4c5e6e745a77bd60b753ed2b23cf8 Mon Sep 17 00:00:00 2001 From: Yonik Seeley Date: Thu, 17 Feb 2011 23:09:43 +0000 Subject: [PATCH 165/185] SOLR-2369: turn off zookeeper log4j mbean registration due to slf4j incompat git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1071831 13f79535-47bb-0310-9956-ffa450edef68 --- solr/src/java/org/apache/solr/core/CoreContainer.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/solr/src/java/org/apache/solr/core/CoreContainer.java b/solr/src/java/org/apache/solr/core/CoreContainer.java index c8d158540c5..ecf3b59302c 100644 --- a/solr/src/java/org/apache/solr/core/CoreContainer.java +++ b/solr/src/java/org/apache/solr/core/CoreContainer.java @@ -133,6 +133,11 @@ public class CoreContainer if (zkRun == null && zookeeperHost == null) return; // not in zk mode + // zookeeper in quorum mode currently causes a failure when trying to + // register log4j mbeans. See SOLR-2369 + // TODO: remove after updating to an slf4j based zookeeper + System.setProperty("zookeeper.jmx.log4j.disable", "true"); + zkServer = new SolrZkServer(zkRun, zookeeperHost, solrHome, hostPort); zkServer.parseConfig(); zkServer.start(); From 55af86960999529595c871a1d20fc74656552e23 Mon Sep 17 00:00:00 2001 From: "Chris M. 
Hostetter" Date: Fri, 18 Feb 2011 00:14:22 +0000 Subject: [PATCH 166/185] SOLR-1240: deprecating date faceting methods now that we have generalized range faceting git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1071842 13f79535-47bb-0310-9956-ffa450edef68 --- .../java/org/apache/solr/request/SimpleFacets.java | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/solr/src/java/org/apache/solr/request/SimpleFacets.java b/solr/src/java/org/apache/solr/request/SimpleFacets.java index 3026f2dd5a0..26d05691c1e 100644 --- a/solr/src/java/org/apache/solr/request/SimpleFacets.java +++ b/solr/src/java/org/apache/solr/request/SimpleFacets.java @@ -779,8 +779,9 @@ public class SimpleFacets { * SolrParams * * @see FacetParams#FACET_DATE + * @deprecated Use getFacetRangeCounts which is more generalized */ - + @Deprecated public NamedList getFacetDateCounts() throws IOException, ParseException { @@ -802,6 +803,10 @@ public class SimpleFacets { return resOuter; } + /** + * @deprecated Use getFacetRangeCounts which is more generalized + */ + @Deprecated public void getFacetDateCounts(String dateFacet, NamedList resOuter) throws IOException, ParseException { @@ -1158,6 +1163,10 @@ public class SimpleFacets { return searcher.numDocs(rangeQ ,base); } + /** + * @deprecated Use rangeCount(SchemaField,String,String,boolean,boolean) which is more generalized + */ + @Deprecated protected int rangeCount(SchemaField sf, Date low, Date high, boolean iLow, boolean iHigh) throws IOException { Query rangeQ = ((DateField)(sf.getType())).getRangeQuery(null, sf,low,high,iLow,iHigh); From 377c16ad52e6b74091a5ab3eb6e48adfcd4246ee Mon Sep 17 00:00:00 2001 From: Steven Rowe Date: Fri, 18 Feb 2011 05:56:52 +0000 Subject: [PATCH 167/185] LUCENE-2926: contrib 'init' target now ensures that lucene-core jar is built and up-to-date git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1071891 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/common-build.xml | 7 
+++++++ lucene/contrib/contrib-build.xml | 6 +++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/lucene/common-build.xml b/lucene/common-build.xml index 80dbcce6368..408e3154e8f 100644 --- a/lucene/common-build.xml +++ b/lucene/common-build.xml @@ -664,6 +664,13 @@ + + + + + + diff --git a/lucene/contrib/contrib-build.xml b/lucene/contrib/contrib-build.xml index bbd35082ac2..1073db21b7f 100644 --- a/lucene/contrib/contrib-build.xml +++ b/lucene/contrib/contrib-build.xml @@ -60,7 +60,11 @@ - + + + + + From ca24eb3c8152224d1c9d89224961c4fbc06f6132 Mon Sep 17 00:00:00 2001 From: Steven Rowe Date: Fri, 18 Feb 2011 14:48:24 +0000 Subject: [PATCH 168/185] LUCENE-2926: reverting all changes - it slows down the build. LUCENE-2923 will include these changes for the demo contrib only git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1072009 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/common-build.xml | 7 ------- lucene/contrib/contrib-build.xml | 6 +----- 2 files changed, 1 insertion(+), 12 deletions(-) diff --git a/lucene/common-build.xml b/lucene/common-build.xml index 408e3154e8f..80dbcce6368 100644 --- a/lucene/common-build.xml +++ b/lucene/common-build.xml @@ -664,13 +664,6 @@ - - - - - - diff --git a/lucene/contrib/contrib-build.xml b/lucene/contrib/contrib-build.xml index 1073db21b7f..bbd35082ac2 100644 --- a/lucene/contrib/contrib-build.xml +++ b/lucene/contrib/contrib-build.xml @@ -60,11 +60,7 @@ - - - - - + From e22ebc0fdab6c9cee352c37527309a978961ac9a Mon Sep 17 00:00:00 2001 From: Yonik Seeley Date: Fri, 18 Feb 2011 15:20:39 +0000 Subject: [PATCH 169/185] docs: move entry to 3.1 from 4.0 git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1072020 13f79535-47bb-0310-9956-ffa450edef68 --- solr/CHANGES.txt | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index 36b2c58a4ea..2a839163145 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -53,14 +53,6 @@ 
Upgrading from Solr 3.1-dev legacy behavior should set a default value for the 'mm' param in their solrconfig.xml file. -* In previous releases, sorting or evaluating function queries on - fields that were "multiValued" (either by explicit declaration in - schema.xml or by implict behavior because the "version" attribute on - the schema was less then 1.2) did not generally work, but it would - sometimes silently act as if it succeeded and order the docs - arbitrarily. Solr will now fail on any attempt to sort, or apply a - function to, multi-valued fields - Detailed Change List @@ -268,6 +260,14 @@ Upgrading from Solr 1.4 * readercycle script was removed. (SOLR-2046) +* In previous releases, sorting or evaluating function queries on + fields that were "multiValued" (either by explicit declaration in + schema.xml or by implict behavior because the "version" attribute on + the schema was less then 1.2) did not generally work, but it would + sometimes silently act as if it succeeded and order the docs + arbitrarily. 
Solr will now fail on any attempt to sort, or apply a + function to, multi-valued fields + Detailed Change List ---------------------- From 77258e59ac6d66d3cc080d7548792060542766b5 Mon Sep 17 00:00:00 2001 From: Yonik Seeley Date: Fri, 18 Feb 2011 20:16:07 +0000 Subject: [PATCH 170/185] SOLR-2365: Move DIH jars out of solr.war git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1072127 13f79535-47bb-0310-9956-ffa450edef68 --- solr/CHANGES.txt | 8 +++++++- solr/contrib/dataimporthandler/build.xml | 2 +- .../src/main/webapp/admin/dataimport.jsp | 3 +-- solr/example/example-DIH/solr/db/conf/solrconfig.xml | 2 ++ solr/example/example-DIH/solr/mail/conf/solrconfig.xml | 2 +- solr/example/example-DIH/solr/rss/conf/solrconfig.xml | 2 ++ solr/example/example-DIH/solr/tika/conf/solrconfig.xml | 8 +------- 7 files changed, 15 insertions(+), 12 deletions(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index 2a839163145..cb24aaefb3a 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -54,7 +54,6 @@ Upgrading from Solr 3.1-dev their solrconfig.xml file. - Detailed Change List ---------------------- @@ -268,6 +267,11 @@ Upgrading from Solr 1.4 arbitrarily. Solr will now fail on any attempt to sort, or apply a function to, multi-valued fields +* The DataImportHandler jars are no longer included in the solr + WAR and should be added in Solr's lib directory, or referenced + via the directive in solrconfig.xml. + + Detailed Change List ---------------------- @@ -788,6 +792,8 @@ Other Changes * SOLR-2340: Add version infos to message in JavaBinCodec when throwing exception. 
(koji) +* SOLR-2365: Move DIH jars out of solr.war (David Smiley via yonik) + Build ---------------------- diff --git a/solr/contrib/dataimporthandler/build.xml b/solr/contrib/dataimporthandler/build.xml index 7772fcbb4e8..5b9ddc18dec 100644 --- a/solr/contrib/dataimporthandler/build.xml +++ b/solr/contrib/dataimporthandler/build.xml @@ -276,7 +276,7 @@ - + diff --git a/solr/contrib/dataimporthandler/src/main/webapp/admin/dataimport.jsp b/solr/contrib/dataimporthandler/src/main/webapp/admin/dataimport.jsp index 3b9d71c1914..574ef67509e 100644 --- a/solr/contrib/dataimporthandler/src/main/webapp/admin/dataimport.jsp +++ b/solr/contrib/dataimporthandler/src/main/webapp/admin/dataimport.jsp @@ -1,6 +1,5 @@ <%@ page import="org.apache.solr.request.SolrRequestHandler" %> <%@ page import="java.util.Map" %> -<%@ page import="org.apache.solr.handler.dataimport.DataImportHandler" %> <%@ page contentType="text/html; charset=utf-8" pageEncoding="UTF-8"%> <%-- Licensed to the Apache Software Foundation (ASF) under one or more @@ -36,7 +35,7 @@ Select handler:
      <% for (String key : handlers.keySet()) { - if (handlers.get(key) instanceof DataImportHandler) { %> + if (handlers.get(key).getClass().getName().equals("org.apache.solr.handler.dataimport.DataImportHandler")) { %>
    • <%=key%>
    • <% } diff --git a/solr/example/example-DIH/solr/db/conf/solrconfig.xml b/solr/example/example-DIH/solr/db/conf/solrconfig.xml index e46d8fe299d..d90bb28c3c1 100644 --- a/solr/example/example-DIH/solr/db/conf/solrconfig.xml +++ b/solr/example/example-DIH/solr/db/conf/solrconfig.xml @@ -28,6 +28,8 @@ + + false diff --git a/solr/example/example-DIH/solr/mail/conf/solrconfig.xml b/solr/example/example-DIH/solr/mail/conf/solrconfig.xml index 19a4ebac562..ac30c45bad9 100644 --- a/solr/example/example-DIH/solr/mail/conf/solrconfig.xml +++ b/solr/example/example-DIH/solr/mail/conf/solrconfig.xml @@ -27,7 +27,7 @@ LUCENE_40 - + diff --git a/solr/example/example-DIH/solr/rss/conf/solrconfig.xml b/solr/example/example-DIH/solr/rss/conf/solrconfig.xml index 054e6f3bb2b..e5ced63200d 100644 --- a/solr/example/example-DIH/solr/rss/conf/solrconfig.xml +++ b/solr/example/example-DIH/solr/rss/conf/solrconfig.xml @@ -28,6 +28,8 @@ + + false diff --git a/solr/example/example-DIH/solr/tika/conf/solrconfig.xml b/solr/example/example-DIH/solr/tika/conf/solrconfig.xml index c4dae4cb5cb..36a03cff662 100644 --- a/solr/example/example-DIH/solr/tika/conf/solrconfig.xml +++ b/solr/example/example-DIH/solr/tika/conf/solrconfig.xml @@ -27,13 +27,7 @@ LUCENE_40 - - - - ${solr.data.dir:./solr/data} - + From 8ced9198dc8e09882f7d12fa35dbc0c606e20a4d Mon Sep 17 00:00:00 2001 From: Yonik Seeley Date: Fri, 18 Feb 2011 22:45:47 +0000 Subject: [PATCH 171/185] docs: move CHANGES entries from 4.0 to 3.1 git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1072181 13f79535-47bb-0310-9956-ffa450edef68 --- solr/CHANGES.txt | 64 +++++++++++++++++++++++++----------------------- 1 file changed, 33 insertions(+), 31 deletions(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index cb24aaefb3a..5bb242de949 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -153,29 +153,6 @@ Bug Fixes * SOLR-2275: fix DisMax 'mm' parsing to be tolerant of whitespace (Erick Erickson via hossman) -* SOLR-2307: 
fix bug in PHPSerializedResponseWriter (wt=phps) when - dealing with SolrDocumentList objects -- ie: sharded queries. - (Antonio Verni via hossman) - -* SOLR-2127: Fixed serialization of default core and indentation of solr.xml when serializing. - (Ephraim Ofir, Mark Miller) - -* SOLR-482: Provide more exception handling in CSVLoader (gsingers) - -* SOLR-2320: Fixed ReplicationHandler detail reporting for masters - (hossman) - -* SOLR-2085: Improve SolrJ behavior when FacetComponent comes before - QueryComponent (Tomas Salfischberger via hossman) - -* SOLR-1940: Fix SolrDispatchFilter behavior when Content-Type is - unknown (Lance Norskog and hossman) - -* SOLR-2339: Fix sorting to explicitly generate an error if you - attempt to sort on a multiValued field. (hossman) - -* SOLR-2348: Fix field types to explicitly generate an error if you - attempt to get a ValueSource for a multiValued field. (hossman) Other Changes ---------------------- @@ -198,16 +175,13 @@ Other Changes using Generics where applicable in method/object declatations, and adding @SuppressWarnings("unchecked") when appropriate (hossman) -* SOLR-2350: Since Solr no longer requires XML files to be in UTF-8 - (see SOLR-96) SimplePostTool (aka: post.jar) has been improved to - work with files of any mime-type or charset. (hossman) - Documentation ---------------------- * SOLR-2232: Improved README info on solr.solr.home in examples (Eric Pugh and hossman) + ================== 3.1.0-dev ================== Versions of Major Components --------------------- @@ -677,6 +651,27 @@ Bug Fixes * SOLR-2261: fix velocity template layout.vm that referred to an older version of jquery. (Eric Pugh via rmuir) +* SOLR-2307: fix bug in PHPSerializedResponseWriter (wt=phps) when + dealing with SolrDocumentList objects -- ie: sharded queries. + (Antonio Verni via hossman) + +* SOLR-2127: Fixed serialization of default core and indentation of solr.xml when serializing. 
+ (Ephraim Ofir, Mark Miller) + +* SOLR-2320: Fixed ReplicationHandler detail reporting for masters + (hossman) + +* SOLR-482: Provide more exception handling in CSVLoader (gsingers) + +* SOLR-1283: HTMLStripCharFilter sometimes threw a "Mark Invalid" exception. + (Julien Coloos, hossman, yonik) + +* SOLR-2085: Improve SolrJ behavior when FacetComponent comes before + QueryComponent (Tomas Salfischberger via hossman) + +* SOLR-1940: Fix SolrDispatchFilter behavior when Content-Type is + unknown (Lance Norskog and hossman) + * SOLR-1983: snappuller fails when modifiedConfFiles is not empty and full copy of index is needed. (Alexander Kanarsky via yonik) @@ -687,13 +682,18 @@ Bug Fixes DocumentAnalysisRequestHandler to respect charset from XML file and only use HTTP header's "Content-Type" as a "hint". (Uwe Schindler) +* SOLR-2339: Fix sorting to explicitly generate an error if you + attempt to sort on a multiValued field. (hossman) + +* SOLR-2348: Fix field types to explicitly generate an error if you + attempt to get a ValueSource for a multiValued field. (hossman) + * SOLR-1191: resolve DataImportHandler deltaQuery column against pk when pk has a prefix (e.g. pk="book.id" deltaQuery="select id from ..."). More useful error reporting when no match found (previously failed with a NullPointerException in log and no clear user feedback). (gthb via yonik) - Other Changes ---------------------- @@ -792,6 +792,10 @@ Other Changes * SOLR-2340: Add version infos to message in JavaBinCodec when throwing exception. (koji) +* SOLR-2350: Since Solr no longer requires XML files to be in UTF-8 + (see SOLR-96) SimplePostTool (aka: post.jar) has been improved to + work with files of any mime-type or charset. 
(hossman) + * SOLR-2365: Move DIH jars out of solr.war (David Smiley via yonik) @@ -810,7 +814,6 @@ Build * LUCENE-2657: Switch from using Maven POM templates to full POMs when generating Maven artifacts (Steven Rowe) - Documentation ---------------------- @@ -928,8 +931,7 @@ New Features 7. SOLR-680: Add StatsComponent. This gets simple statistics on matched numeric fields, including: min, max, mean, median, stddev. (koji, ryan) - 7.1 SOLR-1380: Added support for multi-valued fields to stats component - (Harish Agarwal via gsingers) + 7.1 SOLR-1380: Added support for multi-valued fields (Harish Agarwal via gsingers) 8. SOLR-561: Added Replication implemented in Java as a request handler. Supports index replication as well as configuration replication and exposes detailed statistics and progress information From d279ff8251050f421bc9015f5226f448f86d82e4 Mon Sep 17 00:00:00 2001 From: "Chris M. Hostetter" Date: Sat, 19 Feb 2011 01:33:12 +0000 Subject: [PATCH 172/185] this test was missing @Test, and wasn't actually checking any fields were added git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1072230 13f79535-47bb-0310-9956-ffa450edef68 --- .../handler/dataimport/TestTikaEntityProcessor.java | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/solr/contrib/dataimporthandler/src/extras/test/java/org/apache/solr/handler/dataimport/TestTikaEntityProcessor.java b/solr/contrib/dataimporthandler/src/extras/test/java/org/apache/solr/handler/dataimport/TestTikaEntityProcessor.java index 6bcaedf6d52..019fa85a0a5 100644 --- a/solr/contrib/dataimporthandler/src/extras/test/java/org/apache/solr/handler/dataimport/TestTikaEntityProcessor.java +++ b/solr/contrib/dataimporthandler/src/extras/test/java/org/apache/solr/handler/dataimport/TestTikaEntityProcessor.java @@ -17,6 +17,7 @@ package org.apache.solr.handler.dataimport; import org.junit.BeforeClass; +import org.junit.Test; /**Testcase for TikaEntityProcessor * @version $Id$ @@ -28,6 +29,7 
@@ public class TestTikaEntityProcessor extends AbstractDataImportHandlerTestCase { initCore("dataimport-solrconfig.xml", "dataimport-schema-no-unique-key.xml", getFile("solr-dihextras").getAbsolutePath()); } + @Test public void testIndexingWithTikaEntityProcessor() throws Exception { String conf = "" + @@ -35,12 +37,17 @@ public class TestTikaEntityProcessor extends AbstractDataImportHandlerTestCase { " " + " " + " " + - " " + + " " + " " + " " + " " + ""; runFullImport(conf); - assertQ(req("*:*"), "//*[@numFound='1']"); + assertQ(req("*:*") + ,"//*[@numFound='1']" + ,"//str[@name='author'][.='Grant Ingersoll']" + ,"//str[@name='title'][.='solr-word']" + ,"//str[@name='text']" + ); } } From 3b9482440b2b0d792b8d855b91a5559d6a8e8a5d Mon Sep 17 00:00:00 2001 From: "Chris M. Hostetter" Date: Sat, 19 Feb 2011 01:49:10 +0000 Subject: [PATCH 173/185] SOLR-2116: Fix TikaConfig classloader bug in TikaEntityProcessor git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1072232 13f79535-47bb-0310-9956-ffa450edef68 --- solr/contrib/dataimporthandler/CHANGES.txt | 4 +++ .../dataimport/TikaEntityProcessor.java | 34 ++++++++++--------- 2 files changed, 22 insertions(+), 16 deletions(-) diff --git a/solr/contrib/dataimporthandler/CHANGES.txt b/solr/contrib/dataimporthandler/CHANGES.txt index 10117d7fe42..65ab731384c 100644 --- a/solr/contrib/dataimporthandler/CHANGES.txt +++ b/solr/contrib/dataimporthandler/CHANGES.txt @@ -80,6 +80,10 @@ Bug Fixes * SOLR-2330: solrconfig.xml files in example-DIH are broken. 
(Matt Parker, koji) +* SOLR-2116: Fix TikaConfig classloader bug in TikaEntityProcessor + (Martijn van Groningen via hossman) + + Other Changes ---------------------- diff --git a/solr/contrib/dataimporthandler/src/extras/main/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java b/solr/contrib/dataimporthandler/src/extras/main/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java index b7b31ab5abb..9913c15cf18 100644 --- a/solr/contrib/dataimporthandler/src/extras/main/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java +++ b/solr/contrib/dataimporthandler/src/extras/main/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java @@ -17,15 +17,11 @@ package org.apache.solr.handler.dataimport; import org.apache.commons.io.IOUtils; -import static org.apache.solr.handler.dataimport.DataImportHandlerException.SEVERE; -import static org.apache.solr.handler.dataimport.DataImportHandlerException.wrapAndThrow; -import static org.apache.solr.handler.dataimport.DataImporter.COLUMN; -import static org.apache.solr.handler.dataimport.XPathEntityProcessor.URL; import org.apache.tika.config.TikaConfig; import org.apache.tika.metadata.Metadata; import org.apache.tika.parser.AutoDetectParser; -import org.apache.tika.parser.Parser; import org.apache.tika.parser.ParseContext; +import org.apache.tika.parser.Parser; import org.apache.tika.sax.BodyContentHandler; import org.apache.tika.sax.ContentHandlerDecorator; import org.apache.tika.sax.XHTMLContentHandler; @@ -47,6 +43,11 @@ import java.io.StringWriter; import java.io.Writer; import java.util.HashMap; import java.util.Map; + +import static org.apache.solr.handler.dataimport.DataImportHandlerException.SEVERE; +import static org.apache.solr.handler.dataimport.DataImportHandlerException.wrapAndThrow; +import static org.apache.solr.handler.dataimport.DataImporter.COLUMN; +import static org.apache.solr.handler.dataimport.XPathEntityProcessor.URL; /** *

      An implementation of {@link EntityProcessor} which reads data from rich docs * using Apache Tika @@ -65,19 +66,20 @@ public class TikaEntityProcessor extends EntityProcessorBase { @Override protected void firstInit(Context context) { - String tikaConfigFile = context.getResolvedEntityAttribute("tikaConfig"); - if (tikaConfigFile == null) { - tikaConfig = TikaConfig.getDefaultConfig(); - } else { - File configFile = new File(tikaConfigFile); - if (!configFile.isAbsolute()) { - configFile = new File(context.getSolrCore().getResourceLoader().getConfigDir(), tikaConfigFile); - } - try { + try { + String tikaConfigFile = context.getResolvedEntityAttribute("tikaConfig"); + if (tikaConfigFile == null) { + ClassLoader classLoader = context.getSolrCore().getResourceLoader().getClassLoader(); + tikaConfig = new TikaConfig(classLoader); + } else { + File configFile = new File(tikaConfigFile); + if (!configFile.isAbsolute()) { + configFile = new File(context.getSolrCore().getResourceLoader().getConfigDir(), tikaConfigFile); + } tikaConfig = new TikaConfig(configFile); - } catch (Exception e) { - wrapAndThrow (SEVERE, e,"Unable to load Tika Config"); } + } catch (Exception e) { + wrapAndThrow (SEVERE, e,"Unable to load Tika Config"); } format = context.getResolvedEntityAttribute("format"); From f7b037d3cfcb9b4f817d4026135e8e47df9fb211 Mon Sep 17 00:00:00 2001 From: Steven Rowe Date: Sat, 19 Feb 2011 04:49:36 +0000 Subject: [PATCH 174/185] LUCENE-2923: Cleanup contrib/demo git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1072250 13f79535-47bb-0310-9956-ffa450edef68 --- dev-tools/idea/lucene/contrib/demo/demo.iml | 3 - .../idea/modules/benchmark/benchmark.iml | 1 - .../lucene/contrib/demo/pom.xml.template | 26 +- .../maven/modules/benchmark/pom.xml.template | 6 - lucene/build.xml | 3 - lucene/contrib/demo/build.xml | 75 +- .../contrib/demo/src/java/demo-build.template | 253 ------- .../org/apache/lucene/demo/DeleteFiles.java | 66 -- 
.../org/apache/lucene/demo/FileDocument.java | 72 -- .../org/apache/lucene/demo/HTMLDocument.java | 92 --- .../org/apache/lucene/demo/IndexFiles.java | 155 +++- .../org/apache/lucene/demo/IndexHTML.java | 181 ----- .../org/apache/lucene/demo/SearchFiles.java | 55 +- .../org/apache/lucene/demo/html/Test.java | 51 -- lucene/contrib/demo/src/jsp/README.txt | 8 - lucene/contrib/demo/src/jsp/WEB-INF/web.xml | 10 - lucene/contrib/demo/src/jsp/configuration.jsp | 22 - lucene/contrib/demo/src/jsp/footer.jsp | 21 - lucene/contrib/demo/src/jsp/header.jsp | 26 - lucene/contrib/demo/src/jsp/index.jsp | 29 - lucene/contrib/demo/src/jsp/results.jsp | 179 ----- .../test/org/apache/lucene/demo/TestDemo.java | 41 +- .../lucene/demo/test-files/docs/apache1.0.txt | 56 ++ .../lucene/demo/test-files/docs/apache1.1.txt | 58 ++ .../lucene/demo/test-files/docs/apache2.0.txt | 201 ++++++ .../lucene/demo/test-files/docs/cpl1.0.txt | 74 ++ .../lucene/demo/test-files/docs/epl1.0.txt | 88 +++ .../lucene/demo/test-files/docs/freebsd.txt | 10 + .../lucene/demo/test-files/docs/gpl1.0.txt | 250 +++++++ .../lucene/demo/test-files/docs/gpl2.0.txt | 339 +++++++++ .../lucene/demo/test-files/docs/gpl3.0.txt | 674 ++++++++++++++++++ .../lucene/demo/test-files/docs/lgpl2.1.txt | 504 +++++++++++++ .../lucene/demo/test-files/docs/lgpl3.txt | 165 +++++ .../lucene/demo/test-files/docs/lpgl2.0.txt | 481 +++++++++++++ .../lucene/demo/test-files/docs/mit.txt | 22 + .../demo/test-files/docs/mozilla1.1.txt | 470 ++++++++++++ .../test-files/docs/mozilla_eula_firefox3.txt | 29 + .../docs/mozilla_eula_thunderbird2.txt | 27 + .../lucene/demo/test-files/html/test1.html | 8 - .../lucene/demo/test-files/html/test2.html | 9 - .../apache/lucene/demo/test-files/queries.txt | 1 - .../lucene/demo/test-files/queries2.txt | 1 - lucene/docs/contributions.html | 10 +- lucene/docs/demo.html | 55 +- lucene/docs/demo.pdf | 167 +++-- lucene/docs/demo2.html | 150 ++-- lucene/docs/demo2.pdf | 137 ++-- lucene/docs/demo3.html | 387 
---------- lucene/docs/demo3.pdf | 344 --------- lucene/docs/demo4.html | 452 ------------ lucene/docs/demo4.pdf | 389 ---------- lucene/docs/fileformats.html | 10 +- lucene/docs/gettingstarted.html | 31 +- lucene/docs/gettingstarted.pdf | 30 +- lucene/docs/index.html | 10 +- lucene/docs/linkmap.html | 22 +- lucene/docs/linkmap.pdf | 28 +- lucene/docs/lucene-contrib/index.html | 47 +- lucene/docs/lucene-contrib/index.pdf | 447 ++++++------ lucene/docs/queryparsersyntax.html | 10 +- lucene/docs/scoring.html | 10 +- lucene/docs/systemrequirements.html | 10 +- lucene/src/java/overview.html | 27 +- .../src/documentation/content/xdocs/demo.xml | 27 +- .../src/documentation/content/xdocs/demo2.xml | 156 ++-- .../src/documentation/content/xdocs/demo3.xml | 90 --- .../src/documentation/content/xdocs/demo4.xml | 160 ----- .../content/xdocs/gettingstarted.xml | 16 +- .../content/xdocs/lucene-contrib/index.xml | 6 + .../src/documentation/content/xdocs/site.xml | 4 +- modules/benchmark/build.xml | 22 +- .../byTask/feeds/DemoHTMLParser.java | 2 +- .../byTask/feeds/demohtml}/Entities.java | 2 +- .../byTask/feeds/demohtml}/HTMLParser.java | 2 +- .../byTask/feeds/demohtml}/HTMLParser.jj | 2 +- .../feeds/demohtml}/HTMLParserConstants.java | 2 +- .../demohtml}/HTMLParserTokenManager.java | 2 +- .../feeds/demohtml}/ParseException.java | 4 +- .../byTask/feeds/demohtml}/ParserThread.java | 2 +- .../feeds/demohtml}/SimpleCharStream.java | 8 +- .../byTask/feeds/demohtml}/Tags.java | 2 +- .../byTask/feeds/demohtml}/Token.java | 4 +- .../byTask/feeds/demohtml}/TokenMgrError.java | 4 +- .../feeds/demohtml}/TestHtmlParser.java | 3 +- 84 files changed, 4409 insertions(+), 3726 deletions(-) delete mode 100644 lucene/contrib/demo/src/java/demo-build.template delete mode 100644 lucene/contrib/demo/src/java/org/apache/lucene/demo/DeleteFiles.java delete mode 100644 lucene/contrib/demo/src/java/org/apache/lucene/demo/FileDocument.java delete mode 100644 
lucene/contrib/demo/src/java/org/apache/lucene/demo/HTMLDocument.java delete mode 100644 lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexHTML.java delete mode 100644 lucene/contrib/demo/src/java/org/apache/lucene/demo/html/Test.java delete mode 100644 lucene/contrib/demo/src/jsp/README.txt delete mode 100755 lucene/contrib/demo/src/jsp/WEB-INF/web.xml delete mode 100644 lucene/contrib/demo/src/jsp/configuration.jsp delete mode 100644 lucene/contrib/demo/src/jsp/footer.jsp delete mode 100644 lucene/contrib/demo/src/jsp/header.jsp delete mode 100755 lucene/contrib/demo/src/jsp/index.jsp delete mode 100755 lucene/contrib/demo/src/jsp/results.jsp create mode 100644 lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/apache1.0.txt create mode 100644 lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/apache1.1.txt create mode 100644 lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/apache2.0.txt create mode 100644 lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/cpl1.0.txt create mode 100644 lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/epl1.0.txt create mode 100644 lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/freebsd.txt create mode 100644 lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/gpl1.0.txt create mode 100644 lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/gpl2.0.txt create mode 100644 lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/gpl3.0.txt create mode 100644 lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/lgpl2.1.txt create mode 100644 lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/lgpl3.txt create mode 100644 lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/lpgl2.0.txt create mode 100644 lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/mit.txt create mode 100644 
lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/mozilla1.1.txt create mode 100644 lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/mozilla_eula_firefox3.txt create mode 100644 lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/mozilla_eula_thunderbird2.txt delete mode 100644 lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/html/test1.html delete mode 100644 lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/html/test2.html delete mode 100644 lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/queries.txt delete mode 100644 lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/queries2.txt delete mode 100644 lucene/docs/demo3.html delete mode 100644 lucene/docs/demo3.pdf delete mode 100644 lucene/docs/demo4.html delete mode 100644 lucene/docs/demo4.pdf delete mode 100644 lucene/src/site/src/documentation/content/xdocs/demo3.xml delete mode 100644 lucene/src/site/src/documentation/content/xdocs/demo4.xml rename {lucene/contrib/demo/src/java/org/apache/lucene/demo/html => modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/demohtml}/Entities.java (99%) rename {lucene/contrib/demo/src/java/org/apache/lucene/demo/html => modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/demohtml}/HTMLParser.java (99%) rename {lucene/contrib/demo/src/java/org/apache/lucene/demo/html => modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/demohtml}/HTMLParser.jj (99%) rename {lucene/contrib/demo/src/java/org/apache/lucene/demo/html => modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/demohtml}/HTMLParserConstants.java (97%) rename {lucene/contrib/demo/src/java/org/apache/lucene/demo/html => modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/demohtml}/HTMLParserTokenManager.java (99%) rename {lucene/contrib/demo/src/java/org/apache/lucene/demo/html => 
modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/demohtml}/ParseException.java (98%) rename {lucene/contrib/demo/src/java/org/apache/lucene/demo/html => modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/demohtml}/ParserThread.java (96%) rename {lucene/contrib/demo/src/java/org/apache/lucene/demo/html => modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/demohtml}/SimpleCharStream.java (98%) rename {lucene/contrib/demo/src/java/org/apache/lucene/demo/html => modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/demohtml}/Tags.java (97%) rename {lucene/contrib/demo/src/java/org/apache/lucene/demo/html => modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/demohtml}/Token.java (96%) rename {lucene/contrib/demo/src/java/org/apache/lucene/demo/html => modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/demohtml}/TokenMgrError.java (97%) rename {lucene/contrib/demo/src/test/org/apache/lucene/demo/html => modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/demohtml}/TestHtmlParser.java (98%) diff --git a/dev-tools/idea/lucene/contrib/demo/demo.iml b/dev-tools/idea/lucene/contrib/demo/demo.iml index adbd3ad69f3..85978fc7fb8 100644 --- a/dev-tools/idea/lucene/contrib/demo/demo.iml +++ b/dev-tools/idea/lucene/contrib/demo/demo.iml @@ -6,13 +6,10 @@ - - - diff --git a/dev-tools/idea/modules/benchmark/benchmark.iml b/dev-tools/idea/modules/benchmark/benchmark.iml index d838caa0190..783333be8df 100644 --- a/dev-tools/idea/modules/benchmark/benchmark.iml +++ b/dev-tools/idea/modules/benchmark/benchmark.iml @@ -17,7 +17,6 @@ - diff --git a/dev-tools/maven/lucene/contrib/demo/pom.xml.template b/dev-tools/maven/lucene/contrib/demo/pom.xml.template index 0efced332d0..cd82b482448 100644 --- a/dev-tools/maven/lucene/contrib/demo/pom.xml.template +++ b/dev-tools/maven/lucene/contrib/demo/pom.xml.template @@ -28,8 +28,8 @@ org.apache.lucene lucene-demo - war - Lucene Demos 
+ jar + Lucene Demo This is the demo for Apache Lucene Java lucene/contrib/demo @@ -53,11 +53,6 @@ lucene-analyzers-common ${project.version} - - javax.servlet - servlet-api - provided - junit junit @@ -79,15 +74,6 @@ - - org.apache.maven.plugins - maven-war-plugin - - src/jsp - true - - - org.codehaus.mojo appassembler-maven-plugin @@ -100,18 +86,10 @@ unix - - org.apache.lucene.demo.DeleteFiles - DeleteFiles - org.apache.lucene.demo.IndexFiles IndexFiles - - org.apache.lucene.demo.IndexHTML - IndexHTML - org.apache.lucene.demo.SearchFiles SearchFiles diff --git a/dev-tools/maven/modules/benchmark/pom.xml.template b/dev-tools/maven/modules/benchmark/pom.xml.template index 184c190434a..4f9ad50aa39 100755 --- a/dev-tools/maven/modules/benchmark/pom.xml.template +++ b/dev-tools/maven/modules/benchmark/pom.xml.template @@ -53,12 +53,6 @@ lucene-analyzers-common ${project.version} - - ${project.groupId} - lucene-demo - ${project.version} - classes - ${project.groupId} lucene-highlighter diff --git a/lucene/build.xml b/lucene/build.xml index 1e7accc92b4..4c9e1d50b2e 100644 --- a/lucene/build.xml +++ b/lucene/build.xml @@ -497,9 +497,6 @@ - - - diff --git a/lucene/contrib/demo/build.xml b/lucene/contrib/demo/build.xml index 8097400f87f..be1fcd03816 100644 --- a/lucene/contrib/demo/build.xml +++ b/lucene/contrib/demo/build.xml @@ -17,81 +17,40 @@ limitations under the License. --> - + Lucene Demo - - - - + + + + + + + + + + + + - + - + - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/lucene/contrib/demo/src/java/demo-build.template b/lucene/contrib/demo/src/java/demo-build.template deleted file mode 100644 index d673b212530..00000000000 --- a/lucene/contrib/demo/src/java/demo-build.template +++ /dev/null @@ -1,253 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ################################################################## - ${common.dir}/${core.name}.jar not found. 
- ################################################################## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ----- (1) Prepare dir ----- - cd ${common.dir} - rmdir demo-text-dir - - mkdir demo-text-dir - - cd demo-text-dir - ----- (2) Index the files located under ${common.dir}/src ----- - - - - - - - - - - ----- Interactive search ----- - cd demo-text-dir - - - - - - - - - - - ----- (1) Prepare dir ----- - cd ${common.dir} - rmdir demo-html-dir - - mkdir demo-html-dir - - cd demo-html-dir - ----- (2) Index the files located under ${common.dir}/src ----- - - - - - - - - - - ----- Interactive search ----- - cd demo-html-dir - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - java -classpath "../${core.name}.jar;../${demo.name}.jar" org.apache.lucene.demo.@{class} @{paramsDisplay} - - - - - - - - - - - diff --git a/lucene/contrib/demo/src/java/org/apache/lucene/demo/DeleteFiles.java b/lucene/contrib/demo/src/java/org/apache/lucene/demo/DeleteFiles.java deleted file mode 100644 index a5588eb151a..00000000000 --- a/lucene/contrib/demo/src/java/org/apache/lucene/demo/DeleteFiles.java +++ /dev/null @@ -1,66 +0,0 @@ -package org.apache.lucene.demo; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.io.File; - -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.FSDirectory; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.Term; -//import org.apache.lucene.index.Term; - - -/** Deletes documents from an index that do not contain a term. */ -public class DeleteFiles { - - private DeleteFiles() {} // singleton - - /** Deletes documents from an index that do not contain a term. */ - public static void main(String[] args) { - String usage = "java org.apache.lucene.demo.DeleteFiles "; - if (args.length == 0) { - System.err.println("Usage: " + usage); - System.exit(1); - } - try { - Directory directory = FSDirectory.open(new File("index")); - IndexReader reader = IndexReader.open(directory, false); // we don't want read-only because we are about to delete - - Term term = new Term("path", args[0]); - int deleted = reader.deleteDocuments(term); - - System.out.println("deleted " + deleted + - " documents containing " + term); - - // one can also delete documents by their internal id: - /* - for (int i = 0; i < reader.maxDoc(); i++) { - System.out.println("Deleting document with id " + i); - reader.delete(i); - }*/ - - reader.close(); - directory.close(); - - } catch (Exception e) { - System.out.println(" caught a " + e.getClass() + - "\n with message: " + e.getMessage()); - } - } -} diff --git a/lucene/contrib/demo/src/java/org/apache/lucene/demo/FileDocument.java b/lucene/contrib/demo/src/java/org/apache/lucene/demo/FileDocument.java deleted file mode 100644 index d635e6e07e7..00000000000 --- a/lucene/contrib/demo/src/java/org/apache/lucene/demo/FileDocument.java +++ /dev/null @@ -1,72 +0,0 @@ -package org.apache.lucene.demo; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.io.File; -import java.io.FileInputStream; -import java.io.InputStreamReader; - -import org.apache.lucene.document.DateTools; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; - -/** A utility for making Lucene Documents from a File. */ - -public class FileDocument { - /** Makes a document for a File. -

      - The document has three fields: -

        -
      • path--containing the pathname of the file, as a stored, - untokenized field; -
      • modified--containing the last modified date of the file as - a field as created by DateTools; and -
      • contents--containing the full contents of the file, as a - Reader field; - */ - public static Document Document(File f) - throws java.io.IOException { - - // make a new, empty document - Document doc = new Document(); - - // Add the path of the file as a field named "path". Use a field that is - // indexed (i.e. searchable), but don't tokenize the field into words. - doc.add(new Field("path", f.getPath(), Field.Store.YES, Field.Index.NOT_ANALYZED)); - - // Add the last modified date of the file a field named "modified". Use - // a field that is indexed (i.e. searchable), but don't tokenize the field - // into words. - doc.add(new Field("modified", - DateTools.timeToString(f.lastModified(), DateTools.Resolution.MINUTE), - Field.Store.YES, Field.Index.NOT_ANALYZED)); - - // Add the contents of the file to a field named "contents". Specify a Reader, - // so that the text of the file is tokenized and indexed, but not stored. - // Note that FileReader expects the file to be in UTF-8 encoding. - // If that's not the case searching for special characters will fail. - doc.add(new Field("contents", new InputStreamReader(new FileInputStream(f), "UTF-8"))); - - // return the document - return doc; - } - - private FileDocument() {} -} - diff --git a/lucene/contrib/demo/src/java/org/apache/lucene/demo/HTMLDocument.java b/lucene/contrib/demo/src/java/org/apache/lucene/demo/HTMLDocument.java deleted file mode 100644 index 67cafbf2fad..00000000000 --- a/lucene/contrib/demo/src/java/org/apache/lucene/demo/HTMLDocument.java +++ /dev/null @@ -1,92 +0,0 @@ -package org.apache.lucene.demo; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.io.*; -import org.apache.lucene.document.*; -import org.apache.lucene.demo.html.HTMLParser; - -/** A utility for making Lucene Documents for HTML documents. */ - -public class HTMLDocument { - static char dirSep = System.getProperty("file.separator").charAt(0); - - public static String uid(File f) { - // Append path and date into a string in such a way that lexicographic - // sorting gives the same results as a walk of the file hierarchy. Thus - // null (\u0000) is used both to separate directory components and to - // separate the path from the date. - return f.getPath().replace(dirSep, '\u0000') + - "\u0000" + - DateTools.timeToString(f.lastModified(), DateTools.Resolution.SECOND); - } - - public static String uid2url(String uid) { - String url = uid.replace('\u0000', '/'); // replace nulls with slashes - return url.substring(0, url.lastIndexOf('/')); // remove date from end - } - - public static Document Document(File f) - throws IOException, InterruptedException { - // make a new, empty document - Document doc = new Document(); - - // Add the url as a field named "path". Use a field that is - // indexed (i.e. searchable), but don't tokenize the field into words. - doc.add(new Field("path", f.getPath().replace(dirSep, '/'), Field.Store.YES, - Field.Index.NOT_ANALYZED)); - - // Add the last modified date of the file a field named "modified". - // Use a field that is indexed (i.e. searchable), but don't tokenize - // the field into words. 
- doc.add(new Field("modified", - DateTools.timeToString(f.lastModified(), DateTools.Resolution.MINUTE), - Field.Store.YES, Field.Index.NOT_ANALYZED)); - - // Add the uid as a field, so that index can be incrementally maintained. - // This field is not stored with document, it is indexed, but it is not - // tokenized prior to indexing. - doc.add(new Field("uid", uid(f), Field.Store.NO, Field.Index.NOT_ANALYZED)); - - FileInputStream fis = new FileInputStream(f); - InputStreamReader reader = new InputStreamReader(fis, "UTF-8"); - HTMLParser parser = new HTMLParser(reader); - - // Add the tag-stripped contents as a Reader-valued Text field so it will - // get tokenized and indexed. - doc.add(new Field("contents", parser.getReader())); - - // add any document keywords if they exist - String keywords = parser.getMetaTags().getProperty("keywords"); - if (keywords != null) - doc.add(new Field("contents", keywords, Field.Store.NO, Field.Index.ANALYZED)); - - // Add the summary as a field that is stored and returned with - // hit documents for display. - doc.add(new Field("summary", parser.getSummary(), Field.Store.YES, Field.Index.NO)); - - // Add the title as a field that it can be searched and that is stored. - doc.add(new Field("title", parser.getTitle(), Field.Store.YES, Field.Index.ANALYZED)); - - // return the document - return doc; - } - - private HTMLDocument() {} -} - diff --git a/lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java b/lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java index 7652f6c0cd1..6ac4bec4de5 100644 --- a/lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java +++ b/lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java @@ -17,39 +17,59 @@ package org.apache.lucene.demo; * limitations under the License. 
*/ +import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.NumericField; import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.IndexWriterConfig.OpenMode; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.Term; +import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.util.Version; +import java.io.BufferedReader; import java.io.File; +import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; +import java.io.InputStreamReader; import java.util.Date; -/** Index all text files under a directory. */ +/** Index all text files under a directory. See http://lucene.apache.org/java/4_0/demo.html. */ public class IndexFiles { private IndexFiles() {} - static final File INDEX_DIR = new File("index"); - /** Index all text files under a directory. */ public static void main(String[] args) { - String usage = "java org.apache.lucene.demo.IndexFiles "; - if (args.length == 0) { + String usage = "java org.apache.lucene.demo.IndexFiles " + + " [-index INDEX_PATH] [-docs DOCS_PATH] [-update]\n\n" + // TODO: Change the link with every release (or: fill in some less error-prone alternative here...) + + "See http://lucene.apache.org/java/4_0/demo.html for details."; + String indexPath = "index"; + String docsPath = null; + boolean create = true; + for(int i=0;iWriteLineDocTask. 
+ * + * @param writer Writer to the index where the given file/dir info will be stored + * @param file The file to index, or the directory to recurse into to find files to index + * @throws IOException + */ static void indexDocs(IndexWriter writer, File file) throws IOException { // do not try to index files that cannot be read @@ -88,16 +149,62 @@ public class IndexFiles { } } } else { - System.out.println("adding " + file); + + FileInputStream fis; try { - writer.addDocument(FileDocument.Document(file)); + fis = new FileInputStream(file); + } catch (FileNotFoundException fnfe) { + // at least on windows, some temporary files raise this exception with an "access denied" message + // checking if the file can be read doesn't help + return; } - // at least on windows, some temporary files raise this exception with an "access denied" message - // checking if the file can be read doesn't help - catch (FileNotFoundException fnfe) { + + try { + + // make a new, empty document + Document doc = new Document(); + + // Add the path of the file as a field named "path". Use a + // field that is indexed (i.e. searchable), but don't tokenize + // the field into separate words and don't index term frequency + // or positional information: + Field pathField = new Field("path", file.getPath(), Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS); + pathField.setOmitTermFreqAndPositions(true); + doc.add(pathField); + + // Add the last modified date of the file a field named "modified". + // Use a NumericField that is indexed (i.e. efficiently filterable with + // NumericRangeFilter). This indexes to milli-second resolution, which + // is often too fine. You could instead create a number based on + // year/month/day/hour/minutes/seconds, down the resolution you require. + // For example the long value 2011021714 would mean + // February 17, 2011, 2-3 PM. 
+ NumericField modifiedField = new NumericField("modified"); + modifiedField.setLongValue(file.lastModified()); + doc.add(modifiedField); + + // Add the contents of the file to a field named "contents". Specify a Reader, + // so that the text of the file is tokenized and indexed, but not stored. + // Note that FileReader expects the file to be in UTF-8 encoding. + // If that's not the case searching for special characters will fail. + doc.add(new Field("contents", new BufferedReader(new InputStreamReader(fis, "UTF-8")))); + + if (writer.getConfig().getOpenMode() == OpenMode.CREATE) { + // New index, so we just add the document (no old document can be there): + System.out.println("adding " + file); + writer.addDocument(doc); + } else { + // Existing index (an old copy of this document may have been indexed) so + // we use updateDocument instead to replace the old one matching the exact + // path, if present: + System.out.println("updating " + file); + writer.updateDocument(new Term("path", file.getPath()), doc); + } + + } finally { + fis.close(); } } } } - } diff --git a/lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexHTML.java b/lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexHTML.java deleted file mode 100644 index 75090df931a..00000000000 --- a/lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexHTML.java +++ /dev/null @@ -1,181 +0,0 @@ -package org.apache.lucene.demo; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import org.apache.lucene.analysis.standard.StandardAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.Term; -import org.apache.lucene.index.TermsEnum; -import org.apache.lucene.index.Terms; -import org.apache.lucene.index.MultiFields; -import org.apache.lucene.index.IndexWriterConfig.OpenMode; -import org.apache.lucene.store.FSDirectory; -import org.apache.lucene.util.Version; -import org.apache.lucene.util.BytesRef; - -import java.io.File; -import java.util.Date; -import java.util.Arrays; - -/** Indexer for HTML files. 
*/ -public class IndexHTML { - private IndexHTML() {} - - private static boolean deleting = false; // true during deletion pass - private static IndexReader reader; // existing index - private static IndexWriter writer; // new index being built - private static TermsEnum uidIter; // document id iterator - - /** Indexer for HTML files.*/ - public static void main(String[] argv) { - try { - File index = new File("index"); - boolean create = false; - File root = null; - - String usage = "IndexHTML [-create] [-index ] "; - - if (argv.length == 0) { - System.err.println("Usage: " + usage); - return; - } - - for (int i = 0; i < argv.length; i++) { - if (argv[i].equals("-index")) { // parse -index option - index = new File(argv[++i]); - } else if (argv[i].equals("-create")) { // parse -create option - create = true; - } else if (i != argv.length-1) { - System.err.println("Usage: " + usage); - return; - } else - root = new File(argv[i]); - } - - if(root == null) { - System.err.println("Specify directory to index"); - System.err.println("Usage: " + usage); - return; - } - - Date start = new Date(); - - if (!create) { // delete stale docs - deleting = true; - indexDocs(root, index, create); - } - writer = new IndexWriter(FSDirectory.open(index), new IndexWriterConfig( - Version.LUCENE_CURRENT, new StandardAnalyzer(Version.LUCENE_CURRENT)) - .setOpenMode(create ? OpenMode.CREATE : OpenMode.CREATE_OR_APPEND)); - indexDocs(root, index, create); // add new docs - - System.out.println("Optimizing index..."); - writer.optimize(); - writer.close(); - - Date end = new Date(); - - System.out.print(end.getTime() - start.getTime()); - System.out.println(" total milliseconds"); - - } catch (Exception e) { - e.printStackTrace(); - } - } - - /* Walk directory hierarchy in uid order, while keeping uid iterator from - /* existing index in sync. 
Mismatches indicate one of: (a) old documents to - /* be deleted; (b) unchanged documents, to be left alone; or (c) new - /* documents, to be indexed. - */ - - private static void indexDocs(File file, File index, boolean create) - throws Exception { - if (!create) { // incrementally update - - reader = IndexReader.open(FSDirectory.open(index), false); // open existing index - Terms terms = MultiFields.getTerms(reader, "uid"); - if (terms != null) { - uidIter = terms.iterator(); - - indexDocs(file); - - if (deleting) { // delete rest of stale docs - BytesRef text; - while ((text=uidIter.next()) != null) { - String termText = text.utf8ToString(); - System.out.println("deleting " + - HTMLDocument.uid2url(termText)); - reader.deleteDocuments(new Term("uid", termText)); - } - deleting = false; - } - } - - reader.close(); // close existing index - - } else // don't have exisiting - indexDocs(file); - } - - private static void indexDocs(File file) throws Exception { - if (file.isDirectory()) { // if a directory - String[] files = file.list(); // list its files - Arrays.sort(files); // sort the files - for (int i = 0; i < files.length; i++) // recursively index them - indexDocs(new File(file, files[i])); - - } else if (file.getPath().endsWith(".html") || // index .html files - file.getPath().endsWith(".htm") || // index .htm files - file.getPath().endsWith(".txt")) { // index .txt files - - if (uidIter != null) { - String uid = HTMLDocument.uid(file); // construct uid for doc - - BytesRef text; - while((text = uidIter.next()) != null) { - String termText = text.utf8ToString(); - if (termText.compareTo(uid) < 0) { - if (deleting) { // delete stale docs - System.out.println("deleting " + - HTMLDocument.uid2url(termText)); - reader.deleteDocuments(new Term("uid", termText)); - } - } else { - break; - } - } - if (text != null && - text.utf8ToString().compareTo(uid) == 0) { - uidIter.next(); // keep matching docs - } else if (!deleting) { // add new docs - Document doc = 
HTMLDocument.Document(file); - System.out.println("adding " + doc.get("path")); - writer.addDocument(doc); - } - } else { // creating a new index - Document doc = HTMLDocument.Document(file); - System.out.println("adding " + doc.get("path")); - writer.addDocument(doc); // add docs unconditionally - } - } - } -} diff --git a/lucene/contrib/demo/src/java/org/apache/lucene/demo/SearchFiles.java b/lucene/contrib/demo/src/java/org/apache/lucene/demo/SearchFiles.java index 6a300459995..134650418e3 100644 --- a/lucene/contrib/demo/src/java/org/apache/lucene/demo/SearchFiles.java +++ b/lucene/contrib/demo/src/java/org/apache/lucene/demo/SearchFiles.java @@ -27,7 +27,6 @@ import java.util.Date; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.queryParser.QueryParser; import org.apache.lucene.search.Collector; @@ -35,7 +34,7 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.TopScoreDocCollector; +import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.util.Version; @@ -47,7 +46,7 @@ public class SearchFiles { /** Simple command-line based search demo. 
*/ public static void main(String[] args) throws Exception { String usage = - "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-raw] [-paging hitsPerPage]"; + "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-query string] [-raw] [-paging hitsPerPage]\n\nSee http://lucene.apache.org/java/4_0/demo.html for details."; usage += "\n\tSpecify 'false' for hitsPerPage to use streaming instead of paging search."; if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) { System.out.println(usage); @@ -60,9 +59,10 @@ public class SearchFiles { int repeat = 0; boolean raw = false; boolean paging = true; + String queryString = null; int hitsPerPage = 10; - for (int i = 0; i < args.length; i++) { + for(int i = 0;i < args.length;i++) { if ("-index".equals(args[i])) { index = args[i+1]; i++; @@ -72,6 +72,9 @@ public class SearchFiles { } else if ("-queries".equals(args[i])) { queries = args[i+1]; i++; + } else if ("-query".equals(args[i])) { + queryString = args[i+1]; + i++; } else if ("-repeat".equals(args[i])) { repeat = Integer.parseInt(args[i+1]); i++; @@ -90,10 +93,8 @@ public class SearchFiles { } } - IndexReader reader = IndexReader.open(FSDirectory.open(new File(index)), true); // only searching, so read-only=true - - IndexSearcher searcher = new IndexSearcher(reader); - Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT); + IndexSearcher searcher = new IndexSearcher(FSDirectory.open(new File(index))); + Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40); BufferedReader in = null; if (queries != null) { @@ -101,23 +102,25 @@ public class SearchFiles { } else { in = new BufferedReader(new InputStreamReader(System.in, "UTF-8")); } - QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, field, analyzer); + QueryParser parser = new QueryParser(Version.LUCENE_40, field, analyzer); while (true) { - if (queries == null) // 
prompt the user + if (queries == null && queryString == null) { // prompt the user System.out.println("Enter query: "); + } - String line = in.readLine(); + String line = queryString != null ? queryString : in.readLine(); - if (line == null || line.length() == -1) + if (line == null || line.length() == -1) { break; + } line = line.trim(); - if (line.length() == 0) + if (line.length() == 0) { break; + } Query query = parser.parse(line); System.out.println("Searching for: " + query.toString(field)); - if (repeat > 0) { // repeat & time as benchmark Date start = new Date(); @@ -129,16 +132,20 @@ public class SearchFiles { } if (paging) { - doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null); + doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null && queryString == null); } else { doStreamingSearch(searcher, query); } + + if (queryString != null) { + break; + } } - reader.close(); + searcher.close(); } /** - * This method uses a custom HitCollector implementation which simply prints out + * This method uses a custom Collector implementation which simply prints out * the docId and score of every matching document. 
* * This simulates the streaming search use case, where all hits are supposed to @@ -190,12 +197,10 @@ public class SearchFiles { int hitsPerPage, boolean raw, boolean interactive) throws IOException { // Collect enough docs to show 5 pages - TopScoreDocCollector collector = TopScoreDocCollector.create( - 5 * hitsPerPage, false); - searcher.search(query, collector); - ScoreDoc[] hits = collector.topDocs().scoreDocs; + TopDocs results = searcher.search(query, 5 * hitsPerPage); + ScoreDoc[] hits = results.scoreDocs; - int numTotalHits = collector.getTotalHits(); + int numTotalHits = results.totalHits; System.out.println(numTotalHits + " total matching documents"); int start = 0; @@ -210,9 +215,7 @@ public class SearchFiles { break; } - collector = TopScoreDocCollector.create(numTotalHits, false); - searcher.search(query, collector); - hits = collector.topDocs().scoreDocs; + hits = searcher.search(query, numTotalHits).scoreDocs; } end = Math.min(hits.length, start + hitsPerPage); @@ -279,8 +282,6 @@ public class SearchFiles { if (quit) break; end = Math.min(numTotalHits, start + hitsPerPage); } - } - } } diff --git a/lucene/contrib/demo/src/java/org/apache/lucene/demo/html/Test.java b/lucene/contrib/demo/src/java/org/apache/lucene/demo/html/Test.java deleted file mode 100644 index 224ae5e4784..00000000000 --- a/lucene/contrib/demo/src/java/org/apache/lucene/demo/html/Test.java +++ /dev/null @@ -1,51 +0,0 @@ -package org.apache.lucene.demo.html; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.io.*; - -class Test { - public static void main(String[] argv) throws IOException, InterruptedException { - if ("-dir".equals(argv[0])) { - String[] files = new File(argv[1]).list(); - java.util.Arrays.sort(files); - for (int i = 0; i < files.length; i++) { - System.err.println(files[i]); - File file = new File(argv[1], files[i]); - parse(file); - } - } else - parse(new File(argv[0])); - } - - public static void parse(File file) throws IOException, InterruptedException { - FileInputStream fis = null; - try { - fis = new FileInputStream(file); - HTMLParser parser = new HTMLParser(fis); - System.out.println("Title: " + Entities.encode(parser.getTitle())); - System.out.println("Summary: " + Entities.encode(parser.getSummary())); - System.out.println("Content:"); - LineNumberReader reader = new LineNumberReader(parser.getReader()); - for (String l = reader.readLine(); l != null; l = reader.readLine()) - System.out.println(l); - } finally { - if (fis != null) fis.close(); - } - } -} diff --git a/lucene/contrib/demo/src/jsp/README.txt b/lucene/contrib/demo/src/jsp/README.txt deleted file mode 100644 index 31ae0637865..00000000000 --- a/lucene/contrib/demo/src/jsp/README.txt +++ /dev/null @@ -1,8 +0,0 @@ -To build the Apache Lucene web app demo just run -"ant war-demo" from the Apache Lucene Installation -directory (follow the master instructions in -BUILD.txt). If you have questions please post -them to the Apache Lucene mailing lists. 
To -actually figure this out you really need to -read the Lucene "Getting Started" guide provided -with the doc build ("ant docs"). diff --git a/lucene/contrib/demo/src/jsp/WEB-INF/web.xml b/lucene/contrib/demo/src/jsp/WEB-INF/web.xml deleted file mode 100755 index d6740d2c4b7..00000000000 --- a/lucene/contrib/demo/src/jsp/WEB-INF/web.xml +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - diff --git a/lucene/contrib/demo/src/jsp/configuration.jsp b/lucene/contrib/demo/src/jsp/configuration.jsp deleted file mode 100644 index eb0bcfea726..00000000000 --- a/lucene/contrib/demo/src/jsp/configuration.jsp +++ /dev/null @@ -1,22 +0,0 @@ - -<% -String appTitle = "Apache Lucene Example - Intranet Server Search Application"; -/* make sure you point the below string to the index you created with IndexHTML */ -String indexLocation = "/opt/lucene/index"; -String appfooter = "Apache Lucene Template WebApp 1.0"; -%> diff --git a/lucene/contrib/demo/src/jsp/footer.jsp b/lucene/contrib/demo/src/jsp/footer.jsp deleted file mode 100644 index 44127a0c3c0..00000000000 --- a/lucene/contrib/demo/src/jsp/footer.jsp +++ /dev/null @@ -1,21 +0,0 @@ - -

        - <%=appfooter%> -

        - - diff --git a/lucene/contrib/demo/src/jsp/header.jsp b/lucene/contrib/demo/src/jsp/header.jsp deleted file mode 100644 index 3806d7a1608..00000000000 --- a/lucene/contrib/demo/src/jsp/header.jsp +++ /dev/null @@ -1,26 +0,0 @@ - -<%@include file="configuration.jsp"%> - - - <%=appTitle%> - - - -

        -Welcome to the Lucene Template application. (This is the header) -

        diff --git a/lucene/contrib/demo/src/jsp/index.jsp b/lucene/contrib/demo/src/jsp/index.jsp deleted file mode 100755 index 5e637212fce..00000000000 --- a/lucene/contrib/demo/src/jsp/index.jsp +++ /dev/null @@ -1,29 +0,0 @@ - -<%@include file="header.jsp"%> -
        -
        -

        -  Search Criteria -

        -

        -  Results Per Page  - -

        -
        -
        -<%@include file="footer.jsp"%> diff --git a/lucene/contrib/demo/src/jsp/results.jsp b/lucene/contrib/demo/src/jsp/results.jsp deleted file mode 100755 index 90cc0201dc2..00000000000 --- a/lucene/contrib/demo/src/jsp/results.jsp +++ /dev/null @@ -1,179 +0,0 @@ - -<%@ page import = " javax.servlet.*, javax.servlet.http.*, java.io.*, org.apache.lucene.analysis.*, org.apache.lucene.analysis.standard.StandardAnalyzer, org.apache.lucene.document.*, org.apache.lucene.index.*, org.apache.lucene.store.*, org.apache.lucene.search.*, org.apache.lucene.queryParser.*, org.apache.lucene.demo.*, org.apache.lucene.demo.html.Entities, java.net.URLEncoder, org.apache.lucene.util.Version" %> - -<% -/* - - This jsp page is deliberatly written in the horrible java directly embedded - in the page style for an easy and concise demonstration of Lucene. - Due note...if you write pages that look like this...sooner or later - you'll have a maintenance nightmare. If you use jsps...use taglibs - and beans! That being said, this should be acceptable for a small - page demonstrating how one uses Lucene in a web app. - - This is also deliberately overcommented. ;-) - -*/ -%> -<%! 
-public String escapeHTML(String s) { - s = s.replaceAll("&", "&"); - s = s.replaceAll("<", "<"); - s = s.replaceAll(">", ">"); - s = s.replaceAll("\"", """); - s = s.replaceAll("'", "'"); - return s; -} -%> -<%@include file="header.jsp"%> -<% - boolean error = false; //used to control flow for error messages - String indexName = indexLocation; //local copy of the configuration variable - IndexSearcher searcher = null; //the searcher used to open/search the index - Query query = null; //the Query created by the QueryParser - TopDocs hits = null; //the search results - int startindex = 0; //the first index displayed on this page - int maxpage = 50; //the maximum items displayed on this page - String queryString = null; //the query entered in the previous page - String startVal = null; //string version of startindex - String maxresults = null; //string version of maxpage - int thispage = 0; //used for the for/next either maxpage or - //hits.totalHits - startindex - whichever is - //less - - try { - IndexReader reader = IndexReader.open(FSDirectory.open(new File(indexName)), true); // only searching, so read-only=true - searcher = new IndexSearcher(reader); //create an indexSearcher for our page - //NOTE: this operation is slow for large - //indices (much slower than the search itself) - //so you might want to keep an IndexSearcher - //open - - } catch (Exception e) { //any error that happens is probably due - //to a permission problem or non-existant - //or otherwise corrupt index -%> -

        ERROR opening the Index - contact sysadmin!

        -

        Error message: <%=escapeHTML(e.getMessage())%>

        -<% error = true; //don't do anything up to the footer - } -%> -<% - if (error == false) { //did we open the index? - queryString = request.getParameter("query"); //get the search criteria - startVal = request.getParameter("startat"); //get the start index - maxresults = request.getParameter("maxresults"); //get max results per page - try { - maxpage = Integer.parseInt(maxresults); //parse the max results first - startindex = Integer.parseInt(startVal); //then the start index - } catch (Exception e) { } //we don't care if something happens we'll just start at 0 - //or end at 50 - - - - if (queryString == null) - throw new ServletException("no query "+ //if you don't have a query then - "specified"); //you probably played on the - //query string so you get the - //treatment - - Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT); //construct our usual analyzer - try { - QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "contents", analyzer); - query = qp.parse(queryString); //parse the - } catch (ParseException e) { //query and construct the Query - //object - //if it's just "operator error" - //send them a nice error HTML - -%> -

        Error while parsing query: <%=escapeHTML(e.getMessage())%>

        -<% - error = true; //don't bother with the rest of - //the page - } - } -%> -<% - if (error == false && searcher != null) { // if we've had no errors - // searcher != null was to handle - // a weird compilation bug - thispage = maxpage; // default last element to maxpage - hits = searcher.search(query, maxpage + startindex); // run the query - if (hits.totalHits == 0) { // if we got no results tell the user -%> -

        I'm sorry I couldn't find what you were looking for.

        -<% - error = true; // don't bother with the rest of the - // page - } - } - - if (error == false && searcher != null) { -%> - - - - - -<% - if ((startindex + maxpage) > hits.totalHits) { - thispage = hits.totalHits - startindex; // set the max index to maxpage or last - } // actual search result whichever is less - - for (int i = startindex; i < (thispage + startindex); i++) { // for each element -%> - -<% - Document doc = searcher.doc(hits.scoreDocs[i].doc); //get the next document - String doctitle = doc.get("title"); //get its title - String url = doc.get("path"); //get its path field - if (url != null && url.startsWith("../webapps/")) { // strip off ../webapps prefix if present - url = url.substring(10); - } - if ((doctitle == null) || doctitle.equals("")) //use the path if it has no title - doctitle = url; - //then output! -%> - - - -<% - } -%> -<% if ( (startindex + maxpage) < hits.totalHits) { //if there are more results...display - //the more link - - String moreurl="results.jsp?query=" + - URLEncoder.encode(queryString) + //construct the "more" link - "&maxresults=" + maxpage + - "&startat=" + (startindex + maxpage); -%> - - - -<% - } -%> -
        DocumentSummary
        <%=doctitle%><%=doc.get("summary")%>
        More Results>>
        - -<% } //then include our footer. - if (searcher != null) - searcher.close(); -%> -<%@include file="footer.jsp"%> diff --git a/lucene/contrib/demo/src/test/org/apache/lucene/demo/TestDemo.java b/lucene/contrib/demo/src/test/org/apache/lucene/demo/TestDemo.java index 6fbe35187e8..4457ef7aae3 100644 --- a/lucene/contrib/demo/src/test/org/apache/lucene/demo/TestDemo.java +++ b/lucene/contrib/demo/src/test/org/apache/lucene/demo/TestDemo.java @@ -24,43 +24,30 @@ import java.io.PrintStream; import org.apache.lucene.util.LuceneTestCase; public class TestDemo extends LuceneTestCase { - // LUCENE-589 - public void testUnicodeHtml() throws Exception { - File dir = getDataFile("test-files/html"); - File indexDir = new File(TEMP_DIR, "demoIndex"); - IndexHTML.main(new String[] { "-create", "-index", indexDir.getPath(), dir.getPath() }); - File queries = getDataFile("test-files/queries.txt"); + + private void testOneSearch(String query, int expectedHitCount) throws Exception { PrintStream outSave = System.out; try { ByteArrayOutputStream bytes = new ByteArrayOutputStream(); PrintStream fakeSystemOut = new PrintStream(bytes); System.setOut(fakeSystemOut); - SearchFiles.main(new String[] { "-index", indexDir.getPath(), "-queries", queries.getPath()}); + SearchFiles.main(new String[] {"-query", query}); fakeSystemOut.flush(); String output = bytes.toString(); // intentionally use default encoding - assertTrue(output.contains("1 total matching documents")); + assertTrue("output=" + output, output.contains(expectedHitCount + " total matching documents")); } finally { System.setOut(outSave); } } - - // LUCENE-591 - public void testIndexKeywords() throws Exception { - File dir = getDataFile("test-files/html"); - File indexDir = new File(TEMP_DIR, "demoIndex2"); - IndexHTML.main(new String[] { "-create", "-index", indexDir.getPath(), dir.getPath() }); - File queries = getDataFile("test-files/queries2.txt"); - PrintStream outSave = System.out; - try { - ByteArrayOutputStream 
bytes = new ByteArrayOutputStream(); - PrintStream fakeSystemOut = new PrintStream(bytes); - System.setOut(fakeSystemOut); - SearchFiles.main(new String[] { "-index", indexDir.getPath(), "-queries", queries.getPath()}); - fakeSystemOut.flush(); - String output = bytes.toString(); // intentionally use default encoding - assertTrue(output.contains("1 total matching documents")); - } finally { - System.setOut(outSave); - } + + public void testIndexSearch() throws Exception { + File dir = getDataFile("test-files/docs"); + IndexFiles.main(new String[] { "-create", "-docs", dir.getPath() }); + testOneSearch("apache", 3); + testOneSearch("patent", 8); + testOneSearch("lucene", 0); + testOneSearch("gnu", 6); + testOneSearch("derivative", 8); + testOneSearch("license", 13); } } diff --git a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/apache1.0.txt b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/apache1.0.txt new file mode 100644 index 00000000000..5b1250d8c2a --- /dev/null +++ b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/apache1.0.txt @@ -0,0 +1,56 @@ +/* ==================================================================== + * Copyright (c) 1995-1999 The Apache Group. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. 
All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the Apache Group + * for use in the Apache HTTP server project (http://www.apache.org/)." + * + * 4. The names "Apache Server" and "Apache Group" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * apache@apache.org. + * + * 5. Products derived from this software may not be called "Apache" + * nor may "Apache" appear in their names without prior written + * permission of the Apache Group. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the Apache Group + * for use in the Apache HTTP server project (http://www.apache.org/)." + * + * THIS SOFTWARE IS PROVIDED BY THE APACHE GROUP ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE APACHE GROUP OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * ==================================================================== + * + * This software consists of voluntary contributions made by many + * individuals on behalf of the Apache Group and was originally based + * on public domain software written at the National Center for + * Supercomputing Applications, University of Illinois, Urbana-Champaign. + * For more information on the Apache Group and the Apache HTTP server + * project, please see . + * + */ diff --git a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/apache1.1.txt b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/apache1.1.txt new file mode 100644 index 00000000000..de6706f26e0 --- /dev/null +++ b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/apache1.1.txt @@ -0,0 +1,58 @@ +/* ==================================================================== + * The Apache Software License, Version 1.1 + * + * Copyright (c) 2000 The Apache Software Foundation. All rights + * reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. The end-user documentation included with the redistribution, + * if any, must include the following acknowledgment: + * "This product includes software developed by the + * Apache Software Foundation (http://www.apache.org/)." + * Alternately, this acknowledgment may appear in the software itself, + * if and wherever such third-party acknowledgments normally appear. + * + * 4. 
The names "Apache" and "Apache Software Foundation" must + * not be used to endorse or promote products derived from this + * software without prior written permission. For written + * permission, please contact apache@apache.org. + * + * 5. Products derived from this software may not be called "Apache", + * nor may "Apache" appear in their name, without prior written + * permission of the Apache Software Foundation. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * ==================================================================== + * + * This software consists of voluntary contributions made by many + * individuals on behalf of the Apache Software Foundation. For more + * information on the Apache Software Foundation, please see + * . + * + * Portions of this software are based upon public domain software + * originally written at the National Center for Supercomputing Applications, + * University of Illinois, Urbana-Champaign. 
+ */ + diff --git a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/apache2.0.txt b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/apache2.0.txt new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/apache2.0.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/cpl1.0.txt b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/cpl1.0.txt new file mode 100644 index 00000000000..8e71e98d3be --- /dev/null +++ b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/cpl1.0.txt @@ -0,0 +1,74 @@ +Common Public License - V1.0 + + + +03 Mar 2005 + +THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS COMMON PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + + 1. DEFINITIONS + + "Contribution" means: + 1. in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and + 2. in the case of each subsequent Contributor: + 1. changes to the Program, and + 2. additions to the Program; + + where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program. 
+ + "Contributor" means any person or entity that distributes the Program. + + "Licensed Patents " mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program. + + "Program" means the Contributions distributed in accordance with this Agreement. + + "Recipient" means anyone who receives the Program under this Agreement, including all Contributors. + 2. GRANT OF RIGHTS + + 1. Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form. + + 2. Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder. + + 3. Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. 
As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program. + + 4. Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement. + + 3. REQUIREMENTS + + A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that: + 1. it complies with the terms and conditions of this Agreement; and + 2. its license agreement: + 1. effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose; + 2. effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits; + 3. states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and + 4. states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange. + + When the Program is made available in source code form: + 1. it must be made available under this Agreement; and + 2. a copy of this Agreement must be included with each copy of the Program. + + Contributors may not remove or alter any copyright notices contained within the Program. 
+ + Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution. + 4. COMMERCIAL DISTRIBUTION + + Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense. + + For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. 
If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages. + 5. NO WARRANTY + + EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement, including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. + 6. DISCLAIMER OF LIABILITY + + EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + 7. 
GENERAL + + If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. + + If Recipient institutes patent litigation against a Contributor with respect to a patent applicable to software (including a cross-claim or counterclaim in a lawsuit), then any patent licenses granted by that Contributor to such Recipient under this Agreement shall terminate as of the date such litigation is filed. In addition, if Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed. + + All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive. + + Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. 
No one other than the Agreement Steward has the right to modify this Agreement. IBM is the initial Agreement Steward. IBM may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved. + + This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation. diff --git a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/epl1.0.txt b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/epl1.0.txt new file mode 100644 index 00000000000..f61d34dfe34 --- /dev/null +++ b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/epl1.0.txt @@ -0,0 +1,88 @@ +Eclipse Public License - v 1.0 + +THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +1. 
DEFINITIONS + +"Contribution" means: + +a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and +b) in the case of each subsequent Contributor: + +i) changes to the Program, and + +ii) additions to the Program; + +where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program. + +"Contributor" means any person or entity that distributes the Program. + +"Licensed Patents " mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program. + +"Program" means the Contributions distributed in accordance with this Agreement. + +"Recipient" means anyone who receives the Program under this Agreement, including all Contributors. + +2. GRANT OF RIGHTS + +a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form. + +b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. 
This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder. + +c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program. + +d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement. + +3. 
REQUIREMENTS + +A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that: + +a) it complies with the terms and conditions of this Agreement; and + +b) its license agreement: + +i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose; + +ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits; + +iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and + +iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange. + +When the Program is made available in source code form: + +a) it must be made available under this Agreement; and + +b) a copy of this Agreement must be included with each copy of the Program. + +Contributors may not remove or alter any copyright notices contained within the Program. + +Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution. + +4. COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. 
Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense. + +For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages. + +5. 
NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. + +6. DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +7. GENERAL + +If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. 
+ +If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive. + +Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. 
Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved. + +This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation. + + diff --git a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/freebsd.txt b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/freebsd.txt new file mode 100644 index 00000000000..f9c9e496210 --- /dev/null +++ b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/freebsd.txt @@ -0,0 +1,10 @@ +Copyright 1994-2008 The FreeBSD Project. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FREEBSD PROJECT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of the FreeBSD Project. diff --git a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/gpl1.0.txt b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/gpl1.0.txt new file mode 100644 index 00000000000..e999b366f6c --- /dev/null +++ b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/gpl1.0.txt @@ -0,0 +1,250 @@ + GNU GENERAL PUBLIC LICENSE + Version 1, February 1989 + + Copyright (C) 1989 Free Software Foundation, Inc. + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The license agreements of most software companies try to keep users +at the mercy of those companies. By contrast, our General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. The +General Public License applies to the Free Software Foundation's +software and to any other program whose authors commit to using it. +You can use it for your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Specifically, the General Public License is designed to make +sure that you have the freedom to give away or sell copies of free +software, that you receive source code or can get it if you want it, +that you can change the software or use pieces of it in new free +programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of a such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must tell them their rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any program or other work which +contains a notice placed by the copyright holder saying it may be +distributed under the terms of this General Public License. 
The +"Program", below, refers to any such program or work, and a "work based +on the Program" means either the Program or any work containing the +Program or a portion of it, either verbatim or with modifications. Each +licensee is addressed as "you". + + 1. You may copy and distribute verbatim copies of the Program's source +code as you receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice and +disclaimer of warranty; keep intact all the notices that refer to this +General Public License and to the absence of any warranty; and give any +other recipients of the Program a copy of this General Public License +along with the Program. You may charge a fee for the physical act of +transferring a copy. + + 2. You may modify your copy or copies of the Program or any portion of +it, and copy and distribute such modifications under the terms of Paragraph +1 above, provided that you also do the following: + + a) cause the modified files to carry prominent notices stating that + you changed the files and the date of any change; and + + b) cause the whole of any work that you distribute or publish, that + in whole or in part contains the Program or any part thereof, either + with or without modifications, to be licensed at no charge to all + third parties under the terms of this General Public License (except + that you may choose to grant warranty protection to some or all + third parties, at your option). + + c) If the modified program normally reads commands interactively when + run, you must cause it, when started running for such interactive use + in the simplest and most usual way, to print or display an + announcement including an appropriate copyright notice and a notice + that there is no warranty (or else, saying that you provide a + warranty) and that users may redistribute the program under these + conditions, and telling the user how to view a copy of this General + Public License. 
+ + d) You may charge a fee for the physical act of transferring a + copy, and you may at your option offer warranty protection in + exchange for a fee. + +Mere aggregation of another independent work with the Program (or its +derivative) on a volume of a storage or distribution medium does not bring +the other work under the scope of these terms. + + 3. You may copy and distribute the Program (or a portion or derivative of +it, under Paragraph 2) in object code or executable form under the terms of +Paragraphs 1 and 2 above provided that you also do one of the following: + + a) accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of + Paragraphs 1 and 2 above; or, + + b) accompany it with a written offer, valid for at least three + years, to give any third party free (except for a nominal charge + for the cost of distribution) a complete machine-readable copy of the + corresponding source code, to be distributed under the terms of + Paragraphs 1 and 2 above; or, + + c) accompany it with the information you received as to where the + corresponding source code may be obtained. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form alone.) + +Source code for a work means the preferred form of the work for making +modifications to it. For an executable file, complete source code means +all the source code for all modules it contains; but, as a special +exception, it need not include source code for modules which are standard +libraries that accompany the operating system on which the executable +file runs, or for standard header files or definitions files that +accompany that operating system. + + 4. You may not copy, modify, sublicense, distribute or transfer the +Program except as expressly provided under this General Public License. 
+Any attempt otherwise to copy, modify, sublicense, distribute or transfer +the Program is void, and will automatically terminate your rights to use +the Program under this License. However, parties who have received +copies, or rights to use copies, from you under this General Public +License will not have their licenses terminated so long as such parties +remain in full compliance. + + 5. By copying, distributing or modifying the Program (or any work based +on the Program) you indicate your acceptance of this license to do so, +and all its terms and conditions. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the original +licensor to copy, distribute or modify the Program subject to these +terms and conditions. You may not impose any further restrictions on the +recipients' exercise of the rights granted herein. + + 7. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of the license which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +the license, you may choose any version ever published by the Free Software +Foundation. + + 8. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. 
Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 9. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 10. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + Appendix: How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to humanity, the best way to achieve this is to make it +free software which everyone can redistribute and change under these +terms. + + To do so, attach the following notices to the program. 
It is safest to +attach them to the start of each source file to most effectively convey +the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) 19yy + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 1, or (at your option) + any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) 19xx name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the +appropriate parts of the General Public License. Of course, the +commands you use may be called something other than `show w' and `show +c'; they could even be mouse-clicks or menu items--whatever suits your +program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. 
Here a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + program `Gnomovision' (a program to direct compilers to make passes + at assemblers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +That's all there is to it! diff --git a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/gpl2.0.txt b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/gpl2.0.txt new file mode 100644 index 00000000000..d511905c164 --- /dev/null +++ b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/gpl2.0.txt @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. 
+ + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. 
The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. 
+ + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. 
+ +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. 
+You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. 
diff --git a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/gpl3.0.txt b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/gpl3.0.txt new file mode 100644 index 00000000000..94a9ed024d3 --- /dev/null +++ b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/gpl3.0.txt @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. 
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. 
+ + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/lgpl2.1.txt b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/lgpl2.1.txt new file mode 100644 index 00000000000..5ab7695ab8c --- /dev/null +++ b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/lgpl2.1.txt @@ -0,0 +1,504 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. 
+ + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. 
We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. 
+ + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. 
+ + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. 
+ + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. 
You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. 
+ + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. 
If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. 
+ + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. 
However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. 
+ +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. 
If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! + + diff --git a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/lgpl3.txt b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/lgpl3.txt new file mode 100644 index 00000000000..fc8a5de7edf --- /dev/null +++ b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/lgpl3.txt @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. 
+Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. 
You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. 
A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. 
+ + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/lpgl2.0.txt b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/lpgl2.0.txt new file mode 100644 index 00000000000..b6bd5176305 --- /dev/null +++ b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/lpgl2.0.txt @@ -0,0 +1,481 @@ + GNU LIBRARY GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1991 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the library GPL. It is + numbered 2 because it goes with version 2 of the ordinary GPL.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. 
+ + This license, the Library General Public License, applies to some +specially designated Free Software Foundation software, and to any +other libraries whose authors decide to use it. You can use it for +your libraries, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if +you distribute copies of the library, or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link a program with the library, you must provide +complete object files to the recipients so that they can relink them +with the library, after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + Our method of protecting your rights has two steps: (1) copyright +the library, and (2) offer you this license which gives you legal +permission to copy, distribute and/or modify the library. + + Also, for each distributor's protection, we want to make certain +that everyone understands that there is no warranty for this free +library. If the library is modified by someone else and passed on, we +want its recipients to know that what they have is not the original +version, so that any problems introduced by others will not reflect on +the original authors' reputations. 
+ + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that companies distributing free +software will individually obtain patent licenses, thus in effect +transforming the program into proprietary software. To prevent this, +we have made it clear that any patent must be licensed for everyone's +free use or not licensed at all. + + Most GNU software, including some libraries, is covered by the ordinary +GNU General Public License, which was designed for utility programs. This +license, the GNU Library General Public License, applies to certain +designated libraries. This license is quite different from the ordinary +one; be sure to read it in full, and don't assume that anything in it is +the same as in the ordinary license. + + The reason we have a separate public license for some libraries is that +they blur the distinction we usually make between modifying or adding to a +program and simply using it. Linking a program with a library, without +changing the library, is in some sense simply using the library, and is +analogous to running a utility program or application program. However, in +a textual and legal sense, the linked executable is a combined work, a +derivative of the original library, and the ordinary General Public License +treats it as such. + + Because of this blurred distinction, using the ordinary General +Public License for libraries did not effectively promote software +sharing, because most developers did not use the libraries. We +concluded that weaker conditions might promote sharing better. + + However, unrestricted linking of non-free programs would deprive the +users of those programs of all benefit from the free status of the +libraries themselves. This Library General Public License is intended to +permit developers of non-free programs to use free libraries, while +preserving your freedom as a user of such programs to change the free +libraries that are incorporated in them. 
(We have not seen how to achieve +this as regards changes in header files, but we have achieved it as regards +changes in the actual functions of the Library.) The hope is that this +will lead to faster development of free libraries. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, while the latter only +works together with the library. + + Note that it is possible for a library to be covered by the ordinary +General Public License rather than by this special one. + + GNU LIBRARY GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library which +contains a notice placed by the copyright holder or other authorized +party saying it may be distributed under the terms of this Library +General Public License (also called "this License"). Each licensee is +addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. 
For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. 
+ + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. 
You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. 
+ + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also compile or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. 
If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + c) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + d) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the source code distributed need not include anything that is normally +distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. 
+ + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. 
Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. 
+ +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Library General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. 
If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) <year> <name of author> + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Library General Public + License as published by the Free Software Foundation; either + version 2 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Library General Public License for more details. + + You should have received a copy of the GNU Library General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + <signature of Ty Coon>, 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! 
diff --git a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/mit.txt b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/mit.txt new file mode 100644 index 00000000000..a45fad6d653 --- /dev/null +++ b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/mit.txt @@ -0,0 +1,22 @@ +Copyright (c) <year> <copyright holders> + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. diff --git a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/mozilla1.1.txt b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/mozilla1.1.txt new file mode 100644 index 00000000000..7714141d154 --- /dev/null +++ b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/mozilla1.1.txt @@ -0,0 +1,470 @@ + MOZILLA PUBLIC LICENSE + Version 1.1 + + --------------- + +1. Definitions. + + 1.0.1. "Commercial Use" means distribution or otherwise making the + Covered Code available to a third party. + + 1.1. 
"Contributor" means each entity that creates or contributes to + the creation of Modifications. + + 1.2. "Contributor Version" means the combination of the Original + Code, prior Modifications used by a Contributor, and the Modifications + made by that particular Contributor. + + 1.3. "Covered Code" means the Original Code or Modifications or the + combination of the Original Code and Modifications, in each case + including portions thereof. + + 1.4. "Electronic Distribution Mechanism" means a mechanism generally + accepted in the software development community for the electronic + transfer of data. + + 1.5. "Executable" means Covered Code in any form other than Source + Code. + + 1.6. "Initial Developer" means the individual or entity identified + as the Initial Developer in the Source Code notice required by Exhibit + A. + + 1.7. "Larger Work" means a work which combines Covered Code or + portions thereof with code not governed by the terms of this License. + + 1.8. "License" means this document. + + 1.8.1. "Licensable" means having the right to grant, to the maximum + extent possible, whether at the time of the initial grant or + subsequently acquired, any and all of the rights conveyed herein. + + 1.9. "Modifications" means any addition to or deletion from the + substance or structure of either the Original Code or any previous + Modifications. When Covered Code is released as a series of files, a + Modification is: + A. Any addition to or deletion from the contents of a file + containing Original Code or previous Modifications. + + B. Any new file that contains any part of the Original Code or + previous Modifications. + + 1.10. "Original Code" means Source Code of computer software code + which is described in the Source Code notice required by Exhibit A as + Original Code, and which, at the time of its release under this + License is not already Covered Code governed by this License. + + 1.10.1. 
"Patent Claims" means any patent claim(s), now owned or + hereafter acquired, including without limitation, method, process, + and apparatus claims, in any patent Licensable by grantor. + + 1.11. "Source Code" means the preferred form of the Covered Code for + making modifications to it, including all modules it contains, plus + any associated interface definition files, scripts used to control + compilation and installation of an Executable, or source code + differential comparisons against either the Original Code or another + well known, available Covered Code of the Contributor's choice. The + Source Code can be in a compressed or archival form, provided the + appropriate decompression or de-archiving software is widely available + for no charge. + + 1.12. "You" (or "Your") means an individual or a legal entity + exercising rights under, and complying with all of the terms of, this + License or a future version of this License issued under Section 6.1. + For legal entities, "You" includes any entity which controls, is + controlled by, or is under common control with You. For purposes of + this definition, "control" means (a) the power, direct or indirect, + to cause the direction or management of such entity, whether by + contract or otherwise, or (b) ownership of more than fifty percent + (50%) of the outstanding shares or beneficial ownership of such + entity. + +2. Source Code License. + + 2.1. The Initial Developer Grant. 
+ The Initial Developer hereby grants You a world-wide, royalty-free, + non-exclusive license, subject to third party intellectual property + claims: + (a) under intellectual property rights (other than patent or + trademark) Licensable by Initial Developer to use, reproduce, + modify, display, perform, sublicense and distribute the Original + Code (or portions thereof) with or without Modifications, and/or + as part of a Larger Work; and + + (b) under Patents Claims infringed by the making, using or + selling of Original Code, to make, have made, use, practice, + sell, and offer for sale, and/or otherwise dispose of the + Original Code (or portions thereof). + + (c) the licenses granted in this Section 2.1(a) and (b) are + effective on the date Initial Developer first distributes + Original Code under the terms of this License. + + (d) Notwithstanding Section 2.1(b) above, no patent license is + granted: 1) for code that You delete from the Original Code; 2) + separate from the Original Code; or 3) for infringements caused + by: i) the modification of the Original Code or ii) the + combination of the Original Code with other software or devices. + + 2.2. Contributor Grant. 
+ Subject to third party intellectual property claims, each Contributor + hereby grants You a world-wide, royalty-free, non-exclusive license + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Contributor, to use, reproduce, modify, + display, perform, sublicense and distribute the Modifications + created by such Contributor (or portions thereof) either on an + unmodified basis, with other Modifications, as Covered Code + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using, or + selling of Modifications made by that Contributor either alone + and/or in combination with its Contributor Version (or portions + of such combination), to make, use, sell, offer for sale, have + made, and/or otherwise dispose of: 1) Modifications made by that + Contributor (or portions thereof); and 2) the combination of + Modifications made by that Contributor with its Contributor + Version (or portions of such combination). + + (c) the licenses granted in Sections 2.2(a) and 2.2(b) are + effective on the date Contributor first makes Commercial Use of + the Covered Code. + + (d) Notwithstanding Section 2.2(b) above, no patent license is + granted: 1) for any code that Contributor has deleted from the + Contributor Version; 2) separate from the Contributor Version; + 3) for infringements caused by: i) third party modifications of + Contributor Version or ii) the combination of Modifications made + by that Contributor with other software (except as part of the + Contributor Version) or other devices; or 4) under Patent Claims + infringed by Covered Code in the absence of Modifications made by + that Contributor. + +3. Distribution Obligations. + + 3.1. Application of License. + The Modifications which You create or to which You contribute are + governed by the terms of this License, including without limitation + Section 2.2. 
The Source Code version of Covered Code may be + distributed only under the terms of this License or a future version + of this License released under Section 6.1, and You must include a + copy of this License with every copy of the Source Code You + distribute. You may not offer or impose any terms on any Source Code + version that alters or restricts the applicable version of this + License or the recipients' rights hereunder. However, You may include + an additional document offering the additional rights described in + Section 3.5. + + 3.2. Availability of Source Code. + Any Modification which You create or to which You contribute must be + made available in Source Code form under the terms of this License + either on the same media as an Executable version or via an accepted + Electronic Distribution Mechanism to anyone to whom you made an + Executable version available; and if made available via Electronic + Distribution Mechanism, must remain available for at least twelve (12) + months after the date it initially became available, or at least six + (6) months after a subsequent version of that particular Modification + has been made available to such recipients. You are responsible for + ensuring that the Source Code version remains available even if the + Electronic Distribution Mechanism is maintained by a third party. + + 3.3. Description of Modifications. + You must cause all Covered Code to which You contribute to contain a + file documenting the changes You made to create that Covered Code and + the date of any change. You must include a prominent statement that + the Modification is derived, directly or indirectly, from Original + Code provided by the Initial Developer and including the name of the + Initial Developer in (a) the Source Code, and (b) in any notice in an + Executable version or related documentation in which You describe the + origin or ownership of the Covered Code. + + 3.4. Intellectual Property Matters + (a) Third Party Claims. 
+ If Contributor has knowledge that a license under a third party's + intellectual property rights is required to exercise the rights + granted by such Contributor under Sections 2.1 or 2.2, + Contributor must include a text file with the Source Code + distribution titled "LEGAL" which describes the claim and the + party making the claim in sufficient detail that a recipient will + know whom to contact. If Contributor obtains such knowledge after + the Modification is made available as described in Section 3.2, + Contributor shall promptly modify the LEGAL file in all copies + Contributor makes available thereafter and shall take other steps + (such as notifying appropriate mailing lists or newsgroups) + reasonably calculated to inform those who received the Covered + Code that new knowledge has been obtained. + + (b) Contributor APIs. + If Contributor's Modifications include an application programming + interface and Contributor has knowledge of patent licenses which + are reasonably necessary to implement that API, Contributor must + also include this information in the LEGAL file. + + (c) Representations. + Contributor represents that, except as disclosed pursuant to + Section 3.4(a) above, Contributor believes that Contributor's + Modifications are Contributor's original creation(s) and/or + Contributor has sufficient rights to grant the rights conveyed by + this License. + + 3.5. Required Notices. + You must duplicate the notice in Exhibit A in each file of the Source + Code. If it is not possible to put such notice in a particular Source + Code file due to its structure, then You must include such notice in a + location (such as a relevant directory) where a user would be likely + to look for such a notice. If You created one or more Modification(s) + You may add your name as a Contributor to the notice described in + Exhibit A. 
You must also duplicate this License in any documentation + for the Source Code where You describe recipients' rights or ownership + rights relating to Covered Code. You may choose to offer, and to + charge a fee for, warranty, support, indemnity or liability + obligations to one or more recipients of Covered Code. However, You + may do so only on Your own behalf, and not on behalf of the Initial + Developer or any Contributor. You must make it absolutely clear than + any such warranty, support, indemnity or liability obligation is + offered by You alone, and You hereby agree to indemnify the Initial + Developer and every Contributor for any liability incurred by the + Initial Developer or such Contributor as a result of warranty, + support, indemnity or liability terms You offer. + + 3.6. Distribution of Executable Versions. + You may distribute Covered Code in Executable form only if the + requirements of Section 3.1-3.5 have been met for that Covered Code, + and if You include a notice stating that the Source Code version of + the Covered Code is available under the terms of this License, + including a description of how and where You have fulfilled the + obligations of Section 3.2. The notice must be conspicuously included + in any notice in an Executable version, related documentation or + collateral in which You describe recipients' rights relating to the + Covered Code. You may distribute the Executable version of Covered + Code or ownership rights under a license of Your choice, which may + contain terms different from this License, provided that You are in + compliance with the terms of this License and that the license for the + Executable version does not attempt to limit or alter the recipient's + rights in the Source Code version from the rights set forth in this + License. 
If You distribute the Executable version under a different + license You must make it absolutely clear that any terms which differ + from this License are offered by You alone, not by the Initial + Developer or any Contributor. You hereby agree to indemnify the + Initial Developer and every Contributor for any liability incurred by + the Initial Developer or such Contributor as a result of any such + terms You offer. + + 3.7. Larger Works. + You may create a Larger Work by combining Covered Code with other code + not governed by the terms of this License and distribute the Larger + Work as a single product. In such a case, You must make sure the + requirements of this License are fulfilled for the Covered Code. + +4. Inability to Comply Due to Statute or Regulation. + + If it is impossible for You to comply with any of the terms of this + License with respect to some or all of the Covered Code due to + statute, judicial order, or regulation then You must: (a) comply with + the terms of this License to the maximum extent possible; and (b) + describe the limitations and the code they affect. Such description + must be included in the LEGAL file described in Section 3.4 and must + be included with all distributions of the Source Code. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Application of this License. + + This License applies to code to which the Initial Developer has + attached the notice in Exhibit A and to related Covered Code. + +6. Versions of the License. + + 6.1. New Versions. + Netscape Communications Corporation ("Netscape") may publish revised + and/or new versions of the License from time to time. Each version + will be given a distinguishing version number. + + 6.2. Effect of New Versions. 
+ Once Covered Code has been published under a particular version of the + License, You may always continue to use it under the terms of that + version. You may also choose to use such Covered Code under the terms + of any subsequent version of the License published by Netscape. No one + other than Netscape has the right to modify the terms applicable to + Covered Code created under this License. + + 6.3. Derivative Works. + If You create or use a modified version of this License (which you may + only do in order to apply it to code which is not already Covered Code + governed by this License), You must (a) rename Your license so that + the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape", + "MPL", "NPL" or any confusingly similar phrase do not appear in your + license (except to note that your license differs from this License) + and (b) otherwise make it clear that Your version of the license + contains terms which differ from the Mozilla Public License and + Netscape Public License. (Filling in the name of the Initial + Developer, Original Code or Contributor in the notice described in + Exhibit A shall not of themselves be deemed to be modifications of + this License.) + +7. DISCLAIMER OF WARRANTY. + + COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, + WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF + DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. + THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE + IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT, + YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE + COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER + OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF + ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. + +8. TERMINATION. + + 8.1. 
This License and the rights granted hereunder will terminate + automatically if You fail to comply with terms herein and fail to cure + such breach within 30 days of becoming aware of the breach. All + sublicenses to the Covered Code which are properly granted shall + survive any termination of this License. Provisions which, by their + nature, must remain in effect beyond the termination of this License + shall survive. + + 8.2. If You initiate litigation by asserting a patent infringement + claim (excluding declatory judgment actions) against Initial Developer + or a Contributor (the Initial Developer or Contributor against whom + You file such action is referred to as "Participant") alleging that: + + (a) such Participant's Contributor Version directly or indirectly + infringes any patent, then any and all rights granted by such + Participant to You under Sections 2.1 and/or 2.2 of this License + shall, upon 60 days notice from Participant terminate prospectively, + unless if within 60 days after receipt of notice You either: (i) + agree in writing to pay Participant a mutually agreeable reasonable + royalty for Your past and future use of Modifications made by such + Participant, or (ii) withdraw Your litigation claim with respect to + the Contributor Version against such Participant. If within 60 days + of notice, a reasonable royalty and payment arrangement are not + mutually agreed upon in writing by the parties or the litigation claim + is not withdrawn, the rights granted by Participant to You under + Sections 2.1 and/or 2.2 automatically terminate at the expiration of + the 60 day notice period specified above. 
+ + (b) any software, hardware, or device, other than such Participant's + Contributor Version, directly or indirectly infringes any patent, then + any rights granted to You by such Participant under Sections 2.1(b) + and 2.2(b) are revoked effective as of the date You first made, used, + sold, distributed, or had made, Modifications made by that + Participant. + + 8.3. If You assert a patent infringement claim against Participant + alleging that such Participant's Contributor Version directly or + indirectly infringes any patent where such claim is resolved (such as + by license or settlement) prior to the initiation of patent + infringement litigation, then the reasonable value of the licenses + granted by such Participant under Sections 2.1 or 2.2 shall be taken + into account in determining the amount or value of any payment or + license. + + 8.4. In the event of termination under Sections 8.1 or 8.2 above, + all end user license agreements (excluding distributors and resellers) + which have been validly granted by You or any distributor hereunder + prior to termination shall survive termination. + +9. LIMITATION OF LIABILITY. + + UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT + (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL + DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE, + OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR + ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY + CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL, + WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER + COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN + INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF + LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY + RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW + PROHIBITS SUCH LIMITATION. 
SOME JURISDICTIONS DO NOT ALLOW THE + EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO + THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. + +10. U.S. GOVERNMENT END USERS. + + The Covered Code is a "commercial item," as that term is defined in + 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer + software" and "commercial computer software documentation," as such + terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 + C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), + all U.S. Government End Users acquire Covered Code with only those + rights set forth herein. + +11. MISCELLANEOUS. + + This License represents the complete agreement concerning subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. This License shall be governed by + California law provisions (except to the extent applicable law, if + any, provides otherwise), excluding its conflict-of-law provisions. + With respect to disputes in which at least one party is a citizen of, + or an entity chartered or registered to do business in the United + States of America, any litigation relating to this License shall be + subject to the jurisdiction of the Federal Courts of the Northern + District of California, with venue lying in Santa Clara County, + California, with the losing party responsible for costs, including + without limitation, court costs and reasonable attorneys' fees and + expenses. The application of the United Nations Convention on + Contracts for the International Sale of Goods is expressly excluded. + Any law or regulation which provides that the language of a contract + shall be construed against the drafter shall not apply to this + License. + +12. RESPONSIBILITY FOR CLAIMS. 
+ + As between Initial Developer and the Contributors, each party is + responsible for claims and damages arising, directly or indirectly, + out of its utilization of rights under this License and You agree to + work with Initial Developer and Contributors to distribute such + responsibility on an equitable basis. Nothing herein is intended or + shall be deemed to constitute any admission of liability. + +13. MULTIPLE-LICENSED CODE. + + Initial Developer may designate portions of the Covered Code as + "Multiple-Licensed". "Multiple-Licensed" means that the Initial + Developer permits you to utilize portions of the Covered Code under + Your choice of the NPL or the alternative licenses, if any, specified + by the Initial Developer in the file described in Exhibit A. + +EXHIBIT A -Mozilla Public License. + + ``The contents of this file are subject to the Mozilla Public License + Version 1.1 (the "License"); you may not use this file except in + compliance with the License. You may obtain a copy of the License at + http://www.mozilla.org/MPL/ + + Software distributed under the License is distributed on an "AS IS" + basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the + License for the specific language governing rights and limitations + under the License. + + The Original Code is ______________________________________. + + The Initial Developer of the Original Code is ________________________. + Portions created by ______________________ are Copyright (C) ______ + _______________________. All Rights Reserved. + + Contributor(s): ______________________________________. + + Alternatively, the contents of this file may be used under the terms + of the _____ license (the "[___] License"), in which case the + provisions of [______] License are applicable instead of those + above. 
If you wish to allow use of your version of this file only + under the terms of the [____] License and not to allow others to use + your version of this file under the MPL, indicate your decision by + deleting the provisions above and replace them with the notice and + other provisions required by the [___] License. If you do not delete + the provisions above, a recipient may use your version of this file + under either the MPL or the [___] License." + + [NOTE: The text of this Exhibit A may differ slightly from the text of + the notices in the Source Code files of the Original Code. You should + use the text of this Exhibit A rather than the text found in the + Original Code Source Code for Your Modifications.] + diff --git a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/mozilla_eula_firefox3.txt b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/mozilla_eula_firefox3.txt new file mode 100644 index 00000000000..414afd7b324 --- /dev/null +++ b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/mozilla_eula_firefox3.txt @@ -0,0 +1,29 @@ +MOZILLA FIREFOX END-USER SOFTWARE LICENSE AGREEMENT + +Version 3.0, May 2008 + +A SOURCE CODE VERSION OF CERTAIN FIREFOX BROWSER FUNCTIONALITY THAT YOU MAY USE, MODIFY AND DISTRIBUTE IS AVAILABLE TO YOU FREE-OF-CHARGE FROM WWW.MOZILLA.ORG UNDER THE MOZILLA PUBLIC LICENSE and other open source software licenses. + +The accompanying executable code version of Mozilla Firefox and related documentation (the "Product") is made available to you under the terms of this MOZILLA FIREFOX END-USER SOFTWARE LICENSE AGREEMENT (THE "AGREEMENT"). BY CLICKING THE "ACCEPT" BUTTON, OR BY INSTALLING OR USING THE MOZILLA FIREFOX BROWSER, YOU ARE CONSENTING TO BE BOUND BY THE AGREEMENT. IF YOU DO NOT AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT, DO NOT CLICK THE "ACCEPT" BUTTON, AND DO NOT INSTALL OR USE ANY PART OF THE MOZILLA FIREFOX BROWSER. 
+ +DURING THE MOZILLA FIREFOX INSTALLATION PROCESS, AND AT LATER TIMES, YOU MAY BE GIVEN THE OPTION OF INSTALLING ADDITIONAL COMPONENTS FROM THIRD-PARTY SOFTWARE PROVIDERS. THE INSTALLATION AND USE OF THOSE THIRD-PARTY COMPONENTS MAY BE GOVERNED BY ADDITIONAL LICENSE AGREEMENTS. + +1. LICENSE GRANT. The Mozilla Corporation grants you a non-exclusive license to use the executable code version of the Product. This Agreement will also govern any software upgrades provided by Mozilla that replace and/or supplement the original Product, unless such upgrades are accompanied by a separate license, in which case the terms of that license will govern. + +2. TERMINATION. If you breach this Agreement your right to use the Product will terminate immediately and without notice, but all provisions of this Agreement except the License Grant (Paragraph 1) will survive termination and continue in effect. Upon termination, you must destroy all copies of the Product. + +3. PROPRIETARY RIGHTS. Portions of the Product are available in source code form under the terms of the Mozilla Public License and other open source licenses (collectively, "Open Source Licenses") at http://www.mozilla.org/MPL. Nothing in this Agreement will be construed to limit any rights granted under the Open Source Licenses. Subject to the foregoing, Mozilla, for itself and on behalf of its licensors, hereby reserves all intellectual property rights in the Product, except for the rights expressly granted in this Agreement. You may not remove or alter any trademark, logo, copyright or other proprietary notice in or on the Product. This license does not grant you any right to use the trademarks, service marks or logos of Mozilla or its licensors. + +4. PRIVACY POLICY. You agree to the Mozilla Firefox Privacy Policy, made available online at http://www.mozilla.com/legal/privacy/, as that policy may be changed from time to time. 
When Mozilla changes the policy in a material way a notice will be posted on the website at www.mozilla.com and when any change is made in the privacy policy, the updated policy will be posted at the above link. It is your responsibility to ensure that you understand the terms of the privacy policy, so you should periodically check the current version of the policy for changes. + +5. WEBSITE INFORMATION SERVICES. Mozilla and its contributors, licensors and partners work to provide the most accurate and up-to-date phishing and malware information. However, they cannot guarantee that this information is comprehensive and error-free: some risky sites may not be identified, and some safe sites may be identified in error. + +6. DISCLAIMER OF WARRANTY. THE PRODUCT IS PROVIDED "AS IS" WITH ALL FAULTS. TO THE EXTENT PERMITTED BY LAW, MOZILLA AND MOZILLA'S DISTRIBUTORS, AND LICENSORS HEREBY DISCLAIM ALL WARRANTIES, WHETHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION WARRANTIES THAT THE PRODUCT IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE AND NON-INFRINGING. YOU BEAR THE ENTIRE RISK AS TO SELECTING THE PRODUCT FOR YOUR PURPOSES AND AS TO THE QUALITY AND PERFORMANCE OF THE PRODUCT. THIS LIMITATION WILL APPLY NOTWITHSTANDING THE FAILURE OF ESSENTIAL PURPOSE OF ANY REMEDY. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF IMPLIED WARRANTIES, SO THIS DISCLAIMER MAY NOT APPLY TO YOU. + +7. LIMITATION OF LIABILITY. 
EXCEPT AS REQUIRED BY LAW, MOZILLA AND ITS DISTRIBUTORS, DIRECTORS, LICENSORS, CONTRIBUTORS AND AGENTS (COLLECTIVELY, THE "MOZILLA GROUP") WILL NOT BE LIABLE FOR ANY INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL OR EXEMPLARY DAMAGES ARISING OUT OF OR IN ANY WAY RELATING TO THIS AGREEMENT OR THE USE OF OR INABILITY TO USE THE PRODUCT, INCLUDING WITHOUT LIMITATION DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, LOST PROFITS, LOSS OF DATA, AND COMPUTER FAILURE OR MALFUNCTION, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES AND REGARDLESS OF THE THEORY (CONTRACT, TORT OR OTHERWISE) UPON WHICH SUCH CLAIM IS BASED. THE MOZILLA GROUP'S COLLECTIVE LIABILITY UNDER THIS AGREEMENT WILL NOT EXCEED THE GREATER OF $500 (FIVE HUNDRED DOLLARS) AND THE FEES PAID BY YOU UNDER THE LICENSE (IF ANY). SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL, CONSEQUENTIAL OR SPECIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. + +8. EXPORT CONTROLS. This license is subject to all applicable export restrictions. You must comply with all export and import laws and restrictions and regulations of any United States or foreign agency or authority relating to the Product and its use. + +9. U.S. GOVERNMENT END-USERS. This Product is a "commercial item," as that term is defined in 48 C.F.R. 2.101, consisting of "commercial computer software" and "commercial computer software documentation," as such terms are used in 48 C.F.R. 12.212 (Sept. 1995) and 48 C.F.R. 227.7202 (June 1995). Consistent with 48 C.F.R. 12.212, 48 C.F.R. 27.405(b)(2) (June 1998) and 48 C.F.R. 227.7202, all U.S. Government End Users acquire the Product with only those rights as set forth therein. + +10. MISCELLANEOUS. (a) This Agreement constitutes the entire agreement between Mozilla and you concerning the subject matter hereof, and it may only be modified by a written amendment signed by an authorized executive of Mozilla. 
(b) Except to the extent applicable law, if any, provides otherwise, this Agreement will be governed by the laws of the state of California, U.S.A., excluding its conflict of law provisions. (c) This Agreement will not be governed by the United Nations Convention on Contracts for the International Sale of Goods. (d) If any part of this Agreement is held invalid or unenforceable, that part will be construed to reflect the parties' original intent, and the remaining portions will remain in full force and effect. (e) A waiver by either party of any term or condition of this Agreement or any breach thereof, in any one instance, will not waive such term or condition or any subsequent breach thereof. (f) Except as required by law, the controlling language of this Agreement is English. (g) You may assign your rights under this Agreement to any party that consents to, and agrees to be bound by, its terms; the Mozilla Corporation may assign its rights under this Agreement without condition. (h) This Agreement will be binding upon and inure to the benefit of the parties, their successors and permitted assigns. diff --git a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/mozilla_eula_thunderbird2.txt b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/mozilla_eula_thunderbird2.txt new file mode 100644 index 00000000000..4993e4a3606 --- /dev/null +++ b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/docs/mozilla_eula_thunderbird2.txt @@ -0,0 +1,27 @@ +MOZILLA THUNDERBIRD END-USER SOFTWARE LICENSE AGREEMENT + +Version 2.0 + +A SOURCE CODE VERSION OF CERTAIN THUNDERBIRD E-MAIL FUNCTIONALITY THAT YOU MAY USE, MODIFY AND DISTRIBUTE IS AVAILABLE TO YOU FREE-OF-CHARGE FROM WWW.MOZILLA.ORG UNDER THE MOZILLA PUBLIC LICENSE and other open source software licenses. 
+ +The accompanying executable code version of Mozilla Thunderbird and related documentation (the "Product") is made available to you under the terms of this MOZILLA THUNDERBIRD END-USER SOFTWARE LICENSE AGREEMENT (THE "AGREEMENT"). BY CLICKING THE "ACCEPT" BUTTON, OR BY INSTALLING OR USING THE MOZILLA THUNDERBIRD E-MAIL CLIENT, YOU ARE CONSENTING TO BE BOUND BY THE AGREEMENT. IF YOU DO NOT AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT, DO NOT CLICK THE "ACCEPT" BUTTON, AND DO NOT INSTALL OR USE ANY PART OF THE MOZILLA THUNDERBIRD E-MAIL CLIENT. + +DURING THE MOZILLA THUNDERBIRD INSTALLATION PROCESS, AND AT LATER TIMES, YOU MAY BE GIVEN THE OPTION OF INSTALLING ADDITIONAL COMPONENTS FROM THIRD-PARTY SOFTWARE PROVIDERS. THE INSTALLATION AND USE OF THOSE THIRD-PARTY COMPONENTS MAY BE GOVERNED BY ADDITIONAL LICENSE AGREEMENTS. + +1. LICENSE GRANT. The Mozilla Corporation grants you a non-exclusive license to use the executable code version of the Product. This Agreement will also govern any software upgrades provided by Mozilla that replace and/or supplement the original Product, unless such upgrades are accompanied by a separate license, in which case the terms of that license will govern. + +2. TERMINATION. If you breach this Agreement your right to use the Product will terminate immediately and without notice, but all provisions of this Agreement except the License Grant (Paragraph 1) will survive termination and continue in effect. Upon termination, you must destroy all copies of the Product. + +3. PROPRIETARY RIGHTS. Portions of the Product are available in source code form under the terms of the Mozilla Public License and other open source licenses (collectively, "Open Source Licenses") at mozilla.org. Nothing in this Agreement will be construed to limit any rights granted under the Open Source Licenses. 
Subject to the foregoing, Mozilla, for itself and on behalf of its licensors, hereby reserves all intellectual property rights in the Product, except for the rights expressly granted in this Agreement. You may not remove or alter any trademark, logo, copyright or other proprietary notice in or on the Product. This license does not grant you any right to use the trademarks, service marks or logos of Mozilla or its licensors. + +4. PRIVACY POLICY. You agree to the Mozilla Privacy Policy, made available online at http://www.mozilla.com/legal/privacy/, as that policy may be changed from time to time, including a separate privacy policy for Thunderbird. When Mozilla changes the policy in a material way a notice will be posted on the website at www.mozilla.com, and when any change is made in the privacy policy, the updated policy will be posted at the above link. It is your responsibility to ensure that you understand the terms of the privacy policy, so you should periodically check the current version of the policy for changes. + +5. DISCLAIMER OF WARRANTY. THE PRODUCT IS PROVIDED "AS IS" WITH ALL FAULTS. TO THE EXTENT PERMITTED BY LAW, MOZILLA AND MOZILLA'S DISTRIBUTORS, AND LICENSORS HEREBY DISCLAIM ALL WARRANTIES, WHETHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION WARRANTIES THAT THE PRODUCT IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE AND NON-INFRINGING. YOU BEAR THE ENTIRE RISK AS TO SELECTING THE PRODUCT FOR YOUR PURPOSES AND AS TO THE QUALITY AND PERFORMANCE OF THE PRODUCT. THIS LIMITATION WILL APPLY NOTWITHSTANDING THE FAILURE OF ESSENTIAL PURPOSE OF ANY REMEDY. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF IMPLIED WARRANTIES, SO THIS DISCLAIMER MAY NOT APPLY TO YOU. + +6. LIMITATION OF LIABILITY. 
EXCEPT AS REQUIRED BY LAW, MOZILLA AND ITS DISTRIBUTORS, DIRECTORS, LICENSORS, CONTRIBUTORS AND AGENTS (COLLECTIVELY, THE "MOZILLA GROUP") WILL NOT BE LIABLE FOR ANY INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL OR EXEMPLARY DAMAGES ARISING OUT OF OR IN ANY WAY RELATING TO THIS AGREEMENT OR THE USE OF OR INABILITY TO USE THE PRODUCT, INCLUDING WITHOUT LIMITATION DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, LOST PROFITS, LOSS OF DATA, AND COMPUTER FAILURE OR MALFUNCTION, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES AND REGARDLESS OF THE THEORY (CONTRACT, TORT OR OTHERWISE) UPON WHICH SUCH CLAIM IS BASED. THE MOZILLA GROUP'S COLLECTIVE LIABILITY UNDER THIS AGREEMENT WILL NOT EXCEED THE GREATER OF $500 (FIVE HUNDRED DOLLARS) AND THE FEES PAID BY YOU UNDER THE LICENSE (IF ANY). SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL, CONSEQUENTIAL OR SPECIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. + +7. EXPORT CONTROLS. This license is subject to all applicable export restrictions. You must comply with all export and import laws and restrictions and regulations of any United States or foreign agency or authority relating to the Product and its use. + +8. U.S. GOVERNMENT END-USERS. This Product is a "commercial item," as that term is defined in 48 C.F.R. 2.101, consisting of "commercial computer software" and "commercial computer software documentation," as such terms are used in 48 C.F.R. 12.212 (Sept. 1995) and 48 C.F.R. 227.7202 (June 1995). Consistent with 48 C.F.R. 12.212, 48 C.F.R. 27.405(b)(2) (June 1998) and 48 C.F.R. 227.7202, all U.S. Government End Users acquire the Product with only those rights as set forth therein. + +9. MISCELLANEOUS. (a) This Agreement constitutes the entire agreement between Mozilla and you concerning the subject matter hereof, and it may only be modified by a written amendment signed by an authorized executive of Mozilla. 
(b) Except to the extent applicable law, if any, provides otherwise, this Agreement will be governed by the laws of the state of California, U.S.A., excluding its conflict of law provisions. (c) This Agreement will not be governed by the United Nations Convention on Contracts for the International Sale of Goods. (d) If any part of this Agreement is held invalid or unenforceable, that part will be construed to reflect the parties' original intent, and the remaining portions will remain in full force and effect. (e) A waiver by either party of any term or condition of this Agreement or any breach thereof, in any one instance, will not waive such term or condition or any subsequent breach thereof. (f) Except as required by law, the controlling language of this Agreement is English. (g) You may assign your rights under this Agreement to any party that consents to, and agrees to be bound by, its terms; the Mozilla Corporation may assign its rights under this Agreement without condition. (h) This Agreement will be binding upon and inure to the benefit of the parties, their successors and permitted assigns. diff --git a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/html/test1.html b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/html/test1.html deleted file mode 100644 index 46b0ea20322..00000000000 --- a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/html/test1.html +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - 汉语 - - diff --git a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/html/test2.html b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/html/test2.html deleted file mode 100644 index cddd3effa16..00000000000 --- a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/html/test2.html +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - This document is actually not about cats! 
- - diff --git a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/queries.txt b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/queries.txt deleted file mode 100644 index 23e0cd21035..00000000000 --- a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/queries.txt +++ /dev/null @@ -1 +0,0 @@ -contents:汉语 diff --git a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/queries2.txt b/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/queries2.txt deleted file mode 100644 index 86cc85730b7..00000000000 --- a/lucene/contrib/demo/src/test/org/apache/lucene/demo/test-files/queries2.txt +++ /dev/null @@ -1 +0,0 @@ -+contents:dogs +contents:fish diff --git a/lucene/docs/contributions.html b/lucene/docs/contributions.html index 8760c889010..c00555d4627 100644 --- a/lucene/docs/contributions.html +++ b/lucene/docs/contributions.html @@ -129,11 +129,8 @@ document.write("Last Published: " + document.lastModified); - - -
      • -About the Demos +About the Demo
      • Setting your CLASSPATH @@ -269,12 +269,12 @@ It walks you through some basic installation and configuration. - -

        About the Demos

        + +

        About the Demo

        -The Lucene command-line demo code consists of two applications that demonstrate various -functionalities of Lucene and how one should go about adding Lucene to their applications. +The Lucene command-line demo code consists of an application that demonstrates various +functionalities of Lucene and how you can add Lucene to your applications.

        @@ -285,19 +285,38 @@ functionalities of Lucene and how one should go about adding Lucene to their app

        First, you should download the latest Lucene distribution and then extract it to a working directory. Alternatively, you can check out the sources from -Subversion, and then run ant war-demo to generate the JARs and WARs. +Subversion, and then run ant in the lucene/contrib/demo/ +directory to generate the JARs.

        -You should see the Lucene JAR file in the directory you created when you extracted the archive. It -should be named something like lucene-core-{version}.jar. You should also see a file -called lucene-demos-{version}.jar. If you checked out the sources from Subversion then -the JARs are located under the build subdirectory (after running ant -successfully). Put both of these files in your Java CLASSPATH. +You need three JARs: the Lucene JAR, the common analysis JAR, and the Lucene demo JAR. You should +see the Lucene JAR file in the directory you created when you extracted the archive -- it +should be named something like lucene-core-{version}.jar. You should also see files +called lucene-analysis-common-{version}.jar and lucene-demos-{version}.jar. +If you checked out the sources from Subversion then the JARs are located at: +

        +
          + +
        • +lucene/build/lucene-core-4.0-SNAPSHOT.jar +
        • + +
        • +modules/analysis/build/common/lucene-analyzers-common-4.0-SNAPSHOT.jar +
        • + +
        • +lucene/build/contrib/demo/lucene-demo-4.0-SNAPSHOT.jar +
        • + +
        +

        +Put all three of these files in your Java CLASSPATH.

        - +

        Indexing Files

        @@ -326,7 +345,7 @@ you whether you want more results.

        - +

        About the code...

        diff --git a/lucene/docs/demo.pdf b/lucene/docs/demo.pdf index 9f3a762e3eb..729f0a3f3fd 100644 --- a/lucene/docs/demo.pdf +++ b/lucene/docs/demo.pdf @@ -5,10 +5,10 @@ /Producer (FOP 0.20.5) >> endobj 5 0 obj -<< /Length 615 /Filter [ /ASCII85Decode /FlateDecode ] +<< /Length 620 /Filter [ /ASCII85Decode /FlateDecode ] >> stream -Gaua<9lHd\&;KZO$6PI!?%6qk[>h\QrZS[;SS9*tq%RUKg"Z-=iJi,V6e@1\!3'-R1n!sH%5#,o-gM!\Hg&>W_;dQP+4,u7[XPB1\/7+Ao>h6sjnT"]0]79`e]WO?[k;oKaS!U7po!1:=t2l\T84$?#Bp5qGZ?qA1f]/4l9qWt_?2:EL'cF#E*R:c*3!Ck-/0XCNFDE8ON6or?.h2[V=Slf,qoS;K]fqX$ud%MC+h7cPP?[U:^fU"'&U0IL[V?uh&+DIdDl;"6G?7r_&sh]k]?hfla8Q]3%>0u=5DG&/T6jGqH&jZ3)^jab#@g6E8GGq!JO7#He(-UOT1?^=(N\mBt[#]19e)\_:&\L!JaIgHe/5Op:2edT?ObYU-JMC2SaGRr9aARs80`*5f((_ZiWP15qTo"p`1o%59aUF&<)EQghTEIEu9ZP`,n*A9E"KdQeS'N5OeuqYu:e!d&GO`,cA=lXZ,AgX1apdDH2ge;CeqB,ZWqo+1q'MCh3Jq$g>sp!2=LlrV~> +Gaua<9lldX&;KZO$6AM$'R^@LfVte5A>\a]>MB5\fk(F].>XeqpZ3nrFu(;T3,h-d=EaA=JlICG3J&?2p`[llC'h&!%,5ua]-^J1doUFsC5Oc"o\Rig3a[JG+Sg$n!)j]lM/P4ZKT=3%pS$aXp7b\4&aXXAT<@!(ZEUMdcXSW;dh&,Xpj0Y%/<[uToO@pMKL-u,Yt9\(3<"Y;qmiME]#"]u%$]*g12/3\stPTpa-`V6M7_=r+^.#c?4q\d1oaFJM:$7(7OKVJ&en>s+Ag@;BUgs;jsS6Fghg[2I0q0\l8FQ?F#a56Er:du-W&+3TXlrr^526HS!'VnCe*&p%L!a4rLs+5'5'H=- endstream endobj 6 0 obj @@ -42,7 +42,7 @@ endobj 10 0 obj << /Type /Annot /Subtype /Link -/Rect [ 102.0 500.966 194.66 488.966 ] +/Rect [ 102.0 500.966 189.992 488.966 ] /C [ 0 0 0 ] /Border [ 0 0 0 ] /A 11 0 R @@ -80,10 +80,10 @@ endobj >> endobj 18 0 obj -<< /Length 2210 /Filter [ /ASCII85Decode /FlateDecode ] +<< /Length 2291 /Filter [ /ASCII85Decode /FlateDecode ] >> stream 
-Gatm=gQ(#H&:O:SW;D_\L`''6pE@=idS1AXBppnU'MTZ_M7/\77>IVg0nT=M8,&].\o&DhfNh)n3HO?,n-jBESGfa`I@f+;>"#f7(*V[cYRAQK,*;d[@4(d$_I[TEbG@DR$&iJgo6WcI(^/h)MW!r(;o@4Ls$gbiVhZbnkJ-HXd=Le>3'uT$A-PM)!Mh3(3LORFDDjg]Wa,&WU6)F4*FEhgGDto:O"WW@H]9^rk-s(V:'`I4=Lu.MC2KCP>.Gl*-j/k^WSs5bAstb/G!+nX0u],[q3d)hVtC/Ce)EXI89Q4'IX6jOhV-eh'8hXU;U8-;/Wh`/$VI2Y?6q,(bK\fbEtmjDK$prZMoU:0`OqKqGBYnJJ`%AWkmbO+UMt__9"]-<93ht$;Vmm"aN@Q[f]aq=c/hP6,qe-g5N[8$\dRkf#Y<&R`P4"/n*]%>07Pj#flfpm7Wp+E+Up%IZNi_.0!0"i=fA6Q)D_k++qAlF=COfa-ni50k---sF.'2R1IVT#6a-8$i,O\d!&[IP<:18`hUf!';VctOicL2.WT:;snqcU(VRXj$4:aP9RQu]IZ7t/+p`:%(=I':"DN$Ad/]A3^Tj0a"=TC-O01#k2[:\m9s&0^5,Aeo!s3ldNl2,GY1F^(L,[^+$A3t\^ah0S+]PD=g[!1,r%,?lh_>k^%MXdW6a+qnYXZqU'/:a;a,r@e/S*SE/[P74PpX.f(;h<:A@r(d)W\aPNYO<5B03b/,o/c'iUW2/p\YGtSAg(2_EbRXX'qR>6Lss(QGDD!-RR.Na9s@266=YhT"om]6e**%b-F>NBY[;]N^41UOI/1S4;)o8`i[.dBY3&08kCjC#`!A9,#hQIE7@Y+b61RZ.eG`NERceoR<-J#ETn:EcM\B_\XYEu$>DF#H0'=_mAi"eP&mQgGYTS#j_>u65<`[p`T7e=DY:?*hL>ns51u8M7oK9t'l'b1l.Sc8B'J$W7&3Wr,l_R]F:^'2F/Q\"[5&H/ca!mkc@.k7s2FUL2%(aN69_(6Za.=&ORE1%,lHmbr.f2'qoVod':`]<.BCo(HUlZP4p'_9MBdt^6.P7V+J7GaO&*;@+bl:kCe9oTlG-8cr=le>D6D@dg^L]C#=f)?M.HB[QA%K,HlS/GeT6PgXRotO0>]KB!l(l3TNOL7XToE#&c)1P^(=CK\S<.9m^bEFpW:HE;WGfNGC_giP0YH+RZ&Op4^G'Bf+&eP&85gc4AW20E/ke)ijlp]n3&uKZdU/hbY4SO8QWs+>7u;0&X-&,7t#7%,QfG*PXYnKYfh%i0\HOFNCYcc8FKaiAbQ`Fc(8B!B=*U'UE!hE]W9:XhaLLg#[$&DCn:n5$MM)i.$]V1B4&[+Y!%nj:1o)D`e.Zqu\8B7)Z!c'iLApB@3f"?'iiONA.Hf2$YLXs(JQpDCRZpAlSf7,tU=Mh:tDZ5?"?%DS#`2rlN@B?D<99@+KR?LEuLr]PQU&'jU&7/in1-#<1J[[2,DB_q*T:D+=/e3=%6P2fJk7SC$lTJ#CZY=uc#Ra91@6Zd]Af5K\)SXeQt-(&f9qT+3*d[Y&#T\&3cYuZXgBJrY("Z099%=LIC8a^1emp2S@1^`f)-\u^@!6c9H('~> 
+Gatm=99[ga&AI=/Ge(Uj&]L'.iALd"4Gj3+abAI)K4?Eg)93)i4COQ8^Mh@3eRiDtgDR'mIu`&Qoiu`)YZ?,:Y9shUh3uf=R3red?HHV(G?Hqb?LXcEZN?`uA$+TE3]Qf\03a9CsPc8K\&cPA";F!V=&S^[h?@-mZ:IsO#C@cAX6_>J^SfbbD[5AAJ/CoQG6iB`s+IBo7Dp-qE>GXERJ\`aJP@(Fd;qQAMSB.<)DUL^9B*PTTc!HNT.IU1=\\^:0]INl>nVaNX[prb88oZHHLU6C+`ip*;EcrtUtkJi7rClnZ,h3\)T!t*OHO4ZV1)+H(6qJ%>06#:+-a1"&AGMdDIW'c[(4'#.s^]]sJaW9rWV#Bgh?I@$EFtZN#La3m9ci8/DX$LC*4:6r#BRb-D<"At]W5U['$jJ&'uqkVd$Bg_Bh'8i'%L3eO2"pe?-olFXb_rfu"sKjSI@V=.dIdnho[?=O]\#C;NS5KGoA`j-@!3,lCd3%`BM4Vksnr"_c]OgXgg8B\eFRtqMS,J)anZo7iL0C0pd$Js&O#8MW1k>t@Fc<5"u*r1>LpaN1sq6hT8J%?Oh28UQ>F^e#2H7)]tA!J#U$Msi][G2e#+<-^Yl`,[f'I,6FFI8L>n2PZmj*A\[Ui1.0kN^psOeU!sYqI)\p_BYWok[N-23L=bLkB,-lQR+_WjGKJekpD$DdEjeDJ7P*V1a3hmO1@SodO>q]AWtAiZ\2lBM6t]G+&sp3[N!?c1JkZcf>#c0&k]UQ=PpM-Y5?:ug!uupK6%,_(_bSE`,e8/X)%LAsFJhB]PIp@j#+rg[3kMqlmXZ?K2"n^sd(qFkRaatMisu`K/l'qA4b$?[,+dZX7DstI-$5f$U!%&cMjb+)1=0&Q_gl2IpUB;=kYc!E(*rk'6QYC8'8gOO endstream endobj 19 0 obj @@ -94,80 +94,95 @@ endobj /Contents 18 0 R >> endobj +20 0 obj +<< /Length 471 /Filter [ /ASCII85Decode /FlateDecode ] + >> +stream +GarnSbAMqd&A7ljp6Zo\fZ(pMM%nYjC=PZngtEehE!R)m.?IU/qjL$.8R!?HAsqJGSTPXAgDG>]JqGa"*jd;mZonR!"+VkRs*m4N!/1Ohn--:>0&#bS'ps:e.LfVSKVB;8ee2XhLj+Y?L27%j;!:igBB_Uu#C)dGHj+TBQ'nb_k1#?og$94q-Iir;niOear<0JX]G^?d*!a3-(-^2W24rDnUFq^Ea8gK^3n;:U#>,ugZX6?1^L=T]lN;BKYBlD[N +endstream +endobj 21 0 obj -<< - /Title (\376\377\0\61\0\40\0\101\0\142\0\157\0\165\0\164\0\40\0\164\0\150\0\151\0\163\0\40\0\104\0\157\0\143\0\165\0\155\0\145\0\156\0\164) - /Parent 20 0 R - /Next 22 0 R - /A 9 0 R ->> endobj -22 0 obj -<< - /Title (\376\377\0\62\0\40\0\101\0\142\0\157\0\165\0\164\0\40\0\164\0\150\0\145\0\40\0\104\0\145\0\155\0\157\0\163) - /Parent 20 0 R - /Prev 21 0 R - /Next 23 0 R - /A 11 0 R ->> endobj +<< /Type /Page +/Parent 1 0 R +/MediaBox [ 0 0 612 792 ] +/Resources 3 0 R +/Contents 20 0 R +>> +endobj 23 0 obj << - /Title 
(\376\377\0\63\0\40\0\123\0\145\0\164\0\164\0\151\0\156\0\147\0\40\0\171\0\157\0\165\0\162\0\40\0\103\0\114\0\101\0\123\0\123\0\120\0\101\0\124\0\110) - /Parent 20 0 R - /Prev 22 0 R + /Title (\376\377\0\61\0\40\0\101\0\142\0\157\0\165\0\164\0\40\0\164\0\150\0\151\0\163\0\40\0\104\0\157\0\143\0\165\0\155\0\145\0\156\0\164) + /Parent 22 0 R /Next 24 0 R - /A 13 0 R + /A 9 0 R >> endobj 24 0 obj << - /Title (\376\377\0\64\0\40\0\111\0\156\0\144\0\145\0\170\0\151\0\156\0\147\0\40\0\106\0\151\0\154\0\145\0\163) - /Parent 20 0 R + /Title (\376\377\0\62\0\40\0\101\0\142\0\157\0\165\0\164\0\40\0\164\0\150\0\145\0\40\0\104\0\145\0\155\0\157) + /Parent 22 0 R /Prev 23 0 R /Next 25 0 R - /A 15 0 R + /A 11 0 R >> endobj 25 0 obj << - /Title (\376\377\0\65\0\40\0\101\0\142\0\157\0\165\0\164\0\40\0\164\0\150\0\145\0\40\0\143\0\157\0\144\0\145\0\56\0\56\0\56) - /Parent 20 0 R + /Title (\376\377\0\63\0\40\0\123\0\145\0\164\0\164\0\151\0\156\0\147\0\40\0\171\0\157\0\165\0\162\0\40\0\103\0\114\0\101\0\123\0\123\0\120\0\101\0\124\0\110) + /Parent 22 0 R /Prev 24 0 R - /A 17 0 R + /Next 26 0 R + /A 13 0 R >> endobj 26 0 obj +<< + /Title (\376\377\0\64\0\40\0\111\0\156\0\144\0\145\0\170\0\151\0\156\0\147\0\40\0\106\0\151\0\154\0\145\0\163) + /Parent 22 0 R + /Prev 25 0 R + /Next 27 0 R + /A 15 0 R +>> endobj +27 0 obj +<< + /Title (\376\377\0\65\0\40\0\101\0\142\0\157\0\165\0\164\0\40\0\164\0\150\0\145\0\40\0\143\0\157\0\144\0\145\0\56\0\56\0\56) + /Parent 22 0 R + /Prev 26 0 R + /A 17 0 R +>> endobj +28 0 obj << /Type /Font /Subtype /Type1 /Name /F3 /BaseFont /Helvetica-Bold /Encoding /WinAnsiEncoding >> endobj -27 0 obj +29 0 obj << /Type /Font /Subtype /Type1 /Name /F5 /BaseFont /Times-Roman /Encoding /WinAnsiEncoding >> endobj -28 0 obj +30 0 obj << /Type /Font /Subtype /Type1 /Name /F1 /BaseFont /Helvetica /Encoding /WinAnsiEncoding >> endobj -29 0 obj +31 0 obj << /Type /Font /Subtype /Type1 /Name /F9 /BaseFont /Courier /Encoding /WinAnsiEncoding >> endobj -30 0 obj +32 0 obj << 
/Type /Font /Subtype /Type1 /Name /F2 /BaseFont /Helvetica-Oblique /Encoding /WinAnsiEncoding >> endobj -31 0 obj +33 0 obj << /Type /Font /Subtype /Type1 /Name /F7 @@ -176,19 +191,19 @@ endobj endobj 1 0 obj << /Type /Pages -/Count 2 -/Kids [6 0 R 19 0 R ] >> +/Count 3 +/Kids [6 0 R 19 0 R 21 0 R ] >> endobj 2 0 obj << /Type /Catalog /Pages 1 0 R - /Outlines 20 0 R + /Outlines 22 0 R /PageMode /UseOutlines >> endobj 3 0 obj << -/Font << /F3 26 0 R /F5 27 0 R /F1 28 0 R /F9 29 0 R /F2 30 0 R /F7 31 0 R >> +/Font << /F3 28 0 R /F5 29 0 R /F1 30 0 R /F9 31 0 R /F2 32 0 R /F7 33 0 R >> /ProcSet [ /PDF /ImageC /Text ] >> endobj 9 0 obj @@ -212,60 +227,62 @@ endobj 15 0 obj << /S /GoTo -/D [19 0 R /XYZ 85.0 375.198 null] +/D [19 0 R /XYZ 85.0 295.198 null] >> endobj 17 0 obj << /S /GoTo -/D [19 0 R /XYZ 85.0 209.264 null] +/D [21 0 R /XYZ 85.0 659.0 null] >> endobj -20 0 obj +22 0 obj << - /First 21 0 R - /Last 25 0 R + /First 23 0 R + /Last 27 0 R >> endobj xref -0 32 +0 34 0000000000 65535 f -0000005712 00000 n -0000005777 00000 n -0000005869 00000 n +0000006464 00000 n +0000006536 00000 n +0000006628 00000 n 0000000015 00000 n 0000000071 00000 n -0000000777 00000 n -0000000897 00000 n -0000000950 00000 n -0000006003 00000 n -0000001085 00000 n -0000006066 00000 n -0000001221 00000 n -0000006132 00000 n -0000001357 00000 n -0000006198 00000 n -0000001492 00000 n -0000006264 00000 n -0000001628 00000 n -0000003931 00000 n -0000006330 00000 n -0000004039 00000 n -0000004242 00000 n -0000004436 00000 n -0000004672 00000 n -0000004861 00000 n -0000005050 00000 n -0000005163 00000 n -0000005273 00000 n -0000005381 00000 n -0000005487 00000 n -0000005603 00000 n +0000000782 00000 n +0000000902 00000 n +0000000955 00000 n +0000006762 00000 n +0000001090 00000 n +0000006825 00000 n +0000001227 00000 n +0000006891 00000 n +0000001363 00000 n +0000006957 00000 n +0000001498 00000 n +0000007023 00000 n +0000001634 00000 n +0000004018 00000 n +0000004126 00000 n +0000004689 00000 
n +0000007087 00000 n +0000004797 00000 n +0000005000 00000 n +0000005188 00000 n +0000005424 00000 n +0000005613 00000 n +0000005802 00000 n +0000005915 00000 n +0000006025 00000 n +0000006133 00000 n +0000006239 00000 n +0000006355 00000 n trailer << -/Size 32 +/Size 34 /Root 2 0 R /Info 4 0 R >> startxref -6381 +7138 %%EOF diff --git a/lucene/docs/demo2.html b/lucene/docs/demo2.html index 9d3f8f9527a..0939ad21940 100644 --- a/lucene/docs/demo2.html +++ b/lucene/docs/demo2.html @@ -129,11 +129,8 @@ document.write("Last Published: " + document.lastModified);

        - - -
      • Searching Files
      • -
      • -The Web example... -
      @@ -275,9 +272,9 @@ how to use Lucene in their applications.

      Relative to the directory created when you extracted Lucene or retrieved it from Subversion, you -should see a directory called src which in turn contains a directory called -demo. This is the root for all of the Lucene demos. Under this directory is -org/apache/lucene/demo. This is where all the Java sources for the demos live. +should see a directory called lucene/contrib/demo/. This is the root for the Lucene +demo. Under this directory is src/java/org/apache/lucene/demo/. This is where all +the Java sources for the demo live.

      Within this directory you should see the IndexFiles.java class we executed earlier. @@ -286,94 +283,111 @@ Bring it up in vi or your editor of choice and let

      - +

      IndexFiles

      -As we discussed in the previous walk-through, the IndexFiles class creates a Lucene +As we discussed in the previous walk-through, the IndexFiles class creates a Lucene Index. Let's take a look at how it does this.

      -The first substantial thing the main function does is instantiate IndexWriter. It passes the string -"index" and a new instance of a class called StandardAnalyzer. -The "index" string is the name of the filesystem directory where all index information -should be stored. Because we're not passing a full path, this will be created as a subdirectory of -the current working directory (if it does not already exist). On some platforms, it may be created -in other directories (such as the user's home directory). +The main() method parses the command-line parameters, then in preparation for +instantiating IndexWriter, opens a +Directory and instantiates +StandardAnalyzer and +IndexWriterConfig.

      -The IndexWriter is the main -class responsible for creating indices. To use it you must instantiate it with a path that it can -write the index into. If this path does not exist it will first create it. Otherwise it will -refresh the index at that path. You can also create an index using one of the subclasses of Directory. In any case, you must also pass an -instance of org.apache.lucene.analysis.Analyzer. +The value of the -index command-line parameter is the name of the filesystem directory +where all index information should be stored. If IndexFiles is invoked with a +relative path given in the -index command-line parameter, or if the -index +command-line parameter is not given, causing the default relative index path "index" +to be used, the index path will be created as a subdirectory of the current working directory +(if it does not already exist). On some platforms, the index path may be created in a different +directory (such as the user's home directory).

      -The particular Analyzer we -are using, StandardAnalyzer, is -little more than a standard Java Tokenizer, converting all strings to lowercase and filtering out -stop words and characters from the index. By stop words and characters I mean common language -words such as articles (a, an, the, etc.) and other strings that may have less value for searching -(e.g. 's) . It should be noted that there are different rules for every language, and you -should use the proper analyzer for each. Lucene currently provides Analyzers for a number of -different languages (see the *Analyzer.java sources under modules/analysis/common/src/java/org/apache/lucene/analysis). +The -docs command-line parameter value is the location of the directory containing +files to be indexed.

      -Looking further down in the file, you should see the indexDocs() code. This recursive -function simply crawls the directories and uses FileDocument to create Document objects. The Document is simply a data object to -represent the content in the file as well as its creation time and location. These instances are -added to the indexWriter. Take a look inside FileDocument. It's not particularly -complicated. It just adds fields to the Document. +The -update command-line parameter tells IndexFiles not to delete the +index if it already exists. When -update is not given, IndexFiles will +first wipe the slate clean before indexing any documents.

      -As you can see there isn't much to creating an index. The devil is in the details. You may also -wish to examine the other samples in this directory, particularly the IndexHTML class. It is a bit more -complex but builds upon this example. +Lucene Directorys are used by the +IndexWriter to store information in the index. In addition to the +FSDirectory implementation we are using, +there are several other Directory subclasses that can write to RAM, to databases, etc. +

      +

      +Lucene Analyzers are processing pipelines +that break up text into indexed tokens, a.k.a. terms, and optionally perform other operations on these +tokens, e.g. downcasing, synonym insertion, filtering out unwanted tokens, etc. The Analyzer +we are using is StandardAnalyzer, which creates tokens using the Word Break rules from the +Unicode Text Segmentation algorithm specified in Unicode +Standard Annex #29; converts tokens to lowercase; and then filters out stopwords. Stopwords are +common language words such as articles (a, an, the, etc.) and other tokens that may have less value for +searching. It should be noted that there are different rules for every language, and you should use the +proper analyzer for each. Lucene currently provides Analyzers for a number of different languages (see +the *Analyzer.java sources under +modules/analysis/common/src/java/org/apache/lucene/analysis). +

      +

      +The IndexWriterConfig instance holds all configuration for IndexWriter. For +example, we set the OpenMode to use here based on the value of the -update +command-line parameter. +

      +

      +Looking further down in the file, after IndexWriter is instantiated, you should see the +indexDocs() code. This recursive function crawls the directories and creates +Document objects. The +Document is simply a data object to represent the text content from the file as well as +its creation time and location. These instances are added to the IndexWriter. If +the -update command-line parameter is given, the IndexWriter +OpenMode will be set to OpenMode.CREATE_OR_APPEND, and rather than +adding documents to the index, the IndexWriter will update them +in the index by attempting to find an already-indexed document with the same identifier (in our +case, the file path serves as the identifier); deleting it from the index if it exists; and then +adding the new document to the index.

      - +

      Searching Files

      -The SearchFiles class is -quite simple. It primarily collaborates with an IndexSearcher, StandardAnalyzer -(which is used in the IndexFiles class as well) and a -QueryParser. The +The SearchFiles class is +quite simple. It primarily collaborates with an +IndexSearcher, +StandardAnalyzer (which is used in the +IndexFiles class as well) +and a QueryParser. The query parser is constructed with an analyzer used to interpret your query text in the same way the -documents are interpreted: finding the end of words and removing useless words like 'a', 'an' and -'the'. The Query object contains -the results from the QueryParser which is passed to -the searcher. Note that it's also possible to programmatically construct a rich Query object without using the query +documents are interpreted: finding word boundaries, downcasing, and removing useless words like +'a', 'an' and 'the'. The Query +object contains the results from the +QueryParser which is passed +to the searcher. Note that it's also possible to programmatically construct a rich +Query object without using the query parser. The query parser just enables decoding the Lucene query -syntax into the corresponding Query object. Search can be executed in -two different ways: +syntax into the corresponding Query +object. Search can be executed in two different ways: +

      • Streaming: A Collector subclass simply prints out the document ID and score for each matching document.
      • -
      • Paging: Using a TopScoreDocCollector - the search results are printed in pages, sorted by score (i. e. relevance).
      • +
      • Paging: Using the IndexSearcher.search(query,n) method that returns +TopDocs with max n hits, +the search results are printed in pages, sorted by score (i.e. relevance).
      • -
      - -

      +
    - - -

    The Web example...

    -
    -

    - -read on>>> - -

    -
    - - - - -
    - - - - - - - - - - - - -
    - -
    -
    - -
    - - -
    - -
    - -   -
    - - - - - -
    - -

    - Apache Lucene - Building and Installing the Basic Demo -

    - - - - -

    About this Document

    -
    -

    -This document is intended as a "getting started" guide to installing and running the Lucene -web application demo. This guide assumes that you have read the information in the previous two -examples. We'll use Tomcat as our reference web container. These demos should work with nearly any -container, but you may have to adapt them appropriately. -

    -
    - - - - -

    About the Demos

    -
    -

    -The Lucene Web Application demo is a template web application intended for deployment on Tomcat or a -similar web container. It's NOT designed as a "best practices" implementation by ANY means. It's -more of a "hello world" type Lucene Web App. The purpose of this application is to demonstrate -Lucene. With that being said, it should be relatively simple to create a small searchable website -in Tomcat or a similar application server. -

    -
    - - - -

    Indexing Files

    -
    -

    Once you've gotten this far you're probably itching to go. Let's start by creating the index -you'll need for the web examples. Since you've already set your CLASSPATH in the previous examples, -all you need to do is type: - -

    -    java org.apache.lucene.demo.IndexHTML -create -index {index-dir} ..
    -
    - -You'll need to do this from a (any) subdirectory of your {tomcat}/webapps directory -(make sure you didn't leave off the .. or you'll get a null pointer exception). -{index-dir} should be a directory that Tomcat has permission to read and write, but is -outside of a web accessible context. By default the webapp is configured to look in -/opt/lucene/index for this index. -

    -
    - - - -

    Deploying the Demos

    -
    -

    Located in your distribution directory you should see a war file called -luceneweb.war. If you're working with a Subversion checkout, this will be under the -build subdirectory. Copy this to your {tomcat-home}/webapps directory. -You may need to restart Tomcat.

    -
    - - - -

    Configuration

    -
    -

    From your Tomcat directory look in the webapps/luceneweb subdirectory. If it's not -present, try browsing to http://localhost:8080/luceneweb (which causes Tomcat to deploy -the webapp), then look again. Edit a file called configuration.jsp. Ensure that the -indexLocation is equal to the location you used for your index. You may also customize -the appTitle and appFooter strings as you see fit. Once you have finished -altering the configuration you may need to restart Tomcat. You may also wish to update the war file -by typing jar -uf luceneweb.war configuration.jsp from the luceneweb -subdirectory. (The -u option is not available in all versions of jar. In this case recreate the -war file). -

    -
    - - - -

    Running the Demos

    -
    -

    Now you're ready to roll. In your browser set the url to -http://localhost:8080/luceneweb enter test and the number of items per -page and press search.

    -

    You should now be looking either at a number of results (provided you didn't erase the Tomcat -examples) or nothing. If you get an error regarding opening the index, then you probably set the -path in configuration.jsp incorrectly or Tomcat doesn't have permissions to the index -(or you skipped the step of creating it). Try other search terms. Depending on the number of items -per page you set and results returned, there may be a link at the bottom that says More -Results>>; clicking it takes you to subsequent pages.

    -
    - - - -

    About the code...

    -
    -

    -If you want to know more about how this web app works or how to customize it then read on>>>. -

    -
    - - -
    - -
     
    -
    - - - diff --git a/lucene/docs/demo3.pdf b/lucene/docs/demo3.pdf deleted file mode 100644 index ad1a9f19e29..00000000000 --- a/lucene/docs/demo3.pdf +++ /dev/null @@ -1,344 +0,0 @@ -%PDF-1.3 -%ª«¬­ -4 0 obj -<< /Type /Info -/Producer (FOP 0.20.5) >> -endobj -5 0 obj -<< /Length 661 /Filter [ /ASCII85Decode /FlateDecode ] - >> -stream -Gb!$E966RV&:j6I$6AM$'Km$JrMXAWktJ!,\$QAA_UaYs\NH&;hj7E+lul!AM$jQq@bKRU[1^)HK%h:LKL#a(,!as/$P:qq(ngTB+R\AYL*-pMO;u+8@Wb;LK3X!I,,4eei_u%4j2j5t\8IPVQ.`d;eDs9l"!esA*+8%OgmP0;[6FJjp-#S-8nCcQb+fu:hX3)5A:Q[]^c@$,@O$HG9DRWWCnM_sf'6+uP;rXtqjTUg?qA2O>`q;75!%Q6>1g]O1M^`>l>!WF!Cl8G0^@ZZm;sl`He?raJAM3I^@pj.2RW-Y+&q?tXj*;E)o**Y,(-SpN<"_WUtl1iA/:fV"H!eIbi8NRJ)GZ(d1o.DhWRl>#U++WJ7+J"89@#rT(\_sjf`[-%+mYB"ginoUHFiTUU]oDG3R^CIp#k->N[.YZ7$J9\&EQQ1BDJtIH]dJl0\&lA(@%sJ),KS*0S,5;eP]2"#Qfg7A:JoRt+_48D&j<1>KdC6BYk>L[X[JPB%-QG4)+;?jcnU$)80'8WTlDM#]Ap1U;H,$Vsp0a`)'7/Q$-d,,JO"uJ#rAP_*n7h~> -endstream -endobj -6 0 obj -<< /Type /Page -/Parent 1 0 R -/MediaBox [ 0 0 612 792 ] -/Resources 3 0 R -/Contents 5 0 R -/Annots 7 0 R ->> -endobj -7 0 obj -[ -8 0 R -10 0 R -12 0 R -14 0 R -16 0 R -18 0 R -20 0 R -] -endobj -8 0 obj -<< /Type /Annot -/Subtype /Link -/Rect [ 102.0 519.166 213.332 507.166 ] -/C [ 0 0 0 ] -/Border [ 0 0 0 ] -/A 9 0 R -/H /I ->> -endobj -10 0 obj -<< /Type /Annot -/Subtype /Link -/Rect [ 102.0 500.966 194.66 488.966 ] -/C [ 0 0 0 ] -/Border [ 0 0 0 ] -/A 11 0 R -/H /I ->> -endobj -12 0 obj -<< /Type /Annot -/Subtype /Link -/Rect [ 102.0 482.766 179.0 470.766 ] -/C [ 0 0 0 ] -/Border [ 0 0 0 ] -/A 13 0 R -/H /I ->> -endobj -14 0 obj -<< /Type /Annot -/Subtype /Link -/Rect [ 102.0 464.566 215.324 452.566 ] -/C [ 0 0 0 ] -/Border [ 0 0 0 ] -/A 15 0 R -/H /I ->> -endobj -16 0 obj -<< /Type /Annot -/Subtype /Link -/Rect [ 102.0 446.366 177.332 434.366 ] -/C [ 0 0 0 ] -/Border [ 0 0 0 ] -/A 17 0 R -/H /I ->> -endobj -18 0 obj -<< /Type /Annot -/Subtype /Link -/Rect [ 102.0 428.166 206.0 416.166 ] -/C [ 0 0 0 ] -/Border [ 0 0 0 ] -/A 19 0 R -/H /I 
->> -endobj -20 0 obj -<< /Type /Annot -/Subtype /Link -/Rect [ 102.0 409.966 192.32 397.966 ] -/C [ 0 0 0 ] -/Border [ 0 0 0 ] -/A 21 0 R -/H /I ->> -endobj -22 0 obj -<< /Length 2641 /Filter [ /ASCII85Decode /FlateDecode ] - >> -stream -Gatm=>Ar7U(4OT5&GjfjBPpFVdk1L4ND].pN<2Z"W?[*>R$C33&4*R'#j,KYhgO\)`hV[/B;Z:LSQpO/iUQWG8q=2/%O/5B[+F1T+MkM>%29k)/B[Z:=(Isg]i,qa800_"&1+*@Nio71i,G:MjYY5Wlm]=_>&71Scm=Dk9HB6se4^^kXDWP/V@LX'r/r9Ff)Dk1[k0D"=:_pP=mCaKo$%Msade/H/k*t?4@ac>\R*QKQ&;cPZ[=NgC0,Xn?&e%`)pZ>!:CSY">*=uC@!C3m1&eQO0C2;SREcZ?BW>+f`5[OB4pbml=Jhe'0Ua.Q;Mh3a%oj3Mb"<\MX$ASIo]JV#'>)Pr<\C1_ouf+>G1>Ekj1*csIQG3K[/=ikc&EC:N\?0u9*WP\4L=ii%fgY1_5O9Ecj3o2uo:s6',^be3Ju=mY.b`YNB)p;*33[1bp[++jTf$g#D5>2VIus\QGS&rYiQN7Ek5"=dbSq>C9GurV:%iq#DRU'0B,qB6IS`/E#,EdkQ9%_.u)]5F!ub?r@k%hJl%"O=Q51bQ8i@5&qgT[_L`@QQ_ro:WYJOaNNJ2bG_Qd1OjF;o#tqfIn_k/lTj@3&AZcOL^SuuNADZMn??P+6GY)FbXnF5?.uV*_+KeNch>Hm_[?sXGg1pUn="%-rA%$P.o,Qj5+="o@e/ZUq;-=U49Fgr>!l]V1\883Df+oS[c^UsCPDR;ig2#WeMA5Aa3sK\VI$BRjP#?A$ed1m^H?5+W/$mu-aJ.)L8jXJr-]*D_mZVR4-Nj@cT&:7=VjN9TJF@<@Y&`6l[;#tYrq0#1kqKUdH4r3eZ?kfJm$j]X%oP-N&G'r7p,rFc0HTTOE9@3lI3g>W;n98F[P*"DJ5)dBggf:hTFLloo^O`rINZtgQmB/P!UR2d_ZjbKRp$DlUK:7p#@+OeNiGC)G=%G%QNT^&;/1i[^+AXa;fs6@'o/-obT13?R>rf-OB/Y0WtQm?.'s5%2RmJXBhI01P3Nmkms"Lb%hHG1/:TW(0F_@.E:+RVS93rLjEK7RS=![l$:-]\D0N5_IN&3S5+ulFeX!%CRdDBD#>jbuI,>#j/:Y;k\eB'!Np^/u&aB.ZluDrM5JCI)lAt0Q6g=Do2XNC5B.Zqo//>(4+$80Pp_G0b?%FF[CNP8.NlbnSGjk@c/i!+'(W=N>6h#HO,Ng-V-q"'QqIT53j,<=n7;!R@Gm(t]7:M`:BdnhF=r4A>\`T'RCVADgWmg$1#@j>E;r\]Vj,EGJ(?Dd>[T`O(/=i?jpTB=N8>8bHJ_U8$/j>:Bp+BSObi&se_XaWjmGKc4L7uaq/`Zo&eJ:HBTF6@-BU\514ReepDWl3-QOurVOX9-4@0V=;hlc.EX,4Ls:hk&$/mTPDlQ/2NbY92I589YD4+<.U>!#Y0@L&J),raVmCU#bBgc5nj9bpX1RPuXmUoiZTu:+'KH8!3[DQNq$eI/#kMV645drZ@;SGP)Gb5ZTtHL?kDeb=eX`kB7YaZUIR%I(%nEfp,P`BB"AJVf)o6_M'iArs'g[#s/2d5Z0Me!'YWGJ-Vgg]lDN"qgAM5n/+c17):sZ/Lf&j8_a;#Vn@(RbmQKfGW_e1*6Vm_Jf*\9+6"'\(U2Eh-cGP4*;U"Z]T;(b*.#XoiCHnjJtN+(^oGi/ouW'>m^O4A,`_+)];@%70,?Qe5eb]io0.>-pTcJt*olkR*oB*K6Bqbs2^+n)8kDr#\Rq=j\dtH4;UOq0QAkO3*0o?'G,WkA?.eoqh(E0l9(DYo80$4/
%53k\@1IMkdHJf-]jO-2LPLMRf!ssgi3fZ8]31aQD-F!!4Sd?RH%S'q\8Wi>(J#Z.nfSK`d*^c`Ud71g2ZPeMfQ.ZI1rn"u?d^O^?D)7WG=WnH^52[eIA+u>J0eX^!1e,GLS[&BA]NRfdK61;i)HIIC\7SiG/Ls#hCd'Q)p%YK6!)NA -endstream -endobj -23 0 obj -<< /Type /Page -/Parent 1 0 R -/MediaBox [ 0 0 612 792 ] -/Resources 3 0 R -/Contents 22 0 R ->> -endobj -24 0 obj -<< /Length 1607 /Filter [ /ASCII85Decode /FlateDecode ] - >> -stream -GatmHj8aZRh]p#>_brG$?L+nfRL%p#0Rhd5m4LO.cF31Tc+[lSb`?#>6/B#fgFmj;^:?*Vgu/`_*-Yrb-#E[AIi)P/Hube\gC^:;]D=/D[>b]B7*gD24+93onMJ@H5[:WN7pIdLgkGFo:X/IN#a;Kd=7>>uq39I)a,Me&,W>9(ZeV9X(FpKrJ'pqdJ/&ISh&;P_:l421KLt-[mT*VVV`>ZHg#A8%p!.0n,uSVD4cSN>nu0PCSa:2?t/PAVO-Cm@-!oou?Eh4E1o7JgNHAAof1a@I4o[\]C/9/5?,S\--^P5;8#e+Q@L?"0#hE0"%7_@lgDOpF?+S0jCT);!VBe47Z*UlbH[AtsSG_c59NCU!BLKpnJ,RZ<3$(#M3uu/p$VLK"CZkmKS1AY\>VWk.WaBN+IS=.$E`@H[M[j/Dn435,ba!&a,.88^2\'5h\8cOua.=Wa+A1+sGbOO8_MhY>k/ZZ[j"/1C3%88/U8mu.0c8*7kt5VnU6>So[Z.Kb>%9PIb3S=6klg1n,RqQad5/mk%QQJj69/QNIq,j"UtfEFO?I(Wc#'R/0AHoMNQrDs0l%pV.YZYR@KN'KTggqA'Ph8s1H>pUnPR2!aP:,:2XR40k3hYa'^\fZqba__CprM="uVpF0KL-OV1s0V!cJ"`Gq"=X%FC()lA6qsHl0JJ%bDD+o"2<0k?uS8_$M9k'2o#B6Gn8$K7PItc+k#G1Ql(A[orPk15mJJHLGspK+hVS]!K:%]BQhlhomhR[0eY);<^4b^%ImXA5ceRa@Ph3T1L6)75TYT<:5NE9]?F69&AC[Oe/]X0*?=::o4q]VdS36GZV/_$Qi421/.i=lK<>$F1,tVeU9$l$g[GSkPFZ!T-V&YQf\"0*@\U57c_ii1N5G/&)#Fch7U.lnMtUIr@;dVAol'53pGrCk4/*1l>L;^lSXA%.-c"J#NZVp;2kLr5i\4_PM"&&5`Kr]ISeQdMs[[, -endstream -endobj -25 0 obj -<< /Type /Page -/Parent 1 0 R -/MediaBox [ 0 0 612 792 ] -/Resources 3 0 R -/Contents 24 0 R ->> -endobj -27 0 obj -<< - /Title (\376\377\0\61\0\40\0\101\0\142\0\157\0\165\0\164\0\40\0\164\0\150\0\151\0\163\0\40\0\104\0\157\0\143\0\165\0\155\0\145\0\156\0\164) - /Parent 26 0 R - /Next 28 0 R - /A 9 0 R ->> endobj -28 0 obj -<< - /Title (\376\377\0\62\0\40\0\101\0\142\0\157\0\165\0\164\0\40\0\164\0\150\0\145\0\40\0\104\0\145\0\155\0\157\0\163) - /Parent 26 0 R - /Prev 27 0 R - /Next 29 0 R - /A 11 0 R ->> endobj -29 0 obj -<< - /Title 
(\376\377\0\63\0\40\0\111\0\156\0\144\0\145\0\170\0\151\0\156\0\147\0\40\0\106\0\151\0\154\0\145\0\163) - /Parent 26 0 R - /Prev 28 0 R - /Next 30 0 R - /A 13 0 R ->> endobj -30 0 obj -<< - /Title (\376\377\0\64\0\40\0\104\0\145\0\160\0\154\0\157\0\171\0\151\0\156\0\147\0\40\0\164\0\150\0\145\0\40\0\104\0\145\0\155\0\157\0\163) - /Parent 26 0 R - /Prev 29 0 R - /Next 31 0 R - /A 15 0 R ->> endobj -31 0 obj -<< - /Title (\376\377\0\65\0\40\0\103\0\157\0\156\0\146\0\151\0\147\0\165\0\162\0\141\0\164\0\151\0\157\0\156) - /Parent 26 0 R - /Prev 30 0 R - /Next 32 0 R - /A 17 0 R ->> endobj -32 0 obj -<< - /Title (\376\377\0\66\0\40\0\122\0\165\0\156\0\156\0\151\0\156\0\147\0\40\0\164\0\150\0\145\0\40\0\104\0\145\0\155\0\157\0\163) - /Parent 26 0 R - /Prev 31 0 R - /Next 33 0 R - /A 19 0 R ->> endobj -33 0 obj -<< - /Title (\376\377\0\67\0\40\0\101\0\142\0\157\0\165\0\164\0\40\0\164\0\150\0\145\0\40\0\143\0\157\0\144\0\145\0\56\0\56\0\56) - /Parent 26 0 R - /Prev 32 0 R - /A 21 0 R ->> endobj -34 0 obj -<< /Type /Font -/Subtype /Type1 -/Name /F3 -/BaseFont /Helvetica-Bold -/Encoding /WinAnsiEncoding >> -endobj -35 0 obj -<< /Type /Font -/Subtype /Type1 -/Name /F5 -/BaseFont /Times-Roman -/Encoding /WinAnsiEncoding >> -endobj -36 0 obj -<< /Type /Font -/Subtype /Type1 -/Name /F1 -/BaseFont /Helvetica -/Encoding /WinAnsiEncoding >> -endobj -37 0 obj -<< /Type /Font -/Subtype /Type1 -/Name /F9 -/BaseFont /Courier -/Encoding /WinAnsiEncoding >> -endobj -38 0 obj -<< /Type /Font -/Subtype /Type1 -/Name /F2 -/BaseFont /Helvetica-Oblique -/Encoding /WinAnsiEncoding >> -endobj -39 0 obj -<< /Type /Font -/Subtype /Type1 -/Name /F7 -/BaseFont /Times-Bold -/Encoding /WinAnsiEncoding >> -endobj -1 0 obj -<< /Type /Pages -/Count 3 -/Kids [6 0 R 23 0 R 25 0 R ] >> -endobj -2 0 obj -<< /Type /Catalog -/Pages 1 0 R - /Outlines 26 0 R - /PageMode /UseOutlines - >> -endobj -3 0 obj -<< -/Font << /F3 34 0 R /F5 35 0 R /F1 36 0 R /F9 37 0 R /F2 38 0 R /F7 39 0 R >> -/ProcSet [ /PDF /ImageC 
/Text ] >> -endobj -9 0 obj -<< -/S /GoTo -/D [23 0 R /XYZ 85.0 659.0 null] ->> -endobj -11 0 obj -<< -/S /GoTo -/D [23 0 R /XYZ 85.0 567.066 null] ->> -endobj -13 0 obj -<< -/S /GoTo -/D [23 0 R /XYZ 85.0 448.732 null] ->> -endobj -15 0 obj -<< -/S /GoTo -/D [23 0 R /XYZ 85.0 303.998 null] ->> -endobj -17 0 obj -<< -/S /GoTo -/D [23 0 R /XYZ 85.0 225.264 null] ->> -endobj -19 0 obj -<< -/S /GoTo -/D [25 0 R /XYZ 85.0 602.2 null] ->> -endobj -21 0 obj -<< -/S /GoTo -/D [25 0 R /XYZ 85.0 436.266 null] ->> -endobj -26 0 obj -<< - /First 27 0 R - /Last 33 0 R ->> endobj -xref -0 40 -0000000000 65535 f -0000008656 00000 n -0000008728 00000 n -0000008820 00000 n -0000000015 00000 n -0000000071 00000 n -0000000823 00000 n -0000000943 00000 n -0000001010 00000 n -0000008954 00000 n -0000001145 00000 n -0000009017 00000 n -0000001281 00000 n -0000009083 00000 n -0000001416 00000 n -0000009149 00000 n -0000001553 00000 n -0000009215 00000 n -0000001690 00000 n -0000009281 00000 n -0000001825 00000 n -0000009345 00000 n -0000001961 00000 n -0000004695 00000 n -0000004803 00000 n -0000006503 00000 n -0000009411 00000 n -0000006611 00000 n -0000006814 00000 n -0000007008 00000 n -0000007197 00000 n -0000007415 00000 n -0000007599 00000 n -0000007805 00000 n -0000007994 00000 n -0000008107 00000 n -0000008217 00000 n -0000008325 00000 n -0000008431 00000 n -0000008547 00000 n -trailer -<< -/Size 40 -/Root 2 0 R -/Info 4 0 R ->> -startxref -9462 -%%EOF diff --git a/lucene/docs/demo4.html b/lucene/docs/demo4.html deleted file mode 100644 index 39ebfdf41f7..00000000000 --- a/lucene/docs/demo4.html +++ /dev/null @@ -1,452 +0,0 @@ - - - - - - - - - Apache Lucene - Basic Demo Sources Walkthrough - - - - - - - - - - -
    - - - -
    - - - - - - - - - - - - -
    -
    -
    -
    - -
    - - -
    - -
    - -   -
    - - - - - -
    - -

    - Apache Lucene - Basic Demo Sources Walkthrough -

    - - - - -

    About the Code

    -
    -

    -In this section we walk through the sources behind the basic Lucene Web Application demo: where to -find them, their parts and their function. This section is intended for Java developers wishing to -understand how to use Lucene in their applications or for those involved in deploying web -applications based on Lucene. -

    -
    - - - - -

    Location of the source (developers/deployers)

    -
    -

    -Relative to the directory created when you extracted Lucene or retrieved it from Subversion, you -should see a directory called src which in turn contains a directory called -jsp. This is the root for all of the Lucene web demo. -

    -

    -Within this directory you should see index.jsp. Bring this up in vi or your editor of -choice. -

    -
    - - - -

    index.jsp (developers/deployers)

    -
    -

    -This jsp page is pretty boring by itself. All it does is include a header, display a form and -include a footer. If you look at the form, it has two fields: query (where you enter -your search criteria) and maxresults where you specify the number of results per page. -By the structure of this JSP it should be easy to customize it without even editing this particular -file. You could simply change the header and footer. Let's look at the header.jsp -(located in the same directory) next. -

    -
    - - - -

    header.jsp (developers/deployers)

    -
    -

    -The header is also very simple by itself. The only thing it does is include the -configuration.jsp (which you looked at in the last section of this guide) and set the -title and a brief header. This would be a good place to put your own custom HTML to "pretty" things -up a bit. We won't cover the footer because all it does is display the footer and close your tags. -Let's look at the results.jsp, the meat of this application, next. -

    -
    - - - -

    results.jsp (developers)

    -
    -

    -Most of the functionality lies in results.jsp. Much of it is for paging the search -results, which we'll not cover here as it's commented well enough. The first thing in this page is -the actual imports for the Lucene classes and Lucene demo classes. These classes are loaded from -the jars included in the WEB-INF/lib directory in the luceneweb.war file. -

    -

    -You'll notice that this file includes the same header and footer as index.jsp. From -there it constructs an IndexSearcher with the -indexLocation that was specified in configuration.jsp. If there is an -error of any kind in opening the index, it is displayed to the user and the boolean flag -error is set to tell the rest of the sections of the jsp not to continue. -

    -

    -From there, this jsp attempts to get the search criteria, the start index (used for paging) and the -maximum number of results per page. If the maximum results per page is not set or not valid then it -and the start index are set to default values. If only the start index is invalid it is set to a -default value. If the criteria isn't provided then a servlet error is thrown (it is assumed that -this is the result of url tampering or some form of browser malfunction). -

    -

    -The jsp moves on to construct a StandardAnalyzer to -analyze the search text. This matches the analyzer used during indexing (IndexHTML), which is generally -recommended. This is passed to the QueryParser along with the -criteria to construct a Query -object. You'll also notice the string literal "contents" included. This specifies -that the search should cover the contents field and not the title, -url or some other field in the indexed documents. If there is any error in -constructing a Query object an -error is displayed to the user. -

    -

    -In the next section of the jsp the IndexSearcher is asked to search -given the query object. The results are returned in a collection called hits. If the -length property of the hits collection is 0 (meaning there were no results) then an -error is displayed to the user and the error flag is set. -

    -

    -Finally the jsp iterates through the hits collection, taking the current page into -account, and displays properties of the Document objects we talked about in -the first walkthrough. These objects contain "known" fields specific to their indexer (in this case -IndexHTML constructs a document -with "url", "title" and "contents"). -

    -

    -Please note that in a real deployment of Lucene, it's best to instantiate IndexSearcher and QueryParser once, and then -share them across search requests, instead of re-instantiating per search request. -

    -
    - - - -

    More sources (developers)

    -
    -

    -There are additional sources used by the web app that were not specifically covered by either -walkthrough. For example the HTML parser, the IndexHTML class and HTMLDocument class. These are very -similar to the classes covered in the first example, with properties specific to parsing and -indexing HTML. This is beyond our scope; however, by now you should feel like you're "getting -started" with Lucene. -

    -
    - - - -

    Where to go from here? (everyone!)

    -
    -

    -There are a number of things this demo doesn't do or doesn't do quite right. For instance, you may -have noticed that documents in the root context are unreachable (unless you reconfigure Tomcat to -support that context or redirect to it), anywhere where the directory doesn't quite match the -context mapping, you'll have a broken link in your results. If you want to index non-local files or -have some other needs this isn't supported, plus there may be security issues with running the -indexing application from your webapps directory. There are a number of things left for you the -developer to do. -

    -

    -In time some of these things may be added to Lucene as features (if you've got a good idea we'd love -to hear it!), but for now: this is where you begin and the search engine/indexer ends. Lastly, one -would assume you'd want to follow the above advice and customize the application to look a little -more fancy than black on white with "Lucene Template" at the top. We'll see you on the Lucene -Users' or Developers' mailing lists! -

    -
    - - - -

    When to contact the Author

    -
    -

    -Please resist the urge to contact the authors of this document (without bribes of fame and fortune -attached). First contact the mailing lists, taking care to Ask Questions The Smart Way. -Certainly you'll get the most help that way as well. That being said, feedback, and modifications -to this document and samples are ever so greatly appreciated. They are just best sent to the lists -or posted as patches, so that -everyone can share in them. Thanks for understanding! -

    -
    - - -
    - -
     
    -
    - - - diff --git a/lucene/docs/demo4.pdf b/lucene/docs/demo4.pdf deleted file mode 100644 index 29eb9975317..00000000000 --- a/lucene/docs/demo4.pdf +++ /dev/null @@ -1,389 +0,0 @@ -%PDF-1.3 -%ª«¬­ -4 0 obj -<< /Type /Info -/Producer (FOP 0.20.5) >> -endobj -5 0 obj -<< /Length 755 /Filter [ /ASCII85Decode /FlateDecode ] - >> -stream -Gb!$E9on!^&;KZL(%)M@C]'T];['NJWi`:9,OM2sOA-]])e/(XD#*gb6Q-mN$7g75_gr\bVIQ.jGG&B:^TM++YE+/C'\!1.4)TgL]Yq.A%<+S98#8eJBLQS/]Djgds0HRZt:LJ.rh2\'jags5FX3@$T>UGH^RdfH81CYiIilruGeDt\_4[H6uK_i&?`V`"rm-$p&ik-Q5k)J@n(ZVm*19#/t8+[:2IFrrj^CRjKU%>$2VqQFu,7f+53SUon+Q5QlO?0#)R4cBJY8`'WnJJa80[)Nc>Rgf[b$8iYC;%b>;q3E*a2T>0\],$EP3oW0H]Vt7*%@q"+mU=dY?S&BRc_/#g>&OdMc-LE8>&9K"V]0?('~> -endstream -endobj -6 0 obj -<< /Type /Page -/Parent 1 0 R -/MediaBox [ 0 0 612 792 ] -/Resources 3 0 R -/Contents 5 0 R -/Annots 7 0 R ->> -endobj -7 0 obj -[ -8 0 R -10 0 R -12 0 R -14 0 R -16 0 R -18 0 R -20 0 R -22 0 R -] -endobj -8 0 obj -<< /Type /Annot -/Subtype /Link -/Rect [ 102.0 519.166 185.996 507.166 ] -/C [ 0 0 0 ] -/Border [ 0 0 0 ] -/A 9 0 R -/H /I ->> -endobj -10 0 obj -<< /Type /Annot -/Subtype /Link -/Rect [ 102.0 500.966 330.608 488.966 ] -/C [ 0 0 0 ] -/Border [ 0 0 0 ] -/A 11 0 R -/H /I ->> -endobj -12 0 obj -<< /Type /Annot -/Subtype /Link -/Rect [ 102.0 482.766 266.636 470.766 ] -/C [ 0 0 0 ] -/Border [ 0 0 0 ] -/A 13 0 R -/H /I ->> -endobj -14 0 obj -<< /Type /Annot -/Subtype /Link -/Rect [ 102.0 464.566 271.952 452.566 ] -/C [ 0 0 0 ] -/Border [ 0 0 0 ] -/A 15 0 R -/H /I ->> -endobj -16 0 obj -<< /Type /Annot -/Subtype /Link -/Rect [ 102.0 446.366 221.312 434.366 ] -/C [ 0 0 0 ] -/Border [ 0 0 0 ] -/A 17 0 R -/H /I ->> -endobj -18 0 obj -<< /Type /Annot -/Subtype /Link -/Rect [ 102.0 428.166 237.956 416.166 ] -/C [ 0 0 0 ] -/Border [ 0 0 0 ] -/A 19 0 R -/H /I ->> -endobj -20 0 obj -<< /Type /Annot -/Subtype /Link -/Rect [ 102.0 409.966 283.592 397.966 ] -/C [ 0 0 0 ] -/Border [ 0 0 0 ] -/A 21 0 R -/H /I ->> -endobj -22 0 obj -<< /Type /Annot 
-/Subtype /Link -/Rect [ 102.0 391.766 243.308 379.766 ] -/C [ 0 0 0 ] -/Border [ 0 0 0 ] -/A 23 0 R -/H /I ->> -endobj -24 0 obj -<< /Length 2510 /Filter [ /ASCII85Decode /FlateDecode ] - >> -stream -Gatm>>E@Ms(4Q"]&GhP(QnT.Ah>5]:4)KR[d;9:#(-?r;A_L`HMln[UAq0b#=gE,Z/SWue#V\'!GWFCE4,:uWEq.V!n^kWhE>#mte,0uQfDH-6,^,\elV,(km:U'o\BK*45ep/-1k3:1!p9eGK@*3Yd7gal]C]7:p[TBERSKb)X"B=W6cqq>Q;3cFe=S(HQ$9O/+FC8Zg&%qZ9Ghjrm+Tul"Ge=cG$GnqQU/m:u3BgQ3_l-OKV%%a!6=QKrp*X0KR!QB6:n]^gOXda09'p`7Ss[3SV+ug)LH8EqHMsdCaDco%VGg(*>cYR6XtpqqgPm*>e3<8!JTe0>nFsqgD1>'1\ep/,=]@pHBs#0`(#"+Hd,sfY;.9nQB$_j6@QSI`)@e=@MC>kgVhrNlRWb.F-da`QKG+XSR[`"S^)"naNQHfqLjK9>TsGHHjo^IX"D4lqLp%*KTNW0?I-IseMH\3GBp5;8"oXmfP+eQZfsN0T27Y`D2edM4afDK8qGnOnbGWM\:L:l:at&qYfl3!O?7J1`eAS/lSPO;^Gq@fcs%[H&OYUB^Rd,kDU(^Dq4A+V[DikCO$@[&H6qGrPL^#<,`mOW3]rb!ndSqCit-7m3HDTHG/P[BB#R`YTjH%:^/Z"AS7jYTa=-J3QS=s\^eFq1p'SFXpk"'$GFngWNlu9RW2qJ$sjlQNh<6@1oo6g:qBRN%f5H+EFtAn00HI!DL/#$k_Z&7bA(l45?qCpXD>3UBTuoEsauObVP3\BS$C1p^lHSpRp[WXF;AN.>j[U[lT0eFB,?olLe3732k"q1i:!WE[QN[K'h9NRF#IFb8cWUTdqsk+dfCqBS)I.*tMAce=+C1S36=;ei@5n9ugY-`G+5mdj'S)o5L:E@N"O!i@%B$dBhQ]hXDj`o0!KVCKoGPXHSgr$6IMdoW!^^8']61K;FrLbjPR.PYKG>I'.6CO/tfK<;^ir8CXaAigm2<>TX]fFLdER]=6$sC9(J'tpk)sdpsG=i`l0+S?%C*lj.7pQ,f90+BG"m%,H'E;/cHX[a\lZdO#86bX96a;*%L$8#7>CMr5:IReXCRmO'W-=&ZRiBC-&=q(aa8ZNBX`D^b)0*H(19$^?N)q4Tn>fnsKT%mM)UAD]d5TI`P3d)H`#O3o#8Ds[%##=5:GOJ,8nb39V[`4+IQ:)n:C">Lp6Upg::W!'o+1k55++,!@1&'!8AbKd:AK$[icsQ*e2e#Y2;4l$VcnOA>_L.rW:&o&(Z0rsb5"(r"^NMlnT?33U4L_^S,RBH!JGqW\(Bc8q!s^k!O*g7p5FB?0TsqXTN1G7hob.kK@DlG1*(CqY[f^0joD"O&<2U4Cg*[E.cI]B%rnK)@_n@(\cS!T^YcKga2(F"Q%rn)Z0E~> -endstream -endobj -25 0 obj -<< /Type /Page -/Parent 1 0 R -/MediaBox [ 0 0 612 792 ] -/Resources 3 0 R -/Contents 24 0 R ->> -endobj -26 0 obj -<< /Length 2630 /Filter [ /ASCII85Decode /FlateDecode ] - >> -stream 
-Gau0FD3*^8%fSZ,_TeG7!4+l(gVJ:d\o"o0fE0g^!(G;G%uRJ7lI],egG>>uqP3Gr-5FXd4']%XS]J&]nfdgjPigfam^pa"T6J#ULS+&(et21f8Wpmh;;?YM)eQ&g3/TfP]3k!_D=7Dn5'Hi(^=hl@]O@*2s*SIL)(0=]7F%cNIfD0iFjm?CDju+:D(r'b-*-4T5Lp#N*"IbbMiB<@9g1c`:=*.jWI5.Nh\(_Gu48dfH*]sIP<5WFmOk6>aCW;e""P=gC\2DH/LuqgF;\.o]4G>9gK2/k:QE@_&3t0$tP4uC_Q5jD=/(oBnSH/%&9W/YB3#?XZRA9tDqIF(fo/UEMl>:-$GL##`;BVrl?He>6-0qMbbI%@YbpY7F6\R4]KL^Jfo$8oY`!nFp`EFYnbsC9!(.Mk:dSU3%3bkT:36@B)YnI"DoCIaXH^$Ep%K_QK-/J0].op+G:ct(E`F5U[Wh;Af,KQJj2'0TO8UT3+TO0a_:*5muV,/VGAqj4;*pU3r`iq>kY3d<-L*3=BPMI0_fs%b:Q5C40abe[nDi!367P-K,faH-KV,GV1h(XZK*im#8iH/U]T'RA.]dRVpeYMd.CV3'3/O]fJfO6#;eAPEhQ)UdGBIL;_It:r+)TreOBQUVj^\1+9"al;LjW\mTV1u!P>*u!`qSJ\gLU?U'h7"Q;+6%eC1,1(=(ONDJe`2c5qOCcie<8F1SCaD^Kkd\P/_Jc,GSFe.fZ3r?+a=.Bqt_9@S&%ot^)50#4C!V?S6_\h?lQhA;(]%q#M\:i<%H2mSoD)J"S/.mh09(/>S2Zd&UC>:0sX-5L?R8Ue?g4%L\O\,Zs7B2])D#ONc>C06:M-'`Fjd=mJ?[Lfk9e_"adPIZ/qNSO=lX`dr)iGQ$JtEY&5=<,8lS:]U<-Q/qAuf`qUacNKm5Q4_HUBbg>,]>ii5R&1H;UNc`^8D0A6'jIEZ+3a7`PAn0oH0M:NL]ooS&OVlr#oi+;*]$g0JFn(f&UIL=rg_U"T_5!kS%`k.tH:$oo'LNq^`,F-CP3>;"`Kc^%X0@1jBn[O>2:Q:ic',#CVm>%)[>$k[bs.Ma-jA]W.JoOPr[:8cc;8jbn7*nCRaWG[r6XQPY*"q42[HN;3GQNf'*'Vb>J]%FMZQOb"mbh$n&CYfeiD>LeBHmrJRilt&tLPW>fh^?3n*@]EMtU\[X"P2EE8Gm;uH$a4s?_s\MFOIFGjV]rSeg7Q':BClc]a-rcdH#rU8BCB9&b\Nr@.NIIE,]<'c"\F1J+;A(M59E_DZYIcOhm`ro4;YJXlb1I5RAZQs1i\M(&^5H3<2r,Ae*0g:91!I?MF-t2!k\JtW:^!tCJ0O9k=P=5n-qPJ8R'YTSQrpb,%;DgL](DNS%!k1rSXoEjf"''s7b^+j(8)8).%02@NRe72*kYXks4/`IL(k_\#t(COM&3Cs1rH$k],&CK,\Yb+8%T*Has))]^4!OugES`&io7kn2)-VftSK.Xjlo9OF`E1t(*Q-BqO9)F+dWLrsQ\8EZn+!tN)0VXXQ1]9pkN83QhcT?/Uf['@#ZYj\*0Im*3JTUi=E8rX^aKBT%i+-_I-$K\RM'&fh\\C>6HjkrjFk:=?>TbAShNH8Jer1)&4o(CFP!&Lr;tKYJf9t?Kq*:WN)5*$I53q4=6/:e3]BJ5nFb.WWd^(DFcQ0j/@5tY@!A$E<&53k8;2kL%_sd2+&j'5!W;,CNhoOhLM>^kULbp%Z'#ac3$9Tu08*B2oEI\T@`h2=pI6$oTpuFRp-cPkI&89dBDEJD`]c/fjrM@q]#^ZU?5Or;V49~> -endstream -endobj -27 0 obj -<< /Type /Page -/Parent 1 0 R -/MediaBox [ 0 0 612 792 ] -/Resources 3 0 R -/Contents 26 0 R ->> -endobj -28 0 obj -<< /Length 1780 /Filter [ /ASCII85Decode 
/FlateDecode ] - >> -stream -Gat%$968iG&AII30Y'07M%RHD^#`+(d`EZ3ea]@$!@4!UC62g/d#.D[$U,gf'I$,h@p9iTSK=;Ko6Kq>]6SN<3M*FHCo`OWc#WKYX2C`DR+(Y]jS\9;cfBqZ?m0[:cpTuiumgS-`Y<]+Z:d]ej666hR<5dj`$&"lAg?[VCe^_H@GB=h?0Z[I`J@]`V:/`>`9kTGUqk9"@6a$j6bN.f6]2"YaY:t$f!>q6U>TYuA[iFjr&cmjm#^b93DKg7F%d44pk/D1lo*b\F"S>3n*4j647$ZN$:6*FFrh(UIgK%Q-@Iq""C$B.?GJBOYP7'CEg^N1ABTSJ\E5:e<@ol,mg6efN8AE8GYKPt1!oS4OXB&Ul>GV:3-3Sg^6/\?CcY+].!.;q!,MK9_FcoK6qnHap[S@+FD4OHe?6'MIQ&s<)ZA]ab>[^K;I-%&u+7?h@/4bf[Gm0sj87;s`R3OYMb'<)ZTZ@i]4HF3o)XYR`:X=]*X&d>i+K/)3?KlLF?lk1""cdZAI(:Ja/XHZM@m$D(G=pfh\PHPRU+CYerLk>GG(7-^47+FJ74uiRN%97LH99_N2u'"`9+hn0=3U*tV%qo=#M<06'&7d$OfP@-nS1$-pNH!f5u.6>61jqZKP;MEr`>Y]#l)D$@;%g9jp`_%(K/:dIB`j%LoW3A^eMYgP,O!P:\GqDkIm,7Z7OK$fFCRecmP'Q'Q.3*#GfH)N:1<2*SsM_IM(g-$9a=+ZEr_Ua9D](hVF?='bc\p$Yi__3j2EC9O^P=e#C>bCk9Q_[dL#1!!.oXs^DjCYCOoK1+mnB^jSh!'Sc7cI@E*t^.[>Q)M168GtFAt\.J,XFNt.Rb`H^'4sj`_M:)[Z%j?<%WD)bd`afYtMt/jCjtnhf9qrh$atV-F9&23)H^;>%Db0/aa&GcD#bPL-[1(\=G@t+>1K'/!#sEUq"2AReJ?7/d5X:d6U%G,m%80eR0(4V4lC85$[+U%dTtf^bMg>GD;]_B7&DFi6&b=cAc]LccX([P,&LFVj2.;F"NS/J"CO:+lNL;&LFIn4dYIhru!&@WZnDX=A21/Q!1n`RRsm.:1#Rf%_Jl"])-#AX%Tet4BU26GEAlI%^M2k$X.sPE/P78,&qj'^7dOrh!'_g0sg(#G#.q/Wd?JeC880#$8>oZ1olgR&V$/BdA[#o;>\%"30Aq-Uk,;oOX:I):NU51*hmUumT_hqH1[:S6JlL&;+.]LEmdbh`g`<@/%WXjXjFF]3[qN[H"YAX9?5]`#Oe2$*<~> -endstream -endobj -29 0 obj -<< /Type /Page -/Parent 1 0 R -/MediaBox [ 0 0 612 792 ] -/Resources 3 0 R -/Contents 28 0 R ->> -endobj -31 0 obj -<< - /Title (\376\377\0\61\0\40\0\101\0\142\0\157\0\165\0\164\0\40\0\164\0\150\0\145\0\40\0\103\0\157\0\144\0\145) - /Parent 30 0 R - /Next 32 0 R - /A 9 0 R ->> endobj -32 0 obj -<< - /Title (\376\377\0\62\0\40\0\114\0\157\0\143\0\141\0\164\0\151\0\157\0\156\0\40\0\157\0\146\0\40\0\164\0\150\0\145\0\40\0\163\0\157\0\165\0\162\0\143\0\145\0\40\0\50\0\144\0\145\0\166\0\145\0\154\0\157\0\160\0\145\0\162\0\163\0\57\0\144\0\145\0\160\0\154\0\157\0\171\0\145\0\162\0\163\0\51) - /Parent 30 0 R - /Prev 31 0 R - /Next 33 0 R - /A 11 0 R ->> endobj -33 0 obj -<< 
- /Title (\376\377\0\63\0\40\0\151\0\156\0\144\0\145\0\170\0\56\0\152\0\163\0\160\0\40\0\50\0\144\0\145\0\166\0\145\0\154\0\157\0\160\0\145\0\162\0\163\0\57\0\144\0\145\0\160\0\154\0\157\0\171\0\145\0\162\0\163\0\51) - /Parent 30 0 R - /Prev 32 0 R - /Next 34 0 R - /A 13 0 R ->> endobj -34 0 obj -<< - /Title (\376\377\0\64\0\40\0\150\0\145\0\141\0\144\0\145\0\162\0\56\0\152\0\163\0\160\0\40\0\50\0\144\0\145\0\166\0\145\0\154\0\157\0\160\0\145\0\162\0\163\0\57\0\144\0\145\0\160\0\154\0\157\0\171\0\145\0\162\0\163\0\51) - /Parent 30 0 R - /Prev 33 0 R - /Next 35 0 R - /A 15 0 R ->> endobj -35 0 obj -<< - /Title (\376\377\0\65\0\40\0\162\0\145\0\163\0\165\0\154\0\164\0\163\0\56\0\152\0\163\0\160\0\40\0\50\0\144\0\145\0\166\0\145\0\154\0\157\0\160\0\145\0\162\0\163\0\51) - /Parent 30 0 R - /Prev 34 0 R - /Next 36 0 R - /A 17 0 R ->> endobj -36 0 obj -<< - /Title (\376\377\0\66\0\40\0\115\0\157\0\162\0\145\0\40\0\163\0\157\0\165\0\162\0\143\0\145\0\163\0\40\0\50\0\144\0\145\0\166\0\145\0\154\0\157\0\160\0\145\0\162\0\163\0\51) - /Parent 30 0 R - /Prev 35 0 R - /Next 37 0 R - /A 19 0 R ->> endobj -37 0 obj -<< - /Title (\376\377\0\67\0\40\0\127\0\150\0\145\0\162\0\145\0\40\0\164\0\157\0\40\0\147\0\157\0\40\0\146\0\162\0\157\0\155\0\40\0\150\0\145\0\162\0\145\0\77\0\40\0\50\0\145\0\166\0\145\0\162\0\171\0\157\0\156\0\145\0\41\0\51) - /Parent 30 0 R - /Prev 36 0 R - /Next 38 0 R - /A 21 0 R ->> endobj -38 0 obj -<< - /Title (\376\377\0\70\0\40\0\127\0\150\0\145\0\156\0\40\0\164\0\157\0\40\0\143\0\157\0\156\0\164\0\141\0\143\0\164\0\40\0\164\0\150\0\145\0\40\0\101\0\165\0\164\0\150\0\157\0\162) - /Parent 30 0 R - /Prev 37 0 R - /A 23 0 R ->> endobj -39 0 obj -<< /Type /Font -/Subtype /Type1 -/Name /F3 -/BaseFont /Helvetica-Bold -/Encoding /WinAnsiEncoding >> -endobj -40 0 obj -<< /Type /Font -/Subtype /Type1 -/Name /F5 -/BaseFont /Times-Roman -/Encoding /WinAnsiEncoding >> -endobj -41 0 obj -<< /Type /Font -/Subtype /Type1 -/Name /F1 -/BaseFont /Helvetica -/Encoding 
/WinAnsiEncoding >> -endobj -42 0 obj -<< /Type /Font -/Subtype /Type1 -/Name /F9 -/BaseFont /Courier -/Encoding /WinAnsiEncoding >> -endobj -43 0 obj -<< /Type /Font -/Subtype /Type1 -/Name /F2 -/BaseFont /Helvetica-Oblique -/Encoding /WinAnsiEncoding >> -endobj -44 0 obj -<< /Type /Font -/Subtype /Type1 -/Name /F7 -/BaseFont /Times-Bold -/Encoding /WinAnsiEncoding >> -endobj -1 0 obj -<< /Type /Pages -/Count 4 -/Kids [6 0 R 25 0 R 27 0 R 29 0 R ] >> -endobj -2 0 obj -<< /Type /Catalog -/Pages 1 0 R - /Outlines 30 0 R - /PageMode /UseOutlines - >> -endobj -3 0 obj -<< -/Font << /F3 39 0 R /F5 40 0 R /F1 41 0 R /F9 42 0 R /F2 43 0 R /F7 44 0 R >> -/ProcSet [ /PDF /ImageC /Text ] >> -endobj -9 0 obj -<< -/S /GoTo -/D [25 0 R /XYZ 85.0 659.0 null] ->> -endobj -11 0 obj -<< -/S /GoTo -/D [25 0 R /XYZ 85.0 567.066 null] ->> -endobj -13 0 obj -<< -/S /GoTo -/D [25 0 R /XYZ 85.0 453.932 null] ->> -endobj -15 0 obj -<< -/S /GoTo -/D [25 0 R /XYZ 85.0 335.598 null] ->> -endobj -17 0 obj -<< -/S /GoTo -/D [25 0 R /XYZ 85.0 230.464 null] ->> -endobj -19 0 obj -<< -/S /GoTo -/D [27 0 R /XYZ 85.0 271.8 null] ->> -endobj -21 0 obj -<< -/S /GoTo -/D [27 0 R /XYZ 85.0 166.666 null] ->> -endobj -23 0 obj -<< -/S /GoTo -/D [29 0 R /XYZ 85.0 484.6 null] ->> -endobj -30 0 obj -<< - /First 31 0 R - /Last 38 0 R ->> endobj -xref -0 45 -0000000000 65535 f -0000012567 00000 n -0000012646 00000 n -0000012738 00000 n -0000000015 00000 n -0000000071 00000 n -0000000917 00000 n -0000001037 00000 n -0000001111 00000 n -0000012872 00000 n -0000001246 00000 n -0000012935 00000 n -0000001383 00000 n -0000013001 00000 n -0000001520 00000 n -0000013067 00000 n -0000001657 00000 n -0000013133 00000 n -0000001794 00000 n -0000013199 00000 n -0000001931 00000 n -0000013263 00000 n -0000002068 00000 n -0000013329 00000 n -0000002205 00000 n -0000004808 00000 n -0000004916 00000 n -0000007639 00000 n -0000007747 00000 n -0000009620 00000 n -0000013393 00000 n -0000009728 00000 n -0000009901 00000 n 
-0000010270 00000 n -0000010563 00000 n -0000010862 00000 n -0000011108 00000 n -0000011360 00000 n -0000011661 00000 n -0000011905 00000 n -0000012018 00000 n -0000012128 00000 n -0000012236 00000 n -0000012342 00000 n -0000012458 00000 n -trailer -<< -/Size 45 -/Root 2 0 R -/Info 4 0 R ->> -startxref -13444 -%%EOF diff --git a/lucene/docs/fileformats.html b/lucene/docs/fileformats.html index 95acbd2d8b8..59fb8eb4017 100644 --- a/lucene/docs/fileformats.html +++ b/lucene/docs/fileformats.html @@ -129,11 +129,8 @@ document.write("Last Published: " + document.lastModified); - - -