Fix some spell check issues

Bruno P. Kinoshita 2017-10-30 23:57:52 +13:00 committed by Tomas Fernandez Lobbe
parent 4e34a0cb41
commit 5310152450
57 changed files with 69 additions and 69 deletions

View File

@@ -30256,7 +30256,7 @@ public final class HTMLStripCharFilter extends BaseCharFilter {
 /**
- * Reports an error that occured while scanning.
+ * Reports an error that occurred while scanning.
  *
  * In a wellformed scanner (no or only correct usage of
  * yypushback(int) and a match-all fallback rule) this method

View File

@@ -256,7 +256,7 @@ public final class WordDelimiterIterator {
  * Determines if the text at the given position indicates an English possessive which should be removed
  *
  * @param pos Position in the text to check if it indicates an English possessive
- * @return {@code true} if the text at the position indicates an English posessive, {@code false} otherwise
+ * @return {@code true} if the text at the position indicates an English possessive, {@code false} otherwise
  */
 private boolean endsWithPossessive(int pos) {
 return (stemEnglishPossessive &&

View File

@@ -26,7 +26,7 @@ import java.io.IOException;
 /**
  * A TokenFilter which applies a Pattern to each token in the stream,
- * replacing match occurances with the specified replacement string.
+ * replacing match occurrences with the specified replacement string.
  *
  * <p>
  * <b>Note:</b> Depending on the input and the pattern used and the input
@@ -43,7 +43,7 @@ public final class PatternReplaceFilter extends TokenFilter {
 private final Matcher m;
 /**
- * Constructs an instance to replace either the first, or all occurances
+ * Constructs an instance to replace either the first, or all occurrences
  *
  * @param in the TokenStream to process
  * @param p the patterm to apply to each Token

View File

@@ -31,7 +31,7 @@ import org.apache.lucene.util.automaton.RegExp;
 /**
  * This tokenizer uses a Lucene {@link RegExp} or (expert usage) a pre-built determinized {@link Automaton}, to locate tokens.
  * The regexp syntax is more limited than {@link PatternTokenizer}, but the tokenization is quite a bit faster. This is just
- * like {@link SimplePatternTokenizer} except that the pattern shold make valid token separator characters, like
+ * like {@link SimplePatternTokenizer} except that the pattern should make valid token separator characters, like
  * {@code String.split}. Empty string tokens are never produced.
  *
  * @lucene.experimental

View File

@@ -131,7 +131,7 @@ public final class WikipediaTokenizer extends Tokenizer {
 /**
  * Creates a new instance of the {@link org.apache.lucene.analysis.wikipedia.WikipediaTokenizer}. Attaches the
- * <code>input</code> to a the newly created JFlex scanner.
+ * <code>input</code> to the newly created JFlex scanner.
  *
  * @param tokenOutput One of {@link #TOKENS_ONLY}, {@link #UNTOKENIZED_ONLY}, {@link #BOTH}
  */
@@ -142,7 +142,7 @@ public final class WikipediaTokenizer extends Tokenizer {
 /**
  * Creates a new instance of the {@link org.apache.lucene.analysis.wikipedia.WikipediaTokenizer}. Attaches the
- * <code>input</code> to a the newly created JFlex scanner. Uses the given {@link org.apache.lucene.util.AttributeFactory}.
+ * <code>input</code> to the newly created JFlex scanner. Uses the given {@link org.apache.lucene.util.AttributeFactory}.
  *
  * @param tokenOutput One of {@link #TOKENS_ONLY}, {@link #UNTOKENIZED_ONLY}, {@link #BOTH}
  */
@@ -314,4 +314,4 @@ public final class WikipediaTokenizer extends Tokenizer {
 final int finalOffset = correctOffset(scanner.yychar() + scanner.yylength());
 this.offsetAtt.setOffset(finalOffset, finalOffset);
 }
 }

View File

@@ -28,7 +28,7 @@ import java.util.*;
 import java.lang.reflect.Constructor;
 /**
- * Create a new {@link org.apache.lucene.analysis.Analyzer} and set it it in the getRunData() for use by all future tasks.
+ * Create a new {@link org.apache.lucene.analysis.Analyzer} and set it in the getRunData() for use by all future tasks.
  *
  */
 public class NewAnalyzerTask extends PerfTask {

View File

@@ -45,7 +45,7 @@ public class TrecJudge implements Judge {
  * 19 0 doc303 1
  * 19 0 doc7295 0
  * </pre>
- * @param reader where judgments are read from.
+ * @param reader where judgements are read from.
  * @throws IOException If there is a low-level I/O error.
  */
 public TrecJudge (BufferedReader reader) throws IOException {

View File

@@ -85,7 +85,7 @@ public class BM25NBClassifier implements Classifier<BytesRef> {
  * @param analyzer an {@link Analyzer} used to analyze unseen text
  * @param query a {@link Query} to eventually filter the docs used for training the classifier, or {@code null}
  * if all the indexed docs should be used
- * @param classFieldName the name of the field used as the output for the classifier NOTE: must not be havely analyzed
+ * @param classFieldName the name of the field used as the output for the classifier NOTE: must not be heavely analyzed
  * as the returned class will be a token indexed for this field
  * @param textFieldNames the name of the fields used as the inputs for the classifier, NO boosting supported per field
  */

View File

@@ -62,7 +62,7 @@ public class SimpleNaiveBayesDocumentClassifier extends SimpleNaiveBayesClassifi
  * @param indexReader the reader on the index to be used for classification
  * @param query a {@link org.apache.lucene.search.Query} to eventually filter the docs used for training the classifier, or {@code null}
  * if all the indexed docs should be used
- * @param classFieldName the name of the field used as the output for the classifier NOTE: must not be havely analyzed
+ * @param classFieldName the name of the field used as the output for the classifier NOTE: must not be heavely analyzed
  * as the returned class will be a token indexed for this field
  * @param textFieldNames the name of the fields used as the inputs for the classifier, they can contain boosting indication e.g. title^10
  */

View File

@@ -62,7 +62,7 @@ public class NearestFuzzyQuery extends Query {
 /**
  * Default constructor
  *
- * @param analyzer the analyzer used to proecss the query text
+ * @param analyzer the analyzer used to process the query text
  */
 public NearestFuzzyQuery(Analyzer analyzer) {
 this.analyzer = analyzer;

View File

@@ -30,7 +30,7 @@ import org.apache.lucene.util.FixedBitSet;
 /**
  * Extension of {@link PostingsWriterBase}, adding a push
  * API for writing each element of the postings. This API
- * is somewhat analagous to an XML SAX API, while {@link
+ * is somewhat analogous to an XML SAX API, while {@link
  * PostingsWriterBase} is more like an XML DOM API.
  *
  * @see PostingsReaderBase

View File

@@ -31,7 +31,7 @@ public abstract class Compressor implements Closeable {
 protected Compressor() {}
 /**
- * Compress bytes into <code>out</code>. It it the responsibility of the
+ * Compress bytes into <code>out</code>. It is the responsibility of the
  * compressor to add all necessary information so that a {@link Decompressor}
  * will know when to stop decompressing bytes from the stream.
  */

View File

@@ -28,7 +28,7 @@ import org.apache.lucene.index.SegmentWriteState;
 /**
  * Lucene 6.0 point format, which encodes dimensional values in a block KD-tree structure
- * for fast 1D range and N dimesional shape intersection filtering.
+ * for fast 1D range and N dimensional shape intersection filtering.
  * See <a href="https://www.cs.duke.edu/~pankaj/publications/papers/bkd-sstd.pdf">this paper</a> for details.
  *
  * <p>This data structure is written as a series of blocks on disk, with an in-memory perfectly balanced

View File

@@ -58,7 +58,7 @@ import org.apache.lucene.util.Version;
  * <li>IsCompoundFile --&gt; {@link DataOutput#writeByte Int8}</li>
  * <li>IndexSort --&gt; {@link DataOutput#writeVInt Int32} count, followed by {@code count} SortField</li>
  * <li>SortField --&gt; {@link DataOutput#writeString String} field name, followed by {@link DataOutput#writeVInt Int32} sort type ID,
- * followed by {@link DataOutput#writeByte Int8} indicatating reversed sort, followed by a type-specific encoding of the optional missing value
+ * followed by {@link DataOutput#writeByte Int8} indicating reversed sort, followed by a type-specific encoding of the optional missing value
  * <li>Footer --&gt; {@link CodecUtil#writeFooter CodecFooter}</li>
  * </ul>
  * Field Descriptions:

View File

@@ -44,7 +44,7 @@ import java.util.List;
 type: MultiPolygon (union of polygons) is also accepted.
 */
-/** Does minimal parsing of a GeoJSON object, to extract either Polygon or MultiPolygon, either directly as a the top-level type, or if
+/** Does minimal parsing of a GeoJSON object, to extract either Polygon or MultiPolygon, either directly as the top-level type, or if
  * the top-level type is Feature, as the geometry of that feature. */
 @SuppressWarnings("unchecked")

View File

@@ -47,7 +47,7 @@ import org.apache.lucene.util.ThreadInterruptedException;
  *
  * <p>If more than {@link #getMaxMergeCount} merges are
  * requested then this class will forcefully throttle the
- * incoming threads by pausing until one more more merges
+ * incoming threads by pausing until one more merges
  * complete.</p>
  *
  * <p>This class attempts to detect whether the index is

View File

@@ -21,7 +21,7 @@ import java.io.IOException;
 import org.apache.lucene.codecs.DocValuesProducer;
-/** Abstrast base class implementing a {@link DocValuesProducer} that has no doc values. */
+/** Abstract base class implementing a {@link DocValuesProducer} that has no doc values. */
 public abstract class EmptyDocValuesProducer extends DocValuesProducer {
 /** Sole constructor */

View File

@@ -427,7 +427,7 @@ public final class IndexWriterConfig extends LiveIndexWriterConfig {
  * Information about merges, deletes and a
  * message when maxFieldLength is reached will be printed
  * to this. Must not be null, but {@link InfoStream#NO_OUTPUT}
- * may be used to supress output.
+ * may be used to suppress output.
  */
 public IndexWriterConfig setInfoStream(InfoStream infoStream) {
 if (infoStream == null) {

View File

@@ -30,7 +30,7 @@ import org.apache.lucene.document.DocumentStoredFieldVisitor;
  * <p><b>NOTE</b>: a {@code StoredFieldVisitor} implementation
  * should not try to load or visit other stored documents in
  * the same reader because the implementation of stored
- * fields for most codecs is not reeentrant and you will see
+ * fields for most codecs is not reentrant and you will see
  * strange exceptions as a result.
  *
  * <p>See {@link DocumentStoredFieldVisitor}, which is a

View File

@@ -50,7 +50,7 @@ public class ControlledRealTimeReopenThread<T> extends Thread implements Closeab
 /**
  * Create ControlledRealTimeReopenThread, to periodically
- * reopen the a {@link ReferenceManager}.
+ * reopen the {@link ReferenceManager}.
  *
  * @param targetMaxStaleSec Maximum time until a new
  * reader must be opened; this sets the upper bound

View File

@@ -24,7 +24,7 @@ import org.apache.lucene.util.MathUtil;
 import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS;
 /**
- * The Scorer for DisjunctionMaxQuery. The union of all documents generated by the the subquery scorers
+ * The Scorer for DisjunctionMaxQuery. The union of all documents generated by the subquery scorers
  * is generated in document number order. The score for each document is the maximum of the scores computed
  * by the subquery scorers that generate that document, plus tieBreakerMultiplier times the sum of the scores
  * for the other subqueries that generate the document.

View File

@@ -238,7 +238,7 @@ public class SearcherLifetimeManager implements Closeable {
  * entries are passed to the Pruner in sorted (newest to
  * oldest IndexSearcher) order.
  *
- * <p><b>NOTE</b>: you must peridiocally call this, ideally
+ * <p><b>NOTE</b>: you must periodically call this, ideally
  * from the same background thread that opens new
  * searchers. */
 public synchronized void prune(Pruner pruner) throws IOException {

View File

@@ -172,7 +172,7 @@ public final class SearcherManager extends ReferenceManager<IndexSearcher> {
 }
 /**
- * Returns <code>true</code> if no changes have occured since this searcher
+ * Returns <code>true</code> if no changes have occurred since this searcher
  * ie. reader was opened, otherwise <code>false</code>.
  * @see DirectoryReader#isCurrent()
  */

View File

@@ -144,7 +144,7 @@ public abstract class FSDirectory extends BaseDirectory {
  * real path to ensure it can correctly lock the index directory and no other process
  * can interfere with changing possible symlinks to the index directory inbetween.
  * If you want to use symlinks and change them dynamically, close all
- * {@code IndexWriters} and create a new {@code FSDirecory} instance.
+ * {@code IndexWriters} and create a new {@code FSDirectory} instance.
  * @param path the path of the directory
  * @param lockFactory the lock factory to use, or null for the default
  * ({@link NativeFSLockFactory});
@@ -168,7 +168,7 @@ public abstract class FSDirectory extends BaseDirectory {
  * real path to ensure it can correctly lock the index directory and no other process
  * can interfere with changing possible symlinks to the index directory inbetween.
  * If you want to use symlinks and change them dynamically, close all
- * {@code IndexWriters} and create a new {@code FSDirecory} instance.
+ * {@code IndexWriters} and create a new {@code FSDirectory} instance.
  *
  * <p>Currently this returns {@link MMapDirectory} for Linux, MacOSX, Solaris,
  * and Windows 64-bit JREs, {@link NIOFSDirectory} for other

View File

@@ -98,7 +98,7 @@ public abstract class IndexInput extends DataInput implements Cloneable,Closeabl
 /**
  * Creates a slice of this index input, with the given description, offset, and length.
- * The slice is seeked to the beginning.
+ * The slice is sought to the beginning.
  */
 public abstract IndexInput slice(String sliceDescription, long offset, long length) throws IOException;

View File

@@ -45,7 +45,7 @@ import org.apache.lucene.util.IOUtils;
  * could be left when the JVM exits abnormally.</p>
  *
  * <p>The primary benefit of {@link NativeFSLockFactory} is
- * that locks (not the lock file itsself) will be properly
+ * that locks (not the lock file itself) will be properly
  * removed (by the OS) if the JVM has an abnormal exit.</p>
  *
  * <p>Note that, unlike {@link SimpleFSLockFactory}, the existence of

View File

@@ -346,7 +346,7 @@ public final class ByteBlockPool {
 }
 /**
- * Reads bytes bytes out of the pool starting at the given offset with the given
+ * Reads bytes out of the pool starting at the given offset with the given
  * length into the given byte array at offset <tt>off</tt>.
  * <p>Note: this method allows to copy across block boundaries.</p>
  */

View File

@@ -138,7 +138,7 @@ public class CompiledAutomaton {
  * to determine whether it is finite. If simplify is true, we run
  * possibly expensive operations to determine if the automaton is one
  * the cases in {@link CompiledAutomaton.AUTOMATON_TYPE}. If simplify
- * requires determinizing the autaomaton then only maxDeterminizedStates
+ * requires determinizing the automaton then only maxDeterminizedStates
  * will be created. Any more than that will cause a
  * TooComplexToDeterminizeException.
  */

View File

@@ -45,7 +45,7 @@ final public class MinimizationOperations {
 /**
  * Minimizes (and determinizes if not already deterministic) the given
- * automaton using Hopcroft's algorighm.
+ * automaton using Hopcroft's algorithm.
  * @param maxDeterminizedStates maximum number of states determinizing the
  * automaton can result in. Set higher to allow more complex queries and
  * lower to prevent memory exhaustion.

View File

@@ -380,7 +380,7 @@ public class RegExp {
  * <code>RegExp(s, ALL)</code>.
  *
  * @param s regexp string
- * @exception IllegalArgumentException if an error occured while parsing the
+ * @exception IllegalArgumentException if an error occurred while parsing the
  * regular expression
  */
 public RegExp(String s) throws IllegalArgumentException {
@@ -393,7 +393,7 @@ public class RegExp {
  * @param s regexp string
  * @param syntax_flags boolean 'or' of optional syntax constructs to be
  * enabled
- * @exception IllegalArgumentException if an error occured while parsing the
+ * @exception IllegalArgumentException if an error occurred while parsing the
  * regular expression
  */
 public RegExp(String s, int syntax_flags) throws IllegalArgumentException {

View File

@@ -103,7 +103,7 @@ public final class Util {
  * works when the outputs are ascending in order with
  * the inputs.
  * For example, simple ordinals (0, 1,
- * 2, ...), or file offets (when appending to a file)
+ * 2, ...), or file offsets (when appending to a file)
  * fit this. */
 public static IntsRef getByOutput(FST<Long> fst, long targetOutput) throws IOException {

View File

@@ -93,7 +93,7 @@ public final class DrillDownQuery extends Query {
 /** Adds one dimension of drill downs; if you pass the same
  * dimension more than once it is OR'd with the previous
- * cofnstraints on that dimension, and all dimensions are
+ * constraints on that dimension, and all dimensions are
  * AND'd against each other and the base query. */
 public void add(String dim, String... path) {
 String indexedField = config.getDimConfig(dim).indexFieldName;

View File

@@ -40,7 +40,7 @@ import org.apache.lucene.util.PriorityQueue;
 /** {@link Facets} implementation that computes counts for
- * all uniqute long values, more efficiently counting small values (0-1023) using an int array,
+ * all unique long values, more efficiently counting small values (0-1023) using an int array,
  * and switching to a <code>HashMap</code> for values above 1023.
  * Retrieve all facet counts, in value order, with {@link #getAllChildrenSortByValue},
  * or get the topN values sorted by count with {@link #getTopChildrenSortByCount}.

View File

@@ -52,9 +52,9 @@ public abstract class LabelToOrdinal {
 }
 /**
- * Adds a new label if its not yet in the table.
+ * Adds a new label if it is not yet in the table.
  * Throws an {@link IllegalArgumentException} if the same label with
- * a different ordinal was previoulsy added to this table.
+ * a different ordinal was previously added to this table.
  */
 public abstract void addLabel(FacetLabel label, int ordinal);

View File

@@ -19,7 +19,7 @@ package org.apache.lucene.facet.taxonomy.writercache;
 import org.apache.lucene.facet.taxonomy.FacetLabel;
 /**
- * An an LRU cache of mapping from name to int.
+ * An LRU cache of mapping from name to int.
  * Used to cache Ordinals of category paths.
  * It uses as key, hash of the path instead of the path.
  * This way the cache takes less RAM, but correctness depends on

View File

@@ -22,7 +22,7 @@ import java.util.LinkedHashMap;
 import org.apache.lucene.facet.taxonomy.FacetLabel;
 /**
- * An an LRU cache of mapping from name to int.
+ * An LRU cache of mapping from name to int.
  * Used to cache Ordinals of category paths.
  *
  * @lucene.experimental

View File

@@ -59,7 +59,7 @@ public class LazyDocument {
  * <p>
  * <b>NOTE:</b> This method must be called once for each value of the field
  * name specified in sequence that the values exist. This method may not be
- * used to generate multiple, lazy, StorableField instances refering to
+ * used to generate multiple, lazy, StorableField instances referring to
  * the same underlying StorableField instance.
  * </p>
  * <p>

View File

@@ -34,7 +34,7 @@ import org.apache.lucene.search.similarities.ClassicSimilarity;
  * subclasses can choose between.
  * </p>
  *
- * @see <a href="doc-files/ss.gnuplot">A Gnuplot file used to generate some of the visualizations refrenced from each function.</a>
+ * @see <a href="doc-files/ss.gnuplot">A Gnuplot file used to generate some of the visualizations referenced from each function.</a>
  */
 public class SweetSpotSimilarity extends ClassicSimilarity {

View File

@@ -71,7 +71,7 @@ interface CharStream {
  * Backs up the input stream by amount steps. Lexer calls this method if it
  * had already read some characters, but could not use them to match a
  * (longer) token. So, they will be used again as the prefix of the next
- * token and it is the implemetation's responsibility to do this right.
+ * token and it is the implementation's responsibility to do this right.
  */
 void backup(int amount);

View File

@@ -24,7 +24,7 @@ import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.Query;
-/** Builer for {@link SynonymQueryNode}. */
+/** Builder for {@link SynonymQueryNode}. */
 public class SynonymQueryNodeBuilder implements StandardQueryBuilder {
 /** Sole constructor. */

View File

@@ -88,7 +88,7 @@ public class PointQueryNode extends QueryNodeImpl implements
  *
  * @param escaper the {@link EscapeQuerySyntax} used to escape the value {@link String}
  *
- * @return the value converte to {@link String} and escaped
+ * @return the value converted to {@link String} and escaped
  */
 protected CharSequence getTermEscaped(EscapeQuerySyntax escaper) {
 return escaper.escape(numberFormat.format(this.value),

View File

@@ -71,7 +71,7 @@ interface CharStream {
  * Backs up the input stream by amount steps. Lexer calls this method if it
  * had already read some characters, but could not use them to match a
  * (longer) token. So, they will be used again as the prefix of the next
- * token and it is the implemetation's responsibility to do this right.
+ * token and it is the implementation's responsibility to do this right.
  */
 void backup(int amount);

View File

@@ -71,7 +71,7 @@ interface CharStream {
  * Backs up the input stream by amount steps. Lexer calls this method if it
  * had already read some characters, but could not use them to match a
  * (longer) token. So, they will be used again as the prefix of the next
- * token and it is the implemetation's responsibility to do this right.
+ * token and it is the implementation's responsibility to do this right.
  */
 void backup(int amount);

View File

@@ -391,7 +391,7 @@ public class ReplicationClient implements Closeable {
 }
 /**
- * Executes the update operation immediately, irregardess if an update thread
+ * Executes the update operation immediately, irregardless if an update thread
  * is running or not.
  */
 public void updateNow() throws IOException {

View File

@@ -30,7 +30,7 @@ import org.apache.lucene.store.IndexInput;
  * revision will contain files from a single source. However, some applications
  * may require to treat a collection of indexes as a single entity so that the
  * files from all sources are replicated together, to guarantee consistency
- * beween them. For example, an application which indexes facets will need to
+ * between them. For example, an application which indexes facets will need to
  * replicate both the search and taxonomy indexes together, to guarantee that
  * they match at the client side.
  *
@@ -60,7 +60,7 @@ public interface Revision extends Comparable<Revision> {
 /**
  * Returns an {@link IndexInput} for the given fileName and source. It is the
- * caller's respnsibility to close the {@link IndexInput} when it has been
+ * caller's responsibility to close the {@link IndexInput} when it has been
  * consumed.
  */
 public InputStream open(String source, String fileName) throws IOException;

View File

@@ -730,8 +730,8 @@ public abstract class ReplicaNode extends Node {
 }
 /** Carefully determine if the file on the primary, identified by its {@code String fileName} along with the {@link FileMetaData}
- * "summarizing" its contents, is precisely the same file that we have locally. If the file does not exist locally, or if its its header
- * (inclues the segment id), length, footer (including checksum) differ, then this returns false, else true. */
+ * "summarizing" its contents, is precisely the same file that we have locally. If the file does not exist locally, or if its header
+ * (includes the segment id), length, footer (including checksum) differ, then this returns false, else true. */
 private boolean fileIsIdentical(String fileName, FileMetaData srcMetaData) throws IOException {
 FileMetaData destMetaData = readLocalFileMetaData(fileName);

View File

@@ -37,7 +37,7 @@ import org.apache.lucene.util.fst.Util;
 /** Iterates through terms in this field; this class is public so users
  * can cast it to call {@link #seekExact(BytesRef, long)} for
- * optimistic-concurreny, and also {@link #getVersion} to get the
+ * optimistic-concurrency, and also {@link #getVersion} to get the
  * version of the currently seek'd term. */
 public final class IDVersionSegmentTermsEnum extends TermsEnum {

View File

@@ -677,7 +677,7 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable {
 /**
  * Create the results based on the search hits.
  * Can be overridden by subclass to add particular behavior (e.g. weight transformation).
- * Note that there is no prefix toke (the {@code prefixToken} argument will
+ * Note that there is no prefix token (the {@code prefixToken} argument will
  * be null) whenever the final token in the incoming request was in fact finished
  * (had trailing characters, such as white-space).
  *

View File

@@ -113,7 +113,7 @@ public class JaspellTernarySearchTrie implements Accountable {
 }
 /**
- * Compares characters by alfabetical order.
+ * Compares characters by alphabetical order.
  *
  *@param cCompare2
  * The first char in the comparison.
@@ -204,7 +204,7 @@ public class JaspellTernarySearchTrie implements Accountable {
  *@param file
  * The <code>Path</code> with the data to load into the Trie.
  *@exception IOException
- * A problem occured while reading the data.
+ * A problem occurred while reading the data.
  */
 public JaspellTernarySearchTrie(Path file) throws IOException {
 this(file, false);
@@ -221,7 +221,7 @@ public class JaspellTernarySearchTrie implements Accountable {
  * If true, the file is compressed with the GZIP algorithm, and if
  * false, the file is a normal text document.
  *@exception IOException
- * A problem occured while reading the data.
+ * A problem occurred while reading the data.
  */
 public JaspellTernarySearchTrie(Path file, boolean compression)
 throws IOException {

View File

@@ -43218,7 +43218,7 @@ phoneme|3088
 aftenposten|3088
 hayek|3088
 groups.yahoo.com|3088
-occured|3087
+occurred|3087
 intimidate|3087
 workable|3087
 groin|3087

View File

@@ -106,7 +106,7 @@ public class TestContentStreamDataSource extends AbstractDataImportHandlerTestCa
 Thread.sleep(500);
 }
 }
-fail("Commit should have occured but it did not");
+fail("Commit should have occurred but it did not");
 }
 private static class SolrInstance {

View File

@@ -1369,7 +1369,7 @@ public class CoreContainer {
 }
 /**
- * Returns an immutable Map of Exceptions that occured when initializing
+ * Returns an immutable Map of Exceptions that occurred when initializing
  * SolrCores (either at startup, or do to runtime requests to create cores)
  * keyed off of the name (String) of the SolrCore that had the Exception
  * during initialization.

View File

@@ -178,7 +178,7 @@ public abstract class AnalysisRequestHandlerBase extends RequestHandlerBase {
 tokenStream.end();
 return tokens;
 } catch (IOException ioe) {
-throw new RuntimeException("Error occured while iterating over tokenstream", ioe);
+throw new RuntimeException("Error occurred while iterating over tokenstream", ioe);
 }
 }
@@ -206,7 +206,7 @@ public abstract class AnalysisRequestHandlerBase extends RequestHandlerBase {
 }
 tokenStream.end(); // TODO should we capture?
 } catch (IOException ioe) {
-throw new RuntimeException("Error occured while iterating over tokenstream", ioe);
+throw new RuntimeException("Error occurred while iterating over tokenstream", ioe);
 } finally {
 IOUtils.closeWhileHandlingException(tokenStream);
 }

View File

@@ -1890,7 +1890,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
 // if we aren't the leader, then we need to check that updates were not re-ordered
 if (bucketVersion != 0 && bucketVersion < versionOnUpdate) {
 // we're OK... this update has a version higher than anything we've seen
-// in this bucket so far, so we know that no reordering has yet occured.
+// in this bucket so far, so we know that no reordering has yet occurred.
 bucket.updateHighest(versionOnUpdate);
 } else {
 // there have been updates higher than the current update. we need to check

View File

@@ -87,7 +87,7 @@ The default is `<em>`.
 The default is `</em>`.
 `hl.encoder`::
-If blank, the default, then the stored text will be returned without any escaping/encoding performed by the highlighter. If set to `html` then special HMTL/XML characters will be encoded (e.g., `&` becomes `\&amp;`). The pre/post snippet characters are never encoded.
+If blank, the default, then the stored text will be returned without any escaping/encoding performed by the highlighter. If set to `html` then special HTML/XML characters will be encoded (e.g., `&` becomes `\&amp;`). The pre/post snippet characters are never encoded.
 `hl.maxAnalyzedChars`::
 The character limit to look for highlights, after which no highlighting will be done. This is mostly only a performance concern for an _analysis_ based offset source since it's the slowest. See <<Schema Options and Performance Considerations>>.

View File

@@ -657,11 +657,11 @@ public class HttpSolrClient extends SolrClient {
 + getBaseURL(), e);
 } catch (SocketTimeoutException e) {
 throw new SolrServerException(
-"Timeout occured while waiting response from server at: "
+"Timeout occurred while waiting response from server at: "
 + getBaseURL(), e);
 } catch (IOException e) {
 throw new SolrServerException(
-"IOException occured when talking to server at: " + getBaseURL(), e);
+"IOException occurred when talking to server at: " + getBaseURL(), e);
 } finally {
 if (shouldClose) {
 Utils.consumeFully(entity);

View File

@@ -25,7 +25,7 @@ import org.apache.solr.common.util.SimpleOrderedMap;
 import static org.apache.solr.common.params.CommonParams.ID;
 /**
- * Models the basic information related to a single "tolerated" error that occured during updates.
+ * Models the basic information related to a single "tolerated" error that occurred during updates.
  * This class is only useful when the <code>TolerantUpdateProcessorFactory</code> is used in an update
  * processor chain
  */