diff --git a/lucene/contrib/CHANGES.txt b/lucene/contrib/CHANGES.txt
index b2dda552ffa..0075761b963 100644
--- a/lucene/contrib/CHANGES.txt
+++ b/lucene/contrib/CHANGES.txt
@@ -140,6 +140,11 @@ New features
* LUCENE-2298: Add analyzers/stempel, an algorithmic stemmer with support for
the Polish language. (Andrzej Bialecki via Robert Muir)
+ * LUCENE-2414: Add ICUTokenizer, a tailorable tokenizer that implements Unicode
+ Text Segmentation. This tokenizer is useful for documents or collections with
+ multiple languages. The default configuration includes special support for
+ Thai, Lao, Myanmar, and Khmer. (Robert Muir, Uwe Schindler)
+
Build
* LUCENE-2124: Moved the JDK-based collation support from contrib/collation
diff --git a/lucene/contrib/icu/build.xml b/lucene/contrib/icu/build.xml
index af0794c8919..9bab9a11311 100644
--- a/lucene/contrib/icu/build.xml
+++ b/lucene/contrib/icu/build.xml
@@ -43,7 +43,39 @@
+ * http://bugs.icu-project.org/trac/ticket/5901: RBBI.getRuleStatus(), hoist to + * BreakIterator from RuleBasedBreakIterator + *
+ * DictionaryBasedBreakIterator is a subclass of RuleBasedBreakIterator, but + * doesn't actually behave as a subclass: it always returns 0 for + * getRuleStatus(): + * http://bugs.icu-project.org/trac/ticket/4730: Thai RBBI, no boundary type + * tags + * @lucene.experimental + */ +abstract class BreakIteratorWrapper { + protected final CharArrayIterator textIterator = new CharArrayIterator(); + protected char text[]; + protected int start; + protected int length; + + abstract int next(); + abstract int current(); + abstract int getRuleStatus(); + abstract void setText(CharacterIterator text); + + void setText(char text[], int start, int length) { + this.text = text; + this.start = start; + this.length = length; + textIterator.setText(text, start, length); + setText(textIterator); + } + + /** + * If its a DictionaryBasedBreakIterator, it doesn't return rulestatus, so + * treat it like a generic BreakIterator If its any other + * RuleBasedBreakIterator, the rule status can be used for token type. If its + * any other BreakIterator, the rulestatus method is not available, so treat + * it like a generic BreakIterator. + */ + static BreakIteratorWrapper wrap(BreakIterator breakIterator) { + if (breakIterator instanceof RuleBasedBreakIterator + && !(breakIterator instanceof DictionaryBasedBreakIterator)) + return new RBBIWrapper((RuleBasedBreakIterator) breakIterator); + else + return new BIWrapper(breakIterator); + } + + /** + * RuleBasedBreakIterator wrapper: RuleBasedBreakIterator (as long as its not + * a DictionaryBasedBreakIterator) behaves correctly. 
+ */ + static final class RBBIWrapper extends BreakIteratorWrapper { + private final RuleBasedBreakIterator rbbi; + + RBBIWrapper(RuleBasedBreakIterator rbbi) { + this.rbbi = rbbi; + } + + @Override + int current() { + return rbbi.current(); + } + + @Override + int getRuleStatus() { + return rbbi.getRuleStatus(); + } + + @Override + int next() { + return rbbi.next(); + } + + @Override + void setText(CharacterIterator text) { + rbbi.setText(text); + } + } + + /** + * Generic BreakIterator wrapper: Either the rulestatus method is not + * available or always returns 0. Calculate a rulestatus here so it behaves + * like RuleBasedBreakIterator. + * + * Note: This is slower than RuleBasedBreakIterator. + */ + static final class BIWrapper extends BreakIteratorWrapper { + private final BreakIterator bi; + private int status; + + BIWrapper(BreakIterator bi) { + this.bi = bi; + } + + @Override + int current() { + return bi.current(); + } + + @Override + int getRuleStatus() { + return status; + } + + @Override + int next() { + int current = bi.current(); + int next = bi.next(); + status = calcStatus(current, next); + return next; + } + + private int calcStatus(int current, int next) { + if (current == BreakIterator.DONE || next == BreakIterator.DONE) + return RuleBasedBreakIterator.WORD_NONE; + + int begin = start + current; + int end = start + next; + + int codepoint; + for (int i = begin; i < end; i += UTF16.getCharCount(codepoint)) { + codepoint = UTF16.charAt(text, 0, end, begin); + + if (UCharacter.isDigit(codepoint)) + return RuleBasedBreakIterator.WORD_NUMBER; + else if (UCharacter.isLetter(codepoint)) { + // TODO: try to separately specify ideographic, kana? 
+ // [currently all bundled as letter for this case] + return RuleBasedBreakIterator.WORD_LETTER; + } + } + + return RuleBasedBreakIterator.WORD_NONE; + } + + @Override + void setText(CharacterIterator text) { + bi.setText(text); + status = RuleBasedBreakIterator.WORD_NONE; + } + } +} diff --git a/lucene/contrib/icu/src/java/org/apache/lucene/analysis/icu/segmentation/CharArrayIterator.java b/lucene/contrib/icu/src/java/org/apache/lucene/analysis/icu/segmentation/CharArrayIterator.java new file mode 100644 index 00000000000..be25dd0eb5c --- /dev/null +++ b/lucene/contrib/icu/src/java/org/apache/lucene/analysis/icu/segmentation/CharArrayIterator.java @@ -0,0 +1,118 @@ +package org.apache.lucene.analysis.icu.segmentation; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import java.text.CharacterIterator; + +/** + * Wraps a char[] as CharacterIterator for processing with a BreakIterator + * @lucene.experimental + */ +final class CharArrayIterator implements CharacterIterator { + private char array[]; + private int start; + private int index; + private int length; + private int limit; + + public char [] getText() { + return array; + } + + public int getStart() { + return start; + } + + public int getLength() { + return length; + } + + /** + * Set a new region of text to be examined by this iterator + * + * @param array text buffer to examine + * @param start offset into buffer + * @param length maximum length to examine + */ + void setText(final char array[], int start, int length) { + this.array = array; + this.start = start; + this.index = start; + this.length = length; + this.limit = start + length; + } + + public char current() { + return (index == limit) ? DONE : array[index]; + } + + public char first() { + index = start; + return current(); + } + + public int getBeginIndex() { + return 0; + } + + public int getEndIndex() { + return length; + } + + public int getIndex() { + return index - start; + } + + public char last() { + index = (limit == start) ? 
limit : limit - 1; + return current(); + } + + public char next() { + if (++index >= limit) { + index = limit; + return DONE; + } else { + return current(); + } + } + + public char previous() { + if (--index < start) { + index = start; + return DONE; + } else { + return current(); + } + } + + public char setIndex(int position) { + if (position < getBeginIndex() || position > getEndIndex()) + throw new IllegalArgumentException("Illegal Position: " + position); + index = start + position; + return current(); + } + + @Override + public Object clone() { + CharArrayIterator clone = new CharArrayIterator(); + clone.setText(array, start, length); + clone.index = index; + return clone; + } +} diff --git a/lucene/contrib/icu/src/java/org/apache/lucene/analysis/icu/segmentation/CompositeBreakIterator.java b/lucene/contrib/icu/src/java/org/apache/lucene/analysis/icu/segmentation/CompositeBreakIterator.java new file mode 100644 index 00000000000..ba394b44db9 --- /dev/null +++ b/lucene/contrib/icu/src/java/org/apache/lucene/analysis/icu/segmentation/CompositeBreakIterator.java @@ -0,0 +1,126 @@ +package org.apache.lucene.analysis.icu.segmentation; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import com.ibm.icu.lang.UScript; +import com.ibm.icu.text.BreakIterator; + +/** + * An internal BreakIterator for multilingual text, following recommendations + * from: UAX #29: Unicode Text Segmentation. (http://unicode.org/reports/tr29/) + *
+ * See http://unicode.org/reports/tr29/#Tailoring for the motivation of this + * design. + *
+ * Text is first divided into script boundaries. The processing is then + * delegated to the appropriate break iterator for that specific script. + *
+ * This break iterator also allows you to retrieve the ISO 15924 script code + * associated with a piece of text. + *
+ * See also UAX #29, UTR #24 + * @lucene.experimental + */ +final class CompositeBreakIterator { + private final ICUTokenizerConfig config; + private final BreakIteratorWrapper wordBreakers[] = new BreakIteratorWrapper[UScript.CODE_LIMIT]; + + private BreakIteratorWrapper rbbi; + private final ScriptIterator scriptIterator = new ScriptIterator(); + + private char text[]; + + CompositeBreakIterator(ICUTokenizerConfig config) { + this.config = config; + } + + /** + * Retrieve the next break position. If the RBBI range is exhausted within the + * script boundary, examine the next script boundary. + * + * @return the next break position or BreakIterator.DONE + */ + int next() { + int next = rbbi.next(); + while (next == BreakIterator.DONE && scriptIterator.next()) { + rbbi = getBreakIterator(scriptIterator.getScriptCode()); + rbbi.setText(text, scriptIterator.getScriptStart(), + scriptIterator.getScriptLimit() - scriptIterator.getScriptStart()); + next = rbbi.next(); + } + return (next == BreakIterator.DONE) ? BreakIterator.DONE : next + + scriptIterator.getScriptStart(); + } + + /** + * Retrieve the current break position. + * + * @return the current break position or BreakIterator.DONE + */ + int current() { + final int current = rbbi.current(); + return (current == BreakIterator.DONE) ? BreakIterator.DONE : current + + scriptIterator.getScriptStart(); + } + + /** + * Retrieve the rule status code (token type) from the underlying break + * iterator + * + * @return rule status code (see RuleBasedBreakIterator constants) + */ + int getRuleStatus() { + return rbbi.getRuleStatus(); + } + + /** + * Retrieve the UScript script code for the current token. This code can be + * decoded with UScript into a name or ISO 15924 code. + * + * @return UScript script code for the current token. 
+ */ + int getScriptCode() { + return scriptIterator.getScriptCode(); + } + + /** + * Set a new region of text to be examined by this iterator + * + * @param text buffer of text + * @param start offset into buffer + * @param length maximum length to examine + */ + void setText(final char text[], int start, int length) { + this.text = text; + scriptIterator.setText(text, start, length); + if (scriptIterator.next()) { + rbbi = getBreakIterator(scriptIterator.getScriptCode()); + rbbi.setText(text, scriptIterator.getScriptStart(), + scriptIterator.getScriptLimit() - scriptIterator.getScriptStart()); + } else { + rbbi = getBreakIterator(UScript.COMMON); + rbbi.setText(text, 0, 0); + } + } + + private BreakIteratorWrapper getBreakIterator(int scriptCode) { + if (wordBreakers[scriptCode] == null) + wordBreakers[scriptCode] = BreakIteratorWrapper.wrap(config.getBreakIterator(scriptCode)); + return wordBreakers[scriptCode]; + } +} diff --git a/lucene/contrib/icu/src/java/org/apache/lucene/analysis/icu/segmentation/DefaultICUTokenizerConfig.java b/lucene/contrib/icu/src/java/org/apache/lucene/analysis/icu/segmentation/DefaultICUTokenizerConfig.java new file mode 100644 index 00000000000..4da1e4dc8b4 --- /dev/null +++ b/lucene/contrib/icu/src/java/org/apache/lucene/analysis/icu/segmentation/DefaultICUTokenizerConfig.java @@ -0,0 +1,112 @@ +package org.apache.lucene.analysis.icu.segmentation; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.IOException; +import java.io.InputStream; + +import com.ibm.icu.lang.UScript; +import com.ibm.icu.text.BreakIterator; +import com.ibm.icu.text.RuleBasedBreakIterator; +import com.ibm.icu.util.ULocale; + +/** + * Default {@link ICUTokenizerConfig} that is generally applicable + * to many languages. + *
+ * Generally tokenizes Unicode text according to UAX#29 + * ({@link BreakIterator#getWordInstance(ULocale) BreakIterator.getWordInstance(ULocale.ROOT)}), + * but with the following tailorings: + *
+ * Words are broken across script boundaries, then segmented according to + * the BreakIterator and typing provided by the {@link ICUTokenizerConfig} + *
+ * @see ICUTokenizerConfig + * @lucene.experimental + */ +public final class ICUTokenizer extends Tokenizer { + private static final int IOBUFFER = 4096; + private final char buffer[] = new char[IOBUFFER]; + /** true length of text in the buffer */ + private int length = 0; + /** length in buffer that can be evaluated safely, up to a safe end point */ + private int usableLength = 0; + /** accumulated offset of previous buffers for this reader, for offsetAtt */ + private int offset = 0; + + private final CompositeBreakIterator breaker; /* tokenizes a char[] of text */ + private final ICUTokenizerConfig config; + private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); + private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); + private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class); + private final ScriptAttribute scriptAtt = addAttribute(ScriptAttribute.class); + + /** + * Construct a new ICUTokenizer that breaks text into words from the given + * Reader. + *+ * The default script-specific handling is used. + * + * @param input Reader containing text to tokenize. + * @see DefaultICUTokenizerConfig + */ + public ICUTokenizer(Reader input) { + this(input, new DefaultICUTokenizerConfig()); + } + + /** + * Construct a new ICUTokenizer that breaks text into words from the given + * Reader, using a tailored BreakIterator configuration. + * + * @param input Reader containing text to tokenize. 
+ * @param config Tailored BreakIterator configuration + */ + public ICUTokenizer(Reader input, ICUTokenizerConfig config) { + super(input); + this.config = config; + breaker = new CompositeBreakIterator(config); + } + + @Override + public boolean incrementToken() throws IOException { + clearAttributes(); + if (length == 0) + refill(); + while (!incrementTokenBuffer()) { + refill(); + if (length <= 0) // no more bytes to read; + return false; + } + return true; + } + + @Override + public void reset() throws IOException { + super.reset(); + breaker.setText(buffer, 0, 0); + length = usableLength = offset = 0; + } + + @Override + public void reset(Reader input) throws IOException { + super.reset(input); + reset(); + } + + @Override + public void end() throws IOException { + final int finalOffset = (length < 0) ? offset : offset + length; + offsetAtt.setOffset(finalOffset, finalOffset); + } + + /* + * This tokenizes text based upon the longest matching rule, and because of + * this, isn't friendly to a Reader. + * + * Text is read from the input stream in 4kB chunks. Within a 4kB chunk of + * text, the last unambiguous break point is found (in this implementation: + * white space character) Any remaining characters represent possible partial + * words, so are appended to the front of the next chunk. + * + * There is the possibility that there are no unambiguous break points within + * an entire 4kB chunk of text (binary data). So there is a maximum word limit + * of 4kB since it will not try to grow the buffer in this case. + */ + + /** + * Returns the last unambiguous break position in the text. 
+ * + * @return position of character, or -1 if one does not exist + */ + private int findSafeEnd() { + for (int i = length - 1; i >= 0; i--) + if (UCharacter.isWhitespace(buffer[i])) + return i + 1; + return -1; + } + + /** + * Refill the buffer, accumulating the offset and setting usableLength to the + * last unambiguous break position + * + * @throws IOException + */ + private void refill() throws IOException { + offset += usableLength; + int leftover = length - usableLength; + System.arraycopy(buffer, usableLength, buffer, 0, leftover); + int requested = buffer.length - leftover; + int returned = input.read(buffer, leftover, requested); + length = returned < 0 ? leftover : returned + leftover; + if (returned < requested) /* reader has been emptied, process the rest */ + usableLength = length; + else { /* still more data to be read, find a safe-stopping place */ + usableLength = findSafeEnd(); + if (usableLength < 0) + usableLength = length; /* + * more than IOBUFFER of text without space, + * gonna possibly truncate tokens + */ + } + + breaker.setText(buffer, 0, Math.max(0, usableLength)); + } + + /* + * return true if there is a token from the buffer, or null if it is + * exhausted. 
+ */ + private boolean incrementTokenBuffer() { + int start = breaker.current(); + if (start == BreakIterator.DONE) + return false; // BreakIterator exhausted + + // find the next set of boundaries, skipping over non-tokens (rule status 0) + int end = breaker.next(); + while (start != BreakIterator.DONE && breaker.getRuleStatus() == 0) { + start = end; + end = breaker.next(); + } + + if (start == BreakIterator.DONE) + return false; // BreakIterator exhausted + + termAtt.copyBuffer(buffer, start, end - start); + offsetAtt.setOffset(correctOffset(offset + start), correctOffset(offset + end)); + typeAtt.setType(config.getType(breaker.getScriptCode(), breaker.getRuleStatus())); + scriptAtt.setCode(breaker.getScriptCode()); + + return true; + } +} diff --git a/lucene/contrib/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerConfig.java b/lucene/contrib/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerConfig.java new file mode 100644 index 00000000000..cadc2d64f51 --- /dev/null +++ b/lucene/contrib/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerConfig.java @@ -0,0 +1,33 @@ +package org.apache.lucene.analysis.icu.segmentation; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import com.ibm.icu.text.BreakIterator; + +/** + * Class that allows for tailored Unicode Text Segmentation on + * a per-writing system basis. + * @lucene.experimental + */ +public abstract class ICUTokenizerConfig { + /** Return a breakiterator capable of processing a given script. */ + public abstract BreakIterator getBreakIterator(int script); + /** Return a token type value for a given script and BreakIterator + * rule status. */ + public abstract String getType(int script, int ruleStatus); +} diff --git a/lucene/contrib/icu/src/java/org/apache/lucene/analysis/icu/segmentation/LaoBreakIterator.java b/lucene/contrib/icu/src/java/org/apache/lucene/analysis/icu/segmentation/LaoBreakIterator.java new file mode 100644 index 00000000000..ffd4c337fd3 --- /dev/null +++ b/lucene/contrib/icu/src/java/org/apache/lucene/analysis/icu/segmentation/LaoBreakIterator.java @@ -0,0 +1,226 @@ +package org.apache.lucene.analysis.icu.segmentation; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import java.text.CharacterIterator; + +import com.ibm.icu.lang.UCharacter; +import com.ibm.icu.text.BreakIterator; +import com.ibm.icu.text.RuleBasedBreakIterator; +import com.ibm.icu.text.UnicodeSet; + +/** + * Syllable iterator for Lao text. + *
+ * This breaks Lao text into syllables according to: + * Syllabification of Lao Script for Line Breaking + * Phonpasit Phissamay, Valaxay Dalolay, Chitaphone Chanhsililath, Oulaiphone Silimasak, + * Sarmad Hussain, Nadir Durrani, Science Technology and Environment Agency, CRULP. + *
+ * Most work is accomplished with RBBI rules, however some additional special logic is needed + * that cannot be coded in a grammar, and this is implemented here. + *
+ * For example, what appears to be a final consonant might instead be part of the next syllable. + * Rules match in a greedy fashion, leaving an illegal sequence that matches no rules. + *
+ * Take for instance the text ກວ່າດອກ + * The first rule greedily matches ກວ່າດ, but then ອກ is encountered, which is illegal. + * What LaoBreakIterator does, according to the paper: + *
+ * Finally, LaoBreakIterator also takes care of the second concern mentioned in the paper. + * This is the issue of combining marks being in the wrong order (typos). + * @lucene.experimental + */ +public class LaoBreakIterator extends BreakIterator { + RuleBasedBreakIterator rules; + CharArrayIterator text; + + CharArrayIterator working = new CharArrayIterator(); + int workingOffset = 0; + + CharArrayIterator verifyText = new CharArrayIterator(); + RuleBasedBreakIterator verify; + + private static final UnicodeSet laoSet; + static { + laoSet = new UnicodeSet("[:Lao:]"); + laoSet.compact(); + laoSet.freeze(); + } + + public LaoBreakIterator(RuleBasedBreakIterator rules) { + this.rules = (RuleBasedBreakIterator) rules.clone(); + this.verify = (RuleBasedBreakIterator) rules.clone(); + } + + @Override + public int current() { + int current = rules.current(); + return current == BreakIterator.DONE ? BreakIterator.DONE : workingOffset + current; + } + + @Override + public int first() { + working.setText(this.text.getText(), this.text.getStart(), this.text.getLength()); + rules.setText(working); + workingOffset = 0; + int first = rules.first(); + return first == BreakIterator.DONE ? 
BreakIterator.DONE : workingOffset + first; + } + + @Override + public int following(int offset) { + throw new UnsupportedOperationException(); + } + + @Override + public CharacterIterator getText() { + return text; + } + + @Override + public int last() { + throw new UnsupportedOperationException(); + } + + @Override + public int next() { + int current = current(); + int next = rules.next(); + if (next == BreakIterator.DONE) + return next; + else + next += workingOffset; + + char c = working.current(); + int following = rules.next(); // lookahead + if (following != BreakIterator.DONE) { + following += workingOffset; + if (rules.getRuleStatus() == 0 && laoSet.contains(c) && verifyPushBack(current, next)) { + workingOffset = next - 1; + working.setText(text.getText(), text.getStart() + workingOffset, text.getLength() - workingOffset); + return next - 1; + } + rules.previous(); // undo the lookahead + } + + return next; + } + + @Override + public int next(int n) { + if (n < 0) + throw new UnsupportedOperationException("Backwards traversal is unsupported"); + + int result = current(); + while (n > 0) { + result = next(); + --n; + } + return result; + } + + @Override + public int previous() { + throw new UnsupportedOperationException("Backwards traversal is unsupported"); + } + + @Override + public void setText(CharacterIterator text) { + if (!(text instanceof CharArrayIterator)) + throw new UnsupportedOperationException("unsupported CharacterIterator"); + this.text = (CharArrayIterator) text; + ccReorder(this.text.getText(), this.text.getStart(), this.text.getLength()); + working.setText(this.text.getText(), this.text.getStart(), this.text.getLength()); + rules.setText(working); + workingOffset = 0; + } + + @Override + public void setText(String newText) { + CharArrayIterator ci = new CharArrayIterator(); + ci.setText(newText.toCharArray(), 0, newText.length()); + setText(ci); + } + + private boolean verifyPushBack(int current, int next) { + int shortenedSyllable = 
next - current - 1; + + verifyText.setText(text.getText(), text.getStart() + current, shortenedSyllable); + verify.setText(verifyText); + if (verify.next() != shortenedSyllable || verify.getRuleStatus() == 0) + return false; + + + verifyText.setText(text.getText(), text.getStart() + next - 1, text.getLength() - next + 1); + verify.setText(verifyText); + + return (verify.next() != BreakIterator.DONE && verify.getRuleStatus() != 0); + } + + // TODO: only bubblesort around runs of combining marks, instead of the entire text. + private void ccReorder(char[] text, int start, int length) { + boolean reordered; + do { + int prevCC = 0; + reordered = false; + for (int i = start; i < start + length; i++) { + final char c = text[i]; + final int cc = UCharacter.getCombiningClass(c); + if (cc > 0 && cc < prevCC) { + // swap + text[i] = text[i - 1]; + text[i - 1] = c; + reordered = true; + } else { + prevCC = cc; + } + } + + } while (reordered == true); + } + + /** + * Clone method. Creates another LaoBreakIterator with the same behavior + * and current state as this one. + * @return The clone. 
+ */ + @Override + public Object clone() { + LaoBreakIterator other = (LaoBreakIterator) super.clone(); + other.rules = (RuleBasedBreakIterator) rules.clone(); + other.verify = (RuleBasedBreakIterator) verify.clone(); + if (text != null) + other.text = (CharArrayIterator) text.clone(); + if (working != null) + other.working = (CharArrayIterator) working.clone(); + if (verifyText != null) + other.verifyText = (CharArrayIterator) verifyText.clone(); + return other; + } +} diff --git a/lucene/contrib/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ScriptIterator.java b/lucene/contrib/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ScriptIterator.java new file mode 100644 index 00000000000..4c327bc04fe --- /dev/null +++ b/lucene/contrib/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ScriptIterator.java @@ -0,0 +1,170 @@ +package org.apache.lucene.analysis.icu.segmentation; + +/** + * Copyright (C) 1999-2010, International Business Machines + * Corporation and others. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, and/or sell copies of the + * Software, and to permit persons to whom the Software is furnished to do so, + * provided that the above copyright notice(s) and this permission notice appear + * in all copies of the Software and that both the above copyright notice(s) and + * this permission notice appear in supporting documentation. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 
+ * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE + * LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR + * ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER + * IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * Except as contained in this notice, the name of a copyright holder shall not + * be used in advertising or otherwise to promote the sale, use or other + * dealings in this Software without prior written authorization of the + * copyright holder. + */ + +import com.ibm.icu.lang.UCharacter; +import com.ibm.icu.lang.UScript; +import com.ibm.icu.text.UTF16; + +/** + * An iterator that locates ISO 15924 script boundaries in text. + *
+ * This is not the same as simply looking at the Unicode block, or even the + * Script property. Some characters are 'common' across multiple scripts, and + * some 'inherit' the script value of text surrounding them. + *
+ * This is similar to ICU (internal-only) UScriptRun, with the following + * differences: + *
+Text Segmentation (Tokenization) divides document and query text into index terms +(typically words). Unicode provides special properties and rules so that this can +be done in a manner that works well with most languages. +
++Text Segmentation implements the word segmentation specified in +Unicode Text Segmentation. +Additionally the algorithm can be tailored based on writing system, for example +text in the Thai script is automatically delegated to a dictionary-based segmentation +algorithm. +
+
+ /**
+ * This tokenizer will work well in general for most languages.
+ */
+ Tokenizer tokenizer = new ICUTokenizer(reader);
+
+
ICUCollationKeyFilter
diff --git a/lucene/contrib/icu/src/resources/org/apache/lucene/analysis/icu/segmentation/Hebrew.brk b/lucene/contrib/icu/src/resources/org/apache/lucene/analysis/icu/segmentation/Hebrew.brk
new file mode 100644
index 00000000000..25e0b18b111
Binary files /dev/null and b/lucene/contrib/icu/src/resources/org/apache/lucene/analysis/icu/segmentation/Hebrew.brk differ
diff --git a/lucene/contrib/icu/src/resources/org/apache/lucene/analysis/icu/segmentation/Khmer.brk b/lucene/contrib/icu/src/resources/org/apache/lucene/analysis/icu/segmentation/Khmer.brk
new file mode 100644
index 00000000000..528c5bc4c42
Binary files /dev/null and b/lucene/contrib/icu/src/resources/org/apache/lucene/analysis/icu/segmentation/Khmer.brk differ
diff --git a/lucene/contrib/icu/src/resources/org/apache/lucene/analysis/icu/segmentation/Lao.brk b/lucene/contrib/icu/src/resources/org/apache/lucene/analysis/icu/segmentation/Lao.brk
new file mode 100644
index 00000000000..4d3dc11ee72
Binary files /dev/null and b/lucene/contrib/icu/src/resources/org/apache/lucene/analysis/icu/segmentation/Lao.brk differ
diff --git a/lucene/contrib/icu/src/resources/org/apache/lucene/analysis/icu/segmentation/Myanmar.brk b/lucene/contrib/icu/src/resources/org/apache/lucene/analysis/icu/segmentation/Myanmar.brk
new file mode 100644
index 00000000000..656304ee158
Binary files /dev/null and b/lucene/contrib/icu/src/resources/org/apache/lucene/analysis/icu/segmentation/Myanmar.brk differ
diff --git a/lucene/contrib/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestCharArrayIterator.java b/lucene/contrib/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestCharArrayIterator.java
new file mode 100644
index 00000000000..02b9d0a45f7
--- /dev/null
+++ b/lucene/contrib/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestCharArrayIterator.java
@@ -0,0 +1,109 @@
+package org.apache.lucene.analysis.icu.segmentation;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.text.CharacterIterator;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestCharArrayIterator extends LuceneTestCase { // verifies CharArrayIterator fulfills the java.text.CharacterIterator contract
+  public void testBasicUsage() {
+    CharArrayIterator ci = new CharArrayIterator();
+    ci.setText("testing".toCharArray(), 0, "testing".length());
+    assertEquals(0, ci.getBeginIndex());
+    assertEquals(7, ci.getEndIndex());
+    assertEquals(0, ci.getIndex());
+    assertEquals('t', ci.current());
+    assertEquals('e', ci.next());
+    assertEquals('g', ci.last());
+    assertEquals('n', ci.previous());
+    assertEquals('t', ci.first());
+    assertEquals(CharacterIterator.DONE, ci.previous()); // previous() at the begin index must return DONE
+  }
+  
+  public void testFirst() {
+    CharArrayIterator ci = new CharArrayIterator();
+    ci.setText("testing".toCharArray(), 0, "testing".length());
+    ci.next();
+    // Sets the position to getBeginIndex() and returns the character at that position. 
+    assertEquals('t', ci.first());
+    assertEquals(ci.getBeginIndex(), ci.getIndex());
+    // or DONE if the text is empty
+    ci.setText(new char[] {}, 0, 0); // empty text: begin == end
+    assertEquals(CharacterIterator.DONE, ci.first());
+  }
+  
+  public void testLast() {
+    CharArrayIterator ci = new CharArrayIterator();
+    ci.setText("testing".toCharArray(), 0, "testing".length());
+    // Sets the position to getEndIndex()-1 (getEndIndex() if the text is empty) 
+    // and returns the character at that position. 
+    assertEquals('g', ci.last());
+    assertEquals(ci.getIndex(), ci.getEndIndex() - 1);
+    // or DONE if the text is empty
+    ci.setText(new char[] {}, 0, 0);
+    assertEquals(CharacterIterator.DONE, ci.last());
+    assertEquals(ci.getEndIndex(), ci.getIndex()); // for empty text, last() leaves the index at getEndIndex()
+  }
+  
+  public void testCurrent() {
+    CharArrayIterator ci = new CharArrayIterator();
+    // Gets the character at the current position (as returned by getIndex()). 
+    ci.setText("testing".toCharArray(), 0, "testing".length());
+    assertEquals('t', ci.current());
+    ci.last();
+    ci.next(); // advance past the final character
+    // or DONE if the current position is off the end of the text.
+    assertEquals(CharacterIterator.DONE, ci.current());
+  }
+  
+  public void testNext() {
+    CharArrayIterator ci = new CharArrayIterator();
+    ci.setText("te".toCharArray(), 0, 2);
+    // Increments the iterator's index by one and returns the character at the new index. 
+    assertEquals('e', ci.next());
+    assertEquals(1, ci.getIndex());
+    // or DONE if the new position is off the end of the text range.
+    assertEquals(CharacterIterator.DONE, ci.next());
+    assertEquals(ci.getEndIndex(), ci.getIndex()); // index pins at getEndIndex() after running off the end
+  }
+  
+  public void testSetIndex() {
+    CharArrayIterator ci = new CharArrayIterator();
+    ci.setText("test".toCharArray(), 0, "test".length());
+    try {
+      ci.setIndex(5); // out of range: valid positions are getBeginIndex()..getEndIndex() (0..4)
+      fail();
+    } catch (Exception e) {
+      assertTrue(e instanceof IllegalArgumentException);
+    }
+  }
+  
+  public void testClone() {
+    char text[] = "testing".toCharArray();
+    CharArrayIterator ci = new CharArrayIterator();
+    ci.setText(text, 0, text.length);
+    ci.next();
+    CharArrayIterator ci2 = (CharArrayIterator) ci.clone();
+    assertEquals(ci.getIndex(), ci2.getIndex()); // clone starts at the same position as the source iterator
+    assertEquals(ci.next(), ci2.next());
+    assertEquals(ci.last(), ci2.last());
+  }
+  
+  
+}
diff --git a/lucene/contrib/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java b/lucene/contrib/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java
new file mode 100644
index 00000000000..ca7b178984b
--- /dev/null
+++ b/lucene/contrib/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java
@@ -0,0 +1,225 @@
+package org.apache.lucene.analysis.icu.segmentation;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+import java.io.StringReader;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.ReusableAnalyzerBase;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.icu.ICUNormalizer2Filter;
+
+import java.util.Arrays;
+
+public class TestICUTokenizer extends BaseTokenStreamTestCase {
+
+ public void testHugeDoc() throws IOException {
+ StringBuilder sb = new StringBuilder();
+ char whitespace[] = new char[4094];
+ Arrays.fill(whitespace, ' ');
+ sb.append(whitespace);
+ sb.append("testing 1234");
+ String input = sb.toString();
+ ICUTokenizer tokenizer = new ICUTokenizer(new StringReader(input));
+ assertTokenStreamContents(tokenizer, new String[] { "testing", "1234" });
+ }
+
+ public void testHugeTerm2() throws IOException {
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < 40960; i++) {
+ sb.append('a');
+ }
+ String input = sb.toString();
+ ICUTokenizer tokenizer = new ICUTokenizer(new StringReader(input));
+ char token[] = new char[4096];
+ Arrays.fill(token, 'a');
+ String expectedToken = new String(token);
+ String expected[] = {
+ expectedToken, expectedToken, expectedToken,
+ expectedToken, expectedToken, expectedToken,
+ expectedToken, expectedToken, expectedToken,
+ expectedToken
+ };
+ assertTokenStreamContents(tokenizer, expected);
+ }
+
+ private Analyzer a = new ReusableAnalyzerBase() {
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName,
+ Reader reader) {
+ Tokenizer tokenizer = new ICUTokenizer(reader);
+ TokenFilter filter = new ICUNormalizer2Filter(tokenizer);
+ return new TokenStreamComponents(tokenizer, filter);
+ }
+ };
+
+ public void testArmenian() throws Exception {
+ assertAnalyzesTo(a, "Վիքիպեդիայի 13 միլիոն հոդվածները (4,600` հայերեն վիքիպեդիայում) գրվել են կամավորների կողմից ու համարյա բոլոր հոդվածները կարող է խմբագրել ցանկաց մարդ ով կարող է բացել Վիքիպեդիայի կայքը։",
+ new String[] { "վիքիպեդիայի", "13", "միլիոն", "հոդվածները", "4,600", "հայերեն", "վիքիպեդիայում", "գրվել", "են", "կամավորների", "կողմից",
+ "ու", "համարյա", "բոլոր", "հոդվածները", "կարող", "է", "խմբագրել", "ցանկաց", "մարդ", "ով", "կարող", "է", "բացել", "վիքիպեդիայի", "կայքը" } );
+ }
+
+ public void testAmharic() throws Exception {
+ assertAnalyzesTo(a, "ዊኪፔድያ የባለ ብዙ ቋንቋ የተሟላ ትክክለኛና ነጻ መዝገበ ዕውቀት (ኢንሳይክሎፒዲያ) ነው። ማንኛውም",
+ new String[] { "ዊኪፔድያ", "የባለ", "ብዙ", "ቋንቋ", "የተሟላ", "ትክክለኛና", "ነጻ", "መዝገበ", "ዕውቀት", "ኢንሳይክሎፒዲያ", "ነው", "ማንኛውም" } );
+ }
+
+ public void testArabic() throws Exception {
+ assertAnalyzesTo(a, "الفيلم الوثائقي الأول عن ويكيبيديا يسمى \"الحقيقة بالأرقام: قصة ويكيبيديا\" (بالإنجليزية: Truth in Numbers: The Wikipedia Story)، سيتم إطلاقه في 2008.",
+ new String[] { "الفيلم", "الوثائقي", "الأول", "عن", "ويكيبيديا", "يسمى", "الحقيقة", "بالأرقام", "قصة", "ويكيبيديا",
+ "بالإنجليزية", "truth", "in", "numbers", "the", "wikipedia", "story", "سيتم", "إطلاقه", "في", "2008" } );
+ }
+
+ public void testAramaic() throws Exception {
+ assertAnalyzesTo(a, "ܘܝܩܝܦܕܝܐ (ܐܢܓܠܝܐ: Wikipedia) ܗܘ ܐܝܢܣܩܠܘܦܕܝܐ ܚܐܪܬܐ ܕܐܢܛܪܢܛ ܒܠܫܢ̈ܐ ܣܓܝܐ̈ܐ܂ ܫܡܗ ܐܬܐ ܡܢ ܡ̈ܠܬܐ ܕ\"ܘܝܩܝ\" ܘ\"ܐܝܢܣܩܠܘܦܕܝܐ\"܀",
+ new String[] { "ܘܝܩܝܦܕܝܐ", "ܐܢܓܠܝܐ", "wikipedia", "ܗܘ", "ܐܝܢܣܩܠܘܦܕܝܐ", "ܚܐܪܬܐ", "ܕܐܢܛܪܢܛ", "ܒܠܫܢ̈ܐ", "ܣܓܝܐ̈ܐ", "ܫܡܗ",
+ "ܐܬܐ", "ܡܢ", "ܡ̈ܠܬܐ", "ܕ", "ܘܝܩܝ", "ܘ", "ܐܝܢܣܩܠܘܦܕܝܐ"});
+ }
+
+ public void testBengali() throws Exception {
+ assertAnalyzesTo(a, "এই বিশ্বকোষ পরিচালনা করে উইকিমিডিয়া ফাউন্ডেশন (একটি অলাভজনক সংস্থা)। উইকিপিডিয়ার শুরু ১৫ জানুয়ারি, ২০০১ সালে। এখন পর্যন্ত ২০০টিরও বেশী ভাষায় উইকিপিডিয়া রয়েছে।",
+ new String[] { "এই", "বিশ্বকোষ", "পরিচালনা", "করে", "উইকিমিডিয়া", "ফাউন্ডেশন", "একটি", "অলাভজনক", "সংস্থা", "উইকিপিডিয়ার",
+ "শুরু", "১৫", "জানুয়ারি", "২০০১", "সালে", "এখন", "পর্যন্ত", "২০০টিরও", "বেশী", "ভাষায়", "উইকিপিডিয়া", "রয়েছে" });
+ }
+
+ public void testFarsi() throws Exception {
+ assertAnalyzesTo(a, "ویکی پدیای انگلیسی در تاریخ ۲۵ دی ۱۳۷۹ به صورت مکملی برای دانشنامهٔ تخصصی نوپدیا نوشته شد.",
+ new String[] { "ویکی", "پدیای", "انگلیسی", "در", "تاریخ", "۲۵", "دی", "۱۳۷۹", "به", "صورت", "مکملی",
+ "برای", "دانشنامهٔ", "تخصصی", "نوپدیا", "نوشته", "شد" });
+ }
+
+ public void testGreek() throws Exception {
+ assertAnalyzesTo(a, "Γράφεται σε συνεργασία από εθελοντές με το λογισμικό wiki, κάτι που σημαίνει ότι άρθρα μπορεί να προστεθούν ή να αλλάξουν από τον καθένα.",
+ new String[] { "γράφεται", "σε", "συνεργασία", "από", "εθελοντέσ", "με", "το", "λογισμικό", "wiki", "κάτι", "που",
+ "σημαίνει", "ότι", "άρθρα", "μπορεί", "να", "προστεθούν", "ή", "να", "αλλάξουν", "από", "τον", "καθένα" });
+ }
+
+ public void testLao() throws Exception {
+ assertAnalyzesTo(a, "ກວ່າດອກ", new String[] { "ກວ່າ", "ດອກ" });
+ }
+
+ public void testThai() throws Exception {
+ assertAnalyzesTo(a, "การที่ได้ต้องแสดงว่างานดี. แล้วเธอจะไปไหน? ๑๒๓๔",
+ new String[] { "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี", "แล้ว", "เธอ", "จะ", "ไป", "ไหน", "๑๒๓๔"});
+ }
+
+ public void testTibetan() throws Exception {
+ assertAnalyzesTo(a, "སྣོན་མཛོད་དང་ལས་འདིས་བོད་ཡིག་མི་ཉམས་གོང་འཕེལ་དུ་གཏོང་བར་ཧ་ཅང་དགེ་མཚན་མཆིས་སོ། །",
+ new String[] { "སྣོན", "མཛོད", "དང", "ལས", "འདིས", "བོད", "ཡིག", "མི", "ཉམས", "གོང", "འཕེལ", "དུ", "གཏོང", "བར", "ཧ", "ཅང", "དགེ", "མཚན", "མཆིས", "སོ" });
+ }
+
+ /*
+ * For chinese, tokenize as char (these can later form bigrams or whatever)
+ * TODO: why do full-width numerics have no word-break prop?
+ */
+ public void testChinese() throws Exception {
+ assertAnalyzesTo(a, "我是中国人。 1234 Tests ",
+ new String[] { "我", "是", "中", "国", "人", "tests"});
+ }
+
+ public void testEmpty() throws Exception {
+ assertAnalyzesTo(a, "", new String[] {});
+ assertAnalyzesTo(a, ".", new String[] {});
+ assertAnalyzesTo(a, " ", new String[] {});
+ }
+
+ /* test various jira issues this analyzer is related to */
+
+ public void testLUCENE1545() throws Exception {
+ /*
+     * Standard analyzer does not correctly tokenize combining character U+0364 COMBINING LATIN SMALL LETTER E.
+ * The word "moͤchte" is incorrectly tokenized into "mo" "chte", the combining character is lost.
+     * Expected result is only one token "moͤchte".
+ */
+ assertAnalyzesTo(a, "moͤchte", new String[] { "moͤchte" });
+ }
+
+ /* Tests from StandardAnalyzer, just to show behavior is similar */
+ public void testAlphanumericSA() throws Exception {
+ // alphanumeric tokens
+ assertAnalyzesTo(a, "B2B", new String[]{"b2b"});
+ assertAnalyzesTo(a, "2B", new String[]{"2b"});
+ }
+
+ public void testDelimitersSA() throws Exception {
+ // other delimiters: "-", "/", ","
+ assertAnalyzesTo(a, "some-dashed-phrase", new String[]{"some", "dashed", "phrase"});
+ assertAnalyzesTo(a, "dogs,chase,cats", new String[]{"dogs", "chase", "cats"});
+ assertAnalyzesTo(a, "ac/dc", new String[]{"ac", "dc"});
+ }
+
+ public void testApostrophesSA() throws Exception {
+ // internal apostrophes: O'Reilly, you're, O'Reilly's
+ assertAnalyzesTo(a, "O'Reilly", new String[]{"o'reilly"});
+ assertAnalyzesTo(a, "you're", new String[]{"you're"});
+ assertAnalyzesTo(a, "she's", new String[]{"she's"});
+ assertAnalyzesTo(a, "Jim's", new String[]{"jim's"});
+ assertAnalyzesTo(a, "don't", new String[]{"don't"});
+ assertAnalyzesTo(a, "O'Reilly's", new String[]{"o'reilly's"});
+ }
+
+ public void testNumericSA() throws Exception {
+ // floating point, serial, model numbers, ip addresses, etc.
+ // every other segment must have at least one digit
+ assertAnalyzesTo(a, "21.35", new String[]{"21.35"});
+ assertAnalyzesTo(a, "R2D2 C3PO", new String[]{"r2d2", "c3po"});
+ assertAnalyzesTo(a, "216.239.63.104", new String[]{"216.239.63.104"});
+ assertAnalyzesTo(a, "216.239.63.104", new String[]{"216.239.63.104"});
+ }
+
+ public void testTextWithNumbersSA() throws Exception {
+ // numbers
+ assertAnalyzesTo(a, "David has 5000 bones", new String[]{"david", "has", "5000", "bones"});
+ }
+
+ public void testVariousTextSA() throws Exception {
+ // various
+ assertAnalyzesTo(a, "C embedded developers wanted", new String[]{"c", "embedded", "developers", "wanted"});
+ assertAnalyzesTo(a, "foo bar FOO BAR", new String[]{"foo", "bar", "foo", "bar"});
+ assertAnalyzesTo(a, "foo bar . FOO <> BAR", new String[]{"foo", "bar", "foo", "bar"});
+ assertAnalyzesTo(a, "\"QUOTED\" word", new String[]{"quoted", "word"});
+ }
+
+ public void testKoreanSA() throws Exception {
+ // Korean words
+ assertAnalyzesTo(a, "안녕하세요 한글입니다", new String[]{"안녕하세요", "한글입니다"});
+ }
+
+ public void testReusableTokenStream() throws Exception {
+ assertAnalyzesToReuse(a, "སྣོན་མཛོད་དང་ལས་འདིས་བོད་ཡིག་མི་ཉམས་གོང་འཕེལ་དུ་གཏོང་བར་ཧ་ཅང་དགེ་མཚན་མཆིས་སོ། །",
+ new String[] { "སྣོན", "མཛོད", "དང", "ལས", "འདིས", "བོད", "ཡིག", "མི", "ཉམས", "གོང",
+ "འཕེལ", "དུ", "གཏོང", "བར", "ཧ", "ཅང", "དགེ", "མཚན", "མཆིས", "སོ" });
+ }
+
+ public void testOffsets() throws Exception {
+ assertAnalyzesTo(a, "David has 5000 bones",
+ new String[] {"david", "has", "5000", "bones"},
+ new int[] {0, 6, 10, 15},
+ new int[] {5, 9, 14, 20});
+ }
+
+ public void testTypes() throws Exception {
+ assertAnalyzesTo(a, "David has 5000 bones",
+ new String[] {"david", "has", "5000", "bones"},
+ new String[] { "