LUCENE-5601: ThaiTokenizer ignores sentenceStart

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1586614 13f79535-47bb-0310-9956-ffa450edef68
Author: Robert Muir
Date:   2014-04-11 11:12:38 +00:00
parent b72cd4b7d9
commit d598f8b599

2 changed files with 8 additions and 1 deletion

ThaiTokenizer.java

@@ -99,7 +99,7 @@ public class ThaiTokenizer extends SegmentingTokenizerBase {
     }
     clearAttributes();
-    termAtt.copyBuffer(buffer, start, end - start);
+    termAtt.copyBuffer(buffer, sentenceStart + start, end - start);
     offsetAtt.setOffset(correctOffset(offset + sentenceStart + start), correctOffset(offset + sentenceStart + end));
     return true;
   }
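
Why the one-character change matters: SegmentingTokenizerBase hands incrementWord() boundaries that the word BreakIterator computes relative to the current sentence, while buffer holds the whole read-ahead chunk. The offset calculation on the following line already adds sentenceStart; the term copy did not, so any sentence after the first copied characters from the wrong region of the buffer. A minimal sketch of the failure mode follows (a hypothetical demo class, not part of this commit; it assumes a JRE with Thai word-break support, the same requirement ThaiAnalyzer itself has):

    import java.text.BreakIterator;
    import java.util.Locale;

    // Hypothetical demo reproducing the bug, using the same
    // two-sentence input as the new test below.
    public class SentenceStartDemo {
      public static void main(String[] args) {
        char[] buffer = "This is a test. การที่ได้ต้องแสดงว่างานดี".toCharArray();
        int sentenceStart = 16; // the Thai sentence begins at offset 16 of the buffer

        // The word BreakIterator sees only the current sentence, so the
        // boundaries it returns are relative to that sentence, not to the buffer.
        BreakIterator words = BreakIterator.getWordInstance(new Locale("th"));
        words.setText(new String(buffer, sentenceStart, buffer.length - sentenceStart));
        int start = words.first(); // 0
        int end = words.next();    // 3, the end of "การ"

        // Before the patch: indexes into the first sentence, prints "Thi".
        System.out.println(new String(buffer, start, end - start));
        // After the patch: shifted by sentenceStart, prints the Thai word "การ".
        System.out.println(new String(buffer, sentenceStart + start, end - start));
      }
    }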

TestThaiAnalyzer.java

@@ -117,4 +117,11 @@ public class TestThaiAnalyzer extends BaseTokenStreamTestCase {
     ts.addAttribute(FlagsAttribute.class);
     assertTokenStreamContents(ts, new String[] { "ภาษา", "ไทย" });
   }
+
+  public void testTwoSentences() throws Exception {
+    assertAnalyzesTo(new ThaiAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET), "This is a test. การที่ได้ต้องแสดงว่างานดี",
+        new String[] { "this", "is", "a", "test", "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี" },
+        new int[] { 0, 5, 8, 10, 16, 19, 22, 25, 29, 33, 36, 39 },
+        new int[] { 4, 7, 9, 14, 19, 22, 25, 29, 33, 36, 39, 41 });
+  }
 }
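
Note on the new test: it mixes an English sentence with a Thai one precisely so that sentenceStart is nonzero for the second sentence. Before the fix it would have failed on the term text, not the offsets: the first Thai word would have been copied from offset 0 of the buffer (yielding "Thi" instead of "การ"), while the reported start/end offsets, which already included sentenceStart, would still have pointed at the Thai text.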