From fb33754168a52ce2be6cf981e996fb4ba1d0c6d4 Mon Sep 17 00:00:00 2001 From: Steven Rowe Date: Sun, 25 Mar 2012 01:20:55 +0000 Subject: [PATCH] LUCENE-3881: Added UAX29URLEmailAnalyzer git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1304975 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/CHANGES.txt | 3 + .../standard/UAX29URLEmailAnalyzer.java | 113 ++++++++ .../lucene/analysis/standard/package.html | 6 + .../core/TestUAX29URLEmailAnalyzer.java | 267 ++++++++++++++++++ 4 files changed, 389 insertions(+) create mode 100755 modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailAnalyzer.java create mode 100755 modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailAnalyzer.java diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index 1cccfd9212c..7e12459bfa2 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -902,6 +902,9 @@ New Features * LUCENE-3789: Expose MTQ TermsEnum via RewriteMethod for non package private access (Simon Willnauer) +* LUCENE-3881: Added UAX29URLEmailAnalyzer: a standard analyzer that recognizes + URLs and emails. (Steve Rowe) + Bug fixes * LUCENE-3595: Fixed FieldCacheRangeFilter and FieldCacheTermsFilter diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailAnalyzer.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailAnalyzer.java new file mode 100755 index 00000000000..e7e79517a8a --- /dev/null +++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailAnalyzer.java @@ -0,0 +1,113 @@ +package org.apache.lucene.analysis.standard; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.core.LowerCaseFilter; +import org.apache.lucene.analysis.core.StopAnalyzer; +import org.apache.lucene.analysis.core.StopFilter; +import org.apache.lucene.analysis.util.CharArraySet; +import org.apache.lucene.analysis.util.StopwordAnalyzerBase; +import org.apache.lucene.util.Version; + +import java.io.IOException; +import java.io.Reader; + +/** + * Filters {@link org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer} + * with {@link org.apache.lucene.analysis.standard.StandardFilter}, + * {@link org.apache.lucene.analysis.core.LowerCaseFilter} and + * {@link org.apache.lucene.analysis.core.StopFilter}, using a list of + * English stop words. + * + * + *
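+ * <p>A brief usage sketch; the field name, version constant, and sample text
+ * below are placeholders for illustration, not part of this patch:
+ * <pre>
+ *   Analyzer analyzer = new UAX29URLEmailAnalyzer(Version.LUCENE_36);
+ *   TokenStream stream = analyzer.tokenStream("body",
+ *     new StringReader("mail admin@example.com or visit http://example.com/"));
+ *   // produces "mail", "admin@example.com", "visit", "http://example.com/";
+ *   // "or" is removed as a stop word, and the email address and the URL are
+ *   // each kept as a single token
+ * </pre>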
+ * <a name="version"/>
+ * <p>You must specify the required {@link org.apache.lucene.util.Version}
+ * compatibility when creating UAX29URLEmailAnalyzer
+ */ +public final class UAX29URLEmailAnalyzer extends StopwordAnalyzerBase { + + /** Default maximum allowed token length */ + public static final int DEFAULT_MAX_TOKEN_LENGTH = StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH; + + private int maxTokenLength = DEFAULT_MAX_TOKEN_LENGTH; + + /** An unmodifiable set containing some common English words that are usually not + useful for searching. */ + public static final CharArraySet STOP_WORDS_SET = StopAnalyzer.ENGLISH_STOP_WORDS_SET; + + /** Builds an analyzer with the given stop words. + * @param matchVersion Lucene version to match See {@link + *
+   * <a href="#version">above</a>}
+   * @param stopWords stop words */
+  public UAX29URLEmailAnalyzer(Version matchVersion, CharArraySet stopWords) {
+    super(matchVersion, stopWords);
+  }
+
+  /** Builds an analyzer with the default stop words ({@link
+   * #STOP_WORDS_SET}).
+   * @param matchVersion Lucene version to match See {@link
+   * <a href="#version">above</a>}
+   */
+  public UAX29URLEmailAnalyzer(Version matchVersion) {
+    this(matchVersion, STOP_WORDS_SET);
+  }
+
+  /** Builds an analyzer with the stop words from the given reader.
+   * @see org.apache.lucene.analysis.util.WordlistLoader#getWordSet(java.io.Reader, org.apache.lucene.util.Version)
+   * @param matchVersion Lucene version to match See {@link
+   * <a href="#version">above</a>}
+   * @param stopwords Reader to read stop words from */
+  public UAX29URLEmailAnalyzer(Version matchVersion, Reader stopwords) throws IOException {
+    this(matchVersion, loadStopwordSet(stopwords, matchVersion));
+  }
+
+  /**
+   * Set maximum allowed token length.  If a token is seen
+   * that exceeds this length then it is discarded.  This
+   * setting only takes effect the next time tokenStream is called.
+   */
+  public void setMaxTokenLength(int length) {
+    maxTokenLength = length;
+  }
+
+  /**
+   * @see #setMaxTokenLength
+   */
+  public int getMaxTokenLength() {
+    return maxTokenLength;
+  }
+
+  @Override
+  protected TokenStreamComponents createComponents(final String fieldName, final Reader reader) {
+    final UAX29URLEmailTokenizer src = new UAX29URLEmailTokenizer(matchVersion, reader);
+    src.setMaxTokenLength(maxTokenLength);
+    TokenStream tok = new StandardFilter(matchVersion, src);
+    tok = new LowerCaseFilter(matchVersion, tok);
+    tok = new StopFilter(matchVersion, tok, stopwords);
+    return new TokenStreamComponents(src, tok) {
+      @Override
+      protected void reset(final Reader reader) throws IOException {
+        src.setMaxTokenLength(UAX29URLEmailAnalyzer.this.maxTokenLength);
+        super.reset(reader);
+      }
+    };
+  }
+}
diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/package.html b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/package.html
index e39f74bfb93..0d76c899150 100644
--- a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/package.html
+++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/package.html
@@ -54,6 +54,12 @@ algorithm, as specified in
 Unicode Standard Annex #29.
 URLs and email addresses are also tokenized according to the relevant RFCs.
+
+ UAX29URLEmailAnalyzer includes + UAX29URLEmailTokenizer, + StandardFilter, + LowerCaseFilter + and StopFilter. diff --git a/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailAnalyzer.java b/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailAnalyzer.java new file mode 100755 index 00000000000..b07da800ad0 --- /dev/null +++ b/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailAnalyzer.java @@ -0,0 +1,267 @@ +package org.apache.lucene.analysis.core; + +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; +import org.apache.lucene.analysis.standard.UAX29URLEmailAnalyzer; +import org.apache.lucene.util.Version; + +import java.io.IOException; +import java.util.Arrays; + +public class TestUAX29URLEmailAnalyzer extends BaseTokenStreamTestCase { + + private Analyzer a = new UAX29URLEmailAnalyzer(TEST_VERSION_CURRENT); + + public void testHugeDoc() throws IOException { + StringBuilder sb = new StringBuilder(); + char whitespace[] = new char[4094]; + Arrays.fill(whitespace, ' '); + sb.append(whitespace); + sb.append("testing 1234"); + String input = sb.toString(); + BaseTokenStreamTestCase.assertAnalyzesTo(a, input, new String[]{"testing", "1234"}) ; + } + + public void testArmenian() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, "Վիքիպեդիայի 13 միլիոն հոդվածները (4,600` հայերեն վիքիպեդիայում) գրվել են կամավորների կողմից ու համարյա բոլոր հոդվածները կարող է խմբագրել ցանկաց մարդ ով կարող է բացել Վիքիպեդիայի կայքը։", + new String[] { "վիքիպեդիայի", "13", "միլիոն", "հոդվածները", "4,600", "հայերեն", "վիքիպեդիայում", "գրվել", "են", "կամավորների", "կողմից", + "ու", "համարյա", "բոլոր", "հոդվածները", "կարող", "է", "խմբագրել", "ցանկաց", "մարդ", "ով", "կարող", "է", "բացել", "վիքիպեդիայի", "կայքը" } ); + } + + public void testAmharic() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, "ዊኪፔድያ የባለ ብዙ ቋንቋ የተሟላ ትክክለኛና ነጻ መዝገበ ዕውቀት (ኢንሳይክሎፒዲያ) ነው። ማንኛውም", + new String[] { "ዊኪፔድያ", "የባለ", "ብዙ", "ቋንቋ", "የተሟላ", "ትክክለኛና", "ነጻ", "መዝገበ", "ዕውቀት", "ኢንሳይክሎፒዲያ", "ነው", "ማንኛውም" } ); + } + + public void testArabic() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, "الفيلم الوثائقي الأول عن ويكيبيديا يسمى \"الحقيقة بالأرقام: قصة ويكيبيديا\" (بالإنجليزية: Truth in Numbers: The Wikipedia Story)، سيتم إطلاقه في 2008.", + new String[] { "الفيلم", "الوثائقي", "الأول", "عن", "ويكيبيديا", "يسمى", "الحقيقة", "بالأرقام", "قصة", "ويكيبيديا", + "بالإنجليزية", "truth", "numbers", "wikipedia", "story", "سيتم", "إطلاقه", "في", "2008" } ); + } + + public void testAramaic() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, "ܘܝܩܝܦܕܝܐ 
(ܐܢܓܠܝܐ: Wikipedia) ܗܘ ܐܝܢܣܩܠܘܦܕܝܐ ܚܐܪܬܐ ܕܐܢܛܪܢܛ ܒܠܫܢ̈ܐ ܣܓܝܐ̈ܐ܂ ܫܡܗ ܐܬܐ ܡܢ ܡ̈ܠܬܐ ܕ\"ܘܝܩܝ\" ܘ\"ܐܝܢܣܩܠܘܦܕܝܐ\"܀", + new String[] { "ܘܝܩܝܦܕܝܐ", "ܐܢܓܠܝܐ", "wikipedia", "ܗܘ", "ܐܝܢܣܩܠܘܦܕܝܐ", "ܚܐܪܬܐ", "ܕܐܢܛܪܢܛ", "ܒܠܫܢ̈ܐ", "ܣܓܝܐ̈ܐ", "ܫܡܗ", + "ܐܬܐ", "ܡܢ", "ܡ̈ܠܬܐ", "ܕ", "ܘܝܩܝ", "ܘ", "ܐܝܢܣܩܠܘܦܕܝܐ"}); + } + + public void testBengali() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, "এই বিশ্বকোষ পরিচালনা করে উইকিমিডিয়া ফাউন্ডেশন (একটি অলাভজনক সংস্থা)। উইকিপিডিয়ার শুরু ১৫ জানুয়ারি, ২০০১ সালে। এখন পর্যন্ত ২০০টিরও বেশী ভাষায় উইকিপিডিয়া রয়েছে।", + new String[] { "এই", "বিশ্বকোষ", "পরিচালনা", "করে", "উইকিমিডিয়া", "ফাউন্ডেশন", "একটি", "অলাভজনক", "সংস্থা", "উইকিপিডিয়ার", + "শুরু", "১৫", "জানুয়ারি", "২০০১", "সালে", "এখন", "পর্যন্ত", "২০০টিরও", "বেশী", "ভাষায়", "উইকিপিডিয়া", "রয়েছে" }); + } + + public void testFarsi() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, "ویکی پدیای انگلیسی در تاریخ ۲۵ دی ۱۳۷۹ به صورت مکملی برای دانشنامهٔ تخصصی نوپدیا نوشته شد.", + new String[] { "ویکی", "پدیای", "انگلیسی", "در", "تاریخ", "۲۵", "دی", "۱۳۷۹", "به", "صورت", "مکملی", + "برای", "دانشنامهٔ", "تخصصی", "نوپدیا", "نوشته", "شد" }); + } + + public void testGreek() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, "Γράφεται σε συνεργασία από εθελοντές με το λογισμικό wiki, κάτι που σημαίνει ότι άρθρα μπορεί να προστεθούν ή να αλλάξουν από τον καθένα.", + new String[] { "γράφεται", "σε", "συνεργασία", "από", "εθελοντές", "με", "το", "λογισμικό", "wiki", "κάτι", "που", + "σημαίνει", "ότι", "άρθρα", "μπορεί", "να", "προστεθούν", "ή", "να", "αλλάξουν", "από", "τον", "καθένα" }); + } + + public void testThai() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, "การที่ได้ต้องแสดงว่างานดี. แล้วเธอจะไปไหน? ๑๒๓๔", + new String[] { "การที่ได้ต้องแสดงว่างานดี", "แล้วเธอจะไปไหน", "๑๒๓๔" }); + } + + public void testLao() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, "ສາທາລະນະລັດ ປະຊາທິປະໄຕ ປະຊາຊົນລາວ", + new String[] { "ສາທາລະນະລັດ", "ປະຊາທິປະໄຕ", "ປະຊາຊົນລາວ" }); + } + + public void testTibetan() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, "སྣོན་མཛོད་དང་ལས་འདིས་བོད་ཡིག་མི་ཉམས་གོང་འཕེལ་དུ་གཏོང་བར་ཧ་ཅང་དགེ་མཚན་མཆིས་སོ། །", + new String[] { "སྣོན", "མཛོད", "དང", "ལས", "འདིས", "བོད", "ཡིག", + "མི", "ཉམས", "གོང", "འཕེལ", "དུ", "གཏོང", "བར", + "ཧ", "ཅང", "དགེ", "མཚན", "མཆིས", "སོ" }); + } + + /* + * For chinese, tokenize as char (these can later form bigrams or whatever) + */ + public void testChinese() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, "我是中国人。 1234 Tests ", + new String[] { "我", "是", "中", "国", "人", "1234", "tests"}); + } + + public void testEmpty() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, "", new String[] {}); + BaseTokenStreamTestCase.assertAnalyzesTo(a, ".", new String[] {}); + BaseTokenStreamTestCase.assertAnalyzesTo(a, " ", new String[] {}); + } + + /* test various jira issues this analyzer is related to */ + + public void testLUCENE1545() throws Exception { + /* + * Standard analyzer does not correctly tokenize combining character U+0364 COMBINING LATIN SMALL LETTER E. + * The word "moͤchte" is incorrectly tokenized into "mo" "chte", the combining character is lost. + * Expected result is only one token "moͤchte". 
+ */ + BaseTokenStreamTestCase.assertAnalyzesTo(a, "moͤchte", new String[] { "moͤchte" }); + } + + /* Tests from StandardAnalyzer, just to show behavior is similar */ + public void testAlphanumericSA() throws Exception { + // alphanumeric tokens + BaseTokenStreamTestCase.assertAnalyzesTo(a, "B2B", new String[]{"b2b"}); + BaseTokenStreamTestCase.assertAnalyzesTo(a, "2B", new String[]{"2b"}); + } + + public void testDelimitersSA() throws Exception { + // other delimiters: "-", "/", "," + BaseTokenStreamTestCase.assertAnalyzesTo(a, "some-dashed-phrase", new String[]{"some", "dashed", "phrase"}); + BaseTokenStreamTestCase.assertAnalyzesTo(a, "dogs,chase,cats", new String[]{"dogs", "chase", "cats"}); + BaseTokenStreamTestCase.assertAnalyzesTo(a, "ac/dc", new String[]{"ac", "dc"}); + } + + public void testApostrophesSA() throws Exception { + // internal apostrophes: O'Reilly, you're, O'Reilly's + BaseTokenStreamTestCase.assertAnalyzesTo(a, "O'Reilly", new String[]{"o'reilly"}); + BaseTokenStreamTestCase.assertAnalyzesTo(a, "you're", new String[]{"you're"}); + BaseTokenStreamTestCase.assertAnalyzesTo(a, "she's", new String[]{"she's"}); + BaseTokenStreamTestCase.assertAnalyzesTo(a, "Jim's", new String[]{"jim's"}); + BaseTokenStreamTestCase.assertAnalyzesTo(a, "don't", new String[]{"don't"}); + BaseTokenStreamTestCase.assertAnalyzesTo(a, "O'Reilly's", new String[]{"o'reilly's"}); + } + + public void testNumericSA() throws Exception { + // floating point, serial, model numbers, ip addresses, etc. + BaseTokenStreamTestCase.assertAnalyzesTo(a, "21.35", new String[]{"21.35"}); + BaseTokenStreamTestCase.assertAnalyzesTo(a, "R2D2 C3PO", new String[]{"r2d2", "c3po"}); + BaseTokenStreamTestCase.assertAnalyzesTo(a, "216.239.63.104", new String[]{"216.239.63.104"}); + BaseTokenStreamTestCase.assertAnalyzesTo(a, "216.239.63.104", new String[]{"216.239.63.104"}); + } + + public void testTextWithNumbersSA() throws Exception { + // numbers + BaseTokenStreamTestCase.assertAnalyzesTo(a, "David has 5000 bones", new String[]{"david", "has", "5000", "bones"}); + } + + public void testVariousTextSA() throws Exception { + // various + BaseTokenStreamTestCase.assertAnalyzesTo(a, "C embedded developers wanted", new String[]{"c", "embedded", "developers", "wanted"}); + BaseTokenStreamTestCase.assertAnalyzesTo(a, "foo bar FOO BAR", new String[]{"foo", "bar", "foo", "bar"}); + BaseTokenStreamTestCase.assertAnalyzesTo(a, "foo bar . 
FOO <> BAR", new String[]{"foo", "bar", "foo", "bar"}); + BaseTokenStreamTestCase.assertAnalyzesTo(a, "\"QUOTED\" word", new String[]{"quoted", "word"}); + } + + public void testKoreanSA() throws Exception { + // Korean words + BaseTokenStreamTestCase.assertAnalyzesTo(a, "안녕하세요 한글입니다", new String[]{"안녕하세요", "한글입니다"}); + } + + public void testOffsets() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, "David has 5000 bones", + new String[] {"david", "has", "5000", "bones"}, + new int[] {0, 6, 10, 15}, + new int[] {5, 9, 14, 20}); + } + + public void testTypes() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, "david has 5000 bones", + new String[] {"david", "has", "5000", "bones"}, + new String[] { "", "", "", "" }); + } + + public void testSupplementary() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, "𩬅艱鍟䇹愯瀛", + new String[] {"𩬅", "艱", "鍟", "䇹", "愯", "瀛"}, + new String[] { "", "", "", "", "", "" }); + } + + public void testKorean() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, "훈민정음", + new String[] { "훈민정음" }, + new String[] { "" }); + } + + public void testJapanese() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, "仮名遣い カタカナ", + new String[] { "仮", "名", "遣", "い", "カタカナ" }, + new String[] { "", "", "", "", "" }); + } + + public void testCombiningMarks() throws Exception { + checkOneTerm(a, "ざ", "ざ"); // hiragana + checkOneTerm(a, "ザ", "ザ"); // katakana + checkOneTerm(a, "壹゙", "壹゙"); // ideographic + checkOneTerm(a, "아゙", "아゙"); // hangul + } + + /** @deprecated remove this and sophisticated backwards layer in 5.0 */ + @Deprecated + public void testCombiningMarksBackwards() throws Exception { + Analyzer a = new UAX29URLEmailAnalyzer(Version.LUCENE_33); + checkOneTerm(a, "ざ", "さ"); // hiragana Bug + checkOneTerm(a, "ザ", "ザ"); // katakana Works + checkOneTerm(a, "壹゙", "壹"); // ideographic Bug + checkOneTerm(a, "아゙", "아゙"); // hangul Works + } + + public void testBasicEmails() throws Exception { + BaseTokenStreamTestCase.assertAnalyzesTo(a, + "one test@example.com two three [A@example.CO.UK] \"ArakaBanassaMassanaBakarA\" ", + new String[] {"one", "test@example.com", "two", "three", "a@example.co.uk", "arakabanassamassanabakara", "info@info.info",}, + new String[] { "", "", "", "", "", "", "" }); + } + + public void testMailtoSchemeEmails () throws Exception { + // See LUCENE-3880 + BaseTokenStreamTestCase.assertAnalyzesTo(a, "MAILTO:Test@Example.ORG", + new String[] {"mailto", "test@example.org"}, + new String[] { "", "" }); + + // TODO: Support full mailto: scheme URIs. See RFC 6068: http://tools.ietf.org/html/rfc6068 + BaseTokenStreamTestCase.assertAnalyzesTo + (a, "mailto:personA@example.com,personB@example.com?cc=personC@example.com" + + "&subject=Subjectivity&body=Corpusivity%20or%20something%20like%20that", + new String[] { "mailto", + "persona@example.com", + // TODO: recognize ',' address delimiter. 
+                        // Also, see examples of ';' delimiter use at: http://www.mailto.co.uk/
+                        ",personb@example.com",
+                        "?cc=personc@example.com", // TODO: split field keys/values
+                        "subject", "subjectivity",
+                        "body", "corpusivity", "20or", "20something","20like", "20that" }, // TODO: Hex decoding + re-tokenization
+         new String[] { "<ALPHANUM>",
+                        "<EMAIL>",
+                        "<EMAIL>",
+                        "<EMAIL>",
+                        "<ALPHANUM>", "<ALPHANUM>",
+                        "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>" });
+  }
+
+  public void testBasicURLs() throws Exception {
+    BaseTokenStreamTestCase.assertAnalyzesTo(a,
+        "a <https://example.net/omg/isnt/that/nice?no=its&n%30t#mntl-e> b-D ftp://www.example.com/ABC.txt file:///C:/path/to/a/FILE.txt C",
+        new String[] {"https://example.net/omg/isnt/that/nice?no=its&n%30t#mntl-e", "b", "d", "ftp://www.example.com/abc.txt", "file:///c:/path/to/a/file.txt", "c" },
+        new String[] { "<URL>", "<ALPHANUM>", "<ALPHANUM>", "<URL>", "<URL>", "<ALPHANUM>" });
+  }
+
+  /** blast some random strings through the analyzer */
+  public void testRandomStrings() throws Exception {
+    checkRandomData(random, new UAX29URLEmailAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+  }
+}
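
For illustration, a minimal driver that prints each token and its type. This is a sketch, not part of the patch; the version constant, field name, and sample text are assumptions chosen for this example:

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.standard.UAX29URLEmailAnalyzer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
    import org.apache.lucene.util.Version;

    import java.io.IOException;
    import java.io.StringReader;

    public class UAX29URLEmailAnalyzerDemo {
      public static void main(String[] args) throws IOException {
        // Version.LUCENE_36 is a placeholder; pass whatever Version you build against.
        Analyzer analyzer = new UAX29URLEmailAnalyzer(Version.LUCENE_36);
        TokenStream stream = analyzer.tokenStream("body",
            new StringReader("Contact admin@example.com or see http://example.com/faq for details."));
        CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
        TypeAttribute type = stream.addAttribute(TypeAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
          // Expected: admin@example.com typed as <EMAIL> and http://example.com/faq
          // typed as <URL>; "or" and "for" are dropped as stop words, and all
          // tokens are lowercased.
          System.out.println(term + "\t" + type.type());
        }
        stream.end();
        stream.close();
      }
    }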