mirror of https://github.com/apache/lucene.git
commit 10904d02f6
parent 887f57eb37

    fix most deprecation warnings

    git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@151468 13f79535-47bb-0310-9956-ffa450edef68
@@ -60,7 +60,7 @@ public class HtmlDocument {
    *@exception IOException if I/O exception occurs
    *@since
    */
-  public HtmlDocument(InputStream is) throws IOException {
+  public HtmlDocument(InputStream is) {
     Tidy tidy = new Tidy();
     tidy.setQuiet(true);
     tidy.setShowWarnings(false);
@@ -78,13 +78,13 @@ public class HtmlDocument {
    *@exception IOException
    */
   public static org.apache.lucene.document.Document
-    getDocument(InputStream is) throws IOException {
+    getDocument(InputStream is) {
     HtmlDocument htmlDoc = new HtmlDocument(is);
     org.apache.lucene.document.Document luceneDoc =
       new org.apache.lucene.document.Document();

-    luceneDoc.add(Field.Text("title", htmlDoc.getTitle()));
-    luceneDoc.add(Field.Text("contents", htmlDoc.getBody()));
+    luceneDoc.add(new Field("title", htmlDoc.getTitle(), Field.Store.YES, Field.Index.TOKENIZED));
+    luceneDoc.add(new Field("contents", htmlDoc.getBody(), Field.Store.YES, Field.Index.TOKENIZED));

     return luceneDoc;
   }
@@ -108,8 +108,8 @@ public class HtmlDocument {
     org.apache.lucene.document.Document luceneDoc =
       new org.apache.lucene.document.Document();

-    luceneDoc.add(Field.Text("title", htmlDoc.getTitle()));
-    luceneDoc.add(Field.Text("contents", htmlDoc.getBody()));
+    luceneDoc.add(new Field("title", htmlDoc.getTitle(), Field.Store.YES, Field.Index.TOKENIZED));
+    luceneDoc.add(new Field("contents", htmlDoc.getBody(), Field.Store.YES, Field.Index.TOKENIZED));

     String contents = null;
     BufferedReader br =
@@ -124,7 +124,7 @@ public class HtmlDocument {
     contents = sw.toString();
     sw.close();

-    luceneDoc.add(Field.UnIndexed("rawcontents", contents));
+    luceneDoc.add(new Field("rawcontents", contents, Field.Store.YES, Field.Index.NO));

     return luceneDoc;
   }
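The substitution applied throughout HtmlDocument maps the deprecated Field factory methods onto the four-argument Field constructor: Field.Text(name, value) becomes new Field(name, value, Field.Store.YES, Field.Index.TOKENIZED), and Field.UnIndexed(name, value) becomes new Field(name, value, Field.Store.YES, Field.Index.NO). A minimal sketch of the new-style construction, assuming the Lucene API of this era; the FieldMigrationSketch class and toLuceneDocument method are illustrative only, not part of the commit:

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;

// Illustrative helper, not part of this commit: builds a Document the
// same way the patched HtmlDocument.getDocument() does.
class FieldMigrationSketch {
    static Document toLuceneDocument(String title, String body, String raw) {
        Document doc = new Document();
        // stored + tokenized: searchable full text that can also be retrieved
        doc.add(new Field("title", title, Field.Store.YES, Field.Index.TOKENIZED));
        doc.add(new Field("contents", body, Field.Store.YES, Field.Index.TOKENIZED));
        // stored only, never indexed: kept purely for retrieval
        doc.add(new Field("rawcontents", raw, Field.Store.YES, Field.Index.NO));
        return doc;
    }
}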
@@ -4,8 +4,6 @@ import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.StopAnalyzer;
 import org.apache.lucene.analysis.SimpleAnalyzer;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.analysis.ru.RussianAnalyzer;
 import org.apache.lucene.analysis.de.GermanAnalyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.document.DateField;
 import org.apache.lucene.document.Document;
@@ -250,7 +248,7 @@ public class IndexTask extends Task {
     int totalIndexed = 0;
     int totalIgnored = 0;
     try {
-      writer.mergeFactor = mergeFactor;
+      writer.setMergeFactor(mergeFactor);

       for (int i = 0; i < filesets.size(); i++) {
         FileSet fs = (FileSet) filesets.get(i);
@@ -309,13 +307,12 @@ public class IndexTask extends Task {
           } else {
             // Add the path of the file as a field named "path". Use a Keyword field, so
             // that the index stores the path, and so that the path is searchable
-            doc.add(Field.Keyword("path", file.getPath()));
+            doc.add(new Field("path", file.getPath(), Field.Store.YES, Field.Index.UN_TOKENIZED));

             // Add the last modified date of the file a field named "modified". Use a
             // Keyword field, so that it's searchable, but so that no attempt is made
             // to tokenize the field into words.
-            doc.add(Field.Keyword("modified",
-                DateField.timeToString(file.lastModified())));
+            doc.add(new Field("modified", DateField.timeToString(file.lastModified()), Field.Store.YES, Field.Index.UN_TOKENIZED));

             writer.addDocument(doc);
             totalIndexed++;
@@ -374,8 +371,6 @@ public class IndexTask extends Task {
    analyzerLookup.put("standard", StandardAnalyzer.class.getName());
    analyzerLookup.put("stop", StopAnalyzer.class.getName());
    analyzerLookup.put("whitespace", WhitespaceAnalyzer.class.getName());
    analyzerLookup.put("german", GermanAnalyzer.class.getName());
    analyzerLookup.put("russian", RussianAnalyzer.class.getName());
  }

  /**
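The IndexTask changes follow the same pattern, with two additions: the Keyword-style fields become untokenized Fields, and assignment to the public mergeFactor field gives way to the setter. A small self-contained sketch under the same API assumptions; IndexOneFile is a hypothetical name, not taken from the commit:

import java.io.File;
import java.io.IOException;

import org.apache.lucene.document.DateField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;

// Hypothetical helper mirroring the patched IndexTask logic:
//   Field.Keyword(name, value)  ->  new Field(name, value, Field.Store.YES, Field.Index.UN_TOKENIZED)
//   writer.mergeFactor = n      ->  writer.setMergeFactor(n)
class IndexOneFile {
    static void index(IndexWriter writer, File file, int mergeFactor) throws IOException {
        writer.setMergeFactor(mergeFactor);  // setter replaces the deprecated public field

        Document doc = new Document();
        // stored, untokenized: the whole path is one indexed term, so it stays searchable as-is
        doc.add(new Field("path", file.getPath(), Field.Store.YES, Field.Index.UN_TOKENIZED));
        // last-modified date encoded with DateField, also stored and untokenized
        doc.add(new Field("modified", DateField.timeToString(file.lastModified()),
                          Field.Store.YES, Field.Index.UN_TOKENIZED));
        writer.addDocument(doc);
    }
}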
@@ -62,10 +62,9 @@ public class TextDocument {
     // make a new, empty document
     Document doc = new Document();

-    doc.add(Field.Text("title", f.getName()));
-    doc.add(Field.Text("contents", textDoc.getContents()));
-    doc.add(Field.UnIndexed("rawcontents",
-        textDoc.getContents()));
+    doc.add(new Field("title", f.getName(), Field.Store.YES, Field.Index.TOKENIZED));
+    doc.add(new Field("contents", textDoc.getContents(), Field.Store.YES, Field.Index.TOKENIZED));
+    doc.add(new Field("rawcontents", textDoc.getContents(), Field.Store.YES, Field.Index.NO));

     // return the document
     return doc;
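The TextDocument change repeats the same mapping, folding the two-line Field.UnIndexed call into a single constructor call. Summarizing the three deprecated factory methods this commit replaces, as a sketch under the same API assumptions; the class and method names below are illustrative only:

import org.apache.lucene.document.Field;

// Mapping used throughout this commit (sketch, not part of the patch):
class DeprecatedFieldMapping {
    static Field text(String name, String value) {       // was Field.Text(name, value)
        return new Field(name, value, Field.Store.YES, Field.Index.TOKENIZED);
    }
    static Field keyword(String name, String value) {    // was Field.Keyword(name, value)
        return new Field(name, value, Field.Store.YES, Field.Index.UN_TOKENIZED);
    }
    static Field unIndexed(String name, String value) {  // was Field.UnIndexed(name, value)
        return new Field(name, value, Field.Store.YES, Field.Index.NO);
    }
}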