LUCENE-2480: Remove more dead code

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@958137 13f79535-47bb-0310-9956-ffa450edef68
Uwe Schindler 2010-06-25 22:22:16 +00:00
parent ffc358d0d5
commit 52085fa44f
1 changed file with 21 additions and 39 deletions

TestBackwardsCompatibility.java

@@ -235,30 +235,24 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
       if (!reader.isDeleted(i)) {
         Document d = reader.document(i);
         List<Fieldable> fields = d.getFields();
-        if (!oldName.startsWith("19.") &&
-            !oldName.startsWith("20.") &&
-            !oldName.startsWith("21.") &&
-            !oldName.startsWith("22.")) {
-
-          if (d.getField("content3") == null) {
-            final int numFields = 5;
-            assertEquals(numFields, fields.size());
-            Field f = d.getField("id");
-            assertEquals(""+i, f.stringValue());
-
-            f = d.getField("utf8");
-            assertEquals("Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", f.stringValue());
-
-            f = d.getField("autf8");
-            assertEquals("Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", f.stringValue());
-
-            f = d.getField("content2");
-            assertEquals("here is more content with aaa aaa aaa", f.stringValue());
-
-            f = d.getField("fie\u2C77ld");
-            assertEquals("field with non-ascii name", f.stringValue());
-          }
-        }
+        if (d.getField("content3") == null) {
+          final int numFields = 5;
+          assertEquals(numFields, fields.size());
+          Field f = d.getField("id");
+          assertEquals(""+i, f.stringValue());
+
+          f = d.getField("utf8");
+          assertEquals("Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", f.stringValue());
+
+          f = d.getField("autf8");
+          assertEquals("Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", f.stringValue());
+
+          f = d.getField("content2");
+          assertEquals("here is more content with aaa aaa aaa", f.stringValue());
+
+          f = d.getField("fie\u2C77ld");
+          assertEquals("field with non-ascii name", f.stringValue());
+        }
       } else
         // Only ID 7 is deleted
         assertEquals(7, i);
@@ -273,18 +267,12 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
 
       doTestHits(hits, 34, searcher.getIndexReader());
 
-      if (!oldName.startsWith("19.") &&
-          !oldName.startsWith("20.") &&
-          !oldName.startsWith("21.") &&
-          !oldName.startsWith("22.")) {
-        // Test on indices >= 2.3
-        hits = searcher.search(new TermQuery(new Term("utf8", "\u0000")), null, 1000).scoreDocs;
-        assertEquals(34, hits.length);
-        hits = searcher.search(new TermQuery(new Term("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne")), null, 1000).scoreDocs;
-        assertEquals(34, hits.length);
-        hits = searcher.search(new TermQuery(new Term("utf8", "ab\ud917\udc17cd")), null, 1000).scoreDocs;
-        assertEquals(34, hits.length);
-      }
+      hits = searcher.search(new TermQuery(new Term("utf8", "\u0000")), null, 1000).scoreDocs;
+      assertEquals(34, hits.length);
+      hits = searcher.search(new TermQuery(new Term("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne")), null, 1000).scoreDocs;
+      assertEquals(34, hits.length);
+      hits = searcher.search(new TermQuery(new Term("utf8", "ab\ud917\udc17cd")), null, 1000).scoreDocs;
+      assertEquals(34, hits.length);
 
       searcher.close();
       dir.close();
@@ -564,12 +552,6 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
     return new File(TEMP_DIR, dirName).getCanonicalPath();
   }
 
-  static final String TEXT_TO_COMPRESS = "this is a compressed field and should appear in 3.0 as an uncompressed field after merge";
-  // FieldSelectorResult.SIZE returns compressed size for compressed fields, which are internally handled as binary;
-  // do it in the same way like FieldsWriter, do not use CompressionTools.compressString() for compressed fields:
-  static final byte[] BINARY_TO_COMPRESS = new byte[]{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20};
-
   private int countDocs(DocsEnum docs) throws IOException {
     int count = 0;
     while((docs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
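
The constants deleted in the last hunk existed to exercise compressed-field handling; the removed comment points out that FieldsWriter deflated the stored bytes directly, whereas CompressionTools.compressString() first encodes the String as UTF-8 and then deflates. A minimal sketch of that distinction, using the CompressionTools API from Lucene of this era (the demo class name and sample bytes are made up for illustration, not taken from the patch):

import org.apache.lucene.document.CompressionTools;

// Illustrative only: contrasts the two compression entry points the
// removed comment refers to.
public class CompressedFieldDemo {
  public static void main(String[] args) {
    byte[] raw = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};     // sample bytes, in the spirit of BINARY_TO_COMPRESS
    byte[] asBinary = CompressionTools.compress(raw); // deflate the bytes as-is, like FieldsWriter did
    byte[] asString = CompressionTools.compressString("some stored text"); // UTF-8 encode, then deflate
    System.out.println("binary path: " + asBinary.length
        + " bytes, string path: " + asString.length + " bytes");
  }
}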
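
The trailing context above cuts off inside countDocs. For completeness, the iteration idiom it uses, stepping DocsEnum.nextDoc() until DocIdSetIterator.NO_MORE_DOCS, looks like this as a self-contained sketch (not the verbatim method from the file):

import java.io.IOException;

import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.search.DocIdSetIterator;

final class DocsEnumCounter {
  // Counts documents by advancing the enum until it is exhausted;
  // nextDoc() returns NO_MORE_DOCS once no documents remain.
  static int countDocs(DocsEnum docs) throws IOException {
    int count = 0;
    while (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
      count++;
    }
    return count;
  }
}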