LUCENE-2480: Remove more dead code

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@958137 13f79535-47bb-0310-9956-ffa450edef68
Uwe Schindler 2010-06-25 22:22:16 +00:00
parent ffc358d0d5
commit 52085fa44f
1 changed file with 21 additions and 39 deletions


@@ -235,29 +235,23 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
       if (!reader.isDeleted(i)) {
         Document d = reader.document(i);
         List<Fieldable> fields = d.getFields();
-        if (!oldName.startsWith("19.") &&
-            !oldName.startsWith("20.") &&
-            !oldName.startsWith("21.") &&
-            !oldName.startsWith("22.")) {
-          if (d.getField("content3") == null) {
-            final int numFields = 5;
-            assertEquals(numFields, fields.size());
-            Field f = d.getField("id");
-            assertEquals(""+i, f.stringValue());
+        if (d.getField("content3") == null) {
+          final int numFields = 5;
+          assertEquals(numFields, fields.size());
+          Field f = d.getField("id");
+          assertEquals(""+i, f.stringValue());
-            f = d.getField("utf8");
-            assertEquals("Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", f.stringValue());
+          f = d.getField("utf8");
+          assertEquals("Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", f.stringValue());
-            f = d.getField("autf8");
-            assertEquals("Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", f.stringValue());
+          f = d.getField("autf8");
+          assertEquals("Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", f.stringValue());
-            f = d.getField("content2");
-            assertEquals("here is more content with aaa aaa aaa", f.stringValue());
+          f = d.getField("content2");
+          assertEquals("here is more content with aaa aaa aaa", f.stringValue());
-            f = d.getField("fie\u2C77ld");
-            assertEquals("field with non-ascii name", f.stringValue());
-          }
+          f = d.getField("fie\u2C77ld");
+          assertEquals("field with non-ascii name", f.stringValue());
         }
       } else
         // Only ID 7 is deleted
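
Aside (not part of the commit): the \uXXXX pairs in the asserted strings are UTF-16 surrogate pairs for supplementary code points — \uD834\uDD1E is U+1D11E (MUSICAL SYMBOL G CLEF), \uD834\uDD60 is U+1D160 (MUSICAL SYMBOL EIGHTH NOTE), and \uD917\uDC17 is the unassigned code point U+55C17 — so the test exercises indexing and retrieval of text outside the Basic Multilingual Plane. A minimal standalone Java check of that decoding:

    // Sanity check: each surrogate pair decodes to the intended code point.
    public class SurrogateCheck {
      public static void main(String[] args) {
        System.out.println(Integer.toHexString("\uD834\uDD1E".codePointAt(0))); // prints 1d11e
        System.out.println(Integer.toHexString("\uD834\uDD60".codePointAt(0))); // prints 1d160
        System.out.println(Integer.toHexString("\uD917\uDC17".codePointAt(0))); // prints 55c17
      }
    }
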
@@ -273,18 +267,12 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
     doTestHits(hits, 34, searcher.getIndexReader());
-    if (!oldName.startsWith("19.") &&
-        !oldName.startsWith("20.") &&
-        !oldName.startsWith("21.") &&
-        !oldName.startsWith("22.")) {
-      // Test on indices >= 2.3
-      hits = searcher.search(new TermQuery(new Term("utf8", "\u0000")), null, 1000).scoreDocs;
-      assertEquals(34, hits.length);
-      hits = searcher.search(new TermQuery(new Term("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne")), null, 1000).scoreDocs;
-      assertEquals(34, hits.length);
-      hits = searcher.search(new TermQuery(new Term("utf8", "ab\ud917\udc17cd")), null, 1000).scoreDocs;
-      assertEquals(34, hits.length);
-    }
+    hits = searcher.search(new TermQuery(new Term("utf8", "\u0000")), null, 1000).scoreDocs;
+    assertEquals(34, hits.length);
+    hits = searcher.search(new TermQuery(new Term("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne")), null, 1000).scoreDocs;
+    assertEquals(34, hits.length);
+    hits = searcher.search(new TermQuery(new Term("utf8", "ab\ud917\udc17cd")), null, 1000).scoreDocs;
+    assertEquals(34, hits.length);
     searcher.close();
     dir.close();
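
For readers unfamiliar with the search calls above, here is the same idiom as a standalone sketch. The setup (IndexReader.open on a Directory named dir) is illustrative only; searcher.search(Query, Filter, int) and TopDocs.scoreDocs are the trunk-era API the test itself uses:

    // Sketch: run a TermQuery against one field and collect up to 1000 hits.
    IndexSearcher searcher = new IndexSearcher(IndexReader.open(dir)); // dir: hypothetical Directory
    ScoreDoc[] hits = searcher.search(
        new TermQuery(new Term("utf8", "\u0000")), null, 1000).scoreDocs; // null = no Filter
    System.out.println(hits.length + " matching docs");
    searcher.close();
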
@@ -564,12 +552,6 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
     return new File(TEMP_DIR, dirName).getCanonicalPath();
   }
-  static final String TEXT_TO_COMPRESS = "this is a compressed field and should appear in 3.0 as an uncompressed field after merge";
-  // FieldSelectorResult.SIZE returns compressed size for compressed fields, which are internally handled as binary;
-  // do it in the same way like FieldsWriter, do not use CompressionTools.compressString() for compressed fields:
-  static final byte[] BINARY_TO_COMPRESS = new byte[]{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20};
   private int countDocs(DocsEnum docs) throws IOException {
     int count = 0;
     while((docs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
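
The hunk above ends mid-method; for reference, the complete shape of the iteration idiom countDocs uses — a sketch assuming the flex-API DocsEnum/DocIdSetIterator contract of the time, where nextDoc() returns successive docIDs until NO_MORE_DOCS:

    private int countDocs(DocsEnum docs) throws IOException {
      int count = 0;
      // Advance through every matching document until the enum is exhausted.
      while (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
        count++;
      }
      return count;
    }
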