mirror of https://github.com/apache/lucene.git
LUCENE-10052: first cut at LTC.newBytesRef methods, and switching a few test cases over (#245)
* LUCENE-10052: first cut at LTC.newBytesRef methods, to randomize the offset/length of a BytesRef, and switching a few test cases over
parent 2d21a600ba
commit 65a53450dc
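The helper sketched below illustrates the idea described in the commit message for LTC.newBytesRef: build the BytesRef over a backing array with random leading (and trailing) padding, so that any code assuming offset == 0 or reading the whole backing array fails the test. This is a hypothetical sketch only — the class name, the Random parameter, and the padding sizes are illustrative assumptions, not the committed LuceneTestCase implementation.

import java.util.Random;
import org.apache.lucene.util.BytesRef;

// Hypothetical sketch, not the committed code: copy the payload into a larger
// backing array at a random offset and fill the padding with noise, so tests
// catch code that ignores BytesRef.offset/BytesRef.length.
class RandomOffsetBytesRefSketch {
  static BytesRef newBytesRef(Random random, byte[] content) {
    int pad = random.nextInt(5); // random leading padding
    byte[] backing = new byte[pad + content.length + random.nextInt(5)]; // plus trailing padding
    random.nextBytes(backing); // noise everywhere first
    System.arraycopy(content, 0, backing, pad, content.length); // then the real payload
    return new BytesRef(backing, pad, content.length); // the ref covers only the payload
  }
}

This is also why toBytes in TestBinaryDocValuesUpdates (first test hunk below) now writes at bytes.offset + upto and sets bytes.length explicitly instead of appending from index 0.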
@@ -391,7 +391,8 @@ public final class PagedBytes implements Accountable {
 @Override
 public void writeBytes(byte[] b, int offset, int length) {
-assert b.length >= offset + length;
+assert b.length >= offset + length
+: "b.length=" + b.length + " offset=" + offset + " length=" + length;
 if (length == 0) {
 return;
 }
@@ -68,12 +68,14 @@ public class TestBinaryDocValuesUpdates extends LuceneTestCase {
 // encodes a long into a BytesRef as VLong so that we get varying number of bytes when we update
 static BytesRef toBytes(long value) {
 // long orig = value;
-BytesRef bytes = new BytesRef(10); // negative longs may take 10 bytes
+BytesRef bytes = newBytesRef(10); // negative longs may take 10 bytes
+int upto = 0;
 while ((value & ~0x7FL) != 0L) {
-bytes.bytes[bytes.length++] = (byte) ((value & 0x7FL) | 0x80L);
+bytes.bytes[bytes.offset + upto++] = (byte) ((value & 0x7FL) | 0x80L);
 value >>>= 7;
 }
-bytes.bytes[bytes.length++] = (byte) value;
+bytes.bytes[bytes.offset + upto++] = (byte) value;
+bytes.length = upto;
 // System.err.println("[" + Thread.currentThread().getName() + "] value=" + orig + ", bytes="
 // + bytes);
 return bytes;

@@ -334,10 +336,10 @@ public class TestBinaryDocValuesUpdates extends LuceneTestCase {
 Document doc = new Document();
 doc.add(new StringField("dvUpdateKey", "dv", Store.NO));
 doc.add(new NumericDocValuesField("ndv", i));
-doc.add(new BinaryDocValuesField("bdv", new BytesRef(Integer.toString(i))));
-doc.add(new SortedDocValuesField("sdv", new BytesRef(Integer.toString(i))));
-doc.add(new SortedSetDocValuesField("ssdv", new BytesRef(Integer.toString(i))));
-doc.add(new SortedSetDocValuesField("ssdv", new BytesRef(Integer.toString(i * 2))));
+doc.add(new BinaryDocValuesField("bdv", newBytesRef(Integer.toString(i))));
+doc.add(new SortedDocValuesField("sdv", newBytesRef(Integer.toString(i))));
+doc.add(new SortedSetDocValuesField("ssdv", newBytesRef(Integer.toString(i))));
+doc.add(new SortedSetDocValuesField("ssdv", newBytesRef(Integer.toString(i * 2))));
 writer.addDocument(doc);
 }
 writer.commit();

@@ -359,7 +361,7 @@ public class TestBinaryDocValuesUpdates extends LuceneTestCase {
 assertEquals(17, getValue(bdv));
 assertEquals(i, sdv.nextDoc());
 BytesRef term = sdv.lookupOrd(sdv.ordValue());
-assertEquals(new BytesRef(Integer.toString(i)), term);
+assertEquals(newBytesRef(Integer.toString(i)), term);
 assertEquals(i, ssdv.nextDoc());
 long ord = ssdv.nextOrd();
 term = ssdv.lookupOrd(ord);

@@ -491,7 +493,7 @@ public class TestBinaryDocValuesUpdates extends LuceneTestCase {
 Document doc = new Document();
 doc.add(new StringField("key", "doc", Store.NO));
 doc.add(new BinaryDocValuesField("bdv", toBytes(5L)));
-doc.add(new SortedDocValuesField("sorted", new BytesRef("value")));
+doc.add(new SortedDocValuesField("sorted", newBytesRef("value")));
 writer.addDocument(doc); // flushed document
 writer.commit();
 writer.addDocument(doc); // in-memory document

@@ -508,7 +510,7 @@ public class TestBinaryDocValuesUpdates extends LuceneTestCase {
 assertEquals(17, getValue(bdv));
 assertEquals(i, sdv.nextDoc());
 BytesRef term = sdv.lookupOrd(sdv.ordValue());
-assertEquals(new BytesRef("value"), term);
+assertEquals(newBytesRef("value"), term);
 }

 reader.close();
@@ -80,7 +80,6 @@ import org.apache.lucene.search.TopFieldCollector;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;

@@ -239,7 +238,7 @@ public class TestIndexSorting extends LuceneTestCase {
 public void testStringAlreadySorted() throws Exception {
 assertNeedsIndexSortMerge(
 new SortField("foo", SortField.Type.STRING),
-(doc) -> doc.add(new SortedDocValuesField("foo", new BytesRef("default"))),
+(doc) -> doc.add(new SortedDocValuesField("foo", newBytesRef("default"))),
 (doc) -> doc.add(new SortedDocValuesField("foo", TestUtil.randomBinaryTerm(random()))));
 }

@@ -265,7 +264,7 @@ public class TestIndexSorting extends LuceneTestCase {
 assertNeedsIndexSortMerge(
 new SortedSetSortField("foo", false),
 (doc) -> {
-doc.add(new SortedSetDocValuesField("foo", new BytesRef("")));
+doc.add(new SortedSetDocValuesField("foo", newBytesRef("")));
 int num = random().nextInt(5);
 for (int j = 0; j < num; j++) {
 doc.add(new SortedSetDocValuesField("foo", TestUtil.randomBinaryTerm(random())));

@@ -286,19 +285,19 @@ public class TestIndexSorting extends LuceneTestCase {
 iwc.setIndexSort(indexSort);
 IndexWriter w = new IndexWriter(dir, iwc);
 Document doc = new Document();
-doc.add(new SortedDocValuesField("foo", new BytesRef("zzz")));
+doc.add(new SortedDocValuesField("foo", newBytesRef("zzz")));
 w.addDocument(doc);
 // so we get more than one segment, so that forceMerge actually does merge, since we only get a
 // sorted segment by merging:
 w.commit();

 doc = new Document();
-doc.add(new SortedDocValuesField("foo", new BytesRef("aaa")));
+doc.add(new SortedDocValuesField("foo", newBytesRef("aaa")));
 w.addDocument(doc);
 w.commit();

 doc = new Document();
-doc.add(new SortedDocValuesField("foo", new BytesRef("mmm")));
+doc.add(new SortedDocValuesField("foo", newBytesRef("mmm")));
 w.addDocument(doc);
 w.forceMerge(1);

@@ -325,7 +324,7 @@ public class TestIndexSorting extends LuceneTestCase {
 IndexWriter w = new IndexWriter(dir, iwc);
 Document doc = new Document();
 doc.add(new NumericDocValuesField("id", 3));
-doc.add(new SortedSetDocValuesField("foo", new BytesRef("zzz")));
+doc.add(new SortedSetDocValuesField("foo", newBytesRef("zzz")));
 w.addDocument(doc);
 // so we get more than one segment, so that forceMerge actually does merge, since we only get a
 // sorted segment by merging:

@@ -333,16 +332,16 @@ public class TestIndexSorting extends LuceneTestCase {

 doc = new Document();
 doc.add(new NumericDocValuesField("id", 1));
-doc.add(new SortedSetDocValuesField("foo", new BytesRef("aaa")));
-doc.add(new SortedSetDocValuesField("foo", new BytesRef("zzz")));
-doc.add(new SortedSetDocValuesField("foo", new BytesRef("bcg")));
+doc.add(new SortedSetDocValuesField("foo", newBytesRef("aaa")));
+doc.add(new SortedSetDocValuesField("foo", newBytesRef("zzz")));
+doc.add(new SortedSetDocValuesField("foo", newBytesRef("bcg")));
 w.addDocument(doc);
 w.commit();

 doc = new Document();
 doc.add(new NumericDocValuesField("id", 2));
-doc.add(new SortedSetDocValuesField("foo", new BytesRef("mmm")));
-doc.add(new SortedSetDocValuesField("foo", new BytesRef("pppp")));
+doc.add(new SortedSetDocValuesField("foo", newBytesRef("mmm")));
+doc.add(new SortedSetDocValuesField("foo", newBytesRef("pppp")));
 w.addDocument(doc);
 w.forceMerge(1);

@@ -371,7 +370,7 @@ public class TestIndexSorting extends LuceneTestCase {
 iwc.setIndexSort(indexSort);
 IndexWriter w = new IndexWriter(dir, iwc);
 Document doc = new Document();
-doc.add(new SortedDocValuesField("foo", new BytesRef("zzz")));
+doc.add(new SortedDocValuesField("foo", newBytesRef("zzz")));
 w.addDocument(doc);
 // so we get more than one segment, so that forceMerge actually does merge, since we only get
 // a sorted segment by merging:

@@ -382,7 +381,7 @@ public class TestIndexSorting extends LuceneTestCase {
 w.commit();

 doc = new Document();
-doc.add(new SortedDocValuesField("foo", new BytesRef("mmm")));
+doc.add(new SortedDocValuesField("foo", newBytesRef("mmm")));
 w.addDocument(doc);
 w.forceMerge(1);

@@ -419,9 +418,9 @@ public class TestIndexSorting extends LuceneTestCase {
 IndexWriter w = new IndexWriter(dir, iwc);
 Document doc = new Document();
 doc.add(new NumericDocValuesField("id", 3));
-doc.add(new SortedSetDocValuesField("foo", new BytesRef("zzz")));
-doc.add(new SortedSetDocValuesField("foo", new BytesRef("zzza")));
-doc.add(new SortedSetDocValuesField("foo", new BytesRef("zzzd")));
+doc.add(new SortedSetDocValuesField("foo", newBytesRef("zzz")));
+doc.add(new SortedSetDocValuesField("foo", newBytesRef("zzza")));
+doc.add(new SortedSetDocValuesField("foo", newBytesRef("zzzd")));
 w.addDocument(doc);
 // so we get more than one segment, so that forceMerge actually does merge, since we only get
 // a sorted segment by merging:

@@ -435,8 +434,8 @@ public class TestIndexSorting extends LuceneTestCase {

 doc = new Document();
 doc.add(new NumericDocValuesField("id", 2));
-doc.add(new SortedSetDocValuesField("foo", new BytesRef("mmm")));
-doc.add(new SortedSetDocValuesField("foo", new BytesRef("nnnn")));
+doc.add(new SortedSetDocValuesField("foo", newBytesRef("mmm")));
+doc.add(new SortedSetDocValuesField("foo", newBytesRef("nnnn")));
 w.addDocument(doc);
 w.forceMerge(1);

@@ -475,7 +474,7 @@ public class TestIndexSorting extends LuceneTestCase {
 iwc.setIndexSort(indexSort);
 IndexWriter w = new IndexWriter(dir, iwc);
 Document doc = new Document();
-doc.add(new SortedDocValuesField("foo", new BytesRef("zzz")));
+doc.add(new SortedDocValuesField("foo", newBytesRef("zzz")));
 w.addDocument(doc);
 // so we get more than one segment, so that forceMerge actually does merge, since we only get
 // a sorted segment by merging:

@@ -486,7 +485,7 @@ public class TestIndexSorting extends LuceneTestCase {
 w.commit();

 doc = new Document();
-doc.add(new SortedDocValuesField("foo", new BytesRef("mmm")));
+doc.add(new SortedDocValuesField("foo", newBytesRef("mmm")));
 w.addDocument(doc);
 w.forceMerge(1);

@@ -523,8 +522,8 @@ public class TestIndexSorting extends LuceneTestCase {
 IndexWriter w = new IndexWriter(dir, iwc);
 Document doc = new Document();
 doc.add(new NumericDocValuesField("id", 2));
-doc.add(new SortedSetDocValuesField("foo", new BytesRef("zzz")));
-doc.add(new SortedSetDocValuesField("foo", new BytesRef("zzzd")));
+doc.add(new SortedSetDocValuesField("foo", newBytesRef("zzz")));
+doc.add(new SortedSetDocValuesField("foo", newBytesRef("zzzd")));
 w.addDocument(doc);
 // so we get more than one segment, so that forceMerge actually does merge, since we only get
 // a sorted segment by merging:

@@ -538,8 +537,8 @@ public class TestIndexSorting extends LuceneTestCase {

 doc = new Document();
 doc.add(new NumericDocValuesField("id", 1));
-doc.add(new SortedSetDocValuesField("foo", new BytesRef("mmm")));
-doc.add(new SortedSetDocValuesField("foo", new BytesRef("ppp")));
+doc.add(new SortedSetDocValuesField("foo", newBytesRef("mmm")));
+doc.add(new SortedSetDocValuesField("foo", newBytesRef("ppp")));
 w.addDocument(doc);
 w.forceMerge(1);
@@ -1951,7 +1950,7 @@ public class TestIndexSorting extends LuceneTestCase {
 iwc.setIndexSort(indexSort);
 IndexWriter w = new IndexWriter(dir, iwc);
 Document doc = new Document();
-doc.add(new StringField("id", new BytesRef("0"), Store.NO));
+doc.add(new StringField("id", newBytesRef("0"), Store.NO));
 doc.add(new NumericDocValuesField("foo", random().nextInt()));
 w.addDocument(doc);
 w.commit();

@@ -2282,7 +2281,7 @@ public class TestIndexSorting extends LuceneTestCase {

 clearAttributes();
 term.append("#all#");
-payload.setPayload(new BytesRef(Integer.toString(pos)));
+payload.setPayload(newBytesRef(Integer.toString(pos)));
 offset.setOffset(off, off);
 --pos;
 ++off;

@@ -2331,13 +2330,13 @@ public class TestIndexSorting extends LuceneTestCase {
 .collect(Collectors.joining(" "));
 TextField norms = new TextField("norms", value, Store.NO);
 doc.add(norms);
-doc.add(new BinaryDocValuesField("binary", new BytesRef(Integer.toString(id))));
-doc.add(new SortedDocValuesField("sorted", new BytesRef(Integer.toString(id))));
+doc.add(new BinaryDocValuesField("binary", newBytesRef(Integer.toString(id))));
+doc.add(new SortedDocValuesField("sorted", newBytesRef(Integer.toString(id))));
 doc.add(
-new SortedSetDocValuesField("multi_valued_string", new BytesRef(Integer.toString(id))));
+new SortedSetDocValuesField("multi_valued_string", newBytesRef(Integer.toString(id))));
 doc.add(
 new SortedSetDocValuesField(
-"multi_valued_string", new BytesRef(Integer.toString(id + 1))));
+"multi_valued_string", newBytesRef(Integer.toString(id + 1))));
 doc.add(new SortedNumericDocValuesField("multi_valued_numeric", id));
 doc.add(new SortedNumericDocValuesField("multi_valued_numeric", id + 1));
 doc.add(new Field("term_vectors", Integer.toString(id), TERM_VECTORS_TYPE));

@@ -2568,7 +2567,7 @@ public class TestIndexSorting extends LuceneTestCase {
 System.out.println(" long=" + docValues.longValue);
 System.out.println(" float=" + docValues.floatValue);
 System.out.println(" double=" + docValues.doubleValue);
-System.out.println(" bytes=" + new BytesRef(docValues.bytesValue));
+System.out.println(" bytes=" + newBytesRef(docValues.bytesValue));
 System.out.println(" mvf=" + Arrays.toString(docValues.floatValues));
 }

@@ -2579,7 +2578,7 @@ public class TestIndexSorting extends LuceneTestCase {
 doc.add(new NumericDocValuesField("long", docValues.longValue));
 doc.add(new DoubleDocValuesField("double", docValues.doubleValue));
 doc.add(new FloatDocValuesField("float", docValues.floatValue));
-doc.add(new SortedDocValuesField("bytes", new BytesRef(docValues.bytesValue)));
+doc.add(new SortedDocValuesField("bytes", newBytesRef(docValues.bytesValue)));

 for (int value : docValues.intValues) {
 doc.add(new SortedNumericDocValuesField("multi_valued_int", value));

@@ -2602,7 +2601,7 @@ public class TestIndexSorting extends LuceneTestCase {
 }

 for (byte[] value : docValues.bytesValues) {
-doc.add(new SortedSetDocValuesField("multi_valued_bytes", new BytesRef(value)));
+doc.add(new SortedSetDocValuesField("multi_valued_bytes", newBytesRef(value)));
 }

 w1.addDocument(doc);

@@ -2690,7 +2689,7 @@ public class TestIndexSorting extends LuceneTestCase {
 } else {
 value = "bar1";
 }
-doc.add(new SortedDocValuesField("foo", new BytesRef(value)));
+doc.add(new SortedDocValuesField("foo", newBytesRef(value)));
 w.addDocument(doc);
 if (id == 500) {
 w.commit();

@@ -2723,7 +2722,7 @@ public class TestIndexSorting extends LuceneTestCase {
 doc.add(new NumericDocValuesField("dense_int", i));
 if (i < 64) {
 doc.add(new NumericDocValuesField("sparse_int", i));
-doc.add(new BinaryDocValuesField("sparse_binary", new BytesRef(Integer.toString(i))));
+doc.add(new BinaryDocValuesField("sparse_binary", newBytesRef(Integer.toString(i))));
 textField.setStringValue("foo");
 doc.add(textField);
 }

@@ -2749,7 +2748,7 @@ public class TestIndexSorting extends LuceneTestCase {
 assertTrue(normsValues.advanceExact(docID));
 assertEquals(1, normsValues.longValue());
 assertEquals(127 - docID, (int) sparseValues.longValue());
-assertEquals(new BytesRef(Integer.toString(127 - docID)), sparseBinaryValues.binaryValue());
+assertEquals(newBytesRef(Integer.toString(127 - docID)), sparseBinaryValues.binaryValue());
 } else {
 assertFalse(sparseBinaryValues.advanceExact(docID));
 assertFalse(sparseValues.advanceExact(docID));

@@ -2794,8 +2793,8 @@ public class TestIndexSorting extends LuceneTestCase {
 public void testWrongSortFieldType() throws Exception {
 Directory dir = newDirectory();
 List<Field> dvs = new ArrayList<>();
-dvs.add(new SortedDocValuesField("field", new BytesRef("")));
-dvs.add(new SortedSetDocValuesField("field", new BytesRef("")));
+dvs.add(new SortedDocValuesField("field", newBytesRef("")));
+dvs.add(new SortedSetDocValuesField("field", newBytesRef("")));
 dvs.add(new NumericDocValuesField("field", 42));
 dvs.add(new SortedNumericDocValuesField("field", 42));
@@ -496,7 +496,7 @@ public class TestIndexWriter extends LuceneTestCase {
 Term t = new Term("field", "a");
 assertEquals(1, reader.docFreq(t));
 PostingsEnum td =
-TestUtil.docs(random(), reader, "field", new BytesRef("a"), null, PostingsEnum.FREQS);
+TestUtil.docs(random(), reader, "field", newBytesRef("a"), null, PostingsEnum.FREQS);
 td.nextDoc();
 assertEquals(128 * 1024, td.freq());
 reader.close();

@@ -682,9 +682,9 @@ public class TestIndexWriter extends LuceneTestCase {
 DirectoryReader reader = DirectoryReader.open(dir);
 LeafReader subreader = getOnlyLeafReader(reader);
 TermsEnum te = subreader.terms("").iterator();
-assertEquals(new BytesRef("a"), te.next());
-assertEquals(new BytesRef("b"), te.next());
-assertEquals(new BytesRef("c"), te.next());
+assertEquals(newBytesRef("a"), te.next());
+assertEquals(newBytesRef("b"), te.next());
+assertEquals(newBytesRef("c"), te.next());
 assertNull(te.next());
 reader.close();
 dir.close();

@@ -703,10 +703,10 @@ public class TestIndexWriter extends LuceneTestCase {
 DirectoryReader reader = DirectoryReader.open(dir);
 LeafReader subreader = getOnlyLeafReader(reader);
 TermsEnum te = subreader.terms("").iterator();
-assertEquals(new BytesRef(""), te.next());
-assertEquals(new BytesRef("a"), te.next());
-assertEquals(new BytesRef("b"), te.next());
-assertEquals(new BytesRef("c"), te.next());
+assertEquals(newBytesRef(""), te.next());
+assertEquals(newBytesRef("a"), te.next());
+assertEquals(newBytesRef("b"), te.next());
+assertEquals(newBytesRef("c"), te.next());
 assertNull(te.next());
 reader.close();
 dir.close();

@@ -904,22 +904,22 @@ public class TestIndexWriter extends LuceneTestCase {
 Document doc = new Document();
 doc.add(newStringField(random, "id", "500", Field.Store.NO));
 doc.add(newField(random, "field", "some prepackaged text contents", storedTextType));
-doc.add(new BinaryDocValuesField("binarydv", new BytesRef("500")));
+doc.add(new BinaryDocValuesField("binarydv", newBytesRef("500")));
 doc.add(new NumericDocValuesField("numericdv", 500));
-doc.add(new SortedDocValuesField("sorteddv", new BytesRef("500")));
-doc.add(new SortedSetDocValuesField("sortedsetdv", new BytesRef("one")));
-doc.add(new SortedSetDocValuesField("sortedsetdv", new BytesRef("two")));
+doc.add(new SortedDocValuesField("sorteddv", newBytesRef("500")));
+doc.add(new SortedSetDocValuesField("sortedsetdv", newBytesRef("one")));
+doc.add(new SortedSetDocValuesField("sortedsetdv", newBytesRef("two")));
 doc.add(new SortedNumericDocValuesField("sortednumericdv", 4));
 doc.add(new SortedNumericDocValuesField("sortednumericdv", 3));
 w.addDocument(doc);
 doc = new Document();
 doc.add(newStringField(random, "id", "501", Field.Store.NO));
 doc.add(newField(random, "field", "some more contents", storedTextType));
-doc.add(new BinaryDocValuesField("binarydv", new BytesRef("501")));
+doc.add(new BinaryDocValuesField("binarydv", newBytesRef("501")));
 doc.add(new NumericDocValuesField("numericdv", 501));
-doc.add(new SortedDocValuesField("sorteddv", new BytesRef("501")));
-doc.add(new SortedSetDocValuesField("sortedsetdv", new BytesRef("two")));
-doc.add(new SortedSetDocValuesField("sortedsetdv", new BytesRef("three")));
+doc.add(new SortedDocValuesField("sorteddv", newBytesRef("501")));
+doc.add(new SortedSetDocValuesField("sortedsetdv", newBytesRef("two")));
+doc.add(new SortedSetDocValuesField("sortedsetdv", newBytesRef("three")));
 doc.add(new SortedNumericDocValuesField("sortednumericdv", 6));
 doc.add(new SortedNumericDocValuesField("sortednumericdv", 1));
 w.addDocument(doc);

@@ -978,10 +978,10 @@ public class TestIndexWriter extends LuceneTestCase {

 Document doc = new Document();
 Field idField = newStringField(random, "id", "", Field.Store.NO);
-Field binaryDVField = new BinaryDocValuesField("binarydv", new BytesRef());
+Field binaryDVField = new BinaryDocValuesField("binarydv", newBytesRef());
 Field numericDVField = new NumericDocValuesField("numericdv", 0);
-Field sortedDVField = new SortedDocValuesField("sorteddv", new BytesRef());
-Field sortedSetDVField = new SortedSetDocValuesField("sortedsetdv", new BytesRef());
+Field sortedDVField = new SortedDocValuesField("sorteddv", newBytesRef());
+Field sortedSetDVField = new SortedSetDocValuesField("sortedsetdv", newBytesRef());
 doc.add(idField);
 doc.add(newField(random, "field", "some text contents", storedTextType));
 doc.add(binaryDVField);

@@ -991,10 +991,10 @@ public class TestIndexWriter extends LuceneTestCase {
 for (int i = 0; i < 100; i++) {
 // log.println("\nTEST: i=" + i);
 idField.setStringValue(Integer.toString(i));
-binaryDVField.setBytesValue(new BytesRef(idField.stringValue()));
+binaryDVField.setBytesValue(newBytesRef(idField.stringValue()));
 numericDVField.setLongValue(i);
-sortedDVField.setBytesValue(new BytesRef(idField.stringValue()));
-sortedSetDVField.setBytesValue(new BytesRef(idField.stringValue()));
+sortedDVField.setBytesValue(newBytesRef(idField.stringValue()));
+sortedSetDVField.setBytesValue(newBytesRef(idField.stringValue()));
 int action = random.nextInt(100);
 if (action == 17) {
 w.addIndexes(adder);
@@ -1226,27 +1226,27 @@ public class TestIndexWriter extends LuceneTestCase {

 // test that the terms were indexed.
 assertTrue(
-TestUtil.docs(random(), ir, "binary", new BytesRef("doc1field1"), null, PostingsEnum.NONE)
+TestUtil.docs(random(), ir, "binary", newBytesRef("doc1field1"), null, PostingsEnum.NONE)
 .nextDoc()
 != DocIdSetIterator.NO_MORE_DOCS);
 assertTrue(
-TestUtil.docs(random(), ir, "binary", new BytesRef("doc2field1"), null, PostingsEnum.NONE)
+TestUtil.docs(random(), ir, "binary", newBytesRef("doc2field1"), null, PostingsEnum.NONE)
 .nextDoc()
 != DocIdSetIterator.NO_MORE_DOCS);
 assertTrue(
-TestUtil.docs(random(), ir, "binary", new BytesRef("doc3field1"), null, PostingsEnum.NONE)
+TestUtil.docs(random(), ir, "binary", newBytesRef("doc3field1"), null, PostingsEnum.NONE)
 .nextDoc()
 != DocIdSetIterator.NO_MORE_DOCS);
 assertTrue(
-TestUtil.docs(random(), ir, "string", new BytesRef("doc1field2"), null, PostingsEnum.NONE)
+TestUtil.docs(random(), ir, "string", newBytesRef("doc1field2"), null, PostingsEnum.NONE)
 .nextDoc()
 != DocIdSetIterator.NO_MORE_DOCS);
 assertTrue(
-TestUtil.docs(random(), ir, "string", new BytesRef("doc2field2"), null, PostingsEnum.NONE)
+TestUtil.docs(random(), ir, "string", newBytesRef("doc2field2"), null, PostingsEnum.NONE)
 .nextDoc()
 != DocIdSetIterator.NO_MORE_DOCS);
 assertTrue(
-TestUtil.docs(random(), ir, "string", new BytesRef("doc3field2"), null, PostingsEnum.NONE)
+TestUtil.docs(random(), ir, "string", newBytesRef("doc3field2"), null, PostingsEnum.NONE)
 .nextDoc()
 != DocIdSetIterator.NO_MORE_DOCS);

@@ -2422,8 +2422,8 @@ public class TestIndexWriter extends LuceneTestCase {
 iwc.setMergePolicy(newLogMergePolicy());
 IndexWriter iwriter = new IndexWriter(directory, iwc);
 Document doc = new Document();
-doc.add(new SortedDocValuesField("dv", new BytesRef("foo!")));
-doc.add(new SortedDocValuesField("dv", new BytesRef("bar!")));
+doc.add(new SortedDocValuesField("dv", newBytesRef("foo!")));
+doc.add(new SortedDocValuesField("dv", newBytesRef("bar!")));
 expectThrows(
 IllegalArgumentException.class,
 () -> {

@@ -2440,7 +2440,7 @@ public class TestIndexWriter extends LuceneTestCase {
 Directory dir = newDirectory();
 IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
 Document doc = new Document();
-doc.add(new SortedDocValuesField("dv", new BytesRef("foo!")));
+doc.add(new SortedDocValuesField("dv", newBytesRef("foo!")));
 w.addDocument(doc);
 w.close();
 // Close again should have no effect

@@ -2452,7 +2452,7 @@ public class TestIndexWriter extends LuceneTestCase {
 Directory dir = newDirectory();
 IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
 Document doc = new Document();
-doc.add(new SortedDocValuesField("dv", new BytesRef("foo!")));
+doc.add(new SortedDocValuesField("dv", newBytesRef("foo!")));
 w.addDocument(doc);
 w.rollback();
 // Close after rollback should have no effect

@@ -2464,7 +2464,7 @@ public class TestIndexWriter extends LuceneTestCase {
 Directory dir = newDirectory();
 IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
 Document doc = new Document();
-doc.add(new SortedDocValuesField("dv", new BytesRef("foo!")));
+doc.add(new SortedDocValuesField("dv", newBytesRef("foo!")));
 w.addDocument(doc);
 w.close();
 // Rollback after close should have no effect

@@ -2521,7 +2521,7 @@ public class TestIndexWriter extends LuceneTestCase {
 });
 IndexWriter w = new IndexWriter(dir, iwc);
 Document doc = new Document();
-doc.add(new SortedDocValuesField("dv", new BytesRef("foo!")));
+doc.add(new SortedDocValuesField("dv", newBytesRef("foo!")));
 w.addDocument(doc);
 w.commit();
 w.addDocument(doc);

@@ -3810,7 +3810,7 @@ public class TestIndexWriter extends LuceneTestCase {
 IndexWriter w = new IndexWriter(d, newIndexWriterConfig(new MockAnalyzer(random())));
 Document doc = new Document();
 Token token = new Token("bar", 0, 3);
-BytesRef evil = new BytesRef(new byte[1024]);
+BytesRef evil = newBytesRef(new byte[1024]);
 evil.offset = 1000; // offset + length is now out of bounds.
 token.setPayload(evil);
 doc.add(new TextField("foo", new CannedTokenStream(token)));

@@ -3902,7 +3902,7 @@ public class TestIndexWriter extends LuceneTestCase {
 Bits liveDocs = sr.getLiveDocs();
 for (Integer dId : uniqueDocs) {
 boolean mustBeHardDeleted = dId % 2 == 0;
-if (iterator.seekExact(new BytesRef(dId.toString()))) {
+if (iterator.seekExact(newBytesRef(dId.toString()))) {
 PostingsEnum postings = iterator.postings(null);
 while (postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
 if (liveDocs.get(postings.docID())) {
@@ -139,7 +139,7 @@ public class TestIndexableField extends LuceneTestCase {
 for (int idx = 0; idx < bytes.length; idx++) {
 bytes[idx] = (byte) (counter + idx);
 }
-return new BytesRef(bytes, 0, bytes.length);
+return newBytesRef(bytes, 0, bytes.length);
 } else {
 return null;
 }

@@ -296,14 +296,14 @@ public class TestIndexableField extends LuceneTestCase {
 final Terms tfv = r.getTermVectors(docID).terms(name);
 assertNotNull(tfv);
 TermsEnum termsEnum = tfv.iterator();
-assertEquals(new BytesRef("" + counter), termsEnum.next());
+assertEquals(newBytesRef("" + counter), termsEnum.next());
 assertEquals(1, termsEnum.totalTermFreq());
 PostingsEnum dpEnum = termsEnum.postings(null, PostingsEnum.ALL);
 assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
 assertEquals(1, dpEnum.freq());
 assertEquals(1, dpEnum.nextPosition());

-assertEquals(new BytesRef("text"), termsEnum.next());
+assertEquals(newBytesRef("text"), termsEnum.next());
 assertEquals(1, termsEnum.totalTermFreq());
 dpEnum = termsEnum.postings(dpEnum, PostingsEnum.ALL);
 assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -84,15 +84,15 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 protected void addRandomFields(Document doc) {
 if (usually()) {
 doc.add(new NumericDocValuesField("ndv", random().nextInt(1 << 12)));
-doc.add(new BinaryDocValuesField("bdv", new BytesRef(TestUtil.randomSimpleString(random()))));
+doc.add(new BinaryDocValuesField("bdv", newBytesRef(TestUtil.randomSimpleString(random()))));
 doc.add(
-new SortedDocValuesField("sdv", new BytesRef(TestUtil.randomSimpleString(random(), 2))));
+new SortedDocValuesField("sdv", newBytesRef(TestUtil.randomSimpleString(random(), 2))));
 }
 int numValues = random().nextInt(5);
 for (int i = 0; i < numValues; ++i) {
 doc.add(
 new SortedSetDocValuesField(
-"ssdv", new BytesRef(TestUtil.randomSimpleString(random(), 2))));
+"ssdv", newBytesRef(TestUtil.randomSimpleString(random(), 2))));
 }
 numValues = random().nextInt(5);
 for (int i = 0; i < numValues; ++i) {

@@ -221,8 +221,8 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
 String text = "This is the text to be indexed. " + longTerm;
 doc.add(newTextField("fieldname", text, Field.Store.YES));
-doc.add(new BinaryDocValuesField("dv1", new BytesRef(longTerm)));
-doc.add(new BinaryDocValuesField("dv2", new BytesRef(text)));
+doc.add(new BinaryDocValuesField("dv1", newBytesRef(longTerm)));
+doc.add(new BinaryDocValuesField("dv2", newBytesRef(text)));
 iwriter.addDocument(doc);
 iwriter.close();

@@ -243,11 +243,11 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 BinaryDocValues dv = ireader.leaves().get(0).reader().getBinaryDocValues("dv1");
 assertEquals(hitDocID, dv.advance(hitDocID));
 BytesRef scratch = dv.binaryValue();
-assertEquals(new BytesRef(longTerm), scratch);
+assertEquals(newBytesRef(longTerm), scratch);
 dv = ireader.leaves().get(0).reader().getBinaryDocValues("dv2");
 assertEquals(hitDocID, dv.advance(hitDocID));
 scratch = dv.binaryValue();
-assertEquals(new BytesRef(text), scratch);
+assertEquals(newBytesRef(text), scratch);
 }

 ireader.close();

@@ -272,7 +272,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 for (int j = 0; j < value.length; j++) {
 value[j] = (byte) random().nextInt(vocabRange);
 }
-BytesRef bytesRef = new BytesRef(value);
+BytesRef bytesRef = newBytesRef(value);
 writtenValues.put(i, bytesRef);
 doc.add(newTextField("id", Integer.toString(i), Field.Store.YES));
 doc.add(new BinaryDocValuesField("dv1", bytesRef));

@@ -314,7 +314,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 String text = "This is the text to be indexed. " + longTerm;
 doc.add(newTextField("fieldname", text, Field.Store.YES));
 doc.add(new NumericDocValuesField("dv1", 5));
-doc.add(new BinaryDocValuesField("dv2", new BytesRef("hello world")));
+doc.add(new BinaryDocValuesField("dv2", newBytesRef("hello world")));
 iwriter.addDocument(doc);
 iwriter.close();

@@ -337,7 +337,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 assertEquals(5, dv.longValue());
 BinaryDocValues dv2 = ireader.leaves().get(0).reader().getBinaryDocValues("dv2");
 assertEquals(docID, dv2.advance(docID));
-assertEquals(new BytesRef("hello world"), dv2.binaryValue());
+assertEquals(newBytesRef("hello world"), dv2.binaryValue());
 }

 ireader.close();

@@ -352,9 +352,9 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
 String text = "This is the text to be indexed. " + longTerm;
 doc.add(newTextField("fieldname", text, Field.Store.YES));
-doc.add(new SortedDocValuesField("dv1", new BytesRef("hello hello")));
+doc.add(new SortedDocValuesField("dv1", newBytesRef("hello hello")));
 doc.add(new NumericDocValuesField("dv2", 5));
-doc.add(new BinaryDocValuesField("dv3", new BytesRef("hello world")));
+doc.add(new BinaryDocValuesField("dv3", newBytesRef("hello world")));
 iwriter.addDocument(doc);
 iwriter.close();

@@ -376,13 +376,13 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 assertEquals(docID, dv.advance(docID));
 int ord = dv.ordValue();
 BytesRef scratch = dv.lookupOrd(ord);
-assertEquals(new BytesRef("hello hello"), scratch);
+assertEquals(newBytesRef("hello hello"), scratch);
 NumericDocValues dv2 = ireader.leaves().get(0).reader().getNumericDocValues("dv2");
 assertEquals(docID, dv2.advance(docID));
 assertEquals(5, dv2.longValue());
 BinaryDocValues dv3 = ireader.leaves().get(0).reader().getBinaryDocValues("dv3");
 assertEquals(docID, dv3.advance(docID));
-assertEquals(new BytesRef("hello world"), dv3.binaryValue());
+assertEquals(newBytesRef("hello world"), dv3.binaryValue());
 }

 ireader.close();

@@ -397,8 +397,8 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
 String text = "This is the text to be indexed. " + longTerm;
 doc.add(newTextField("fieldname", text, Field.Store.YES));
-doc.add(new BinaryDocValuesField("dv1", new BytesRef("hello world")));
-doc.add(new SortedDocValuesField("dv2", new BytesRef("hello hello")));
+doc.add(new BinaryDocValuesField("dv1", newBytesRef("hello world")));
+doc.add(new SortedDocValuesField("dv2", newBytesRef("hello hello")));
 doc.add(new NumericDocValuesField("dv3", 5));
 iwriter.addDocument(doc);
 iwriter.close();

@@ -411,7 +411,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 Query query = new TermQuery(new Term("fieldname", "text"));
 TopDocs hits = isearcher.search(query, 1);
 assertEquals(1, hits.totalHits.value);
-BytesRef scratch = new BytesRef();
+BytesRef scratch = newBytesRef();
 // Iterate through the results:
 for (int i = 0; i < hits.scoreDocs.length; i++) {
 int docID = hits.scoreDocs[i].doc;

@@ -422,13 +422,13 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 assertEquals(docID, dv.advance(docID));
 int ord = dv.ordValue();
 scratch = dv.lookupOrd(ord);
-assertEquals(new BytesRef("hello hello"), scratch);
+assertEquals(newBytesRef("hello hello"), scratch);
 NumericDocValues dv2 = ireader.leaves().get(0).reader().getNumericDocValues("dv3");
 assertEquals(docID, dv2.advance(docID));
 assertEquals(5, dv2.longValue());
 BinaryDocValues dv3 = ireader.leaves().get(0).reader().getBinaryDocValues("dv1");
 assertEquals(docID, dv3.advance(docID));
-assertEquals(new BytesRef("hello world"), dv3.binaryValue());
+assertEquals(newBytesRef("hello world"), dv3.binaryValue());
 }

 ireader.close();
@@ -572,7 +572,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
 String text = "This is the text to be indexed. " + longTerm;
 doc.add(newTextField("fieldname", text, Field.Store.YES));
-doc.add(new BinaryDocValuesField("dv", new BytesRef("hello world")));
+doc.add(new BinaryDocValuesField("dv", newBytesRef("hello world")));
 iwriter.addDocument(doc);
 iwriter.close();

@@ -592,7 +592,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 assert ireader.leaves().size() == 1;
 BinaryDocValues dv = ireader.leaves().get(0).reader().getBinaryDocValues("dv");
 assertEquals(hitDocID, dv.advance(hitDocID));
-assertEquals(new BytesRef("hello world"), dv.binaryValue());
+assertEquals(newBytesRef("hello world"), dv.binaryValue());
 }

 ireader.close();

@@ -608,12 +608,12 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
 Document doc = new Document();
 doc.add(newField("id", "0", StringField.TYPE_STORED));
-doc.add(new BinaryDocValuesField("dv", new BytesRef("hello world 1")));
+doc.add(new BinaryDocValuesField("dv", newBytesRef("hello world 1")));
 iwriter.addDocument(doc);
 iwriter.commit();
 doc = new Document();
 doc.add(newField("id", "1", StringField.TYPE_STORED));
-doc.add(new BinaryDocValuesField("dv", new BytesRef("hello 2")));
+doc.add(new BinaryDocValuesField("dv", newBytesRef("hello 2")));
 iwriter.addDocument(doc);
 iwriter.forceMerge(1);
 iwriter.close();

@@ -650,7 +650,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 iwriter.addDocument(doc);
 doc = new Document();
 doc.add(new StringField("id", "1", Field.Store.NO));
-doc.add(new BinaryDocValuesField("field", new BytesRef("hi")));
+doc.add(new BinaryDocValuesField("field", newBytesRef("hi")));
 iwriter.addDocument(doc);
 iwriter.commit();
 iwriter.deleteDocuments(new Term("id", "1"));

@@ -677,7 +677,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
 String text = "This is the text to be indexed. " + longTerm;
 doc.add(newTextField("fieldname", text, Field.Store.YES));
-doc.add(new SortedDocValuesField("dv", new BytesRef("hello world")));
+doc.add(new SortedDocValuesField("dv", newBytesRef("hello world")));
 iwriter.addDocument(doc);
 iwriter.close();

@@ -689,7 +689,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 Query query = new TermQuery(new Term("fieldname", "text"));
 TopDocs hits = isearcher.search(query, 1);
 assertEquals(1, hits.totalHits.value);
-BytesRef scratch = new BytesRef();
+BytesRef scratch = newBytesRef();
 // Iterate through the results:
 for (int i = 0; i < hits.scoreDocs.length; i++) {
 int docID = hits.scoreDocs[i].doc;

@@ -699,7 +699,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 SortedDocValues dv = ireader.leaves().get(0).reader().getSortedDocValues("dv");
 assertEquals(docID, dv.advance(docID));
 scratch = dv.lookupOrd(dv.ordValue());
-assertEquals(new BytesRef("hello world"), scratch);
+assertEquals(newBytesRef("hello world"), scratch);
 }

 ireader.close();

@@ -714,10 +714,10 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 conf.setMergePolicy(newLogMergePolicy());
 RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
 Document doc = new Document();
-doc.add(new SortedDocValuesField("dv", new BytesRef("hello world 1")));
+doc.add(new SortedDocValuesField("dv", newBytesRef("hello world 1")));
 iwriter.addDocument(doc);
 doc = new Document();
-doc.add(new SortedDocValuesField("dv", new BytesRef("hello world 2")));
+doc.add(new SortedDocValuesField("dv", newBytesRef("hello world 2")));
 iwriter.addDocument(doc);
 iwriter.forceMerge(1);
 iwriter.close();

@@ -726,7 +726,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 IndexReader ireader = DirectoryReader.open(directory); // read-only=true
 assert ireader.leaves().size() == 1;
 SortedDocValues dv = ireader.leaves().get(0).reader().getSortedDocValues("dv");
-BytesRef scratch = new BytesRef();
+BytesRef scratch = newBytesRef();
 assertEquals(0, dv.nextDoc());
 scratch = dv.lookupOrd(dv.ordValue());
 assertEquals("hello world 1", scratch.utf8ToString());

@@ -746,13 +746,13 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 conf.setMergePolicy(newLogMergePolicy());
 RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
 Document doc = new Document();
-doc.add(new SortedDocValuesField("dv", new BytesRef("hello world 1")));
+doc.add(new SortedDocValuesField("dv", newBytesRef("hello world 1")));
 iwriter.addDocument(doc);
 doc = new Document();
-doc.add(new SortedDocValuesField("dv", new BytesRef("hello world 2")));
+doc.add(new SortedDocValuesField("dv", newBytesRef("hello world 2")));
 iwriter.addDocument(doc);
 doc = new Document();
-doc.add(new SortedDocValuesField("dv", new BytesRef("hello world 1")));
+doc.add(new SortedDocValuesField("dv", newBytesRef("hello world 1")));
 iwriter.addDocument(doc);
 iwriter.forceMerge(1);
 iwriter.close();

@@ -786,12 +786,12 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
 Document doc = new Document();
 doc.add(newField("id", "0", StringField.TYPE_STORED));
-doc.add(new SortedDocValuesField("dv", new BytesRef("hello world 1")));
+doc.add(new SortedDocValuesField("dv", newBytesRef("hello world 1")));
 iwriter.addDocument(doc);
 iwriter.commit();
 doc = new Document();
 doc.add(newField("id", "1", StringField.TYPE_STORED));
-doc.add(new SortedDocValuesField("dv", new BytesRef("hello world 2")));
+doc.add(new SortedDocValuesField("dv", newBytesRef("hello world 2")));
 iwriter.addDocument(doc);
 iwriter.forceMerge(1);
 iwriter.close();

@@ -803,9 +803,9 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 assertEquals(2, dv.getValueCount()); // 2 ords
 assertEquals(0, dv.nextDoc());
 BytesRef scratch = dv.lookupOrd(dv.ordValue());
-assertEquals(new BytesRef("hello world 1"), scratch);
+assertEquals(newBytesRef("hello world 1"), scratch);
 scratch = dv.lookupOrd(1);
-assertEquals(new BytesRef("hello world 2"), scratch);
+assertEquals(newBytesRef("hello world 2"), scratch);
 for (int i = 0; i < 2; i++) {
 Document doc2 = ireader.leaves().get(0).reader().document(i);
 String expected;

@@ -837,7 +837,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 iwriter.addDocument(doc);
 doc = new Document();
 doc.add(new StringField("id", "1", Field.Store.NO));
-doc.add(new SortedDocValuesField("field", new BytesRef("hello")));
+doc.add(new SortedDocValuesField("field", newBytesRef("hello")));
 iwriter.addDocument(doc);
 iwriter.commit();
 iwriter.deleteDocuments(new Term("id", "1"));

@@ -861,7 +861,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 conf.setMergePolicy(newLogMergePolicy());
 RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
 Document doc = new Document();
-doc.add(new BinaryDocValuesField("dv", new BytesRef("hello\nworld\r1")));
+doc.add(new BinaryDocValuesField("dv", newBytesRef("hello\nworld\r1")));
 iwriter.addDocument(doc);
 iwriter.close();

@@ -870,7 +870,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 assert ireader.leaves().size() == 1;
 BinaryDocValues dv = ireader.leaves().get(0).reader().getBinaryDocValues("dv");
 assertEquals(0, dv.nextDoc());
-assertEquals(new BytesRef("hello\nworld\r1"), dv.binaryValue());
+assertEquals(newBytesRef("hello\nworld\r1"), dv.binaryValue());

 ireader.close();
 directory.close();

@@ -884,7 +884,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 conf.setMergePolicy(newLogMergePolicy());
 RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
 Document doc = new Document();
-doc.add(new SortedDocValuesField("dv", new BytesRef("hello world 2")));
+doc.add(new SortedDocValuesField("dv", newBytesRef("hello world 2")));
 iwriter.addDocument(doc);
 // 2nd doc missing the DV field
 iwriter.addDocument(new Document());

@@ -896,7 +896,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 SortedDocValues dv = ireader.leaves().get(0).reader().getSortedDocValues("dv");
 assertEquals(0, dv.nextDoc());
 BytesRef scratch = dv.lookupOrd(dv.ordValue());
-assertEquals(new BytesRef("hello world 2"), scratch);
+assertEquals(newBytesRef("hello world 2"), scratch);
 assertEquals(NO_MORE_DOCS, dv.nextDoc());
 ireader.close();
 directory.close();
@@ -910,15 +910,15 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);

 Document doc = new Document();
-doc.add(new SortedDocValuesField("field", new BytesRef("hello")));
+doc.add(new SortedDocValuesField("field", newBytesRef("hello")));
 iwriter.addDocument(doc);

 doc = new Document();
-doc.add(new SortedDocValuesField("field", new BytesRef("world")));
+doc.add(new SortedDocValuesField("field", newBytesRef("world")));
 iwriter.addDocument(doc);

 doc = new Document();
-doc.add(new SortedDocValuesField("field", new BytesRef("beer")));
+doc.add(new SortedDocValuesField("field", newBytesRef("beer")));
 iwriter.addDocument(doc);
 iwriter.forceMerge(1);

@@ -939,27 +939,27 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 assertEquals(2, termsEnum.ord());

 // seekCeil()
-assertEquals(SeekStatus.NOT_FOUND, termsEnum.seekCeil(new BytesRef("ha!")));
+assertEquals(SeekStatus.NOT_FOUND, termsEnum.seekCeil(newBytesRef("ha!")));
 assertEquals("hello", termsEnum.term().utf8ToString());
 assertEquals(1, termsEnum.ord());
-assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(new BytesRef("beer")));
+assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(newBytesRef("beer")));
 assertEquals("beer", termsEnum.term().utf8ToString());
 assertEquals(0, termsEnum.ord());
-assertEquals(SeekStatus.END, termsEnum.seekCeil(new BytesRef("zzz")));
-assertEquals(SeekStatus.NOT_FOUND, termsEnum.seekCeil(new BytesRef("aba")));
+assertEquals(SeekStatus.END, termsEnum.seekCeil(newBytesRef("zzz")));
+assertEquals(SeekStatus.NOT_FOUND, termsEnum.seekCeil(newBytesRef("aba")));
 assertEquals(0, termsEnum.ord());

 // seekExact()
-assertTrue(termsEnum.seekExact(new BytesRef("beer")));
+assertTrue(termsEnum.seekExact(newBytesRef("beer")));
 assertEquals("beer", termsEnum.term().utf8ToString());
 assertEquals(0, termsEnum.ord());
-assertTrue(termsEnum.seekExact(new BytesRef("hello")));
+assertTrue(termsEnum.seekExact(newBytesRef("hello")));
 assertEquals(Codec.getDefault().toString(), "hello", termsEnum.term().utf8ToString());
 assertEquals(1, termsEnum.ord());
-assertTrue(termsEnum.seekExact(new BytesRef("world")));
+assertTrue(termsEnum.seekExact(newBytesRef("world")));
 assertEquals("world", termsEnum.term().utf8ToString());
 assertEquals(2, termsEnum.ord());
-assertFalse(termsEnum.seekExact(new BytesRef("bogus")));
+assertFalse(termsEnum.seekExact(newBytesRef("bogus")));

 // seek(ord)
 termsEnum.seekExact(0);

@@ -998,10 +998,10 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 conf.setMergePolicy(newLogMergePolicy());
 RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
 Document doc = new Document();
-doc.add(new SortedDocValuesField("dv", new BytesRef("")));
+doc.add(new SortedDocValuesField("dv", newBytesRef("")));
 iwriter.addDocument(doc);
 doc = new Document();
-doc.add(new SortedDocValuesField("dv", new BytesRef("")));
+doc.add(new SortedDocValuesField("dv", newBytesRef("")));
 iwriter.addDocument(doc);
 iwriter.forceMerge(1);
 iwriter.close();

@@ -1029,10 +1029,10 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 conf.setMergePolicy(newLogMergePolicy());
 RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
 Document doc = new Document();
-doc.add(new BinaryDocValuesField("dv", new BytesRef("")));
+doc.add(new BinaryDocValuesField("dv", newBytesRef("")));
 iwriter.addDocument(doc);
 doc = new Document();
-doc.add(new BinaryDocValuesField("dv", new BytesRef("")));
+doc.add(new BinaryDocValuesField("dv", newBytesRef("")));
 iwriter.addDocument(doc);
 iwriter.forceMerge(1);
 iwriter.close();

@@ -1059,8 +1059,8 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
 Document doc = new Document();
 byte bytes[] = new byte[32766];
-BytesRef b = new BytesRef(bytes);
 random().nextBytes(bytes);
+BytesRef b = newBytesRef(bytes);
 doc.add(new BinaryDocValuesField("dv", b));
 iwriter.addDocument(doc);
 iwriter.close();

@@ -1070,7 +1070,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 assert ireader.leaves().size() == 1;
 BinaryDocValues dv = ireader.leaves().get(0).reader().getBinaryDocValues("dv");
 assertEquals(0, dv.nextDoc());
-assertEquals(new BytesRef(bytes), dv.binaryValue());
+assertEquals(newBytesRef(bytes), dv.binaryValue());

 ireader.close();
 directory.close();

@@ -1085,8 +1085,8 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
 Document doc = new Document();
 byte bytes[] = new byte[32766];
-BytesRef b = new BytesRef(bytes);
 random().nextBytes(bytes);
+BytesRef b = newBytesRef(bytes);
 doc.add(new SortedDocValuesField("dv", b));
 iwriter.addDocument(doc);
 iwriter.close();

@@ -1096,7 +1096,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 assert ireader.leaves().size() == 1;
 SortedDocValues dv = DocValues.getSorted(ireader.leaves().get(0).reader(), "dv");
 assertEquals(0, dv.nextDoc());
-assertEquals(new BytesRef(bytes), dv.lookupOrd(dv.ordValue()));
+assertEquals(newBytesRef(bytes), dv.lookupOrd(dv.ordValue()));
 ireader.close();
 directory.close();
 }

@@ -1109,7 +1109,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 conf.setMergePolicy(newLogMergePolicy());
 RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
 Document doc = new Document();
-doc.add(new BinaryDocValuesField("dv", new BytesRef("boo!")));
+doc.add(new BinaryDocValuesField("dv", newBytesRef("boo!")));
 iwriter.addDocument(doc);
 iwriter.close();

@@ -1132,7 +1132,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 conf.setMergePolicy(newLogMergePolicy());
 RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, conf);
 Document doc = new Document();
-doc.add(new SortedDocValuesField("dv", new BytesRef("boo!")));
+doc.add(new SortedDocValuesField("dv", newBytesRef("boo!")));
 iwriter.addDocument(doc);
 iwriter.close();

@@ -1206,7 +1206,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 Document doc = new Document();
 doc.add(newTextField("id", "" + i, Field.Store.YES));
 String string = TestUtil.randomRealisticUnicodeString(random(), 1, maxLength);
-BytesRef br = new BytesRef(string);
+BytesRef br = newBytesRef(string);
 doc.add(new SortedDocValuesField("field", br));
 hash.add(br);
 docToString.put("" + i, string);

@@ -1229,7 +1229,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 String id = "" + i + numDocs;
 doc.add(newTextField("id", id, Field.Store.YES));
 String string = TestUtil.randomRealisticUnicodeString(random(), 1, maxLength);
-BytesRef br = new BytesRef(string);
+BytesRef br = newBytesRef(string);
 hash.add(br);
 docToString.put(id, string);
 doc.add(new SortedDocValuesField("field", br));

@@ -1239,7 +1239,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 IndexReader reader = w.getReader();
 SortedDocValues docValues = MultiDocValues.getSortedValues(reader, "field");
 int[] sort = hash.sort();
-BytesRef expected = new BytesRef();
+BytesRef expected = newBytesRef();
 assertEquals(hash.size(), docValues.getValueCount());
 for (int i = 0; i < hash.size(); i++) {
 hash.get(sort[i], expected);

@@ -1253,9 +1253,9 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 for (Entry<String, String> entry : entrySet) {
 // pk lookup
 PostingsEnum termPostingsEnum =
-TestUtil.docs(random(), reader, "id", new BytesRef(entry.getKey()), null, 0);
+TestUtil.docs(random(), reader, "id", newBytesRef(entry.getKey()), null, 0);
 int docId = termPostingsEnum.nextDoc();
-expected = new BytesRef(entry.getValue());
+expected = newBytesRef(entry.getValue());
 docValues = MultiDocValues.getSortedValues(reader, "field");
 assertEquals(docId, docValues.advance(docId));
 final BytesRef actual = docValues.lookupOrd(docValues.ordValue());

@@ -1497,7 +1497,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 Document doc = new Document();
 Field idField = new StringField("id", "", Field.Store.NO);
 Field storedField = new StoredField("stored", new byte[0]);
-Field dvField = new BinaryDocValuesField("dv", new BytesRef());
+Field dvField = new BinaryDocValuesField("dv", newBytesRef());
 doc.add(idField);
 doc.add(storedField);
 doc.add(dvField);

@@ -1624,7 +1624,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 Document doc = new Document();
 Field idField = new StringField("id", "", Field.Store.NO);
 Field storedField = new StoredField("stored", new byte[0]);
-Field dvField = new SortedDocValuesField("dv", new BytesRef());
+Field dvField = new SortedDocValuesField("dv", newBytesRef());
 doc.add(idField);
 doc.add(storedField);
 doc.add(dvField);

@@ -1746,7 +1746,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);

 Document doc = new Document();
-doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
+doc.add(new SortedSetDocValuesField("field", newBytesRef("hello")));
 iwriter.addDocument(doc);

 DirectoryReader ireader = iwriter.getReader();

@@ -1758,7 +1758,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 assertEquals(NO_MORE_ORDS, dv.nextOrd());

 BytesRef bytes = dv.lookupOrd(0);
-assertEquals(new BytesRef("hello"), bytes);
+assertEquals(newBytesRef("hello"), bytes);

 ireader.close();
 directory.close();

@@ -1769,8 +1769,8 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);

 Document doc = new Document();
-doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
-doc.add(new SortedSetDocValuesField("field2", new BytesRef("world")));
+doc.add(new SortedSetDocValuesField("field", newBytesRef("hello")));
+doc.add(new SortedSetDocValuesField("field2", newBytesRef("world")));
 iwriter.addDocument(doc);

 DirectoryReader ireader = iwriter.getReader();

@@ -1783,7 +1783,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
 assertEquals(NO_MORE_ORDS, dv.nextOrd());

 BytesRef bytes = dv.lookupOrd(0);
-assertEquals(new BytesRef("hello"), bytes);
+assertEquals(newBytesRef("hello"), bytes);

 dv = getOnlyLeafReader(ireader).getSortedSetDocValues("field2");

@@ -1792,7 +1792,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
assertEquals(NO_MORE_ORDS, dv.nextOrd());
|
||||
|
||||
bytes = dv.lookupOrd(0);
|
||||
assertEquals(new BytesRef("world"), bytes);
|
||||
assertEquals(newBytesRef("world"), bytes);
|
||||
|
||||
ireader.close();
|
||||
directory.close();
|
||||
|
@ -1806,12 +1806,12 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
|
||||
|
||||
Document doc = new Document();
|
||||
doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
|
||||
doc.add(new SortedSetDocValuesField("field", newBytesRef("hello")));
|
||||
iwriter.addDocument(doc);
|
||||
iwriter.commit();
|
||||
|
||||
doc = new Document();
|
||||
doc.add(new SortedSetDocValuesField("field", new BytesRef("world")));
|
||||
doc.add(new SortedSetDocValuesField("field", newBytesRef("world")));
|
||||
iwriter.addDocument(doc);
|
||||
iwriter.forceMerge(1);
|
||||
|
||||
|
@ -1826,14 +1826,14 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
assertEquals(NO_MORE_ORDS, dv.nextOrd());
|
||||
|
||||
BytesRef bytes = dv.lookupOrd(0);
|
||||
assertEquals(new BytesRef("hello"), bytes);
|
||||
assertEquals(newBytesRef("hello"), bytes);
|
||||
|
||||
assertEquals(1, dv.nextDoc());
|
||||
assertEquals(1, dv.nextOrd());
|
||||
assertEquals(NO_MORE_ORDS, dv.nextOrd());
|
||||
|
||||
bytes = dv.lookupOrd(1);
|
||||
assertEquals(new BytesRef("world"), bytes);
|
||||
assertEquals(newBytesRef("world"), bytes);
|
||||
|
||||
ireader.close();
|
||||
directory.close();
|
||||
|
@ -1844,8 +1844,8 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);
|
||||
|
||||
Document doc = new Document();
|
||||
doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
|
||||
doc.add(new SortedSetDocValuesField("field", new BytesRef("world")));
|
||||
doc.add(new SortedSetDocValuesField("field", newBytesRef("hello")));
|
||||
doc.add(new SortedSetDocValuesField("field", newBytesRef("world")));
|
||||
iwriter.addDocument(doc);
|
||||
|
||||
DirectoryReader ireader = iwriter.getReader();
|
||||
|
@ -1859,10 +1859,10 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
assertEquals(NO_MORE_ORDS, dv.nextOrd());
|
||||
|
||||
BytesRef bytes = dv.lookupOrd(0);
|
||||
assertEquals(new BytesRef("hello"), bytes);
|
||||
assertEquals(newBytesRef("hello"), bytes);
|
||||
|
||||
bytes = dv.lookupOrd(1);
|
||||
assertEquals(new BytesRef("world"), bytes);
|
||||
assertEquals(newBytesRef("world"), bytes);
|
||||
|
||||
ireader.close();
|
||||
directory.close();
|
||||
|
@ -1873,8 +1873,8 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory);
|
||||
|
||||
Document doc = new Document();
|
||||
doc.add(new SortedSetDocValuesField("field", new BytesRef("world")));
|
||||
doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
|
||||
doc.add(new SortedSetDocValuesField("field", newBytesRef("world")));
|
||||
doc.add(new SortedSetDocValuesField("field", newBytesRef("hello")));
|
||||
iwriter.addDocument(doc);
|
||||
|
||||
DirectoryReader ireader = iwriter.getReader();
|
||||
|
@ -1888,10 +1888,10 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
assertEquals(NO_MORE_ORDS, dv.nextOrd());
|
||||
|
||||
BytesRef bytes = dv.lookupOrd(0);
|
||||
assertEquals(new BytesRef("hello"), bytes);
|
||||
assertEquals(newBytesRef("hello"), bytes);
|
||||
|
||||
bytes = dv.lookupOrd(1);
|
||||
assertEquals(new BytesRef("world"), bytes);
|
||||
assertEquals(newBytesRef("world"), bytes);
|
||||
|
||||
ireader.close();
|
||||
directory.close();
|
||||
|
@ -1905,14 +1905,14 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
|
||||
|
||||
Document doc = new Document();
|
||||
doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
|
||||
doc.add(new SortedSetDocValuesField("field", new BytesRef("world")));
|
||||
doc.add(new SortedSetDocValuesField("field", newBytesRef("hello")));
|
||||
doc.add(new SortedSetDocValuesField("field", newBytesRef("world")));
|
||||
iwriter.addDocument(doc);
|
||||
iwriter.commit();
|
||||
|
||||
doc = new Document();
|
||||
doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
|
||||
doc.add(new SortedSetDocValuesField("field", new BytesRef("beer")));
|
||||
doc.add(new SortedSetDocValuesField("field", newBytesRef("hello")));
|
||||
doc.add(new SortedSetDocValuesField("field", newBytesRef("beer")));
|
||||
iwriter.addDocument(doc);
|
||||
iwriter.forceMerge(1);
|
||||
|
||||
|
@ -1933,13 +1933,13 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
assertEquals(NO_MORE_ORDS, dv.nextOrd());
|
||||
|
||||
BytesRef bytes = dv.lookupOrd(0);
|
||||
assertEquals(new BytesRef("beer"), bytes);
|
||||
assertEquals(newBytesRef("beer"), bytes);
|
||||
|
||||
bytes = dv.lookupOrd(1);
|
||||
assertEquals(new BytesRef("hello"), bytes);
|
||||
assertEquals(newBytesRef("hello"), bytes);
|
||||
|
||||
bytes = dv.lookupOrd(2);
|
||||
assertEquals(new BytesRef("world"), bytes);
|
||||
assertEquals(newBytesRef("world"), bytes);
|
||||
|
||||
ireader.close();
|
||||
directory.close();
|
||||
|
@ -1953,7 +1953,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
|
||||
|
||||
Document doc = new Document();
|
||||
doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
|
||||
doc.add(new SortedSetDocValuesField("field", newBytesRef("hello")));
|
||||
iwriter.addDocument(doc);
|
||||
|
||||
doc = new Document();
|
||||
|
@ -1970,7 +1970,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
assertEquals(NO_MORE_ORDS, dv.nextOrd());
|
||||
|
||||
BytesRef bytes = dv.lookupOrd(0);
|
||||
assertEquals(new BytesRef("hello"), bytes);
|
||||
assertEquals(newBytesRef("hello"), bytes);
|
||||
|
||||
ireader.close();
|
||||
directory.close();
|
||||
|
@ -1984,7 +1984,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
|
||||
|
||||
Document doc = new Document();
|
||||
doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
|
||||
doc.add(new SortedSetDocValuesField("field", newBytesRef("hello")));
|
||||
iwriter.addDocument(doc);
|
||||
iwriter.commit();
|
||||
|
||||
|
@ -2003,7 +2003,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
assertEquals(NO_MORE_ORDS, dv.nextOrd());
|
||||
|
||||
BytesRef bytes = dv.lookupOrd(0);
|
||||
assertEquals(new BytesRef("hello"), bytes);
|
||||
assertEquals(newBytesRef("hello"), bytes);
|
||||
|
||||
ireader.close();
|
||||
directory.close();
|
||||
|
@ -2020,7 +2020,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
iwriter.addDocument(doc);
|
||||
|
||||
doc = new Document();
|
||||
doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
|
||||
doc.add(new SortedSetDocValuesField("field", newBytesRef("hello")));
|
||||
iwriter.addDocument(doc);
|
||||
|
||||
iwriter.forceMerge(1);
|
||||
|
@ -2035,7 +2035,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
assertEquals(NO_MORE_ORDS, dv.nextOrd());
|
||||
|
||||
BytesRef bytes = dv.lookupOrd(0);
|
||||
assertEquals(new BytesRef("hello"), bytes);
|
||||
assertEquals(newBytesRef("hello"), bytes);
|
||||
|
||||
ireader.close();
|
||||
directory.close();
|
||||
|
@ -2053,7 +2053,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
iwriter.commit();
|
||||
|
||||
doc = new Document();
|
||||
doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
|
||||
doc.add(new SortedSetDocValuesField("field", newBytesRef("hello")));
|
||||
iwriter.addDocument(doc);
|
||||
iwriter.forceMerge(1);
|
||||
|
||||
|
@ -2068,7 +2068,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
assertEquals(NO_MORE_ORDS, dv.nextOrd());
|
||||
|
||||
BytesRef bytes = dv.lookupOrd(0);
|
||||
assertEquals(new BytesRef("hello"), bytes);
|
||||
assertEquals(newBytesRef("hello"), bytes);
|
||||
|
||||
ireader.close();
|
||||
directory.close();
|
||||
|
@ -2086,7 +2086,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
iwriter.addDocument(doc);
|
||||
doc = new Document();
|
||||
doc.add(new StringField("id", "1", Field.Store.NO));
|
||||
doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
|
||||
doc.add(new SortedSetDocValuesField("field", newBytesRef("hello")));
|
||||
iwriter.addDocument(doc);
|
||||
iwriter.commit();
|
||||
iwriter.deleteDocuments(new Term("id", "1"));
|
||||
|
@ -2110,9 +2110,9 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
|
||||
|
||||
Document doc = new Document();
|
||||
doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
|
||||
doc.add(new SortedSetDocValuesField("field", new BytesRef("world")));
|
||||
doc.add(new SortedSetDocValuesField("field", new BytesRef("beer")));
|
||||
doc.add(new SortedSetDocValuesField("field", newBytesRef("hello")));
|
||||
doc.add(new SortedSetDocValuesField("field", newBytesRef("world")));
|
||||
doc.add(new SortedSetDocValuesField("field", newBytesRef("beer")));
|
||||
iwriter.addDocument(doc);
|
||||
|
||||
DirectoryReader ireader = iwriter.getReader();
|
||||
|
@ -2132,25 +2132,25 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
assertEquals(2, termsEnum.ord());
|
||||
|
||||
// seekCeil()
|
||||
assertEquals(SeekStatus.NOT_FOUND, termsEnum.seekCeil(new BytesRef("ha!")));
|
||||
assertEquals(SeekStatus.NOT_FOUND, termsEnum.seekCeil(newBytesRef("ha!")));
|
||||
assertEquals("hello", termsEnum.term().utf8ToString());
|
||||
assertEquals(1, termsEnum.ord());
|
||||
assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(new BytesRef("beer")));
|
||||
assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(newBytesRef("beer")));
|
||||
assertEquals("beer", termsEnum.term().utf8ToString());
|
||||
assertEquals(0, termsEnum.ord());
|
||||
assertEquals(SeekStatus.END, termsEnum.seekCeil(new BytesRef("zzz")));
|
||||
assertEquals(SeekStatus.END, termsEnum.seekCeil(newBytesRef("zzz")));
|
||||
|
||||
// seekExact()
|
||||
assertTrue(termsEnum.seekExact(new BytesRef("beer")));
|
||||
assertTrue(termsEnum.seekExact(newBytesRef("beer")));
|
||||
assertEquals("beer", termsEnum.term().utf8ToString());
|
||||
assertEquals(0, termsEnum.ord());
|
||||
assertTrue(termsEnum.seekExact(new BytesRef("hello")));
|
||||
assertTrue(termsEnum.seekExact(newBytesRef("hello")));
|
||||
assertEquals("hello", termsEnum.term().utf8ToString());
|
||||
assertEquals(1, termsEnum.ord());
|
||||
assertTrue(termsEnum.seekExact(new BytesRef("world")));
|
||||
assertTrue(termsEnum.seekExact(newBytesRef("world")));
|
||||
assertEquals("world", termsEnum.term().utf8ToString());
|
||||
assertEquals(2, termsEnum.ord());
|
||||
assertFalse(termsEnum.seekExact(new BytesRef("bogus")));
|
||||
assertFalse(termsEnum.seekExact(newBytesRef("bogus")));
|
||||
|
||||
// seek(ord)
|
||||
termsEnum.seekExact(0);
|
||||
|
@ -2219,7 +2219,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
ArrayList<String> unordered = new ArrayList<>(values);
|
||||
Collections.shuffle(unordered, random());
|
||||
for (String v : unordered) {
|
||||
doc.add(new SortedSetDocValuesField("dv", new BytesRef(v)));
|
||||
doc.add(new SortedSetDocValuesField("dv", newBytesRef(v)));
|
||||
}
|
||||
|
||||
writer.addDocument(doc);
|
||||
|
@ -2540,7 +2540,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf);
|
||||
Document doc = new Document();
|
||||
doc.add(new StringField("id", "0", Field.Store.YES));
|
||||
doc.add(new BinaryDocValuesField("dv1", new BytesRef()));
|
||||
doc.add(new BinaryDocValuesField("dv1", newBytesRef()));
|
||||
iw.addDocument(doc);
|
||||
doc = new Document();
|
||||
doc.add(new StringField("id", "1", Field.Store.YES));
|
||||
|
@ -2553,7 +2553,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
LeafReader ar = ir.leaves().get(0).reader();
|
||||
BinaryDocValues dv = ar.getBinaryDocValues("dv1");
|
||||
assertEquals(0, dv.nextDoc());
|
||||
assertEquals(new BytesRef(), dv.binaryValue());
|
||||
assertEquals(newBytesRef(), dv.binaryValue());
|
||||
assertEquals(NO_MORE_DOCS, dv.nextDoc());
|
||||
ir.close();
|
||||
directory.close();
|
||||
|
@ -2566,7 +2566,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf);
|
||||
Document doc = new Document();
|
||||
doc.add(new StringField("id", "0", Field.Store.YES));
|
||||
doc.add(new BinaryDocValuesField("dv1", new BytesRef()));
|
||||
doc.add(new BinaryDocValuesField("dv1", newBytesRef()));
|
||||
iw.addDocument(doc);
|
||||
iw.commit();
|
||||
doc = new Document();
|
||||
|
@ -2580,7 +2580,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
LeafReader ar = ir.leaves().get(0).reader();
|
||||
BinaryDocValues dv = ar.getBinaryDocValues("dv1");
|
||||
assertEquals(0, dv.nextDoc());
|
||||
assertEquals(new BytesRef(), dv.binaryValue());
|
||||
assertEquals(newBytesRef(), dv.binaryValue());
|
||||
assertEquals(NO_MORE_DOCS, dv.nextDoc());
|
||||
ir.close();
|
||||
directory.close();
|
||||
|
@ -2593,7 +2593,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf);
|
||||
Document doc = new Document();
|
||||
doc.add(new StringField("id", "0", Field.Store.YES));
|
||||
doc.add(new BinaryDocValuesField("dv1", new BytesRef()));
|
||||
doc.add(new BinaryDocValuesField("dv1", newBytesRef()));
|
||||
iw.addDocument(doc);
|
||||
doc = new Document();
|
||||
doc.add(new StringField("id", "1", Field.Store.YES));
|
||||
|
@ -2601,7 +2601,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
iw.commit();
|
||||
doc = new Document();
|
||||
doc.add(new StringField("id", "2", Field.Store.YES));
|
||||
doc.add(new BinaryDocValuesField("dv1", new BytesRef("boo")));
|
||||
doc.add(new BinaryDocValuesField("dv1", newBytesRef("boo")));
|
||||
iw.addDocument(doc);
|
||||
iw.forceMerge(1);
|
||||
iw.close();
|
||||
|
@ -2611,9 +2611,9 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
LeafReader ar = ir.leaves().get(0).reader();
|
||||
BinaryDocValues dv = ar.getBinaryDocValues("dv1");
|
||||
assertEquals(0, dv.nextDoc());
|
||||
assertEquals(new BytesRef(), dv.binaryValue());
|
||||
assertEquals(newBytesRef(), dv.binaryValue());
|
||||
assertEquals(2, dv.nextDoc());
|
||||
assertEquals(new BytesRef("boo"), dv.binaryValue());
|
||||
assertEquals(newBytesRef("boo"), dv.binaryValue());
|
||||
assertEquals(NO_MORE_DOCS, dv.nextDoc());
|
||||
ir.close();
|
||||
directory.close();
|
||||
|
@ -2627,8 +2627,8 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
Document doc = new Document();
|
||||
Field idField = new StringField("id", "", Field.Store.NO);
|
||||
Field storedBinField = new StoredField("storedBin", new byte[0]);
|
||||
Field dvBinField = new BinaryDocValuesField("dvBin", new BytesRef());
|
||||
Field dvSortedField = new SortedDocValuesField("dvSorted", new BytesRef());
|
||||
Field dvBinField = new BinaryDocValuesField("dvBin", newBytesRef());
|
||||
Field dvSortedField = new SortedDocValuesField("dvSorted", newBytesRef());
|
||||
Field storedNumericField = new StoredField("storedNum", "");
|
||||
Field dvNumericField = new NumericDocValuesField("dvNum", 0);
|
||||
doc.add(idField);
|
||||
|
@ -2720,8 +2720,8 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
|
||||
Field idField = new StringField("id", "", Field.Store.NO);
|
||||
Field storedBinField = new StoredField("storedBin", new byte[0]);
|
||||
Field dvBinField = new BinaryDocValuesField("dvBin", new BytesRef());
|
||||
Field dvSortedField = new SortedDocValuesField("dvSorted", new BytesRef());
|
||||
Field dvBinField = new BinaryDocValuesField("dvBin", newBytesRef());
|
||||
Field dvSortedField = new SortedDocValuesField("dvSorted", newBytesRef());
|
||||
Field storedNumericField = new StoredField("storedNum", "");
|
||||
Field dvNumericField = new NumericDocValuesField("dvNum", 0);
|
||||
|
||||
|
@ -2755,7 +2755,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
values.add(TestUtil.randomSimpleString(random()));
|
||||
}
|
||||
for (String v : values) {
|
||||
doc.add(new SortedSetDocValuesField("dvSortedSet", new BytesRef(v)));
|
||||
doc.add(new SortedSetDocValuesField("dvSortedSet", newBytesRef(v)));
|
||||
doc.add(new StoredField("storedSortedSet", v));
|
||||
}
|
||||
int numSortedNumericFields = random().nextInt(3);
|
||||
|
@ -2881,15 +2881,15 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
for (int j = 0; j < numSortedSets; j++) {
|
||||
doc.add(
|
||||
new SortedSetDocValuesField(
|
||||
"ss" + j, new BytesRef(TestUtil.randomSimpleString(random()))));
|
||||
"ss" + j, newBytesRef(TestUtil.randomSimpleString(random()))));
|
||||
doc.add(
|
||||
new SortedSetDocValuesField(
|
||||
"ss" + j, new BytesRef(TestUtil.randomSimpleString(random()))));
|
||||
"ss" + j, newBytesRef(TestUtil.randomSimpleString(random()))));
|
||||
}
|
||||
|
||||
for (int j = 0; j < numBinaries; j++) {
|
||||
doc.add(
|
||||
new BinaryDocValuesField("b" + j, new BytesRef(TestUtil.randomSimpleString(random()))));
|
||||
new BinaryDocValuesField("b" + j, newBytesRef(TestUtil.randomSimpleString(random()))));
|
||||
}
|
||||
|
||||
for (int j = 0; j < numSortedNums; j++) {
|
||||
|
@@ -2954,9 +2954,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
}
Directory dir = newDirectory();
RandomIndexWriter w = new RandomIndexWriter(random(), dir);
BytesRef bytes = new BytesRef();
bytes.bytes = new byte[1 << i];
bytes.length = 1 << i;
BytesRef bytes = newBytesRef(new byte[1 << i], 0, 1 << i);
for (int j = 0; j < 4; j++) {
Document doc = new Document();
doc.add(new BinaryDocValuesField("field", bytes));
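In this hunk the three-line manual construction (assigning the bytes and length fields directly, which always leaves offset at 0) collapses into the slice overload, which also randomizes the offset and end padding. A minimal sketch of the equivalence, assuming a class extending LuceneTestCase (helper name is illustrative only):

// hypothetical helper in a class extending LuceneTestCase
static BytesRef zeroFilled(int size) {
  // old style: offset is always 0, so tests never exercised non-zero offsets
  // BytesRef b = new BytesRef();
  // b.bytes = new byte[size];
  // b.length = size;

  // new style: same logical value (size zero bytes), but the backing array may
  // carry a random leading offset and trailing padding
  return newBytesRef(new byte[size], 0, size);
}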
@ -2964,7 +2962,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
}
|
||||
Document doc = new Document();
|
||||
doc.add(new StoredField("id", "5"));
|
||||
doc.add(new BinaryDocValuesField("field", new BytesRef()));
|
||||
doc.add(new BinaryDocValuesField("field", newBytesRef()));
|
||||
w.addDocument(doc);
|
||||
IndexReader r = w.getReader();
|
||||
w.close();
|
||||
|
@ -3184,12 +3182,12 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
|
||||
|
||||
Document doc = new Document();
|
||||
SortedDocValuesField field = new SortedDocValuesField("field", new BytesRef("2"));
|
||||
SortedDocValuesField field = new SortedDocValuesField("field", newBytesRef("2"));
|
||||
doc.add(field);
|
||||
iwriter.addDocument(doc);
|
||||
field.setBytesValue(new BytesRef("1"));
|
||||
field.setBytesValue(newBytesRef("1"));
|
||||
iwriter.addDocument(doc);
|
||||
field.setBytesValue(new BytesRef("3"));
|
||||
field.setBytesValue(newBytesRef("3"));
|
||||
iwriter.addDocument(doc);
|
||||
|
||||
iwriter.commit();
|
||||
|
@ -3213,14 +3211,14 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);
|
||||
|
||||
Document doc = new Document();
|
||||
SortedSetDocValuesField field1 = new SortedSetDocValuesField("field", new BytesRef("2"));
|
||||
SortedSetDocValuesField field2 = new SortedSetDocValuesField("field", new BytesRef("3"));
|
||||
SortedSetDocValuesField field1 = new SortedSetDocValuesField("field", newBytesRef("2"));
|
||||
SortedSetDocValuesField field2 = new SortedSetDocValuesField("field", newBytesRef("3"));
|
||||
doc.add(field1);
|
||||
doc.add(field2);
|
||||
iwriter.addDocument(doc);
|
||||
field1.setBytesValue(new BytesRef("1"));
|
||||
field1.setBytesValue(newBytesRef("1"));
|
||||
iwriter.addDocument(doc);
|
||||
field2.setBytesValue(new BytesRef("2"));
|
||||
field2.setBytesValue(newBytesRef("2"));
|
||||
iwriter.addDocument(doc);
|
||||
|
||||
iwriter.commit();
|
||||
|
@ -3322,7 +3320,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
|
||||
Document doc = new Document();
|
||||
doc.add(new StringField("id", "1", Field.Store.NO));
|
||||
doc.add(new SortedDocValuesField("field", new BytesRef("hello")));
|
||||
doc.add(new SortedDocValuesField("field", newBytesRef("hello")));
|
||||
iwriter.addDocument(doc);
|
||||
final int numEmptyDocs = atLeast(1024);
|
||||
for (int i = 0; i < numEmptyDocs; ++i) {
|
||||
|
@ -3352,7 +3350,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
|
||||
Document doc = new Document();
|
||||
doc.add(new StringField("id", "1", Field.Store.NO));
|
||||
doc.add(new SortedSetDocValuesField("field", new BytesRef("hello")));
|
||||
doc.add(new SortedSetDocValuesField("field", newBytesRef("hello")));
|
||||
iwriter.addDocument(doc);
|
||||
final int numEmptyDocs = atLeast(1024);
|
||||
for (int i = 0; i < numEmptyDocs; ++i) {
|
||||
|
@ -3443,7 +3441,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
|
||||
Document doc = new Document();
|
||||
doc.add(new StringField("id", "1", Field.Store.NO));
|
||||
doc.add(new BinaryDocValuesField("field", new BytesRef("hello")));
|
||||
doc.add(new BinaryDocValuesField("field", newBytesRef("hello")));
|
||||
iwriter.addDocument(doc);
|
||||
final int numEmptyDocs = atLeast(1024);
|
||||
for (int i = 0; i < numEmptyDocs; ++i) {
|
||||
|
@ -3491,7 +3489,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
|
|||
public Field next() {
|
||||
byte[] bytes = new byte[random().nextInt(10)];
|
||||
random().nextBytes(bytes);
|
||||
return new BinaryDocValuesField("field", new BytesRef(bytes));
|
||||
return new BinaryDocValuesField("field", newBytesRef(bytes));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@@ -279,7 +279,8 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
final BytesRef payload = new BytesRef(len);
random().nextBytes(payload.bytes);
payload.length = len;
return payload;

return newBytesRef(payload);
}

public boolean hasPayloads() {
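Here the payload is still built in a plain BytesRef and only the returned value goes through newBytesRef(BytesRef), which copies exactly payload.length bytes starting at payload.offset into a randomly padded array (see the implementation at the end of this diff). A small illustrative check, assuming a class extending LuceneTestCase (the test name is hypothetical):

// hypothetical check in a class extending LuceneTestCase
public void testPayloadCopyIsEqual() {
  BytesRef payload = new BytesRef(new byte[16], 2, 5); // backing array larger than the slice
  random().nextBytes(payload.bytes);

  BytesRef copy = newBytesRef(payload);
  assertEquals(payload, copy); // equal as a [offset, offset+length) slice, independent of padding
}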
@ -397,7 +398,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
termBytes = new BytesRef[disctinctTerms];
|
||||
for (int i = 0; i < disctinctTerms; ++i) {
|
||||
terms[i] = TestUtil.randomRealisticUnicodeString(random());
|
||||
termBytes[i] = new BytesRef(terms[i]);
|
||||
termBytes[i] = newBytesRef(terms[i]);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -837,7 +838,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
|
||||
TermsEnum termsEnum = terms.iterator();
|
||||
assertNotNull(termsEnum);
|
||||
assertEquals(new BytesRef("bar"), termsEnum.next());
|
||||
assertEquals(newBytesRef("bar"), termsEnum.next());
|
||||
|
||||
// simple use (FREQS)
|
||||
PostingsEnum postings = termsEnum.postings(null);
|
||||
|
@ -920,7 +921,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
|
||||
TermsEnum termsEnum = terms.iterator();
|
||||
assertNotNull(termsEnum);
|
||||
assertEquals(new BytesRef("bar"), termsEnum.next());
|
||||
assertEquals(newBytesRef("bar"), termsEnum.next());
|
||||
|
||||
// simple use (FREQS)
|
||||
PostingsEnum postings = termsEnum.postings(null);
|
||||
|
@ -1102,7 +1103,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
|
||||
TermsEnum termsEnum = terms.iterator();
|
||||
assertNotNull(termsEnum);
|
||||
assertEquals(new BytesRef("bar"), termsEnum.next());
|
||||
assertEquals(newBytesRef("bar"), termsEnum.next());
|
||||
|
||||
// simple usage (FREQS)
|
||||
PostingsEnum postings = termsEnum.postings(null);
|
||||
|
@ -1295,7 +1296,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
|
||||
TermsEnum termsEnum = terms.iterator();
|
||||
assertNotNull(termsEnum);
|
||||
assertEquals(new BytesRef("bar"), termsEnum.next());
|
||||
assertEquals(newBytesRef("bar"), termsEnum.next());
|
||||
|
||||
// simple usage (FREQS)
|
||||
PostingsEnum postings = termsEnum.postings(null);
|
||||
|
@ -1472,9 +1473,9 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
IndexWriter iw = new IndexWriter(dir, iwc);
|
||||
Document doc = new Document();
|
||||
Token token1 = new Token("bar", 0, 3);
|
||||
token1.setPayload(new BytesRef("pay1"));
|
||||
token1.setPayload(newBytesRef("pay1"));
|
||||
Token token2 = new Token("bar", 4, 7);
|
||||
token2.setPayload(new BytesRef("pay2"));
|
||||
token2.setPayload(newBytesRef("pay2"));
|
||||
FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
|
||||
ft.setStoreTermVectors(true);
|
||||
ft.setStoreTermVectorPositions(true);
|
||||
|
@ -1486,7 +1487,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
|
||||
TermsEnum termsEnum = terms.iterator();
|
||||
assertNotNull(termsEnum);
|
||||
assertEquals(new BytesRef("bar"), termsEnum.next());
|
||||
assertEquals(newBytesRef("bar"), termsEnum.next());
|
||||
|
||||
// sugar method (FREQS)
|
||||
PostingsEnum postings = termsEnum.postings(null);
|
||||
|
@ -1532,14 +1533,14 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
// we don't define what it is, but if its something else, we should look into it?
|
||||
assertTrue(
|
||||
docsAndPositionsEnum.getPayload() == null
|
||||
|| new BytesRef("pay1").equals(docsAndPositionsEnum.getPayload()));
|
||||
|| newBytesRef("pay1").equals(docsAndPositionsEnum.getPayload()));
|
||||
assertEquals(1, docsAndPositionsEnum.nextPosition());
|
||||
assertEquals(-1, docsAndPositionsEnum.startOffset());
|
||||
assertEquals(-1, docsAndPositionsEnum.endOffset());
|
||||
// we don't define what it is, but if its something else, we should look into it?
|
||||
assertTrue(
|
||||
docsAndPositionsEnum.getPayload() == null
|
||||
|| new BytesRef("pay2").equals(docsAndPositionsEnum.getPayload()));
|
||||
|| newBytesRef("pay2").equals(docsAndPositionsEnum.getPayload()));
|
||||
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc());
|
||||
|
||||
// now reuse the positions
|
||||
|
@ -1554,14 +1555,14 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
// we don't define what it is, but if its something else, we should look into it?
|
||||
assertTrue(
|
||||
docsAndPositionsEnum2.getPayload() == null
|
||||
|| new BytesRef("pay1").equals(docsAndPositionsEnum2.getPayload()));
|
||||
|| newBytesRef("pay1").equals(docsAndPositionsEnum2.getPayload()));
|
||||
assertEquals(1, docsAndPositionsEnum2.nextPosition());
|
||||
assertEquals(-1, docsAndPositionsEnum2.startOffset());
|
||||
assertEquals(-1, docsAndPositionsEnum2.endOffset());
|
||||
// we don't define what it is, but if its something else, we should look into it?
|
||||
assertTrue(
|
||||
docsAndPositionsEnum2.getPayload() == null
|
||||
|| new BytesRef("pay2").equals(docsAndPositionsEnum2.getPayload()));
|
||||
|| newBytesRef("pay2").equals(docsAndPositionsEnum2.getPayload()));
|
||||
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc());
|
||||
|
||||
// payloads
|
||||
|
@ -1573,11 +1574,11 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
assertEquals(0, docsAndPositionsEnum.nextPosition());
|
||||
assertEquals(-1, docsAndPositionsEnum.startOffset());
|
||||
assertEquals(-1, docsAndPositionsEnum.endOffset());
|
||||
assertEquals(new BytesRef("pay1"), docsAndPositionsEnum.getPayload());
|
||||
assertEquals(newBytesRef("pay1"), docsAndPositionsEnum.getPayload());
|
||||
assertEquals(1, docsAndPositionsEnum.nextPosition());
|
||||
assertEquals(-1, docsAndPositionsEnum.startOffset());
|
||||
assertEquals(-1, docsAndPositionsEnum.endOffset());
|
||||
assertEquals(new BytesRef("pay2"), docsAndPositionsEnum.getPayload());
|
||||
assertEquals(newBytesRef("pay2"), docsAndPositionsEnum.getPayload());
|
||||
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc());
|
||||
// reuse
|
||||
docsAndPositionsEnum2 = termsEnum.postings(docsAndPositionsEnum, PostingsEnum.PAYLOADS);
|
||||
|
@ -1587,11 +1588,11 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
assertEquals(0, docsAndPositionsEnum2.nextPosition());
|
||||
assertEquals(-1, docsAndPositionsEnum2.startOffset());
|
||||
assertEquals(-1, docsAndPositionsEnum2.endOffset());
|
||||
assertEquals(new BytesRef("pay1"), docsAndPositionsEnum2.getPayload());
|
||||
assertEquals(newBytesRef("pay1"), docsAndPositionsEnum2.getPayload());
|
||||
assertEquals(1, docsAndPositionsEnum2.nextPosition());
|
||||
assertEquals(-1, docsAndPositionsEnum2.startOffset());
|
||||
assertEquals(-1, docsAndPositionsEnum2.endOffset());
|
||||
assertEquals(new BytesRef("pay2"), docsAndPositionsEnum2.getPayload());
|
||||
assertEquals(newBytesRef("pay2"), docsAndPositionsEnum2.getPayload());
|
||||
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc());
|
||||
|
||||
docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.OFFSETS);
|
||||
|
@ -1605,14 +1606,14 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
// we don't define what it is, but if its something else, we should look into it?
|
||||
assertTrue(
|
||||
docsAndPositionsEnum.getPayload() == null
|
||||
|| new BytesRef("pay1").equals(docsAndPositionsEnum.getPayload()));
|
||||
|| newBytesRef("pay1").equals(docsAndPositionsEnum.getPayload()));
|
||||
assertEquals(1, docsAndPositionsEnum.nextPosition());
|
||||
assertEquals(-1, docsAndPositionsEnum.startOffset());
|
||||
assertEquals(-1, docsAndPositionsEnum.endOffset());
|
||||
// we don't define what it is, but if its something else, we should look into it?
|
||||
assertTrue(
|
||||
docsAndPositionsEnum.getPayload() == null
|
||||
|| new BytesRef("pay2").equals(docsAndPositionsEnum.getPayload()));
|
||||
|| newBytesRef("pay2").equals(docsAndPositionsEnum.getPayload()));
|
||||
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc());
|
||||
// reuse
|
||||
docsAndPositionsEnum2 = termsEnum.postings(docsAndPositionsEnum, PostingsEnum.OFFSETS);
|
||||
|
@ -1625,14 +1626,14 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
// we don't define what it is, but if its something else, we should look into it?
|
||||
assertTrue(
|
||||
docsAndPositionsEnum2.getPayload() == null
|
||||
|| new BytesRef("pay1").equals(docsAndPositionsEnum2.getPayload()));
|
||||
|| newBytesRef("pay1").equals(docsAndPositionsEnum2.getPayload()));
|
||||
assertEquals(1, docsAndPositionsEnum2.nextPosition());
|
||||
assertEquals(-1, docsAndPositionsEnum2.startOffset());
|
||||
assertEquals(-1, docsAndPositionsEnum2.endOffset());
|
||||
// we don't define what it is, but if its something else, we should look into it?
|
||||
assertTrue(
|
||||
docsAndPositionsEnum2.getPayload() == null
|
||||
|| new BytesRef("pay2").equals(docsAndPositionsEnum2.getPayload()));
|
||||
|| newBytesRef("pay2").equals(docsAndPositionsEnum2.getPayload()));
|
||||
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc());
|
||||
|
||||
docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.ALL);
|
||||
|
@ -1643,11 +1644,11 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
assertEquals(0, docsAndPositionsEnum.nextPosition());
|
||||
assertEquals(-1, docsAndPositionsEnum.startOffset());
|
||||
assertEquals(-1, docsAndPositionsEnum.endOffset());
|
||||
assertEquals(new BytesRef("pay1"), docsAndPositionsEnum.getPayload());
|
||||
assertEquals(newBytesRef("pay1"), docsAndPositionsEnum.getPayload());
|
||||
assertEquals(1, docsAndPositionsEnum.nextPosition());
|
||||
assertEquals(-1, docsAndPositionsEnum.startOffset());
|
||||
assertEquals(-1, docsAndPositionsEnum.endOffset());
|
||||
assertEquals(new BytesRef("pay2"), docsAndPositionsEnum.getPayload());
|
||||
assertEquals(newBytesRef("pay2"), docsAndPositionsEnum.getPayload());
|
||||
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc());
|
||||
docsAndPositionsEnum2 = termsEnum.postings(docsAndPositionsEnum, PostingsEnum.ALL);
|
||||
assertEquals(-1, docsAndPositionsEnum2.docID());
|
||||
|
@ -1656,11 +1657,11 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
assertEquals(0, docsAndPositionsEnum2.nextPosition());
|
||||
assertEquals(-1, docsAndPositionsEnum2.startOffset());
|
||||
assertEquals(-1, docsAndPositionsEnum2.endOffset());
|
||||
assertEquals(new BytesRef("pay1"), docsAndPositionsEnum2.getPayload());
|
||||
assertEquals(newBytesRef("pay1"), docsAndPositionsEnum2.getPayload());
|
||||
assertEquals(1, docsAndPositionsEnum2.nextPosition());
|
||||
assertEquals(-1, docsAndPositionsEnum2.startOffset());
|
||||
assertEquals(-1, docsAndPositionsEnum2.endOffset());
|
||||
assertEquals(new BytesRef("pay2"), docsAndPositionsEnum2.getPayload());
|
||||
assertEquals(newBytesRef("pay2"), docsAndPositionsEnum2.getPayload());
|
||||
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc());
|
||||
|
||||
iw.close();
|
||||
|
@ -1674,9 +1675,9 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
IndexWriter iw = new IndexWriter(dir, iwc);
|
||||
Document doc = new Document();
|
||||
Token token1 = new Token("bar", 0, 3);
|
||||
token1.setPayload(new BytesRef("pay1"));
|
||||
token1.setPayload(newBytesRef("pay1"));
|
||||
Token token2 = new Token("bar", 4, 7);
|
||||
token2.setPayload(new BytesRef("pay2"));
|
||||
token2.setPayload(newBytesRef("pay2"));
|
||||
FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
|
||||
ft.setStoreTermVectors(true);
|
||||
ft.setStoreTermVectorPositions(true);
|
||||
|
@ -1689,7 +1690,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
|
||||
TermsEnum termsEnum = terms.iterator();
|
||||
assertNotNull(termsEnum);
|
||||
assertEquals(new BytesRef("bar"), termsEnum.next());
|
||||
assertEquals(newBytesRef("bar"), termsEnum.next());
|
||||
|
||||
// sugar method (FREQS)
|
||||
PostingsEnum postings = termsEnum.postings(null);
|
||||
|
@ -1736,7 +1737,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
// we don't define what it is, but if its something else, we should look into it?
|
||||
assertTrue(
|
||||
docsAndPositionsEnum.getPayload() == null
|
||||
|| new BytesRef("pay1").equals(docsAndPositionsEnum.getPayload()));
|
||||
|| newBytesRef("pay1").equals(docsAndPositionsEnum.getPayload()));
|
||||
assertEquals(1, docsAndPositionsEnum.nextPosition());
|
||||
// we don't define what it is, but if its something else, we should look into it?
|
||||
assertTrue(docsAndPositionsEnum.startOffset() == -1 || docsAndPositionsEnum.startOffset() == 4);
|
||||
|
@ -1744,7 +1745,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
// we don't define what it is, but if its something else, we should look into it?
|
||||
assertTrue(
|
||||
docsAndPositionsEnum.getPayload() == null
|
||||
|| new BytesRef("pay2").equals(docsAndPositionsEnum.getPayload()));
|
||||
|| newBytesRef("pay2").equals(docsAndPositionsEnum.getPayload()));
|
||||
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc());
|
||||
|
||||
// now reuse the positions
|
||||
|
@ -1761,7 +1762,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
// we don't define what it is, but if its something else, we should look into it?
|
||||
assertTrue(
|
||||
docsAndPositionsEnum2.getPayload() == null
|
||||
|| new BytesRef("pay1").equals(docsAndPositionsEnum2.getPayload()));
|
||||
|| newBytesRef("pay1").equals(docsAndPositionsEnum2.getPayload()));
|
||||
assertEquals(1, docsAndPositionsEnum2.nextPosition());
|
||||
// we don't define what it is, but if its something else, we should look into it?
|
||||
assertTrue(
|
||||
|
@ -1770,7 +1771,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
// we don't define what it is, but if its something else, we should look into it?
|
||||
assertTrue(
|
||||
docsAndPositionsEnum2.getPayload() == null
|
||||
|| new BytesRef("pay2").equals(docsAndPositionsEnum2.getPayload()));
|
||||
|| newBytesRef("pay2").equals(docsAndPositionsEnum2.getPayload()));
|
||||
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc());
|
||||
|
||||
// payloads
|
||||
|
@ -1783,12 +1784,12 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
// we don't define what it is, but if its something else, we should look into it?
|
||||
assertTrue(docsAndPositionsEnum.startOffset() == -1 || docsAndPositionsEnum.startOffset() == 0);
|
||||
assertTrue(docsAndPositionsEnum.endOffset() == -1 || docsAndPositionsEnum.endOffset() == 3);
|
||||
assertEquals(new BytesRef("pay1"), docsAndPositionsEnum.getPayload());
|
||||
assertEquals(newBytesRef("pay1"), docsAndPositionsEnum.getPayload());
|
||||
assertEquals(1, docsAndPositionsEnum.nextPosition());
|
||||
// we don't define what it is, but if its something else, we should look into it?
|
||||
assertTrue(docsAndPositionsEnum.startOffset() == -1 || docsAndPositionsEnum.startOffset() == 4);
|
||||
assertTrue(docsAndPositionsEnum.endOffset() == -1 || docsAndPositionsEnum.endOffset() == 7);
|
||||
assertEquals(new BytesRef("pay2"), docsAndPositionsEnum.getPayload());
|
||||
assertEquals(newBytesRef("pay2"), docsAndPositionsEnum.getPayload());
|
||||
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc());
|
||||
// reuse
|
||||
docsAndPositionsEnum2 = termsEnum.postings(docsAndPositionsEnum, PostingsEnum.PAYLOADS);
|
||||
|
@ -1800,13 +1801,13 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
assertTrue(
|
||||
docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 0);
|
||||
assertTrue(docsAndPositionsEnum2.endOffset() == -1 || docsAndPositionsEnum2.endOffset() == 3);
|
||||
assertEquals(new BytesRef("pay1"), docsAndPositionsEnum2.getPayload());
|
||||
assertEquals(newBytesRef("pay1"), docsAndPositionsEnum2.getPayload());
|
||||
assertEquals(1, docsAndPositionsEnum2.nextPosition());
|
||||
// we don't define what it is, but if its something else, we should look into it?
|
||||
assertTrue(
|
||||
docsAndPositionsEnum2.startOffset() == -1 || docsAndPositionsEnum2.startOffset() == 4);
|
||||
assertTrue(docsAndPositionsEnum2.endOffset() == -1 || docsAndPositionsEnum2.endOffset() == 7);
|
||||
assertEquals(new BytesRef("pay2"), docsAndPositionsEnum2.getPayload());
|
||||
assertEquals(newBytesRef("pay2"), docsAndPositionsEnum2.getPayload());
|
||||
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc());
|
||||
|
||||
docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.OFFSETS);
|
||||
|
@ -1820,14 +1821,14 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
// we don't define what it is, but if its something else, we should look into it?
|
||||
assertTrue(
|
||||
docsAndPositionsEnum.getPayload() == null
|
||||
|| new BytesRef("pay1").equals(docsAndPositionsEnum.getPayload()));
|
||||
|| newBytesRef("pay1").equals(docsAndPositionsEnum.getPayload()));
|
||||
assertEquals(1, docsAndPositionsEnum.nextPosition());
|
||||
assertEquals(4, docsAndPositionsEnum.startOffset());
|
||||
assertEquals(7, docsAndPositionsEnum.endOffset());
|
||||
// we don't define what it is, but if its something else, we should look into it?
|
||||
assertTrue(
|
||||
docsAndPositionsEnum.getPayload() == null
|
||||
|| new BytesRef("pay2").equals(docsAndPositionsEnum.getPayload()));
|
||||
|| newBytesRef("pay2").equals(docsAndPositionsEnum.getPayload()));
|
||||
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc());
|
||||
// reuse
|
||||
docsAndPositionsEnum2 = termsEnum.postings(docsAndPositionsEnum, PostingsEnum.OFFSETS);
|
||||
|
@ -1840,14 +1841,14 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
// we don't define what it is, but if its something else, we should look into it?
|
||||
assertTrue(
|
||||
docsAndPositionsEnum2.getPayload() == null
|
||||
|| new BytesRef("pay1").equals(docsAndPositionsEnum2.getPayload()));
|
||||
|| newBytesRef("pay1").equals(docsAndPositionsEnum2.getPayload()));
|
||||
assertEquals(1, docsAndPositionsEnum2.nextPosition());
|
||||
assertEquals(4, docsAndPositionsEnum2.startOffset());
|
||||
assertEquals(7, docsAndPositionsEnum2.endOffset());
|
||||
// we don't define what it is, but if its something else, we should look into it?
|
||||
assertTrue(
|
||||
docsAndPositionsEnum2.getPayload() == null
|
||||
|| new BytesRef("pay2").equals(docsAndPositionsEnum2.getPayload()));
|
||||
|| newBytesRef("pay2").equals(docsAndPositionsEnum2.getPayload()));
|
||||
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc());
|
||||
|
||||
docsAndPositionsEnum = termsEnum.postings(null, PostingsEnum.ALL);
|
||||
|
@ -1858,11 +1859,11 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
assertEquals(0, docsAndPositionsEnum.nextPosition());
|
||||
assertEquals(0, docsAndPositionsEnum.startOffset());
|
||||
assertEquals(3, docsAndPositionsEnum.endOffset());
|
||||
assertEquals(new BytesRef("pay1"), docsAndPositionsEnum.getPayload());
|
||||
assertEquals(newBytesRef("pay1"), docsAndPositionsEnum.getPayload());
|
||||
assertEquals(1, docsAndPositionsEnum.nextPosition());
|
||||
assertEquals(4, docsAndPositionsEnum.startOffset());
|
||||
assertEquals(7, docsAndPositionsEnum.endOffset());
|
||||
assertEquals(new BytesRef("pay2"), docsAndPositionsEnum.getPayload());
|
||||
assertEquals(newBytesRef("pay2"), docsAndPositionsEnum.getPayload());
|
||||
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc());
|
||||
docsAndPositionsEnum2 = termsEnum.postings(docsAndPositionsEnum, PostingsEnum.ALL);
|
||||
assertEquals(-1, docsAndPositionsEnum2.docID());
|
||||
|
@ -1871,11 +1872,11 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
|
|||
assertEquals(0, docsAndPositionsEnum2.nextPosition());
|
||||
assertEquals(0, docsAndPositionsEnum2.startOffset());
|
||||
assertEquals(3, docsAndPositionsEnum2.endOffset());
|
||||
assertEquals(new BytesRef("pay1"), docsAndPositionsEnum2.getPayload());
|
||||
assertEquals(newBytesRef("pay1"), docsAndPositionsEnum2.getPayload());
|
||||
assertEquals(1, docsAndPositionsEnum2.nextPosition());
|
||||
assertEquals(4, docsAndPositionsEnum2.startOffset());
|
||||
assertEquals(7, docsAndPositionsEnum2.endOffset());
|
||||
assertEquals(new BytesRef("pay2"), docsAndPositionsEnum2.getPayload());
|
||||
assertEquals(newBytesRef("pay2"), docsAndPositionsEnum2.getPayload());
|
||||
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsAndPositionsEnum2.nextDoc());
|
||||
|
||||
iw.close();
|
||||
|
|
|
@@ -14,6 +14,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.lucene.util;

import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsBoolean;

@@ -41,6 +42,7 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies.Consequence;
import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import com.carrotsearch.randomizedtesting.rules.NoClassHooksShadowingRule;
import com.carrotsearch.randomizedtesting.rules.NoInstanceHooksOverridesRule;

@@ -60,6 +62,7 @@ import java.lang.annotation.Target;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileSystem;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
@@ -3167,4 +3170,86 @@ public abstract class LuceneTestCase extends Assert {
}
return conf;
}

/**
* Creates a {@link BytesRef} holding UTF-8 bytes for the incoming String, that sometimes uses a
* non-zero {@code offset}, and non-zero end-padding, to tickle latent bugs that fail to look at
* {@code BytesRef.offset}.
*/
public static BytesRef newBytesRef(String s) {
return newBytesRef(s.getBytes(StandardCharsets.UTF_8));
}

/**
* Creates a copy of the incoming {@link BytesRef} that sometimes uses a non-zero {@code offset},
* and non-zero end-padding, to tickle latent bugs that fail to look at {@code BytesRef.offset}.
*/
public static BytesRef newBytesRef(BytesRef b) {
assert b.isValid();
return newBytesRef(b.bytes, b.offset, b.length);
}

/**
* Creates a random BytesRef from the incoming bytes that sometimes uses a non-zero {@code
* offset}, and non-zero end-padding, to tickle latent bugs that fail to look at {@code
* BytesRef.offset}.
*/
public static BytesRef newBytesRef(byte[] b) {
return newBytesRef(b, 0, b.length);
}

/**
* Creates a random empty BytesRef that sometimes uses a non-zero {@code offset}, and non-zero
* end-padding, to tickle latent bugs that fail to look at {@code BytesRef.offset}.
*/
public static BytesRef newBytesRef() {
return newBytesRef(new byte[0], 0, 0);
}

/**
* Creates a random empty BytesRef, with at least the requested length of bytes free, that
* sometimes uses a non-zero {@code offset}, and non-zero end-padding, to tickle latent bugs that
* fail to look at {@code BytesRef.offset}.
*/
public static BytesRef newBytesRef(int byteLength) {
return newBytesRef(new byte[byteLength], 0, byteLength);
}

/**
* Creates a copy of the incoming bytes slice that sometimes uses a non-zero {@code offset}, and
* non-zero end-padding, to tickle latent bugs that fail to look at {@code BytesRef.offset}.
*/
public static BytesRef newBytesRef(byte[] bytesIn, int offset, int length) {
// System.out.println("LTC.newBytesRef! bytesIn.length=" + bytesIn.length + " offset=" + offset
// + " length=" + length);

assert bytesIn.length >= offset + length
: "got offset=" + offset + " length=" + length + " bytesIn.length=" + bytesIn.length;

// randomly set a non-zero offset
int startOffset;
if (random().nextBoolean()) {
startOffset = RandomNumbers.randomIntBetween(random(), 1, 20);
} else {
startOffset = 0;
}

// also randomly set an end padding:
int endPadding;
if (random().nextBoolean()) {
endPadding = RandomNumbers.randomIntBetween(random(), 1, 20);
} else {
endPadding = 0;
}

byte[] bytes = new byte[startOffset + length + endPadding];

System.arraycopy(bytesIn, offset, bytes, startOffset, length);
// System.out.println("LTC: return bytes.length=" + bytes.length + " startOffset=" +
// startOffset + " length=" + bytesIn.length);

// the returned ref must span exactly the copied slice: length bytes starting at startOffset
BytesRef it = new BytesRef(bytes, startOffset, length);
assert it.isValid();
return it;
}
}
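For context, a minimal usage sketch, not part of this commit, showing how a test that extends LuceneTestCase can switch to these helpers and what kind of latent bug the randomized offset is meant to expose (the test name and the commented "buggy" line are hypothetical):

// hypothetical test in a class extending LuceneTestCase
public void testConsumersMustHonorOffset() {
  // same logical value as new BytesRef("hello"), but the bytes may start at a
  // random offset and be followed by random end padding
  BytesRef ref = newBytesRef("hello");

  // correct consumers always slice from ref.offset for ref.length bytes:
  byte[] copy = new byte[ref.length];
  System.arraycopy(ref.bytes, ref.offset, copy, 0, ref.length);
  assertEquals(new BytesRef("hello"), new BytesRef(copy));

  // the kind of latent bug this helper tickles would instead read from index 0:
  // BytesRef broken = new BytesRef(ref.bytes, 0, ref.length); // wrong whenever ref.offset != 0
}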