mirror of https://github.com/apache/lucene.git

commit 9502b8e6cc (parent b1ad844149)

    add failing test

    git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene4547@1436045 13f79535-47bb-0310-9956-ffa450edef68
@@ -41,6 +41,14 @@ public abstract class SortedDocValues extends BinaryDocValues {
    }
  }

  // nocommit make this final, and impl seekExact(term) to
  // fwd to lookupTerm

  // nocommit should we nuke this? the iterator can be
  // efficiently built "on top" since ord is part of the
  // API? why must it be impl'd here...?
  // SortedDocValuesTermsEnum.

  public TermsEnum getTermsEnum() {
    // nocommit who tests this base impl ...
    // Default impl just uses the existing API; subclasses
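The hunk cuts off mid-sentence, but the nocommit notes above raise a concrete design point: since ord is part of the SortedDocValues API, a terms iterator can be layered on top of lookupOrd/getValueCount/lookupTerm instead of being implemented per codec. A minimal sketch of that "built on top" idea follows; this helper class is hypothetical and not part of the commit (a real SortedDocValuesTermsEnum would have to implement the full TermsEnum contract), and it assumes the lookupOrd(int, BytesRef) and getValueCount() methods of this class:

// Hypothetical sketch, not from this commit: the ord-walking core that a
// terms iterator built "on top" of the SortedDocValues API would need.
class OrdTermsIterator {
  private final SortedDocValues values;
  private final BytesRef term = new BytesRef();
  private int ord = -1;

  OrdTermsIterator(SortedDocValues values) {
    this.values = values;
  }

  /** Advances to the next term in sorted (ord) order; null when exhausted. */
  BytesRef next() {
    if (ord + 1 >= values.getValueCount()) {
      return null;
    }
    values.lookupOrd(++ord, term);
    return term;
  }

  /** Positions on the first term >= key; false if no such term exists. */
  boolean seekCeil(BytesRef key, BytesRef spare) {
    int pos = values.lookupTerm(key, spare);
    if (pos < 0) {
      pos = -pos - 1; // assumed -(insertionPoint+1) encoding on miss
    }
    if (pos >= values.getValueCount()) {
      return false;
    }
    ord = pos;
    values.lookupOrd(ord, term);
    return true;
  }
}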
@@ -161,6 +169,8 @@ public abstract class SortedDocValues extends BinaryDocValues {
   * @param key Key to look up
   * @param spare Spare BytesRef
   **/
  // nocommit make this protected so codecs can impl better
  // version ...
  public int lookupTerm(BytesRef key, BytesRef spare) {

    int low = 0;
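The hunk is truncated after int low = 0;, but the visible pieces (a key, a spare BytesRef, and the low end of a search range) point at a standard binary search over the ord space. A sketch of how the default implementation plausibly continues, assuming lookupOrd/getValueCount and an Arrays.binarySearch-style negative return encoding (both assumptions, since the body is cut off here):

    // Sketch of the assumed continuation: binary-search the sorted ord
    // space, materializing each probed ord into spare and comparing it
    // against key.
    int high = getValueCount() - 1;
    while (low <= high) {
      int mid = (low + high) >>> 1;   // unsigned shift avoids int overflow
      lookupOrd(mid, spare);
      int cmp = spare.compareTo(key);
      if (cmp < 0) {
        low = mid + 1;
      } else if (cmp > 0) {
        high = mid - 1;
      } else {
        return mid;                   // key found: return its ord
      }
    }
    return -(low + 1);                // key not found: encode insertion point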
@@ -1133,6 +1133,9 @@ class FieldCacheImpl implements FieldCache {
    final int maxDoc = reader.maxDoc();
    SortedDocValues valuesIn = reader.getSortedDocValues(key.field);
    if (valuesIn != null) {
      // nocommit we need thread DV test that would
      // uncover this bug!!
      // nocommit we should not cache in this case?
      return valuesIn;
    } else {
@@ -0,0 +1,112 @@
package org.apache.lucene.index;

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.CountDownLatch;

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.PackedLongDocValuesField;
import org.apache.lucene.document.SortedBytesDocValuesField;
import org.apache.lucene.document.StraightBytesDocValuesField;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;

public class TestDocValuesWithThreads extends LuceneTestCase {

  public void test() throws Exception {
    Directory dir = newDirectory();
    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));

    // nocommit binary, sorted too
    final List<Long> numbers = new ArrayList<Long>();
    final List<BytesRef> binary = new ArrayList<BytesRef>();
    final List<BytesRef> sorted = new ArrayList<BytesRef>();
    final int numDocs = atLeast(100);
    for(int i=0;i<numDocs;i++) {
      Document d = new Document();
      long number = random().nextLong();
      d.add(new PackedLongDocValuesField("number", number));
      BytesRef bytes = new BytesRef(_TestUtil.randomRealisticUnicodeString(random()));
      d.add(new StraightBytesDocValuesField("bytes", bytes));
      binary.add(bytes);
      bytes = new BytesRef(_TestUtil.randomRealisticUnicodeString(random()));
      d.add(new SortedBytesDocValuesField("sorted", bytes));
      sorted.add(bytes);
      w.addDocument(d);
      numbers.add(number);
    }

    w.forceMerge(1);
    final IndexReader r = w.getReader();
    w.close();

    assertEquals(1, r.leaves().size());
    final AtomicReader ar = r.leaves().get(0).reader();

    int numThreads = _TestUtil.nextInt(random(), 2, 5);
    List<Thread> threads = new ArrayList<Thread>();
    final CountDownLatch startingGun = new CountDownLatch(1);
    for(int t=0;t<numThreads;t++) {
      final Random threadRandom = new Random(random().nextLong());
      Thread thread = new Thread() {
        @Override
        public void run() {
          try {
            //NumericDocValues ndv = ar.getNumericDocValues("number");
            FieldCache.Longs ndv = FieldCache.DEFAULT.getLongs(ar, "number", false);
            //BinaryDocValues bdv = ar.getBinaryDocValues("bytes");
            BinaryDocValues bdv = FieldCache.DEFAULT.getTerms(ar, "bytes");
            SortedDocValues sdv = FieldCache.DEFAULT.getTermsIndex(ar, "sorted");
            startingGun.await();
            int iters = atLeast(1000);
            BytesRef scratch = new BytesRef();
            for(int iter=0;iter<iters;iter++) {
              int docID = threadRandom.nextInt(numDocs);
              assertEquals(numbers.get(docID).longValue(), ndv.get(docID));
              bdv.get(docID, scratch);
              assertEquals(binary.get(docID), scratch);
              sdv.get(docID, scratch);
              assertEquals(sorted.get(docID), scratch);
            }
          } catch (Exception e) {
            throw new RuntimeException(e);
          }
        }
      };
      thread.start();
      threads.add(thread);
    }

    startingGun.countDown();

    for(Thread thread : threads) {
      thread.join();
    }

    r.close();
    dir.close();
  }

}
@@ -27,10 +27,8 @@ import org.apache.lucene.facet.index.params.CategoryListParams;
 import org.apache.lucene.facet.index.params.FacetIndexingParams;
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.DocValues.Source;
-import org.apache.lucene.index.DocValues.Type;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
@@ -45,9 +43,9 @@ import org.apache.lucene.util.BytesRef;
 
 /**
  * A {@link FilterAtomicReader} for migrating a facets index which encodes
- * category ordinals in a payload to {@link DocValues}. To migrate the index,
+ * category ordinals in a payload to {@link BinaryDocValues}. To migrate the index,
  * you should build a mapping from a field (String) to term ({@link Term}),
- * which denotes under which DocValues field to put the data encoded in the
+ * which denotes under which BinaryDocValues field to put the data encoded in the
  * matching term's payload. You can follow the code example below to migrate an
  * existing index:
  *
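The code example the javadoc promises falls outside this hunk. Under the assumptions that this reader class is named FacetsPayloadMigrationReader, that it takes the wrapped AtomicReader plus the field-to-term mapping the javadoc describes, and that oldDir/newDir/conf are supplied by the caller, the migration might look roughly like this (all of those names, and the "$facets"/"$fulltree$" values, are illustrative, not confirmed by this diff):

// Hypothetical migration sketch; the reader class name, its
// (AtomicReader, Map<String,Term>) constructor, and the "$facets"/"$fulltree$"
// values are assumptions, not confirmed by this diff.
Map<String,Term> fieldTerms = new HashMap<String,Term>();
fieldTerms.put("$facets", new Term("$facets", "$fulltree$"));

DirectoryReader reader = DirectoryReader.open(oldDir);  // index with payload-encoded ordinals
IndexWriter writer = new IndexWriter(newDir, conf);     // destination index
List<AtomicReaderContext> leaves = reader.leaves();
AtomicReader[] wrapped = new AtomicReader[leaves.size()];
for (int i = 0; i < leaves.size(); i++) {
  // Each leaf is wrapped so the payload data surfaces as BinaryDocValues
  // while the segments are copied into the destination index.
  wrapped[i] = new FacetsPayloadMigrationReader(leaves.get(i).reader(), fieldTerms);
}
writer.addIndexes(new MultiReader(wrapped));
writer.close();
reader.close();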