mirror of https://github.com/apache/lucene.git

fix grouping

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene4547@1412257 13f79535-47bb-0310-9956-ffa450edef68

parent 7e4bb062e4
commit aed1b577a8
@@ -519,11 +519,10 @@ public interface FieldCache {

   // nocommit: can we merge this api with the SortedDocValues api?
   public abstract static class DocTermsIndex {

-    // nocommit remove this?
     public int binarySearchLookup(BytesRef key, BytesRef spare) {
       // this special case is the reason that Arrays.binarySearch() isn't useful.
       if (key == null) {
-        return -1;
+        throw new IllegalArgumentException("key must not be null");
       }

       int low = 0;
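Taken together, the FieldCache changes in this commit move the missing-value convention from "ord 0 means null" to "ord -1 means missing; ords 0..numOrd()-1 are real terms", and a null key is no longer a searchable value. A minimal standalone sketch of the new convention (plain Java; the hypothetical `SimpleTermsIndex` stands in for `DocTermsIndex`, it is not the Lucene class):

```java
import java.util.Arrays;

// Hypothetical stand-in for DocTermsIndex under the new convention:
// valid ords are 0..terms.length-1, and -1 means "document has no value".
class SimpleTermsIndex {
  private final String[] terms;  // sorted unique terms
  private final int[] docToOrd;  // -1 for docs with no value

  SimpleTermsIndex(String[] terms, int[] docToOrd) {
    this.terms = terms;
    this.docToOrd = docToOrd;
  }

  int getOrd(int docID) {
    return docToOrd[docID];
  }

  String lookup(int ord) {
    if (ord < 0) {
      throw new IllegalArgumentException("ord must be >=0 (got ord=" + ord + ")");
    }
    return terms[ord];
  }

  int binarySearchLookup(String key) {
    if (key == null) {
      // null is no longer a legal probe, mirroring the change above
      throw new IllegalArgumentException("key must not be null");
    }
    return Arrays.binarySearch(terms, key); // >= 0 on a hit, -insertionPoint-1 on a miss
  }

  public static void main(String[] args) {
    SimpleTermsIndex idx = new SimpleTermsIndex(
        new String[] {"bar", "baz", "foo"}, new int[] {2, -1, 0});
    for (int doc = 0; doc < 3; doc++) {
      int ord = idx.getOrd(doc);
      System.out.println("doc " + doc + " -> " + (ord == -1 ? null : idx.lookup(ord)));
    }
  }
}
```

Once null keys are rejected, the "special case" that made `Arrays.binarySearch()` unusable disappears, which is presumably why the nocommit questions whether the method should survive at all.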
@@ -974,6 +974,9 @@ class FieldCacheImpl implements FieldCache {

       @Override
       public int getOrd(int docID) {
+        // Subtract 1, matching the 1+ord we did when
+        // storing, so that missing values, which are 0 in the
+        // packed ints, are returned as -1 ord:
         return (int) docToTermOrd.get(docID)-1;
       }

@@ -984,6 +987,9 @@ class FieldCacheImpl implements FieldCache {

       @Override
       public BytesRef lookup(int ord, BytesRef ret) {
+        if (ord < 0) {
+          throw new IllegalArgumentException("ord must be >=0 (got ord=" + ord + ")");
+        }
         return bytes.fill(ret, termOrdToBytesOffset.get(ord));
       }

@@ -1235,7 +1241,6 @@ class FieldCacheImpl implements FieldCache {
       GrowableWriter termOrdToBytesOffset = new GrowableWriter(startBytesBPV, 1+startNumUniqueTerms, acceptableOverheadRatio);
       final GrowableWriter docToTermOrd = new GrowableWriter(startTermsBPV, maxDoc, acceptableOverheadRatio);

       // 0 is reserved for "unset"
       int termOrd = 0;

       // nocommit use Uninvert?

@@ -1266,6 +1271,7 @@ class FieldCacheImpl implements FieldCache {
         if (docID == DocIdSetIterator.NO_MORE_DOCS) {
           break;
         }
+        // Store 1+ ord into packed bits
         docToTermOrd.set(docID, 1+termOrd);
       }
       termOrd++;
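The uninversion loop and `getOrd` above are two halves of one trick: packed ints default to 0, so the cache stores `1+ord` and decodes with `-1`, letting "never set" fall out as ord -1 for free. A self-contained sketch of that encoding (a plain `long[]` stands in for the packed-ints `GrowableWriter`; that substitution is an assumption for the demo, not the real class):

```java
// Sketch of the 1+ord encoding used by the terms-index cache.
// A long[] stands in for the packed-ints writer; both default every slot to 0.
class OrdPackingSketch {
  public static void main(String[] args) {
    int maxDoc = 4;
    long[] docToTermOrd = new long[maxDoc]; // all slots start at 0 == "unset"

    // Pretend docs 0 and 2 matched terms with ords 0 and 3 while uninverting:
    docToTermOrd[0] = 1 + 0; // store 1+ord
    docToTermOrd[2] = 1 + 3;

    for (int doc = 0; doc < maxDoc; doc++) {
      int ord = (int) docToTermOrd[doc] - 1; // decode; untouched slots become -1
      System.out.println("doc " + doc + " ord=" + ord + (ord == -1 ? " (missing)" : ""));
    }
  }
}
```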
@@ -124,7 +124,7 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead

     protected void retrieveGroupHeadAndAddIfNotExist(int doc) throws IOException {
       final int ord = groupIndex.getOrd(doc);
-      final BytesRef groupValue = ord == 0 ? null : groupIndex.lookup(ord, scratchBytesRef);
+      final BytesRef groupValue = ord == -1 ? null : groupIndex.lookup(ord, scratchBytesRef);
       GroupHead groupHead = groups.get(groupValue);
       if (groupHead == null) {
         groupHead = new GroupHead(groupValue, sortWithinGroup, doc);

@@ -205,7 +205,7 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead

     OrdScoreAllGroupHeadsCollector(String groupField, Sort sortWithinGroup, int initialSize) {
       super(groupField, sortWithinGroup.getSort().length);
-      ordSet = new SentinelIntSet(initialSize, -1);
+      ordSet = new SentinelIntSet(initialSize, -2);
       collectedGroups = new ArrayList<GroupHead>(initialSize);

       final SortField[] sortFields = sortWithinGroup.getSort();

@@ -230,14 +230,14 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead
       GroupHead groupHead;
       if (!ordSet.exists(key)) {
         ordSet.put(key);
-        BytesRef term = key == 0 ? null : groupIndex.getTerm(doc, new BytesRef());
+        BytesRef term = key == -1 ? null : groupIndex.getTerm(doc, new BytesRef());
         groupHead = new GroupHead(doc, term);
         collectedGroups.add(groupHead);
-        segmentGroupHeads[key] = groupHead;
+        segmentGroupHeads[key+1] = groupHead;
         temporalResult.stop = true;
       } else {
         temporalResult.stop = false;
-        groupHead = segmentGroupHeads[key];
+        groupHead = segmentGroupHeads[key+1];
       }
       temporalResult.groupHead = groupHead;
     }
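The `key+1` indexing works because `segmentGroupHeads` is now allocated one slot larger than `numOrd()` (see the next hunk), so the missing-group ord -1 maps to slot 0 and every real ord shifts up by one. A toy sketch of that layout (hypothetical names, not the collector's real fields):

```java
// Offset-by-one indexing so ord -1 ("no group value") gets a real array slot.
class OrdOffsetSketch {
  public static void main(String[] args) {
    int numOrd = 3;                          // ords 0..2 are real groups
    String[] heads = new String[numOrd + 1]; // slot 0 is reserved for ord -1

    int[] ords = {-1, 0, 2};
    for (int ord : ords) {
      heads[ord + 1] = "head(ord=" + ord + ")";
    }
    System.out.println(java.util.Arrays.toString(heads));
    // [head(ord=-1), head(ord=0), null, head(ord=2)]
  }
}
```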
@@ -255,19 +255,29 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead

       // Clear ordSet and fill it with previous encountered groups that can occur in the current segment.
       ordSet.clear();
-      segmentGroupHeads = new GroupHead[groupIndex.numOrd()];
+      segmentGroupHeads = new GroupHead[groupIndex.numOrd()+1];
       for (GroupHead collectedGroup : collectedGroups) {
-        int ord = groupIndex.binarySearchLookup(collectedGroup.groupValue, scratchBytesRef);
-        if (ord >= 0) {
+        int ord;
+        if (collectedGroup.groupValue == null) {
+          ord = -1;
+        } else {
+          ord = groupIndex.binarySearchLookup(collectedGroup.groupValue, scratchBytesRef);
+        }
+        if (collectedGroup.groupValue == null || ord >= 0) {
           ordSet.put(ord);
-          segmentGroupHeads[ord] = collectedGroup;
+          segmentGroupHeads[ord+1] = collectedGroup;

           for (int i = 0; i < sortsIndex.length; i++) {
             if (fields[i].getType() == SortField.Type.SCORE) {
               continue;
             }

-            collectedGroup.sortOrds[i] = sortsIndex[i].binarySearchLookup(collectedGroup.sortValues[i], scratchBytesRef);
+            int sortOrd;
+            if (collectedGroup.sortValues[i] == null) {
+              sortOrd = -1;
+            } else {
+              sortOrd = sortsIndex[i].binarySearchLookup(collectedGroup.sortValues[i], scratchBytesRef);
+            }
+            collectedGroup.sortOrds[i] = sortOrd;
           }
         }
       }
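This clear-then-remap pattern repeats in each collector below: a null group value maps straight to ord -1, while a non-null value must be found in the new segment's term index before the group is re-registered. A hedged sketch of the helper logic (the `ordForValue` helper is hypothetical, not a method of the real class):

```java
import java.util.Arrays;

class NullSafeOrdSketch {
  // Maps null to the reserved -1 ord; otherwise binary-searches the segment's terms.
  // A negative result other than -1 means "value absent from this segment".
  static int ordForValue(String[] segmentTerms, String value) {
    if (value == null) {
      return -1;
    }
    return Arrays.binarySearch(segmentTerms, value);
  }

  public static void main(String[] args) {
    String[] terms = {"ams", "bru", "dus"};
    for (String v : new String[] {null, "bru", "zzz"}) {
      int ord = ordForValue(terms, v);
      boolean keep = v == null || ord >= 0; // mirrors the "groupValue == null || ord >= 0" guard
      System.out.println(v + " -> ord=" + ord + " keep=" + keep);
    }
  }
}
```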
@@ -343,7 +353,7 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead

     OrdAllGroupHeadsCollector(String groupField, Sort sortWithinGroup, int initialSize) {
       super(groupField, sortWithinGroup.getSort().length);
-      ordSet = new SentinelIntSet(initialSize, -1);
+      ordSet = new SentinelIntSet(initialSize, -2);
       collectedGroups = new ArrayList<GroupHead>(initialSize);

       final SortField[] sortFields = sortWithinGroup.getSort();

@@ -367,14 +377,14 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead
       GroupHead groupHead;
       if (!ordSet.exists(key)) {
         ordSet.put(key);
-        BytesRef term = key == 0 ? null : groupIndex.getTerm(doc, new BytesRef());
+        BytesRef term = key == -1 ? null : groupIndex.getTerm(doc, new BytesRef());
         groupHead = new GroupHead(doc, term);
         collectedGroups.add(groupHead);
-        segmentGroupHeads[key] = groupHead;
+        segmentGroupHeads[key+1] = groupHead;
         temporalResult.stop = true;
       } else {
         temporalResult.stop = false;
-        groupHead = segmentGroupHeads[key];
+        groupHead = segmentGroupHeads[key+1];
       }
       temporalResult.groupHead = groupHead;
     }
@@ -388,15 +398,26 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead

       // Clear ordSet and fill it with previous encountered groups that can occur in the current segment.
       ordSet.clear();
-      segmentGroupHeads = new GroupHead[groupIndex.numOrd()];
+      segmentGroupHeads = new GroupHead[groupIndex.numOrd()+1];
       for (GroupHead collectedGroup : collectedGroups) {
-        int groupOrd = groupIndex.binarySearchLookup(collectedGroup.groupValue, scratchBytesRef);
-        if (groupOrd >= 0) {
+        int groupOrd;
+        if (collectedGroup.groupValue == null) {
+          groupOrd = -1;
+        } else {
+          groupOrd = groupIndex.binarySearchLookup(collectedGroup.groupValue, scratchBytesRef);
+        }
+        if (collectedGroup.groupValue == null || groupOrd >= 0) {
           ordSet.put(groupOrd);
-          segmentGroupHeads[groupOrd] = collectedGroup;
+          segmentGroupHeads[groupOrd+1] = collectedGroup;

           for (int i = 0; i < sortsIndex.length; i++) {
-            collectedGroup.sortOrds[i] = sortsIndex[i].binarySearchLookup(collectedGroup.sortValues[i], scratchBytesRef);
+            int sortOrd;
+            if (collectedGroup.sortValues[i] == null) {
+              sortOrd = -1;
+            } else {
+              sortOrd = sortsIndex[i].binarySearchLookup(collectedGroup.sortValues[i], scratchBytesRef);
+            }
+            collectedGroup.sortOrds[i] = sortOrd;
           }
         }
       }
@@ -451,7 +472,7 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead

     ScoreAllGroupHeadsCollector(String groupField, Sort sortWithinGroup, int initialSize) {
       super(groupField, sortWithinGroup.getSort().length);
-      ordSet = new SentinelIntSet(initialSize, -1);
+      ordSet = new SentinelIntSet(initialSize, -2);
       collectedGroups = new ArrayList<GroupHead>(initialSize);

       final SortField[] sortFields = sortWithinGroup.getSort();

@@ -475,14 +496,14 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead
       GroupHead groupHead;
       if (!ordSet.exists(key)) {
         ordSet.put(key);
-        BytesRef term = key == 0 ? null : groupIndex.getTerm(doc, new BytesRef());
+        BytesRef term = key == -1 ? null : groupIndex.getTerm(doc, new BytesRef());
         groupHead = new GroupHead(doc, term);
         collectedGroups.add(groupHead);
-        segmentGroupHeads[key] = groupHead;
+        segmentGroupHeads[key+1] = groupHead;
         temporalResult.stop = true;
       } else {
         temporalResult.stop = false;
-        groupHead = segmentGroupHeads[key];
+        groupHead = segmentGroupHeads[key+1];
       }
       temporalResult.groupHead = groupHead;
     }

@@ -493,12 +514,17 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead

       // Clear ordSet and fill it with previous encountered groups that can occur in the current segment.
       ordSet.clear();
-      segmentGroupHeads = new GroupHead[groupIndex.numOrd()];
+      segmentGroupHeads = new GroupHead[groupIndex.numOrd()+1];
       for (GroupHead collectedGroup : collectedGroups) {
-        int ord = groupIndex.binarySearchLookup(collectedGroup.groupValue, scratchBytesRef);
-        if (ord >= 0) {
+        int ord;
+        if (collectedGroup.groupValue == null) {
+          ord = -1;
+        } else {
+          ord = groupIndex.binarySearchLookup(collectedGroup.groupValue, scratchBytesRef);
+        }
+        if (collectedGroup.groupValue == null || ord >= 0) {
           ordSet.put(ord);
-          segmentGroupHeads[ord] = collectedGroup;
+          segmentGroupHeads[ord+1] = collectedGroup;
         }
       }
     }

@@ -537,5 +563,4 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead
     }

   }

 }
@@ -66,7 +66,7 @@ public class TermAllGroupsCollector extends AbstractAllGroupsCollector<BytesRef>
    * heap usage is 4 bytes * initialSize.
    */
   public TermAllGroupsCollector(String groupField, int initialSize) {
-    ordSet = new SentinelIntSet(initialSize, -1);
+    ordSet = new SentinelIntSet(initialSize, -2);
     groups = new ArrayList<BytesRef>(initialSize);
     this.groupField = groupField;
   }
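The sentinel moves from -1 to -2 throughout because -1 is now a legitimate key (the ord of the missing group), and a sentinel set must use a value that callers will never store. A compact sketch of the sentinel-set idea (simplified and fixed-size; this is not the real org.apache.lucene.util.SentinelIntSet):

```java
// Simplified open-addressing int set using a sentinel for empty slots.
// The sentinel must be a value that is never put(), hence -2 once -1 became legal.
class TinySentinelIntSet {
  final int[] keys;
  final int emptyVal;

  TinySentinelIntSet(int size, int emptyVal) {
    this.emptyVal = emptyVal;
    keys = new int[size];
    java.util.Arrays.fill(keys, emptyVal);
  }

  private int slot(int key) {
    int s = (key & 0x7fffffff) % keys.length;
    while (keys[s] != emptyVal && keys[s] != key) {
      s = (s + 1) % keys.length; // linear probing; assumes the set never fills up
    }
    return s;
  }

  boolean exists(int key) { return keys[slot(key)] == key; }
  int put(int key)        { int s = slot(key); keys[s] = key; return s; }

  public static void main(String[] args) {
    TinySentinelIntSet set = new TinySentinelIntSet(8, -2);
    set.put(-1); // legal now: -1 is the "missing value" ord
    System.out.println(set.exists(-1) + " " + set.exists(3)); // true false
  }
}
```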
@@ -87,7 +87,7 @@ public class TermAllGroupsCollector extends AbstractAllGroupsCollector<BytesRef>
     int key = index.getOrd(doc);
     if (!ordSet.exists(key)) {
       ordSet.put(key);
-      BytesRef term = key == 0 ? null : index.lookup(key, new BytesRef());
+      BytesRef term = key == -1 ? null : index.lookup(key, new BytesRef());
       groups.add(term);
     }
   }

@@ -104,11 +104,14 @@ public class TermAllGroupsCollector extends AbstractAllGroupsCollector<BytesRef>
     // Clear ordSet and fill it with previous encountered groups that can occur in the current segment.
     ordSet.clear();
     for (BytesRef countedGroup : groups) {
-      int ord = index.binarySearchLookup(countedGroup, spareBytesRef);
-      if (ord >= 0) {
-        ordSet.put(ord);
-      }
+      if (countedGroup == null) {
+        ordSet.put(-1);
+      } else {
+        int ord = index.binarySearchLookup(countedGroup, spareBytesRef);
+        if (ord >= 0) {
+          ordSet.put(ord);
+        }
+      }
     }
   }

 }
@@ -60,7 +60,7 @@ public class TermDistinctValuesCollector extends AbstractDistinctValuesCollector
     for (SearchGroup<BytesRef> group : groups) {
       this.groups.add(new GroupCount(group.groupValue));
     }
-    ordSet = new SentinelIntSet(groups.size(), -1);
+    ordSet = new SentinelIntSet(groups.size(), -2);
     groupCounts = new GroupCount[ordSet.keys.length];
   }

@@ -69,11 +69,12 @@ public class TermDistinctValuesCollector extends AbstractDistinctValuesCollector
     if (slot < 0) {
       return;
     }
+    int groupOrd = groupFieldTermIndex.getOrd(doc);

     GroupCount gc = groupCounts[slot];
     int countOrd = countFieldTermIndex.getOrd(doc);
-    if (doesNotContainsOrd(countOrd, gc.ords)) {
-      if (countOrd == 0) {
+    if (doesNotContainOrd(countOrd, gc.ords)) {
+      if (countOrd == -1) {
         gc.uniqueValues.add(null);
       } else {
         gc.uniqueValues.add(countFieldTermIndex.lookup(countOrd, new BytesRef()));

@@ -87,7 +88,7 @@ public class TermDistinctValuesCollector extends AbstractDistinctValuesCollector
     }
   }

-  private boolean doesNotContainsOrd(int ord, int[] ords) {
+  private boolean doesNotContainOrd(int ord, int[] ords) {
     if (ords.length == 0) {
       return true;
     } else if (ords.length == 1) {

@@ -103,21 +104,21 @@ public class TermDistinctValuesCollector extends AbstractDistinctValuesCollector
   public void setNextReader(AtomicReaderContext context) throws IOException {
     groupFieldTermIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
     countFieldTermIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), countField);

     ordSet.clear();
     BytesRef scratch = new BytesRef();
     for (GroupCount group : groups) {
-      int groupOrd = group.groupValue == null ? 0 : groupFieldTermIndex.binarySearchLookup(group.groupValue, spare);
-      if (groupOrd < 0) {
+      int groupOrd = group.groupValue == null ? -1 : groupFieldTermIndex.binarySearchLookup(group.groupValue, spare);
+      if (group.groupValue != null && groupOrd < 0) {
         continue;
       }

       groupCounts[ordSet.put(groupOrd)] = group;
       group.ords = new int[group.uniqueValues.size()];
-      Arrays.fill(group.ords, -1);
+      Arrays.fill(group.ords, -2);
       int i = 0;
       for (BytesRef value : group.uniqueValues) {
-        int countOrd = value == null ? 0 : countFieldTermIndex.binarySearchLookup(value, new BytesRef());
-        if (countOrd >= 0) {
+        int countOrd = value == null ? -1 : countFieldTermIndex.binarySearchLookup(value, scratch);
+        if (value == null || countOrd >= 0) {
           group.ords[i++] = countOrd;
         }
       }
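Note the idiom `groupCounts[ordSet.put(groupOrd)] = group;` in setNextReader: the set's put() returns the slot where the key landed, so a parallel array can be addressed by the same slot. A sketch of that slot-as-index pattern (hypothetical types; only the put-returns-slot behavior is taken from the diff):

```java
import java.util.Arrays;

// Parallel-array pattern: an int set whose put() returns the slot where the
// key landed, so a companion array can be indexed by the same slot.
class SlotIndexSketch {
  int[] keys;       // -2 = empty (sentinel); -1 is a legal key meaning "missing value"
  String[] payload; // parallel to keys

  SlotIndexSketch(int size) {
    keys = new int[size];
    payload = new String[size];
    Arrays.fill(keys, -2);
  }

  int put(int key) {
    int s = (key & 0x7fffffff) % keys.length;
    while (keys[s] != -2 && keys[s] != key) s = (s + 1) % keys.length;
    keys[s] = key;
    return s;
  }

  public static void main(String[] args) {
    SlotIndexSketch set = new SlotIndexSketch(8);
    set.payload[set.put(-1)] = "group with no value";
    set.payload[set.put(5)] = "group ord 5";
    System.out.println(Arrays.toString(set.payload));
  }
}
```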
@@ -63,7 +63,7 @@ public class TermFirstPassGroupingCollector extends AbstractFirstPassGroupingCol
   @Override
   protected BytesRef getDocGroupValue(int doc) {
     final int ord = index.getOrd(doc);
-    return ord == 0 ? null : index.lookup(ord, scratchBytesRef);
+    return ord == -1 ? null : index.lookup(ord, scratchBytesRef);
   }

   @Override
@@ -70,7 +70,7 @@ public abstract class TermGroupFacetCollector extends AbstractGroupFacetCollecto
   TermGroupFacetCollector(String groupField, String facetField, BytesRef facetPrefix, int initialSize) {
     super(groupField, facetField, facetPrefix);
     groupedFacetHits = new ArrayList<GroupedFacetHit>(initialSize);
-    segmentGroupedFacetHits = new SentinelIntSet(initialSize, -1);
+    segmentGroupedFacetHits = new SentinelIntSet(initialSize, Integer.MIN_VALUE);
   }

   // Implementation for single valued facet fields.

@@ -89,19 +89,19 @@ public abstract class TermGroupFacetCollector extends AbstractGroupFacetCollecto
       }

       int groupOrd = groupFieldTermsIndex.getOrd(doc);
-      int segmentGroupedFacetsIndex = (groupOrd * facetFieldTermsIndex.numOrd()) + facetOrd;
+      int segmentGroupedFacetsIndex = groupOrd * (facetFieldTermsIndex.numOrd()+1) + facetOrd;
       if (segmentGroupedFacetHits.exists(segmentGroupedFacetsIndex)) {
         return;
       }

       segmentTotalCount++;
-      segmentFacetCounts[facetOrd]++;
+      segmentFacetCounts[facetOrd+1]++;

       segmentGroupedFacetHits.put(segmentGroupedFacetsIndex);
       groupedFacetHits.add(
         new GroupedFacetHit(
-          groupOrd == 0 ? null : groupFieldTermsIndex.lookup(groupOrd, new BytesRef()),
-          facetOrd == 0 ? null : facetFieldTermsIndex.lookup(facetOrd, new BytesRef())
+          groupOrd == -1 ? null : groupFieldTermsIndex.lookup(groupOrd, new BytesRef()),
+          facetOrd == -1 ? null : facetFieldTermsIndex.lookup(facetOrd, new BytesRef())
         )
       );
     }
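The pair (groupOrd, facetOrd) is packed into one int so it can live in the SentinelIntSet. The stride has to be `numOrd()+1` because both ords may now be -1, and the sentinel has to be `Integer.MIN_VALUE` because small negative keys such as -1 and -2 become reachable. A quick demonstration of the packing (the bounds are assumptions for the demo):

```java
// Packing a (groupOrd, facetOrd) pair into a single int key.
// Both ords range over -1..numFacetOrds-1, so the stride is numFacetOrds+1.
class FacetKeyPackingSketch {
  public static void main(String[] args) {
    int numFacetOrds = 4; // assumption for the demo
    int stride = numFacetOrds + 1;
    for (int groupOrd : new int[] {-1, 0, 2}) {
      for (int facetOrd : new int[] {-1, 3}) {
        int key = groupOrd * stride + facetOrd;
        System.out.println("(" + groupOrd + "," + facetOrd + ") -> " + key);
      }
    }
    // (-1,3) -> -2 and (0,-1) -> -1: both old sentinel candidates are now
    // legal keys, which is why the set's sentinel became Integer.MIN_VALUE.
  }
}
```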
@@ -113,22 +113,24 @@ public abstract class TermGroupFacetCollector extends AbstractGroupFacetCollecto

       groupFieldTermsIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
       facetFieldTermsIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), facetField);
-      segmentFacetCounts = new int[facetFieldTermsIndex.numOrd()];
+
+      // 1+ to allow for the -1 "not set":
+      segmentFacetCounts = new int[facetFieldTermsIndex.numOrd()+1];
       segmentTotalCount = 0;

       segmentGroupedFacetHits.clear();
       for (GroupedFacetHit groupedFacetHit : groupedFacetHits) {
-        int facetOrd = facetFieldTermsIndex.binarySearchLookup(groupedFacetHit.facetValue, spare);
-        if (facetOrd < 0) {
+        int facetOrd = groupedFacetHit.facetValue == null ? -1 : facetFieldTermsIndex.binarySearchLookup(groupedFacetHit.facetValue, spare);
+        if (groupedFacetHit.facetValue != null && facetOrd < 0) {
          continue;
        }

-        int groupOrd = groupFieldTermsIndex.binarySearchLookup(groupedFacetHit.groupValue, spare);
-        if (groupOrd < 0) {
+        int groupOrd = groupedFacetHit.groupValue == null ? -1 : groupFieldTermsIndex.binarySearchLookup(groupedFacetHit.groupValue, spare);
+        if (groupedFacetHit.groupValue != null && groupOrd < 0) {
          continue;
        }

-        int segmentGroupedFacetsIndex = (groupOrd * facetFieldTermsIndex.numOrd()) + facetOrd;
+        int segmentGroupedFacetsIndex = groupOrd * (facetFieldTermsIndex.numOrd()+1) + facetOrd;
         segmentGroupedFacetHits.put(segmentGroupedFacetsIndex);
       }

@@ -141,9 +143,10 @@ public abstract class TermGroupFacetCollector extends AbstractGroupFacetCollecto
         BytesRef facetEndPrefix = BytesRef.deepCopyOf(facetPrefix);
         facetEndPrefix.append(UnicodeUtil.BIG_TERM);
         endFacetOrd = facetFieldTermsIndex.binarySearchLookup(facetEndPrefix, spare);
+        assert endFacetOrd < 0;
         endFacetOrd = -endFacetOrd - 1; // Points to the ord one higher than facetEndPrefix
       } else {
-        startFacetOrd = 0;
+        startFacetOrd = -1;
         endFacetOrd = facetFieldTermsIndex.numOrd();
       }
     }
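`binarySearchLookup` follows the `Arrays.binarySearch` contract: a miss returns `-(insertionPoint) - 1`, so negating and subtracting one recovers the first ord greater than the probe, which is exactly what an exclusive end bound for a prefix range needs. A quick standalone demonstration:

```java
import java.util.Arrays;

// Decoding a negative binary-search result into an exclusive range bound.
class PrefixRangeSketch {
  public static void main(String[] args) {
    String[] terms = {"ams", "bru", "brussels", "dus"};
    // Probe with a key guaranteed to sort after every "br"-prefixed term
    // (the role UnicodeUtil.BIG_TERM plays in the real code):
    int result = Arrays.binarySearch(terms, "br\uffff");
    assert result < 0; // the probe itself is never a real term
    int endOrd = -result - 1; // first ord AFTER the prefix range
    System.out.println("exclusive end ord = " + endOrd); // 3 ("dus")
  }
}
```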
@@ -157,11 +160,12 @@ public abstract class TermGroupFacetCollector extends AbstractGroupFacetCollecto
       final TermsEnum tenum;

       SegmentResult(int[] counts, int total, TermsEnum tenum, int startFacetOrd, int endFacetOrd) throws IOException {
-        super(counts, total - counts[0], counts[0], endFacetOrd);
+        super(counts, total - counts[0], counts[0], endFacetOrd+1);
         this.tenum = tenum;
-        this.mergePos = startFacetOrd == 0 ? 1 : startFacetOrd;
+        this.mergePos = startFacetOrd == -1 ? 1 : startFacetOrd+1;
         if (mergePos < maxTermPos) {
-          tenum.seekExact(mergePos);
+          assert tenum != null;
+          tenum.seekExact(startFacetOrd == -1 ? 0 : startFacetOrd);
           mergeTerm = tenum.term();
         }
       }
@@ -169,9 +173,7 @@ public abstract class TermGroupFacetCollector extends AbstractGroupFacetCollecto
     protected void nextTerm() throws IOException {
       mergeTerm = tenum.next();
     }

   }

 }

 // Implementation for multi valued facet fields.

@@ -198,7 +200,7 @@ public abstract class TermGroupFacetCollector extends AbstractGroupFacetCollecto

         segmentGroupedFacetHits.put(segmentGroupedFacetsIndex);
         groupedFacetHits.add(
-          new GroupedFacetHit(groupOrd == 0 ? null : groupFieldTermsIndex.lookup(groupOrd, new BytesRef()), null)
+          new GroupedFacetHit(groupOrd == -1 ? null : groupFieldTermsIndex.lookup(groupOrd, new BytesRef()), null)
         );
         return;
       }

@@ -223,7 +225,7 @@ public abstract class TermGroupFacetCollector extends AbstractGroupFacetCollecto
           continue;
         }

-        int segmentGroupedFacetsIndex = (groupOrd * (facetFieldDocTermOrds.numTerms() + 1)) + facetOrd;
+        int segmentGroupedFacetsIndex = groupOrd * (facetFieldDocTermOrds.numTerms() + 1) + facetOrd;
         if (segmentGroupedFacetHits.exists(segmentGroupedFacetsIndex)) {
           continue;
         }

@@ -234,7 +236,7 @@ public abstract class TermGroupFacetCollector extends AbstractGroupFacetCollecto
         segmentGroupedFacetHits.put(segmentGroupedFacetsIndex);
         groupedFacetHits.add(
           new GroupedFacetHit(
-            groupOrd == 0 ? null : groupFieldTermsIndex.lookup(groupOrd, new BytesRef()),
+            groupOrd == -1 ? null : groupFieldTermsIndex.lookup(groupOrd, new BytesRef()),
             facetOrd == facetFieldDocTermOrds.numTerms() ? null : BytesRef.deepCopyOf(facetFieldDocTermOrds.lookupTerm(facetOrdTermsEnum, facetOrd))
           )
         );

@@ -257,8 +259,8 @@ public abstract class TermGroupFacetCollector extends AbstractGroupFacetCollecto

       segmentGroupedFacetHits.clear();
       for (GroupedFacetHit groupedFacetHit : groupedFacetHits) {
-        int groupOrd = groupFieldTermsIndex.binarySearchLookup(groupedFacetHit.groupValue, spare);
-        if (groupOrd < 0) {
+        int groupOrd = groupedFacetHit.groupValue == null ? -1 : groupFieldTermsIndex.binarySearchLookup(groupedFacetHit.groupValue, spare);
+        if (groupedFacetHit.groupValue != null && groupOrd < 0) {
          continue;
        }

@@ -273,7 +275,7 @@ public abstract class TermGroupFacetCollector extends AbstractGroupFacetCollecto
         }

         // (facetFieldDocTermOrds.numTerms() + 1) for all possible facet values and docs not containing facet field
-        int segmentGroupedFacetsIndex = (groupOrd * (facetFieldDocTermOrds.numTerms() + 1)) + facetOrd;
+        int segmentGroupedFacetsIndex = groupOrd * (facetFieldDocTermOrds.numTerms() + 1) + facetOrd;
         segmentGroupedFacetHits.put(segmentGroupedFacetsIndex);
       }

@@ -329,10 +331,8 @@ public abstract class TermGroupFacetCollector extends AbstractGroupFacetCollecto
     protected void nextTerm() throws IOException {
       mergeTerm = tenum.next();
     }

   }
 }

 class GroupedFacetHit {
@@ -47,7 +47,7 @@ public class TermSecondPassGroupingCollector extends AbstractSecondPassGroupingC
                                        int maxDocsPerGroup, boolean getScores, boolean getMaxScores, boolean fillSortFields)
       throws IOException {
     super(groups, groupSort, withinGroupSort, maxDocsPerGroup, getScores, getMaxScores, fillSortFields);
-    ordSet = new SentinelIntSet(groupMap.size(), -1);
+    ordSet = new SentinelIntSet(groupMap.size(), -2);
     this.groupField = groupField;
     groupDocs = (SearchGroupDocs<BytesRef>[]) new SearchGroupDocs[ordSet.keys.length];
   }

@@ -61,8 +61,8 @@ public class TermSecondPassGroupingCollector extends AbstractSecondPassGroupingC
     ordSet.clear();
     for (SearchGroupDocs<BytesRef> group : groupMap.values()) {
//       System.out.println("  group=" + (group.groupValue == null ? "null" : group.groupValue.utf8ToString()));
-      int ord = group.groupValue == null ? 0 : index.binarySearchLookup(group.groupValue, spareBytesRef);
-      if (ord >= 0) {
+      int ord = group.groupValue == null ? -1 : index.binarySearchLookup(group.groupValue, spareBytesRef);
+      if (group.groupValue == null || ord >= 0) {
        groupDocs[ordSet.put(ord)] = group;
      }
    }
@@ -30,10 +30,10 @@ public abstract class AbstractGroupingTestCase extends LuceneTestCase {
     String randomValue;
     do {
       // B/c of DV based impl we can't see the difference between an empty string and a null value.
-      // For that reason we don't generate empty string groups.
+      // For that reason we don't generate empty string
+      // groups.
       randomValue = _TestUtil.randomRealisticUnicodeString(random());
     } while ("".equals(randomValue));
     return randomValue;
   }

 }
@@ -213,13 +213,13 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase {
     if (canUseIDV) {
       switch(valueType) {
         case BYTES_VAR_DEREF:
-          valuesField = new DerefBytesDocValuesField("group", new BytesRef());
+          valuesField = new DerefBytesDocValuesField("group_dv", new BytesRef());
           break;
         case BYTES_VAR_STRAIGHT:
-          valuesField = new StraightBytesDocValuesField("group", new BytesRef());
+          valuesField = new StraightBytesDocValuesField("group_dv", new BytesRef());
           break;
         case BYTES_VAR_SORTED:
-          valuesField = new SortedBytesDocValuesField("group", new BytesRef());
+          valuesField = new SortedBytesDocValuesField("group_dv", new BytesRef());
           break;
         default:
           fail("unhandled type");

@@ -525,7 +525,7 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase {
       collector = new FunctionAllGroupHeadsCollector(vs, new HashMap<Object, Object>(), sortWithinGroup);
     } else if (canUseIDV && random().nextBoolean()) {
       boolean diskResident = random().nextBoolean();
-      collector = DVAllGroupHeadsCollector.create(groupField, sortWithinGroup, valueType, diskResident);
+      collector = DVAllGroupHeadsCollector.create(groupField + "_dv", sortWithinGroup, valueType, diskResident);
     } else {
       collector = TermAllGroupHeadsCollector.create(groupField, sortWithinGroup);
     }

@@ -543,13 +543,13 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase {
     Field valuesField = null;
     switch(valueType) {
       case BYTES_VAR_DEREF:
-        valuesField = new DerefBytesDocValuesField(groupField, new BytesRef(value));
+        valuesField = new DerefBytesDocValuesField(groupField + "_dv", new BytesRef(value));
         break;
       case BYTES_VAR_STRAIGHT:
-        valuesField = new StraightBytesDocValuesField(groupField, new BytesRef(value));
+        valuesField = new StraightBytesDocValuesField(groupField + "_dv", new BytesRef(value));
         break;
       case BYTES_VAR_SORTED:
-        valuesField = new SortedBytesDocValuesField(groupField, new BytesRef(value));
+        valuesField = new SortedBytesDocValuesField(groupField + "_dv", new BytesRef(value));
         break;
       default:
         fail("unhandled type");
@@ -45,7 +45,9 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
   private final static NullComparator nullComparator = new NullComparator();

   private final String groupField = "author";
+  private final String dvGroupField = "author_dv";
   private final String countField = "publisher";
+  private final String dvCountField = "publisher_dv";

   public void testSimple() throws Exception {
     Random random = random();

@@ -249,7 +251,15 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
       if (VERBOSE) {
         System.out.println("Index iter=" + indexIter);
         System.out.println("Search iter=" + searchIter);
-        System.out.println("Collector class name=" + distinctValuesCollector.getClass().getName());
+        System.out.println("1st pass collector class name=" + firstCollector.getClass().getName());
+        System.out.println("2nd pass collector class name=" + distinctValuesCollector.getClass().getName());
         System.out.println("Search term=" + term);
+        System.out.println("DVType=" + dvType);
+        System.out.println("1st pass groups=" + firstCollector.getTopGroups(0, false));
+        System.out.println("Expected:");
+        printGroups(expectedResult);
+        System.out.println("Actual:");
+        printGroups(actualResult);
       }

       assertEquals(expectedResult.size(), actualResult.size());
@@ -262,7 +272,7 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
         Collections.sort(expectedUniqueValues, nullComparator);
         List<Comparable<?>> actualUniqueValues = new ArrayList<Comparable<?>>(actual.uniqueValues);
         Collections.sort(actualUniqueValues, nullComparator);
-        for (int j = 0; j < expected.uniqueValues.size(); j++) {
+        for (int j = 0; j < expectedUniqueValues.size(); j++) {
          assertValues(expectedUniqueValues.get(j), actualUniqueValues.get(j));
        }
      }

@@ -272,6 +282,25 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
     }
   }

+  private void printGroups(List<AbstractDistinctValuesCollector.GroupCount<Comparable<?>>> results) {
+    for(int i=0;i<results.size();i++) {
+      AbstractDistinctValuesCollector.GroupCount<Comparable<?>> group = results.get(i);
+      Object gv = group.groupValue;
+      if (gv instanceof BytesRef) {
+        System.out.println(i + ": groupValue=" + ((BytesRef) gv).utf8ToString());
+      } else {
+        System.out.println(i + ": groupValue=" + gv);
+      }
+      for(Object o : group.uniqueValues) {
+        if (o instanceof BytesRef) {
+          System.out.println("  " + ((BytesRef) o).utf8ToString());
+        } else {
+          System.out.println("  " + o);
+        }
+      }
+    }
+  }
+
   private void assertValues(Object expected, Object actual) {
     if (expected == null) {
       compareNull(actual);
@@ -316,24 +345,25 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
   }

   private void addField(Document doc, String field, String value, DocValues.Type type) {
-    doc.add(new StringField(field, value, Field.Store.NO));
+    doc.add(new StringField(field, value, Field.Store.YES));
     if (type == null) {
       return;
     }
+    String dvField = field + "_dv";

     Field valuesField = null;
     switch (type) {
       case VAR_INTS:
-        valuesField = new PackedLongDocValuesField(field, Integer.parseInt(value));
+        valuesField = new PackedLongDocValuesField(dvField, Integer.parseInt(value));
         break;
       case FLOAT_64:
-        valuesField = new DoubleDocValuesField(field, Double.parseDouble(value));
+        valuesField = new DoubleDocValuesField(dvField, Double.parseDouble(value));
         break;
       case BYTES_VAR_STRAIGHT:
-        valuesField = new StraightBytesDocValuesField(field, new BytesRef(value));
+        valuesField = new StraightBytesDocValuesField(dvField, new BytesRef(value));
         break;
       case BYTES_VAR_SORTED:
-        valuesField = new SortedBytesDocValuesField(field, new BytesRef(value));
+        valuesField = new SortedBytesDocValuesField(dvField, new BytesRef(value));
         break;
     }
     doc.add(valuesField);
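The tests now index every value twice under a naming convention: the inverted field keeps its name, and the doc-values copy gets a "_dv" suffix, so term-based and DV-based collectors can be exercised against the same index. A sketch of the convention with plain data structures (the holder class is hypothetical; plain maps stand in for a Lucene Document):

```java
import java.util.HashMap;
import java.util.Map;

// Sketch of the field-naming convention: "author" is the indexed field,
// "author_dv" its doc-values twin under the suffixed name.
class DvFieldConventionSketch {
  static void addField(Map<String, String> doc, String field, String value, boolean withDocValues) {
    doc.put(field, value);             // inverted/stored field
    if (withDocValues) {
      doc.put(field + "_dv", value);   // doc-values twin
    }
  }

  public static void main(String[] args) {
    Map<String, String> doc = new HashMap<>();
    addField(doc, "author", "mccandless", true);
    System.out.println(doc); // {author=mccandless, author_dv=mccandless}
  }
}
```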
@@ -348,7 +378,7 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
     Collection<SearchGroup<T>> searchGroups = firstPassGroupingCollector.getTopGroups(0, false);
     if (DVFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) {
       boolean diskResident = random.nextBoolean();
-      return DVDistinctValuesCollector.create(groupField, countField, searchGroups, diskResident, dvType);
+      return DVDistinctValuesCollector.create(dvGroupField, dvCountField, searchGroups, diskResident, dvType);
     } else if (FunctionFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) {
       return (AbstractDistinctValuesCollector) new FunctionDistinctValuesCollector(new HashMap<Object, Object>(), new BytesRefFieldSource(groupField), new BytesRefFieldSource(countField), (Collection) searchGroups);
     } else {

@@ -362,7 +392,7 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
     if (dvType != null) {
       if (random.nextBoolean()) {
         boolean diskResident = random.nextBoolean();
-        return DVFirstPassGroupingCollector.create(groupSort, topNGroups, groupField, dvType, diskResident);
+        return DVFirstPassGroupingCollector.create(groupSort, topNGroups, dvGroupField, dvType, diskResident);
       } else if (random.nextBoolean()) {
         return (AbstractFirstPassGroupingCollector<T>) new FunctionFirstPassGroupingCollector(new BytesRefFieldSource(groupField), new HashMap<Object, Object>(), groupSort, topNGroups);
       } else {
@@ -450,18 +480,25 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
       countsVals.add(countValue);

       Document doc = new Document();
-      doc.add(new StringField("id", String.format(Locale.ROOT, "%09d", i), Field.Store.NO));
+      doc.add(new StringField("id", String.format(Locale.ROOT, "%09d", i), Field.Store.YES));
       if (groupValue != null) {
         addField(doc, groupField, groupValue, dvType);
       }
       if (countValue != null) {
         addField(doc, countField, countValue, dvType);
       }
-      doc.add(new TextField("content", content, Field.Store.NO));
+      doc.add(new TextField("content", content, Field.Store.YES));
       w.addDocument(doc);
     }

     DirectoryReader reader = w.getReader();
+    if (VERBOSE) {
+      for(int docID=0;docID<reader.maxDoc();docID++) {
+        StoredDocument doc = reader.document(docID);
+        System.out.println("docID=" + docID + " id=" + doc.get("id") + " content=" + doc.get("content") + " author=" + doc.get("author") + " publisher=" + doc.get("publisher"));
+      }
+    }

     w.close();
     return new IndexContext(dir, reader, dvType, searchTermToGroupCounts, contentStrings.toArray(new String[contentStrings.size()]));
   }
@@ -55,38 +55,38 @@ public class GroupFacetCollectorTest extends AbstractGroupingTestCase {

     // 0
     Document doc = new Document();
-    addField(doc, groupField, "a", canUseDV);
-    addField(doc, "airport", "ams", canUseDV);
-    addField(doc, "duration", "5", canUseDV);
+    addField(doc, groupField, "a", useDv);
+    addField(doc, "airport", "ams", useDv);
+    addField(doc, "duration", "5", useDv);
     w.addDocument(doc);

     // 1
     doc = new Document();
-    addField(doc, groupField, "a", canUseDV);
-    addField(doc, "airport", "dus", canUseDV);
-    addField(doc, "duration", "10", canUseDV);
+    addField(doc, groupField, "a", useDv);
+    addField(doc, "airport", "dus", useDv);
+    addField(doc, "duration", "10", useDv);
     w.addDocument(doc);

     // 2
     doc = new Document();
-    addField(doc, groupField, "b", canUseDV);
-    addField(doc, "airport", "ams", canUseDV);
-    addField(doc, "duration", "10", canUseDV);
+    addField(doc, groupField, "b", useDv);
+    addField(doc, "airport", "ams", useDv);
+    addField(doc, "duration", "10", useDv);
     w.addDocument(doc);
     w.commit(); // To ensure a second segment

     // 3
     doc = new Document();
-    addField(doc, groupField, "b", canUseDV);
-    addField(doc, "airport", "ams", canUseDV);
-    addField(doc, "duration", "5", canUseDV);
+    addField(doc, groupField, "b", useDv);
+    addField(doc, "airport", "ams", useDv);
+    addField(doc, "duration", "5", useDv);
     w.addDocument(doc);

     // 4
     doc = new Document();
-    addField(doc, groupField, "b", canUseDV);
-    addField(doc, "airport", "ams", canUseDV);
-    addField(doc, "duration", "5", canUseDV);
+    addField(doc, groupField, "b", useDv);
+    addField(doc, "airport", "ams", useDv);
+    addField(doc, "duration", "5", useDv);
     w.addDocument(doc);

     IndexSearcher indexSearcher = new IndexSearcher(w.getReader());
@@ -119,29 +119,29 @@ public class GroupFacetCollectorTest extends AbstractGroupingTestCase {

     // 5
     doc = new Document();
-    addField(doc, groupField, "b", canUseDV);
-    addField(doc, "duration", "5", canUseDV);
+    addField(doc, groupField, "b", useDv);
+    addField(doc, "duration", "5", useDv);
     w.addDocument(doc);

     // 6
     doc = new Document();
-    addField(doc, groupField, "b", canUseDV);
-    addField(doc, "airport", "bru", canUseDV);
-    addField(doc, "duration", "10", canUseDV);
+    addField(doc, groupField, "b", useDv);
+    addField(doc, "airport", "bru", useDv);
+    addField(doc, "duration", "10", useDv);
     w.addDocument(doc);

     // 7
     doc = new Document();
-    addField(doc, groupField, "b", canUseDV);
-    addField(doc, "airport", "bru", canUseDV);
-    addField(doc, "duration", "15", canUseDV);
+    addField(doc, groupField, "b", useDv);
+    addField(doc, "airport", "bru", useDv);
+    addField(doc, "duration", "15", useDv);
     w.addDocument(doc);

     // 8
     doc = new Document();
-    addField(doc, groupField, "a", canUseDV);
-    addField(doc, "airport", "bru", canUseDV);
-    addField(doc, "duration", "10", canUseDV);
+    addField(doc, groupField, "a", useDv);
+    addField(doc, "airport", "bru", useDv);
+    addField(doc, "duration", "10", useDv);
     w.addDocument(doc);

     indexSearcher.getIndexReader().close();
@@ -172,16 +172,16 @@ public class GroupFacetCollectorTest extends AbstractGroupingTestCase {

     // 9
     doc = new Document();
-    addField(doc, groupField, "c", canUseDV);
-    addField(doc, "airport", "bru", canUseDV);
-    addField(doc, "duration", "15", canUseDV);
+    addField(doc, groupField, "c", useDv);
+    addField(doc, "airport", "bru", useDv);
+    addField(doc, "duration", "15", useDv);
     w.addDocument(doc);

     // 10
     doc = new Document();
-    addField(doc, groupField, "c", canUseDV);
-    addField(doc, "airport", "dus", canUseDV);
-    addField(doc, "duration", "10", canUseDV);
+    addField(doc, groupField, "c", useDv);
+    addField(doc, "airport", "dus", useDv);
+    addField(doc, "duration", "10", useDv);
     w.addDocument(doc);

     indexSearcher.getIndexReader().close();
@@ -344,7 +344,7 @@ public class GroupFacetCollectorTest extends AbstractGroupingTestCase {
     }

     GroupedFacetResult expectedFacetResult = createExpectedFacetResult(searchTerm, context, offset, limit, minCount, orderByCount, facetPrefix);
-    AbstractGroupFacetCollector groupFacetCollector = createRandomCollector("group", "facet", facetPrefix, multipleFacetsPerDocument, useDv);
+    AbstractGroupFacetCollector groupFacetCollector = createRandomCollector(useDv ? "group_dv" : "group", useDv ? "facet_dv" : "facet", facetPrefix, multipleFacetsPerDocument, useDv);
     searcher.search(new TermQuery(new Term("content", searchTerm)), groupFacetCollector);
     TermGroupFacetCollector.GroupedFacetResult actualFacetResult = groupFacetCollector.mergeSegmentResults(size, minCount, orderByCount);
@@ -456,7 +456,7 @@ public class GroupFacetCollectorTest extends AbstractGroupingTestCase {
     Document docNoFacet = new Document();
     Document docNoGroupNoFacet = new Document();
     Field group = newStringField("group", "", Field.Store.NO);
-    Field groupDc = new SortedBytesDocValuesField("group", new BytesRef());
+    Field groupDc = new SortedBytesDocValuesField("group_dv", new BytesRef());
     if (useDv) {
       doc.add(groupDc);
       docNoFacet.add(groupDc);

@@ -469,7 +469,7 @@ public class GroupFacetCollectorTest extends AbstractGroupingTestCase {
       facetFields[0] = newStringField("facet", "", Field.Store.NO);
       doc.add(facetFields[0]);
       docNoGroup.add(facetFields[0]);
-      facetFields[1] = new SortedBytesDocValuesField("facet", new BytesRef());
+      facetFields[1] = new SortedBytesDocValuesField("facet_dv", new BytesRef());
       doc.add(facetFields[1]);
       docNoGroup.add(facetFields[1]);
     } else {
@@ -174,7 +174,7 @@ public class TestGrouping extends LuceneTestCase {
   private void addGroupField(Document doc, String groupField, String value, boolean canUseIDV) {
     doc.add(new TextField(groupField, value, Field.Store.YES));
     if (canUseIDV) {
-      doc.add(new SortedBytesDocValuesField(groupField, new BytesRef(value)));
+      doc.add(new SortedBytesDocValuesField(groupField + "_dv", new BytesRef(value)));
     }
   }

@@ -182,7 +182,7 @@ public class TestGrouping extends LuceneTestCase {
     AbstractFirstPassGroupingCollector<?> selected;
     if (canUseIDV && random().nextBoolean()) {
       boolean diskResident = random().nextBoolean();
-      selected = DVFirstPassGroupingCollector.create(groupSort, topDocs, groupField, Type.BYTES_VAR_SORTED, diskResident);
+      selected = DVFirstPassGroupingCollector.create(groupSort, topDocs, groupField + "_dv", Type.BYTES_VAR_SORTED, diskResident);
     } else if (random().nextBoolean()) {
       ValueSource vs = new BytesRefFieldSource(groupField);
       selected = new FunctionFirstPassGroupingCollector(vs, new HashMap<Object, Object>(), groupSort, topDocs);

@@ -198,7 +198,7 @@ public class TestGrouping extends LuceneTestCase {
   private AbstractFirstPassGroupingCollector<?> createFirstPassCollector(String groupField, Sort groupSort, int topDocs, AbstractFirstPassGroupingCollector<?> firstPassGroupingCollector) throws IOException {
     if (DVFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) {
       boolean diskResident = random().nextBoolean();
-      return DVFirstPassGroupingCollector.create(groupSort, topDocs, groupField, Type.BYTES_VAR_SORTED, diskResident);
+      return DVFirstPassGroupingCollector.create(groupSort, topDocs, groupField + "_dv", Type.BYTES_VAR_SORTED, diskResident);
     } else if (TermFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) {
       ValueSource vs = new BytesRefFieldSource(groupField);
       return new FunctionFirstPassGroupingCollector(vs, new HashMap<Object, Object>(), groupSort, topDocs);

@@ -221,7 +221,7 @@ public class TestGrouping extends LuceneTestCase {
     if (DVFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) {
       boolean diskResident = random().nextBoolean();
       Collection<SearchGroup<T>> searchGroups = firstPassGroupingCollector.getTopGroups(groupOffset, fillSortFields);
-      return DVSecondPassGroupingCollector.create(groupField, diskResident, Type.BYTES_VAR_SORTED, searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup, getScores, getMaxScores, fillSortFields);
+      return DVSecondPassGroupingCollector.create(groupField + "_dv", diskResident, Type.BYTES_VAR_SORTED, searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup, getScores, getMaxScores, fillSortFields);
     } else if (TermFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) {
       Collection<SearchGroup<BytesRef>> searchGroups = firstPassGroupingCollector.getTopGroups(groupOffset, fillSortFields);
       return (AbstractSecondPassGroupingCollector) new TermSecondPassGroupingCollector(groupField, searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup , getScores, getMaxScores, fillSortFields);

@@ -245,7 +245,7 @@ public class TestGrouping extends LuceneTestCase {
                                        boolean fillSortFields) throws IOException {
     if (DVFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) {
       boolean diskResident = random().nextBoolean();
-      return DVSecondPassGroupingCollector.create(groupField, diskResident, Type.BYTES_VAR_SORTED, (Collection) searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup, getScores, getMaxScores, fillSortFields);
+      return DVSecondPassGroupingCollector.create(groupField + "_dv", diskResident, Type.BYTES_VAR_SORTED, (Collection) searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup, getScores, getMaxScores, fillSortFields);
     } else if (firstPassGroupingCollector.getClass().isAssignableFrom(TermFirstPassGroupingCollector.class)) {
       return new TermSecondPassGroupingCollector(groupField, searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup , getScores, getMaxScores, fillSortFields);
     } else {

@@ -275,7 +275,7 @@ public class TestGrouping extends LuceneTestCase {
       return new TermAllGroupsCollector(groupField);
     } else if (firstPassGroupingCollector.getClass().isAssignableFrom(DVFirstPassGroupingCollector.class)) {
       boolean diskResident = random().nextBoolean();
-      return DVAllGroupsCollector.create(groupField, Type.BYTES_VAR_SORTED, diskResident);
+      return DVAllGroupsCollector.create(groupField + "_dv", Type.BYTES_VAR_SORTED, diskResident);
     } else {
       ValueSource vs = new BytesRefFieldSource(groupField);
       return new FunctionAllGroupsCollector(vs, new HashMap<Object, Object>());

@@ -696,7 +696,7 @@ public class TestGrouping extends LuceneTestCase {

     Document doc = new Document();
     Document docNoGroup = new Document();
-    Field idvGroupField = new SortedBytesDocValuesField("group", new BytesRef());
+    Field idvGroupField = new SortedBytesDocValuesField("group_dv", new BytesRef());
     if (canUseIDV) {
       doc.add(idvGroupField);
     }

@@ -765,6 +765,7 @@ public class TestGrouping extends LuceneTestCase {

     try {
       final IndexSearcher s = newSearcher(r);

       if (SlowCompositeReaderWrapper.class.isAssignableFrom(s.getIndexReader().getClass())) {
         canUseIDV = false;
       } else {

@@ -1162,7 +1163,7 @@ public class TestGrouping extends LuceneTestCase {
       // TODO: swap in caching, all groups collector hereassertEquals(expected.totalHitCount, actual.totalHitCount);
       // too...
       if (VERBOSE) {
-        System.out.println("TEST: " + subSearchers.length + " shards: " + Arrays.toString(subSearchers));
+        System.out.println("TEST: " + subSearchers.length + " shards: " + Arrays.toString(subSearchers) + " canUseIDV=" + canUseIDV);
       }
       // Run 1st pass collector to get top groups per shard
       final Weight w = topSearcher.createNormalizedWeight(query);

@@ -1184,6 +1185,10 @@ public class TestGrouping extends LuceneTestCase {
       } else {
         firstPassCollector = createFirstPassCollector("group", groupSort, groupOffset + topNGroups, firstPassCollector);
       }
+      if (VERBOSE) {
+        System.out.println("  shard=" + shardIDX);
+        System.out.println("  1st pass collector=" + firstPassCollector);
+      }
       firstPassGroupingCollectors.add(firstPassCollector);
       subSearchers[shardIDX].search(w, firstPassCollector);
       final Collection<SearchGroup<BytesRef>> topGroups = getSearchGroups(firstPassCollector, 0, true);
@@ -59,14 +59,14 @@ public abstract class DocTermsIndexDocValues extends FunctionValues {

   @Override
   public boolean exists(int doc) {
-    return termsIndex.getOrd(doc) != 0;
+    return termsIndex.getOrd(doc) != -1;
   }

   @Override
   public boolean bytesVal(int doc, BytesRef target) {
     int ord=termsIndex.getOrd(doc);
-    if (ord==0) {
+    if (ord==-1) {
       target.length = 0;
       return false;
     }

@@ -77,7 +77,7 @@ public abstract class DocTermsIndexDocValues extends FunctionValues {
   @Override
   public String strVal(int doc) {
     int ord=termsIndex.getOrd(doc);
-    if (ord==0) return null;
+    if (ord==-1) return null;
     termsIndex.lookup(ord, spare);
     UnicodeUtil.UTF8toUTF16(spare, spareChars);
     return spareChars.toString();

@@ -149,8 +149,12 @@ public abstract class DocTermsIndexDocValues extends FunctionValues {
       @Override
       public void fillValue(int doc) {
         int ord = termsIndex.getOrd(doc);
-        mval.exists = ord != 0;
-        mval.value = termsIndex.lookup(ord, mval.value);
+        mval.exists = ord != -1;
+        if (!mval.exists) {
+          mval.value.length = 0;
+        } else {
+          mval.value = termsIndex.lookup(ord, mval.value);
+        }
       }
     };
   }
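DocTermsIndexDocValues is where the ord convention surfaces to function queries: a document with ord -1 reports exists() == false and yields null (or an emptied scratch value) instead of the term at the old reserved ord 0. A sketch of that contract over a toy ord-based index (assumed shape, not the real FunctionValues API):

```java
// Toy version of the exists()/strVal() contract over an ord-based index:
// ord -1 means the document has no value, so exists() is false and strVal() null.
class MissingValueSketch {
  static final String[] TERMS = {"bar", "foo"};
  static final int[] DOC_TO_ORD = {1, -1, 0};

  static boolean exists(int doc) { return DOC_TO_ORD[doc] != -1; }

  static String strVal(int doc) {
    int ord = DOC_TO_ORD[doc];
    return ord == -1 ? null : TERMS[ord];
  }

  public static void main(String[] args) {
    for (int doc = 0; doc < 3; doc++) {
      System.out.println("doc " + doc + " exists=" + exists(doc) + " value=" + strVal(doc));
    }
  }
}
```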