fixed several merge problems

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/docvalues@1131275 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Simon Willnauer 2011-06-03 22:40:42 +00:00
commit bfc324a4db
44 changed files with 2191 additions and 1457 deletions

View File

@ -28,14 +28,15 @@ use strict;
use warnings;
use Getopt::Long;
use LWP::Simple;
require LWP::Parallel::UserAgent;
my $version;
my $interval = 300;
my $quiet = 0;
my $result = GetOptions ("version=s" => \$version, "interval=i" => \$interval, "quiet" => \$quiet);
my $result = GetOptions ("version=s" => \$version, "interval=i" => \$interval);
my $usage = "$0 -v version [ -i interval (seconds; default: 300)] [ -quiet ]";
my $usage = "$0 -v version [ -i interval (seconds; default: 300) ]";
unless ($result) {
print STDERR $usage;
@ -47,26 +48,71 @@ unless (defined($version) && $version =~ /\d+(?:\.\d+)+/) {
}
my $previously_selected = select STDOUT;
$| = 1; # turn off buffering of STDOUT, so "."s are printed immediately
$| = 1; # turn off buffering of STDOUT, so status is printed immediately
select $previously_selected;
my $apache_backup_url = "http://www.apache.org/dist//lucene/java/$version/lucene-$version.tgz.asc";
my $maven_url = "http://repo2.maven.org/maven2/org/apache/lucene/lucene-core/$version/lucene-core-$version.pom";
my $apache_url_suffix = "lucene/java/$version/lucene-$version.tgz.asc";
my $apache_mirrors_list_url = "http://www.apache.org/mirrors/";
my $maven_url = "http://repo2.maven.org/maven2/org/apache/lucene/lucene-core/$version/lucene-core-$version.pom.asc";
my $apache_available = 0;
my $maven_available = 0;
until ($apache_available && $maven_available) {
unless ($apache_available) {
my $content = get($apache_backup_url);
$apache_available = defined($content);
print "\nDownloadable: $apache_backup_url\n" if ($apache_available);
my @apache_mirrors = ();
my $apache_mirrors_list_page = get($apache_mirrors_list_url);
if (defined($apache_mirrors_list_page)) {
#<TR>
# <TD ALIGN=RIGHT><A HREF="http://apache.dattatec.com/">apache.dattatec.com</A>&nbsp;&nbsp;<A HREF="http://apache.dattatec.com/">@</A></TD>
#
# <TD>http</TD>
# <TD ALIGN=RIGHT>8 hours<BR><IMG BORDER=1 SRC="icons/mms14.gif" ALT=""></TD>
# <TD ALIGN=RIGHT>5 hours<BR><IMG BORDER=1 SRC="icons/mms14.gif" ALT=""></TD>
# <TD>ok</TD>
#</TR>
while ($apache_mirrors_list_page =~ m~<TR>(.*?)</TR>~gis) {
my $mirror_entry = $1;
next unless ($mirror_entry =~ m~<TD>\s*ok\s*</TD>\s*$~i); # skip mirrors with problems
if ($mirror_entry =~ m~<A\s+HREF\s*=\s*"([^"]+)"\s*>~i) {
my $mirror_url = $1;
push @apache_mirrors, "$mirror_url/$apache_url_suffix";
}
}
} else {
print STDERR "Error fetching Apache mirrors list $apache_mirrors_list_url";
exit(1);
}
my $num_apache_mirrors = $#apache_mirrors;
print "# Apache Mirrors: $num_apache_mirrors\n";
while (1) {
unless ($maven_available) {
my $content = get($maven_url);
$maven_available = defined($content);
print "\nDownloadable: $maven_url\n" if ($maven_available);
}
print "." unless ($quiet);
sleep($interval) unless ($apache_available && $maven_available);
@apache_mirrors = &check_mirrors;
my $num_downloadable_apache_mirrors
= $num_apache_mirrors - $#apache_mirrors;
print "Available: ";
print "Maven Central; " if ($maven_available);
printf "%d/%d Apache Mirrors (%0.1f%%)\n", $num_downloadable_apache_mirrors,
$num_apache_mirrors, ($num_downloadable_apache_mirrors*100/$num_apache_mirrors);
last if ($maven_available && $num_downloadable_apache_mirrors == $num_apache_mirrors);
sleep($interval);
}
# Probes every URL in the file-level @apache_mirrors list in parallel and
# returns the subset of mirror URLs that do NOT yet serve the release
# artifact (i.e. whose fetch did not succeed).
sub check_mirrors {
  my $ua = LWP::Parallel::UserAgent->new();
  $ua->timeout(30);
  $ua->redirect(1); # follow redirects
  for my $mirror_url (@apache_mirrors) {
    $ua->register($mirror_url);
  }
  my $entries = $ua->wait();

  my @pending_mirrors = ();
  for my $key (keys %$entries) {
    my $response = $entries->{$key}->response;
    next if $response->is_success;
    push @pending_mirrors, $response->request->uri;
  }
  return @pending_mirrors;
}

View File

@ -181,9 +181,9 @@ final class TermsHashPerField extends InvertedDocConsumerPerField {
// term text into textStart address
// Get the text & hash of this term.
int termID;
try{
termID = bytesHash.add(termBytesRef, termAtt.fillBytesRef());
}catch (MaxBytesLengthExceededException e) {
try {
termID = bytesHash.add(termBytesRef, termAtt.fillBytesRef());
} catch (MaxBytesLengthExceededException e) {
// Not enough room in current block
// Just skip this term, to remain as robust as
// possible during indexing. A TokenFilter

View File

@ -1,134 +0,0 @@
package org.apache.lucene.search.grouping;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.util.BytesRef;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
/**
* A collector that collects all groups that match the
* query. Only the group value is collected, and the order
* is undefined. This collector does not determine
* the most relevant document of a group.
*
* <p/>
* Implementation detail: an int hash set (SentinelIntSet)
* is used to detect if a group is already added to the
* total count. For each segment the int set is cleared and filled
* with previous counted groups that occur in the new
* segment.
*
* @lucene.experimental
*/
public class AllGroupsCollector extends Collector {

  private static final int DEFAULT_INITIAL_SIZE = 128;

  private final String groupField;
  private final SentinelIntSet ordSet;
  private final List<BytesRef> groups;
  private final BytesRef spareBytesRef = new BytesRef();
  private FieldCache.DocTermsIndex index;

  /**
   * Expert: creates the collector with an explicit initial capacity.
   *
   * @param groupField  the field to group by
   * @param initialSize initial allocation size of the internal int set and
   *                    group list; should roughly match the expected number
   *                    of unique groups (heap usage is 4 bytes * initialSize)
   */
  public AllGroupsCollector(String groupField, int initialSize) {
    this.groupField = groupField;
    this.ordSet = new SentinelIntSet(initialSize, -1);
    this.groups = new ArrayList<BytesRef>(initialSize);
  }

  /**
   * Creates the collector, sizing the internal int set and group list to
   * the default of 128 entries.
   *
   * @param groupField the field to group by
   */
  public AllGroupsCollector(String groupField) {
    this(groupField, DEFAULT_INITIAL_SIZE);
  }

  public void setScorer(Scorer scorer) throws IOException {
    // Scores play no role in gathering group values.
  }

  public void collect(int doc) throws IOException {
    final int ord = index.getOrd(doc);
    if (ordSet.exists(ord)) {
      return; // group already counted
    }
    ordSet.put(ord);
    // Ord 0 means the doc has no value for the group field; record the null group.
    groups.add(ord == 0 ? null : index.lookup(ord, new BytesRef()));
  }

  /**
   * Returns the total number of groups for the executed search.
   * This is a convenience method; <pre>getGroups().size()</pre> has the same effect.
   *
   * @return the total number of groups for the executed search
   */
  public int getGroupCount() {
    return groups.size();
  }

  /**
   * Returns the group values collected so far, in undefined order: one
   * {@link BytesRef} per group that matched the query.
   *
   * @return the group values
   */
  public Collection<BytesRef> getGroups() {
    return groups;
  }

  public void setNextReader(IndexReader.AtomicReaderContext context) throws IOException {
    index = FieldCache.DEFAULT.getTermsIndex(context.reader, groupField);

    // Refill the int set with this segment's ords for all previously counted groups.
    ordSet.clear();
    for (BytesRef countedGroup : groups) {
      final int segmentOrd = index.binarySearchLookup(countedGroup, spareBytesRef);
      if (segmentOrd >= 0) {
        ordSet.put(segmentOrd);
      }
    }
  }

  public boolean acceptsDocsOutOfOrder() {
    return true;
  }
}

View File

@ -212,7 +212,7 @@ public class BlockGroupingCollector extends Collector {
// Swap pending scores
final float[] savScores = og.scores;
og.scores = pendingSubScores;
pendingSubScores = og.scores;
pendingSubScores = savScores;
}
og.readerContext = currentReaderContext;
//og.groupOrd = lastGroupOrd;

View File

@ -1,367 +0,0 @@
package org.apache.lucene.search.grouping;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
import java.util.HashMap;
import java.util.TreeSet;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.BytesRef;
/** FirstPassGroupingCollector is the first of two passes necessary
* to collect grouped hits. This pass gathers the top N sorted
* groups.
*
* <p>See {@link org.apache.lucene.search.grouping} for more
* details including a full code example.</p>
*
* @lucene.experimental
*/
public class FirstPassGroupingCollector extends Collector {
private final String groupField;
private final Sort groupSort;
private final FieldComparator[] comparators;
// +1 / -1 multiplier per sort field (from SortField.getReverse()).
private final int[] reversed;
private final int topNGroups;
private final HashMap<BytesRef, CollectedSearchGroup> groupMap;
private final BytesRef scratchBytesRef = new BytesRef();
// Index of the last comparator; used to detect "all comparators tied".
private final int compIDXEnd;
// Set once we reach topNGroups unique groups:
private TreeSet<CollectedSearchGroup> orderedGroups;
private int docBase;
// Comparator slot reserved for evaluating candidate docs; swapped with a
// group's slot when that group is updated (see collect()).
private int spareSlot;
private FieldCache.DocTermsIndex index;
/**
* Create the first pass collector.
*
* @param groupField The field used to group
* documents. This field must be single-valued and
* indexed (FieldCache is used to access its value
* per-document).
* @param groupSort The {@link Sort} used to sort the
* groups. The top sorted document within each group
* according to groupSort, determines how that group
* sorts against other groups. This must be non-null,
* ie, if you want to groupSort by relevance use
* Sort.RELEVANCE.
* @param topNGroups How many top groups to keep.
*/
public FirstPassGroupingCollector(String groupField, Sort groupSort, int topNGroups) throws IOException {
if (topNGroups < 1) {
throw new IllegalArgumentException("topNGroups must be >= 1 (got " + topNGroups + ")");
}
this.groupField = groupField;
// TODO: allow null groupSort to mean "by relevance",
// and specialize it?
this.groupSort = groupSort;
this.topNGroups = topNGroups;
final SortField[] sortFields = groupSort.getSort();
comparators = new FieldComparator[sortFields.length];
compIDXEnd = comparators.length - 1;
reversed = new int[sortFields.length];
for (int i = 0; i < sortFields.length; i++) {
final SortField sortField = sortFields[i];
// use topNGroups + 1 so we have a spare slot to use for comparing (tracked by this.spareSlot):
comparators[i] = sortField.getComparator(topNGroups + 1, i);
reversed[i] = sortField.getReverse() ? -1 : 1;
}
spareSlot = topNGroups;
groupMap = new HashMap<BytesRef, CollectedSearchGroup>(topNGroups);
}
/** Returns top groups, starting from offset. This may
* return null, if no groups were collected, or if the
* number of unique groups collected is <= offset. */
public Collection<SearchGroup> getTopGroups(int groupOffset, boolean fillFields) {
//System.out.println("FP.getTopGroups groupOffset=" + groupOffset + " fillFields=" + fillFields + " groupMap.size()=" + groupMap.size());
if (groupOffset < 0) {
throw new IllegalArgumentException("groupOffset must be >= 0 (got " + groupOffset + ")");
}
if (groupMap.size() <= groupOffset) {
return null;
}
if (orderedGroups == null) {
buildSortedSet();
}
final Collection<SearchGroup> result = new ArrayList<SearchGroup>();
int upto = 0;
final int sortFieldCount = groupSort.getSort().length;
for(CollectedSearchGroup group : orderedGroups) {
if (upto++ < groupOffset) {
continue;
}
//System.out.println(" group=" + (group.groupValue == null ? "null" : group.groupValue.utf8ToString()));
SearchGroup searchGroup = new SearchGroup();
searchGroup.groupValue = group.groupValue;
if (fillFields) {
searchGroup.sortValues = new Comparable[sortFieldCount];
for(int sortFieldIDX=0;sortFieldIDX<sortFieldCount;sortFieldIDX++) {
searchGroup.sortValues[sortFieldIDX] = comparators[sortFieldIDX].value(group.comparatorSlot);
}
}
result.add(searchGroup);
}
//System.out.println(" return " + result.size() + " groups");
return result;
}
/** Returns the field this collector groups by. */
public String getGroupField() {
return groupField;
}
@Override
public void setScorer(Scorer scorer) throws IOException {
// Forward the scorer to every comparator.
for (FieldComparator comparator : comparators) {
comparator.setScorer(scorer);
}
}
@Override
public void collect(int doc) throws IOException {
//System.out.println("FP.collect doc=" + doc);
// If orderedGroups != null we already have collected N groups and
// can short circuit by comparing this document to the bottom group,
// without having to find what group this document belongs to.
// Even if this document belongs to a group in the top N, we'll know that
// we don't have to update that group.
// Downside: if the number of unique groups is very low, this is
// wasted effort as we will most likely be updating an existing group.
if (orderedGroups != null) {
for (int compIDX = 0;; compIDX++) {
final int c = reversed[compIDX] * comparators[compIDX].compareBottom(doc);
if (c < 0) {
// Definitely not competitive. So don't even bother to continue
return;
} else if (c > 0) {
// Definitely competitive.
break;
} else if (compIDX == compIDXEnd) {
// Here c=0. If we're at the last comparator, this doc is not
// competitive, since docs are visited in doc Id order, which means
// this doc cannot compete with any other document in the queue.
return;
}
}
}
// TODO: should we add option to mean "ignore docs that
// don't have the group field" (instead of stuffing them
// under null group)?
final int ord = index.getOrd(doc);
//System.out.println(" ord=" + ord);
// ord 0 means the doc has no value for the group field -> null group.
final BytesRef br = ord == 0 ? null : index.lookup(ord, scratchBytesRef);
//System.out.println(" group=" + (br == null ? "null" : br.utf8ToString()));
final CollectedSearchGroup group = groupMap.get(br);
if (group == null) {
// First time we are seeing this group, or, we've seen
// it before but it fell out of the top N and is now
// coming back
if (groupMap.size() < topNGroups) {
// Still in startup transient: we have not
// seen enough unique groups to start pruning them;
// just keep collecting them
// Add a new CollectedSearchGroup:
CollectedSearchGroup sg = new CollectedSearchGroup();
sg.groupValue = ord == 0 ? null : new BytesRef(scratchBytesRef);
sg.comparatorSlot = groupMap.size();
sg.topDoc = docBase + doc;
for (FieldComparator fc : comparators) {
fc.copy(sg.comparatorSlot, doc);
}
groupMap.put(sg.groupValue, sg);
if (groupMap.size() == topNGroups) {
// End of startup transient: we now have max
// number of groups; from here on we will drop
// bottom group when we insert new one:
buildSortedSet();
}
return;
}
// We already tested that the document is competitive, so replace
// the bottom group with this new group.
// java 6-only: final CollectedSearchGroup bottomGroup = orderedGroups.pollLast();
final CollectedSearchGroup bottomGroup = orderedGroups.last();
orderedGroups.remove(bottomGroup);
assert orderedGroups.size() == topNGroups -1;
groupMap.remove(bottomGroup.groupValue);
// reuse the removed CollectedSearchGroup
if (br == null) {
bottomGroup.groupValue = null;
} else if (bottomGroup.groupValue != null) {
bottomGroup.groupValue.copy(br);
} else {
bottomGroup.groupValue = new BytesRef(br);
}
bottomGroup.topDoc = docBase + doc;
for (FieldComparator fc : comparators) {
fc.copy(bottomGroup.comparatorSlot, doc);
}
groupMap.put(bottomGroup.groupValue, bottomGroup);
orderedGroups.add(bottomGroup);
assert orderedGroups.size() == topNGroups;
// The sorted-set membership may have changed the bottom group; re-point
// all comparators at the new last group's slot.
final int lastComparatorSlot = orderedGroups.last().comparatorSlot;
for (FieldComparator fc : comparators) {
fc.setBottom(lastComparatorSlot);
}
return;
}
// Update existing group:
for (int compIDX = 0;; compIDX++) {
final FieldComparator fc = comparators[compIDX];
fc.copy(spareSlot, doc);
final int c = reversed[compIDX] * fc.compare(group.comparatorSlot, spareSlot);
if (c < 0) {
// Definitely not competitive.
return;
} else if (c > 0) {
// Definitely competitive; set remaining comparators:
for (int compIDX2=compIDX+1; compIDX2<comparators.length; compIDX2++) {
comparators[compIDX2].copy(spareSlot, doc);
}
break;
} else if (compIDX == compIDXEnd) {
// Here c=0. If we're at the last comparator, this doc is not
// competitive, since docs are visited in doc Id order, which means
// this doc cannot compete with any other document in the queue.
return;
}
}
// Remove before updating the group since lookup is done via comparators
// TODO: optimize this
final CollectedSearchGroup prevLast;
if (orderedGroups != null) {
prevLast = orderedGroups.last();
orderedGroups.remove(group);
assert orderedGroups.size() == topNGroups-1;
} else {
prevLast = null;
}
group.topDoc = docBase + doc;
// Swap slots
final int tmp = spareSlot;
spareSlot = group.comparatorSlot;
group.comparatorSlot = tmp;
// Re-add the changed group
if (orderedGroups != null) {
orderedGroups.add(group);
assert orderedGroups.size() == topNGroups;
final CollectedSearchGroup newLast = orderedGroups.last();
// If we changed the value of the last group, or changed which group was last, then update bottom:
if (group == newLast || prevLast != newLast) {
for (FieldComparator fc : comparators) {
fc.setBottom(newLast.comparatorSlot);
}
}
}
}
// Builds orderedGroups (sorted by groupSort, ties broken by topDoc) from
// groupMap, and points every comparator's bottom at the last group's slot.
private void buildSortedSet() {
final Comparator<CollectedSearchGroup> comparator = new Comparator<CollectedSearchGroup>() {
public int compare(CollectedSearchGroup o1, CollectedSearchGroup o2) {
for (int compIDX = 0;; compIDX++) {
FieldComparator fc = comparators[compIDX];
final int c = reversed[compIDX] * fc.compare(o1.comparatorSlot, o2.comparatorSlot);
if (c != 0) {
return c;
} else if (compIDX == compIDXEnd) {
// All sort fields tied: break the tie by doc id.
return o1.topDoc - o2.topDoc;
}
}
}
};
orderedGroups = new TreeSet<CollectedSearchGroup>(comparator);
orderedGroups.addAll(groupMap.values());
assert orderedGroups.size() > 0;
for (FieldComparator fc : comparators) {
fc.setBottom(orderedGroups.last().comparatorSlot);
}
}
@Override
public boolean acceptsDocsOutOfOrder() {
// collect() relies on docs arriving in increasing doc Id order to break ties.
return false;
}
@Override
public void setNextReader(AtomicReaderContext readerContext) throws IOException {
docBase = readerContext.docBase;
// Re-resolve the group field's terms index and each comparator for the new segment.
index = FieldCache.DEFAULT.getTermsIndex(readerContext.reader, groupField);
for (int i=0; i<comparators.length; i++) {
comparators[i] = comparators[i].setNextReader(readerContext);
}
}
}
// A SearchGroup being tracked during first-pass collection.
class CollectedSearchGroup extends SearchGroup {
// Absolute doc id (docBase + doc) of the group's current top document.
int topDoc;
// Which comparator slot holds this group's sort values.
int comparatorSlot;
}

View File

@ -1,172 +0,0 @@
package org.apache.lucene.search.grouping;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopDocsCollector;
import org.apache.lucene.search.TopFieldCollector;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.util.BytesRef;
/**
* SecondPassGroupingCollector is the second of two passes
* necessary to collect grouped docs. This pass gathers the
* top N documents per top group computed from the
* first pass.
*
* <p>See {@link org.apache.lucene.search.grouping} for more
* details including a full code example.</p>
*
* @lucene.experimental
*/
public class SecondPassGroupingCollector extends Collector {
private final HashMap<BytesRef, SearchGroupDocs> groupMap;
private FieldCache.DocTermsIndex index;
private final String groupField;
private final int maxDocsPerGroup;
// Per-segment map from a group's term ord to its slot in groupDocs;
// rebuilt in setNextReader for each segment.
private final SentinelIntSet ordSet;
private final SearchGroupDocs[] groupDocs;
private final BytesRef spareBytesRef = new BytesRef();
private final Collection<SearchGroup> groups;
private final Sort withinGroupSort;
private final Sort groupSort;
private int totalHitCount;
private int totalGroupedHitCount;
/**
* Creates the second-pass collector.
*
* @param groupField the field used to group documents
* @param groups the top groups (from the first pass) to gather docs for; must be non-empty
* @param groupSort sort used to order the groups (recorded into the result)
* @param withinGroupSort sort for docs inside each group; null means sort by score
* @param maxDocsPerGroup how many top docs to keep per group
* @param getScores whether per-doc scores are computed (field-sorted case)
* @param getMaxScores whether max score is tracked (field-sorted case)
* @param fillSortFields whether sort field values are filled in per-group results
*/
public SecondPassGroupingCollector(String groupField, Collection<SearchGroup> groups, Sort groupSort, Sort withinGroupSort,
int maxDocsPerGroup, boolean getScores, boolean getMaxScores, boolean fillSortFields)
throws IOException {
//System.out.println("SP init");
if (groups.size() == 0) {
throw new IllegalArgumentException("no groups to collect (groups.size() is 0)");
}
this.groupSort = groupSort;
this.withinGroupSort = withinGroupSort;
this.groups = groups;
this.groupField = groupField;
this.maxDocsPerGroup = maxDocsPerGroup;
groupMap = new HashMap<BytesRef, SearchGroupDocs>(groups.size());
for (SearchGroup group : groups) {
//System.out.println(" prep group=" + (group.groupValue == null ? "null" : group.groupValue.utf8ToString()));
final TopDocsCollector collector;
if (withinGroupSort == null) {
// Sort by score
collector = TopScoreDocCollector.create(maxDocsPerGroup, true);
} else {
// Sort by fields
collector = TopFieldCollector.create(withinGroupSort, maxDocsPerGroup, fillSortFields, getScores, getMaxScores, true);
}
groupMap.put(group.groupValue,
new SearchGroupDocs(group.groupValue,
collector));
}
ordSet = new SentinelIntSet(groupMap.size(), -1);
groupDocs = new SearchGroupDocs[ordSet.keys.length];
}
@Override
public void setScorer(Scorer scorer) throws IOException {
// Forward the scorer to each group's internal collector.
for (SearchGroupDocs group : groupMap.values()) {
group.collector.setScorer(scorer);
}
}
@Override
public void collect(int doc) throws IOException {
// Count every hit; route the doc to its group's collector only when its
// group ord is one of the requested groups in this segment.
final int slot = ordSet.find(index.getOrd(doc));
//System.out.println("SP.collect doc=" + doc + " slot=" + slot);
totalHitCount++;
if (slot >= 0) {
totalGroupedHitCount++;
groupDocs[slot].collector.collect(doc);
}
}
@Override
public void setNextReader(AtomicReaderContext readerContext) throws IOException {
//System.out.println("SP.setNextReader");
for (SearchGroupDocs group : groupMap.values()) {
group.collector.setNextReader(readerContext);
}
index = FieldCache.DEFAULT.getTermsIndex(readerContext.reader, groupField);
// Rebuild ordSet
ordSet.clear();
for (SearchGroupDocs group : groupMap.values()) {
//System.out.println(" group=" + (group.groupValue == null ? "null" : group.groupValue.utf8ToString()));
// Null group value maps to ord 0 (docs with no value for the group field).
int ord = group.groupValue == null ? 0 : index.binarySearchLookup(group.groupValue, spareBytesRef);
if (ord >= 0) {
groupDocs[ordSet.put(ord)] = group;
}
}
}
@Override
public boolean acceptsDocsOutOfOrder() {
return false;
}
/**
* Returns the grouped results, one {@link GroupDocs} per requested group in
* the iteration order of the supplied groups; within each group the first
* withinGroupOffset docs are skipped.
*/
public TopGroups getTopGroups(int withinGroupOffset) {
final GroupDocs[] groupDocsResult = new GroupDocs[groups.size()];
int groupIDX = 0;
for(SearchGroup group : groups) {
final SearchGroupDocs groupDocs = groupMap.get(group.groupValue);
final TopDocs topDocs = groupDocs.collector.topDocs(withinGroupOffset, maxDocsPerGroup);
groupDocsResult[groupIDX++] = new GroupDocs(topDocs.getMaxScore(),
topDocs.totalHits,
topDocs.scoreDocs,
groupDocs.groupValue,
group.sortValues);
}
return new TopGroups(groupSort.getSort(),
withinGroupSort == null ? null : withinGroupSort.getSort(),
totalHitCount, totalGroupedHitCount, groupDocsResult);
}
}
// TODO: merge with SearchGroup or not?
// ad: don't need to build a new hashmap
// disad: blows up the size of SearchGroup if we need many of them, and couples implementations
// Pairs a group value with the collector gathering that group's top docs.
class SearchGroupDocs {
public final BytesRef groupValue;
public final TopDocsCollector collector;
public SearchGroupDocs(BytesRef groupValue, TopDocsCollector collector) {
this.groupValue = groupValue;
this.collector = collector;
}
}

View File

@ -26,7 +26,7 @@ import java.io.IOException;
/**
* Concrete implementation of {@link AbstractFirstPassGroupingCollector} that groups based on
* field values and more specifically uses {@link org.apache.lucene.search.FieldCache.DocTerms}
* field values and more specifically uses {@link org.apache.lucene.search.FieldCache.DocTermsIndex}
* to collect groups.
*
* @lucene.experimental

View File

@ -27,7 +27,7 @@ import java.util.Collection;
/**
* Concrete implementation of {@link AbstractSecondPassGroupingCollector} that groups based on
* field values and more specifically uses {@link org.apache.lucene.search.FieldCache.DocTerms}
* field values and more specifically uses {@link org.apache.lucene.search.FieldCache.DocTermsIndex}
* to collect grouped docs.
*
* @lucene.experimental

View File

@ -27,7 +27,7 @@ import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
public class AllGroupsCollectorTest extends LuceneTestCase {
public class TermAllGroupsCollectorTest extends LuceneTestCase {
public void testTotalGroupCount() throws Exception {

View File

@ -154,7 +154,10 @@ public class TestGrouping extends LuceneTestCase {
final BytesRef group;
final BytesRef sort1;
final BytesRef sort2;
// content must be "realN ..."
final String content;
float score;
float score2;
public GroupDoc(int id, BytesRef group, BytesRef sort1, BytesRef sort2, String content) {
this.id = id;
@ -167,16 +170,21 @@ public class TestGrouping extends LuceneTestCase {
private Sort getRandomSort() {
final List<SortField> sortFields = new ArrayList<SortField>();
if (random.nextBoolean()) {
if (random.nextInt(7) == 2) {
sortFields.add(SortField.FIELD_SCORE);
} else {
if (random.nextBoolean()) {
if (random.nextBoolean()) {
sortFields.add(new SortField("sort1", SortField.STRING, random.nextBoolean()));
} else {
sortFields.add(new SortField("sort2", SortField.STRING, random.nextBoolean()));
}
} else if (random.nextBoolean()) {
sortFields.add(new SortField("sort1", SortField.STRING, random.nextBoolean()));
} else {
sortFields.add(new SortField("sort2", SortField.STRING, random.nextBoolean()));
}
} else if (random.nextBoolean()) {
sortFields.add(new SortField("sort1", SortField.STRING, random.nextBoolean()));
sortFields.add(new SortField("sort2", SortField.STRING, random.nextBoolean()));
}
// Break ties:
sortFields.add(new SortField("id", SortField.INT));
return new Sort(sortFields.toArray(new SortField[sortFields.size()]));
}
@ -188,7 +196,15 @@ public class TestGrouping extends LuceneTestCase {
public int compare(GroupDoc d1, GroupDoc d2) {
for(SortField sf : sortFields) {
final int cmp;
if (sf.getField().equals("sort1")) {
if (sf.getType() == SortField.SCORE) {
if (d1.score > d2.score) {
cmp = -1;
} else if (d1.score < d2.score) {
cmp = 1;
} else {
cmp = 0;
}
} else if (sf.getField().equals("sort1")) {
cmp = d1.sort1.compareTo(d2.sort1);
} else if (sf.getField().equals("sort2")) {
cmp = d1.sort2.compareTo(d2.sort2);
@ -213,7 +229,9 @@ public class TestGrouping extends LuceneTestCase {
for(int fieldIDX=0;fieldIDX<sortFields.length;fieldIDX++) {
final Comparable<?> c;
final SortField sf = sortFields[fieldIDX];
if (sf.getField().equals("sort1")) {
if (sf.getType() == SortField.SCORE) {
c = new Float(d.score);
} else if (sf.getField().equals("sort1")) {
c = d.sort1;
} else if (sf.getField().equals("sort2")) {
c = d.sort2;
@ -237,17 +255,17 @@ public class TestGrouping extends LuceneTestCase {
*/
private TopGroups<BytesRef> slowGrouping(GroupDoc[] groupDocs,
String searchTerm,
boolean fillFields,
boolean getScores,
boolean getMaxScores,
boolean doAllGroups,
Sort groupSort,
Sort docSort,
int topNGroups,
int docsPerGroup,
int groupOffset,
int docOffset) {
String searchTerm,
boolean fillFields,
boolean getScores,
boolean getMaxScores,
boolean doAllGroups,
Sort groupSort,
Sort docSort,
int topNGroups,
int docsPerGroup,
int groupOffset,
int docOffset) {
final Comparator<GroupDoc> groupSortComp = getComparator(groupSort);
@ -262,11 +280,11 @@ public class TestGrouping extends LuceneTestCase {
//System.out.println("TEST: slowGrouping");
for(GroupDoc d : groupDocs) {
// TODO: would be better to filter by searchTerm before sorting!
if (!d.content.equals(searchTerm)) {
if (!d.content.startsWith(searchTerm)) {
continue;
}
totalHitCount++;
//System.out.println(" match id=" + d.id);
//System.out.println(" match id=" + d.id + " score=" + d.score);
if (doAllGroups) {
if (!knownGroups.contains(d.group)) {
@ -312,9 +330,9 @@ public class TestGrouping extends LuceneTestCase {
final GroupDoc d = docs.get(docIDX);
final FieldDoc fd;
if (fillFields) {
fd = new FieldDoc(d.id, 0.0f, fillFields(d, docSort));
fd = new FieldDoc(d.id, getScores ? d.score : Float.NaN, fillFields(d, docSort));
} else {
fd = new FieldDoc(d.id, 0.0f);
fd = new FieldDoc(d.id, getScores ? d.score : Float.NaN);
}
hits[docIDX-docOffset] = fd;
}
@ -373,7 +391,7 @@ public class TestGrouping extends LuceneTestCase {
doc.add(newField("sort1", groupValue.sort1.utf8ToString(), Field.Index.NOT_ANALYZED));
doc.add(newField("sort2", groupValue.sort2.utf8ToString(), Field.Index.NOT_ANALYZED));
doc.add(new NumericField("id").setIntValue(groupValue.id));
doc.add(newField("content", groupValue.content, Field.Index.NOT_ANALYZED));
doc.add(newField("content", groupValue.content, Field.Index.ANALYZED));
//System.out.println("TEST: doc content=" + groupValue.content + " group=" + (groupValue.group == null ? "null" : groupValue.group.utf8ToString()) + " sort1=" + groupValue.sort1.utf8ToString() + " id=" + groupValue.id);
}
// So we can pull filter marking last doc in block:
@ -421,7 +439,22 @@ public class TestGrouping extends LuceneTestCase {
groups.add(new BytesRef(_TestUtil.randomRealisticUnicodeString(random)));
//groups.add(new BytesRef(_TestUtil.randomSimpleString(random)));
}
final String[] contentStrings = new String[] {"a", "b", "c", "d"};
final String[] contentStrings = new String[_TestUtil.nextInt(random, 2, 20)];
if (VERBOSE) {
System.out.println("TEST: create fake content");
}
for(int contentIDX=0;contentIDX<contentStrings.length;contentIDX++) {
final StringBuilder sb = new StringBuilder();
sb.append("real" + random.nextInt(3)).append(' ');
final int fakeCount = random.nextInt(10);
for(int fakeIDX=0;fakeIDX<fakeCount;fakeIDX++) {
sb.append("fake ");
}
contentStrings[contentIDX] = sb.toString();
if (VERBOSE) {
System.out.println(" content=" + sb.toString());
}
}
Directory dir = newDirectory();
RandomIndexWriter w = new RandomIndexWriter(
@ -440,7 +473,7 @@ public class TestGrouping extends LuceneTestCase {
Field sort2 = newField("sort2", "", Field.Index.NOT_ANALYZED);
doc.add(sort2);
docNoGroup.add(sort2);
Field content = newField("content", "", Field.Index.NOT_ANALYZED);
Field content = newField("content", "", Field.Index.ANALYZED);
doc.add(content);
docNoGroup.add(content);
NumericField id = new NumericField("id");
@ -480,40 +513,96 @@ public class TestGrouping extends LuceneTestCase {
}
}
final GroupDoc[] groupDocsByID = new GroupDoc[groupDocs.length];
System.arraycopy(groupDocs, 0, groupDocsByID, 0, groupDocs.length);
final IndexReader r = w.getReader();
w.close();
// Build 2nd index, where docs are added in blocks by
// group, so we can use single pass collector
final Directory dir2 = newDirectory();
final IndexReader r2 = getDocBlockReader(dir2, groupDocs);
final Filter lastDocInBlock = new CachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("groupend", "x"))));
final IndexSearcher s = new IndexSearcher(r);
final IndexSearcher s2 = new IndexSearcher(r2);
// NOTE: intentional but temporary field cache insanity!
final int[] docIDToID = FieldCache.DEFAULT.getInts(r, "id");
final int[] docIDToID2 = FieldCache.DEFAULT.getInts(r2, "id");
IndexReader r2 = null;
Directory dir2 = null;
try {
final IndexSearcher s = new IndexSearcher(r);
for(int contentID=0;contentID<3;contentID++) {
final ScoreDoc[] hits = s.search(new TermQuery(new Term("content", "real"+contentID)), numDocs).scoreDocs;
for(ScoreDoc hit : hits) {
final GroupDoc gd = groupDocs[docIDToID[hit.doc]];
assertTrue(gd.score == 0.0);
gd.score = hit.score;
assertEquals(gd.id, docIDToID[hit.doc]);
//System.out.println(" score=" + hit.score + " id=" + docIDToID[hit.doc]);
}
}
for(GroupDoc gd : groupDocs) {
assertTrue(gd.score != 0.0);
}
// Build 2nd index, where docs are added in blocks by
// group, so we can use single pass collector
dir2 = newDirectory();
r2 = getDocBlockReader(dir2, groupDocs);
final Filter lastDocInBlock = new CachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("groupend", "x"))));
final int[] docIDToID2 = FieldCache.DEFAULT.getInts(r2, "id");
final IndexSearcher s2 = new IndexSearcher(r2);
// Reader2 only increases maxDoc() vs reader, which
// means a monotonic shift in scores, so we can
// reliably remap them w/ Map:
final Map<Float,Float> scoreMap = new HashMap<Float,Float>();
// Tricky: must separately set .score2, because the doc
// block index was created with possible deletions!
for(int contentID=0;contentID<3;contentID++) {
//System.out.println("term=real" + contentID + " dfold=" + s.docFreq(new Term("content", "real"+contentID)) +
//" dfnew=" + s2.docFreq(new Term("content", "real"+contentID)));
final ScoreDoc[] hits = s2.search(new TermQuery(new Term("content", "real"+contentID)), numDocs).scoreDocs;
for(ScoreDoc hit : hits) {
final GroupDoc gd = groupDocsByID[docIDToID2[hit.doc]];
assertTrue(gd.score2 == 0.0);
gd.score2 = hit.score;
assertEquals(gd.id, docIDToID2[hit.doc]);
//System.out.println(" score=" + hit.score + " id=" + docIDToID2[hit.doc]);
scoreMap.put(gd.score, gd.score2);
}
}
for(int searchIter=0;searchIter<100;searchIter++) {
if (VERBOSE) {
System.out.println("TEST: searchIter=" + searchIter);
}
final String searchTerm = contentStrings[random.nextInt(contentStrings.length)];
final String searchTerm = "real" + random.nextInt(3);
final boolean fillFields = random.nextBoolean();
final boolean getScores = random.nextBoolean();
boolean getScores = random.nextBoolean();
final boolean getMaxScores = random.nextBoolean();
final Sort groupSort = getRandomSort();
//final Sort groupSort = new Sort(new SortField[] {new SortField("sort1", SortField.STRING), new SortField("id", SortField.INT)});
// TODO: also test null (= sort by relevance)
final Sort docSort = getRandomSort();
for(SortField sf : docSort.getSort()) {
if (sf.getType() == SortField.SCORE) {
getScores = true;
}
}
for(SortField sf : groupSort.getSort()) {
if (sf.getType() == SortField.SCORE) {
getScores = true;
}
}
final int topNGroups = _TestUtil.nextInt(random, 1, 30);
//final int topNGroups = 4;
final int docsPerGroup = _TestUtil.nextInt(random, 1, 50);
final int groupOffset = _TestUtil.nextInt(random, 0, (topNGroups-1)/2);
//final int groupOffset = 0;
@ -523,7 +612,7 @@ public class TestGrouping extends LuceneTestCase {
final boolean doCache = random.nextBoolean();
final boolean doAllGroups = random.nextBoolean();
if (VERBOSE) {
System.out.println("TEST: groupSort=" + groupSort + " docSort=" + docSort + " searchTerm=" + searchTerm + " topNGroups=" + topNGroups + " groupOffset=" + groupOffset + " docOffset=" + docOffset + " doCache=" + doCache + " docsPerGroup=" + docsPerGroup + " doAllGroups=" + doAllGroups);
System.out.println("TEST: groupSort=" + groupSort + " docSort=" + docSort + " searchTerm=" + searchTerm + " topNGroups=" + topNGroups + " groupOffset=" + groupOffset + " docOffset=" + docOffset + " doCache=" + doCache + " docsPerGroup=" + docsPerGroup + " doAllGroups=" + doAllGroups + " getScores=" + getScores + " getMaxScores=" + getMaxScores);
}
final TermAllGroupsCollector allGroupsCollector;
@ -636,13 +725,12 @@ public class TestGrouping extends LuceneTestCase {
for(GroupDocs<BytesRef> gd : expectedGroups.groups) {
System.out.println(" group=" + (gd.groupValue == null ? "null" : gd.groupValue.utf8ToString()));
for(ScoreDoc sd : gd.scoreDocs) {
System.out.println(" id=" + sd.doc);
System.out.println(" id=" + sd.doc + " score=" + sd.score);
}
}
}
}
// NOTE: intentional but temporary field cache insanity!
assertEquals(docIDToID, expectedGroups, groupsResult, true);
assertEquals(docIDToID, expectedGroups, groupsResult, true, getScores);
final boolean needsScores = getScores || getMaxScores || docSort == null;
final BlockGroupingCollector c3 = new BlockGroupingCollector(groupSort, groupOffset+topNGroups, needsScores, lastDocInBlock);
@ -665,11 +753,53 @@ public class TestGrouping extends LuceneTestCase {
} else {
groupsResult2 = tempTopGroups2;
}
assertEquals(docIDToID2, expectedGroups, groupsResult2, false);
if (expectedGroups != null) {
// Fixup scores for reader2
for (GroupDocs groupDocsHits : expectedGroups.groups) {
for(ScoreDoc hit : groupDocsHits.scoreDocs) {
final GroupDoc gd = groupDocsByID[hit.doc];
assertEquals(gd.id, hit.doc);
//System.out.println("fixup score " + hit.score + " to " + gd.score2 + " vs " + gd.score);
hit.score = gd.score2;
}
}
final SortField[] sortFields = groupSort.getSort();
for(int groupSortIDX=0;groupSortIDX<sortFields.length;groupSortIDX++) {
if (sortFields[groupSortIDX].getType() == SortField.SCORE) {
for (GroupDocs groupDocsHits : expectedGroups.groups) {
if (groupDocsHits.groupSortValues != null) {
groupDocsHits.groupSortValues[groupSortIDX] = scoreMap.get(groupDocsHits.groupSortValues[groupSortIDX]);
assertNotNull(groupDocsHits.groupSortValues[groupSortIDX]);
}
}
}
}
final SortField[] docSortFields = docSort.getSort();
for(int docSortIDX=0;docSortIDX<docSortFields.length;docSortIDX++) {
if (docSortFields[docSortIDX].getType() == SortField.SCORE) {
for (GroupDocs groupDocsHits : expectedGroups.groups) {
for(ScoreDoc _hit : groupDocsHits.scoreDocs) {
FieldDoc hit = (FieldDoc) _hit;
if (hit.fields != null) {
hit.fields[docSortIDX] = scoreMap.get(hit.fields[docSortIDX]);
assertNotNull(hit.fields[docSortIDX]);
}
}
}
}
}
}
assertEquals(docIDToID2, expectedGroups, groupsResult2, false, getScores);
}
} finally {
FieldCache.DEFAULT.purge(r);
FieldCache.DEFAULT.purge(r2);
if (r2 != null) {
FieldCache.DEFAULT.purge(r2);
}
}
r.close();
@ -680,7 +810,7 @@ public class TestGrouping extends LuceneTestCase {
}
}
private void assertEquals(int[] docIDtoID, TopGroups expected, TopGroups actual, boolean verifyGroupValues) {
private void assertEquals(int[] docIDtoID, TopGroups expected, TopGroups actual, boolean verifyGroupValues, boolean testScores) {
if (expected == null) {
assertNull(actual);
return;
@ -716,9 +846,14 @@ public class TestGrouping extends LuceneTestCase {
for(int docIDX=0;docIDX<expectedFDs.length;docIDX++) {
final FieldDoc expectedFD = (FieldDoc) expectedFDs[docIDX];
final FieldDoc actualFD = (FieldDoc) actualFDs[docIDX];
//System.out.println(" actual doc=" + docIDtoID[actualFD.doc] + " score=" + actualFD.score);
assertEquals(expectedFD.doc, docIDtoID[actualFD.doc]);
// TODO
// assertEquals(expectedFD.score, actualFD.score);
if (testScores) {
assertEquals(expectedFD.score, actualFD.score);
} else {
// TODO: too anal for now
//assertEquals(Float.NaN, actualFD.score);
}
assertArrayEquals(expectedFD.fields, actualFD.fields);
}
}

View File

@ -144,6 +144,10 @@ New Features
to IndexReader.open (in the case you have a custom IndexReaderFactory).
(simonw via rmuir)
* SOLR-2136: Boolean type added to function queries, along with
new functions exists(), if(), and(), or(), xor(), not(), def(),
and true and false constants. (yonik)
Optimizations
----------------------

View File

@ -0,0 +1 @@
<!-- admin-extra.menu-bottom.html -->

View File

@ -0,0 +1 @@
<!-- admin-extra.menu-top.html -->

View File

@ -17,12 +17,16 @@
package org.apache.solr.schema;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRef;
import org.apache.solr.search.MutableValue;
import org.apache.solr.search.MutableValueBool;
import org.apache.solr.search.MutableValueInt;
import org.apache.solr.search.QParser;
import org.apache.solr.search.function.ValueSource;
import org.apache.solr.search.function.OrdFieldSource;
import org.apache.solr.search.function.*;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
@ -50,7 +54,7 @@ public class BoolField extends FieldType {
@Override
public ValueSource getValueSource(SchemaField field, QParser qparser) {
field.checkFieldCacheSource(qparser);
return new OrdFieldSource(field.name);
return new BoolFieldSource(field.name);
}
// avoid instantiating every time...
@ -121,7 +125,7 @@ public class BoolField extends FieldType {
@Override
public Object toObject(SchemaField sf, BytesRef term) {
return term.bytes[0] == 'T';
return term.bytes[term.offset] == 'T';
}
@Override
@ -145,6 +149,83 @@ public class BoolField extends FieldType {
@Override
public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException {
writer.writeBool(name, f.stringValue().charAt(0) =='T');
writer.writeBool(name, f.stringValue().charAt(0) == 'T');
}
}
// TODO - this can be much more efficient - use OpenBitSet or Bits
/**
 * ValueSource over an indexed boolean field, backed by the FieldCache
 * term index.  True is the single-byte term "T" (the externalized form
 * produced by BoolField); any other ord — including ord 0, which means
 * "no value for this doc" — reads as false.
 */
class BoolFieldSource extends ValueSource {
  protected String field;

  public BoolFieldSource(String field) {
    this.field = field;
  }

  @Override
  public String description() {
    return "bool(" + field + ')';
  }

  @Override
  public DocValues getValues(Map context, IndexReader.AtomicReaderContext readerContext) throws IOException {
    final FieldCache.DocTermsIndex sindex = FieldCache.DEFAULT.getTermsIndex(readerContext.reader, field);

    // Figure out which ord maps to true.  Ord 0 is reserved for
    // "no value", so the scan starts at 1; at most one term equals "T".
    int nord = sindex.numOrd();
    BytesRef br = new BytesRef();
    int tord = -1;
    for (int i=1; i<nord; i++) {
      sindex.lookup(i, br);
      if (br.length==1 && br.bytes[br.offset]=='T') {
        tord = i;
        break;
      }
    }
    final int trueOrd = tord;

    return new BoolDocValues(this) {
      @Override
      public boolean boolVal(int doc) {
        return sindex.getOrd(doc) == trueOrd;
      }

      @Override
      public boolean exists(int doc) {
        // ord 0 means the document has no value for this field
        return sindex.getOrd(doc) != 0;
      }

      @Override
      public ValueFiller getValueFiller() {
        return new ValueFiller() {
          private final MutableValueBool mval = new MutableValueBool();

          @Override
          public MutableValue getValue() {
            return mval;
          }

          @Override
          public void fillValue(int doc) {
            int ord = sindex.getOrd(doc);
            mval.value = (ord == trueOrd);
            mval.exists = (ord != 0);
          }
        };
      }
    };
  }

  @Override
  public boolean equals(Object o) {
    return o.getClass() == BoolFieldSource.class && this.field.equals(((BoolFieldSource)o).field);
  }

  // Fixed copy-paste defect: the hash seed previously used
  // OrdFieldSource.class.hashCode(), so a BoolFieldSource and an
  // OrdFieldSource over the same field always collided.
  private static final int hcode = BoolFieldSource.class.hashCode();

  @Override
  public int hashCode() {
    return hcode + field.hashCode();
  }
}

View File

@ -364,8 +364,14 @@ public class FunctionQParser extends QParser {
sp.expect(")");
}
else {
SchemaField f = req.getSchema().getField(id);
valueSource = f.getType().getValueSource(f, this);
if ("true".equals(id)) {
valueSource = new BoolConstValueSource(true);
} else if ("false".equals(id)) {
valueSource = new BoolConstValueSource(false);
} else {
SchemaField f = req.getSchema().getField(id);
valueSource = f.getType().getValueSource(f, this);
}
}
}

View File

@ -0,0 +1,60 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search;
/**
 * A mutable boolean holder used by function queries to avoid per-doc
 * boxing.  The inherited "exists" flag distinguishes an actual false
 * value from a missing one.
 */
public class MutableValueBool extends MutableValue {
  public boolean value;

  /** Returns the boxed value, or null when no value exists. */
  @Override
  public Object toObject() {
    return exists ? value : null;
  }

  @Override
  public void copy(MutableValue source) {
    MutableValueBool s = (MutableValueBool) source;
    value = s.value;
    exists = s.exists;
  }

  @Override
  public MutableValue duplicate() {
    MutableValueBool v = new MutableValueBool();
    v.value = this.value;
    v.exists = this.exists;
    return v;
  }

  @Override
  public boolean equalsSameType(Object other) {
    MutableValueBool b = (MutableValueBool)other;
    return value == b.value && exists == b.exists;
  }

  /**
   * Orders false before true, and non-existent before existent.
   * Bug fix: when the values differed this previously returned
   * {@code value ? 1 : 0}, i.e. a false value compared "equal" to a
   * true one in that direction while the reverse comparison returned 1
   * — an asymmetric comparator that can corrupt sorting.
   */
  @Override
  public int compareSameType(Object other) {
    MutableValueBool b = (MutableValueBool)other;
    if (value != b.value) return value ? 1 : -1;
    if (exists == b.exists) return 0;
    return exists ? 1 : -1;
  }

  @Override
  public int hashCode() {
    // three distinct states: true (2), existing false (1), missing (0)
    return value ? 2 : (exists ? 1 : 0);
  }
}

View File

@ -579,6 +579,134 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin {
return new NumDocsValueSource();
}
});
addParser("true", new ValueSourceParser() {
@Override
public ValueSource parse(FunctionQParser fp) throws ParseException {
return new BoolConstValueSource(true);
}
});
addParser("false", new ValueSourceParser() {
@Override
public ValueSource parse(FunctionQParser fp) throws ParseException {
return new BoolConstValueSource(false);
}
});
addParser("exists", new ValueSourceParser() {
@Override
public ValueSource parse(FunctionQParser fp) throws ParseException {
ValueSource vs = fp.parseValueSource();
return new SimpleBoolFunction(vs) {
@Override
protected String name() {
return "exists";
}
@Override
protected boolean func(int doc, DocValues vals) {
return vals.exists(doc);
}
};
}
});
addParser("not", new ValueSourceParser() {
@Override
public ValueSource parse(FunctionQParser fp) throws ParseException {
ValueSource vs = fp.parseValueSource();
return new SimpleBoolFunction(vs) {
@Override
protected boolean func(int doc, DocValues vals) {
return !vals.boolVal(doc);
}
@Override
protected String name() {
return "not";
}
};
}
});
addParser("and", new ValueSourceParser() {
@Override
public ValueSource parse(FunctionQParser fp) throws ParseException {
List<ValueSource> sources = fp.parseValueSourceList();
return new MultiBoolFunction(sources) {
@Override
protected String name() {
return "and";
}
@Override
protected boolean func(int doc, DocValues[] vals) {
for (DocValues dv : vals)
if (!dv.boolVal(doc)) return false;
return true;
}
};
}
});
addParser("or", new ValueSourceParser() {
@Override
public ValueSource parse(FunctionQParser fp) throws ParseException {
List<ValueSource> sources = fp.parseValueSourceList();
return new MultiBoolFunction(sources) {
@Override
protected String name() {
return "or";
}
@Override
protected boolean func(int doc, DocValues[] vals) {
for (DocValues dv : vals)
if (dv.boolVal(doc)) return true;
return false;
}
};
}
});
addParser("xor", new ValueSourceParser() {
@Override
public ValueSource parse(FunctionQParser fp) throws ParseException {
List<ValueSource> sources = fp.parseValueSourceList();
return new MultiBoolFunction(sources) {
@Override
protected String name() {
return "xor";
}
@Override
protected boolean func(int doc, DocValues[] vals) {
int nTrue=0, nFalse=0;
for (DocValues dv : vals) {
if (dv.boolVal(doc)) nTrue++;
else nFalse++;
}
return nTrue != 0 && nFalse != 0;
}
};
}
});
addParser("if", new ValueSourceParser() {
@Override
public ValueSource parse(FunctionQParser fp) throws ParseException {
ValueSource ifValueSource = fp.parseValueSource();
ValueSource trueValueSource = fp.parseValueSource();
ValueSource falseValueSource = fp.parseValueSource();
return new IfFunction(ifValueSource, trueValueSource, falseValueSource);
}
});
addParser("def", new ValueSourceParser() {
@Override
public ValueSource parse(FunctionQParser fp) throws ParseException {
return new DefFunction(fp.parseValueSourceList());
}
});
}
private static TInfo parseTerm(FunctionQParser fp) throws ParseException {
@ -857,6 +985,11 @@ class LongConstValueSource extends ConstNumberSource {
public Number getNumber() {
return constant;
}
@Override
public boolean getBool() {
return constant != 0;
}
}
@ -981,3 +1114,69 @@ abstract class Double2Parser extends NamedParser {
}
}
/**
 * A constant boolean ValueSource: every document yields the same
 * true/false value.  Numeric accessors map true/false onto 1/0.
 */
class BoolConstValueSource extends ConstNumberSource {
  final boolean constant;

  public BoolConstValueSource(boolean constant) {
    this.constant = constant;
  }

  @Override
  public String description() {
    return "const(" + constant + ')';
  }

  @Override
  public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
    return new BoolDocValues(this) {
      @Override
      public boolean boolVal(int doc) {
        return constant;
      }
    };
  }

  @Override
  public int hashCode() {
    // two distinct constants so the true and false sources hash apart
    return constant ? 0x12345678 : 0x87654321;
  }

  @Override
  public boolean equals(Object o) {
    if (o.getClass() != BoolConstValueSource.class) {
      return false;
    }
    return constant == ((BoolConstValueSource) o).constant;
  }

  @Override
  public int getInt() {
    return constant ? 1 : 0;
  }

  @Override
  public long getLong() {
    return constant ? 1L : 0L;
  }

  @Override
  public float getFloat() {
    return constant ? 1f : 0f;
  }

  @Override
  public double getDouble() {
    return constant ? 1d : 0d;
  }

  @Override
  public Number getNumber() {
    return Integer.valueOf(constant ? 1 : 0);
  }

  @Override
  public boolean getBool() {
    return constant;
  }
}

View File

@ -0,0 +1,79 @@
package org.apache.solr.search.function;
import org.apache.solr.search.MutableValue;
import org.apache.solr.search.MutableValueBool;
import org.apache.solr.search.MutableValueInt;
/**
 * Base DocValues for sources whose native type is boolean.  Subclasses
 * supply {@link #boolVal(int)}; every numeric view exposes the boolean
 * as 1 (true) or 0 (false), and {@link #strVal(int)} as "true"/"false".
 */
public abstract class BoolDocValues extends DocValues {
  protected final ValueSource vs;

  public BoolDocValues(ValueSource vs) {
    this.vs = vs;
  }

  @Override
  public abstract boolean boolVal(int doc);

  @Override
  public byte byteVal(int doc) {
    return (byte) (boolVal(doc) ? 1 : 0);
  }

  @Override
  public short shortVal(int doc) {
    return (short) (boolVal(doc) ? 1 : 0);
  }

  @Override
  public float floatVal(int doc) {
    return boolVal(doc) ? 1f : 0f;
  }

  @Override
  public int intVal(int doc) {
    return boolVal(doc) ? 1 : 0;
  }

  @Override
  public long longVal(int doc) {
    return boolVal(doc) ? 1L : 0L;
  }

  @Override
  public double doubleVal(int doc) {
    return boolVal(doc) ? 1d : 0d;
  }

  @Override
  public String strVal(int doc) {
    return String.valueOf(boolVal(doc));
  }

  @Override
  public Object objectVal(int doc) {
    // null signals "no value for this doc" to callers
    if (!exists(doc)) {
      return null;
    }
    return boolVal(doc);
  }

  @Override
  public String toString(int doc) {
    return vs.description() + '=' + strVal(doc);
  }

  @Override
  public ValueFiller getValueFiller() {
    return new ValueFiller() {
      private final MutableValueBool val = new MutableValueBool();

      @Override
      public MutableValue getValue() {
        return val;
      }

      @Override
      public void fillValue(int doc) {
        val.value = boolVal(doc);
        val.exists = exists(doc);
      }
    };
  }
}

View File

@ -0,0 +1,23 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search.function;
/**
 * Marker base class for function-query ValueSources whose native result
 * type is boolean (used by helpers such as IfFunction and the
 * and/or/xor/not parsers in this package).
 */
public abstract class BoolFunction extends ValueSource {
// TODO: placeholder to return type, among other common future functionality
}

View File

@ -26,4 +26,5 @@ public abstract class ConstNumberSource extends ValueSource {
public abstract float getFloat();
public abstract double getDouble();
public abstract Number getNumber();
public abstract boolean getBool();
}

View File

@ -66,6 +66,10 @@ public class ConstValueSource extends ConstNumberSource {
public Object objectVal(int doc) {
return constant;
}
@Override
public boolean boolVal(int doc) {
return constant != 0.0f;
}
};
}
@ -105,4 +109,9 @@ public class ConstValueSource extends ConstNumberSource {
public Number getNumber() {
return constant;
}
@Override
public boolean getBool() {
return constant != 0.0f;
}
}

View File

@ -0,0 +1,124 @@
package org.apache.solr.search.function;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.util.BytesRef;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
/**
 * def(a, b, ...): per document, delegates every value accessor to the
 * first argument source that exists for that doc; when none of the
 * earlier sources exist, the last source is used as the default.
 */
public class DefFunction extends MultiFunction {
  public DefFunction(List<ValueSource> sources) {
    super(sources);
  }

  @Override
  protected String name() {
    return "def";
  }

  @Override
  public DocValues getValues(Map fcontext, AtomicReaderContext readerContext) throws IOException {
    return new Values(valsArr(sources, fcontext, readerContext)) {
      final int lastIdx = valsArr.length - 1;

      /** First source that exists for this doc; the last source otherwise. */
      private DocValues pick(int doc) {
        for (int i = 0; i < lastIdx; i++) {
          DocValues source = valsArr[i];
          if (source.exists(doc)) {
            return source;
          }
        }
        return valsArr[lastIdx];
      }

      @Override
      public byte byteVal(int doc) {
        return pick(doc).byteVal(doc);
      }

      @Override
      public short shortVal(int doc) {
        return pick(doc).shortVal(doc);
      }

      @Override
      public float floatVal(int doc) {
        return pick(doc).floatVal(doc);
      }

      @Override
      public int intVal(int doc) {
        return pick(doc).intVal(doc);
      }

      @Override
      public long longVal(int doc) {
        return pick(doc).longVal(doc);
      }

      @Override
      public double doubleVal(int doc) {
        return pick(doc).doubleVal(doc);
      }

      @Override
      public String strVal(int doc) {
        return pick(doc).strVal(doc);
      }

      @Override
      public boolean boolVal(int doc) {
        return pick(doc).boolVal(doc);
      }

      @Override
      public boolean bytesVal(int doc, BytesRef target) {
        return pick(doc).bytesVal(doc, target);
      }

      @Override
      public Object objectVal(int doc) {
        return pick(doc).objectVal(doc);
      }

      @Override
      public boolean exists(int doc) {
        // exists iff any underlying source (including the default) exists
        for (DocValues source : valsArr) {
          if (source.exists(doc)) {
            return true;
          }
        }
        return false;
      }

      @Override
      public ValueFiller getValueFiller() {
        // TODO: need ValueSource.type() to determine correct type
        return super.getValueFiller();
      }
    };
  }
}

View File

@ -48,6 +48,10 @@ public abstract class DocValues {
// TODO: should we make a termVal, returns BytesRef?
public String strVal(int doc) { throw new UnsupportedOperationException(); }
public boolean boolVal(int doc) {
return intVal(doc) != 0;
}
/** returns the bytes representation of the string val - TODO: should this return the indexed raw bytes not? */
public boolean bytesVal(int doc, BytesRef target) {
String s = strVal(doc);

View File

@ -115,4 +115,9 @@ public class DoubleConstValueSource extends ConstNumberSource {
public Number getNumber() {
return constant;
}
@Override
public boolean getBool() {
return constant != 0;
}
}

View File

@ -35,6 +35,11 @@ public abstract class DoubleDocValues extends DocValues {
return (long)doubleVal(doc);
}
@Override
public boolean boolVal(int doc) {
return doubleVal(doc) != 0;
}
@Override
public abstract double doubleVal(int doc);

View File

@ -53,40 +53,15 @@ public class DoubleFieldSource extends NumericFieldCacheSource<DoubleValues> {
final double[] arr = vals.values;
final Bits valid = vals.valid;
return new DocValues() {
@Override
public float floatVal(int doc) {
return (float) arr[doc];
}
@Override
public int intVal(int doc) {
return (int) arr[doc];
}
@Override
public long longVal(int doc) {
return (long) arr[doc];
}
return new DoubleDocValues(this) {
@Override
public double doubleVal(int doc) {
return arr[doc];
}
@Override
public String strVal(int doc) {
return Double.toString(arr[doc]);
}
@Override
public Object objectVal(int doc) {
return valid.get(doc) ? arr[doc] : null;
}
@Override
public String toString(int doc) {
return description() + '=' + doubleVal(doc);
public boolean exists(int doc) {
return valid.get(doc);
}
@Override
@ -147,7 +122,7 @@ public class DoubleFieldSource extends NumericFieldCacheSource<DoubleValues> {
}
}
@Override
@Override
public ValueFiller getValueFiller() {
return new ValueFiller() {
private final double[] doubleArr = arr;

View File

@ -0,0 +1,148 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search.function;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.util.BytesRef;
import java.io.IOException;
import java.util.List;
import java.util.Map;
/**
 * if(test, trueVal, falseVal): per document, evaluates the test source
 * as a boolean and returns the corresponding branch source's value.
 * DocValues for all three sources are obtained up front per segment, but
 * only the selected branch is read for a given document.
 */
public class IfFunction extends BoolFunction {
  private ValueSource ifSource;
  private ValueSource trueSource;
  private ValueSource falseSource;

  public IfFunction(ValueSource ifSource, ValueSource trueSource, ValueSource falseSource) {
    this.ifSource = ifSource;
    this.trueSource = trueSource;
    this.falseSource = falseSource;
  }

  @Override
  public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
    final DocValues ifVals = ifSource.getValues(context, readerContext);
    final DocValues trueVals = trueSource.getValues(context, readerContext);
    final DocValues falseVals = falseSource.getValues(context, readerContext);

    return new DocValues() {
      @Override
      public byte byteVal(int doc) {
        return ifVals.boolVal(doc) ? trueVals.byteVal(doc) : falseVals.byteVal(doc);
      }

      @Override
      public short shortVal(int doc) {
        return ifVals.boolVal(doc) ? trueVals.shortVal(doc) : falseVals.shortVal(doc);
      }

      @Override
      public float floatVal(int doc) {
        return ifVals.boolVal(doc) ? trueVals.floatVal(doc) : falseVals.floatVal(doc);
      }

      @Override
      public int intVal(int doc) {
        return ifVals.boolVal(doc) ? trueVals.intVal(doc) : falseVals.intVal(doc);
      }

      @Override
      public long longVal(int doc) {
        return ifVals.boolVal(doc) ? trueVals.longVal(doc) : falseVals.longVal(doc);
      }

      @Override
      public double doubleVal(int doc) {
        return ifVals.boolVal(doc) ? trueVals.doubleVal(doc) : falseVals.doubleVal(doc);
      }

      @Override
      public String strVal(int doc) {
        return ifVals.boolVal(doc) ? trueVals.strVal(doc) : falseVals.strVal(doc);
      }

      @Override
      public boolean boolVal(int doc) {
        return ifVals.boolVal(doc) ? trueVals.boolVal(doc) : falseVals.boolVal(doc);
      }

      @Override
      public boolean bytesVal(int doc, BytesRef target) {
        return ifVals.boolVal(doc) ? trueVals.bytesVal(doc, target) : falseVals.bytesVal(doc, target);
      }

      @Override
      public Object objectVal(int doc) {
        return ifVals.boolVal(doc) ? trueVals.objectVal(doc) : falseVals.objectVal(doc);
      }

      @Override
      public boolean exists(int doc) {
        return true; // TODO: flow through to any sub-sources?
      }

      @Override
      public ValueFiller getValueFiller() {
        // TODO: we need types of trueSource / falseSource to handle this
        // for now, use float.
        return super.getValueFiller();
      }

      @Override
      public String toString(int doc) {
        return "if(" + ifVals.toString(doc) + ',' + trueVals.toString(doc) + ',' + falseVals.toString(doc) + ')';
      }
    };
  }

  @Override
  public String description() {
    // Bug fix: falseSource was previously concatenated directly
    // (implicit toString()) instead of using description(), unlike the
    // other two sources.
    return "if(" + ifSource.description() + ',' + trueSource.description() + ',' + falseSource.description() + ')';
  }

  @Override
  public int hashCode() {
    int h = ifSource.hashCode();
    h = h * 31 + trueSource.hashCode();
    h = h * 31 + falseSource.hashCode();
    return h;
  }

  @Override
  public boolean equals(Object o) {
    if (!(o instanceof IfFunction)) return false;
    IfFunction other = (IfFunction)o;
    return ifSource.equals(other.ifSource)
        && trueSource.equals(other.trueSource)
        && falseSource.equals(other.falseSource);
  }

  @Override
  public void createWeight(Map context, IndexSearcher searcher) throws IOException {
    ifSource.createWeight(context, searcher);
    trueSource.createWeight(context, searcher);
    falseSource.createWeight(context, searcher);
  }
}

View File

@ -38,6 +38,11 @@ public abstract class LongDocValues extends DocValues {
return (double)longVal(doc);
}
@Override
public boolean boolVal(int doc) {
return longVal(doc) != 0;
}
@Override
public String strVal(int doc) {
return Long.toString(longVal(doc));

View File

@ -0,0 +1,105 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search.function;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.IndexSearcher;
import java.io.IOException;
import java.util.List;
import java.util.Map;
/**
 * Abstract base class for boolean-valued functions over an arbitrary number
 * of ValueSource arguments, e.g. and(a,b,...), or(a,b,...), xor(a,b).
 * Subclasses supply the function name and the per-document combining logic.
 */
public abstract class MultiBoolFunction extends BoolFunction {
  protected final List<ValueSource> sources;

  public MultiBoolFunction(List<ValueSource> sources) {
    this.sources = sources;
  }

  /** @return the function name used when rendering description()/toString() */
  protected abstract String name();

  /** Combines the per-source values of one document into a single boolean. */
  protected abstract boolean func(int doc, DocValues[] vals);

  @Override
  public BoolDocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
    // Materialize DocValues for every argument once per segment.
    final DocValues[] vals = new DocValues[sources.size()];
    int i = 0;
    for (ValueSource source : sources) {
      vals[i++] = source.getValues(context, readerContext);
    }

    return new BoolDocValues(this) {
      @Override
      public boolean boolVal(int doc) {
        return func(doc, vals);
      }

      @Override
      public String toString(int doc) {
        StringBuilder sb = new StringBuilder(name());
        sb.append('(');
        boolean first = true;
        for (DocValues dv : vals) {
          if (first) {
            first = false;
          } else {
            sb.append(',');
          }
          sb.append(dv.toString(doc));
        }
        // Bug fix: the argument list was previously left unterminated.
        sb.append(')');
        return sb.toString();
      }
    };
  }

  @Override
  public String description() {
    StringBuilder sb = new StringBuilder(name());
    sb.append('(');
    boolean first = true;
    for (ValueSource source : sources) {
      if (first) {
        first = false;
      } else {
        sb.append(',');
      }
      sb.append(source.description());
    }
    // Bug fix: the argument list was previously left unterminated; this keeps
    // the output consistent with MultiFunction.description().
    sb.append(')');
    return sb.toString();
  }

  @Override
  public int hashCode() {
    return sources.hashCode() + name().hashCode();
  }

  @Override
  public boolean equals(Object o) {
    // Null guard: equals(null) must return false, not throw NPE on getClass().
    if (o == null || this.getClass() != o.getClass()) return false;
    MultiBoolFunction other = (MultiBoolFunction)o;
    return this.sources.equals(other.sources);
  }

  @Override
  public void createWeight(Map context, IndexSearcher searcher) throws IOException {
    // Let each argument cache per-search state before getValues() is called.
    for (ValueSource source : sources) {
      source.createWeight(context, searcher);
    }
  }
}

View File

@ -0,0 +1,122 @@
package org.apache.solr.search.function;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.util.BytesRef;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
/**
 * Base class for function queries that wrap an arbitrary list of ValueSource
 * arguments under a single name, rendered as {@code name(arg1,arg2,...)}.
 */
public abstract class MultiFunction extends ValueSource {
  protected final List<ValueSource> sources;

  public MultiFunction(List<ValueSource> sources) {
    this.sources = sources;
  }

  /** The function name used when rendering descriptions and per-doc values. */
  abstract protected String name();

  @Override
  public String description() {
    return description(name(), sources);
  }

  /** Builds {@code name(src1,src2,...)} from each source's string form. */
  public static String description(String name, List<ValueSource> sources) {
    StringBuilder out = new StringBuilder();
    out.append(name).append('(');
    String sep = "";
    for (ValueSource src : sources) {
      out.append(sep);
      out.append(src);
      sep = ",";
    }
    out.append(')');
    return out.toString();
  }

  /** Materializes DocValues for every source against the given reader context. */
  public static DocValues[] valsArr(List<ValueSource> sources, Map fcontext, AtomicReaderContext readerContext) throws IOException {
    final DocValues[] result = new DocValues[sources.size()];
    int slot = 0;
    for (ValueSource src : sources) {
      result[slot] = src.getValues(fcontext, readerContext);
      slot++;
    }
    return result;
  }

  /** DocValues view over the per-source values; subclasses supply typed accessors. */
  public class Values extends DocValues {
    final DocValues[] valsArr;

    public Values(DocValues[] vals) {
      this.valsArr = vals;
    }

    @Override
    public String toString(int doc) {
      return MultiFunction.toString(name(), valsArr, doc);
    }

    @Override
    public ValueFiller getValueFiller() {
      // TODO: need ValueSource.type() to determine correct type
      return super.getValueFiller();
    }
  }

  /** Builds {@code name(val1,val2,...)} for a single document. */
  public static String toString(String name, DocValues[] valsArr, int doc) {
    StringBuilder out = new StringBuilder();
    out.append(name).append('(');
    String sep = "";
    for (DocValues vals : valsArr) {
      out.append(sep);
      out.append(vals.toString(doc));
      sep = ",";
    }
    out.append(')');
    return out.toString();
  }

  @Override
  public void createWeight(Map context, IndexSearcher searcher) throws IOException {
    for (ValueSource src : sources) {
      src.createWeight(context, searcher);
    }
  }

  @Override
  public int hashCode() {
    return sources.hashCode() + name().hashCode();
  }

  @Override
  public boolean equals(Object o) {
    if (this.getClass() != o.getClass()) return false;
    MultiFunction other = (MultiFunction) o;
    return this.sources.equals(other.sources);
  }
}

View File

@ -0,0 +1,74 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search.function;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.IndexSearcher;
import java.io.IOException;
import java.util.Map;
/**
 * Base class for boolean functions of a single ValueSource argument,
 * e.g. not(x) or exists(x). Subclasses supply the function name and the
 * per-document predicate.
 */
public abstract class SimpleBoolFunction extends BoolFunction {
  protected final ValueSource source;

  public SimpleBoolFunction(ValueSource source) {
    this.source = source;
  }

  /** @return the function name used in description()/toString() output */
  protected abstract String name();

  /** Derives the boolean value for one document from the wrapped source. */
  protected abstract boolean func(int doc, DocValues vals);

  @Override
  public BoolDocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
    final DocValues vals = source.getValues(context, readerContext);
    return new BoolDocValues(this) {
      @Override
      public boolean boolVal(int doc) {
        return func(doc, vals);
      }
      @Override
      public String toString(int doc) {
        return name() + '(' + vals.toString(doc) + ')';
      }
    };
  }

  @Override
  public String description() {
    return name() + '(' + source.description() + ')';
  }

  @Override
  public int hashCode() {
    return source.hashCode() + name().hashCode();
  }

  @Override
  public boolean equals(Object o) {
    // Null guard: equals(null) must return false, not throw NPE on getClass().
    if (o == null || this.getClass() != o.getClass()) return false;
    // Bug fix: was cast to SingleFunction, an unrelated class, which threw
    // ClassCastException whenever two instances of the same
    // SimpleBoolFunction subclass were compared.
    SimpleBoolFunction other = (SimpleBoolFunction)o;
    return this.source.equals(other.source);
  }

  @Override
  public void createWeight(Map context, IndexSearcher searcher) throws IOException {
    source.createWeight(context, searcher);
  }
}

View File

@ -21,6 +21,11 @@ public abstract class StrDocValues extends DocValues {
return exists(doc) ? strVal(doc) : null;
}
@Override
public boolean boolVal(int doc) {
// A string field is "true" iff a value exists for this document.
return exists(doc);
}
@Override
public String toString(int doc) {
return vs.description() + "='" + strVal(doc) + "'";

View File

@ -78,6 +78,10 @@ public abstract class StringIndexDocValues extends DocValues {
return spareChars.toString();
}
@Override
public boolean boolVal(int doc) {
// An indexed string value is "true" iff it exists for this document.
return exists(doc);
}
@Override
public abstract Object objectVal(int doc); // force subclasses to override

View File

@ -119,7 +119,29 @@ public class TestQueryTypes extends AbstractSolrTestCase {
assertQ(req( "q", "{!frange v="+f+" l='"+v+"' u='"+v+"'}" )
,"//result[@numFound='1']"
);
// exists()
assertQ(req( "fq","id:999", "q", "{!frange l=1 u=1}if(exists("+f+"),1,0)" )
,"//result[@numFound='1']"
);
// boolean value of non-zero values (just leave off the exists from the prev test)
assertQ(req( "fq","id:999", "q", "{!frange l=1 u=1}if("+f+",1,0)" )
,"//result[@numFound='1']"
);
if (!"id".equals(f)) {
assertQ(req( "fq","id:1", "q", "{!frange l=1 u=1}if(exists("+f+"),1,0)" )
,"//result[@numFound='0']"
);
// boolean value of zero/missing values (just leave off the exists from the prev test)
assertQ(req( "fq","id:1", "q", "{!frange l=1 u=1}if("+f+",1,0)" )
,"//result[@numFound='0']"
);
}
// function query... just make sure it doesn't throw an exception
if ("v_s".equals(f)) continue; // in this context, functions must be able to be interpreted as a float
assertQ(req( "q", "+id:999 _val_:\"" + f + "\"")

View File

@ -581,4 +581,56 @@ public class TestFunctionQuery extends SolrTestCaseJ4 {
purgeFieldCache(FieldCache.DEFAULT); // avoid FC insanity
}
// Exercises the boolean function-query family (true/false, exists, if, and,
// or, xor, not, def) and constant-to-boolean coercion via pseudo-fields.
@Test
public void testBooleanFunctions() throws Exception {
// doc 1 has zero-valued numerics; doc 2 has non-zero values.
assertU(adoc("id", "1", "text", "hello", "foo_s","A", "foo_ti", "0", "foo_tl","0"));
assertU(adoc("id", "2" , "foo_ti","10", "foo_tl","11"));
assertU(commit());
// true and false functions and constants
assertJQ(req("q", "id:1", "fl", "t:true(),f:false(),tt:{!func}true,ff:{!func}false")
, "/response/docs/[0]=={'t':true,'f':false,'tt':true,'ff':false}");
// test that exists(query) depends on the query matching the document
assertJQ(req("q", "id:1", "fl", "t:exists(query($q1)),f:exists(query($q2))", "q1","text:hello", "q2","text:there")
, "/response/docs/[0]=={'t':true,'f':false}");
// test if()
assertJQ(req("q", "id:1", "fl", "a1:if(true,'A','B')", "fl","b1:if(false,'A','B')")
, "/response/docs/[0]=={'a1':'A', 'b1':'B'}");
// test boolean operators
assertJQ(req("q", "id:1", "fl", "t1:and(true,true)", "fl","f1:and(true,false)", "fl","f2:and(false,true)", "fl","f3:and(false,false)")
, "/response/docs/[0]=={'t1':true, 'f1':false, 'f2':false, 'f3':false}");
assertJQ(req("q", "id:1", "fl", "t1:or(true,true)", "fl","t2:or(true,false)", "fl","t3:or(false,true)", "fl","f1:or(false,false)")
, "/response/docs/[0]=={'t1':true, 't2':true, 't3':true, 'f1':false}");
assertJQ(req("q", "id:1", "fl", "f1:xor(true,true)", "fl","t1:xor(true,false)", "fl","t2:xor(false,true)", "fl","f2:xor(false,false)")
, "/response/docs/[0]=={'t1':true, 't2':true, 'f1':false, 'f2':false}");
assertJQ(req("q", "id:1", "fl", "t:not(false),f:not(true)")
, "/response/docs/[0]=={'t':true, 'f':false}");
// def(), the default function that returns the first value that exists
assertJQ(req("q", "id:1", "fl", "x:def(id,123.0), y:def(foo_f,234.0)")
, "/response/docs/[0]=={'x':1.0, 'y':234.0}");
assertJQ(req("q", "id:1", "fl", "x:def(foo_s,'Q'), y:def(missing_s,'W')")
, "/response/docs/[0]=={'x':'A', 'y':'W'}");
// test constant conversion to boolean: non-zero numbers are true, zero is
// false; string constants are false.
assertJQ(req("q", "id:1", "fl", "a:not(0), b:not(1), c:not(0.0), d:not(1.1), e:not('A')")
, "/response/docs/[0]=={'a':true, 'b':false, 'c':true, 'd':false, 'e':false}");
}
// Verifies that function-query pseudo-fields are only added to a returned
// document when the underlying value exists (bar_s is missing, so 'e' is
// absent from the expected response).
@Test
public void testPseudoFieldFunctions() throws Exception {
assertU(adoc("id", "1", "text", "hello", "foo_s","A"));
assertU(adoc("id", "2"));
assertU(commit());
assertJQ(req("q", "id:1", "fl", "a:1,b:2.0,c:'X',d:{!func}foo_s,e:{!func}bar_s") // if exists() is false, no pseudo-field should be added
, "/response/docs/[0]=={'a':1, 'b':2.0,'c':'X','d':'A'}");
}
}

View File

@ -462,6 +462,7 @@ ul
#content #dashboard .block
{
background-image: none;
width: 49%;
}
@ -550,85 +551,13 @@ ul
display: block;
}
#content #dashboard #replication.is-master .slave
#content #dashboard #replication #details table thead td span
{
display: none;
}
#content #dashboard #replication table
{
border-collapse: collapse;
}
#content #dashboard #replication table th,
#content #dashboard #replication table td
{
border: 1px solid #f0f0f0;
padding: 2px 5px;
}
#content #dashboard #replication table thead td
{
border: 0;
}
#content #dashboard #replication table thead th,
#content #dashboard #replication table tbody td
{
border-right: 0;
}
#content #dashboard #replication table thead th
{
border-top: 0;
font-weight: bold;
}
#content #dashboard #replication table tbody th,
#content #dashboard #replication table tbody td
{
border-bottom: 0;
text-align: right;
}
#content #dashboard #replication table tbody th
{
border-left: 0;
}
#content #dashboard #replication table tbody th,
#content #dashboard #replication dt
{
width: 100px;
}
#content #dashboard #replication dl
{
display: none;
margin-top: 10px;
}
#content #dashboard #replication dt,
#content #dashboard #replication dd
{
display: block;
padding-top: 1px;
padding-bottom: 1px;
}
#content #dashboard #replication dt
{
border-right: 1px solid #f0f0f0;
float: left;
padding-left: 5px;
padding-right: 5px;
margin-right: 3px;
text-align: right;
}
#content #dashboard #dataimport
{
background-color: #0ff;
float: right;
}
@ -711,6 +640,19 @@ ul
max-width: 99%;
}
#content #analysis #analysis-error
{
background-color: #f00;
background-image: url( ../img/ico/construction.png );
background-position: 10px 50%;
color: #fff;
display: none;
font-weight: bold;
margin-bottom: 20px;
padding: 10px;
padding-left: 35px;
}
#content #analysis .analysis-result h2
{
position: relative;
@ -1334,6 +1276,12 @@ ul
padding-left: 10px;
}
#content #schema-browser #related #f-df-t
{
border-bottom: 1px solid #f0f0f0;
padding-bottom: 15px;
}
#content #schema-browser #related dl
{
margin-top: 15px;
@ -1367,7 +1315,9 @@ ul
#content #schema-browser #related .dynamic-field .dynamic-field,
#content #schema-browser #related .dynamic-field .dynamic-field a,
#content #schema-browser #related .type .type,
#content #schema-browser #related .type .type a
#content #schema-browser #related .type .type a,
#content #schema-browser #related .active,
#content #schema-browser #related .active a
{
color: #333;
}
@ -1378,6 +1328,11 @@ ul
color: #666;
}
#content #schema-browser #data
{
display: none;
}
#content #schema-browser #data #index dt
{
display: none;
@ -1491,6 +1446,7 @@ ul
#content #schema-browser #data #field .topterms-holder
{
display: none;
float: left;
}
@ -2830,6 +2786,7 @@ ul
#content #replication #details table tbody .size
{
text-align: right;
white-space: nowrap;
}
#content #replication #details table tbody .generation div

View File

@ -35,14 +35,14 @@
<div id="wip-notice">
<p>This interface is work in progress. It works best in Chrome.</p>
<p><a href="admin/">Use the <span>old admin interface</span> if there are problems with this one.</a></p>
<p><a href="admin">Use the <span>old admin interface</span> if there are problems with this one.</a></p>
<p><a href="https://issues.apache.org/jira/browse/SOLR-2399">Bugs/Requests/Suggestions: <span>SOLR-2399</span></a></p>
</div>
<p id="environment">&nbsp;</p>
<p id="environment">&nbsp;</p>
</div>
<div id="main" class="clearfix">
<div id="content-wrapper">

File diff suppressed because it is too large Load Diff

View File

@ -1,5 +1,11 @@
<div id="analysis">
<div class="block" id="analysis-error">
This functionality requires the <code>/analysis/field</code> Handler to be registered and active!
</div>
<div class="block" id="field-analysis">
<h2><span>Field Analysis</span></h2>

View File

@ -18,8 +18,10 @@
<form>
<input type="hidden" name="action" value="RENAME">
<p class="clearfix"><label for="rename_core">from:</label>
<input type="text" name="core" id="rename_core" disabled="disabled"></p>
<input type="text" name="core" id="rename_core" readonly="readonly"></p>
<p class="clearfix"><label for="rename_other">to:</label>
<input type="text" name="other" id="rename_other"></p>
@ -42,12 +44,15 @@
<form>
<input type="hidden" name="action" value="SWAP">
<input type="hidden" name="core">
<p class="clearfix"><label for="swap_core">this:</label>
<select name="core" id="swap_core" class="core" disabled="disabled">
<select id="swap_core" class="core" disabled="disabled">
</select></p>
<p class="clearfix"><label for="swap_other">and:</label>
<select class="other" id="swap_other" class="other">
<select name="other" id="swap_other" class="other">
</select></p>
<p class="clearfix buttons">
@ -181,6 +186,8 @@
<form>
<input type="hidden" name="action" value="CREATE">
<p class="clearfix"><label for="add_name">name:</label>
<input type="text" name="name" id="add_name"></p>

View File

@ -63,96 +63,52 @@
<div class="message"></div>
</div>
<div class="content">
<div class="content clearfix"id="details">
<table border="0" cellspacing="0" cellpadding="0">
<thead>
<tr>
<td>&nbsp;</td>
<th class="slave">slave</th>
<th>master</th>
<td><span>Index</span></td>
<th>Version</th>
<th><abbr title="Generation">Gen</abbr></th>
<th>Size</th>
</tr>
</thead>
<tbody>
<tr>
<th>indexVersion</th>
<td class="slave value details_slave_master-details_index-version"></td>
<td class="value details_index-version"></td>
<tr class="master">
<th>Master:</th>
<td class="version"><div>x</div></td>
<td class="generation"><div>y</div></td>
<td class="size"><div>z</div></td>
</tr>
<tr>
<th>generation</th>
<td class="slave value details_slave_master-details_generation"></td>
<td class="value details_generation"></td>
</tr>
<tr>
<th>indexSize</th>
<td class="slave value details_slave_master-details_index-size"></td>
<td class="value details_index-size"></td>
<tr class="slave slaveOnly">
<th>Slave:</th>
<td class="version"><div>a</div></td>
<td class="generation"><div>c</div></td>
<td class="size"><div>c</div></td>
</tr>
</tbody>
</table>
<dl class="clearfix slave">
<dt class="details_slave_master-url">masterUrl</dt>
<dd class="value details_slave_master-url"></dd>
<dt class="details_slave_poll-interval">poll every</dt>
<dd class="value details_slave_poll-interval"></dd>
<dt class="details_slave_index-replicated-at">last replicated</dt>
<dd class="value timeago details_slave_index-replicated-at"></dd>
<dt class="details_slave_next-execution-at">replicate next</dt>
<dd class="value timeago details_slave_next-execution-at"></dd>
<dt class="details_slave_replication-failed-at">last failed</dt>
<dd class="value timeago details_slave_replication-failed-at"></dd>
</dl>
<!--
indexVersion:
1295900553587
generation:
2
indexSize:
4.25 KB
// slave
indexVersion:
1295900553587
generation:
2
indexSize:
4.25 KB
masterUrl:
http://localhost:8985/solr/replication
pollInterval:
00:00:60
indexReplicatedAt:
Tue Mar 01 19:37:00 UTC 2011
nextExecutionAt:
Tue Mar 01 19:38:00 UTC 2011
replicationFailedAt:
Tue Mar 01 19:37:00 UTC 2011
lastCycleBytesDownloaded:
0
previousCycleTimeInSeconds:
0
isPollingDisabled:
false
isReplicating:
false
-->
</div>
</div>
<div class="block" id="dataimport">
<h2><span>DataImport-Handler</span></h2>
<h2><span>Dataimport</span></h2>
<div class="message-container">
<div class="message"></div>

View File

@ -4,19 +4,135 @@
<div id="data">
#data
<div id="field">
<div class="field-options">
<dl class="options clearfix">
<dt class="field-type">Field-Type:</dt>
<dt class="properties">Properties:</dt>
<dt class="schema">Schema:</dt>
<dt class="index">Index:</dt>
<dt class="position-increment-gap"><abbr title="Position Increment Gap">PI Gap</abbr>:</dt>
<dt class="docs">Docs:</dt>
<dt class="distinct">Distinct:</dt>
</dl>
<ul class="analyzer">
<li class="clearfix index">
<p>Index Analyzer:</p>
<dl>
<dt></dt>
</dl>
<ul>
<li class="clearfix tokenizer">
<p>Tokenizer:</p>
<dl>
</dl>
</li>
<li class="clearfix filters">
<p>Filters:</p>
<dl>
</dl>
</li>
</ul>
</li>
<li class="clearfix query">
<p>Query Analyzer:</p>
<dl>
<dt></dt>
</dl>
<ul>
<li class="clearfix tokenizer">
<p>Tokenizer:</p>
<dl>
</dl>
</li>
<li class="clearfix filters">
<p>Filters:</p>
<dl>
</dl>
</li>
</ul>
</li>
</ul>
</div>
<div class="topterms-holder">
<p class="head">Top <span class="shown"></span><span class="max-holder">/<span class="max"></span></span> Terms:</p>
<table border="0" cellspacing="0" cellpadding="0">
<thead>
<tr>
<th class="position" title="Position">&nbsp;</th>
<th class="term">Term</th>
<th class="frequency" title="Frequency">Frq</th>
</tr>
</thead>
</table>
<p class="navi clearfix">
<a class="less"><span>less</span></a>
<a class="more"><span>more</span></a>
</p>
</div>
<div class="histogram-holder">
<p class="head">Histogram:</p>
<div class="histogram"></div>
<dl class="clearfix">
</dl>
</div>
</div>
</div>
<div id="related">
<select>
<option value="" selected="selected">Please select ..</option>
<option value="" selected="selected">Please select </option>
</select>
<dl>
<dl id="f-df-t">
</dl>
<dl class="ukf-dsf">
<dt class="unique-key-field">Unique Key Field</dt>
<dt class="default-search-field">Default Search Field</dt>
</dl>
</div>
</div>

View File

@ -1,109 +0,0 @@
<div id="field">
<div class="field-options">
<dl class="options clearfix">
<dt class="field-type">Field-Type:</dt>
<dt class="properties">Properties:</dt>
<dt class="schema">Schema:</dt>
<dt class="index">Index:</dt>
<dt class="position-increment-gap"><abbr title="Position Increment Gap">PI Gap</abbr>:</dt>
<dt class="docs">Docs:</dt>
<dt class="distinct">Distinct:</dt>
</dl>
<ul class="analyzer">
<li class="clearfix index">
<p>Index Analyzer:</p>
<dl>
<dt></dt>
</dl>
<ul>
<li class="clearfix tokenizer">
<p>Tokenizer:</p>
<dl>
</dl>
</li>
<li class="clearfix filters">
<p>Filters:</p>
<dl>
</dl>
</li>
</ul>
</li>
<li class="clearfix query">
<p>Query Analyzer:</p>
<dl>
<dt></dt>
</dl>
<ul>
<li class="clearfix tokenizer">
<p>Tokenizer:</p>
<dl>
</dl>
</li>
<li class="clearfix filters">
<p>Filters:</p>
<dl>
</dl>
</li>
</ul>
</li>
</ul>
</div>
<div class="topterms-holder">
<p class="head">Top <span class="shown"></span><span class="max-holder">/<span class="max"></span></span> Terms:</p>
<table border="0" cellspacing="0" cellpadding="0">
<thead>
<tr>
<th class="position" title="Position">&nbsp;</th>
<th class="term">Term</th>
<th class="frequency" title="Frequency">Frq</th>
</tr>
</thead>
</table>
<p class="navi clearfix">
<a class="less"><span>less</span></a>
<a class="more"><span>more</span></a>
</p>
</div>
<div class="histogram-holder">
<p class="head">Histogram:</p>
<div class="histogram"></div>
<dl class="clearfix">
</dl>
</div>
</div>

View File

@ -1,11 +0,0 @@
<div id="index">
<dl class="clearfix">
<dt class="unique-key-field">Unique Key Field:</dt>
<dt class="default-search-field">Default Search Field:</dt>
</dl>
</div>