diff --git a/dev-tools/scripts/poll-mirrors.pl b/dev-tools/scripts/poll-mirrors.pl new file mode 100755 index 00000000000..e09006fb0ed --- /dev/null +++ b/dev-tools/scripts/poll-mirrors.pl @@ -0,0 +1,128 @@ +#!/usr/bin/perl +# +# poll-mirrors.pl +# +# This script is designed to poll download sites after posting a release +# and print out notice as each becomes available. The RM can use this +# script to delay the release announcement until the release can be +# downloaded. +# +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use strict; +use warnings; +use Getopt::Long; +use POSIX qw/strftime/; +use LWP::UserAgent; + +my $version; +my $interval = 300; +my $quiet = 0; + +my $result = GetOptions ("version=s" => \$version, "interval=i" => \$interval); + +my $usage = "$0 -v version [ -i interval (seconds; default: 300) ]"; + +unless ($result) { + print STDERR $usage; + exit(1); +} +unless (defined($version) && $version =~ /\d+(?:\.\d+)+/) { + print STDERR "You must specify the release version.\n$usage"; + exit(1); +} + +my $previously_selected = select STDOUT; +$| = 1; # turn off buffering of STDOUT, so status is printed immediately +select $previously_selected; + +my $apache_url_suffix = "lucene/java/$version/lucene-$version.zip.asc"; +my $apache_mirrors_list_url = "http://www.apache.org/mirrors/"; +my $maven_url = "http://repo1.maven.org/maven2/org/apache/lucene/lucene-core/$version/lucene-core-$version.pom.asc"; + +my $agent = LWP::UserAgent->new(); +$agent->timeout(2); + +my $maven_available = 0; + +my @apache_mirrors = (); + +my $apache_mirrors_list_page = $agent->get($apache_mirrors_list_url)->decoded_content; +if (defined($apache_mirrors_list_page)) { + # + # apache.dattatec.com  @ + # + # http + # 8 hours
+ # 5 hours
+ # ok + # + while ($apache_mirrors_list_page =~ m~(.*?)~gis) { + my $mirror_entry = $1; + next unless ($mirror_entry =~ m~\s*ok\s*\s*$~i); # skip mirrors with problems + if ($mirror_entry =~ m~~i) { + my $mirror_url = $1; + push @apache_mirrors, "$mirror_url/$apache_url_suffix"; + } + } +} else { + print STDERR "Error fetching Apache mirrors list $apache_mirrors_list_url"; + exit(1); +} + +my $num_apache_mirrors = $#apache_mirrors; + +my $sleep_interval = 0; +while (1) { + print "\n", strftime('%d-%b-%Y %H:%M:%S', localtime); + print "\nPolling $#apache_mirrors Apache Mirrors"; + print " and Maven Central" unless ($maven_available); + print "...\n"; + + my $start = time(); + $maven_available = (200 == $agent->get($maven_url)->code) + unless ($maven_available); + @apache_mirrors = &check_mirrors; + my $stop = time(); + $sleep_interval = $interval - ($stop - $start); + + my $num_downloadable_apache_mirrors = $num_apache_mirrors - $#apache_mirrors; + print "$version is ", ($maven_available ? 
"" : "not "), + "downloadable from Maven Central.\n"; + printf "$version is downloadable from %d/%d Apache Mirrors (%0.1f%%)\n", + $num_downloadable_apache_mirrors, $num_apache_mirrors, + ($num_downloadable_apache_mirrors*100/$num_apache_mirrors); + + last if ($maven_available && 0 == $#apache_mirrors); + + if ($sleep_interval > 0) { + print "Sleeping for $sleep_interval seconds...\n"; + sleep($sleep_interval) + } +} + +sub check_mirrors { + my @not_yet_downloadable_apache_mirrors; + for my $mirror (@apache_mirrors) { + push @not_yet_downloadable_apache_mirrors, $mirror + unless (200 == $agent->get($mirror)->code); + print "."; + } + print "\n"; + return @not_yet_downloadable_apache_mirrors; +} diff --git a/dev-tools/scripts/smokeTestRelease.py b/dev-tools/scripts/smokeTestRelease.py new file mode 100644 index 00000000000..754840affa5 --- /dev/null +++ b/dev-tools/scripts/smokeTestRelease.py @@ -0,0 +1,407 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil +import hashlib +import httplib +import re +import urllib2 +import urlparse +import sys +import HTMLParser + +# This tool expects to find /lucene and /solr off the base URL. You +# must have a working gpg, tar, unzip in your path. 
This has only +# been tested on Linux so far! + +# http://s.apache.org/lusolr32rc2 + +# TODO +# + verify KEYS contains key that signed the release +# + make sure changes HTML looks ok +# - verify license/notice of all dep jars +# - check maven +# - check JAR manifest version +# - check license/notice exist +# - check no "extra" files +# - make sure jars exist inside bin release +# - run "ant test" +# - make sure docs exist +# - use java5 for lucene/modules + +reHREF = re.compile('(.*?)') + +# Set to True to avoid re-downloading the packages... +DEBUG = False + +def getHREFs(urlString): + + # Deref any redirects + while True: + url = urlparse.urlparse(urlString) + h = httplib.HTTPConnection(url.netloc) + h.request('GET', url.path) + r = h.getresponse() + newLoc = r.getheader('location') + if newLoc is not None: + urlString = newLoc + else: + break + + links = [] + for subUrl, text in reHREF.findall(urllib2.urlopen(urlString).read()): + fullURL = urlparse.urljoin(urlString, subUrl) + links.append((text, fullURL)) + return links + +def download(name, urlString, tmpDir): + fileName = '%s/%s' % (tmpDir, name) + if DEBUG and os.path.exists(fileName): + if fileName.find('.asc') == -1: + print ' already done: %.1f MB' % (os.path.getsize(fileName)/1024./1024.) + return + fIn = urllib2.urlopen(urlString) + fOut = open(fileName, 'wb') + success = False + try: + while True: + s = fIn.read(65536) + if s == '': + break + fOut.write(s) + fOut.close() + fIn.close() + success = True + finally: + fIn.close() + fOut.close() + if not success: + os.remove(fileName) + if fileName.find('.asc') == -1: + print ' %.1f MB' % (os.path.getsize(fileName)/1024./1024.) + +def load(urlString): + return urllib2.urlopen(urlString).read() + +def checkSigs(project, urlString, version, tmpDir): + + print ' test basics...' 
+ ents = getDirEntries(urlString) + artifact = None + keysURL = None + changesURL = None + mavenURL = None + expectedSigs = ['asc', 'md5', 'sha1'] + artifacts = [] + for text, subURL in ents: + if text == 'KEYS': + keysURL = subURL + elif text == 'maven/': + mavenURL = subURL + elif text.startswith('changes'): + if text not in ('changes/', 'changes-%s/' % version): + raise RuntimeError('%s: found %s vs expected changes-%s/' % (project, text, version)) + changesURL = subURL + elif artifact == None: + artifact = text + artifactURL = subURL + if project == 'solr': + expected = 'apache-solr-%s' % version + else: + expected = 'lucene-%s' % version + if not artifact.startswith(expected): + raise RuntimeError('%s: unknown artifact %s: expected prefix %s' % (project, text, expected)) + sigs = [] + elif text.startswith(artifact + '.'): + sigs.append(text[len(artifact)+1:]) + else: + if sigs != expectedSigs: + raise RuntimeError('%s: artifact %s has wrong sigs: expected %s but got %s' % (project, artifact, expectedSigs, sigs)) + artifacts.append((artifact, artifactURL)) + artifact = text + artifactURL = subURL + sigs = [] + + if sigs != []: + artifacts.append((artifact, artifactURL)) + if sigs != expectedSigs: + raise RuntimeError('%s: artifact %s has wrong sigs: expected %s but got %s' % (project, artifact, expectedSigs, sigs)) + + if project == 'lucene': + expected = ['lucene-%s-src.tgz' % version, + 'lucene-%s.tgz' % version, + 'lucene-%s.zip' % version] + else: + expected = ['apache-solr-%s-src.tgz' % version, + 'apache-solr-%s.tgz' % version, + 'apache-solr-%s.zip' % version] + + actual = [x[0] for x in artifacts] + if expected != actual: + raise RuntimeError('%s: wrong artifacts: expected %s but got %s' % (project, expected, actual)) + + if keysURL is None: + raise RuntimeError('%s is missing KEYS' % project) + + download('%s.KEYS' % project, keysURL, tmpDir) + + keysFile = '%s/%s.KEYS' % (tmpDir, project) + + # Set up clean gpg world; import keys file: + gpgHomeDir = 
'%s/%s.gpg' % (tmpDir, project) + if os.path.exists(gpgHomeDir): + shutil.rmtree(gpgHomeDir) + os.makedirs(gpgHomeDir, 0700) + run('gpg --homedir %s --import %s' % (gpgHomeDir, keysFile), + '%s/%s.gpg.import.log 2>&1' % (tmpDir, project)) + + if mavenURL is None: + raise RuntimeError('%s is missing maven' % project) + + if project == 'lucene': + if changesURL is None: + raise RuntimeError('%s is missing changes-%s' % (project, version)) + testChanges(project, version, changesURL) + + for artifact, urlString in artifacts: + print ' download %s...' % artifact + download(artifact, urlString, tmpDir) + verifyDigests(artifact, urlString, tmpDir) + + print ' verify sig' + # Test sig + download(artifact + '.asc', urlString + '.asc', tmpDir) + sigFile = '%s/%s.asc' % (tmpDir, artifact) + artifactFile = '%s/%s' % (tmpDir, artifact) + logFile = '%s/%s.%s.gpg.verify.log' % (tmpDir, project, artifact) + run('gpg --homedir %s --verify %s %s' % (gpgHomeDir, sigFile, artifactFile), + logFile) + # Forward any GPG warnings: + f = open(logFile, 'rb') + for line in f.readlines(): + if line.lower().find('warning') != -1: + print ' GPG: %s' % line.strip() + f.close() + +def testChanges(project, version, changesURLString): + print ' check changes HTML...' 
+ changesURL = None + contribChangesURL = None + for text, subURL in getDirEntries(changesURLString): + if text == 'Changes.html': + changesURL = subURL + elif text == 'Contrib-Changes.html': + contribChangesURL = subURL + + if changesURL is None: + raise RuntimeError('did not see Changes.html link from %s' % changesURLString) + if contribChangesURL is None: + raise RuntimeError('did not see Contrib-Changes.html link from %s' % changesURLString) + + s = load(changesURL) + + if s.find('Release %s' % version) == -1: + raise RuntimeError('did not see "Release %s" in %s' % (version, changesURL)) + +def run(command, logFile): + if os.system('%s > %s 2>&1' % (command, logFile)): + raise RuntimeError('command "%s" failed; see log file %s' % (command, logFile)) + +def verifyDigests(artifact, urlString, tmpDir): + print ' verify md5/sha1 digests' + md5Expected, t = load(urlString + '.md5').strip().split() + if t != '*'+artifact: + raise RuntimeError('MD5 %s.md5 lists artifact %s but expected *%s' % (urlString, t, artifact)) + + sha1Expected, t = load(urlString + '.sha1').strip().split() + if t != '*'+artifact: + raise RuntimeError('SHA1 %s.sha1 lists artifact %s but expected *%s' % (urlString, t, artifact)) + + m = hashlib.md5() + s = hashlib.sha1() + f = open('%s/%s' % (tmpDir, artifact)) + while True: + x = f.read(65536) + if x == '': + break + m.update(x) + s.update(x) + f.close() + md5Actual = m.hexdigest() + sha1Actual = s.hexdigest() + if md5Actual != md5Expected: + raise RuntimeError('MD5 digest mismatch for %s: expected %s but got %s' % (artifact, md5Expected, md5Actual)) + if sha1Actual != sha1Expected: + raise RuntimeError('SHA1 digest mismatch for %s: expected %s but got %s' % (artifact, sha1Expected, sha1Actual)) + +def getDirEntries(urlString): + links = getHREFs(urlString) + for i, (text, subURL) in enumerate(links): + if text == 'Parent Directory': + return links[(i+1):] + +def unpack(project, tmpDir, artifact, version): + destDir = '%s/unpack' % tmpDir + if 
os.path.exists(destDir): + shutil.rmtree(destDir) + os.makedirs(destDir) + os.chdir(destDir) + print ' unpack %s...' % artifact + unpackLogFile = '%s/%s-unpack-%s.log' % (tmpDir, project, artifact) + if artifact.endswith('.tar.gz') or artifact.endswith('.tgz'): + run('tar xzf %s/%s' % (tmpDir, artifact), unpackLogFile) + elif artifact.endswith('.zip'): + run('unzip %s/%s' % (tmpDir, artifact), unpackLogFile) + + # make sure it unpacks to proper subdir + l = os.listdir(destDir) + if project == 'solr': + expected = 'apache-%s-%s' % (project, version) + else: + expected = '%s-%s' % (project, version) + if l != [expected]: + raise RuntimeError('unpack produced entries %s; expected only %s' % (l, expected)) + + unpackPath = '%s/%s' % (destDir, expected) + verifyUnpacked(project, artifact, unpackPath, version) + +def verifyUnpacked(project, artifact, unpackPath, version): + os.chdir(unpackPath) + isSrc = artifact.find('-src') != -1 + l = os.listdir(unpackPath) + textFiles = ['LICENSE', 'NOTICE', 'README'] + if project == 'lucene': + textFiles.extend(('JRE_VERSION_MIGRATION', 'CHANGES')) + if isSrc: + textFiles.append('BUILD') + for fileName in textFiles: + fileName += '.txt' + if fileName not in l: + raise RuntimeError('file "%s" is missing from artifact %s' % (fileName, artifact)) + l.remove(fileName) + + if not isSrc: + if project == 'lucene': + expectedJARs = ('lucene-core-%s' % version, + 'lucene-core-%s-javadoc' % version, + 'lucene-test-framework-%s' % version, + 'lucene-test-framework-%s-javadoc' % version) + else: + expectedJARs = () + + for fileName in expectedJARs: + fileName += '.jar' + if fileName not in l: + raise RuntimeError('%s: file "%s" is missing from artifact %s' % (project, fileName, artifact)) + l.remove(fileName) + + if project == 'lucene': + extras = ('lib', 'docs', 'contrib') + if isSrc: + extras += ('build.xml', 'index.html', 'common-build.xml', 'src', 'backwards') + else: + extras = () + + for e in extras: + if e not in l: + raise 
RuntimeError('%s: %s missing from artifact %s' % (project, e, artifact)) + l.remove(e) + + if project == 'lucene': + if len(l) > 0: + raise RuntimeError('%s: unexpected files/dirs in artifact %s: %s' % (project, artifact, l)) + + if isSrc: + if project == 'lucene': + print ' run tests w/ Java 5...' + run('export JAVA_HOME=/usr/local/src/jdk1.5.0_22; ant test', '%s/test.log' % unpackPath) + run('export JAVA_HOME=/usr/local/src/jdk1.5.0_22; ant jar', '%s/compile.log' % unpackPath) + testDemo(isSrc) + else: + print ' run tests w/ Java 6...' + run('export JAVA_HOME=/usr/local/src/jdk1.6.0_21; ant test', '%s/test.log' % unpackPath) + else: + if project == 'lucene': + testDemo(isSrc) + +def testDemo(isSrc): + print ' test demo...' + if isSrc: + cp = 'build/lucene-core-3.2-SNAPSHOT.jar:build/contrib/demo/lucene-demo-3.2-SNAPSHOT.jar' + docsDir = 'src' + else: + cp = 'lucene-core-3.2.0.jar:contrib/demo/lucene-demo-3.2.0.jar' + docsDir = 'docs' + run('export JAVA_HOME=/usr/local/src/jdk1.5.0_22; java -cp %s org.apache.lucene.demo.IndexFiles -index index -docs %s' % (cp, docsDir), 'index.log') + run('export JAVA_HOME=/usr/local/src/jdk1.5.0_22; java -cp %s org.apache.lucene.demo.SearchFiles -index index -query lucene' % cp, 'search.log') + reMatchingDocs = re.compile('(\d+) total matching documents') + m = reMatchingDocs.search(open('search.log', 'rb').read()) + if m is None: + raise RuntimeError('lucene demo\'s SearchFiles found no results') + else: + numHits = int(m.group(1)) + if numHits < 100: + raise RuntimeError('lucene demo\'s SearchFiles found too few results: %s' % numHits) + print ' got %d hits for query "lucene"' % numHits + +def main(): + + if len(sys.argv) != 4: + print + print 'Usage python -u %s BaseURL version tmpDir' % sys.argv[0] + print + sys.exit(1) + + baseURL = sys.argv[1] + version = sys.argv[2] + tmpDir = os.path.abspath(sys.argv[3]) + + if not DEBUG: + if os.path.exists(tmpDir): + raise RuntimeError('temp dir %s exists; please remove first' % tmpDir) 
+ os.makedirs(tmpDir) + + lucenePath = None + solrPath = None + print 'Load release URL...' + for text, subURL in getDirEntries(baseURL): + if text.lower().find('lucene') != -1: + lucenePath = subURL + elif text.lower().find('solr') != -1: + solrPath = subURL + + if lucenePath is None: + raise RuntimeError('could not find lucene subdir') + if solrPath is None: + raise RuntimeError('could not find solr subdir') + + print + print 'Test Lucene...' + checkSigs('lucene', lucenePath, version, tmpDir) + for artifact in ('lucene-%s.tgz' % version, 'lucene-%s.zip' % version): + unpack('lucene', tmpDir, artifact, version) + unpack('lucene', tmpDir, 'lucene-%s-src.tgz' % version, version) + + print + print 'Test Solr...' + checkSigs('solr', solrPath, version, tmpDir) + for artifact in ('apache-solr-%s.tgz' % version, 'apache-solr-%s.zip' % version): + unpack('solr', tmpDir, artifact, version) + unpack('solr', tmpDir, 'apache-solr-%s-src.tgz' % version, version) + +if __name__ == '__main__': + main() + diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index 94224583d87..a95e04a4f7b 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -433,6 +433,10 @@ Bug fixes with more document deletions is requested before a reader with fewer deletions, provided they share some segments. (yonik) +* LUCENE-2645: Fix false assertion error when same token was added one + after another with 0 posIncr. (Kurosaka Teruhiko via Mike + McCandless) + ======================= Lucene 3.x (not yet released) ================ Changes in backwards compatibility policy @@ -458,6 +462,9 @@ Bug fixes including locks, and fails if the test fails to release all of them. (Mike McCandless, Robert Muir, Shai Erera, Simon Willnauer) +* LUCENE-3102: CachingCollector.replay was failing to call setScorer + per-segment (Martijn van Groningen via Mike McCandless) + New Features * LUCENE-3140: Added experimental FST implementation to Lucene. 
diff --git a/lucene/contrib/CHANGES.txt b/lucene/contrib/CHANGES.txt index 1aeba0c32cb..e45af1f3640 100644 --- a/lucene/contrib/CHANGES.txt +++ b/lucene/contrib/CHANGES.txt @@ -75,6 +75,10 @@ API Changes * LUCENE-3141: add getter method to access fragInfos in FieldFragList. (Sujit Pal via Koji Sekiguchi) + * LUCENE-3099: Allow subclasses to determine the group value for + First/SecondPassGroupingCollector. (Martijn van Groningen, Mike + McCandless) + Build * LUCENE-3149: Upgrade contrib/icu's ICU jar file to ICU 4.8. diff --git a/lucene/src/java/org/apache/lucene/index/TermsHashPerField.java b/lucene/src/java/org/apache/lucene/index/TermsHashPerField.java index f3d705e4433..1e8df8beef6 100644 --- a/lucene/src/java/org/apache/lucene/index/TermsHashPerField.java +++ b/lucene/src/java/org/apache/lucene/index/TermsHashPerField.java @@ -181,9 +181,9 @@ final class TermsHashPerField extends InvertedDocConsumerPerField { // term text into textStart address // Get the text & hash of this term. int termID; - try{ - termID = bytesHash.add(termBytesRef, termAtt.fillBytesRef()); - }catch (MaxBytesLengthExceededException e) { + try { + termID = bytesHash.add(termBytesRef, termAtt.fillBytesRef()); + } catch (MaxBytesLengthExceededException e) { // Not enough room in current block // Just skip this term, to remain as robust as // possible during indexing. 
A TokenFilter diff --git a/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsWriterImpl.java b/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsWriterImpl.java index e4c5a484865..ac72d5a0125 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsWriterImpl.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsWriterImpl.java @@ -230,7 +230,7 @@ public final class SepPostingsWriterImpl extends PostingsWriterBase { assert !omitTF; final int delta = position - lastPosition; - assert delta > 0 || position == 0: "position=" + position + " lastPosition=" + lastPosition; // not quite right (if pos=0 is repeated twice we don't catch it) + assert delta >= 0: "position=" + position + " lastPosition=" + lastPosition; // not quite right (if pos=0 is repeated twice we don't catch it) lastPosition = position; if (storePayloads) { diff --git a/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsWriter.java b/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsWriter.java index 457e3c24821..474485be200 100644 --- a/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsWriter.java +++ b/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsWriter.java @@ -193,8 +193,8 @@ public final class StandardPostingsWriter extends PostingsWriterBase { assert proxOut != null; final int delta = position - lastPosition; - - assert delta > 0 || position == 0: "position=" + position + " lastPosition=" + lastPosition; // not quite right (if pos=0 is repeated twice we don't catch it) + + assert delta >= 0: "position=" + position + " lastPosition=" + lastPosition; lastPosition = position; diff --git a/lucene/src/java/org/apache/lucene/search/CachingCollector.java b/lucene/src/java/org/apache/lucene/search/CachingCollector.java index 2b90a394748..c17602794fb 100644 --- a/lucene/src/java/org/apache/lucene/search/CachingCollector.java +++ 
b/lucene/src/java/org/apache/lucene/search/CachingCollector.java @@ -168,10 +168,10 @@ public abstract class CachingCollector extends Collector { int curUpto = 0; int curBase = 0; int chunkUpto = 0; - other.setScorer(cachedScorer); curDocs = EMPTY_INT_ARRAY; for (SegStart seg : cachedSegs) { other.setNextReader(seg.readerContext); + other.setScorer(cachedScorer); while (curBase + curUpto < seg.end) { if (curUpto == curDocs.length) { curBase += curDocs.length; diff --git a/lucene/src/java/org/apache/lucene/util/fst/Builder.java b/lucene/src/java/org/apache/lucene/util/fst/Builder.java index b5736264e2d..99378cfb2fa 100644 --- a/lucene/src/java/org/apache/lucene/util/fst/Builder.java +++ b/lucene/src/java/org/apache/lucene/util/fst/Builder.java @@ -21,6 +21,7 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.RamUsageEstimator; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IntsRef; +import org.apache.lucene.util.fst.FST.INPUT_TYPE; import java.io.IOException; @@ -69,6 +70,42 @@ public class Builder { // current "frontier" private UnCompiledNode[] frontier; + /** + * Instantiates an FST/FSA builder without any pruning. A shortcut + * to {@link #Builder(FST.INPUT_TYPE, int, int, boolean, Outputs)} with + * pruning options turned off. + */ + public Builder(FST.INPUT_TYPE inputType, Outputs outputs) + { + this(inputType, 0, 0, true, outputs); + } + + /** + * Instantiates an FST/FSA builder with all the possible tuning and construction + * tweaks. Read parameter documentation carefully. + * + * @param inputType + * The input type (transition labels). Can be anything from {@link INPUT_TYPE} + * enumeration. Shorter types will consume less memory. Strings (character sequences) are + * represented as {@link INPUT_TYPE#BYTE4} (full unicode codepoints). + * + * @param minSuffixCount1 + * If pruning the input graph during construction, this threshold is used for telling + * if a node is kept or pruned. 
If transition_count(node) >= minSuffixCount1, the node + * is kept. + * + * @param minSuffixCount2 + * (Note: only Mike McCandless knows what this one is really doing...) + * + * @param doMinSuffix + * If true, the shared suffixes will be compacted into unique paths. + * This requires an additional hash map for lookups in memory. Setting this parameter to + * false creates a single path for all input sequences. This will result in a larger + * graph, but may require less memory and will speed up construction. + * @param outputs The output type for each input sequence. Applies only if building an FST. For + * FSA, use {@link NoOutputs#getSingleton()} and {@link NoOutputs#getNoOutput()} as the + * singleton output object. + */ public Builder(FST.INPUT_TYPE inputType, int minSuffixCount1, int minSuffixCount2, boolean doMinSuffix, Outputs outputs) { this.minSuffixCount1 = minSuffixCount1; this.minSuffixCount2 = minSuffixCount2; diff --git a/lucene/src/java/org/apache/lucene/util/fst/FST.java b/lucene/src/java/org/apache/lucene/util/fst/FST.java index 04428c6b356..bbed472acee 100644 --- a/lucene/src/java/org/apache/lucene/util/fst/FST.java +++ b/lucene/src/java/org/apache/lucene/util/fst/FST.java @@ -147,7 +147,7 @@ public class FST { return flag(BIT_LAST_ARC); } - boolean isFinal() { + public boolean isFinal() { return flag(BIT_FINAL_ARC); } }; diff --git a/lucene/src/java/org/apache/lucene/util/fst/NoOutputs.java b/lucene/src/java/org/apache/lucene/util/fst/NoOutputs.java index 40404a3fb90..1b1e97e12f5 100644 --- a/lucene/src/java/org/apache/lucene/util/fst/NoOutputs.java +++ b/lucene/src/java/org/apache/lucene/util/fst/NoOutputs.java @@ -28,7 +28,7 @@ import org.apache.lucene.store.DataOutput; public final class NoOutputs extends Outputs { - final Object NO_OUTPUT = new Object() { + static final Object NO_OUTPUT = new Object() { // NodeHash calls hashCode for this output; we fix this // so we get deterministic hashing. 
@Override diff --git a/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java b/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java index 655d81b092a..322461e7645 100644 --- a/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java +++ b/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java @@ -60,6 +60,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.LockFactory; import org.apache.lucene.store.MockDirectoryWrapper; +import org.apache.lucene.store.MockDirectoryWrapper.Throttling; import org.apache.lucene.util.FieldCacheSanityChecker.Insanity; import org.junit.*; import org.junit.rules.TestWatchman; @@ -160,6 +161,8 @@ public abstract class LuceneTestCase extends Assert { public static final String TEST_LINE_DOCS_FILE = System.getProperty("tests.linedocsfile", "europarl.lines.txt.gz"); /** whether or not to clean threads between test invocations: "false", "perMethod", "perClass" */ public static final String TEST_CLEAN_THREADS = System.getProperty("tests.cleanthreads", "perClass"); + /** whether or not to clean threads between test invocations: "false", "perMethod", "perClass" */ + public static final Throttling TEST_THROTTLING = TEST_NIGHTLY ? 
Throttling.SOMETIMES : Throttling.NEVER; private static final Pattern codecWithParam = Pattern.compile("(.*)\\(\\s*(\\d+)\\s*\\)"); @@ -938,8 +941,9 @@ public abstract class LuceneTestCase extends Assert { Directory impl = newDirectoryImpl(r, TEST_DIRECTORY); MockDirectoryWrapper dir = new MockDirectoryWrapper(r, impl); stores.put(dir, Thread.currentThread().getStackTrace()); + dir.setThrottling(TEST_THROTTLING); return dir; - } + } /** * Returns a new Directory instance, with contents copied from the @@ -985,6 +989,7 @@ public abstract class LuceneTestCase extends Assert { dir.setLockFactory(lf); } stores.put(dir, Thread.currentThread().getStackTrace()); + dir.setThrottling(TEST_THROTTLING); return dir; } catch (Exception e) { throw new RuntimeException(e); @@ -1003,6 +1008,7 @@ public abstract class LuceneTestCase extends Assert { } MockDirectoryWrapper dir = new MockDirectoryWrapper(r, impl); stores.put(dir, Thread.currentThread().getStackTrace()); + dir.setThrottling(TEST_THROTTLING); return dir; } diff --git a/lucene/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java b/lucene/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java new file mode 100644 index 00000000000..b46c37d965d --- /dev/null +++ b/lucene/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java @@ -0,0 +1,82 @@ +package org.apache.lucene.index; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.IOException; +import java.io.Reader; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; +import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.LuceneTestCase; + +public class TestSameTokenSamePosition extends LuceneTestCase { + + /** + * Attempt to reproduce an assertion error that happens + * only with the trunk version around April 2011. + * @param args + */ + public void test() throws Exception { + Directory dir = newDirectory(); + RandomIndexWriter riw = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new BugReproAnalyzer())); + Document doc = new Document(); + doc.add(new Field("eng", "Six drunken" /*This shouldn't matter. 
*/, + Field.Store.YES, Field.Index.ANALYZED)); + riw.addDocument(doc); + riw.close(); + dir.close(); + } +} + +final class BugReproAnalyzer extends Analyzer{ + @Override + public TokenStream tokenStream(String arg0, Reader arg1) { + return new BugReproAnalyzerTokenizer(); + } +} + +final class BugReproAnalyzerTokenizer extends TokenStream { + private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); + private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); + private final PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class); + int tokenCount = 4; + int nextTokenIndex = 0; + String terms[] = new String[]{"six", "six", "drunken", "drunken"}; + int starts[] = new int[]{0, 0, 4, 4}; + int ends[] = new int[]{3, 3, 11, 11}; + int incs[] = new int[]{1, 0, 1, 0}; + + @Override + public boolean incrementToken() throws IOException { + if (nextTokenIndex < tokenCount) { + termAtt.setEmpty().append(terms[nextTokenIndex]); + offsetAtt.setOffset(starts[nextTokenIndex], ends[nextTokenIndex]); + posIncAtt.setPositionIncrement(incs[nextTokenIndex]); + nextTokenIndex++; + return true; + } else { + return false; + } + } +} diff --git a/modules/grouping/src/java/org/apache/lucene/search/grouping/AbstractAllGroupsCollector.java b/modules/grouping/src/java/org/apache/lucene/search/grouping/AbstractAllGroupsCollector.java new file mode 100644 index 00000000000..b8ac5f84411 --- /dev/null +++ b/modules/grouping/src/java/org/apache/lucene/search/grouping/AbstractAllGroupsCollector.java @@ -0,0 +1,67 @@ +package org.apache.lucene.search.grouping; + +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.util.BytesRef; + +import java.io.IOException; +import java.util.Collection; + +/** + * A collector that collects all groups that match the + * query. Only the group value is collected, and the order + * is undefined. This collector does not determine + * the most relevant document of a group. + * + *

+ * This is an abstract version. Concrete implementations define + * what a group actually is and how it is internally collected. + * + * @lucene.experimental + */ +public abstract class AbstractAllGroupsCollector extends Collector { + + /** + * Returns the total number of groups for the executed search. + * This is a convenience method. The following code snippet has the same effect:

getGroups().size()
+ * + * @return The total number of groups for the executed search + */ + public int getGroupCount() { + return getGroups().size(); + } + + /** + * Returns the group values + *

+ * This is an unordered collections of group values. For each group that matched the query there is a {@link BytesRef} + * representing a group value. + * + * @return the group values + */ + public abstract Collection getGroups(); + + // Empty not necessary + public void setScorer(Scorer scorer) throws IOException {} + + public boolean acceptsDocsOutOfOrder() { + return true; + } +} \ No newline at end of file diff --git a/modules/grouping/src/java/org/apache/lucene/search/grouping/FirstPassGroupingCollector.java b/modules/grouping/src/java/org/apache/lucene/search/grouping/AbstractFirstPassGroupingCollector.java similarity index 78% rename from modules/grouping/src/java/org/apache/lucene/search/grouping/FirstPassGroupingCollector.java rename to modules/grouping/src/java/org/apache/lucene/search/grouping/AbstractFirstPassGroupingCollector.java index 64dd0429f41..95f56911e7c 100644 --- a/modules/grouping/src/java/org/apache/lucene/search/grouping/FirstPassGroupingCollector.java +++ b/modules/grouping/src/java/org/apache/lucene/search/grouping/AbstractFirstPassGroupingCollector.java @@ -17,56 +17,39 @@ package org.apache.lucene.search.grouping; * limitations under the License. */ -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Comparator; -import java.util.HashMap; -import java.util.TreeSet; - import org.apache.lucene.index.IndexReader.AtomicReaderContext; -import org.apache.lucene.search.Collector; -import org.apache.lucene.search.FieldCache; -import org.apache.lucene.search.FieldComparator; -import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.SortField; -import org.apache.lucene.util.BytesRef; +import org.apache.lucene.search.*; + +import java.io.IOException; +import java.util.*; /** FirstPassGroupingCollector is the first of two passes necessary * to collect grouped hits. This pass gathers the top N sorted - * groups. + * groups. 
Concrete subclasses define what a group is and how it + * is internally collected. * *

See {@link org.apache.lucene.search.grouping} for more * details including a full code example.

* * @lucene.experimental */ +abstract public class AbstractFirstPassGroupingCollector extends Collector { -public class FirstPassGroupingCollector extends Collector { - - private final String groupField; private final Sort groupSort; private final FieldComparator[] comparators; private final int[] reversed; private final int topNGroups; - private final HashMap groupMap; - private final BytesRef scratchBytesRef = new BytesRef(); + private final HashMap> groupMap; private final int compIDXEnd; // Set once we reach topNGroups unique groups: - private TreeSet orderedGroups; + private TreeSet> orderedGroups; private int docBase; private int spareSlot; - private FieldCache.DocTermsIndex index; /** * Create the first pass collector. * - * @param groupField The field used to group - * documents. This field must be single-valued and - * indexed (FieldCache is used to access its value - * per-document). * @param groupSort The {@link Sort} used to sort the * groups. The top sorted document within each group * according to groupSort, determines how that group @@ -74,13 +57,13 @@ public class FirstPassGroupingCollector extends Collector { * ie, if you want to groupSort by relevance use * Sort.RELEVANCE. * @param topNGroups How many top groups to keep. + * @throws IOException If I/O related errors occur */ - public FirstPassGroupingCollector(String groupField, Sort groupSort, int topNGroups) throws IOException { + public AbstractFirstPassGroupingCollector(Sort groupSort, int topNGroups) throws IOException { if (topNGroups < 1) { throw new IllegalArgumentException("topNGroups must be >= 1 (got " + topNGroups + ")"); } - this.groupField = groupField; // TODO: allow null groupSort to mean "by relevance", // and specialize it? 
this.groupSort = groupSort; @@ -100,13 +83,19 @@ public class FirstPassGroupingCollector extends Collector { } spareSlot = topNGroups; - groupMap = new HashMap(topNGroups); + groupMap = new HashMap>(topNGroups); } - /** Returns top groups, starting from offset. This may - * return null, if no groups were collected, or if the - * number of unique groups collected is <= offset. */ - public Collection getTopGroups(int groupOffset, boolean fillFields) { + /** + * Returns top groups, starting from offset. This may + * return null, if no groups were collected, or if the + * number of unique groups collected is <= offset. + * + * @param groupOffset The offset in the collected groups + * @param fillFields Whether to fill to {@link SearchGroup#sortValues} + * @return top groups, starting from offset + */ + public Collection> getTopGroups(int groupOffset, boolean fillFields) { //System.out.println("FP.getTopGroups groupOffset=" + groupOffset + " fillFields=" + fillFields + " groupMap.size()=" + groupMap.size()); @@ -122,15 +111,15 @@ public class FirstPassGroupingCollector extends Collector { buildSortedSet(); } - final Collection result = new ArrayList(); + final Collection> result = new ArrayList>(); int upto = 0; final int sortFieldCount = groupSort.getSort().length; - for(CollectedSearchGroup group : orderedGroups) { + for(CollectedSearchGroup group : orderedGroups) { if (upto++ < groupOffset) { continue; } //System.out.println(" group=" + (group.groupValue == null ? 
"null" : group.groupValue.utf8ToString())); - SearchGroup searchGroup = new SearchGroup(); + SearchGroup searchGroup = new SearchGroup(); searchGroup.groupValue = group.groupValue; if (fillFields) { searchGroup.sortValues = new Comparable[sortFieldCount]; @@ -144,10 +133,6 @@ public class FirstPassGroupingCollector extends Collector { return result; } - public String getGroupField() { - return groupField; - } - @Override public void setScorer(Scorer scorer) throws IOException { for (FieldComparator comparator : comparators) { @@ -189,13 +174,9 @@ public class FirstPassGroupingCollector extends Collector { // TODO: should we add option to mean "ignore docs that // don't have the group field" (instead of stuffing them // under null group)? - final int ord = index.getOrd(doc); - //System.out.println(" ord=" + ord); + final GROUP_VALUE_TYPE groupValue = getDocGroupValue(doc); - final BytesRef br = ord == 0 ? null : index.lookup(ord, scratchBytesRef); - //System.out.println(" group=" + (br == null ? "null" : br.utf8ToString())); - - final CollectedSearchGroup group = groupMap.get(br); + final CollectedSearchGroup group = groupMap.get(groupValue); if (group == null) { @@ -210,8 +191,8 @@ public class FirstPassGroupingCollector extends Collector { // just keep collecting them // Add a new CollectedSearchGroup: - CollectedSearchGroup sg = new CollectedSearchGroup(); - sg.groupValue = ord == 0 ? null : new BytesRef(scratchBytesRef); + CollectedSearchGroup sg = new CollectedSearchGroup(); + sg.groupValue = copyDocGroupValue(groupValue, null); sg.comparatorSlot = groupMap.size(); sg.topDoc = docBase + doc; for (FieldComparator fc : comparators) { @@ -233,20 +214,14 @@ public class FirstPassGroupingCollector extends Collector { // the bottom group with this new group. 
// java 6-only: final CollectedSearchGroup bottomGroup = orderedGroups.pollLast(); - final CollectedSearchGroup bottomGroup = orderedGroups.last(); + final CollectedSearchGroup bottomGroup = orderedGroups.last(); orderedGroups.remove(bottomGroup); assert orderedGroups.size() == topNGroups -1; groupMap.remove(bottomGroup.groupValue); // reuse the removed CollectedSearchGroup - if (br == null) { - bottomGroup.groupValue = null; - } else if (bottomGroup.groupValue != null) { - bottomGroup.groupValue.copy(br); - } else { - bottomGroup.groupValue = new BytesRef(br); - } + bottomGroup.groupValue = copyDocGroupValue(groupValue, bottomGroup.groupValue); bottomGroup.topDoc = docBase + doc; for (FieldComparator fc : comparators) { @@ -291,7 +266,7 @@ public class FirstPassGroupingCollector extends Collector { // Remove before updating the group since lookup is done via comparators // TODO: optimize this - final CollectedSearchGroup prevLast; + final CollectedSearchGroup prevLast; if (orderedGroups != null) { prevLast = orderedGroups.last(); orderedGroups.remove(group); @@ -336,7 +311,7 @@ public class FirstPassGroupingCollector extends Collector { } }; - orderedGroups = new TreeSet(comparator); + orderedGroups = new TreeSet>(comparator); orderedGroups.addAll(groupMap.values()); assert orderedGroups.size() > 0; @@ -353,15 +328,31 @@ public class FirstPassGroupingCollector extends Collector { @Override public void setNextReader(AtomicReaderContext readerContext) throws IOException { docBase = readerContext.docBase; - index = FieldCache.DEFAULT.getTermsIndex(readerContext.reader, groupField); - for (int i=0; i extends SearchGroup { int topDoc; int comparatorSlot; } diff --git a/modules/grouping/src/java/org/apache/lucene/search/grouping/AbstractSecondPassGroupingCollector.java b/modules/grouping/src/java/org/apache/lucene/search/grouping/AbstractSecondPassGroupingCollector.java new file mode 100644 index 00000000000..4d91d218a7c --- /dev/null +++ 
b/modules/grouping/src/java/org/apache/lucene/search/grouping/AbstractSecondPassGroupingCollector.java @@ -0,0 +1,156 @@ +package org.apache.lucene.search.grouping; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import org.apache.lucene.index.IndexReader.AtomicReaderContext; +import org.apache.lucene.search.*; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +/** + * SecondPassGroupingCollector is the second of two passes + * necessary to collect grouped docs. This pass gathers the + * top N documents per top group computed from the + * first pass. Concrete subclasses define what a group is and how it + * is internally collected. + * + *

See {@link org.apache.lucene.search.grouping} for more + * details including a full code example.

+ * + * @lucene.experimental + */ +public abstract class AbstractSecondPassGroupingCollector extends Collector { + + protected final Map> groupMap; + private final int maxDocsPerGroup; + protected SearchGroupDocs[] groupDocs; + private final Collection> groups; + private final Sort withinGroupSort; + private final Sort groupSort; + + private int totalHitCount; + private int totalGroupedHitCount; + + public AbstractSecondPassGroupingCollector(Collection> groups, Sort groupSort, Sort withinGroupSort, + int maxDocsPerGroup, boolean getScores, boolean getMaxScores, boolean fillSortFields) + throws IOException { + + //System.out.println("SP init"); + if (groups.size() == 0) { + throw new IllegalArgumentException("no groups to collect (groups.size() is 0)"); + } + + this.groupSort = groupSort; + this.withinGroupSort = withinGroupSort; + this.groups = groups; + this.maxDocsPerGroup = maxDocsPerGroup; + groupMap = new HashMap>(groups.size()); + + for (SearchGroup group : groups) { + //System.out.println(" prep group=" + (group.groupValue == null ? "null" : group.groupValue.utf8ToString())); + final TopDocsCollector collector; + if (withinGroupSort == null) { + // Sort by score + collector = TopScoreDocCollector.create(maxDocsPerGroup, true); + } else { + // Sort by fields + collector = TopFieldCollector.create(withinGroupSort, maxDocsPerGroup, fillSortFields, getScores, getMaxScores, true); + } + groupMap.put(group.groupValue, + new SearchGroupDocs(group.groupValue, + collector)); + } + } + + @Override + public void setScorer(Scorer scorer) throws IOException { + for (SearchGroupDocs group : groupMap.values()) { + group.collector.setScorer(scorer); + } + } + + @Override + public void collect(int doc) throws IOException { + totalHitCount++; + SearchGroupDocs group = retrieveGroup(doc); + if (group != null) { + totalGroupedHitCount++; + group.collector.collect(doc); + } + } + + /** + * Returns the group the specified doc belongs to or null if no group could be retrieved. 
+ * + * @param doc The specified doc + * @return the group the specified doc belongs to or null if no group could be retrieved + * @throws IOException If an I/O related error occurred + */ + protected abstract SearchGroupDocs retrieveGroup(int doc) throws IOException; + + @Override + public void setNextReader(AtomicReaderContext readerContext) throws IOException { + //System.out.println("SP.setNextReader"); + for (SearchGroupDocs group : groupMap.values()) { + group.collector.setNextReader(readerContext); + } + } + + @Override + public boolean acceptsDocsOutOfOrder() { + return false; + } + + public TopGroups getTopGroups(int withinGroupOffset) { + @SuppressWarnings("unchecked") + final GroupDocs[] groupDocsResult = (GroupDocs[]) new GroupDocs[groups.size()]; + + int groupIDX = 0; + for(SearchGroup group : groups) { + final SearchGroupDocs groupDocs = groupMap.get(group.groupValue); + final TopDocs topDocs = groupDocs.collector.topDocs(withinGroupOffset, maxDocsPerGroup); + groupDocsResult[groupIDX++] = new GroupDocs(topDocs.getMaxScore(), + topDocs.totalHits, + topDocs.scoreDocs, + groupDocs.groupValue, + group.sortValues); + } + + return new TopGroups(groupSort.getSort(), + withinGroupSort == null ? null : withinGroupSort.getSort(), + totalHitCount, totalGroupedHitCount, groupDocsResult); + } + + + // TODO: merge with SearchGroup or not? 
+ // ad: don't need to build a new hashmap + // disad: blows up the size of SearchGroup if we need many of them, and couples implementations + public class SearchGroupDocs { + + public final GROUP_VALUE_TYPE groupValue; + public final TopDocsCollector collector; + + public SearchGroupDocs(GROUP_VALUE_TYPE groupValue, TopDocsCollector collector) { + this.groupValue = groupValue; + this.collector = collector; + } + } +} diff --git a/modules/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java b/modules/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java index d2c6eb18a13..7eb26fd9617 100644 --- a/modules/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java +++ b/modules/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java @@ -49,7 +49,7 @@ import org.apache.lucene.util.PriorityQueue; * being that the documents in each group must always be * indexed as a block. This collector also fills in * TopGroups.totalGroupCount without requiring the separate - * {@link AllGroupsCollector}. However, this collector does + * {@link TermAllGroupsCollector}. However, this collector does * not fill in the groupValue of each group; this field * will always be null. 
* @@ -212,7 +212,7 @@ public class BlockGroupingCollector extends Collector { // Swap pending scores final float[] savScores = og.scores; og.scores = pendingSubScores; - pendingSubScores = og.scores; + pendingSubScores = savScores; } og.readerContext = currentReaderContext; //og.groupOrd = lastGroupOrd; @@ -317,7 +317,8 @@ public class BlockGroupingCollector extends Collector { final FakeScorer fakeScorer = new FakeScorer(); - final GroupDocs[] groups = new GroupDocs[groupQueue.size() - groupOffset]; + @SuppressWarnings("unchecked") + final GroupDocs[] groups = new GroupDocs[groupQueue.size() - groupOffset]; for(int downTo=groupQueue.size()-groupOffset-1;downTo>=0;downTo--) { final OneGroup og = groupQueue.pop(); @@ -360,7 +361,7 @@ public class BlockGroupingCollector extends Collector { final TopDocs topDocs = collector.topDocs(withinGroupOffset, maxDocsPerGroup); - groups[downTo] = new GroupDocs(topDocs.getMaxScore(), + groups[downTo] = new GroupDocs(topDocs.getMaxScore(), og.count, topDocs.scoreDocs, null, @@ -375,7 +376,7 @@ public class BlockGroupingCollector extends Collector { } */ - return new TopGroups(new TopGroups(groupSort.getSort(), + return new TopGroups(new TopGroups(groupSort.getSort(), withinGroupSort == null ? null : withinGroupSort.getSort(), totalHitCount, totalGroupedHitCount, groups), totalGroupCount); diff --git a/modules/grouping/src/java/org/apache/lucene/search/grouping/GroupDocs.java b/modules/grouping/src/java/org/apache/lucene/search/grouping/GroupDocs.java index 164ba050ce9..9de84254874 100644 --- a/modules/grouping/src/java/org/apache/lucene/search/grouping/GroupDocs.java +++ b/modules/grouping/src/java/org/apache/lucene/search/grouping/GroupDocs.java @@ -18,15 +18,14 @@ package org.apache.lucene.search.grouping; */ import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.util.BytesRef; /** Represents one group in the results. 
* * @lucene.experimental */ -public class GroupDocs { +public class GroupDocs { /** The groupField value for all docs in this group; this * may be null if hits did not have the groupField. */ - public final BytesRef groupValue; + public final GROUP_VALUE_TYPE groupValue; /** Max score in this group */ public final float maxScore; @@ -40,13 +39,13 @@ public class GroupDocs { public final int totalHits; /** Matches the groupSort passed to {@link - * FirstPassGroupingCollector}. */ + * AbstractFirstPassGroupingCollector}. */ public final Comparable[] groupSortValues; public GroupDocs(float maxScore, int totalHits, ScoreDoc[] scoreDocs, - BytesRef groupValue, + GROUP_VALUE_TYPE groupValue, Comparable[] groupSortValues) { this.maxScore = maxScore; this.totalHits = totalHits; diff --git a/modules/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java b/modules/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java index ebee113818f..11820da35f6 100644 --- a/modules/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java +++ b/modules/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java @@ -17,10 +17,16 @@ package org.apache.lucene.search.grouping; * limitations under the License. */ -import org.apache.lucene.util.BytesRef; +/** + * Represents a group that is found during the first pass search. + * + * @lucene.experimental + */ +public class SearchGroup { -/** @lucene.experimental */ -public class SearchGroup { - public BytesRef groupValue; + /** The value that defines this group */ + public GROUP_VALUE_TYPE groupValue; + + /** The sort values used during sorting. Can be null. 
*/ public Comparable[] sortValues; } diff --git a/modules/grouping/src/java/org/apache/lucene/search/grouping/SecondPassGroupingCollector.java b/modules/grouping/src/java/org/apache/lucene/search/grouping/SecondPassGroupingCollector.java deleted file mode 100644 index 1d486f7cd6e..00000000000 --- a/modules/grouping/src/java/org/apache/lucene/search/grouping/SecondPassGroupingCollector.java +++ /dev/null @@ -1,172 +0,0 @@ -package org.apache.lucene.search.grouping; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.io.IOException; -import java.util.Collection; -import java.util.HashMap; - -import org.apache.lucene.index.IndexReader.AtomicReaderContext; -import org.apache.lucene.search.Collector; -import org.apache.lucene.search.FieldCache; -import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.TopDocsCollector; -import org.apache.lucene.search.TopFieldCollector; -import org.apache.lucene.search.TopScoreDocCollector; -import org.apache.lucene.util.BytesRef; - -/** - * SecondPassGroupingCollector is the second of two passes - * necessary to collect grouped docs. 
This pass gathers the - * top N documents per top group computed from the - * first pass. - * - *

See {@link org.apache.lucene.search.grouping} for more - * details including a full code example.

- * - * @lucene.experimental - */ -public class SecondPassGroupingCollector extends Collector { - private final HashMap groupMap; - - private FieldCache.DocTermsIndex index; - private final String groupField; - private final int maxDocsPerGroup; - private final SentinelIntSet ordSet; - private final SearchGroupDocs[] groupDocs; - private final BytesRef spareBytesRef = new BytesRef(); - private final Collection groups; - private final Sort withinGroupSort; - private final Sort groupSort; - - private int totalHitCount; - private int totalGroupedHitCount; - - public SecondPassGroupingCollector(String groupField, Collection groups, Sort groupSort, Sort withinGroupSort, - int maxDocsPerGroup, boolean getScores, boolean getMaxScores, boolean fillSortFields) - throws IOException { - - //System.out.println("SP init"); - if (groups.size() == 0) { - throw new IllegalArgumentException("no groups to collect (groups.size() is 0)"); - } - - this.groupSort = groupSort; - this.withinGroupSort = withinGroupSort; - this.groups = groups; - this.groupField = groupField; - this.maxDocsPerGroup = maxDocsPerGroup; - - groupMap = new HashMap(groups.size()); - - for (SearchGroup group : groups) { - //System.out.println(" prep group=" + (group.groupValue == null ? 
"null" : group.groupValue.utf8ToString())); - final TopDocsCollector collector; - if (withinGroupSort == null) { - // Sort by score - collector = TopScoreDocCollector.create(maxDocsPerGroup, true); - } else { - // Sort by fields - collector = TopFieldCollector.create(withinGroupSort, maxDocsPerGroup, fillSortFields, getScores, getMaxScores, true); - } - groupMap.put(group.groupValue, - new SearchGroupDocs(group.groupValue, - collector)); - } - - ordSet = new SentinelIntSet(groupMap.size(), -1); - groupDocs = new SearchGroupDocs[ordSet.keys.length]; - } - - @Override - public void setScorer(Scorer scorer) throws IOException { - for (SearchGroupDocs group : groupMap.values()) { - group.collector.setScorer(scorer); - } - } - - @Override - public void collect(int doc) throws IOException { - final int slot = ordSet.find(index.getOrd(doc)); - //System.out.println("SP.collect doc=" + doc + " slot=" + slot); - totalHitCount++; - if (slot >= 0) { - totalGroupedHitCount++; - groupDocs[slot].collector.collect(doc); - } - } - - @Override - public void setNextReader(AtomicReaderContext readerContext) throws IOException { - //System.out.println("SP.setNextReader"); - for (SearchGroupDocs group : groupMap.values()) { - group.collector.setNextReader(readerContext); - } - index = FieldCache.DEFAULT.getTermsIndex(readerContext.reader, groupField); - - // Rebuild ordSet - ordSet.clear(); - for (SearchGroupDocs group : groupMap.values()) { - //System.out.println(" group=" + (group.groupValue == null ? "null" : group.groupValue.utf8ToString())); - int ord = group.groupValue == null ? 
0 : index.binarySearchLookup(group.groupValue, spareBytesRef); - if (ord >= 0) { - groupDocs[ordSet.put(ord)] = group; - } - } - } - - @Override - public boolean acceptsDocsOutOfOrder() { - return false; - } - - public TopGroups getTopGroups(int withinGroupOffset) { - final GroupDocs[] groupDocsResult = new GroupDocs[groups.size()]; - - int groupIDX = 0; - for(SearchGroup group : groups) { - final SearchGroupDocs groupDocs = groupMap.get(group.groupValue); - final TopDocs topDocs = groupDocs.collector.topDocs(withinGroupOffset, maxDocsPerGroup); - groupDocsResult[groupIDX++] = new GroupDocs(topDocs.getMaxScore(), - topDocs.totalHits, - topDocs.scoreDocs, - groupDocs.groupValue, - group.sortValues); - } - - return new TopGroups(groupSort.getSort(), - withinGroupSort == null ? null : withinGroupSort.getSort(), - totalHitCount, totalGroupedHitCount, groupDocsResult); - } -} - - -// TODO: merge with SearchGroup or not? -// ad: don't need to build a new hashmap -// disad: blows up the size of SearchGroup if we need many of them, and couples implementations -class SearchGroupDocs { - public final BytesRef groupValue; - public final TopDocsCollector collector; - - public SearchGroupDocs(BytesRef groupValue, TopDocsCollector collector) { - this.groupValue = groupValue; - this.collector = collector; - } -} diff --git a/modules/grouping/src/java/org/apache/lucene/search/grouping/AllGroupsCollector.java b/modules/grouping/src/java/org/apache/lucene/search/grouping/TermAllGroupsCollector.java similarity index 71% rename from modules/grouping/src/java/org/apache/lucene/search/grouping/AllGroupsCollector.java rename to modules/grouping/src/java/org/apache/lucene/search/grouping/TermAllGroupsCollector.java index 496ced1f232..6d0ac38b305 100644 --- a/modules/grouping/src/java/org/apache/lucene/search/grouping/AllGroupsCollector.java +++ b/modules/grouping/src/java/org/apache/lucene/search/grouping/TermAllGroupsCollector.java @@ -18,9 +18,7 @@ package 
org.apache.lucene.search.grouping; */ import org.apache.lucene.index.IndexReader; -import org.apache.lucene.search.Collector; import org.apache.lucene.search.FieldCache; -import org.apache.lucene.search.Scorer; import org.apache.lucene.util.BytesRef; import java.io.IOException; @@ -43,47 +41,44 @@ import java.util.List; * * @lucene.experimental */ -public class AllGroupsCollector extends Collector { +public class TermAllGroupsCollector extends AbstractAllGroupsCollector { private static final int DEFAULT_INITIAL_SIZE = 128; private final String groupField; private final SentinelIntSet ordSet; private final List groups; - private final BytesRef spareBytesRef = new BytesRef(); private FieldCache.DocTermsIndex index; + private final BytesRef spareBytesRef = new BytesRef(); /** - * Expert: Constructs a {@link AllGroupsCollector} + * Expert: Constructs a {@link AbstractAllGroupsCollector} * * @param groupField The field to group by * @param initialSize The initial allocation size of the - * internal int set and group list - * which should roughly match the total - * number of expected unique groups. Be aware that the - * heap usage is 4 bytes * initialSize. + * internal int set and group list + * which should roughly match the total + * number of expected unique groups. Be aware that the + * heap usage is 4 bytes * initialSize. */ - public AllGroupsCollector(String groupField, int initialSize) { - this.groupField = groupField; + public TermAllGroupsCollector(String groupField, int initialSize) { ordSet = new SentinelIntSet(initialSize, -1); groups = new ArrayList(initialSize); + this.groupField = groupField; } /** - * Constructs a {@link AllGroupsCollector}. This sets the + * Constructs a {@link AbstractAllGroupsCollector}. This sets the * initial allocation size for the internal int set and group * list to 128. 
* * @param groupField The field to group by */ - public AllGroupsCollector(String groupField) { + public TermAllGroupsCollector(String groupField) { this(groupField, DEFAULT_INITIAL_SIZE); } - public void setScorer(Scorer scorer) throws IOException { - } - public void collect(int doc) throws IOException { int key = index.getOrd(doc); if (!ordSet.exists(key)) { @@ -94,22 +89,7 @@ public class AllGroupsCollector extends Collector { } /** - * Returns the total number of groups for the executed search. - * This is a convenience method. The following code snippet has the same effect:
getGroups().size()
- * - * @return The total number of groups for the executed search - */ - public int getGroupCount() { - return groups.size(); - } - - /** - * Returns the group values - *

- * This is an unordered collections of group values. For each group that matched the query there is a {@link BytesRef} - * representing a group value. - * - * @return the group values + * {@inheritDoc} */ public Collection getGroups() { return groups; @@ -128,7 +108,4 @@ public class AllGroupsCollector extends Collector { } } - public boolean acceptsDocsOutOfOrder() { - return true; - } -} \ No newline at end of file +} diff --git a/modules/grouping/src/java/org/apache/lucene/search/grouping/TermFirstPassGroupingCollector.java b/modules/grouping/src/java/org/apache/lucene/search/grouping/TermFirstPassGroupingCollector.java new file mode 100644 index 00000000000..2ac341fc2d6 --- /dev/null +++ b/modules/grouping/src/java/org/apache/lucene/search/grouping/TermFirstPassGroupingCollector.java @@ -0,0 +1,85 @@ +package org.apache.lucene.search.grouping; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import org.apache.lucene.index.IndexReader.AtomicReaderContext; +import org.apache.lucene.search.FieldCache; +import org.apache.lucene.search.Sort; +import org.apache.lucene.util.BytesRef; + +import java.io.IOException; + +/** + * Concrete implementation of {@link AbstractFirstPassGroupingCollector} that groups based on + * field values and more specifically uses {@link org.apache.lucene.search.FieldCache.DocTermsIndex} + * to collect groups. + * + * @lucene.experimental + */ +public class TermFirstPassGroupingCollector extends AbstractFirstPassGroupingCollector { + + private final BytesRef scratchBytesRef = new BytesRef(); + private FieldCache.DocTermsIndex index; + + private String groupField; + + /** + * Create the first pass collector. + * + * @param groupField The field used to group + * documents. This field must be single-valued and + * indexed (FieldCache is used to access its value + * per-document). + * @param groupSort The {@link Sort} used to sort the + * groups. The top sorted document within each group + * according to groupSort, determines how that group + * sorts against other groups. This must be non-null, + * ie, if you want to groupSort by relevance use + * Sort.RELEVANCE. + * @param topNGroups How many top groups to keep. + * @throws IOException When I/O related errors occur + */ + public TermFirstPassGroupingCollector(String groupField, Sort groupSort, int topNGroups) throws IOException { + super(groupSort, topNGroups); + this.groupField = groupField; + } + + @Override + protected BytesRef getDocGroupValue(int doc) { + final int ord = index.getOrd(doc); + return ord == 0 ? 
null : index.lookup(ord, scratchBytesRef); + } + + @Override + protected BytesRef copyDocGroupValue(BytesRef groupValue, BytesRef reuse) { + if (groupValue == null) { + return null; + } else if (reuse != null) { + reuse.copy(groupValue); + return reuse; + } else { + return new BytesRef(groupValue); + } + } + + @Override + public void setNextReader(AtomicReaderContext readerContext) throws IOException { + super.setNextReader(readerContext); + index = FieldCache.DEFAULT.getTermsIndex(readerContext.reader, groupField); + } +} diff --git a/modules/grouping/src/java/org/apache/lucene/search/grouping/TermSecondPassGroupingCollector.java b/modules/grouping/src/java/org/apache/lucene/search/grouping/TermSecondPassGroupingCollector.java new file mode 100644 index 00000000000..bf81f98ed90 --- /dev/null +++ b/modules/grouping/src/java/org/apache/lucene/search/grouping/TermSecondPassGroupingCollector.java @@ -0,0 +1,76 @@ +package org.apache.lucene.search.grouping; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import org.apache.lucene.index.IndexReader.AtomicReaderContext; +import org.apache.lucene.search.FieldCache; +import org.apache.lucene.search.Sort; +import org.apache.lucene.util.BytesRef; + +import java.io.IOException; +import java.util.Collection; + +/** + * Concrete implementation of {@link AbstractSecondPassGroupingCollector} that groups based on + * field values and more specifically uses {@link org.apache.lucene.search.FieldCache.DocTermsIndex} + * to collect grouped docs. + * + * @lucene.experimental + */ +public class TermSecondPassGroupingCollector extends AbstractSecondPassGroupingCollector { + + private final SentinelIntSet ordSet; + private FieldCache.DocTermsIndex index; + private final BytesRef spareBytesRef = new BytesRef(); + private final String groupField; + + @SuppressWarnings("unchecked") + public TermSecondPassGroupingCollector(String groupField, Collection> groups, Sort groupSort, Sort withinGroupSort, + int maxDocsPerGroup, boolean getScores, boolean getMaxScores, boolean fillSortFields) + throws IOException { + super(groups, groupSort, withinGroupSort, maxDocsPerGroup, getScores, getMaxScores, fillSortFields); + ordSet = new SentinelIntSet(groupMap.size(), -1); + this.groupField = groupField; + groupDocs = (SearchGroupDocs[]) new SearchGroupDocs[ordSet.keys.length]; + } + + @Override + public void setNextReader(AtomicReaderContext readerContext) throws IOException { + super.setNextReader(readerContext); + index = FieldCache.DEFAULT.getTermsIndex(readerContext.reader, groupField); + + // Rebuild ordSet + ordSet.clear(); + for (SearchGroupDocs group : groupMap.values()) { +// System.out.println(" group=" + (group.groupValue == null ? "null" : group.groupValue.utf8ToString())); + int ord = group.groupValue == null ? 
0 : index.binarySearchLookup(group.groupValue, spareBytesRef); + if (ord >= 0) { + groupDocs[ordSet.put(ord)] = group; + } + } + } + + @Override + protected SearchGroupDocs retrieveGroup(int doc) throws IOException { + int slot = ordSet.find(index.getOrd(doc)); + if (slot >= 0) { + return groupDocs[slot]; + } + return null; + } +} \ No newline at end of file diff --git a/modules/grouping/src/java/org/apache/lucene/search/grouping/TopGroups.java b/modules/grouping/src/java/org/apache/lucene/search/grouping/TopGroups.java index 2dbb38fa186..a46aa410c20 100644 --- a/modules/grouping/src/java/org/apache/lucene/search/grouping/TopGroups.java +++ b/modules/grouping/src/java/org/apache/lucene/search/grouping/TopGroups.java @@ -22,7 +22,7 @@ import org.apache.lucene.search.SortField; /** Represents result returned by a grouping search. * * @lucene.experimental */ -public class TopGroups { +public class TopGroups { /** Number of documents matching the search */ public final int totalHitCount; @@ -33,7 +33,7 @@ public class TopGroups { public final Integer totalGroupCount; /** Group results in groupSort order */ - public final GroupDocs[] groups; + public final GroupDocs[] groups; /** How groups are sorted against each other */ public final SortField[] groupSort; @@ -41,7 +41,7 @@ public class TopGroups { /** How docs are sorted within each group */ public final SortField[] withinGroupSort; - public TopGroups(SortField[] groupSort, SortField[] withinGroupSort, int totalHitCount, int totalGroupedHitCount, GroupDocs[] groups) { + public TopGroups(SortField[] groupSort, SortField[] withinGroupSort, int totalHitCount, int totalGroupedHitCount, GroupDocs[] groups) { this.groupSort = groupSort; this.withinGroupSort = withinGroupSort; this.totalHitCount = totalHitCount; @@ -50,7 +50,7 @@ public class TopGroups { this.totalGroupCount = null; } - public TopGroups(TopGroups oldTopGroups, Integer totalGroupCount) { + public TopGroups(TopGroups oldTopGroups, Integer totalGroupCount) { 
this.groupSort = oldTopGroups.groupSort; this.withinGroupSort = oldTopGroups.withinGroupSort; this.totalHitCount = oldTopGroups.totalHitCount; diff --git a/modules/grouping/src/java/org/apache/lucene/search/grouping/package.html b/modules/grouping/src/java/org/apache/lucene/search/grouping/package.html index 4b2f02a7caa..cd4717caf43 100644 --- a/modules/grouping/src/java/org/apache/lucene/search/grouping/package.html +++ b/modules/grouping/src/java/org/apache/lucene/search/grouping/package.html @@ -43,55 +43,37 @@ field fall into a single group.

+

The implementation is two-pass: the first pass ({@link + org.apache.lucene.search.grouping.TermFirstPassGroupingCollector}) + gathers the top groups, and the second pass ({@link + org.apache.lucene.search.grouping.TermSecondPassGroupingCollector}) + gathers documents within those groups. If the search is costly to + run you may want to use the {@link + org.apache.lucene.search.CachingCollector} class, which + caches hits and can (quickly) replay them for the second pass. This + way you only run the query once, but you pay a RAM cost to (briefly) + hold all hits. Results are returned as a {@link + org.apache.lucene.search.grouping.TopGroups} instance.

+

-There are two grouping implementations here: -

    -
  • - Arbitrary grouping that can group by any single-valued indexed - field, implemented as a two-pass collector: the first pass ({@link - org.apache.lucene.search.grouping.FirstPassGroupingCollector}) - gathers the top groups, and the second pass ({@link - org.apache.lucene.search.grouping.SecondPassGroupingCollector}) - gathers documents within those groups. If the search is costly to - run you may want to use the {@link - org.apache.lucene.search.CachingCollector} class, which caches - hits and can (quickly) replay them for the second pass. This way - you only run the query once, but you pay a RAM cost to (briefly) - hold all hits. Results are returned as a {@link - org.apache.lucene.search.grouping.TopGroups} instance.

    -
  • -
  • - Indexed groups, using a single pass collector (BlockGroupingCollectorDoc) that - is able to group according to the doc blocks created during - indexing using IndexWriter's add/updateDocuments API. - This is faster (~25% faster QPS) than the generic two-pass - collector, but it only works for doc blocks so you must statically - commit (during indexing) to which grouping you'll need at search - time. + This module abstracts away what defines group and how it is collected. All grouping collectors + are abstract and have currently term based implementations. One can implement + collectors that for example group on multiple fields. +

    -

    This implementation does not rely on a single valued grouping - field; rather, the blocks in the index define the groups, so your - application is free to determine what the grouping criteria is. - At search time, you must provide a Filter that marks - the last document in each group. This is a substantial memory - savings because this collector does not load - a DocTermsIndex from the - FieldCache. -

  • -
- -

The benefit of the arbitrary grouping implementation is you don't have -to commit at indexing time to a static grouping of your documents. -But the downside is it's somewhat slower to run, and requires more RAM -(a FieldCache.DocTermsIndex entry is created). +

+ This module abstracts away what defines group and how it is collected. All grouping collectors + are abstract and have currently term based implementations. One can implement + collectors that for example group on multiple fields. +

Known limitations:

  • For the two-pass grouping collector, the group field must be a single-valued indexed field. {@link org.apache.lucene.search.FieldCache} is used to load the {@link org.apache.lucene.search.FieldCache.DocTermsIndex} for this field. -
  • Unlike Solr's implementation, this module cannot group by - function query values nor by arbitrary queries. +
  • Although Solr support grouping by function and this module has abstraction of what a group is, there are currently only + implementations for grouping based on terms.
  • Sharding is not directly supported, though is not too difficult, if you can merge the top groups and top documents per group yourself. @@ -101,14 +83,14 @@ But the downside is it's somewhat slower to run, and requires more RAM (using the {@link org.apache.lucene.search.CachingCollector}):

    -  FirstPassGroupingCollector c1 = new FirstPassGroupingCollector("author", groupSort, groupOffset+topNGroups);
    +  TermFirstPassGroupingCollector c1 = new TermFirstPassGroupingCollector("author", groupSort, groupOffset+topNGroups);
     
       boolean cacheScores = true;
       double maxCacheRAMMB = 4.0;
       CachingCollector cachedCollector = CachingCollector.create(c1, cacheScores, maxCacheRAMMB);
       s.search(new TermQuery(new Term("content", searchTerm)), cachedCollector);
     
    -  Collection topGroups = c1.getTopGroups(groupOffset, fillFields);
    +  Collection> topGroups = c1.getTopGroups(groupOffset, fillFields);
     
       if (topGroups == null) {
         // No groups matched
    @@ -118,12 +100,12 @@ But the downside is it's somewhat slower to run, and requires more RAM
       boolean getScores = true;
       boolean getMaxScores = true;
       boolean fillFields = true;
    -  SecondPassGroupingCollector c2 = new SecondPassGroupingCollector("author", topGroups, groupSort, docSort, docOffset+docsPerGroup, getScores, getMaxScores, fillFields);
    +  TermSecondPassGroupingCollector c2 = new TermSecondPassGroupingCollector("author", topGroups, groupSort, docSort, docOffset+docsPerGroup, getScores, getMaxScores, fillFields);
     
       //Optionally compute total group count
    -  AllGroupsCollector allGroupsCollector = null;
    +  TermAllGroupsCollector allGroupsCollector = null;
       if (requiredTotalGroupCount) {
    -    allGroupsCollector = new AllGroupsCollector("author");
    +    allGroupsCollector = new TermAllGroupsCollector("author");
         c2 = MultiCollector.wrap(c2, allGroupsCollector);
       }
     
    @@ -135,9 +117,9 @@ But the downside is it's somewhat slower to run, and requires more RAM
         s.search(new TermQuery(new Term("content", searchTerm)), c2);
       }
             
    -  TopGroups groupsResult = c2.getTopGroups(docOffset);
    +  TopGroups groupsResult = c2.getTopGroups(docOffset);
       if (requiredTotalGroupCount) {
    -    groupResult = new TopGroups(groupsResult, allGroupsCollector.getGroupCount());
    +    groupResult = new TopGroups(groupsResult, allGroupsCollector.getGroupCount());
       }
     
       // Render groupsResult...
    diff --git a/modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java b/modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupsCollectorTest.java
    similarity index 93%
    rename from modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java
    rename to modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupsCollectorTest.java
    index 00153e7f997..0e6004e0696 100644
    --- a/modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java
    +++ b/modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupsCollectorTest.java
    @@ -27,7 +27,7 @@ import org.apache.lucene.search.TermQuery;
     import org.apache.lucene.store.Directory;
     import org.apache.lucene.util.LuceneTestCase;
     
    -public class AllGroupsCollectorTest extends LuceneTestCase {
    +public class TermAllGroupsCollectorTest extends LuceneTestCase {
     
       public void testTotalGroupCount() throws Exception {
     
    @@ -91,15 +91,15 @@ public class AllGroupsCollectorTest extends LuceneTestCase {
         IndexSearcher indexSearcher = new IndexSearcher(w.getReader());
         w.close();
     
    -    AllGroupsCollector c1 = new AllGroupsCollector(groupField);
    +    TermAllGroupsCollector c1 = new TermAllGroupsCollector(groupField);
         indexSearcher.search(new TermQuery(new Term("content", "random")), c1);
         assertEquals(4, c1.getGroupCount());
     
    -    AllGroupsCollector c2 = new AllGroupsCollector(groupField);
    +    TermAllGroupsCollector c2 = new TermAllGroupsCollector(groupField);
         indexSearcher.search(new TermQuery(new Term("content", "some")), c2);
         assertEquals(3, c2.getGroupCount());
     
    -    AllGroupsCollector c3 = new AllGroupsCollector(groupField);
    +    TermAllGroupsCollector c3 = new TermAllGroupsCollector(groupField);
         indexSearcher.search(new TermQuery(new Term("content", "blob")), c3);
         assertEquals(2, c3.getGroupCount());
     
    diff --git a/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java b/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
    index 9dc49faa71b..89a9ecb9329 100644
    --- a/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
    +++ b/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
    @@ -17,9 +17,6 @@
     
     package org.apache.lucene.search.grouping;
     
    -import java.util.*;
    -import java.io.IOException;
    -
     import org.apache.lucene.analysis.MockAnalyzer;
     import org.apache.lucene.document.Document;
     import org.apache.lucene.document.Field;
    @@ -33,6 +30,9 @@ import org.apache.lucene.util.BytesRef;
     import org.apache.lucene.util.LuceneTestCase;
     import org.apache.lucene.util._TestUtil;
     
    +import java.io.IOException;
    +import java.util.*;
    +
     // TODO
     //   - should test relevance sort too
     //   - test null
    @@ -103,10 +103,10 @@ public class TestGrouping extends LuceneTestCase {
         w.close();
     
         final Sort groupSort = Sort.RELEVANCE;
    -    final FirstPassGroupingCollector c1 = new FirstPassGroupingCollector(groupField, groupSort, 10);
    +    final TermFirstPassGroupingCollector c1 = new TermFirstPassGroupingCollector(groupField, groupSort, 10);
         indexSearcher.search(new TermQuery(new Term("content", "random")), c1);
     
    -    final SecondPassGroupingCollector c2 = new SecondPassGroupingCollector(groupField, c1.getTopGroups(0, true), groupSort, null, 5, true, false, true);
    +    final TermSecondPassGroupingCollector c2 = new TermSecondPassGroupingCollector(groupField, c1.getTopGroups(0, true), groupSort, null, 5, true, false, true);
         indexSearcher.search(new TermQuery(new Term("content", "random")), c2);
     
         final TopGroups groups = c2.getTopGroups(0);
    @@ -154,7 +154,10 @@ public class TestGrouping extends LuceneTestCase {
         final BytesRef group;
         final BytesRef sort1;
         final BytesRef sort2;
    +    // content must be "realN ..."
         final String content;
    +    float score;
    +    float score2;
     
         public GroupDoc(int id, BytesRef group, BytesRef sort1, BytesRef sort2, String content) {
           this.id = id;
    @@ -167,16 +170,21 @@ public class TestGrouping extends LuceneTestCase {
     
       private Sort getRandomSort() {
         final List sortFields = new ArrayList();
    -    if (random.nextBoolean()) {
    +    if (random.nextInt(7) == 2) {
    +      sortFields.add(SortField.FIELD_SCORE);
    +    } else {
           if (random.nextBoolean()) {
    +        if (random.nextBoolean()) {
    +          sortFields.add(new SortField("sort1", SortField.STRING, random.nextBoolean()));
    +        } else {
    +          sortFields.add(new SortField("sort2", SortField.STRING, random.nextBoolean()));
    +        }
    +      } else if (random.nextBoolean()) {
             sortFields.add(new SortField("sort1", SortField.STRING, random.nextBoolean()));
    -      } else {
             sortFields.add(new SortField("sort2", SortField.STRING, random.nextBoolean()));
           }
    -    } else if (random.nextBoolean()) {
    -      sortFields.add(new SortField("sort1", SortField.STRING, random.nextBoolean()));
    -      sortFields.add(new SortField("sort2", SortField.STRING, random.nextBoolean()));
         }
    +    // Break ties:
         sortFields.add(new SortField("id", SortField.INT));
         return new Sort(sortFields.toArray(new SortField[sortFields.size()]));
       }
    @@ -188,7 +196,15 @@ public class TestGrouping extends LuceneTestCase {
           public int compare(GroupDoc d1, GroupDoc d2) {
             for(SortField sf : sortFields) {
               final int cmp;
    -          if (sf.getField().equals("sort1")) {
    +          if (sf.getType() == SortField.SCORE) {
    +            if (d1.score > d2.score) {
    +              cmp = -1;
    +            } else if (d1.score < d2.score) {
    +              cmp = 1;
    +            } else {
    +              cmp = 0;
    +            }
    +          } else if (sf.getField().equals("sort1")) {
                 cmp = d1.sort1.compareTo(d2.sort1);
               } else if (sf.getField().equals("sort2")) {
                 cmp = d1.sort2.compareTo(d2.sort2);
    @@ -213,7 +229,9 @@ public class TestGrouping extends LuceneTestCase {
         for(int fieldIDX=0;fieldIDX c;
           final SortField sf = sortFields[fieldIDX];
    -      if (sf.getField().equals("sort1")) {
    +      if (sf.getType() == SortField.SCORE) {
    +        c = new Float(d.score);
    +      } else if (sf.getField().equals("sort1")) {
             c = d.sort1;
           } else if (sf.getField().equals("sort2")) {
             c = d.sort2;
    @@ -236,18 +254,18 @@ public class TestGrouping extends LuceneTestCase {
       }
       */
     
    -  private TopGroups slowGrouping(GroupDoc[] groupDocs,
    -                                 String searchTerm,
    -                                 boolean fillFields,
    -                                 boolean getScores,
    -                                 boolean getMaxScores,
    -                                 boolean doAllGroups,
    -                                 Sort groupSort,
    -                                 Sort docSort,
    -                                 int topNGroups,
    -                                 int docsPerGroup,
    -                                 int groupOffset,
    -                                 int docOffset) {
    +  private TopGroups slowGrouping(GroupDoc[] groupDocs,
    +                                           String searchTerm,
    +                                           boolean fillFields,
    +                                           boolean getScores,
    +                                           boolean getMaxScores,
    +                                           boolean doAllGroups,
    +                                           Sort groupSort,
    +                                           Sort docSort,
    +                                           int topNGroups,
    +                                           int docsPerGroup,
    +                                           int groupOffset,
    +                                           int docOffset) {
     
         final Comparator groupSortComp = getComparator(groupSort);
     
    @@ -262,11 +280,11 @@ public class TestGrouping extends LuceneTestCase {
         //System.out.println("TEST: slowGrouping");
         for(GroupDoc d : groupDocs) {
           // TODO: would be better to filter by searchTerm before sorting!
    -      if (!d.content.equals(searchTerm)) {
    +      if (!d.content.startsWith(searchTerm)) {
             continue;
           }
           totalHitCount++;
    -      //System.out.println("  match id=" + d.id);
    +      //System.out.println("  match id=" + d.id + " score=" + d.score);
     
           if (doAllGroups) {
             if (!knownGroups.contains(d.group)) {
    @@ -296,7 +314,8 @@ public class TestGrouping extends LuceneTestCase {
         final int limit = Math.min(groupOffset + topNGroups, groups.size());
     
         final Comparator docSortComp = getComparator(docSort);
    -    final GroupDocs[] result = new GroupDocs[limit-groupOffset];
    +    @SuppressWarnings("unchecked")
    +    final GroupDocs[] result = new GroupDocs[limit-groupOffset];
         int totalGroupedHitCount = 0;
         for(int idx=groupOffset;idx < limit;idx++) {
           final BytesRef group = sortedGroups.get(idx);
    @@ -311,9 +330,9 @@ public class TestGrouping extends LuceneTestCase {
               final GroupDoc d = docs.get(docIDX);
               final FieldDoc fd;
               if (fillFields) {
    -            fd = new FieldDoc(d.id, 0.0f, fillFields(d, docSort));
    +            fd = new FieldDoc(d.id, getScores ? d.score : Float.NaN, fillFields(d, docSort));
               } else {
    -            fd = new FieldDoc(d.id, 0.0f);
    +            fd = new FieldDoc(d.id, getScores ? d.score : Float.NaN);
               }
               hits[docIDX-docOffset] = fd;
             }
    @@ -321,7 +340,7 @@ public class TestGrouping extends LuceneTestCase {
             hits = new ScoreDoc[0];
           }
     
    -      result[idx-groupOffset] = new GroupDocs(0.0f,
    +      result[idx-groupOffset] = new GroupDocs(0.0f,
                                                   docs.size(),
                                                   hits,
                                                   group,
    @@ -329,12 +348,12 @@ public class TestGrouping extends LuceneTestCase {
         }
     
         if (doAllGroups) {
    -      return new TopGroups(
    -          new TopGroups(groupSort.getSort(), docSort.getSort(), totalHitCount, totalGroupedHitCount, result),
    +      return new TopGroups(
    +          new TopGroups(groupSort.getSort(), docSort.getSort(), totalHitCount, totalGroupedHitCount, result),
               knownGroups.size()
           );
         } else {
    -      return new TopGroups(groupSort.getSort(), docSort.getSort(), totalHitCount, totalGroupedHitCount, result);
    +      return new TopGroups(groupSort.getSort(), docSort.getSort(), totalHitCount, totalGroupedHitCount, result);
         }
       }
     
    @@ -372,7 +391,7 @@ public class TestGrouping extends LuceneTestCase {
             doc.add(newField("sort1", groupValue.sort1.utf8ToString(), Field.Index.NOT_ANALYZED));
             doc.add(newField("sort2", groupValue.sort2.utf8ToString(), Field.Index.NOT_ANALYZED));
             doc.add(new NumericField("id").setIntValue(groupValue.id));
    -        doc.add(newField("content", groupValue.content, Field.Index.NOT_ANALYZED));
    +        doc.add(newField("content", groupValue.content, Field.Index.ANALYZED));
             //System.out.println("TEST:     doc content=" + groupValue.content + " group=" + (groupValue.group == null ? "null" : groupValue.group.utf8ToString()) + " sort1=" + groupValue.sort1.utf8ToString() + " id=" + groupValue.id);
           }
           // So we can pull filter marking last doc in block:
    @@ -420,7 +439,22 @@ public class TestGrouping extends LuceneTestCase {
             groups.add(new BytesRef(_TestUtil.randomRealisticUnicodeString(random)));
             //groups.add(new BytesRef(_TestUtil.randomSimpleString(random)));
           }
    -      final String[] contentStrings = new String[] {"a", "b", "c", "d"};
    +      final String[] contentStrings = new String[_TestUtil.nextInt(random, 2, 20)];
    +      if (VERBOSE) {
    +        System.out.println("TEST: create fake content");
    +      }
    +      for(int contentIDX=0;contentIDX> scoreMap = new HashMap>();
    +
    +        // Tricky: must separately set .score2, because the doc
    +        // block index was created with possible deletions!
    +        //System.out.println("fixup score2");
    +        for(int contentID=0;contentID<3;contentID++) {
    +          //System.out.println("  term=real" + contentID);
    +          final Map termScoreMap = new HashMap();
    +          scoreMap.put("real"+contentID, termScoreMap);
    +          //System.out.println("term=real" + contentID + " dfold=" + s.docFreq(new Term("content", "real"+contentID)) +
    +          //" dfnew=" + s2.docFreq(new Term("content", "real"+contentID)));
    +          final ScoreDoc[] hits = s2.search(new TermQuery(new Term("content", "real"+contentID)), numDocs).scoreDocs;
    +          for(ScoreDoc hit : hits) {
    +            final GroupDoc gd = groupDocsByID[docIDToID2[hit.doc]];
    +            assertTrue(gd.score2 == 0.0);
    +            gd.score2 = hit.score;
    +            assertEquals(gd.id, docIDToID2[hit.doc]);
    +            //System.out.println("    score=" + gd.score + " score2=" + hit.score + " id=" + docIDToID2[hit.doc]);
    +            termScoreMap.put(gd.score, gd.score2);
    +          }
    +        }
    +
             for(int searchIter=0;searchIter<100;searchIter++) {
     
               if (VERBOSE) {
                 System.out.println("TEST: searchIter=" + searchIter);
               }
     
    -          final String searchTerm = contentStrings[random.nextInt(contentStrings.length)];
    +          final String searchTerm = "real" + random.nextInt(3);
               final boolean fillFields = random.nextBoolean();
    -          final boolean getScores = random.nextBoolean();
    +          boolean getScores = random.nextBoolean();
               final boolean getMaxScores = random.nextBoolean();
               final Sort groupSort = getRandomSort();
               //final Sort groupSort = new Sort(new SortField[] {new SortField("sort1", SortField.STRING), new SortField("id", SortField.INT)});
               // TODO: also test null (= sort by relevance)
               final Sort docSort = getRandomSort();
     
    +          for(SortField sf : docSort.getSort()) {
    +            if (sf.getType() == SortField.SCORE) {
    +              getScores = true;
    +            }
    +          }
    +
    +          for(SortField sf : groupSort.getSort()) {
    +            if (sf.getType() == SortField.SCORE) {
    +              getScores = true;
    +            }
    +          }
    +
               final int topNGroups = _TestUtil.nextInt(random, 1, 30);
               //final int topNGroups = 4;
               final int docsPerGroup = _TestUtil.nextInt(random, 1, 50);
    +
               final int groupOffset = _TestUtil.nextInt(random, 0, (topNGroups-1)/2);
               //final int groupOffset = 0;
     
    @@ -522,17 +616,17 @@ public class TestGrouping extends LuceneTestCase {
               final boolean doCache = random.nextBoolean();
               final boolean doAllGroups = random.nextBoolean();
               if (VERBOSE) {
    -            System.out.println("TEST: groupSort=" + groupSort + " docSort=" + docSort + " searchTerm=" + searchTerm + " topNGroups=" + topNGroups + " groupOffset=" + groupOffset + " docOffset=" + docOffset + " doCache=" + doCache + " docsPerGroup=" + docsPerGroup + " doAllGroups=" + doAllGroups);
    +            System.out.println("TEST: groupSort=" + groupSort + " docSort=" + docSort + " searchTerm=" + searchTerm + " topNGroups=" + topNGroups + " groupOffset=" + groupOffset + " docOffset=" + docOffset + " doCache=" + doCache + " docsPerGroup=" + docsPerGroup + " doAllGroups=" + doAllGroups + " getScores=" + getScores + " getMaxScores=" + getMaxScores);
               }
     
    -          final AllGroupsCollector allGroupsCollector;
    +          final TermAllGroupsCollector allGroupsCollector;
               if (doAllGroups) {
    -            allGroupsCollector = new AllGroupsCollector("group");
    +            allGroupsCollector = new TermAllGroupsCollector("group");
               } else {
                 allGroupsCollector = null;
               }
     
    -          final FirstPassGroupingCollector c1 = new FirstPassGroupingCollector("group", groupSort, groupOffset+topNGroups);
    +          final TermFirstPassGroupingCollector c1 = new TermFirstPassGroupingCollector("group", groupSort, groupOffset+topNGroups);
               final CachingCollector cCache;
               final Collector c;
             
    @@ -583,19 +677,19 @@ public class TestGrouping extends LuceneTestCase {
                 }
               }
     
    -          final Collection topGroups = c1.getTopGroups(groupOffset, fillFields);
    +          final Collection> topGroups = c1.getTopGroups(groupOffset, fillFields);
               final TopGroups groupsResult;
     
               if (topGroups != null) {
     
                 if (VERBOSE) {
                   System.out.println("TEST: topGroups");
    -              for (SearchGroup searchGroup : topGroups) {
    +              for (SearchGroup searchGroup : topGroups) {
                     System.out.println("  " + (searchGroup.groupValue == null ? "null" : searchGroup.groupValue.utf8ToString()) + ": " + Arrays.deepToString(searchGroup.sortValues));
                   }
                 }
     
    -            final SecondPassGroupingCollector c2 = new SecondPassGroupingCollector("group", topGroups, groupSort, docSort, docOffset+docsPerGroup, getScores, getMaxScores, fillFields);
    +            final TermSecondPassGroupingCollector c2 = new TermSecondPassGroupingCollector("group", topGroups, groupSort, docSort, docOffset+docsPerGroup, getScores, getMaxScores, fillFields);
                 if (doCache) {
                   if (cCache.isCached()) {
                     if (VERBOSE) {
    @@ -613,8 +707,8 @@ public class TestGrouping extends LuceneTestCase {
                 }
     
                 if (doAllGroups) {
    -              TopGroups tempTopGroups = c2.getTopGroups(docOffset);
    -              groupsResult = new TopGroups(tempTopGroups, allGroupsCollector.getGroupCount());
    +              TopGroups tempTopGroups = c2.getTopGroups(docOffset);
    +              groupsResult = new TopGroups(tempTopGroups, allGroupsCollector.getGroupCount());
                 } else {
                   groupsResult = c2.getTopGroups(docOffset);
                 }
    @@ -625,49 +719,93 @@ public class TestGrouping extends LuceneTestCase {
                 }
               }
     
    -          final TopGroups expectedGroups = slowGrouping(groupDocs, searchTerm, fillFields, getScores, getMaxScores, doAllGroups, groupSort, docSort, topNGroups, docsPerGroup, groupOffset, docOffset);
    +          final TopGroups expectedGroups = slowGrouping(groupDocs, searchTerm, fillFields, getScores, getMaxScores, doAllGroups, groupSort, docSort, topNGroups, docsPerGroup, groupOffset, docOffset);
     
               if (VERBOSE) {
                 if (expectedGroups == null) {
                   System.out.println("TEST: no expected groups");
                 } else {
                   System.out.println("TEST: expected groups");
    -              for(GroupDocs gd : expectedGroups.groups) {
    +              for(GroupDocs gd : expectedGroups.groups) {
                     System.out.println("  group=" + (gd.groupValue == null ? "null" : gd.groupValue.utf8ToString()));
                     for(ScoreDoc sd : gd.scoreDocs) {
    -                  System.out.println("    id=" + sd.doc);
    +                  System.out.println("    id=" + sd.doc + " score=" + sd.score);
                     }
                   }
                 }
               }
    -          // NOTE: intentional but temporary field cache insanity!
    -          assertEquals(docIDToID, expectedGroups, groupsResult, true);
    +          assertEquals(docIDToID, expectedGroups, groupsResult, true, getScores);
     
               final boolean needsScores = getScores || getMaxScores || docSort == null;
               final BlockGroupingCollector c3 = new BlockGroupingCollector(groupSort, groupOffset+topNGroups, needsScores, lastDocInBlock);
    -          final AllGroupsCollector allGroupsCollector2;
    +          final TermAllGroupsCollector allGroupsCollector2;
               final Collector c4;
               if (doAllGroups) {
    -            allGroupsCollector2 = new AllGroupsCollector("group");
    +            allGroupsCollector2 = new TermAllGroupsCollector("group");
                 c4 = MultiCollector.wrap(c3, allGroupsCollector2);
               } else {
                 allGroupsCollector2 = null;
                 c4 = c3;
               }
               s2.search(new TermQuery(new Term("content", searchTerm)), c4);
    -          final TopGroups tempTopGroups2 = c3.getTopGroups(docSort, groupOffset, docOffset, docOffset+docsPerGroup, fillFields);
    +          @SuppressWarnings("unchecked")
    +          final TopGroups tempTopGroups2 = c3.getTopGroups(docSort, groupOffset, docOffset, docOffset+docsPerGroup, fillFields);
               final TopGroups groupsResult2;
               if (doAllGroups && tempTopGroups2 != null) {
                 assertEquals((int) tempTopGroups2.totalGroupCount, allGroupsCollector2.getGroupCount());
    -            groupsResult2 = new TopGroups(tempTopGroups2, allGroupsCollector2.getGroupCount());
    +            groupsResult2 = new TopGroups(tempTopGroups2, allGroupsCollector2.getGroupCount());
               } else {
                 groupsResult2 = tempTopGroups2;
               }
    -          assertEquals(docIDToID2, expectedGroups, groupsResult2, false);
    +
    +          if (expectedGroups != null) {
    +            // Fixup scores for reader2
    +            for (GroupDocs groupDocsHits : expectedGroups.groups) {
    +              for(ScoreDoc hit : groupDocsHits.scoreDocs) {
    +                final GroupDoc gd = groupDocsByID[hit.doc];
    +                assertEquals(gd.id, hit.doc);
    +                //System.out.println("fixup score " + hit.score + " to " + gd.score2 + " vs " + gd.score);
    +                hit.score = gd.score2;
    +              }
    +            }
    +
    +            final SortField[] sortFields = groupSort.getSort();
    +            final Map termScoreMap = scoreMap.get(searchTerm);
    +            for(int groupSortIDX=0;groupSortIDX sources = fp.parseValueSourceList();
    +        return new MultiBoolFunction(sources) {
    +          @Override
    +          protected String name() {
    +            return "and";
    +          }
    +          @Override
    +          protected boolean func(int doc, DocValues[] vals) {
    +            for (DocValues dv : vals)
    +              if (!dv.boolVal(doc)) return false;
    +            return true;
    +          }
    +        };
    +      }
    +    });
    +
    +    addParser("or", new ValueSourceParser() {
    +      @Override
    +      public ValueSource parse(FunctionQParser fp) throws ParseException {
    +        List sources = fp.parseValueSourceList();
    +        return new MultiBoolFunction(sources) {
    +          @Override
    +          protected String name() {
    +            return "or";
    +          }
    +          @Override
    +          protected boolean func(int doc, DocValues[] vals) {
    +            for (DocValues dv : vals)
    +              if (dv.boolVal(doc)) return true;
    +            return false;
    +          }
    +        };
    +      }
    +    });
    +
    +    addParser("xor", new ValueSourceParser() {
    +      @Override
    +      public ValueSource parse(FunctionQParser fp) throws ParseException {
    +        List sources = fp.parseValueSourceList();
    +        return new MultiBoolFunction(sources) {
    +          @Override
    +          protected String name() {
    +            return "xor";
    +          }
    +          @Override
    +          protected boolean func(int doc, DocValues[] vals) {
    +            int nTrue=0, nFalse=0;
    +            for (DocValues dv : vals) {
    +              if (dv.boolVal(doc)) nTrue++;
    +              else nFalse++;
    +            }
    +            return nTrue != 0 && nFalse != 0;
    +          }
    +        };
    +      }
    +    });
    +
    +    addParser("if", new ValueSourceParser() {
    +      @Override
    +      public ValueSource parse(FunctionQParser fp) throws ParseException {
    +        ValueSource ifValueSource = fp.parseValueSource();
    +        ValueSource trueValueSource = fp.parseValueSource();
    +        ValueSource falseValueSource = fp.parseValueSource();
    +
    +        return new IfFunction(ifValueSource, trueValueSource, falseValueSource);
    +      }
    +    });
    +
    +    addParser("def", new ValueSourceParser() {
    +      @Override
    +      public ValueSource parse(FunctionQParser fp) throws ParseException {
    +        return new DefFunction(fp.parseValueSourceList());
    +      }
    +    });
    +
       }
     
       private static TInfo parseTerm(FunctionQParser fp) throws ParseException {
    @@ -857,6 +985,11 @@ class LongConstValueSource extends ConstNumberSource {
       public Number getNumber() {
         return constant;
       }
    +
    +  @Override
    +  public boolean getBool() {
    +    return constant != 0;
    +  }
     }
     
     
    @@ -981,3 +1114,69 @@ abstract class Double2Parser extends NamedParser {
       }
     
     }
    +
    +
    +class BoolConstValueSource extends ConstNumberSource {
    +  final boolean constant;
    +
    +  public BoolConstValueSource(boolean constant) {
    +    this.constant = constant;
    +  }
    +
    +  @Override
    +  public String description() {
    +    return "const(" + constant + ")";
    +  }
    +
    +  @Override
    +  public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
    +    return new BoolDocValues(this) {
    +      @Override
    +      public boolean boolVal(int doc) {
    +        return constant;
    +      }
    +    };
    +  }
    +
    +  @Override
    +  public int hashCode() {
    +    return constant ? 0x12345678 : 0x87654321;
    +  }
    +
    +  @Override
    +  public boolean equals(Object o) {
    +    if (BoolConstValueSource.class != o.getClass()) return false;
    +    BoolConstValueSource other = (BoolConstValueSource) o;
    +    return this.constant == other.constant;
    +  }
    +
     +  @Override
    +  public int getInt() {
    +    return constant ? 1 : 0;
    +  }
    +
    +  @Override
    +  public long getLong() {
    +    return constant ? 1 : 0;
    +  }
    +
    +  @Override
    +  public float getFloat() {
    +    return constant ? 1 : 0;
    +  }
    +
    +  @Override
    +  public double getDouble() {
    +    return constant ? 1 : 0;
    +  }
    +
    +  @Override
    +  public Number getNumber() {
    +    return constant ? 1 : 0;
    +  }
    +
    +  @Override
    +  public boolean getBool() {
    +    return constant;
    +  }
    +}
    diff --git a/solr/src/java/org/apache/solr/search/function/BoolDocValues.java b/solr/src/java/org/apache/solr/search/function/BoolDocValues.java
    new file mode 100644
    index 00000000000..443f379ab95
    --- /dev/null
    +++ b/solr/src/java/org/apache/solr/search/function/BoolDocValues.java
    @@ -0,0 +1,79 @@
    +package org.apache.solr.search.function;
    +
    +import org.apache.solr.search.MutableValue;
    +import org.apache.solr.search.MutableValueBool;
    +import org.apache.solr.search.MutableValueInt;
    +
    +public abstract class BoolDocValues extends DocValues {
    +  protected final ValueSource vs;
    +
    +  public BoolDocValues(ValueSource vs) {
    +    this.vs = vs;
    +  }
    +
    +  @Override
    +  public abstract boolean boolVal(int doc);
    +
    +  @Override
    +  public byte byteVal(int doc) {
    +    return boolVal(doc) ? (byte)1 : (byte)0;
    +  }
    +
    +  @Override
    +  public short shortVal(int doc) {
    +    return boolVal(doc) ? (short)1 : (short)0;
    +  }
    +
    +  @Override
    +  public float floatVal(int doc) {
    +    return boolVal(doc) ? (float)1 : (float)0;
    +  }
    +
    +  @Override
    +  public int intVal(int doc) {
    +    return boolVal(doc) ? 1 : 0;
    +  }
    +
    +  @Override
    +  public long longVal(int doc) {
    +    return boolVal(doc) ? (long)1 : (long)0;
    +  }
    +
    +  @Override
    +  public double doubleVal(int doc) {
    +    return boolVal(doc) ? (double)1 : (double)0;
    +  }
    +
    +  @Override
    +  public String strVal(int doc) {
    +    return Boolean.toString(boolVal(doc));
    +  }
    +
    +  @Override
    +  public Object objectVal(int doc) {
    +    return exists(doc) ? boolVal(doc) : null;
    +  }
    +
    +  @Override
    +  public String toString(int doc) {
    +    return vs.description() + '=' + strVal(doc);
    +  }
    +
    +  @Override
    +  public ValueFiller getValueFiller() {
    +    return new ValueFiller() {
    +      private final MutableValueBool mval = new MutableValueBool();
    +
    +      @Override
    +      public MutableValue getValue() {
    +        return mval;
    +      }
    +
    +      @Override
    +      public void fillValue(int doc) {
    +        mval.value = boolVal(doc);
    +        mval.exists = exists(doc);
    +      }
    +    };
    +  }
    +}
    diff --git a/solr/src/java/org/apache/solr/search/function/BoolFunction.java b/solr/src/java/org/apache/solr/search/function/BoolFunction.java
    new file mode 100644
    index 00000000000..b7898d15184
    --- /dev/null
    +++ b/solr/src/java/org/apache/solr/search/function/BoolFunction.java
    @@ -0,0 +1,23 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.solr.search.function;
    +
    +
    +public abstract class BoolFunction extends ValueSource {
    +  // TODO: placeholder to return type, among other common future functionality
    +}
    diff --git a/solr/src/java/org/apache/solr/search/function/ConstNumberSource.java b/solr/src/java/org/apache/solr/search/function/ConstNumberSource.java
    index da9ef1f051a..fac0611cc71 100755
    --- a/solr/src/java/org/apache/solr/search/function/ConstNumberSource.java
    +++ b/solr/src/java/org/apache/solr/search/function/ConstNumberSource.java
    @@ -26,4 +26,5 @@ public abstract class ConstNumberSource extends ValueSource {
       public abstract float getFloat();
       public abstract double getDouble();  
       public abstract Number getNumber();  
    +  public abstract boolean getBool();
     }
    diff --git a/solr/src/java/org/apache/solr/search/function/ConstValueSource.java b/solr/src/java/org/apache/solr/search/function/ConstValueSource.java
    index ad495a18007..fc0b9334427 100755
    --- a/solr/src/java/org/apache/solr/search/function/ConstValueSource.java
    +++ b/solr/src/java/org/apache/solr/search/function/ConstValueSource.java
    @@ -66,6 +66,10 @@ public class ConstValueSource extends ConstNumberSource {
           public Object objectVal(int doc) {
             return constant;
           }
    +      @Override
    +      public boolean boolVal(int doc) {
    +        return constant != 0.0f;
    +      }
         };
       }
     
    @@ -105,4 +109,9 @@ public class ConstValueSource extends ConstNumberSource {
       public Number getNumber() {
         return constant;
       }
    +
    +  @Override
    +  public boolean getBool() {
    +    return constant != 0.0f;
    +  }
     }
    diff --git a/solr/src/java/org/apache/solr/search/function/DefFunction.java b/solr/src/java/org/apache/solr/search/function/DefFunction.java
    new file mode 100644
    index 00000000000..b2f99a3c6fa
    --- /dev/null
    +++ b/solr/src/java/org/apache/solr/search/function/DefFunction.java
    @@ -0,0 +1,124 @@
    +package org.apache.solr.search.function;
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +import org.apache.lucene.index.IndexReader.AtomicReaderContext;
    +import org.apache.lucene.search.IndexSearcher;
    +import org.apache.lucene.util.BytesRef;
    +
    +import java.io.IOException;
    +import java.util.Arrays;
    +import java.util.List;
    +import java.util.Map;
    +
    +public class DefFunction extends MultiFunction {
    +  public DefFunction(List sources) {
    +    super(sources);
    +  }
    +
    +  @Override
    +  protected String name() {
    +    return "def";
    +  }
    +
    +
    +  @Override
    +  public DocValues getValues(Map fcontext, AtomicReaderContext readerContext) throws IOException {
    +
    +
    +    return new Values(valsArr(sources, fcontext, readerContext)) {
    +      final int upto = valsArr.length - 1;
    +
    +      private DocValues get(int doc) {
    +        for (int i=0; i {
         final double[] arr = vals.values;
         final Bits valid = vals.valid;
         
    -    return new DocValues() {
    -      @Override
    -      public float floatVal(int doc) {
    -        return (float) arr[doc];
    -      }
    -
    -      @Override
    -      public int intVal(int doc) {
    -        return (int) arr[doc];
    -      }
    -
    -      @Override
    -      public long longVal(int doc) {
    -        return (long) arr[doc];
    -      }
    -
    +    return new DoubleDocValues(this) {
           @Override
           public double doubleVal(int doc) {
             return arr[doc];
           }
     
           @Override
    -      public String strVal(int doc) {
    -        return Double.toString(arr[doc]);
    -      }
    -
    -      @Override
    -      public Object objectVal(int doc) {
    -        return valid.get(doc) ? arr[doc] : null;
    -      }
    -
    -      @Override
    -      public String toString(int doc) {
    -        return description() + '=' + doubleVal(doc);
    +      public boolean exists(int doc) {
    +        return valid.get(doc);
           }
     
           @Override
    @@ -147,7 +122,7 @@ public class DoubleFieldSource extends NumericFieldCacheSource {
             }
           }
     
    -            @Override
    +      @Override
           public ValueFiller getValueFiller() {
             return new ValueFiller() {
               private final double[] doubleArr = arr;
    diff --git a/solr/src/java/org/apache/solr/search/function/IfFunction.java b/solr/src/java/org/apache/solr/search/function/IfFunction.java
    new file mode 100644
    index 00000000000..00ad2f437d6
    --- /dev/null
    +++ b/solr/src/java/org/apache/solr/search/function/IfFunction.java
    @@ -0,0 +1,148 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.solr.search.function;
    +
    +import org.apache.lucene.index.IndexReader;
    +import org.apache.lucene.index.IndexReader.AtomicReaderContext;
    +import org.apache.lucene.search.Explanation;
    +import org.apache.lucene.search.IndexSearcher;
    +import org.apache.lucene.util.BytesRef;
    +
    +import java.io.IOException;
    +import java.util.List;
    +import java.util.Map;
    +
    +
    +public class IfFunction extends BoolFunction {
    +  private ValueSource ifSource;
    +  private ValueSource trueSource;
    +  private ValueSource falseSource;
    +
    +
    +  public IfFunction(ValueSource ifSource, ValueSource trueSource, ValueSource falseSource) {
    +    this.ifSource = ifSource;
    +    this.trueSource = trueSource;
    +    this.falseSource = falseSource;
    +  }
    +
    +  @Override
    +  public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
    +    final DocValues ifVals = ifSource.getValues(context, readerContext);
    +    final DocValues trueVals = trueSource.getValues(context, readerContext);
    +    final DocValues falseVals = falseSource.getValues(context, readerContext);
    +
    +    return new DocValues() {
    +      @Override
    +      public byte byteVal(int doc) {
    +        return ifVals.boolVal(doc) ? trueVals.byteVal(doc) : falseVals.byteVal(doc);
    +      }
    +
    +      @Override
    +      public short shortVal(int doc) {
    +        return ifVals.boolVal(doc) ? trueVals.shortVal(doc) : falseVals.shortVal(doc);
    +      }
    +
    +      @Override
    +      public float floatVal(int doc) {
    +        return ifVals.boolVal(doc) ? trueVals.floatVal(doc) : falseVals.floatVal(doc);
    +      }
    +
    +      @Override
    +      public int intVal(int doc) {
    +        return ifVals.boolVal(doc) ? trueVals.intVal(doc) : falseVals.intVal(doc);
    +      }
    +
    +      @Override
    +      public long longVal(int doc) {
    +        return ifVals.boolVal(doc) ? trueVals.longVal(doc) : falseVals.longVal(doc);
    +      }
    +
    +      @Override
    +      public double doubleVal(int doc) {
    +        return ifVals.boolVal(doc) ? trueVals.doubleVal(doc) : falseVals.doubleVal(doc);
    +      }
    +
    +      @Override
    +      public String strVal(int doc) {
    +        return ifVals.boolVal(doc) ? trueVals.strVal(doc) : falseVals.strVal(doc);
    +      }
    +
    +      @Override
    +      public boolean boolVal(int doc) {
    +        return ifVals.boolVal(doc) ? trueVals.boolVal(doc) : falseVals.boolVal(doc);
    +      }
    +
    +      @Override
    +      public boolean bytesVal(int doc, BytesRef target) {
    +        return ifVals.boolVal(doc) ? trueVals.bytesVal(doc, target) : falseVals.bytesVal(doc, target);
    +      }
    +
    +      @Override
    +      public Object objectVal(int doc) {
    +        return ifVals.boolVal(doc) ? trueVals.objectVal(doc) : falseVals.objectVal(doc);
    +      }
    +
    +      @Override
    +      public boolean exists(int doc) {
    +        return true; // TODO: flow through to any sub-sources?
    +      }
    +
    +      @Override
    +      public ValueFiller getValueFiller() {
    +        // TODO: we need types of trueSource / falseSource to handle this
    +        // for now, use float.
    +        return super.getValueFiller();
    +      }
    +
    +      @Override
    +      public String toString(int doc) {
    +        return "if(" + ifVals.toString(doc) + ',' + trueVals.toString(doc) + ',' + falseVals.toString(doc) + ')';
    +      }
    +    };
    +
    +  }
    +
    +  @Override
    +  public String description() {
     +    return "if(" + ifSource.description() + ',' + trueSource.description() + ',' + falseSource.description() + ')';
    +  }
    +
    +  @Override
    +  public int hashCode() {
    +    int h = ifSource.hashCode();
    +    h = h * 31 + trueSource.hashCode();
    +    h = h * 31 + falseSource.hashCode();
    +    return h;
    +  }
    +
    +  @Override
    +  public boolean equals(Object o) {
    +    if (!(o instanceof IfFunction)) return false;
    +    IfFunction other = (IfFunction)o;
    +    return ifSource.equals(other.ifSource)
    +        && trueSource.equals(other.trueSource)
    +        && falseSource.equals(other.falseSource);
    +  }
    +
    +  @Override
    +  public void createWeight(Map context, IndexSearcher searcher) throws IOException {
    +    ifSource.createWeight(context, searcher);
    +    trueSource.createWeight(context, searcher);
    +    falseSource.createWeight(context, searcher);
    +  }
    +}
    \ No newline at end of file
    diff --git a/solr/src/java/org/apache/solr/search/function/LongDocValues.java b/solr/src/java/org/apache/solr/search/function/LongDocValues.java
    index f5117bd0d43..f0e8f6d8ee9 100644
    --- a/solr/src/java/org/apache/solr/search/function/LongDocValues.java
    +++ b/solr/src/java/org/apache/solr/search/function/LongDocValues.java
    @@ -38,6 +38,11 @@ public abstract class LongDocValues extends DocValues {
         return (double)longVal(doc);
       }
     
    +  @Override
    +  public boolean boolVal(int doc) {
    +    return longVal(doc) != 0;
    +  }
    +
       @Override
       public String strVal(int doc) {
         return Long.toString(longVal(doc));
    diff --git a/solr/src/java/org/apache/solr/search/function/MultiBoolFunction.java b/solr/src/java/org/apache/solr/search/function/MultiBoolFunction.java
    new file mode 100644
    index 00000000000..033ef6ebae9
    --- /dev/null
    +++ b/solr/src/java/org/apache/solr/search/function/MultiBoolFunction.java
    @@ -0,0 +1,105 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.solr.search.function;
    +
    +import org.apache.lucene.index.IndexReader.AtomicReaderContext;
    +import org.apache.lucene.search.IndexSearcher;
    +
    +import java.io.IOException;
    +import java.util.List;
    +import java.util.Map;
    +
    +
    +public abstract class MultiBoolFunction extends BoolFunction {
    +  protected final List sources;
    +
    +  public MultiBoolFunction(List sources) {
    +    this.sources = sources;
    +  }
    +
    +  protected abstract String name();
    +
    +  protected abstract boolean func(int doc, DocValues[] vals);
    +
    +  @Override
    +  public BoolDocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
    +    final DocValues[] vals =  new DocValues[sources.size()];
    +    int i=0;
    +    for (ValueSource source : sources) {
    +      vals[i++] = source.getValues(context, readerContext);
    +    }
    +
    +    return new BoolDocValues(this) {
    +      @Override
    +      public boolean boolVal(int doc) {
    +	      return func(doc, vals);
    +      }
    +
    +      @Override
    +      public String toString(int doc) {
    +        StringBuilder sb = new StringBuilder(name());
    +        sb.append('(');
    +        boolean first = true;
    +        for (DocValues dv : vals) {
    +          if (first) {
    +            first = false;
    +          } else {
    +            sb.append(',');
    +          }
    +          sb.append(dv.toString(doc));
    +        }
     +        return sb.append(')').toString();
    +      }
    +    };
    +  }
    +
    +  @Override
    +  public String description() {
    +    StringBuilder sb = new StringBuilder(name());
    +    sb.append('(');
    +    boolean first = true;
    +    for (ValueSource source : sources) {
    +      if (first) {
    +        first = false;
    +      } else {
    +        sb.append(',');
    +      }
    +      sb.append(source.description());
    +    }
     +    return sb.append(')').toString();
    +  }
    +
    +  @Override
    +  public int hashCode() {
    +    return sources.hashCode() + name().hashCode();
    +  }
    +
    +  @Override
    +  public boolean equals(Object o) {
    +    if (this.getClass() != o.getClass()) return false;
    +    MultiBoolFunction other = (MultiBoolFunction)o;
    +    return this.sources.equals(other.sources);
    +  }
    +
    +  @Override
    +  public void createWeight(Map context, IndexSearcher searcher) throws IOException {
    +    for (ValueSource source : sources) {
    +      source.createWeight(context, searcher);
    +    }
    +  }
    +}
    \ No newline at end of file
    diff --git a/solr/src/java/org/apache/solr/search/function/MultiFunction.java b/solr/src/java/org/apache/solr/search/function/MultiFunction.java
    new file mode 100644
    index 00000000000..941b3415ba7
    --- /dev/null
    +++ b/solr/src/java/org/apache/solr/search/function/MultiFunction.java
    @@ -0,0 +1,122 @@
    +package org.apache.solr.search.function;
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +import org.apache.lucene.index.IndexReader.AtomicReaderContext;
    +import org.apache.lucene.search.IndexSearcher;
    +import org.apache.lucene.util.BytesRef;
    +
    +import java.io.IOException;
    +import java.util.Arrays;
    +import java.util.List;
    +import java.util.Map;
    +
    +
    +public abstract class MultiFunction extends ValueSource {
    +  protected final List sources;
    +
    +  public MultiFunction(List sources) {
    +    this.sources = sources;
    +  }
    +
    +  abstract protected String name();
    +
    +  @Override
    +  public String description() {
    +    return description(name(), sources);
    +  }
    +
    +  public static String description(String name, List sources) {
    +    StringBuilder sb = new StringBuilder();
    +    sb.append(name).append('(');
    +    boolean firstTime=true;
    +    for (ValueSource source : sources) {
    +      if (firstTime) {
    +        firstTime=false;
    +      } else {
    +        sb.append(',');
    +      }
    +      sb.append(source);
    +    }
    +    sb.append(')');
    +    return sb.toString();
    +  }
    +
    +  public static DocValues[] valsArr(List sources, Map fcontext, AtomicReaderContext readerContext) throws IOException {
    +    final DocValues[] valsArr = new DocValues[sources.size()];
    +    int i=0;
    +    for (ValueSource source : sources) {
    +      valsArr[i++] = source.getValues(fcontext, readerContext);
    +    }
    +    return valsArr;
    +  }
    +
    +  public class Values extends DocValues {
    +    final DocValues[] valsArr;
    +
    +    public Values(DocValues[] valsArr) {
    +      this.valsArr = valsArr;
    +    }
    +
    +    @Override
    +    public String toString(int doc) {
    +      return MultiFunction.toString(name(), valsArr, doc);
    +    }
    +
    +    @Override
    +    public ValueFiller getValueFiller() {
    +      // TODO: need ValueSource.type() to determine correct type
    +      return super.getValueFiller();
    +    }
    +  }
    +
    +
    +  public static String toString(String name, DocValues[] valsArr, int doc) {
    +    StringBuilder sb = new StringBuilder();
    +    sb.append(name).append('(');
    +    boolean firstTime=true;
    +    for (DocValues vals : valsArr) {
    +      if (firstTime) {
    +        firstTime=false;
    +      } else {
    +        sb.append(',');
    +      }
    +      sb.append(vals.toString(doc));
    +    }
    +    sb.append(')');
    +    return sb.toString();
    +  }
    +
    +  @Override
    +  public void createWeight(Map context, IndexSearcher searcher) throws IOException {
    +    for (ValueSource source : sources)
    +      source.createWeight(context, searcher);
    +  }
    +
    +  @Override
    +  public int hashCode() {
    +    return sources.hashCode() + name().hashCode();
    +  }
    +
    +  @Override
    +  public boolean equals(Object o) {
    +    if (this.getClass() != o.getClass()) return false;
    +    MultiFunction other = (MultiFunction)o;
    +    return this.sources.equals(other.sources);
    +  }
    +}
    +
    diff --git a/solr/src/java/org/apache/solr/search/function/SimpleBoolFunction.java b/solr/src/java/org/apache/solr/search/function/SimpleBoolFunction.java
    new file mode 100644
    index 00000000000..6a4da8b229d
    --- /dev/null
    +++ b/solr/src/java/org/apache/solr/search/function/SimpleBoolFunction.java
    @@ -0,0 +1,74 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.solr.search.function;
    +
    +import org.apache.lucene.index.IndexReader.AtomicReaderContext;
    +import org.apache.lucene.search.IndexSearcher;
    +
    +import java.io.IOException;
    +import java.util.Map;
    +
    +
    +public abstract class SimpleBoolFunction extends BoolFunction {
    +  protected final ValueSource source;
    +
    +  public SimpleBoolFunction(ValueSource source) {
    +    this.source = source;
    +  }
    +
    +  protected abstract String name();
    +
    +  protected abstract boolean func(int doc, DocValues vals);
    +
    +  @Override
    +  public BoolDocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
    +    final DocValues vals =  source.getValues(context, readerContext);
    +    return new BoolDocValues(this) {
    +      @Override
    +      public boolean boolVal(int doc) {
    +	      return func(doc, vals);
    +      }
    +      @Override
    +      public String toString(int doc) {
    +	      return name() + '(' + vals.toString(doc) + ')';
    +      }
    +    };
    +  }
    +
    +  @Override
    +  public String description() {
    +    return name() + '(' + source.description() + ')';
    +  }
    +
    +  @Override
    +  public int hashCode() {
    +    return source.hashCode() + name().hashCode();
    +  }
    +
    +  @Override
    +  public boolean equals(Object o) {
    +    if (this.getClass() != o.getClass()) return false;
     +    SimpleBoolFunction other = (SimpleBoolFunction)o;
    +    return this.source.equals(other.source);
    +  }
    +
    +  @Override
    +  public void createWeight(Map context, IndexSearcher searcher) throws IOException {
    +    source.createWeight(context, searcher);
    +  }
    +}
    \ No newline at end of file
    diff --git a/solr/src/java/org/apache/solr/search/function/StrDocValues.java b/solr/src/java/org/apache/solr/search/function/StrDocValues.java
    index e4c28da47eb..5726824388c 100644
    --- a/solr/src/java/org/apache/solr/search/function/StrDocValues.java
    +++ b/solr/src/java/org/apache/solr/search/function/StrDocValues.java
    @@ -21,6 +21,11 @@ public abstract class StrDocValues extends DocValues {
         return exists(doc) ? strVal(doc) : null;
       }
     
    +  @Override
    +  public boolean boolVal(int doc) {
    +    return exists(doc);
    +  }
    +
       @Override
       public String toString(int doc) {
         return vs.description() + "='" + strVal(doc) + "'";
    diff --git a/solr/src/java/org/apache/solr/search/function/StringIndexDocValues.java b/solr/src/java/org/apache/solr/search/function/StringIndexDocValues.java
    index 95d7d0cd823..71db0ab36d0 100755
    --- a/solr/src/java/org/apache/solr/search/function/StringIndexDocValues.java
    +++ b/solr/src/java/org/apache/solr/search/function/StringIndexDocValues.java
    @@ -78,6 +78,10 @@ public abstract class StringIndexDocValues extends DocValues {
         return spareChars.toString();
       }
     
    +  @Override
    +  public boolean boolVal(int doc) {
    +    return exists(doc);
    +  }
     
       @Override
       public abstract Object objectVal(int doc);  // force subclasses to override
    diff --git a/solr/src/test/org/apache/solr/search/TestQueryTypes.java b/solr/src/test/org/apache/solr/search/TestQueryTypes.java
    index ca49dd72b3f..efd6c68f547 100755
    --- a/solr/src/test/org/apache/solr/search/TestQueryTypes.java
    +++ b/solr/src/test/org/apache/solr/search/TestQueryTypes.java
    @@ -119,7 +119,29 @@ public class TestQueryTypes extends AbstractSolrTestCase {
           assertQ(req( "q", "{!frange v="+f+" l='"+v+"' u='"+v+"'}" )
                   ,"//result[@numFound='1']"
                   );
    -      
    +
    +      // exists()
    +      assertQ(req( "fq","id:999", "q", "{!frange l=1 u=1}if(exists("+f+"),1,0)" )
    +              ,"//result[@numFound='1']"
    +              );
    +
    +      // boolean value of non-zero values (just leave off the exists from the prev test)
    +      assertQ(req( "fq","id:999", "q", "{!frange l=1 u=1}if("+f+",1,0)" )
    +              ,"//result[@numFound='1']"
    +              );
    +
    +      if (!"id".equals(f)) {
    +        assertQ(req( "fq","id:1", "q", "{!frange l=1 u=1}if(exists("+f+"),1,0)" )
    +            ,"//result[@numFound='0']"
    +        );
    +
    +       // boolean value of zero/missing values (just leave off the exists from the prev test)
    +       assertQ(req( "fq","id:1", "q", "{!frange l=1 u=1}if("+f+",1,0)" )
    +            ,"//result[@numFound='0']"
    +        );
    +
    +      }
    +
           // function query... just make sure it doesn't throw an exception
           if ("v_s".equals(f)) continue;  // in this context, functions must be able to be interpreted as a float
           assertQ(req( "q", "+id:999 _val_:\"" + f + "\"")
    diff --git a/solr/src/test/org/apache/solr/search/function/TestFunctionQuery.java b/solr/src/test/org/apache/solr/search/function/TestFunctionQuery.java
    index 1bf6dd8edfc..4648b424126 100755
    --- a/solr/src/test/org/apache/solr/search/function/TestFunctionQuery.java
    +++ b/solr/src/test/org/apache/solr/search/function/TestFunctionQuery.java
    @@ -581,4 +581,56 @@ public class TestFunctionQuery extends SolrTestCaseJ4 {
         purgeFieldCache(FieldCache.DEFAULT);   // avoid FC insanity    
       }
     
    +    @Test
    +  public void testBooleanFunctions() throws Exception {
    +    assertU(adoc("id", "1", "text", "hello", "foo_s","A", "foo_ti", "0", "foo_tl","0"));
    +    assertU(adoc("id", "2"                              , "foo_ti","10", "foo_tl","11"));
    +    assertU(commit());
    +
    +    // true and false functions and constants
    +    assertJQ(req("q", "id:1", "fl", "t:true(),f:false(),tt:{!func}true,ff:{!func}false")
    +        , "/response/docs/[0]=={'t':true,'f':false,'tt':true,'ff':false}");
    +
    +    // test that exists(query) depends on the query matching the document
    +    assertJQ(req("q", "id:1", "fl", "t:exists(query($q1)),f:exists(query($q2))", "q1","text:hello", "q2","text:there")
    +        , "/response/docs/[0]=={'t':true,'f':false}");
    +
    +    // test if()
    +    assertJQ(req("q", "id:1", "fl", "a1:if(true,'A','B')", "fl","b1:if(false,'A','B')")
    +        , "/response/docs/[0]=={'a1':'A', 'b1':'B'}");
    +
    +    // test boolean operators
    +    assertJQ(req("q", "id:1", "fl", "t1:and(true,true)", "fl","f1:and(true,false)", "fl","f2:and(false,true)", "fl","f3:and(false,false)")
    +        , "/response/docs/[0]=={'t1':true, 'f1':false, 'f2':false, 'f3':false}");
    +    assertJQ(req("q", "id:1", "fl", "t1:or(true,true)", "fl","t2:or(true,false)", "fl","t3:or(false,true)", "fl","f1:or(false,false)")
    +        , "/response/docs/[0]=={'t1':true, 't2':true, 't3':true, 'f1':false}");
    +    assertJQ(req("q", "id:1", "fl", "f1:xor(true,true)", "fl","t1:xor(true,false)", "fl","t2:xor(false,true)", "fl","f2:xor(false,false)")
    +        , "/response/docs/[0]=={'t1':true, 't2':true, 'f1':false, 'f2':false}");
    +    assertJQ(req("q", "id:1", "fl", "t:not(false),f:not(true)")
    +        , "/response/docs/[0]=={'t':true, 'f':false}");
    +
    +
    +    // def(), the default function that returns the first value that exists
    +    assertJQ(req("q", "id:1", "fl", "x:def(id,123.0), y:def(foo_f,234.0)")
    +        , "/response/docs/[0]=={'x':1.0, 'y':234.0}");
    +    assertJQ(req("q", "id:1", "fl", "x:def(foo_s,'Q'), y:def(missing_s,'W')")
    +        , "/response/docs/[0]=={'x':'A', 'y':'W'}");
    +
    +    // test constant conversion to boolean
    +    assertJQ(req("q", "id:1", "fl", "a:not(0), b:not(1), c:not(0.0), d:not(1.1), e:not('A')")
    +        , "/response/docs/[0]=={'a':true, 'b':false, 'c':true, 'd':false, 'e':false}");
    +
    +  }
    +
    +
    +  @Test
    +  public void testPseudoFieldFunctions() throws Exception {
    +    assertU(adoc("id", "1", "text", "hello", "foo_s","A"));
    +    assertU(adoc("id", "2"));
    +    assertU(commit());
    +
    +    assertJQ(req("q", "id:1", "fl", "a:1,b:2.0,c:'X',d:{!func}foo_s,e:{!func}bar_s")  // if exists() is false, no pseudo-field should be added
    +        , "/response/docs/[0]=={'a':1, 'b':2.0,'c':'X','d':'A'}");
    +  }
    +
     }
    diff --git a/solr/src/webapp/web/css/screen.css b/solr/src/webapp/web/css/screen.css
    index fc50d0309de..53e0387f8b1 100644
    --- a/solr/src/webapp/web/css/screen.css
    +++ b/solr/src/webapp/web/css/screen.css
    @@ -462,6 +462,7 @@ ul
     
     #content #dashboard .block
     {
    +    background-image: none;
         width: 49%;
     }
     
    @@ -550,85 +551,13 @@ ul
         display: block;
     }
     
    -#content #dashboard #replication.is-master .slave
    +#content #dashboard #replication #details table thead td span
     {
         display: none;
     }
     
    -#content #dashboard #replication table
    -{
    -    border-collapse: collapse;
    -}
    -
    -#content #dashboard #replication table th,
    -#content #dashboard #replication table td
    -{
    -    border: 1px solid #f0f0f0;
    -    padding: 2px 5px;
    -}
    -
    -#content #dashboard #replication table thead td
    -{
    -    border: 0;
    -}
    -
    -#content #dashboard #replication table thead th,
    -#content #dashboard #replication table tbody td
    -{
    -    border-right: 0;
    -}
    -
    -#content #dashboard #replication table thead th
    -{
    -    border-top: 0;
    -    font-weight: bold;
    -}
    -
    -#content #dashboard #replication table tbody th,
    -#content #dashboard #replication table tbody td
    -{
    -    border-bottom: 0;
    -    text-align: right;
    -}
    -
    -#content #dashboard #replication table tbody th
    -{
    -    border-left: 0;
    -}
    -
    -#content #dashboard #replication table tbody th,
    -#content #dashboard #replication dt
    -{
    -    width: 100px;
    -}
    -
    -#content #dashboard #replication dl
    -{
    -    display: none;
    -    margin-top: 10px;
    -}
    -
    -#content #dashboard #replication dt,
    -#content #dashboard #replication dd
    -{
    -    display: block;
    -    padding-top: 1px;
    -    padding-bottom: 1px;
    -}
    -
    -#content #dashboard #replication dt
    -{
    -    border-right: 1px solid #f0f0f0;
    -    float: left;
    -    padding-left: 5px;
    -    padding-right: 5px;
    -    margin-right: 3px;
    -    text-align: right;
    -}
    -
     #content #dashboard #dataimport
     {
    -    background-color: #0ff;
         float: right;
     }
     
    @@ -711,6 +640,19 @@ ul
         max-width: 99%;
     }
     
    +#content #analysis #analysis-error
    +{
    +    background-color: #f00;
    +    background-image: url( ../img/ico/construction.png );
    +    background-position: 10px 50%;
    +    color: #fff;
    +    display: none;
    +    font-weight: bold;
    +    margin-bottom: 20px;
    +    padding: 10px;
    +    padding-left: 35px;
    +}
    +
     #content #analysis .analysis-result h2
     {
         position: relative;
    @@ -1334,6 +1276,12 @@ ul
         padding-left: 10px;
     }
     
    +#content #schema-browser #related #f-df-t
    +{
    +    border-bottom: 1px solid #f0f0f0;
    +    padding-bottom: 15px;
    +}
    +
     #content #schema-browser #related dl
     {
         margin-top: 15px;
    @@ -1367,7 +1315,9 @@ ul
     #content #schema-browser #related .dynamic-field .dynamic-field,
     #content #schema-browser #related .dynamic-field .dynamic-field a,
     #content #schema-browser #related .type .type,
    -#content #schema-browser #related .type .type a
    +#content #schema-browser #related .type .type a,
    +#content #schema-browser #related .active,
    +#content #schema-browser #related .active a
     {
         color: #333;
     }
    @@ -1378,6 +1328,11 @@ ul
         color: #666;
     }
     
    +#content #schema-browser #data
    +{
    +    display: none;
    +}
    +
     #content #schema-browser #data #index dt
     {
         display: none;
    @@ -1491,6 +1446,7 @@ ul
     
     #content #schema-browser #data #field .topterms-holder
     {
    +    display: none;
         float: left;
     }
     
    @@ -2830,6 +2786,7 @@ ul
     #content #replication #details table tbody .size
     {
         text-align: right;
    +    white-space: nowrap;
     }
     
     #content #replication #details table tbody .generation div
    diff --git a/solr/src/webapp/web/index.jsp b/solr/src/webapp/web/index.jsp
    index dec2ddc4b34..a632b365327 100644
    --- a/solr/src/webapp/web/index.jsp
    +++ b/solr/src/webapp/web/index.jsp
    @@ -35,14 +35,14 @@
     
                 
     
    -	    

     

    +

     

    - +
    diff --git a/solr/src/webapp/web/js/script.js b/solr/src/webapp/web/js/script.js index 264d88435a7..47f0a056e77 100644 --- a/solr/src/webapp/web/js/script.js +++ b/solr/src/webapp/web/js/script.js @@ -258,7 +258,7 @@ var sammy = $.sammy // #/cores this.get ( - /^#\/cores$/, + /^#\/(cores)$/, function( context ) { sammy.trigger @@ -286,7 +286,7 @@ var sammy = $.sammy // #/cores this.get ( - /^#\/cores\//, + /^#\/(cores)\//, function( context ) { var content_element = $( '#content' ); @@ -386,25 +386,24 @@ var sammy = $.sammy ); var core_names = []; - var core_selects = $( '.swap select', cores_element ); + var core_selects = $( '#actions select', cores_element ); for( var key in cores ) { - core_names.push( '' ) + core_names.push( '' ) } - core_selects .html( core_names.join( "\n") ); - $( 'option[value=' + current_core + ']', core_selects.filter( '.core' ) ) + $( 'option[value="' + current_core + '"]', core_selects.filter( '#swap_core' ) ) .attr( 'selected', 'selected' ); - $( 'option[value=' + current_core + ']', core_selects.filter( '.other' ) ) + $( 'option[value="' + current_core + '"]', core_selects.filter( '.other' ) ) .attr( 'disabled', 'disabled' ) .addClass( 'disabled' ); - $( '.rename input[name=core]', cores_element ) + $( 'input[name="core"]', cores_element ) .val( current_core ); // layout @@ -445,6 +444,57 @@ var sammy = $.sammy } ); + $( 'form a.submit', button_holder_element ) + .die( 'click' ) + .live + ( + 'click', + function( event ) + { + var element = $( this ); + var form_element = element.parents( 'form' ); + var action = $( 'input[name="action"]', form_element ).val().toLowerCase(); + + form_element + .ajaxSubmit + ( + { + url : app.config.solr_path + app.config.core_admin_path + '?wt=json', + dataType : 'json', + beforeSubmit : function( array, form, options ) + { + //loader + }, + success : function( response, status_text, xhr, form ) + { + delete app.cores_data; + + if( 'rename' === action ) + { + context.redirect( path_parts[1] + $( 
'input[name="other"]', form_element ).val() ); + } + else if( 'swap' === action ) + { + window.location.reload(); + } + + $( 'a.reset', form ) + .trigger( 'click' ); + }, + error : function( xhr, text_status, error_thrown ) + { + }, + complete : function() + { + //loader + } + } + ); + + return false; + } + ); + $( 'form a.reset', button_holder_element ) .die( 'click' ) .live @@ -452,12 +502,101 @@ var sammy = $.sammy 'click', function( event ) { + $( this ).parents( 'form' ) + .resetForm(); + $( this ).parents( '.button-holder' ) .trigger( 'toggle' ); + + return false; } ); - $( '#actions .optimize', cores_element ) + var reload_button = $( '#actions .reload', cores_element ); + reload_button + .die( 'click' ) + .live + ( + 'click', + function( event ) + { + $.ajax + ( + { + url : app.config.solr_path + app.config.core_admin_path + '?wt=json&action=RELOAD&core=' + current_core, + dataType : 'json', + context : $( this ), + beforeSend : function( xhr, settings ) + { + this + .addClass( 'loader' ); + }, + success : function( response, text_status, xhr ) + { + this + .addClass( 'success' ); + + window.setTimeout + ( + function() + { + reload_button + .removeClass( 'success' ); + }, + 5000 + ); + }, + error : function( xhr, text_status, error_thrown ) + { + }, + complete : function( xhr, text_status ) + { + this + .removeClass( 'loader' ); + } + } + ); + } + ); + + $( '#actions .unload', cores_element ) + .die( 'click' ) + .live + ( + 'click', + function( event ) + { + $.ajax + ( + { + url : app.config.solr_path + app.config.core_admin_path + '?wt=json&action=UNLOAD&core=' + current_core, + dataType : 'json', + context : $( this ), + beforeSend : function( xhr, settings ) + { + this + .addClass( 'loader' ); + }, + success : function( response, text_status, xhr ) + { + delete app.cores_data; + context.redirect( path_parts[1].substr( 0, path_parts[1].length - 1 ) ); + }, + error : function( xhr, text_status, error_thrown ) + { + }, + complete : function( xhr, 
text_status ) + { + this + .removeClass( 'loader' ); + } + } + ); + } + ); + + var optimize_button = $( '#actions .optimize', cores_element ); + optimize_button .die( 'click' ) .live ( @@ -477,6 +616,19 @@ var sammy = $.sammy }, success : function( response, text_status, xhr ) { + this + .addClass( 'success' ); + + window.setTimeout + ( + function() + { + optimize_button + .removeClass( 'success' ); + }, + 5000 + ); + $( '.optimized dd.ico-0', index_data_element ) .removeClass( 'ico-0' ) .addClass( 'ico-1' ); @@ -1262,6 +1414,9 @@ var sammy = $.sammy error : function( xhr, text_status, error_thrown ) { console.debug( arguments ); + + $( '#content' ) + .html( 'sorry, no replication-handler defined!' ); }, complete : function( xhr, text_status ) { @@ -1323,19 +1478,44 @@ var sammy = $.sammy 'schema_browser_navi', function( event, params ) { - var related_navigation_element = $( '#related dl', params.schema_browser_element ); + var related_navigation_element = $( '#related dl#f-df-t', params.schema_browser_element ); + var related_navigation_meta = $( '#related dl.ukf-dsf', params.schema_browser_element ); var related_select_element = $( '#related select', params.schema_browser_element ) var type = 'index'; - if( !params.route_params ) + var sammy_basepath = '#/' + $( 'p a', params.active_core ).html() + '/schema-browser'; + + if( !related_navigation_meta.hasClass( 'done' ) ) { - related_navigation_element - .hide(); - - $( 'option:selected', related_select_element ) - .removeAttr( 'selected' ); + if( app.schema_browser_data.unique_key_field ) + { + $( '.unique-key-field', related_navigation_meta ) + .show() + .after + ( + '
    ' + + app.schema_browser_data.unique_key_field + '
    ' + ); + } + + if( app.schema_browser_data.default_search_field ) + { + $( '.default-search-field', related_navigation_meta ) + .show() + .after + ( + '
    ' + + app.schema_browser_data.default_search_field + '
    ' + ); + } + + related_navigation_meta + .addClass( 'done' ); } - else + + if( params.route_params ) { var type = params.route_params.splat[3]; var value = params.route_params.splat[4]; @@ -1348,7 +1528,7 @@ var sammy = $.sammy 'types' : [] } - $( 'option[value=' + params.route_params.splat[2] + ']', related_select_element ) + $( 'option[value="' + params.route_params.splat[2] + '"]', related_select_element ) .attr( 'selected', 'selected' ); if( 'field' === type ) @@ -1396,7 +1576,6 @@ var sammy = $.sammy } } - var sammy_basepath = '#/' + $( 'p a', params.active_core ).html() + '/schema-browser'; var navigation_content = ''; if( 0 !== navigation_data.fields.length ) @@ -1464,20 +1643,41 @@ var sammy = $.sammy .attr( 'class', type ) .html( navigation_content ); } - - $.get - ( - 'tpl/schema-browser_'+ type + '.html', - function( template ) - { - var data_element = $( '#data', params.schema_browser_element ); + else + { + related_navigation_element + .hide(); - data_element - .html( template ); + $( 'option:selected', related_select_element ) + .removeAttr( 'selected' ); + } - params.callback( app.schema_browser_data, data_element ); - } - ); + if( 'field' === type && value === app.schema_browser_data.unique_key_field ) + { + $( '.unique-key-field', related_navigation_meta ) + .addClass( 'active' ); + } + else + { + $( '.unique-key-field', related_navigation_meta ) + .removeClass( 'active' ); + } + + if( 'field' === type && value === app.schema_browser_data.default_search_field ) + { + $( '.default-search-field', related_navigation_meta ) + .addClass( 'active' ); + } + else + { + $( '.default-search-field', related_navigation_meta ) + .removeClass( 'active' ); + } + + if( params.callback ) + { + params.callback( app.schema_browser_data, $( '#data', params.schema_browser_element ) ); + } } ); @@ -1787,22 +1987,9 @@ var sammy = $.sammy { var callback = function( schema_browser_data, data_element ) { - var sammy_basepath = '#/' + $( 'p a', context.active_core 
).html() + '/schema-browser' - - if( schema_browser_data.unique_key_field ) - { - $( '.unique-key-field', data_element ) - .show() - .after( '
    ' + schema_browser_data.unique_key_field + '
    ' ); - } - - if( schema_browser_data.default_search_field ) - { - $( '.default-search-field', data_element ) - .show() - .after( '
    ' + schema_browser_data.default_search_field + '
    ' ); - } - } + data_element + .hide(); + }; sammy.trigger ( @@ -1815,20 +2002,28 @@ var sammy = $.sammy } ); - // #/:core/schema-browser/field/$field + // #/:core/schema-browser/field|dynamic-field|type/$field this.get ( - /^#\/([\w\d]+)\/(schema-browser)(\/(field)\/(.+))$/, + /^#\/([\w\d]+)\/(schema-browser)(\/(field|dynamic-field|type)\/(.+))$/, function( context ) { var callback = function( schema_browser_data, data_element ) { var field = context.params.splat[4]; + + var type = context.params.splat[3]; + var is_f = 'field' === type; + var is_df = 'dynamic-field' === type; + var is_t = 'type' === type; var options_element = $( '.options', data_element ); - var sammy_basepath = '#/' + $( 'p a', context.active_core ).html() + '/schema-browser' + var sammy_basepath = context.path.indexOf( '/', context.path.indexOf( '/', 2 ) + 1 ); - var keystring_to_list = function( keystring ) + data_element + .show(); + + var keystring_to_list = function( keystring, element_class ) { var key_list = keystring.replace( /-/g, '' ).split( '' ); var list = []; @@ -1849,7 +2044,12 @@ var sammy = $.sammy if( option_key ) { - list.push( '
    ' + option_key + ',
    ' ); + list.push + ( + '
    ' + + option_key + + ',
    ' + ); } } @@ -1858,192 +2058,265 @@ var sammy = $.sammy return list; } - // -- properties - if( schema_browser_data.fields[field].flags ) + var flags = null; + + if( is_f && schema_browser_data.fields[field] && schema_browser_data.fields[field].flags ) { - var properties_element = $( '.properties', options_element ); - var properties_keys = keystring_to_list( schema_browser_data.fields[field].flags ); + flags = schema_browser_data.fields[field].flags; + } + else if( is_df && schema_browser_data.dynamic_fields[field] && schema_browser_data.dynamic_fields[field].flags ) + { + flags = schema_browser_data.dynamic_fields[field].flags; + } + + // -- properties + var properties_element = $( 'dt.properties', options_element ); + if( flags ) + { + var properties_keys = keystring_to_list( flags, 'properties' ); + + $( 'dd.properties', options_element ) + .remove(); properties_element .show() .after( properties_keys.join( "\n" ) ); } + else + { + $( '.properties', options_element ) + .hide(); + } // -- schema - if( schema_browser_data.fields[field].schema ) + var schema_element = $( 'dt.schema', options_element ); + if( is_f && schema_browser_data.fields[field] && schema_browser_data.fields[field].schema ) { - var schema_element = $( '.schema', options_element ); - var schema_keys = keystring_to_list( schema_browser_data.fields[field].schema ); + var schema_keys = keystring_to_list( schema_browser_data.fields[field].schema, 'schema' ); + + $( 'dd.schema', options_element ) + .remove(); schema_element .show() .after( schema_keys.join( "\n" ) ); } + else + { + $( '.schema', options_element ) + .hide(); + } // -- index - if( schema_browser_data.fields[field].index ) + var index_element = $( 'dt.index', options_element ); + if( is_f && schema_browser_data.fields[field] && schema_browser_data.fields[field].index ) { - var index_element = $( '.index', options_element ); var index_keys = []; if( 0 === schema_browser_data.fields[field].index.indexOf( '(' ) ) { - index_keys.push( 
'
    ' + schema_browser_data.fields[field].index + '
    ' ); + index_keys.push( '
    ' + schema_browser_data.fields[field].index + '
    ' ); } else { - index_keys = keystring_to_list( schema_browser_data.fields[field].index ); + index_keys = keystring_to_list( schema_browser_data.fields[field].index, 'index' ); } + $( 'dd.index', options_element ) + .remove(); + index_element .show() .after( index_keys.join( "\n" ) ); } + else + { + $( '.index', options_element ) + .hide(); + } // -- docs - if( schema_browser_data.fields[field].docs ) - { - var docs_element = $( '.docs', options_element ); + var docs_element = $( 'dt.docs', options_element ); + if( is_f && schema_browser_data.fields[field] && schema_browser_data.fields[field].docs ) + { + $( 'dd.docs', options_element ) + .remove(); docs_element .show() - .after( '
    ' + schema_browser_data.fields[field].docs + '
    ' ); + .after( '
    ' + schema_browser_data.fields[field].docs + '
    ' ); + } + else + { + $( '.docs', options_element ) + .hide(); } // -- distinct - if( schema_browser_data.fields[field].distinct ) + var distinct_element = $( 'dt.distinct', options_element ); + if( is_f && schema_browser_data.fields[field] && schema_browser_data.fields[field].distinct ) { - var distinct_element = $( '.distinct', options_element ); + $( 'dd.distinct', options_element ) + .remove(); distinct_element .show() - .after( '
    ' + schema_browser_data.fields[field].distinct + '
    ' ); + .after( '
    ' + schema_browser_data.fields[field].distinct + '
    ' ); + } + else + { + $( '.distinct', options_element ) + .hide(); } // -- position-increment-gap - if( schema_browser_data.fields[field].positionIncrementGap ) + var pig_element = $( 'dt.position-increment-gap', options_element ); + if( is_f && schema_browser_data.fields[field] && schema_browser_data.fields[field].positionIncrementGap ) { - var pig_element = $( '.position-increment-gap', options_element ); + $( 'dt.position-increment-gap', options_element ) + .remove(); pig_element .show() - .after( '
    ' + schema_browser_data.fields[field].positionIncrementGap + '
    ' ); + .after( '
    ' + schema_browser_data.fields[field].positionIncrementGap + '
    ' ); + } + else + { + $( '.position-increment-gap', options_element ) + .hide(); + } + + var analyzer_element = $( '.analyzer', data_element ); + var analyzer_data = null; + + if( is_f ) + { + analyzer_data = schema_browser_data.types[schema_browser_data.relations.f_t[field]]; + } + else if( is_df ) + { + analyzer_data = schema_browser_data.types[schema_browser_data.relations.df_t[field]]; + } + else if( is_t ) + { + analyzer_data = schema_browser_data.types[field]; } - var analyzer_data = schema_browser_data.types[schema_browser_data.relations.f_t[field]]; - var analyzer_element = $( '.analyzer', data_element ); - - var transform_analyzer_data_into_list = function( analyzer_data ) + if( analyzer_data ) { - var args = []; - for( var key in analyzer_data.args ) + var transform_analyzer_data_into_list = function( analyzer_data ) { - var arg_class = ''; - var arg_content = ''; + var args = []; + for( var key in analyzer_data.args ) + { + var arg_class = ''; + var arg_content = ''; - if( 'true' === analyzer_data.args[key] || '1' === analyzer_data.args[key] ) - { - arg_class = 'ico-1'; - arg_content = key; - } - else if( 'false' === analyzer_data.args[key] || '0' === analyzer_data.args[key] ) - { - arg_class = 'ico-0'; - arg_content = key; - } - else - { - arg_content = key + ': '; - - if( 'synonyms' === key || 'words' === key ) + if( 'true' === analyzer_data.args[key] || '1' === analyzer_data.args[key] ) { - // @TODO: set link target for file - arg_content += '' + analyzer_data.args[key] + ''; + arg_class = 'ico-1'; + arg_content = key; + } + else if( 'false' === analyzer_data.args[key] || '0' === analyzer_data.args[key] ) + { + arg_class = 'ico-0'; + arg_content = key; } else { - arg_content += analyzer_data.args[key]; + arg_content = key + ': '; + + if( 'synonyms' === key || 'words' === key ) + { + // @TODO: set link target for file + arg_content += '' + analyzer_data.args[key] + ''; + } + else + { + arg_content += analyzer_data.args[key]; + } } + + args.push( '
    ' + arg_content + '
    ' ); } - args.push( '
    ' + arg_content + '
    ' ); + var list_content = '
    ' + analyzer_data.className + '
    '; + if( 0 !== args.length ) + { + args.sort(); + list_content += args.join( "\n" ); + } + + return list_content; } - var list_content = '
    ' + analyzer_data.className + '
    '; - if( 0 !== args.length ) + // -- field-type + var field_type_element = $( 'dt.field-type', options_element ); + + $( 'dd.field-type', options_element ) + .remove(); + + field_type_element + .show() + .after( '
    ' + analyzer_data.className + '
    ' ); + + + for( var key in analyzer_data ) { - args.sort(); - list_content += args.join( "\n" ); - } - - return list_content; - } - - // -- field-type - var field_type_element = $( '.field-type', options_element ); - - field_type_element - .show() - .after( '
    ' + analyzer_data.className + '
    ' ); - - - for( var key in analyzer_data ) - { - var key_match = key.match( /^(.+)Analyzer$/ ); - if( !key_match ) - { - continue; - } - - var analyzer_key_element = $( '.' + key_match[1], analyzer_element ); - var analyzer_key_data = analyzer_data[key]; - - analyzer_element.show(); - analyzer_key_element.show(); - - if( analyzer_key_data.className ) - { - $( 'dl:first dt', analyzer_key_element ) - .html( analyzer_key_data.className ); - } - - for( var type in analyzer_key_data ) - { - if( 'object' !== typeof analyzer_key_data[type] ) + var key_match = key.match( /^(.+)Analyzer$/ ); + if( !key_match ) { continue; } - var type_element = $( '.' + type, analyzer_key_element ); - var type_content = []; + var analyzer_key_element = $( '.' + key_match[1], analyzer_element ); + var analyzer_key_data = analyzer_data[key]; - type_element.show(); + analyzer_element.show(); + analyzer_key_element.show(); - if( analyzer_key_data[type].className ) + if( analyzer_key_data.className ) { - type_content.push( transform_analyzer_data_into_list( analyzer_key_data[type] ) ); + $( 'dl:first dt', analyzer_key_element ) + .html( analyzer_key_data.className ); } - else + + $( 'ul li', analyzer_key_element ) + .hide(); + + for( var type in analyzer_key_data ) { - for( var entry in analyzer_key_data[type] ) + if( 'object' !== typeof analyzer_key_data[type] ) { - type_content.push( transform_analyzer_data_into_list( analyzer_key_data[type][entry] ) ); + continue; } - } - $( 'dl', type_element ) - .append( type_content.join( "\n" ) ); + var type_element = $( '.' 
+ type, analyzer_key_element ); + var type_content = []; + + type_element.show(); + + if( analyzer_key_data[type].className ) + { + type_content.push( transform_analyzer_data_into_list( analyzer_key_data[type] ) ); + } + else + { + for( var entry in analyzer_key_data[type] ) + { + type_content.push( transform_analyzer_data_into_list( analyzer_key_data[type][entry] ) ); + } + } + + $( 'dl', type_element ) + .empty() + .append( type_content.join( "\n" ) ); + } } } - var topterms_holder_element = $( '.topterms-holder', data_element ); - if( !schema_browser_data.fields[field].topTerms_hash ) - { - topterms_holder_element - .hide(); - } - else + if( is_f && schema_browser_data.fields[field] && schema_browser_data.fields[field].topTerms_hash ) { topterms_holder_element .show(); @@ -2077,6 +2350,7 @@ var sammy = $.sammy topterms_content += ''; topterms_table_element + .empty() .append( topterms_content ); $( 'tbody', topterms_table_element ) @@ -2139,14 +2413,14 @@ var sammy = $.sammy } ); } - - var histogram_holder_element = $( '.histogram-holder', data_element ); - if( !schema_browser_data.fields[field].histogram_hash ) + else { - histogram_holder_element + topterms_holder_element .hide(); } - else + + var histogram_holder_element = $( '.histogram-holder', data_element ); + if( is_f && schema_browser_data.fields[field] && schema_browser_data.fields[field].histogram_hash ) { histogram_holder_element .show(); @@ -2184,52 +2458,11 @@ var sammy = $.sammy } ); } - } - - sammy.trigger - ( - 'schema_browser_load', + else { - callback : callback, - active_core : this.active_core, - route_params : this.params + histogram_holder_element + .hide(); } - ); - } - ); - - // #/:core/schema-browser/dynamic-field/$field - this.get - ( - /^#\/([\w\d]+)\/(schema-browser)(\/(dynamic-field)\/(.+))$/, - function( context ) - { - var callback = function( schema_browser_data, data_element ) - { - console.debug( data_element ); - } - - sammy.trigger - ( - 'schema_browser_load', - { - callback : 
callback, - active_core : this.active_core, - route_params : this.params - } - ); - } - ); - - // #/:core/schema-browser/type/$type - this.get - ( - /^#\/([\w\d]+)\/(schema-browser)(\/(type)\/(.+))$/, - function( context ) - { - var callback = function( schema_browser_data, data_element ) - { - console.debug( data_element ); } sammy.trigger @@ -2287,7 +2520,7 @@ var sammy = $.sammy // #/:core/dataimport this.get ( - /^#\/([\w\d]+)\/dataimport$/, + /^#\/([\w\d]+)\/(dataimport)$/, function( context ) { sammy.trigger @@ -2297,6 +2530,14 @@ var sammy = $.sammy active_core : this.active_core, callback : function( dataimport_handlers ) { + if( 0 === dataimport_handlers.length ) + { + $( '#content' ) + .html( 'sorry, no dataimport-handler defined!' ); + + return false; + } + context.redirect( context.path + '/' + dataimport_handlers[0] ); } } @@ -2307,7 +2548,7 @@ var sammy = $.sammy // #/:core/dataimport this.get ( - /^#\/([\w\d]+)\/dataimport\//, + /^#\/([\w\d]+)\/(dataimport)\//, function( context ) { var core_basepath = this.active_core.attr( 'data-basepath' ); @@ -2341,6 +2582,7 @@ var sammy = $.sammy active_core : context.active_core, callback : function( dataimport_handlers ) { + var handlers_element = $( '.handler', form_element ); var handlers = []; @@ -2357,7 +2599,7 @@ var sammy = $.sammy $( 'ul', handlers_element ) .html( handlers.join( "\n") ) ; - $( 'a[href=' + context.path + ']', handlers_element ).parent() + $( 'a[href="' + context.path + '"]', handlers_element ).parent() .addClass( 'active' ); handlers_element @@ -2529,20 +2771,11 @@ var sammy = $.sammy { started_at = (new Date()).toGMTString(); } - console.debug( 'started_at @ ', started_at ); function dataimport_compute_details( response, details_element ) { var details = []; - - console.debug( 'elapsed @ ', $( 'str[name="Time Elapsed"]', response ).text() ); - console.debug( 'taken @ ', $( 'str[name="Time taken "]', response ).text() ); - console.debug( 'requests @ ', $( 'str[name="Total Requests made 
to DataSource"]', response ).text() ); - console.debug( 'fetched @ ', $( 'str[name="Total Rows Fetched"]', response ).text() ); - console.debug( 'skipped @ ', $( 'str[name="Total Documents Skipped"]', response ).text() ); - console.debug( 'processed @ ', $( 'str[name="Total Documents Processed"]', response ).text() ); - var requests = parseInt( $( 'str[name="Total Requests made to DataSource"]', response ).text() ); if( NaN !== requests ) { @@ -2604,7 +2837,6 @@ var sammy = $.sammy $( '.info strong', state_element ) .text( $( 'str[name=""]', response ).text() ); - console.debug( 'failure' ); console.debug( 'rollback @ ', rollback_element.text() ); } else if( 'idle' === status && 0 !== messages_count ) @@ -2620,7 +2852,6 @@ var sammy = $.sammy $( '.info strong', state_element ) .text( $( 'str[name=""]', response ).text() ); - console.debug( 'success' ); dataimport_compute_details( response, $( '.info .details', state_element ) ); } else if( 'busy' === status ) @@ -2639,7 +2870,6 @@ var sammy = $.sammy $( '.info strong', state_element ) .text( 'Indexing ...' ); - console.debug( 'indexing' ); dataimport_compute_details( response, $( '.info .details', state_element ) ); window.setTimeout( dataimport_fetch_status, 2000 ); @@ -2781,7 +3011,7 @@ var sammy = $.sammy content += '
    ' + "\n"; content += '

    ' + key + '

    ' + "\n"; content += '
    ' + "\n"; - content += '
      ' + "\n"; + content += '
        '; for( var sort_key in sort_table[key] ) { @@ -2863,6 +3093,16 @@ var sammy = $.sammy } ); + $( '.block .content > ul:empty', this ) + .each + ( + function( index, element ) + { + $( element ).parents( '.block' ) + .hide(); + } + ); + $( '.entry', this ) .each ( @@ -3005,15 +3245,12 @@ var sammy = $.sammy // #/:core/analysis this.get ( - /^#\/([\w\d]+)\/analysis$/, + /^#\/([\w\d]+)\/(analysis)$/, function( context ) { var core_basepath = this.active_core.attr( 'data-basepath' ); var content_element = $( '#content' ); - $( 'li.analysis', this.active_core ) - .addClass( 'active' ); - $.get ( 'tpl/analysis.html', @@ -3073,8 +3310,8 @@ var sammy = $.sammy this .html( content ); - - $( 'option[value=fieldname\=' + response.schema.defaultSearchField + ']', this ) + + $( 'option[value="fieldname\=' + response.schema.defaultSearchField + '"]', this ) .attr( 'selected', 'selected' ); }, error : function( xhr, text_status, error_thrown) @@ -3123,6 +3360,11 @@ var sammy = $.sammy build_analysis_table( 'type', name, response.analysis.field_types[name] ); } }, + error : function( xhr, text_status, error_thrown ) + { + $( '#analysis-error', analysis_element ) + .show(); + }, complete : function() { //loader @@ -3318,70 +3560,7 @@ var sammy = $.sammy .html( template ); var dashboard_element = $( '#dashboard' ); - - /* - $.ajax - ( - { - url : core_basepath + '/admin/system?wt=json', - dataType : 'json', - context : $( '#system', dashboard_element ), - beforeSend : function( xhr, settings ) - { - $( 'h2', this ) - .addClass( 'loader' ); - - $( '.message', this ) - .show() - .html( 'Loading' ); - }, - success : function( response, text_status, xhr ) - { - $( '.message', this ) - .empty() - .hide(); - - $( 'dl', this ) - .show(); - - var data = { - 'core_now' : response['core']['now'], - 'core_start' : response['core']['start'], - 'core_host' : response['core']['host'], - 'core_schema' : response['core']['schema'], - 'lucene_solr-spec-version' : 
response['lucene']['solr-spec-version'], - 'lucene_solr-impl-version' : response['lucene']['solr-impl-version'], - 'lucene_lucene-spec-version' : response['lucene']['lucene-spec-version'], - 'lucene_lucene-impl-version' : response['lucene']['lucene-impl-version'] - }; - - for( var key in data ) - { - $( '.' + key, this ) - .show(); - - $( '.value.' + key, this ) - .html( data[key] ); - } - }, - error : function( xhr, text_status, error_thrown) - { - this - .addClass( 'disabled' ); - - $( '.message', this ) - .show() - .html( 'System-Handler is not configured' ); - }, - complete : function( xhr, text_status ) - { - $( 'h2', this ) - .removeClass( 'loader' ); - } - } - ); - //*/ - + $.ajax ( { @@ -3515,7 +3694,7 @@ var sammy = $.sammy $( '.timeago', this ) .timeago(); }, - error : function( xhr, text_status, error_thrown) + error : function( xhr, text_status, error_thrown ) { this .addClass( 'disabled' ); @@ -3562,68 +3741,68 @@ var sammy = $.sammy $( '.replication', context.active_core ) .show(); - var is_master = 'undefined' === typeof( response['details']['slave'] ); + var data = response.details; + var is_slave = 'undefined' !== typeof( data.slave ); var headline = $( 'h2 span', this ); + var details_element = $( '#details', this ); + var current_type_element = $( ( is_slave ? 
'.slave' : '.master' ), this ); - if( is_master ) + if( is_slave ) { this - .addClass( 'is-master' ); - - headline - .html( headline.html() + ' (Master)' ); - } - else - { - this - .addClass( 'is-slave' ); + .addClass( 'slave' ); headline .html( headline.html() + ' (Slave)' ); } - - var data = { - 'details_index-version' : response['details']['indexVersion'], - 'details_generation' : response['details']['generation'], - 'details_index-size' : response['details']['indexSize'] - }; - - if( !is_master ) + else { - $.extend - ( - data, - { - 'details_slave_master-details_index-version' : response['details']['slave']['masterDetails']['indexVersion'], - 'details_slave_master-details_generation' : response['details']['slave']['masterDetails']['generation'], - 'details_slave_master-details_index-size' : response['details']['slave']['masterDetails']['indexSize'], - 'details_slave_master-url' : response['details']['slave']['masterUrl'], - 'details_slave_poll-interval' : response['details']['slave']['pollInterval'], - 'details_slave_next-execution-at' : response['details']['slave']['nextExecutionAt'], - 'details_slave_index-replicated-at' : response['details']['slave']['indexReplicatedAt'], - 'details_slave_last-cycle-bytes-downloaded' : response['details']['slave']['lastCycleBytesDownloaded'], - 'details_slave_replication-failed-at' : response['details']['slave']['replicationFailedAt'], - 'details_slave_previous-cycle-time-in-seconds' : response['details']['slave']['previousCycleTimeInSeconds'], - 'details_slave_is-polling-disabled' : response['details']['slave']['isPollingDisabled'], - 'details_slave_is-replicating' : response['details']['slave']['isReplicating'] - } - ); - - $( 'dl', this ) - .show(); - } - - for( var key in data ) - { - $( '.' + key, this ) - .show(); + this + .addClass( 'master' ); - $( '.value.' 
+ key, this ) - .html( data[key] ); + headline + .html( headline.html() + ' (Master)' ); } - // $( '.timeago', this ) - // .timeago(); + $( '.version div', current_type_element ) + .html( data.indexVersion ); + $( '.generation div', current_type_element ) + .html( data.generation ); + $( '.size div', current_type_element ) + .html( data.indexSize ); + + if( is_slave ) + { + var master_element = $( '.master', details_element ); + $( '.version div', master_element ) + .html( data.slave.masterDetails.indexVersion ); + $( '.generation div', master_element ) + .html( data.slave.masterDetails.generation ); + $( '.size div', master_element ) + .html( data.slave.masterDetails.indexSize ); + + if( data.indexVersion !== data.slave.masterDetails.indexVersion ) + { + $( '.version', details_element ) + .addClass( 'diff' ); + } + else + { + $( '.version', details_element ) + .removeClass( 'diff' ); + } + + if( data.generation !== data.slave.masterDetails.generation ) + { + $( '.generation', details_element ) + .addClass( 'diff' ); + } + else + { + $( '.generation', details_element ) + .removeClass( 'diff' ); + } + } }, error : function( xhr, text_status, error_thrown) { @@ -3642,7 +3821,6 @@ var sammy = $.sammy } ); - /* $.ajax ( { @@ -3688,7 +3866,7 @@ var sammy = $.sammy $( '.message', this ) .show() - .html( 'DataImport is not configured' ); + .html( 'Dataimport is not configured' ); }, complete : function( xhr, text_status ) { @@ -3697,7 +3875,6 @@ var sammy = $.sammy } } ); - //*/ $.ajax ( @@ -3775,7 +3952,7 @@ var sammy = $.sammy { this .html( template ); - + var memory_data = {}; if( app.dashboard_values['jvm']['memory']['raw'] ) { @@ -3824,32 +4001,37 @@ var sammy = $.sammy .show(); } - var cmd_arg_key_element = $( '.command_line_args dt', this ); - var cmd_arg_element = $( '.command_line_args dd', this ); - - for( var key in app.dashboard_values['jvm']['jmx']['commandLineArgs'] ) + var commandLineArgs = app.dashboard_values['jvm']['jmx']['commandLineArgs']; + if( 0 !== 
commandLineArgs.length ) { - cmd_arg_element = cmd_arg_element.clone(); - cmd_arg_element.html( app.dashboard_values['jvm']['jmx']['commandLineArgs'][key] ); + var cmd_arg_element = $( '.command_line_args dt', this ); + var cmd_arg_key_element = $( '.command_line_args dt', this ); + var cmd_arg_element = $( '.command_line_args dd', this ); - cmd_arg_key_element - .after( cmd_arg_element ); + for( var key in commandLineArgs ) + { + cmd_arg_element = cmd_arg_element.clone(); + cmd_arg_element.html( commandLineArgs[key] ); + + cmd_arg_key_element + .after( cmd_arg_element ); + } + + cmd_arg_key_element.closest( 'li' ) + .show(); + + $( '.command_line_args dd:last', this ) + .remove(); + + $( '.command_line_args dd:odd', this ) + .addClass( 'odd' ); } - cmd_arg_key_element.closest( 'li' ) - .show(); - - $( '.command_line_args dd:last', this ) - .remove(); - $( '.timeago', this ) .timeago(); $( 'li:visible:odd', this ) .addClass( 'odd' ); - - $( '.command_line_args dd:odd', this ) - .addClass( 'odd' ); // -- memory bar @@ -3912,7 +4094,7 @@ var sammy = $.sammy } ); -var solr_admin = function() +var solr_admin = function( app_config ) { menu_element = null, @@ -3921,14 +4103,14 @@ var solr_admin = function() active_core = null, environment_basepath = null, - config = null, + config = app_config, params = null, dashboard_values = null, schema_browser_data = null, this.init_menu = function() { - $( '.ping a', this.menu_element ) + $( '.ping a', menu_element ) .live ( 'click', @@ -3943,7 +4125,7 @@ var solr_admin = function() } ); - $( 'a[rel]', this.menu_element ) + $( 'a[rel]', menu_element ) .live ( 'click', @@ -3954,30 +4136,15 @@ var solr_admin = function() } ); } - - this.__construct = function() - { - this.menu_element = $( '#menu ul' ); - - this.init_menu(); - } - this.__construct(); -} -var app; -$( document ).ready -( - function() + this.init_cores = function() { - jQuery.timeago.settings.allowFuture = true; - - app = new solr_admin(); - app.config = app_config; + 
var self = this; $.ajax ( { - url : app.config.solr_path + app.config.core_admin_path + '?wt=json', + url : config.solr_path + config.core_admin_path + '?wt=json', dataType : 'json', beforeSend : function( arr, form, options ) { @@ -3986,28 +4153,28 @@ $( document ).ready }, success : function( response ) { - app.cores_data = response.status; - app.is_multicore = 'undefined' === typeof response.status['']; + self.cores_data = response.status; + is_multicore = 'undefined' === typeof response.status['']; - if( app.is_multicore ) + if( is_multicore ) { - $( '#cores', app.menu_element ) + $( '#cores', menu_element ) .show(); } for( var core_name in response.status ) { - var core_path = app.config.solr_path + '/' + core_name; + var core_path = config.solr_path + '/' + core_name; if( !core_name ) { core_name = 'singlecore'; - core_path = app.config.solr_path + core_path = config.solr_path } - if( !app.environment_basepath ) + if( !environment_basepath ) { - app.environment_basepath = core_path; + environment_basepath = core_path; } var core_tpl = '
      • ' + "\n" @@ -4017,32 +4184,32 @@ $( document ).ready + '
      • Query
      • ' + "\n" + '
      • Schema
      • ' + "\n" + '
      • Config
      • ' + "\n" - + '
      • Replication
      • ' + "\n" - + '
      • Analysis
      • ' + "\n" - + '
      • Schema Browser
      • ' + "\n" - + '
      • Statistics
      • ' + "\n" + + '
      • Replication
      • ' + "\n" + + '
      • Analysis
      • ' + "\n" + + '
      • Schema Browser
      • ' + "\n" + + '
      • Statistics
      • ' + "\n" + '
      • Ping
      • ' + "\n" - + '
      • Plugins
      • ' + "\n" + + '
      • Plugins
      • ' + "\n" + '
      • Dataimport
      • ' + "\n" + '
      ' + "\n" + ''; - app.menu_element + menu_element .append( core_tpl ); } $.ajax ( { - url : app.environment_basepath + '/admin/system?wt=json', + url : environment_basepath + '/admin/system?wt=json', dataType : 'json', beforeSend : function( arr, form, options ) { }, success : function( response ) { - app.dashboard_values = response; + self.dashboard_values = response; var environment_args = null; var cloud_args = null; @@ -4116,4 +4283,27 @@ $( document ).ready } ); } + + this.__construct = function() + { + menu_element = $( '#menu ul' ); + + this.init_menu(); + this.init_cores(); + + this.menu_element = menu_element; + this.config = config; + } + this.__construct(); +} + +var app; +$( document ).ready +( + function() + { + jQuery.timeago.settings.allowFuture = true; + + app = new solr_admin( app_config ); + } ); \ No newline at end of file diff --git a/solr/src/webapp/web/tpl/analysis.html b/solr/src/webapp/web/tpl/analysis.html index a1271c9b4c5..8f250a55b71 100644 --- a/solr/src/webapp/web/tpl/analysis.html +++ b/solr/src/webapp/web/tpl/analysis.html @@ -1,5 +1,11 @@
      +
      + + This Functionality requires the /analysis/field Handler to be registered and active! + +
      +

      Field Analysis

      diff --git a/solr/src/webapp/web/tpl/cores.html b/solr/src/webapp/web/tpl/cores.html index fa5cd16e28d..5baf4bb2a86 100644 --- a/solr/src/webapp/web/tpl/cores.html +++ b/solr/src/webapp/web/tpl/cores.html @@ -18,8 +18,10 @@
      + +

      -

      +

      @@ -42,12 +44,15 @@ + + +

      -

      -

      @@ -181,6 +186,8 @@ + +

      diff --git a/solr/src/webapp/web/tpl/dashboard.html b/solr/src/webapp/web/tpl/dashboard.html index 0a2556099dd..7c9997197ba 100644 --- a/solr/src/webapp/web/tpl/dashboard.html +++ b/solr/src/webapp/web/tpl/dashboard.html @@ -63,96 +63,52 @@
      -
      - +
      + + + - - - + + + + + + + - - - - + + + + + + + + - - - - - - - - - + + + + + + + + + +
       slavemasterIndexVersionGenSize
      indexVersion
      Master:
      x
      y
      z
      generation
      indexSize
      Slave:
      a
      c
      c
      - -
      - -
      masterUrl
      -
      - -
      poll every
      -
      - -
      last replicated
      -
      - -
      replicate next
      -
      - -
      last failed
      -
      - -
      - -
      -

      DataImport-Handler

      +

      Dataimport

      diff --git a/solr/src/webapp/web/tpl/schema-browser.html b/solr/src/webapp/web/tpl/schema-browser.html index a5a6154373e..592c8547981 100644 --- a/solr/src/webapp/web/tpl/schema-browser.html +++ b/solr/src/webapp/web/tpl/schema-browser.html @@ -4,19 +4,135 @@
      - #data +
      + +
      + +
      + +
      Field-Type:
      + +
      Properties:
      + +
      Schema:
      + +
      Index:
      + +
      PI Gap:
      + +
      Docs:
      + +
      Distinct:
      + +
      + +
        +
      • + +

        Index Analyzer:

        +
        +
        +
        + +
          +
        • +

          Tokenizer:

          +
          +
          +
        • +
        • +

          Filters:

          +
          +
          +
        • +
        + +
      • +
      • + +

        Query Analyzer:

        +
        +
        +
        + +
          +
        • +

          Tokenizer:

          +
          +
          +
        • +
        • +

          Filters:

          +
          +
          +
        • +
        + +
      • +
      + +
      + +
      + +

      Top / Terms:

      + + + + + + + + + + + + + + + +
       TermFrq
      + + + +
      + +
      + +

      Histogram:

      + +
      + +
      + +
      + +
      + +
      diff --git a/solr/src/webapp/web/tpl/schema-browser_field.html b/solr/src/webapp/web/tpl/schema-browser_field.html deleted file mode 100644 index 1cbc3a6e589..00000000000 --- a/solr/src/webapp/web/tpl/schema-browser_field.html +++ /dev/null @@ -1,109 +0,0 @@ -
      - -
      - -
      - -
      Field-Type:
      - -
      Properties:
      - -
      Schema:
      - -
      Index:
      - -
      PI Gap:
      - -
      Docs:
      - -
      Distinct:
      - -
      - -
        -
      • - -

        Index Analyzer:

        -
        -
        -
        - -
          -
        • -

          Tokenizer:

          -
          -
          -
        • -
        • -

          Filters:

          -
          -
          -
        • -
        - -
      • -
      • - -

        Query Analyzer:

        -
        -
        -
        - -
          -
        • -

          Tokenizer:

          -
          -
          -
        • -
        • -

          Filters:

          -
          -
          -
        • -
        - -
      • -
      - -
      - -
      - -

      Top / Terms:

      - - - - - - - - - - - - - - - -
       TermFrq
      - - - -
      - -
      - -

      Histogram:

      - -
      - -
      - -
      - -
      - -
      \ No newline at end of file diff --git a/solr/src/webapp/web/tpl/schema-browser_index.html b/solr/src/webapp/web/tpl/schema-browser_index.html deleted file mode 100644 index f872f11f5ec..00000000000 --- a/solr/src/webapp/web/tpl/schema-browser_index.html +++ /dev/null @@ -1,11 +0,0 @@ -
      - -
      - -
      Unique Key Field:
      - -
      Default Search Field:
      - -
      - -
      \ No newline at end of file