mirror of https://github.com/apache/lucene.git
catch up with trunk
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/docvalues@1133734 13f79535-47bb-0310-9956-ffa450edef68
commit 345bc1653d
@ -82,7 +82,7 @@
<module name="lucene" />
<option name="TEST_OBJECT" value="package" />
<option name="WORKING_DIRECTORY" value="file://$PROJECT_DIR$/lucene/build" />
<option name="VM_PARAMETERS" value="-ea -Dlucene.version=4.0-SNAPSHOT -DtempDir=temp -Dtests.linedocsfile=europarl.lines.txt.gz" />
<option name="VM_PARAMETERS" value="-ea -Dlucene.version=4.0-SNAPSHOT -DtempDir=temp" />
<option name="TEST_SEARCH_SCOPE"><value defaultName="singleModule" /></option>
</configuration>
<configuration default="false" name="memory contrib" type="JUnit" factoryName="JUnit">

@ -575,6 +575,11 @@
<groupId>org.apache.felix</groupId>
<artifactId>maven-bundle-plugin</artifactId>
<version>2.3.4</version>
<configuration>
<instructions>
<Export-Package>*;-split-package:=merge-first</Export-Package>
</instructions>
</configuration>
<executions>
<execution>
<id>bundle-manifest</id>

@ -64,6 +64,10 @@
<version>${project.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>

@ -152,10 +152,6 @@
<artifactId>servlet-api</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>

@ -1,118 +1,128 @@
|
|||
#!/usr/bin/perl
|
||||
#
|
||||
# poll-mirrors.pl
|
||||
#
|
||||
# This script is designed to poll download sites after posting a release
|
||||
# and print out notice as each becomes available. The RM can use this
|
||||
# script to delay the release announcement until the release can be
|
||||
# downloaded.
|
||||
#
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
use Getopt::Long;
|
||||
use LWP::Simple;
|
||||
require LWP::Parallel::UserAgent;
|
||||
|
||||
my $version;
|
||||
my $interval = 300;
|
||||
my $quiet = 0;
|
||||
|
||||
my $result = GetOptions ("version=s" => \$version, "interval=i" => \$interval);
|
||||
|
||||
my $usage = "$0 -v version [ -i interval (seconds; default: 300) ]";
|
||||
|
||||
unless ($result) {
|
||||
print STDERR $usage;
|
||||
exit(1);
|
||||
}
|
||||
unless (defined($version) && $version =~ /\d+(?:\.\d+)+/) {
|
||||
print STDERR "You must specify the release version.\n$usage";
|
||||
exit(1);
|
||||
}
|
||||
|
||||
my $previously_selected = select STDOUT;
|
||||
$| = 1; # turn off buffering of STDOUT, so status is printed immediately
|
||||
select $previously_selected;
|
||||
|
||||
my $apache_url_suffix = "lucene/java/$version/lucene-$version.tgz.asc";
|
||||
my $apache_mirrors_list_url = "http://www.apache.org/mirrors/";
|
||||
my $maven_url = "http://repo2.maven.org/maven2/org/apache/lucene/lucene-core/$version/lucene-core-$version.pom.asc";
|
||||
|
||||
my $maven_available = 0;
|
||||
|
||||
my @apache_mirrors = ();
|
||||
|
||||
my $apache_mirrors_list_page = get($apache_mirrors_list_url);
|
||||
if (defined($apache_mirrors_list_page)) {
|
||||
#<TR>
|
||||
# <TD ALIGN=RIGHT><A HREF="http://apache.dattatec.com/">apache.dattatec.com</A> <A HREF="http://apache.dattatec.com/">@</A></TD>
|
||||
#
|
||||
# <TD>http</TD>
|
||||
# <TD ALIGN=RIGHT>8 hours<BR><IMG BORDER=1 SRC="icons/mms14.gif" ALT=""></TD>
|
||||
# <TD ALIGN=RIGHT>5 hours<BR><IMG BORDER=1 SRC="icons/mms14.gif" ALT=""></TD>
|
||||
# <TD>ok</TD>
|
||||
#</TR>
|
||||
while ($apache_mirrors_list_page =~ m~<TR>(.*?)</TR>~gis) {
|
||||
my $mirror_entry = $1;
|
||||
next unless ($mirror_entry =~ m~<TD>\s*ok\s*</TD>\s*$~i); # skip mirrors with problems
|
||||
if ($mirror_entry =~ m~<A\s+HREF\s*=\s*"([^"]+)"\s*>~i) {
|
||||
my $mirror_url = $1;
|
||||
push @apache_mirrors, "$mirror_url/$apache_url_suffix";
|
||||
}
|
||||
}
|
||||
} else {
|
||||
print STDERR "Error fetching Apache mirrors list $apache_mirrors_list_url";
|
||||
exit(1);
|
||||
}
|
||||
|
||||
my $num_apache_mirrors = $#apache_mirrors;
|
||||
print "# Apache Mirrors: $num_apache_mirrors\n";
|
||||
|
||||
while (1) {
|
||||
unless ($maven_available) {
|
||||
my $content = get($maven_url);
|
||||
$maven_available = defined($content);
|
||||
}
|
||||
@apache_mirrors = &check_mirrors;
|
||||
my $num_downloadable_apache_mirrors
|
||||
= $num_apache_mirrors - $#apache_mirrors;
|
||||
|
||||
print "Available: ";
|
||||
print "Maven Central; " if ($maven_available);
|
||||
printf "%d/%d Apache Mirrors (%0.1f%%)\n", $num_downloadable_apache_mirrors,
|
||||
$num_apache_mirrors, ($num_downloadable_apache_mirrors*100/$num_apache_mirrors);
|
||||
last if ($maven_available && $num_downloadable_apache_mirrors == $num_apache_mirrors);
|
||||
sleep($interval);
|
||||
}
|
||||
|
||||
sub check_mirrors {
|
||||
my $agent = LWP::Parallel::UserAgent->new();
|
||||
$agent->timeout(30);
|
||||
$agent->redirect(1); # follow redirects
|
||||
$agent->register($_) for (@apache_mirrors);
|
||||
my $entries = $agent->wait();
|
||||
my @not_yet_downloadable_apache_mirrors;
|
||||
for my $entry (keys %$entries) {
|
||||
my $response = $entries->{$entry}->response;
|
||||
push @not_yet_downloadable_apache_mirrors, $response->request->uri
|
||||
unless ($response->is_success);
|
||||
}
|
||||
return @not_yet_downloadable_apache_mirrors;
|
||||
}
|
||||
#!/usr/bin/perl
|
||||
#
|
||||
# poll-mirrors.pl
|
||||
#
|
||||
# This script is designed to poll download sites after posting a release
|
||||
# and print out notice as each becomes available. The RM can use this
|
||||
# script to delay the release announcement until the release can be
|
||||
# downloaded.
|
||||
#
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
use Getopt::Long;
|
||||
use POSIX qw/strftime/;
|
||||
use LWP::UserAgent;
|
||||
|
||||
my $version;
|
||||
my $interval = 300;
|
||||
my $quiet = 0;
|
||||
|
||||
my $result = GetOptions ("version=s" => \$version, "interval=i" => \$interval);
|
||||
|
||||
my $usage = "$0 -v version [ -i interval (seconds; default: 300) ]";
|
||||
|
||||
unless ($result) {
|
||||
print STDERR $usage;
|
||||
exit(1);
|
||||
}
|
||||
unless (defined($version) && $version =~ /\d+(?:\.\d+)+/) {
|
||||
print STDERR "You must specify the release version.\n$usage";
|
||||
exit(1);
|
||||
}
|
||||
|
||||
my $previously_selected = select STDOUT;
|
||||
$| = 1; # turn off buffering of STDOUT, so status is printed immediately
|
||||
select $previously_selected;
|
||||
|
||||
my $apache_url_suffix = "lucene/java/$version/lucene-$version.zip.asc";
|
||||
my $apache_mirrors_list_url = "http://www.apache.org/mirrors/";
|
||||
my $maven_url = "http://repo1.maven.org/maven2/org/apache/lucene/lucene-core/$version/lucene-core-$version.pom.asc";
|
||||
|
||||
my $agent = LWP::UserAgent->new();
|
||||
$agent->timeout(2);
|
||||
|
||||
my $maven_available = 0;
|
||||
|
||||
my @apache_mirrors = ();
|
||||
|
||||
my $apache_mirrors_list_page = $agent->get($apache_mirrors_list_url)->decoded_content;
|
||||
if (defined($apache_mirrors_list_page)) {
|
||||
#<TR>
|
||||
# <TD ALIGN=RIGHT><A HREF="http://apache.dattatec.com/">apache.dattatec.com</A> <A HREF="http://apache.dattatec.com/">@</A></TD>
|
||||
#
|
||||
# <TD>http</TD>
|
||||
# <TD ALIGN=RIGHT>8 hours<BR><IMG BORDER=1 SRC="icons/mms14.gif" ALT=""></TD>
|
||||
# <TD ALIGN=RIGHT>5 hours<BR><IMG BORDER=1 SRC="icons/mms14.gif" ALT=""></TD>
|
||||
# <TD>ok</TD>
|
||||
#</TR>
|
||||
while ($apache_mirrors_list_page =~ m~<TR>(.*?)</TR>~gis) {
|
||||
my $mirror_entry = $1;
|
||||
next unless ($mirror_entry =~ m~<TD>\s*ok\s*</TD>\s*$~i); # skip mirrors with problems
|
||||
if ($mirror_entry =~ m~<A\s+HREF\s*=\s*"([^"]+)"\s*>~i) {
|
||||
my $mirror_url = $1;
|
||||
push @apache_mirrors, "$mirror_url/$apache_url_suffix";
|
||||
}
|
||||
}
|
||||
} else {
|
||||
print STDERR "Error fetching Apache mirrors list $apache_mirrors_list_url";
|
||||
exit(1);
|
||||
}
|
||||
|
||||
my $num_apache_mirrors = $#apache_mirrors;
|
||||
|
||||
my $sleep_interval = 0;
|
||||
while (1) {
|
||||
print "\n", strftime('%d-%b-%Y %H:%M:%S', localtime);
|
||||
print "\nPolling $#apache_mirrors Apache Mirrors";
|
||||
print " and Maven Central" unless ($maven_available);
|
||||
print "...\n";
|
||||
|
||||
my $start = time();
|
||||
$maven_available = (200 == $agent->get($maven_url)->code)
|
||||
unless ($maven_available);
|
||||
@apache_mirrors = &check_mirrors;
|
||||
my $stop = time();
|
||||
$sleep_interval = $interval - ($stop - $start);
|
||||
|
||||
my $num_downloadable_apache_mirrors = $num_apache_mirrors - $#apache_mirrors;
|
||||
print "$version is ", ($maven_available ? "" : "not "),
|
||||
"downloadable from Maven Central.\n";
|
||||
printf "$version is downloadable from %d/%d Apache Mirrors (%0.1f%%)\n",
|
||||
$num_downloadable_apache_mirrors, $num_apache_mirrors,
|
||||
($num_downloadable_apache_mirrors*100/$num_apache_mirrors);
|
||||
|
||||
last if ($maven_available && 0 == $#apache_mirrors);
|
||||
|
||||
if ($sleep_interval > 0) {
|
||||
print "Sleeping for $sleep_interval seconds...\n";
|
||||
sleep($sleep_interval)
|
||||
}
|
||||
}
|
||||
|
||||
sub check_mirrors {
|
||||
my @not_yet_downloadable_apache_mirrors;
|
||||
for my $mirror (@apache_mirrors) {
|
||||
push @not_yet_downloadable_apache_mirrors, $mirror
|
||||
unless (200 == $agent->get($mirror)->code);
|
||||
print ".";
|
||||
}
|
||||
print "\n";
|
||||
return @not_yet_downloadable_apache_mirrors;
|
||||
}
|
||||
|
|
|
@ -0,0 +1,407 @@
|
|||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import hashlib
|
||||
import httplib
|
||||
import re
|
||||
import urllib2
|
||||
import urlparse
|
||||
import sys
|
||||
import HTMLParser
|
||||
|
||||
# This tool expects to find /lucene and /solr off the base URL. You
|
||||
# must have a working gpg, tar, unzip in your path. This has only
|
||||
# been tested on Linux so far!
|
||||
|
||||
# http://s.apache.org/lusolr32rc2
|
||||
|
||||
# TODO
|
||||
# + verify KEYS contains key that signed the release
|
||||
# + make sure changes HTML looks ok
|
||||
# - verify license/notice of all dep jars
|
||||
# - check maven
|
||||
# - check JAR manifest version
|
||||
# - check license/notice exist
|
||||
# - check no "extra" files
|
||||
# - make sure jars exist inside bin release
|
||||
# - run "ant test"
|
||||
# - make sure docs exist
|
||||
# - use java5 for lucene/modules
|
||||
|
||||
reHREF = re.compile('<a href="(.*?)">(.*?)</a>')
|
||||
|
||||
# Set to True to avoid re-downloading the packages...
|
||||
DEBUG = False
|
||||
|
||||
def getHREFs(urlString):
|
||||
|
||||
# Deref any redirects
|
||||
while True:
|
||||
url = urlparse.urlparse(urlString)
|
||||
h = httplib.HTTPConnection(url.netloc)
|
||||
h.request('GET', url.path)
|
||||
r = h.getresponse()
|
||||
newLoc = r.getheader('location')
|
||||
if newLoc is not None:
|
||||
urlString = newLoc
|
||||
else:
|
||||
break
|
||||
|
||||
links = []
|
||||
for subUrl, text in reHREF.findall(urllib2.urlopen(urlString).read()):
|
||||
fullURL = urlparse.urljoin(urlString, subUrl)
|
||||
links.append((text, fullURL))
|
||||
return links
|
||||
|
||||
def download(name, urlString, tmpDir):
|
||||
fileName = '%s/%s' % (tmpDir, name)
|
||||
if DEBUG and os.path.exists(fileName):
|
||||
if fileName.find('.asc') == -1:
|
||||
print ' already done: %.1f MB' % (os.path.getsize(fileName)/1024./1024.)
|
||||
return
|
||||
fIn = urllib2.urlopen(urlString)
|
||||
fOut = open(fileName, 'wb')
|
||||
success = False
|
||||
try:
|
||||
while True:
|
||||
s = fIn.read(65536)
|
||||
if s == '':
|
||||
break
|
||||
fOut.write(s)
|
||||
fOut.close()
|
||||
fIn.close()
|
||||
success = True
|
||||
finally:
|
||||
fIn.close()
|
||||
fOut.close()
|
||||
if not success:
|
||||
os.remove(fileName)
|
||||
if fileName.find('.asc') == -1:
|
||||
print ' %.1f MB' % (os.path.getsize(fileName)/1024./1024.)
|
||||
|
||||
def load(urlString):
|
||||
return urllib2.urlopen(urlString).read()
|
||||
|
||||
def checkSigs(project, urlString, version, tmpDir):
|
||||
|
||||
print ' test basics...'
|
||||
ents = getDirEntries(urlString)
|
||||
artifact = None
|
||||
keysURL = None
|
||||
changesURL = None
|
||||
mavenURL = None
|
||||
expectedSigs = ['asc', 'md5', 'sha1']
|
||||
artifacts = []
|
||||
for text, subURL in ents:
|
||||
if text == 'KEYS':
|
||||
keysURL = subURL
|
||||
elif text == 'maven/':
|
||||
mavenURL = subURL
|
||||
elif text.startswith('changes'):
|
||||
if text not in ('changes/', 'changes-%s/' % version):
|
||||
raise RuntimeError('%s: found %s vs expected changes-%s/' % (project, text, version))
|
||||
changesURL = subURL
|
||||
elif artifact == None:
|
||||
artifact = text
|
||||
artifactURL = subURL
|
||||
if project == 'solr':
|
||||
expected = 'apache-solr-%s' % version
|
||||
else:
|
||||
expected = 'lucene-%s' % version
|
||||
if not artifact.startswith(expected):
|
||||
raise RuntimeError('%s: unknown artifact %s: expected prefix %s' % (project, text, expected))
|
||||
sigs = []
|
||||
elif text.startswith(artifact + '.'):
|
||||
sigs.append(text[len(artifact)+1:])
|
||||
else:
|
||||
if sigs != expectedSigs:
|
||||
raise RuntimeError('%s: artifact %s has wrong sigs: expected %s but got %s' % (project, artifact, expectedSigs, sigs))
|
||||
artifacts.append((artifact, artifactURL))
|
||||
artifact = text
|
||||
artifactURL = subURL
|
||||
sigs = []
|
||||
|
||||
if sigs != []:
|
||||
artifacts.append((artifact, artifactURL))
|
||||
if sigs != expectedSigs:
|
||||
raise RuntimeError('%s: artifact %s has wrong sigs: expected %s but got %s' % (project, artifact, expectedSigs, sigs))
|
||||
|
||||
if project == 'lucene':
|
||||
expected = ['lucene-%s-src.tgz' % version,
|
||||
'lucene-%s.tgz' % version,
|
||||
'lucene-%s.zip' % version]
|
||||
else:
|
||||
expected = ['apache-solr-%s-src.tgz' % version,
|
||||
'apache-solr-%s.tgz' % version,
|
||||
'apache-solr-%s.zip' % version]
|
||||
|
||||
actual = [x[0] for x in artifacts]
|
||||
if expected != actual:
|
||||
raise RuntimeError('%s: wrong artifacts: expected %s but got %s' % (project, expected, actual))
|
||||
|
||||
if keysURL is None:
|
||||
raise RuntimeError('%s is missing KEYS' % project)
|
||||
|
||||
download('%s.KEYS' % project, keysURL, tmpDir)
|
||||
|
||||
keysFile = '%s/%s.KEYS' % (tmpDir, project)
|
||||
|
||||
# Set up clean gpg world; import keys file:
|
||||
gpgHomeDir = '%s/%s.gpg' % (tmpDir, project)
|
||||
if os.path.exists(gpgHomeDir):
|
||||
shutil.rmtree(gpgHomeDir)
|
||||
os.makedirs(gpgHomeDir, 0700)
|
||||
run('gpg --homedir %s --import %s' % (gpgHomeDir, keysFile),
|
||||
'%s/%s.gpg.import.log 2>&1' % (tmpDir, project))
|
||||
|
||||
if mavenURL is None:
|
||||
raise RuntimeError('%s is missing maven' % project)
|
||||
|
||||
if project == 'lucene':
|
||||
if changesURL is None:
|
||||
raise RuntimeError('%s is missing changes-%s' % (project, version))
|
||||
testChanges(project, version, changesURL)
|
||||
|
||||
for artifact, urlString in artifacts:
|
||||
print ' download %s...' % artifact
|
||||
download(artifact, urlString, tmpDir)
|
||||
verifyDigests(artifact, urlString, tmpDir)
|
||||
|
||||
print ' verify sig'
|
||||
# Test sig
|
||||
download(artifact + '.asc', urlString + '.asc', tmpDir)
|
||||
sigFile = '%s/%s.asc' % (tmpDir, artifact)
|
||||
artifactFile = '%s/%s' % (tmpDir, artifact)
|
||||
logFile = '%s/%s.%s.gpg.verify.log' % (tmpDir, project, artifact)
|
||||
run('gpg --homedir %s --verify %s %s' % (gpgHomeDir, sigFile, artifactFile),
|
||||
logFile)
|
||||
# Forward any GPG warnings:
|
||||
f = open(logFile, 'rb')
|
||||
for line in f.readlines():
|
||||
if line.lower().find('warning') != -1:
|
||||
print ' GPG: %s' % line.strip()
|
||||
f.close()
|
||||
|
||||
def testChanges(project, version, changesURLString):
|
||||
print ' check changes HTML...'
|
||||
changesURL = None
|
||||
contribChangesURL = None
|
||||
for text, subURL in getDirEntries(changesURLString):
|
||||
if text == 'Changes.html':
|
||||
changesURL = subURL
|
||||
elif text == 'Contrib-Changes.html':
|
||||
contribChangesURL = subURL
|
||||
|
||||
if changesURL is None:
|
||||
raise RuntimeError('did not see Changes.html link from %s' % changesURLString)
|
||||
if contribChangesURL is None:
|
||||
raise RuntimeError('did not see Contrib-Changes.html link from %s' % changesURLString)
|
||||
|
||||
s = load(changesURL)
|
||||
|
||||
if s.find('Release %s' % version) == -1:
|
||||
raise RuntimeError('did not see "Release %s" in %s' % (version, changesURL))
|
||||
|
||||
def run(command, logFile):
|
||||
if os.system('%s > %s 2>&1' % (command, logFile)):
|
||||
raise RuntimeError('command "%s" failed; see log file %s' % (command, logFile))
|
||||
|
||||
def verifyDigests(artifact, urlString, tmpDir):
|
||||
print ' verify md5/sha1 digests'
|
||||
md5Expected, t = load(urlString + '.md5').strip().split()
|
||||
if t != '*'+artifact:
|
||||
raise RuntimeError('MD5 %s.md5 lists artifact %s but expected *%s' % (urlString, t, artifact))
|
||||
|
||||
sha1Expected, t = load(urlString + '.sha1').strip().split()
|
||||
if t != '*'+artifact:
|
||||
raise RuntimeError('SHA1 %s.sha1 lists artifact %s but expected *%s' % (urlString, t, artifact))
|
||||
|
||||
m = hashlib.md5()
|
||||
s = hashlib.sha1()
|
||||
f = open('%s/%s' % (tmpDir, artifact))
|
||||
while True:
|
||||
x = f.read(65536)
|
||||
if x == '':
|
||||
break
|
||||
m.update(x)
|
||||
s.update(x)
|
||||
f.close()
|
||||
md5Actual = m.hexdigest()
|
||||
sha1Actual = s.hexdigest()
|
||||
if md5Actual != md5Expected:
|
||||
raise RuntimeError('MD5 digest mismatch for %s: expected %s but got %s' % (artifact, md5Expected, md5Actual))
|
||||
if sha1Actual != sha1Expected:
|
||||
raise RuntimeError('SHA1 digest mismatch for %s: expected %s but got %s' % (artifact, sha1Expected, sha1Actual))
|
||||
|
||||
def getDirEntries(urlString):
|
||||
links = getHREFs(urlString)
|
||||
for i, (text, subURL) in enumerate(links):
|
||||
if text == 'Parent Directory':
|
||||
return links[(i+1):]
|
||||
|
||||
def unpack(project, tmpDir, artifact, version):
|
||||
destDir = '%s/unpack' % tmpDir
|
||||
if os.path.exists(destDir):
|
||||
shutil.rmtree(destDir)
|
||||
os.makedirs(destDir)
|
||||
os.chdir(destDir)
|
||||
print ' unpack %s...' % artifact
|
||||
unpackLogFile = '%s/%s-unpack-%s.log' % (tmpDir, project, artifact)
|
||||
if artifact.endswith('.tar.gz') or artifact.endswith('.tgz'):
|
||||
run('tar xzf %s/%s' % (tmpDir, artifact), unpackLogFile)
|
||||
elif artifact.endswith('.zip'):
|
||||
run('unzip %s/%s' % (tmpDir, artifact), unpackLogFile)
|
||||
|
||||
# make sure it unpacks to proper subdir
|
||||
l = os.listdir(destDir)
|
||||
if project == 'solr':
|
||||
expected = 'apache-%s-%s' % (project, version)
|
||||
else:
|
||||
expected = '%s-%s' % (project, version)
|
||||
if l != [expected]:
|
||||
raise RuntimeError('unpack produced entries %s; expected only %s' % (l, expected))
|
||||
|
||||
unpackPath = '%s/%s' % (destDir, expected)
|
||||
verifyUnpacked(project, artifact, unpackPath, version)
|
||||
|
||||
def verifyUnpacked(project, artifact, unpackPath, version):
|
||||
os.chdir(unpackPath)
|
||||
isSrc = artifact.find('-src') != -1
|
||||
l = os.listdir(unpackPath)
|
||||
textFiles = ['LICENSE', 'NOTICE', 'README']
|
||||
if project == 'lucene':
|
||||
textFiles.extend(('JRE_VERSION_MIGRATION', 'CHANGES'))
|
||||
if isSrc:
|
||||
textFiles.append('BUILD')
|
||||
for fileName in textFiles:
|
||||
fileName += '.txt'
|
||||
if fileName not in l:
|
||||
raise RuntimeError('file "%s" is missing from artifact %s' % (fileName, artifact))
|
||||
l.remove(fileName)
|
||||
|
||||
if not isSrc:
|
||||
if project == 'lucene':
|
||||
expectedJARs = ('lucene-core-%s' % version,
|
||||
'lucene-core-%s-javadoc' % version,
|
||||
'lucene-test-framework-%s' % version,
|
||||
'lucene-test-framework-%s-javadoc' % version)
|
||||
else:
|
||||
expectedJARs = ()
|
||||
|
||||
for fileName in expectedJARs:
|
||||
fileName += '.jar'
|
||||
if fileName not in l:
|
||||
raise RuntimeError('%s: file "%s" is missing from artifact %s' % (project, fileName, artifact))
|
||||
l.remove(fileName)
|
||||
|
||||
if project == 'lucene':
|
||||
extras = ('lib', 'docs', 'contrib')
|
||||
if isSrc:
|
||||
extras += ('build.xml', 'index.html', 'common-build.xml', 'src', 'backwards')
|
||||
else:
|
||||
extras = ()
|
||||
|
||||
for e in extras:
|
||||
if e not in l:
|
||||
raise RuntimeError('%s: %s missing from artifact %s' % (project, e, artifact))
|
||||
l.remove(e)
|
||||
|
||||
if project == 'lucene':
|
||||
if len(l) > 0:
|
||||
raise RuntimeError('%s: unexpected files/dirs in artifact %s: %s' % (project, artifact, l))
|
||||
|
||||
if isSrc:
|
||||
if project == 'lucene':
|
||||
print ' run tests w/ Java 5...'
|
||||
run('export JAVA_HOME=/usr/local/src/jdk1.5.0_22; ant test', '%s/test.log' % unpackPath)
|
||||
run('export JAVA_HOME=/usr/local/src/jdk1.5.0_22; ant jar', '%s/compile.log' % unpackPath)
|
||||
testDemo(isSrc)
|
||||
else:
|
||||
print ' run tests w/ Java 6...'
|
||||
run('export JAVA_HOME=/usr/local/src/jdk1.6.0_21; ant test', '%s/test.log' % unpackPath)
|
||||
else:
|
||||
if project == 'lucene':
|
||||
testDemo(isSrc)
|
||||
|
||||
def testDemo(isSrc):
|
||||
print ' test demo...'
|
||||
if isSrc:
|
||||
cp = 'build/lucene-core-3.2-SNAPSHOT.jar:build/contrib/demo/lucene-demo-3.2-SNAPSHOT.jar'
|
||||
docsDir = 'src'
|
||||
else:
|
||||
cp = 'lucene-core-3.2.0.jar:contrib/demo/lucene-demo-3.2.0.jar'
|
||||
docsDir = 'docs'
|
||||
run('export JAVA_HOME=/usr/local/src/jdk1.5.0_22; java -cp %s org.apache.lucene.demo.IndexFiles -index index -docs %s' % (cp, docsDir), 'index.log')
|
||||
run('export JAVA_HOME=/usr/local/src/jdk1.5.0_22; java -cp %s org.apache.lucene.demo.SearchFiles -index index -query lucene' % cp, 'search.log')
|
||||
reMatchingDocs = re.compile('(\d+) total matching documents')
|
||||
m = reMatchingDocs.search(open('search.log', 'rb').read())
|
||||
if m is None:
|
||||
raise RuntimeError('lucene demo\'s SearchFiles found no results')
|
||||
else:
|
||||
numHits = int(m.group(1))
|
||||
if numHits < 100:
|
||||
raise RuntimeError('lucene demo\'s SearchFiles found too few results: %s' % numHits)
|
||||
print ' got %d hits for query "lucene"' % numHits
|
||||
|
||||
def main():
|
||||
|
||||
if len(sys.argv) != 4:
|
||||
print
|
||||
print 'Usage python -u %s BaseURL version tmpDir' % sys.argv[0]
|
||||
print
|
||||
sys.exit(1)
|
||||
|
||||
baseURL = sys.argv[1]
|
||||
version = sys.argv[2]
|
||||
tmpDir = os.path.abspath(sys.argv[3])
|
||||
|
||||
if not DEBUG:
|
||||
if os.path.exists(tmpDir):
|
||||
raise RuntimeError('temp dir %s exists; please remove first' % tmpDir)
|
||||
os.makedirs(tmpDir)
|
||||
|
||||
lucenePath = None
|
||||
solrPath = None
|
||||
print 'Load release URL...'
|
||||
for text, subURL in getDirEntries(baseURL):
|
||||
if text.lower().find('lucene') != -1:
|
||||
lucenePath = subURL
|
||||
elif text.lower().find('solr') != -1:
|
||||
solrPath = subURL
|
||||
|
||||
if lucenePath is None:
|
||||
raise RuntimeError('could not find lucene subdir')
|
||||
if solrPath is None:
|
||||
raise RuntimeError('could not find solr subdir')
|
||||
|
||||
print
|
||||
print 'Test Lucene...'
|
||||
checkSigs('lucene', lucenePath, version, tmpDir)
|
||||
for artifact in ('lucene-%s.tgz' % version, 'lucene-%s.zip' % version):
|
||||
unpack('lucene', tmpDir, artifact, version)
|
||||
unpack('lucene', tmpDir, 'lucene-%s-src.tgz' % version, version)
|
||||
|
||||
print
|
||||
print 'Test Solr...'
|
||||
checkSigs('solr', solrPath, version, tmpDir)
|
||||
for artifact in ('apache-solr-%s.tgz' % version, 'apache-solr-%s.zip' % version):
|
||||
unpack('solr', tmpDir, artifact, version)
|
||||
unpack('solr', tmpDir, 'apache-solr-%s-src.tgz' % version, version)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
|
@ -423,6 +423,9 @@ Optimizations

* LUCENE-2897: Apply deleted terms while flushing a segment. We still
buffer deleted terms to later apply to past segments. (Mike McCandless)

* LUCENE-1736: DateTools.java general improvements.
(David Smiley via Steve Rowe)

Bug fixes

@ -433,6 +436,10 @@ Bug fixes
with more document deletions is requested before a reader with fewer
deletions, provided they share some segments. (yonik)

* LUCENE-2645: Fix false assertion error when same token was added one
after another with 0 posIncr. (David Smiley, Kurosaka Teruhiko via Mike
McCandless)

======================= Lucene 3.x (not yet released) ================

Changes in backwards compatibility policy

@ -79,6 +79,12 @@ API Changes
First/SecondPassGroupingCollector. (Martijn van Groningen, Mike
McCandless)

Bug Fixes

* LUCENE-3185: Fix bug in NRTCachingDirectory.deleteFile that would
always throw exception and sometimes fail to actually delete the
file. (Mike McCandless)

Build

* LUCENE-3149: Upgrade contrib/icu's ICU jar file to ICU 4.8.

@ -165,7 +165,7 @@ public class TokenSources {
this.tokens = tokens;
termAtt = addAttribute(CharTermAttribute.class);
offsetAtt = addAttribute(OffsetAttribute.class);
posincAtt = (PositionIncrementAttribute) addAttribute(PositionIncrementAttribute.class);
posincAtt = addAttribute(PositionIncrementAttribute.class);
}

@Override

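The change above simply drops a redundant cast: addAttribute(Class) is generic and already returns the requested attribute type. A minimal sketch of the pattern, where stream is a hypothetical TokenStream variable:

CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
OffsetAttribute offsetAtt = stream.addAttribute(OffsetAttribute.class);
// the generic signature infers PositionIncrementAttribute, so no cast is needed
PositionIncrementAttribute posIncAtt = stream.addAttribute(PositionIncrementAttribute.class);
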
@ -97,7 +97,7 @@ public class HighFreqTerms {
private static void usage() {
System.out
.println("\n\n"
+ "java org.apache.lucene.misc.HighFreqTerms <index dir> [-t][number_terms] [field]\n\t -t: include totalTermFreq\n\n");
+ "java org.apache.lucene.misc.HighFreqTerms <index dir> [-t] [number_terms] [field]\n\t -t: include totalTermFreq\n\n");
}
/**
*

@ -89,10 +89,10 @@ public class NRTCachingDirectory extends Directory {
private final long maxCachedBytes;

private static final boolean VERBOSE = false;


/**
* We will cache a newly created output if 1) it's a
* flush or a merge and the estimated size of the merged segmnt is <=
* flush or a merge and the estimated size of the merged segment is <=
* maxMergeSizeMB, and 2) the total cached bytes is <=
* maxCachedMB */
public NRTCachingDirectory(Directory delegate, double maxMergeSizeMB, double maxCachedMB) {
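The javadoc above is the whole caching policy. A minimal sketch of that decision with illustrative names (the real class keeps these values in its own private fields and presumably applies the check when a new output is created):

// sketch only: cache a new output when it belongs to a flush or merge,
// the estimated merged segment fits under maxMergeSizeMB, and the cache
// as a whole stays under maxCachedMB
static boolean doCache(boolean flushOrMerge, long estimatedSegmentBytes,
                       long totalCachedBytes, long maxMergeSizeBytes, long maxCachedBytes) {
  return flushOrMerge
      && estimatedSegmentBytes <= maxMergeSizeBytes
      && totalCachedBytes <= maxCachedBytes;
}
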
@ -101,6 +101,36 @@ public class NRTCachingDirectory extends Directory {
|
|||
maxCachedBytes = (long) (maxCachedMB*1024*1024);
|
||||
}
|
||||
|
||||
@Override
|
||||
public LockFactory getLockFactory() {
|
||||
return delegate.getLockFactory();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setLockFactory(LockFactory lf) throws IOException {
|
||||
delegate.setLockFactory(lf);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getLockID() {
|
||||
return delegate.getLockID();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Lock makeLock(String name) {
|
||||
return delegate.makeLock(name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clearLock(String name) throws IOException {
|
||||
delegate.clearLock(name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "NRTCachingDirectory(" + delegate + "; maxCacheMB=" + (maxCachedBytes/1024/1024.) + " maxMergeSizeMB=" + (maxMergeSizeBytes/1024/1024.) + ")";
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized String[] listAll() throws IOException {
|
||||
final Set<String> files = new HashSet<String>();
|
||||
|
@ -108,7 +138,9 @@ public class NRTCachingDirectory extends Directory {
|
|||
files.add(f);
|
||||
}
|
||||
for(String f : delegate.listAll()) {
|
||||
assert !files.contains(f);
|
||||
// Cannot do this -- if lucene calls createOutput but
|
||||
// file already exists then this falsely trips:
|
||||
//assert !files.contains(f): "file \"" + f + "\" is in both dirs";
|
||||
files.add(f);
|
||||
}
|
||||
return files.toArray(new String[files.size()]);
|
||||
|
@ -136,12 +168,15 @@ public class NRTCachingDirectory extends Directory {
|
|||
|
||||
@Override
|
||||
public synchronized void deleteFile(String name) throws IOException {
|
||||
// Delete from both, in case we are currently uncaching:
|
||||
if (VERBOSE) {
|
||||
System.out.println("nrtdir.deleteFile name=" + name);
|
||||
}
|
||||
cache.deleteFile(name);
|
||||
delegate.deleteFile(name);
|
||||
if (cache.fileExists(name)) {
|
||||
assert !delegate.fileExists(name);
|
||||
cache.deleteFile(name);
|
||||
} else {
|
||||
delegate.deleteFile(name);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -207,17 +242,7 @@ public class NRTCachingDirectory extends Directory {
}
}

@Override
public Lock makeLock(String name) {
return delegate.makeLock(name);
}

@Override
public void clearLock(String name) throws IOException {
delegate.clearLock(name);
}

/** Close thius directory, which flushes any cached files
/** Close this directory, which flushes any cached files
* to the delegate and then closes the delegate. */
@Override
public void close() throws IOException {

@ -277,4 +302,3 @@ public class NRTCachingDirectory extends Directory {
}
}
}

@ -111,4 +111,12 @@ public class TestNRTCachingDirectory extends LuceneTestCase {
conf.setMergeScheduler(cachedFSDir.getMergeScheduler());
IndexWriter writer = new IndexWriter(cachedFSDir, conf);
}

public void testDeleteFile() throws Exception {
Directory dir = new NRTCachingDirectory(newDirectory(), 2.0, 25.0);
dir.createOutput("foo.txt").close();
dir.deleteFile("foo.txt");
assertEquals(0, dir.listAll().length);
dir.close();
}
}

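For context, a minimal usage sketch of the directory under test; the on-disk path, the analyzer variable, the Version constant and the 5 MB / 60 MB thresholds are illustrative and not taken from this change:

Directory fsDir = FSDirectory.open(new File("/path/to/index"));   // hypothetical path
Directory cachedDir = new NRTCachingDirectory(fsDir, 5.0, 60.0);  // cache small flushed/merged files in RAM
IndexWriter writer = new IndexWriter(cachedDir,
    new IndexWriterConfig(Version.LUCENE_40, analyzer));          // analyzer assumed to exist
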
@ -17,14 +17,15 @@ package org.apache.lucene.document;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import org.apache.lucene.search.NumericRangeQuery; // for javadocs
|
||||
import org.apache.lucene.util.NumericUtils; // for javadocs
|
||||
|
||||
import java.text.ParseException;
|
||||
import java.text.SimpleDateFormat;
|
||||
import java.util.Calendar;
|
||||
import java.util.Date;
|
||||
import java.util.TimeZone;
|
||||
import java.util.Locale;
|
||||
import org.apache.lucene.search.NumericRangeQuery; // for javadocs
|
||||
import org.apache.lucene.util.NumericUtils; // for javadocs
|
||||
import java.util.TimeZone;
|
||||
|
||||
/**
|
||||
* Provides support for converting dates to strings and vice-versa.
|
||||
|
@ -47,38 +48,27 @@ import org.apache.lucene.util.NumericUtils; // for javadocs
|
|||
*/
|
||||
public class DateTools {
|
||||
|
||||
private static final class DateFormats {
|
||||
final static TimeZone GMT = TimeZone.getTimeZone("GMT");
|
||||
final static TimeZone GMT = TimeZone.getTimeZone("GMT");
|
||||
|
||||
final SimpleDateFormat YEAR_FORMAT = new SimpleDateFormat("yyyy", Locale.US);
|
||||
final SimpleDateFormat MONTH_FORMAT = new SimpleDateFormat("yyyyMM", Locale.US);
|
||||
final SimpleDateFormat DAY_FORMAT = new SimpleDateFormat("yyyyMMdd", Locale.US);
|
||||
final SimpleDateFormat HOUR_FORMAT = new SimpleDateFormat("yyyyMMddHH", Locale.US);
|
||||
final SimpleDateFormat MINUTE_FORMAT = new SimpleDateFormat("yyyyMMddHHmm", Locale.US);
|
||||
final SimpleDateFormat SECOND_FORMAT = new SimpleDateFormat("yyyyMMddHHmmss", Locale.US);
|
||||
final SimpleDateFormat MILLISECOND_FORMAT = new SimpleDateFormat("yyyyMMddHHmmssSSS", Locale.US);
|
||||
{
|
||||
// times need to be normalized so the value doesn't depend on the
|
||||
// location the index is created/used:
|
||||
YEAR_FORMAT.setTimeZone(GMT);
|
||||
MONTH_FORMAT.setTimeZone(GMT);
|
||||
DAY_FORMAT.setTimeZone(GMT);
|
||||
HOUR_FORMAT.setTimeZone(GMT);
|
||||
MINUTE_FORMAT.setTimeZone(GMT);
|
||||
SECOND_FORMAT.setTimeZone(GMT);
|
||||
MILLISECOND_FORMAT.setTimeZone(GMT);
|
||||
}
|
||||
|
||||
final Calendar calInstance = Calendar.getInstance(GMT, Locale.US);
|
||||
}
|
||||
|
||||
private static final ThreadLocal<DateFormats> FORMATS = new ThreadLocal<DateFormats>() {
|
||||
private static final ThreadLocal<Calendar> TL_CAL = new ThreadLocal<Calendar>() {
|
||||
@Override
|
||||
protected DateFormats initialValue() {
|
||||
return new DateFormats();
|
||||
protected Calendar initialValue() {
|
||||
return Calendar.getInstance(GMT, Locale.US);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
//indexed by format length
|
||||
private static final ThreadLocal<SimpleDateFormat[]> TL_FORMATS = new ThreadLocal<SimpleDateFormat[]>() {
|
||||
@Override
|
||||
protected SimpleDateFormat[] initialValue() {
|
||||
SimpleDateFormat[] arr = new SimpleDateFormat[Resolution.MILLISECOND.formatLen+1];
|
||||
for (Resolution resolution : Resolution.values()) {
|
||||
arr[resolution.formatLen] = (SimpleDateFormat)resolution.format.clone();
|
||||
}
|
||||
return arr;
|
||||
}
|
||||
};
|
||||
|
||||
// cannot create, the class has static methods only
|
||||
private DateTools() {}
|
||||
|
||||
|
@ -105,22 +95,8 @@ public class DateTools {
|
|||
* depending on <code>resolution</code>; using GMT as timezone
|
||||
*/
|
||||
public static String timeToString(long time, Resolution resolution) {
|
||||
final DateFormats formats = FORMATS.get();
|
||||
|
||||
formats.calInstance.setTimeInMillis(round(time, resolution));
|
||||
final Date date = formats.calInstance.getTime();
|
||||
|
||||
switch (resolution) {
|
||||
case YEAR: return formats.YEAR_FORMAT.format(date);
|
||||
case MONTH:return formats.MONTH_FORMAT.format(date);
|
||||
case DAY: return formats.DAY_FORMAT.format(date);
|
||||
case HOUR: return formats.HOUR_FORMAT.format(date);
|
||||
case MINUTE: return formats.MINUTE_FORMAT.format(date);
|
||||
case SECOND: return formats.SECOND_FORMAT.format(date);
|
||||
case MILLISECOND: return formats.MILLISECOND_FORMAT.format(date);
|
||||
}
|
||||
|
||||
throw new IllegalArgumentException("unknown resolution " + resolution);
|
||||
final Date date = new Date(round(time, resolution));
|
||||
return TL_FORMATS.get()[resolution.formatLen].format(date);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -148,24 +124,11 @@ public class DateTools {
|
|||
* expected format
|
||||
*/
|
||||
public static Date stringToDate(String dateString) throws ParseException {
|
||||
final DateFormats formats = FORMATS.get();
|
||||
|
||||
if (dateString.length() == 4) {
|
||||
return formats.YEAR_FORMAT.parse(dateString);
|
||||
} else if (dateString.length() == 6) {
|
||||
return formats.MONTH_FORMAT.parse(dateString);
|
||||
} else if (dateString.length() == 8) {
|
||||
return formats.DAY_FORMAT.parse(dateString);
|
||||
} else if (dateString.length() == 10) {
|
||||
return formats.HOUR_FORMAT.parse(dateString);
|
||||
} else if (dateString.length() == 12) {
|
||||
return formats.MINUTE_FORMAT.parse(dateString);
|
||||
} else if (dateString.length() == 14) {
|
||||
return formats.SECOND_FORMAT.parse(dateString);
|
||||
} else if (dateString.length() == 17) {
|
||||
return formats.MILLISECOND_FORMAT.parse(dateString);
|
||||
try {
|
||||
return TL_FORMATS.get()[dateString.length()].parse(dateString);
|
||||
} catch (Exception e) {
|
||||
throw new ParseException("Input is not a valid date string: " + dateString, 0);
|
||||
}
|
||||
throw new ParseException("Input is not valid date string: " + dateString, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -191,44 +154,25 @@ public class DateTools {
|
|||
* @return the date with all values more precise than <code>resolution</code>
|
||||
* set to 0 or 1, expressed as milliseconds since January 1, 1970, 00:00:00 GMT
|
||||
*/
|
||||
@SuppressWarnings("fallthrough")
|
||||
public static long round(long time, Resolution resolution) {
|
||||
final Calendar calInstance = FORMATS.get().calInstance;
|
||||
final Calendar calInstance = TL_CAL.get();
|
||||
calInstance.setTimeInMillis(time);
|
||||
|
||||
switch (resolution) {
|
||||
//NOTE: switch statement fall-through is deliberate
|
||||
case YEAR:
|
||||
calInstance.set(Calendar.MONTH, 0);
|
||||
calInstance.set(Calendar.DAY_OF_MONTH, 1);
|
||||
calInstance.set(Calendar.HOUR_OF_DAY, 0);
|
||||
calInstance.set(Calendar.MINUTE, 0);
|
||||
calInstance.set(Calendar.SECOND, 0);
|
||||
calInstance.set(Calendar.MILLISECOND, 0);
|
||||
break;
|
||||
case MONTH:
|
||||
calInstance.set(Calendar.DAY_OF_MONTH, 1);
|
||||
calInstance.set(Calendar.HOUR_OF_DAY, 0);
|
||||
calInstance.set(Calendar.MINUTE, 0);
|
||||
calInstance.set(Calendar.SECOND, 0);
|
||||
calInstance.set(Calendar.MILLISECOND, 0);
|
||||
break;
|
||||
case DAY:
|
||||
calInstance.set(Calendar.HOUR_OF_DAY, 0);
|
||||
calInstance.set(Calendar.MINUTE, 0);
|
||||
calInstance.set(Calendar.SECOND, 0);
|
||||
calInstance.set(Calendar.MILLISECOND, 0);
|
||||
break;
|
||||
case HOUR:
|
||||
calInstance.set(Calendar.MINUTE, 0);
|
||||
calInstance.set(Calendar.SECOND, 0);
|
||||
calInstance.set(Calendar.MILLISECOND, 0);
|
||||
break;
|
||||
case MINUTE:
|
||||
calInstance.set(Calendar.SECOND, 0);
|
||||
calInstance.set(Calendar.MILLISECOND, 0);
|
||||
break;
|
||||
case SECOND:
|
||||
calInstance.set(Calendar.MILLISECOND, 0);
|
||||
break;
|
||||
case MILLISECOND:
|
||||
// don't cut off anything
|
||||
break;
|
||||
|
@ -241,7 +185,18 @@ public class DateTools {
|
|||
/** Specifies the time granularity. */
|
||||
public static enum Resolution {
|
||||
|
||||
YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, MILLISECOND;
|
||||
YEAR(4), MONTH(6), DAY(8), HOUR(10), MINUTE(12), SECOND(14), MILLISECOND(17);
|
||||
|
||||
final int formatLen;
|
||||
final SimpleDateFormat format;//should be cloned before use, since it's not threadsafe
|
||||
|
||||
Resolution(int formatLen) {
|
||||
this.formatLen = formatLen;
|
||||
// formatLen 10's place: 11111111
|
||||
// formatLen 1's place: 12345678901234567
|
||||
this.format = new SimpleDateFormat("yyyyMMddHHmmssSSS".substring(0,formatLen),Locale.US);
|
||||
this.format.setTimeZone(GMT);
|
||||
}
|
||||
|
||||
/** this method returns the name of the resolution
|
||||
* in lowercase (for backwards compatibility) */
|
||||
|
|
|
@ -22,7 +22,6 @@ import java.io.FilenameFilter;
|
|||
import java.io.IOException;
|
||||
import java.io.PrintStream;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Date;
|
||||
|
|
|
@ -1999,6 +1999,9 @@ public class IndexWriter implements Closeable {
|
|||
// will always write to a new generation ("write
|
||||
// once").
|
||||
segmentInfos.rollbackSegmentInfos(rollbackSegments);
|
||||
if (infoStream != null ) {
|
||||
message("rollback: infos=" + segString(segmentInfos));
|
||||
}
|
||||
|
||||
docWriter.abort();
|
||||
|
||||
|
@ -2439,6 +2442,8 @@ public class IndexWriter implements Closeable {
|
|||
flush(false, true);
|
||||
|
||||
String mergedName = newSegmentName();
|
||||
// TODO: somehow we should fix this merge so it's
|
||||
// abortable so that IW.close(false) is able to stop it
|
||||
SegmentMerger merger = new SegmentMerger(directory, config.getTermIndexInterval(),
|
||||
mergedName, null, payloadProcessorProvider,
|
||||
globalFieldNumberMap.newFieldInfos(SegmentCodecsBuilder.create(codecs)));
|
||||
|
@ -2456,6 +2461,11 @@ public class IndexWriter implements Closeable {
|
|||
|
||||
boolean useCompoundFile;
|
||||
synchronized(this) { // Guard segmentInfos
|
||||
if (stopMerges) {
|
||||
deleter.deleteNewFiles(info.files());
|
||||
return;
|
||||
}
|
||||
ensureOpen();
|
||||
useCompoundFile = mergePolicy.useCompoundFile(segmentInfos, info);
|
||||
}
|
||||
|
||||
|
@ -2471,6 +2481,11 @@ public class IndexWriter implements Closeable {
|
|||
|
||||
// Register the new segment
|
||||
synchronized(this) {
|
||||
if (stopMerges) {
|
||||
deleter.deleteNewFiles(info.files());
|
||||
return;
|
||||
}
|
||||
ensureOpen();
|
||||
segmentInfos.add(info);
|
||||
checkpoint();
|
||||
}
|
||||
|
@ -3076,6 +3091,7 @@ public class IndexWriter implements Closeable {
|
|||
boolean success = false;
|
||||
|
||||
final long t0 = System.currentTimeMillis();
|
||||
//System.out.println(Thread.currentThread().getName() + ": merge start: size=" + (merge.estimatedMergeBytes/1024./1024.) + " MB\n merge=" + merge.segString(directory) + "\n idx=" + segString());
|
||||
|
||||
try {
|
||||
try {
|
||||
|
@ -3116,6 +3132,7 @@ public class IndexWriter implements Closeable {
|
|||
if (infoStream != null && merge.info != null) {
|
||||
message("merge time " + (System.currentTimeMillis()-t0) + " msec for " + merge.info.docCount + " docs");
|
||||
}
|
||||
//System.out.println(Thread.currentThread().getName() + ": merge end");
|
||||
}
|
||||
|
||||
/** Hook that's called when the specified merge is complete. */
|
||||
|
@ -3734,6 +3751,8 @@ public class IndexWriter implements Closeable {
|
|||
|
||||
assert testPoint("midStartCommit");
|
||||
|
||||
boolean pendingCommitSet = false;
|
||||
|
||||
try {
|
||||
// This call can take a long time -- 10s of seconds
|
||||
// or more. We do it without sync:
|
||||
|
@ -3753,6 +3772,7 @@ public class IndexWriter implements Closeable {
|
|||
toSync.prepareCommit(directory);
|
||||
|
||||
pendingCommit = toSync;
|
||||
pendingCommitSet = true;
|
||||
pendingCommitChangeCount = myChangeCount;
|
||||
}
|
||||
|
||||
|
@ -3770,7 +3790,7 @@ public class IndexWriter implements Closeable {
|
|||
// double-write a segments_N file.
|
||||
segmentInfos.updateGeneration(toSync);
|
||||
|
||||
if (pendingCommit == null) {
|
||||
if (!pendingCommitSet) {
|
||||
if (infoStream != null) {
|
||||
message("hit exception committing segments file");
|
||||
}
|
||||
|
|
|
@ -230,7 +230,7 @@ public final class SepPostingsWriterImpl extends PostingsWriterBase {
assert !omitTF;

final int delta = position - lastPosition;
assert delta > 0 || position == 0: "position=" + position + " lastPosition=" + lastPosition; // not quite right (if pos=0 is repeated twice we don't catch it)
assert delta >= 0: "position=" + position + " lastPosition=" + lastPosition; // not quite right (if pos=0 is repeated twice we don't catch it)
lastPosition = position;

if (storePayloads) {

@ -193,8 +193,8 @@ public final class StandardPostingsWriter extends PostingsWriterBase {
assert proxOut != null;

final int delta = position - lastPosition;

assert delta > 0 || position == 0: "position=" + position + " lastPosition=" + lastPosition; // not quite right (if pos=0 is repeated twice we don't catch it)

assert delta >= 0: "position=" + position + " lastPosition=" + lastPosition;

lastPosition = position;

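Both postings writers relax the same assertion: a token added with a position increment of 0 lands on the same position as the previous token, so a delta of 0 is legal. This matches the LUCENE-2645 entry in the CHANGES hunk earlier in this commit. A worked sketch of the bookkeeping, with illustrative values:

int lastPosition = 7;
int posIncr = 0;                       // second token stacked on the same position
int position = lastPosition + posIncr;
int delta = position - lastPosition;   // 0
assert delta >= 0;                     // the old "delta > 0 || position == 0" check tripped here
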
@ -23,7 +23,10 @@

use strict;
use warnings;
use JSON;
use LWP::Simple;

my $project_info_url = 'https://issues.apache.org/jira/rest/api/2.0.alpha1/project/LUCENE';
my $jira_url_prefix = 'http://issues.apache.org/jira/browse/';
my $bugzilla_url_prefix = 'http://issues.apache.org/bugzilla/show_bug.cgi?id=';
my %release_dates = &setup_release_dates;

@ -648,8 +651,13 @@ sub get_release_date {
|
|||
# Returns a list of alternating release names and dates, for use in populating
|
||||
# the %release_dates hash.
|
||||
#
|
||||
# Pulls release dates via the JIRA REST API. JIRA does not list
|
||||
# X.Y RCZ releases independently from releases X.Y, so the RC dates
|
||||
# as well as those named "final" are included below.
|
||||
#
|
||||
sub setup_release_dates {
|
||||
return ( '0.01' => '2000-03-30', '0.04' => '2000-04-19',
|
||||
my %release_dates
|
||||
= ( '0.01' => '2000-03-30', '0.04' => '2000-04-19',
|
||||
'1.0' => '2000-10-04', '1.01b' => '2001-06-02',
|
||||
'1.2 RC1' => '2001-10-02', '1.2 RC2' => '2001-10-19',
|
||||
'1.2 RC3' => '2002-01-27', '1.2 RC4' => '2002-02-14',
|
||||
|
@ -667,6 +675,20 @@ sub setup_release_dates {
|
|||
'2.4.0' => '2008-10-06', '2.4.1' => '2009-03-09',
|
||||
'2.9.0' => '2009-09-23', '2.9.1' => '2009-11-06',
|
||||
'3.0.0' => '2009-11-25');
|
||||
my $project_info_json = get($project_info_url);
|
||||
my $project_info = decode_json($project_info_json);
|
||||
for my $version (@{$project_info->{versions}}) {
|
||||
if ($version->{releaseDate}) {
|
||||
my $date = substr($version->{releaseDate}, 0, 10);
|
||||
my $version_name = $version->{name};
|
||||
$release_dates{$version->{name}} = $date;
|
||||
if ($version_name =~ /^\d+\.\d+$/) {
|
||||
my $full_version_name = "$version->{name}.0";
|
||||
$release_dates{$full_version_name} = $date;
|
||||
}
|
||||
}
|
||||
}
|
||||
return %release_dates;
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -23,6 +23,7 @@ import java.util.HashMap;
|
|||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
|
||||
|
||||
/**
|
||||
|
@ -127,13 +128,16 @@ public final class MockAnalyzer extends Analyzer {
|
|||
private synchronized TokenFilter maybePayload(TokenFilter stream, String fieldName) {
|
||||
Integer val = previousMappings.get(fieldName);
|
||||
if (val == null) {
|
||||
switch(random.nextInt(3)) {
|
||||
case 0: val = -1; // no payloads
|
||||
break;
|
||||
case 1: val = Integer.MAX_VALUE; // variable length payload
|
||||
break;
|
||||
case 2: val = random.nextInt(12); // fixed length payload
|
||||
break;
|
||||
val = -1; // no payloads
|
||||
if (LuceneTestCase.rarely(random)) {
|
||||
switch(random.nextInt(3)) {
|
||||
case 0: val = -1; // no payloads
|
||||
break;
|
||||
case 1: val = Integer.MAX_VALUE; // variable length payload
|
||||
break;
|
||||
case 2: val = random.nextInt(12); // fixed length payload
|
||||
break;
|
||||
}
|
||||
}
|
||||
previousMappings.put(fieldName, val); // save it so we are consistent for this field
|
||||
}
|
||||
|
|
|
@ -352,7 +352,7 @@ public class RandomIndexWriter implements Closeable {
public void close() throws IOException {
// if someone isn't using getReader() API, we want to be sure to
// maybeOptimize since presumably they might open a reader on the dir.
if (getReaderCalled == false && r.nextInt(4) == 2) {
if (getReaderCalled == false && r.nextInt(8) == 2) {
doRandomOptimize();
}
w.close();

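The close() change only lowers how often a random optimize is forced when getReader() was never called; a quick check of the probabilities, annotated on the new condition:

// old: r.nextInt(4) == 2  -> 1 chance in 4  (25%)
// new: r.nextInt(8) == 2  -> 1 chance in 8  (12.5%)
if (getReaderCalled == false && r.nextInt(8) == 2) {
  doRandomOptimize();
}
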
@ -110,7 +110,7 @@ public class MockIndexOutputWrapper extends IndexOutput {
}
throw new IOException(message);
} else {
if (dir.randomState.nextBoolean()) {
if (dir.randomState.nextInt(200) == 0) {
final int half = len/2;
delegate.writeBytes(b, offset, half);
Thread.yield();

@ -724,6 +724,47 @@ public abstract class LuceneTestCase extends Assert {
|
|||
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a number of at least <code>i</code>
|
||||
* <p>
|
||||
* The actual number returned will be influenced by whether {@link #TEST_NIGHTLY}
|
||||
* is active and {@link #RANDOM_MULTIPLIER}, but also with some random fudge.
|
||||
*/
|
||||
public static int atLeast(Random random, int i) {
|
||||
int min = (TEST_NIGHTLY ? 5*i : i) * RANDOM_MULTIPLIER;
|
||||
int max = min+(min/2);
|
||||
return _TestUtil.nextInt(random, min, max);
|
||||
}
|
||||
|
||||
public static int atLeast(int i) {
|
||||
return atLeast(random, i);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if something should happen rarely,
|
||||
* <p>
|
||||
* The actual number returned will be influenced by whether {@link #TEST_NIGHTLY}
|
||||
* is active and {@link #RANDOM_MULTIPLIER}.
|
||||
*/
|
||||
public static boolean rarely(Random random) {
|
||||
int p = TEST_NIGHTLY ? 25 : 5;
|
||||
p += (p * Math.log(RANDOM_MULTIPLIER));
|
||||
int min = 100 - Math.min(p, 90); // never more than 90
|
||||
return random.nextInt(100) >= min;
|
||||
}
|
||||
|
||||
public static boolean rarely() {
|
||||
return rarely(random);
|
||||
}
|
||||
|
||||
public static boolean usually(Random random) {
|
||||
return !rarely(random);
|
||||
}
|
||||
|
||||
public static boolean usually() {
|
||||
return usually(random);
|
||||
}
|
||||
|
||||
// @deprecated (4.0) These deprecated methods should be removed soon, when all tests using no Epsilon are fixed:
|
||||
@Deprecated
|
||||
|
@ -836,14 +877,22 @@ public abstract class LuceneTestCase extends Assert {
|
|||
c.setMergeScheduler(new SerialMergeScheduler());
|
||||
}
|
||||
if (r.nextBoolean()) {
|
||||
if (r.nextInt(20) == 17) {
|
||||
c.setMaxBufferedDocs(2);
|
||||
if (rarely(r)) {
|
||||
// crazy value
|
||||
c.setMaxBufferedDocs(_TestUtil.nextInt(r, 2, 7));
|
||||
} else {
|
||||
c.setMaxBufferedDocs(_TestUtil.nextInt(r, 2, 1000));
|
||||
// reasonable value
|
||||
c.setMaxBufferedDocs(_TestUtil.nextInt(r, 8, 1000));
|
||||
}
|
||||
}
|
||||
if (r.nextBoolean()) {
|
||||
c.setTermIndexInterval(_TestUtil.nextInt(r, 1, 1000));
|
||||
if (rarely(r)) {
|
||||
// crazy value
|
||||
c.setTermIndexInterval(random.nextBoolean() ? _TestUtil.nextInt(r, 1, 31) : _TestUtil.nextInt(r, 129, 1000));
|
||||
} else {
|
||||
// reasonable value
|
||||
c.setTermIndexInterval(_TestUtil.nextInt(r, 32, 128));
|
||||
}
|
||||
}
|
||||
if (r.nextBoolean()) {
|
||||
c.setIndexerThreadPool(new ThreadAffinityDocumentsWriterThreadPool(_TestUtil.nextInt(r, 1, 20)));
|
||||
|
@ -874,22 +923,22 @@ public abstract class LuceneTestCase extends Assert {
|
|||
LogMergePolicy logmp = r.nextBoolean() ? new LogDocMergePolicy() : new LogByteSizeMergePolicy();
|
||||
logmp.setUseCompoundFile(r.nextBoolean());
|
||||
logmp.setCalibrateSizeByDeletes(r.nextBoolean());
|
||||
if (r.nextInt(3) == 2) {
|
||||
logmp.setMergeFactor(2);
|
||||
if (rarely(r)) {
|
||||
logmp.setMergeFactor(_TestUtil.nextInt(r, 2, 4));
|
||||
} else {
|
||||
logmp.setMergeFactor(_TestUtil.nextInt(r, 2, 20));
|
||||
logmp.setMergeFactor(_TestUtil.nextInt(r, 5, 50));
|
||||
}
|
||||
return logmp;
|
||||
}
|
||||
|
||||
public static TieredMergePolicy newTieredMergePolicy(Random r) {
|
||||
TieredMergePolicy tmp = new TieredMergePolicy();
|
||||
if (r.nextInt(3) == 2) {
|
||||
tmp.setMaxMergeAtOnce(2);
|
||||
tmp.setMaxMergeAtOnceExplicit(2);
|
||||
if (rarely(r)) {
|
||||
tmp.setMaxMergeAtOnce(_TestUtil.nextInt(r, 2, 4));
|
||||
tmp.setMaxMergeAtOnceExplicit(_TestUtil.nextInt(r, 2, 4));
|
||||
} else {
|
||||
tmp.setMaxMergeAtOnce(_TestUtil.nextInt(r, 2, 20));
|
||||
tmp.setMaxMergeAtOnceExplicit(_TestUtil.nextInt(r, 2, 30));
|
||||
tmp.setMaxMergeAtOnce(_TestUtil.nextInt(r, 5, 50));
|
||||
tmp.setMaxMergeAtOnceExplicit(_TestUtil.nextInt(r, 5, 50));
|
||||
}
|
||||
tmp.setMaxMergedSegmentMB(0.2 + r.nextDouble() * 2.0);
|
||||
tmp.setFloorSegmentMB(0.2 + r.nextDouble() * 2.0);
|
||||
|
@ -1052,8 +1101,13 @@ public abstract class LuceneTestCase extends Assert {
|
|||
/** Returns a new field instance, using the specified random.
|
||||
* See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */
|
||||
public static Field newField(Random random, String name, String value, Store store, Index index, TermVector tv) {
|
||||
if (usually(random)) {
|
||||
// most of the time, don't modify the params
|
||||
return new Field(name, value, store, index, tv);
|
||||
}
|
||||
|
||||
if (!index.isIndexed())
|
||||
return new Field(name, value, store, index);
|
||||
return new Field(name, value, store, index, tv);
|
||||
|
||||
if (!store.isStored() && random.nextBoolean())
|
||||
store = Store.YES; // randomly store it
|
||||
|
@ -1115,7 +1169,7 @@ public abstract class LuceneTestCase extends Assert {
|
|||
};
|
||||
|
||||
public static String randomDirectory(Random random) {
|
||||
if (random.nextInt(10) == 0) {
|
||||
if (rarely(random)) {
|
||||
return CORE_DIRECTORIES[random.nextInt(CORE_DIRECTORIES.length)];
|
||||
} else {
|
||||
return "RAMDirectory";
|
||||
|
@ -1179,7 +1233,7 @@ public abstract class LuceneTestCase extends Assert {
|
|||
public static IndexSearcher newSearcher(IndexReader r, boolean maybeWrap) throws IOException {
|
||||
|
||||
if (random.nextBoolean()) {
|
||||
if (maybeWrap && random.nextBoolean()) {
|
||||
if (maybeWrap && rarely()) {
|
||||
return new IndexSearcher(new SlowMultiReaderWrapper(r));
|
||||
} else {
|
||||
return new IndexSearcher(r);
|
||||
|
@ -1241,6 +1295,7 @@ public abstract class LuceneTestCase extends Assert {
|
|||
if (!TEST_TIMEZONE.equals("random")) sb.append(" -Dtests.timezone=").append(TEST_TIMEZONE);
|
||||
if (!TEST_DIRECTORY.equals("random")) sb.append(" -Dtests.directory=").append(TEST_DIRECTORY);
|
||||
if (RANDOM_MULTIPLIER > 1) sb.append(" -Dtests.multiplier=").append(RANDOM_MULTIPLIER);
|
||||
if (TEST_NIGHTLY) sb.append(" -Dtests.nightly=true");
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
|
@ -1407,6 +1462,10 @@ public abstract class LuceneTestCase extends Assert {
|
|||
Codec codec = previousMappings.get(name);
|
||||
if (codec == null) {
|
||||
codec = knownCodecs.get(Math.abs(perFieldSeed ^ name.hashCode()) % knownCodecs.size());
|
||||
if (codec instanceof SimpleTextCodec && perFieldSeed % 5 != 0) {
|
||||
// make simpletext rarer, choose again
|
||||
codec = knownCodecs.get(Math.abs(perFieldSeed ^ name.toUpperCase(Locale.ENGLISH).hashCode()) % knownCodecs.size());
|
||||
}
|
||||
previousMappings.put(name, codec);
|
||||
}
|
||||
return codec.name;
|
||||
|
|
|
@@ -51,12 +51,17 @@ import org.junit.Assert;

public class _TestUtil {

  /** Returns temp dir, containing String arg in its name;
  /** Returns temp dir, based on String arg in its name;
   * does not create the directory. */
  public static File getTempDir(String desc) {
    File f = new File(LuceneTestCase.TEMP_DIR, desc + "." + LuceneTestCase.random.nextLong());
    LuceneTestCase.registerTempFile(f);
    return f;
    try {
      File f = createTempFile(desc, "tmp", LuceneTestCase.TEMP_DIR);
      f.delete();
      LuceneTestCase.registerTempFile(f);
      return f;
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  /**
@@ -516,7 +516,7 @@ public class TestExternalCodecs extends LuceneTestCase {
    provider.register(new RAMOnlyCodec());
    provider.setDefaultFieldCodec("RamOnly");

    final int NUM_DOCS = 173;
    final int NUM_DOCS = atLeast(173);
    MockDirectoryWrapper dir = newDirectory();
    dir.setCheckIndexOnClose(false); // we use a custom codec provider
    IndexWriter w = new IndexWriter(
@@ -59,7 +59,8 @@ public class TestSearchForDuplicates extends LuceneTestCase {
  public void testRun() throws Exception {
    StringWriter sw = new StringWriter();
    PrintWriter pw = new PrintWriter(sw, true);
    doTest(random, pw, false);
    final int MAX_DOCS = atLeast(225);
    doTest(random, pw, false, MAX_DOCS);
    pw.close();
    sw.close();
    String multiFileOutput = sw.getBuffer().toString();

@@ -67,7 +68,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {

    sw = new StringWriter();
    pw = new PrintWriter(sw, true);
    doTest(random, pw, true);
    doTest(random, pw, true, MAX_DOCS);
    pw.close();
    sw.close();
    String singleFileOutput = sw.getBuffer().toString();

@@ -76,7 +77,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
  }

  private void doTest(Random random, PrintWriter out, boolean useCompoundFiles) throws Exception {
  private void doTest(Random random, PrintWriter out, boolean useCompoundFiles, int MAX_DOCS) throws Exception {
    Directory directory = newDirectory();
    Analyzer analyzer = new MockAnalyzer(random);
    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);

@@ -90,8 +91,6 @@ public class TestSearchForDuplicates extends LuceneTestCase {
      writer.setInfoStream(System.out);
    }

    final int MAX_DOCS = 225;

    for (int j = 0; j < MAX_DOCS; j++) {
      Document d = new Document();
      d.add(newField(PRIORITY_FIELD, HIGH_PRIORITY, Field.Store.YES, Field.Index.ANALYZED));
@@ -114,6 +114,6 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase {

  /** blast some random strings through the analyzer */
  public void testRandomStrings() throws Exception {
    checkRandomData(random, new MockAnalyzer(random), 10000*RANDOM_MULTIPLIER);
    checkRandomData(random, new MockAnalyzer(random), atLeast(1000));
  }
}
@@ -155,6 +155,7 @@ public class Test2BTerms extends LuceneTestCase {

    MockDirectoryWrapper dir = newFSDirectory(_TestUtil.getTempDir("2BTerms"));
    dir.setThrottling(MockDirectoryWrapper.Throttling.NEVER);
    dir.setCheckIndexOnClose(false); // don't double-checkindex
    //Directory dir = newFSDirectory(new File("/p/lucene/indices/2bindex"));

    if (true) {
@@ -18,6 +18,7 @@ package org.apache.lucene.index;
 */

import java.io.IOException;
import java.io.FileNotFoundException;
import java.util.ArrayList;
import java.util.List;

@@ -760,7 +761,7 @@ public class TestAddIndexes extends LuceneTestCase {
  // from multiple threads
  public void testAddIndexesWithThreads() throws Throwable {

    final int NUM_ITER = 15;
    final int NUM_ITER = TEST_NIGHTLY ? 15 : 5;
    final int NUM_COPY = 3;
    CommitAndAddIndexes c = new CommitAndAddIndexes(NUM_COPY);
    c.writer2.setInfoStream(VERBOSE ? System.out : null);

@@ -778,8 +779,6 @@ public class TestAddIndexes extends LuceneTestCase {

    assertTrue("found unexpected failures: " + c.failures, c.failures.isEmpty());

    _TestUtil.checkIndex(c.dir2);

    IndexReader reader = IndexReader.open(c.dir2, true);
    assertEquals(expectedNumDocs, reader.numDocs());
    reader.close();

@@ -816,8 +815,6 @@ public class TestAddIndexes extends LuceneTestCase {

    c.joinThreads();

    _TestUtil.checkIndex(c.dir2);

    c.closeDir();

    assertTrue(c.failures.size() == 0);

@@ -870,6 +867,8 @@ public class TestAddIndexes extends LuceneTestCase {

      if (t instanceof AlreadyClosedException || t instanceof MergePolicy.MergeAbortedException || t instanceof NullPointerException) {
        report = !didClose;
      } else if (t instanceof FileNotFoundException) {
        report = !didClose;
      } else if (t instanceof IOException) {
        Throwable t2 = t.getCause();
        if (t2 instanceof MergePolicy.MergeAbortedException) {

@@ -908,8 +907,6 @@ public class TestAddIndexes extends LuceneTestCase {
    if (VERBOSE) {
      System.out.println("TEST: done join threads");
    }
    _TestUtil.checkIndex(c.dir2);

    c.closeDir();

    assertTrue(c.failures.size() == 0);

@@ -918,11 +915,11 @@ public class TestAddIndexes extends LuceneTestCase {
  // LUCENE-1335: test simultaneous addIndexes & close
  public void testAddIndexesWithRollback() throws Throwable {

    final int NUM_COPY = 50;
    final int NUM_COPY = TEST_NIGHTLY ? 50 : 5;
    CommitAndAddIndexes3 c = new CommitAndAddIndexes3(NUM_COPY);
    c.launchThreads(-1);

    Thread.sleep(_TestUtil.nextInt(random, 100, 500));
    Thread.sleep(_TestUtil.nextInt(random, 10, 500));

    // Close w/o first stopping/joining the threads
    if (VERBOSE) {

@@ -933,8 +930,6 @@ public class TestAddIndexes extends LuceneTestCase {

    c.joinThreads();

    _TestUtil.checkIndex(c.dir2);

    c.closeDir();

    assertTrue(c.failures.size() == 0);

@@ -1039,7 +1034,6 @@ public class TestAddIndexes extends LuceneTestCase {
    writer.addIndexes(aux, aux2);
    assertEquals(190, writer.maxDoc());
    writer.close();
    _TestUtil.checkIndex(dir, provider);

    dir.close();
    aux.close();
@@ -47,7 +47,7 @@ public class TestAtomicUpdate extends LuceneTestCase {
  private static abstract class TimedThread extends Thread {
    volatile boolean failed;
    int count;
    private static float RUN_TIME_SEC = 0.5f * RANDOM_MULTIPLIER;
    private static float RUN_TIME_MSEC = atLeast(500);
    private TimedThread[] allThreads;

    abstract public void doWork() throws Throwable;

@@ -58,7 +58,7 @@ public class TestAtomicUpdate extends LuceneTestCase {

    @Override
    public void run() {
      final long stopTime = System.currentTimeMillis() + (long) (1000*RUN_TIME_SEC);
      final long stopTime = System.currentTimeMillis() + (long) RUN_TIME_MSEC;

      count = 0;
@@ -188,8 +188,6 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
    w.setInfoStream(VERBOSE ? System.out : null);
    w.optimize();
    w.close();

    _TestUtil.checkIndex(dir);

    dir.close();
    _TestUtil.rmDir(oldIndxeDir);

@@ -207,8 +205,6 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
    w.addIndexes(dir);
    w.close();

    _TestUtil.checkIndex(targetDir);

    dir.close();
    targetDir.close();

@@ -229,9 +225,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
    w.addIndexes(reader);
    w.close();
    reader.close();

    _TestUtil.checkIndex(targetDir);

    dir.close();
    targetDir.close();
    _TestUtil.rmDir(oldIndxeDir);

@@ -743,8 +737,6 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
        .upgrade();

    checkAllSegmentsUpgraded(dir);

    _TestUtil.checkIndex(dir);

    dir.close();
    _TestUtil.rmDir(oldIndxeDir);
@@ -23,7 +23,7 @@ public class TestByteSlices extends LuceneTestCase {
  public void testBasic() throws Throwable {
    ByteBlockPool pool = new ByteBlockPool(new RecyclingByteBlockAllocator(ByteBlockPool.BYTE_BLOCK_SIZE, Integer.MAX_VALUE));

    final int NUM_STREAM = 100 * RANDOM_MULTIPLIER;
    final int NUM_STREAM = atLeast(100);

    ByteSliceWriter writer = new ByteSliceWriter(pool);

@@ -40,7 +40,7 @@ public class TestByteSlices extends LuceneTestCase {
      counters[stream] = 0;
    }

    int num = 10000 * RANDOM_MULTIPLIER;
    int num = atLeast(10000);
    for (int iter = 0; iter < num; iter++) {
      int stream = random.nextInt(NUM_STREAM);
      if (VERBOSE)
@@ -64,7 +64,7 @@ import org.apache.lucene.util._TestUtil;
public class TestCodecs extends LuceneTestCase {
  private static String[] fieldNames = new String[] {"one", "two", "three", "four"};

  private final static int NUM_TEST_ITER = 20 * RANDOM_MULTIPLIER;
  private final static int NUM_TEST_ITER = atLeast(20);
  private final static int NUM_TEST_THREADS = 3;
  private final static int NUM_FIELDS = 4;
  private final static int NUM_TERMS_RAND = 50; // must be > 16 to test skipping
@@ -155,7 +155,8 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {
  }

  public void testFieldNumberGaps() throws IOException {
    for (int i = 0; i < 39; i++) {
    int numIters = atLeast(13);
    for (int i = 0; i < numIters; i++) {
      Directory dir = newDirectory();
      {
        IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(

@@ -270,8 +271,8 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {

  @Test
  public void testManyFields() throws Exception {
    final int NUM_DOCS = 2000;
    final int MAX_FIELDS = 50;
    final int NUM_DOCS = atLeast(200);
    final int MAX_FIELDS = atLeast(50);

    int[][] docs = new int[NUM_DOCS][4];
    for (int i = 0; i < docs.length; i++) {
@@ -230,7 +230,7 @@ public class TestDocTermOrds extends LuceneTestCase {
  public void testRandom() throws Exception {
    MockDirectoryWrapper dir = newDirectory();

    final int NUM_TERMS = 100 * RANDOM_MULTIPLIER;
    final int NUM_TERMS = atLeast(20);
    final Set<BytesRef> terms = new HashSet<BytesRef>();
    while(terms.size() < NUM_TERMS) {
      final String s = _TestUtil.randomRealisticUnicodeString(random);

@@ -242,7 +242,7 @@ public class TestDocTermOrds extends LuceneTestCase {
    final BytesRef[] termsArray = terms.toArray(new BytesRef[terms.size()]);
    Arrays.sort(termsArray);

    final int NUM_DOCS = 1000 * RANDOM_MULTIPLIER;
    final int NUM_DOCS = atLeast(100);

    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));

@@ -280,7 +280,7 @@ public class TestDocTermOrds extends LuceneTestCase {
      }
      for(int ord : ordsForDocSet) {
        ordsForDoc[upto++] = ord;
        Field field = newField("field", termsArray[ord].utf8ToString(), Field.Index.NOT_ANALYZED);
        Field field = newField("field", termsArray[ord].utf8ToString(), Field.Index.NOT_ANALYZED_NO_NORMS);
        if (VERBOSE) {
          System.out.println(" f=" + termsArray[ord].utf8ToString());
        }

@@ -333,7 +333,7 @@ public class TestDocTermOrds extends LuceneTestCase {
    }
    final String[] prefixesArray = prefixes.toArray(new String[prefixes.size()]);

    final int NUM_TERMS = 100 * RANDOM_MULTIPLIER;
    final int NUM_TERMS = atLeast(20);
    final Set<BytesRef> terms = new HashSet<BytesRef>();
    while(terms.size() < NUM_TERMS) {
      final String s = prefixesArray[random.nextInt(prefixesArray.length)] + _TestUtil.randomRealisticUnicodeString(random);

@@ -345,7 +345,7 @@ public class TestDocTermOrds extends LuceneTestCase {
    final BytesRef[] termsArray = terms.toArray(new BytesRef[terms.size()]);
    Arrays.sort(termsArray);

    final int NUM_DOCS = 1000 * RANDOM_MULTIPLIER;
    final int NUM_DOCS = atLeast(100);

    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));

@@ -383,7 +383,7 @@ public class TestDocTermOrds extends LuceneTestCase {
    }
    for(int ord : ordsForDocSet) {
      ordsForDoc[upto++] = ord;
      Field field = newField("field", termsArray[ord].utf8ToString(), Field.Index.NOT_ANALYZED);
      Field field = newField("field", termsArray[ord].utf8ToString(), Field.Index.NOT_ANALYZED_NO_NORMS);
      if (VERBOSE) {
        System.out.println(" f=" + termsArray[ord].utf8ToString());
      }

@@ -474,9 +474,9 @@ public class TestDocTermOrds extends LuceneTestCase {
    final TermsEnum te = dto.getOrdTermsEnum(r);
    if (te == null) {
      if (prefixRef == null) {
        assertNull(r.fields().terms("field"));
        assertNull(MultiFields.getTerms(r, "field"));
      } else {
        Terms terms = r.fields().terms("field");
        Terms terms = MultiFields.getTerms(r, "field");
        if (terms != null) {
          TermsEnum termsEnum = terms.iterator();
          TermsEnum.SeekStatus result = termsEnum.seek(prefixRef, false);
@ -51,13 +51,14 @@ public class TestDocsAndPositions extends LuceneTestCase {
|
|||
Document doc = new Document();
|
||||
doc.add(newField(fieldName, "1 2 3 4 5 6 7 8 9 10 "
|
||||
+ "1 2 3 4 5 6 7 8 9 10 " + "1 2 3 4 5 6 7 8 9 10 "
|
||||
+ "1 2 3 4 5 6 7 8 9 10", Field.Store.YES, Field.Index.ANALYZED));
|
||||
+ "1 2 3 4 5 6 7 8 9 10", Field.Store.NO, Field.Index.ANALYZED_NO_NORMS));
|
||||
writer.addDocument(doc);
|
||||
}
|
||||
IndexReader reader = writer.getReader();
|
||||
writer.close();
|
||||
|
||||
for (int i = 0; i < 39 * RANDOM_MULTIPLIER; i++) {
|
||||
int num = atLeast(13);
|
||||
for (int i = 0; i < num; i++) {
|
||||
BytesRef bytes = new BytesRef("1");
|
||||
ReaderContext topReaderContext = reader.getTopReaderContext();
|
||||
AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
|
||||
|
@ -112,7 +113,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
|
|||
Directory dir = newDirectory();
|
||||
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
|
||||
int numDocs = 131;
|
||||
int numDocs = atLeast(47);
|
||||
int max = 1051;
|
||||
int term = random.nextInt(max);
|
||||
Integer[][] positionsInDoc = new Integer[numDocs][];
|
||||
|
@ -120,7 +121,8 @@ public class TestDocsAndPositions extends LuceneTestCase {
|
|||
Document doc = new Document();
|
||||
ArrayList<Integer> positions = new ArrayList<Integer>();
|
||||
StringBuilder builder = new StringBuilder();
|
||||
for (int j = 0; j < 3049; j++) {
|
||||
int num = atLeast(131);
|
||||
for (int j = 0; j < num; j++) {
|
||||
int nextInt = random.nextInt(max);
|
||||
builder.append(nextInt).append(" ");
|
||||
if (nextInt == term) {
|
||||
|
@ -129,10 +131,10 @@ public class TestDocsAndPositions extends LuceneTestCase {
|
|||
}
|
||||
if (positions.size() == 0) {
|
||||
builder.append(term);
|
||||
positions.add(3049);
|
||||
positions.add(num);
|
||||
}
|
||||
doc.add(newField(fieldName, builder.toString(), Field.Store.YES,
|
||||
Field.Index.ANALYZED));
|
||||
doc.add(newField(fieldName, builder.toString(), Field.Store.NO,
|
||||
Field.Index.ANALYZED_NO_NORMS));
|
||||
positionsInDoc[i] = positions.toArray(new Integer[0]);
|
||||
writer.addDocument(doc);
|
||||
}
|
||||
|
@ -140,7 +142,8 @@ public class TestDocsAndPositions extends LuceneTestCase {
|
|||
IndexReader reader = writer.getReader();
|
||||
writer.close();
|
||||
|
||||
for (int i = 0; i < 39 * RANDOM_MULTIPLIER; i++) {
|
||||
int num = atLeast(13);
|
||||
for (int i = 0; i < num; i++) {
|
||||
BytesRef bytes = new BytesRef("" + term);
|
||||
ReaderContext topReaderContext = reader.getTopReaderContext();
|
||||
AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
|
||||
|
@ -192,7 +195,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
|
|||
Directory dir = newDirectory();
|
||||
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
|
||||
int numDocs = 499;
|
||||
int numDocs = atLeast(49);
|
||||
int max = 15678;
|
||||
int term = random.nextInt(max);
|
||||
int[] freqInDoc = new int[numDocs];
|
||||
|
@ -201,20 +204,21 @@ public class TestDocsAndPositions extends LuceneTestCase {
|
|||
StringBuilder builder = new StringBuilder();
|
||||
for (int j = 0; j < 199; j++) {
|
||||
int nextInt = random.nextInt(max);
|
||||
builder.append(nextInt).append(" ");
|
||||
builder.append(nextInt).append(' ');
|
||||
if (nextInt == term) {
|
||||
freqInDoc[i]++;
|
||||
}
|
||||
}
|
||||
doc.add(newField(fieldName, builder.toString(), Field.Store.YES,
|
||||
Field.Index.ANALYZED));
|
||||
doc.add(newField(fieldName, builder.toString(), Field.Store.NO,
|
||||
Field.Index.ANALYZED_NO_NORMS));
|
||||
writer.addDocument(doc);
|
||||
}
|
||||
|
||||
IndexReader reader = writer.getReader();
|
||||
writer.close();
|
||||
|
||||
for (int i = 0; i < 39 * RANDOM_MULTIPLIER; i++) {
|
||||
int num = atLeast(13);
|
||||
for (int i = 0; i < num; i++) {
|
||||
BytesRef bytes = new BytesRef("" + term);
|
||||
ReaderContext topReaderContext = reader.getTopReaderContext();
|
||||
AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
|
||||
|
@ -281,8 +285,8 @@ public class TestDocsAndPositions extends LuceneTestCase {
|
|||
builder.append("odd ");
|
||||
}
|
||||
}
|
||||
doc.add(newField(fieldName, builder.toString(), Field.Store.YES,
|
||||
Field.Index.ANALYZED));
|
||||
doc.add(newField(fieldName, builder.toString(), Field.Store.NO,
|
||||
Field.Index.ANALYZED_NO_NORMS));
|
||||
writer.addDocument(doc);
|
||||
}
|
||||
|
||||
|
@ -290,7 +294,8 @@ public class TestDocsAndPositions extends LuceneTestCase {
|
|||
IndexReader reader = writer.getReader();
|
||||
writer.close();
|
||||
|
||||
for (int i = 0; i < 39 * RANDOM_MULTIPLIER; i++) {
|
||||
int num = atLeast(13);
|
||||
for (int i = 0; i < num; i++) {
|
||||
BytesRef bytes = new BytesRef("even");
|
||||
|
||||
ReaderContext topReaderContext = reader.getTopReaderContext();
|
||||
|
|
|
@ -39,16 +39,17 @@ import org.apache.lucene.store.IndexInput;
|
|||
import org.apache.lucene.store.IndexOutput;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util._TestUtil;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
public class TestFieldsReader extends LuceneTestCase {
|
||||
private Directory dir;
|
||||
private Document testDoc = new Document();
|
||||
private FieldInfos fieldInfos = null;
|
||||
private static Directory dir;
|
||||
private static Document testDoc = new Document();
|
||||
private static FieldInfos fieldInfos = null;
|
||||
private final static String TEST_SEGMENT_NAME = "_0";
|
||||
|
||||
@Override
|
||||
public void setUp() throws Exception {
|
||||
super.setUp();
|
||||
@BeforeClass
|
||||
public static void beforeClass() throws Exception {
|
||||
fieldInfos = new FieldInfos();
|
||||
DocHelper.setupDoc(testDoc);
|
||||
_TestUtil.add(testDoc, fieldInfos);
|
||||
|
@ -61,10 +62,12 @@ public class TestFieldsReader extends LuceneTestCase {
|
|||
FaultyIndexInput.doFail = false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void tearDown() throws Exception {
|
||||
@AfterClass
|
||||
public static void afterClass() throws Exception {
|
||||
dir.close();
|
||||
super.tearDown();
|
||||
dir = null;
|
||||
fieldInfos = null;
|
||||
testDoc = null;
|
||||
}
|
||||
public void test() throws IOException {
|
||||
assertTrue(dir != null);
|
||||
|
@ -302,7 +305,7 @@ public class TestFieldsReader extends LuceneTestCase {
|
|||
FieldsReader reader;
|
||||
long lazyTime = 0;
|
||||
long regularTime = 0;
|
||||
int length = 50;
|
||||
int length = 10;
|
||||
Set<String> lazyFieldNames = new HashSet<String>();
|
||||
lazyFieldNames.add(DocHelper.LARGE_LAZY_FIELD_KEY);
|
||||
SetBasedFieldSelector fieldSelector = new SetBasedFieldSelector(Collections. <String> emptySet(), lazyFieldNames);
|
||||
|
@ -513,7 +516,7 @@ public class TestFieldsReader extends LuceneTestCase {
|
|||
public void testNumericField() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
RandomIndexWriter w = new RandomIndexWriter(random, dir);
|
||||
final int numDocs = _TestUtil.nextInt(random, 500, 1000) * RANDOM_MULTIPLIER;
|
||||
final int numDocs = atLeast(500);
|
||||
final Number[] answers = new Number[numDocs];
|
||||
final NumericField.DataType[] typeAnswers = new NumericField.DataType[numDocs];
|
||||
for(int id=0;id<numDocs;id++) {
|
||||
|
|
|
@ -30,18 +30,23 @@ import org.apache.lucene.store.LockObtainFailedException;
|
|||
import org.apache.lucene.store.MockDirectoryWrapper;
|
||||
import org.apache.lucene.util.LineFileDocs;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.junit.Before;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
public class TestFlushByRamOrCountsPolicy extends LuceneTestCase {
|
||||
|
||||
private LineFileDocs lineDocFile;
|
||||
private static LineFileDocs lineDocFile;
|
||||
|
||||
@Before
|
||||
@Override
|
||||
public void setUp() throws Exception {
|
||||
super.setUp();
|
||||
@BeforeClass
|
||||
public static void beforeClass() throws Exception {
|
||||
lineDocFile = new LineFileDocs(random);
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void afterClass() throws Exception {
|
||||
lineDocFile.close();
|
||||
lineDocFile = null;
|
||||
}
|
||||
|
||||
public void testFlushByRam() throws CorruptIndexException,
|
||||
LockObtainFailedException, IOException, InterruptedException {
|
||||
|
@ -231,8 +236,8 @@ public class TestFlushByRamOrCountsPolicy extends LuceneTestCase {
|
|||
for (int i = 0; i < numThreads.length; i++) {
|
||||
AtomicInteger numDocs = new AtomicInteger(numDocumentsToIndex);
|
||||
MockDirectoryWrapper dir = newDirectory();
|
||||
// mock a very slow harddisk here so that flushing is very slow
|
||||
dir.setThrottling(MockDirectoryWrapper.Throttling.ALWAYS);
|
||||
// mock a very slow harddisk sometimes here so that flushing is very slow
|
||||
dir.setThrottling(MockDirectoryWrapper.Throttling.SOMETIMES);
|
||||
IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT,
|
||||
new MockAnalyzer(random));
|
||||
iwc.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
|
||||
|
|
|
@ -42,7 +42,8 @@ import org.apache.lucene.util._TestUtil;
|
|||
public class TestGlobalFieldNumbers extends LuceneTestCase {
|
||||
|
||||
public void testGlobalFieldNumberFiles() throws IOException {
|
||||
for (int i = 0; i < 39; i++) {
|
||||
int num = atLeast(3);
|
||||
for (int i = 0; i < num; i++) {
|
||||
Directory dir = newDirectory();
|
||||
{
|
||||
IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
|
||||
|
@ -113,7 +114,8 @@ public class TestGlobalFieldNumbers extends LuceneTestCase {
|
|||
}
|
||||
|
||||
public void testIndexReaderCommit() throws IOException {
|
||||
for (int i = 0; i < 39; i++) {
|
||||
int num = atLeast(3);
|
||||
for (int i = 0; i < num; i++) {
|
||||
Directory dir = newDirectory();
|
||||
{
|
||||
IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
|
||||
|
@ -156,7 +158,8 @@ public class TestGlobalFieldNumbers extends LuceneTestCase {
|
|||
}
|
||||
|
||||
public void testGlobalFieldNumberFilesAcrossCommits() throws IOException {
|
||||
for (int i = 0; i < 39; i++) {
|
||||
int num = atLeast(3);
|
||||
for (int i = 0; i < num; i++) {
|
||||
Directory dir = newDirectory();
|
||||
{
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
|
||||
|
@ -207,7 +210,8 @@ public class TestGlobalFieldNumbers extends LuceneTestCase {
|
|||
}
|
||||
|
||||
public void testGlobalFieldNumberOnOldCommit() throws IOException {
|
||||
for (int i = 0; i < 39; i++) {
|
||||
int num = atLeast(3);
|
||||
for (int i = 0; i < num; i++) {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(
|
||||
|
@ -282,9 +286,9 @@ public class TestGlobalFieldNumbers extends LuceneTestCase {
|
|||
}
|
||||
|
||||
public void testOptimize() throws IOException {
|
||||
for (int i = 0; i < 2; i++) {
|
||||
for (int i = 0; i < 2*RANDOM_MULTIPLIER; i++) {
|
||||
Set<String> fieldNames = new HashSet<String>();
|
||||
final int numFields = 2 + random.nextInt(200);
|
||||
final int numFields = 2 + (TEST_NIGHTLY ? random.nextInt(200) : random.nextInt(20));
|
||||
for (int j = 0; j < numFields; j++) {
|
||||
fieldNames.add("field_" + j);
|
||||
}
|
||||
|
@ -306,9 +310,9 @@ public class TestGlobalFieldNumbers extends LuceneTestCase {
|
|||
}
|
||||
|
||||
public void testAddIndexesStableFieldNumbers() throws IOException {
|
||||
for (int i = 0; i < 2; i++) {
|
||||
for (int i = 0; i < 2*RANDOM_MULTIPLIER; i++) {
|
||||
Set<String> fieldNames = new HashSet<String>();
|
||||
final int numFields = 2 + random.nextInt(50);
|
||||
final int numFields = 2 + (TEST_NIGHTLY ? random.nextInt(50) : random.nextInt(10));
|
||||
for (int j = 0; j < numFields; j++) {
|
||||
fieldNames.add("field_" + j);
|
||||
}
|
||||
|
|
|
@@ -53,6 +53,8 @@ public class TestIndexFileDeleter extends LuceneTestCase {
        setMergePolicy(mergePolicy)
    );

    writer.setInfoStream(VERBOSE ? System.out : null);

    int i;
    for(i=0;i<35;i++) {
      addDoc(writer, i);

@@ -146,7 +148,9 @@ public class TestIndexFileDeleter extends LuceneTestCase {
    copyFile(dir, "segments_2", "segments_1");

    // Create a bogus cfs file shadowing a non-cfs segment:
    copyFile(dir, "_1.cfs", "_2.cfs");
    assertTrue(dir.fileExists("_3.fdt"));
    assertTrue(!dir.fileExists("_3.cfs"));
    copyFile(dir, "_1.cfs", "_3.cfs");

    String[] filesPre = dir.listAll();
@ -304,7 +304,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
d.close();
|
||||
}
|
||||
|
||||
private void assertTermDocsCount(String msg,
|
||||
static void assertTermDocsCount(String msg,
|
||||
IndexReader reader,
|
||||
Term term,
|
||||
int expected)
|
||||
|
@ -322,50 +322,6 @@ public class TestIndexReader extends LuceneTestCase
|
|||
assertEquals(msg + ", count mismatch", expected, count);
|
||||
}
|
||||
|
||||
public void testBasicDelete() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
|
||||
IndexWriter writer = null;
|
||||
IndexReader reader = null;
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
|
||||
// add 100 documents with term : aaa
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
writer.setInfoStream(VERBOSE ? System.out : null);
|
||||
for (int i = 0; i < 100; i++) {
|
||||
addDoc(writer, searchTerm.text());
|
||||
}
|
||||
writer.close();
|
||||
|
||||
// OPEN READER AT THIS POINT - this should fix the view of the
|
||||
// index at the point of having 100 "aaa" documents and 0 "bbb"
|
||||
reader = IndexReader.open(dir, false);
|
||||
assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
|
||||
assertTermDocsCount("first reader", reader, searchTerm, 100);
|
||||
reader.close();
|
||||
|
||||
// DELETE DOCUMENTS CONTAINING TERM: aaa
|
||||
int deleted = 0;
|
||||
reader = IndexReader.open(dir, false);
|
||||
deleted = reader.deleteDocuments(searchTerm);
|
||||
assertEquals("deleted count", 100, deleted);
|
||||
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
|
||||
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
|
||||
|
||||
// open a 2nd reader to make sure first reader can
|
||||
// commit its changes (.del) while second reader
|
||||
// is open:
|
||||
IndexReader reader2 = IndexReader.open(dir, false);
|
||||
reader.close();
|
||||
|
||||
// CREATE A NEW READER and re-test
|
||||
reader = IndexReader.open(dir, false);
|
||||
assertEquals("deleted docFreq", 0, reader.docFreq(searchTerm));
|
||||
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
|
||||
reader.close();
|
||||
reader2.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testBinaryFields() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
|
@ -600,11 +556,6 @@ public class TestIndexReader extends LuceneTestCase
|
|||
dir.close();
|
||||
}
|
||||
|
||||
|
||||
public void testDeleteReaderWriterConflictUnoptimized() throws IOException{
|
||||
deleteReaderWriterConflict(false);
|
||||
}
|
||||
|
||||
/* ??? public void testOpenEmptyDirectory() throws IOException{
|
||||
String dirName = "test.empty";
|
||||
File fileDirName = new File(dirName);
|
||||
|
@ -620,90 +571,6 @@ public class TestIndexReader extends LuceneTestCase
|
|||
rmDir(fileDirName);
|
||||
}*/
|
||||
|
||||
public void testDeleteReaderWriterConflictOptimized() throws IOException{
|
||||
deleteReaderWriterConflict(true);
|
||||
}
|
||||
|
||||
private void deleteReaderWriterConflict(boolean optimize) throws IOException {
|
||||
//Directory dir = new RAMDirectory();
|
||||
Directory dir = newDirectory();
|
||||
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
Term searchTerm2 = new Term("content", "bbb");
|
||||
|
||||
// add 100 documents with term : aaa
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
|
||||
for (int i = 0; i < 100; i++) {
|
||||
addDoc(writer, searchTerm.text());
|
||||
}
|
||||
writer.close();
|
||||
|
||||
// OPEN READER AT THIS POINT - this should fix the view of the
|
||||
// index at the point of having 100 "aaa" documents and 0 "bbb"
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
|
||||
assertEquals("first docFreq", 0, reader.docFreq(searchTerm2));
|
||||
assertTermDocsCount("first reader", reader, searchTerm, 100);
|
||||
assertTermDocsCount("first reader", reader, searchTerm2, 0);
|
||||
|
||||
// add 100 documents with term : bbb
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
|
||||
for (int i = 0; i < 100; i++) {
|
||||
addDoc(writer, searchTerm2.text());
|
||||
}
|
||||
|
||||
// REQUEST OPTIMIZATION
|
||||
// This causes a new segment to become current for all subsequent
|
||||
// searchers. Because of this, deletions made via a previously open
|
||||
// reader, which would be applied to that reader's segment, are lost
|
||||
// for subsequent searchers/readers
|
||||
if(optimize)
|
||||
writer.optimize();
|
||||
writer.close();
|
||||
|
||||
// The reader should not see the new data
|
||||
assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
|
||||
assertEquals("first docFreq", 0, reader.docFreq(searchTerm2));
|
||||
assertTermDocsCount("first reader", reader, searchTerm, 100);
|
||||
assertTermDocsCount("first reader", reader, searchTerm2, 0);
|
||||
|
||||
|
||||
// DELETE DOCUMENTS CONTAINING TERM: aaa
|
||||
// NOTE: the reader was created when only "aaa" documents were in
|
||||
int deleted = 0;
|
||||
try {
|
||||
deleted = reader.deleteDocuments(searchTerm);
|
||||
fail("Delete allowed on an index reader with stale segment information");
|
||||
} catch (StaleReaderException e) {
|
||||
/* success */
|
||||
}
|
||||
|
||||
// Re-open index reader and try again. This time it should see
|
||||
// the new data.
|
||||
reader.close();
|
||||
reader = IndexReader.open(dir, false);
|
||||
assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
|
||||
assertEquals("first docFreq", 100, reader.docFreq(searchTerm2));
|
||||
assertTermDocsCount("first reader", reader, searchTerm, 100);
|
||||
assertTermDocsCount("first reader", reader, searchTerm2, 100);
|
||||
|
||||
deleted = reader.deleteDocuments(searchTerm);
|
||||
assertEquals("deleted count", 100, deleted);
|
||||
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
|
||||
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm2));
|
||||
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
|
||||
assertTermDocsCount("deleted termDocs", reader, searchTerm2, 100);
|
||||
reader.close();
|
||||
|
||||
// CREATE A NEW READER and re-test
|
||||
reader = IndexReader.open(dir, false);
|
||||
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm2));
|
||||
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
|
||||
assertTermDocsCount("deleted termDocs", reader, searchTerm2, 100);
|
||||
reader.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testFilesOpenClose() throws IOException {
|
||||
// Create initial data set
|
||||
File dirFile = _TestUtil.getTempDir("TestIndexReader.testFilesOpenClose");
|
||||
|
@ -812,259 +679,6 @@ public class TestIndexReader extends LuceneTestCase
|
|||
dir.close();
|
||||
}
|
||||
|
||||
public void testUndeleteAll() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
addDocumentWithFields(writer);
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
reader.deleteDocument(0);
|
||||
reader.deleteDocument(1);
|
||||
reader.undeleteAll();
|
||||
reader.close();
|
||||
reader = IndexReader.open(dir, false);
|
||||
assertEquals(2, reader.numDocs()); // nothing has really been deleted thanks to undeleteAll()
|
||||
reader.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testUndeleteAllAfterClose() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
addDocumentWithFields(writer);
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
reader.deleteDocument(0);
|
||||
reader.close();
|
||||
reader = IndexReader.open(dir, false);
|
||||
reader.undeleteAll();
|
||||
assertEquals(2, reader.numDocs()); // nothing has really been deleted thanks to undeleteAll()
|
||||
reader.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testUndeleteAllAfterCloseThenReopen() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
addDocumentWithFields(writer);
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
reader.deleteDocument(0);
|
||||
reader.close();
|
||||
reader = IndexReader.open(dir, false);
|
||||
reader.undeleteAll();
|
||||
reader.close();
|
||||
reader = IndexReader.open(dir, false);
|
||||
assertEquals(2, reader.numDocs()); // nothing has really been deleted thanks to undeleteAll()
|
||||
reader.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testDeleteReaderReaderConflictUnoptimized() throws IOException{
|
||||
deleteReaderReaderConflict(false);
|
||||
}
|
||||
|
||||
public void testDeleteReaderReaderConflictOptimized() throws IOException{
|
||||
deleteReaderReaderConflict(true);
|
||||
}
|
||||
|
||||
/**
|
||||
* Make sure if reader tries to commit but hits disk
|
||||
* full that reader remains consistent and usable.
|
||||
*/
|
||||
public void testDiskFull() throws IOException {
|
||||
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
int START_COUNT = 157;
|
||||
int END_COUNT = 144;
|
||||
|
||||
// First build up a starting index:
|
||||
MockDirectoryWrapper startDir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: create initial index");
|
||||
writer.setInfoStream(System.out);
|
||||
}
|
||||
for(int i=0;i<157;i++) {
|
||||
Document d = new Document();
|
||||
d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
|
||||
d.add(newField("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED));
|
||||
writer.addDocument(d);
|
||||
if (0==i%10)
|
||||
writer.commit();
|
||||
}
|
||||
writer.close();
|
||||
|
||||
{
|
||||
IndexReader r = IndexReader.open(startDir);
|
||||
IndexSearcher searcher = newSearcher(r);
|
||||
ScoreDoc[] hits = null;
|
||||
try {
|
||||
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
} catch (IOException e) {
|
||||
e.printStackTrace();
|
||||
fail("exception when init searching: " + e);
|
||||
}
|
||||
searcher.close();
|
||||
r.close();
|
||||
}
|
||||
|
||||
long diskUsage = startDir.getRecomputedActualSizeInBytes();
|
||||
long diskFree = diskUsage+100;
|
||||
|
||||
IOException err = null;
|
||||
|
||||
boolean done = false;
|
||||
boolean gotExc = false;
|
||||
|
||||
// Iterate w/ ever increasing free disk space:
|
||||
while(!done) {
|
||||
MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir));
|
||||
|
||||
// If IndexReader hits disk full, it can write to
|
||||
// the same files again.
|
||||
dir.setPreventDoubleWrite(false);
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
|
||||
// For each disk size, first try to commit against
|
||||
// dir that will hit random IOExceptions & disk
|
||||
// full; after, give it infinite disk space & turn
|
||||
// off random IOExceptions & retry w/ same reader:
|
||||
boolean success = false;
|
||||
|
||||
for(int x=0;x<2;x++) {
|
||||
|
||||
double rate = 0.05;
|
||||
double diskRatio = ((double) diskFree)/diskUsage;
|
||||
long thisDiskFree;
|
||||
String testName;
|
||||
|
||||
if (0 == x) {
|
||||
thisDiskFree = diskFree;
|
||||
if (diskRatio >= 2.0) {
|
||||
rate /= 2;
|
||||
}
|
||||
if (diskRatio >= 4.0) {
|
||||
rate /= 2;
|
||||
}
|
||||
if (diskRatio >= 6.0) {
|
||||
rate = 0.0;
|
||||
}
|
||||
if (VERBOSE) {
|
||||
System.out.println("\ncycle: " + diskFree + " bytes");
|
||||
}
|
||||
testName = "disk full during reader.close() @ " + thisDiskFree + " bytes";
|
||||
} else {
|
||||
thisDiskFree = 0;
|
||||
rate = 0.0;
|
||||
if (VERBOSE) {
|
||||
System.out.println("\ncycle: same writer: unlimited disk space");
|
||||
}
|
||||
testName = "reader re-use after disk full";
|
||||
}
|
||||
|
||||
dir.setMaxSizeInBytes(thisDiskFree);
|
||||
dir.setRandomIOExceptionRate(rate);
|
||||
Similarity sim = new DefaultSimilarity();
|
||||
try {
|
||||
if (0 == x) {
|
||||
int docId = 12;
|
||||
for(int i=0;i<13;i++) {
|
||||
reader.deleteDocument(docId);
|
||||
reader.setNorm(docId, "content", sim.encodeNormValue(2.0f));
|
||||
docId += 12;
|
||||
}
|
||||
}
|
||||
reader.close();
|
||||
success = true;
|
||||
if (0 == x) {
|
||||
done = true;
|
||||
}
|
||||
} catch (IOException e) {
|
||||
if (VERBOSE) {
|
||||
System.out.println(" hit IOException: " + e);
|
||||
e.printStackTrace(System.out);
|
||||
}
|
||||
err = e;
|
||||
gotExc = true;
|
||||
if (1 == x) {
|
||||
e.printStackTrace();
|
||||
fail(testName + " hit IOException after disk space was freed up");
|
||||
}
|
||||
}
|
||||
|
||||
// Finally, verify index is not corrupt, and, if
|
||||
// we succeeded, we see all docs changed, and if
|
||||
// we failed, we see either all docs or no docs
|
||||
// changed (transactional semantics):
|
||||
IndexReader newReader = null;
|
||||
try {
|
||||
newReader = IndexReader.open(dir, false);
|
||||
} catch (IOException e) {
|
||||
e.printStackTrace();
|
||||
fail(testName + ":exception when creating IndexReader after disk full during close: " + e);
|
||||
}
|
||||
/*
|
||||
int result = newReader.docFreq(searchTerm);
|
||||
if (success) {
|
||||
if (result != END_COUNT) {
|
||||
fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + END_COUNT);
|
||||
}
|
||||
} else {
|
||||
// On hitting exception we still may have added
|
||||
// all docs:
|
||||
if (result != START_COUNT && result != END_COUNT) {
|
||||
err.printStackTrace();
|
||||
fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
IndexSearcher searcher = newSearcher(newReader);
|
||||
ScoreDoc[] hits = null;
|
||||
try {
|
||||
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
} catch (IOException e) {
|
||||
e.printStackTrace();
|
||||
fail(testName + ": exception when searching: " + e);
|
||||
}
|
||||
int result2 = hits.length;
|
||||
if (success) {
|
||||
if (result2 != END_COUNT) {
|
||||
fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT);
|
||||
}
|
||||
} else {
|
||||
// On hitting exception we still may have added
|
||||
// all docs:
|
||||
if (result2 != START_COUNT && result2 != END_COUNT) {
|
||||
err.printStackTrace();
|
||||
fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT);
|
||||
}
|
||||
}
|
||||
|
||||
searcher.close();
|
||||
newReader.close();
|
||||
|
||||
if (result2 == END_COUNT) {
|
||||
if (!gotExc)
|
||||
fail("never hit disk full");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
dir.close();
|
||||
|
||||
// Try again with 10 more bytes of free space:
|
||||
diskFree += 10;
|
||||
}
|
||||
|
||||
startDir.close();
|
||||
}
|
||||
|
||||
public void testDocsOutOfOrderJIRA140() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
|
@ -1161,133 +775,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
dir.close();
|
||||
}
|
||||
|
||||
public void testMultiReaderDeletes() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
RandomIndexWriter w= new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
|
||||
Document doc = new Document();
|
||||
doc.add(newField("f", "doctor", Field.Store.NO, Field.Index.NOT_ANALYZED));
|
||||
w.addDocument(doc);
|
||||
doc = new Document();
|
||||
w.commit();
|
||||
doc.add(newField("f", "who", Field.Store.NO, Field.Index.NOT_ANALYZED));
|
||||
w.addDocument(doc);
|
||||
IndexReader r = new SlowMultiReaderWrapper(w.getReader());
|
||||
w.close();
|
||||
|
||||
assertNull(r.getDeletedDocs());
|
||||
r.close();
|
||||
|
||||
r = new SlowMultiReaderWrapper(IndexReader.open(dir, false));
|
||||
|
||||
assertNull(r.getDeletedDocs());
|
||||
assertEquals(1, r.deleteDocuments(new Term("f", "doctor")));
|
||||
assertNotNull(r.getDeletedDocs());
|
||||
assertTrue(r.getDeletedDocs().get(0));
|
||||
assertEquals(1, r.deleteDocuments(new Term("f", "who")));
|
||||
assertTrue(r.getDeletedDocs().get(1));
|
||||
r.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
private void deleteReaderReaderConflict(boolean optimize) throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
|
||||
Term searchTerm1 = new Term("content", "aaa");
|
||||
Term searchTerm2 = new Term("content", "bbb");
|
||||
Term searchTerm3 = new Term("content", "ccc");
|
||||
|
||||
// add 100 documents with term : aaa
|
||||
// add 100 documents with term : bbb
|
||||
// add 100 documents with term : ccc
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
|
||||
for (int i = 0; i < 100; i++) {
|
||||
addDoc(writer, searchTerm1.text());
|
||||
addDoc(writer, searchTerm2.text());
|
||||
addDoc(writer, searchTerm3.text());
|
||||
}
|
||||
if(optimize)
|
||||
writer.optimize();
|
||||
writer.close();
|
||||
|
||||
// OPEN TWO READERS
|
||||
// Both readers get segment info as exists at this time
|
||||
IndexReader reader1 = IndexReader.open(dir, false);
|
||||
assertEquals("first opened", 100, reader1.docFreq(searchTerm1));
|
||||
assertEquals("first opened", 100, reader1.docFreq(searchTerm2));
|
||||
assertEquals("first opened", 100, reader1.docFreq(searchTerm3));
|
||||
assertTermDocsCount("first opened", reader1, searchTerm1, 100);
|
||||
assertTermDocsCount("first opened", reader1, searchTerm2, 100);
|
||||
assertTermDocsCount("first opened", reader1, searchTerm3, 100);
|
||||
|
||||
IndexReader reader2 = IndexReader.open(dir, false);
|
||||
assertEquals("first opened", 100, reader2.docFreq(searchTerm1));
|
||||
assertEquals("first opened", 100, reader2.docFreq(searchTerm2));
|
||||
assertEquals("first opened", 100, reader2.docFreq(searchTerm3));
|
||||
assertTermDocsCount("first opened", reader2, searchTerm1, 100);
|
||||
assertTermDocsCount("first opened", reader2, searchTerm2, 100);
|
||||
assertTermDocsCount("first opened", reader2, searchTerm3, 100);
|
||||
|
||||
// DELETE DOCS FROM READER 2 and CLOSE IT
|
||||
// delete documents containing term: aaa
|
||||
// when the reader is closed, the segment info is updated and
|
||||
// the first reader is now stale
|
||||
reader2.deleteDocuments(searchTerm1);
|
||||
assertEquals("after delete 1", 100, reader2.docFreq(searchTerm1));
|
||||
assertEquals("after delete 1", 100, reader2.docFreq(searchTerm2));
|
||||
assertEquals("after delete 1", 100, reader2.docFreq(searchTerm3));
|
||||
assertTermDocsCount("after delete 1", reader2, searchTerm1, 0);
|
||||
assertTermDocsCount("after delete 1", reader2, searchTerm2, 100);
|
||||
assertTermDocsCount("after delete 1", reader2, searchTerm3, 100);
|
||||
reader2.close();
|
||||
|
||||
// Make sure reader 1 is unchanged since it was open earlier
|
||||
assertEquals("after delete 1", 100, reader1.docFreq(searchTerm1));
|
||||
assertEquals("after delete 1", 100, reader1.docFreq(searchTerm2));
|
||||
assertEquals("after delete 1", 100, reader1.docFreq(searchTerm3));
|
||||
assertTermDocsCount("after delete 1", reader1, searchTerm1, 100);
|
||||
assertTermDocsCount("after delete 1", reader1, searchTerm2, 100);
|
||||
assertTermDocsCount("after delete 1", reader1, searchTerm3, 100);
|
||||
|
||||
|
||||
// ATTEMPT TO DELETE FROM STALE READER
|
||||
// delete documents containing term: bbb
|
||||
try {
|
||||
reader1.deleteDocuments(searchTerm2);
|
||||
fail("Delete allowed from a stale index reader");
|
||||
} catch (IOException e) {
|
||||
/* success */
|
||||
}
|
||||
|
||||
// RECREATE READER AND TRY AGAIN
|
||||
reader1.close();
|
||||
reader1 = IndexReader.open(dir, false);
|
||||
assertEquals("reopened", 100, reader1.docFreq(searchTerm1));
|
||||
assertEquals("reopened", 100, reader1.docFreq(searchTerm2));
|
||||
assertEquals("reopened", 100, reader1.docFreq(searchTerm3));
|
||||
assertTermDocsCount("reopened", reader1, searchTerm1, 0);
|
||||
assertTermDocsCount("reopened", reader1, searchTerm2, 100);
|
||||
assertTermDocsCount("reopened", reader1, searchTerm3, 100);
|
||||
|
||||
reader1.deleteDocuments(searchTerm2);
|
||||
assertEquals("deleted 2", 100, reader1.docFreq(searchTerm1));
|
||||
assertEquals("deleted 2", 100, reader1.docFreq(searchTerm2));
|
||||
assertEquals("deleted 2", 100, reader1.docFreq(searchTerm3));
|
||||
assertTermDocsCount("deleted 2", reader1, searchTerm1, 0);
|
||||
assertTermDocsCount("deleted 2", reader1, searchTerm2, 0);
|
||||
assertTermDocsCount("deleted 2", reader1, searchTerm3, 100);
|
||||
reader1.close();
|
||||
|
||||
// Open another reader to confirm that everything is deleted
|
||||
reader2 = IndexReader.open(dir, false);
|
||||
assertTermDocsCount("reopened 2", reader2, searchTerm1, 0);
|
||||
assertTermDocsCount("reopened 2", reader2, searchTerm2, 0);
|
||||
assertTermDocsCount("reopened 2", reader2, searchTerm3, 100);
|
||||
reader2.close();
|
||||
|
||||
dir.close();
|
||||
}
|
||||
|
||||
private void addDocumentWithFields(IndexWriter writer) throws IOException
|
||||
static void addDocumentWithFields(IndexWriter writer) throws IOException
|
||||
{
|
||||
Document doc = new Document();
|
||||
doc.add(newField("keyword","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
|
||||
|
@ -1297,7 +785,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
writer.addDocument(doc);
|
||||
}
|
||||
|
||||
private void addDocumentWithDifferentFields(IndexWriter writer) throws IOException
|
||||
static void addDocumentWithDifferentFields(IndexWriter writer) throws IOException
|
||||
{
|
||||
Document doc = new Document();
|
||||
doc.add(newField("keyword2","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
|
||||
|
@ -1307,7 +795,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
writer.addDocument(doc);
|
||||
}
|
||||
|
||||
private void addDocumentWithTermVectorFields(IndexWriter writer) throws IOException
|
||||
static void addDocumentWithTermVectorFields(IndexWriter writer) throws IOException
|
||||
{
|
||||
Document doc = new Document();
|
||||
doc.add(newField("tvnot","tvnot", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
|
||||
|
@ -1319,7 +807,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
writer.addDocument(doc);
|
||||
}
|
||||
|
||||
private void addDoc(IndexWriter writer, String value) throws IOException {
|
||||
static void addDoc(IndexWriter writer, String value) throws IOException {
|
||||
Document doc = new Document();
|
||||
doc.add(newField("content", value, Field.Store.NO, Field.Index.ANALYZED));
|
||||
writer.addDocument(doc);
|
||||
|
@ -1557,28 +1045,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
dir.close();
|
||||
}
|
||||
|
||||
// LUCENE-1647
|
||||
public void testIndexReaderUnDeleteAll() throws Exception {
|
||||
MockDirectoryWrapper dir = newDirectory();
|
||||
dir.setPreventDoubleWrite(false);
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
writer.addDocument(createDocument("a"));
|
||||
writer.addDocument(createDocument("b"));
|
||||
writer.addDocument(createDocument("c"));
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
reader.deleteDocuments(new Term("id", "a"));
|
||||
reader.flush();
|
||||
reader.deleteDocuments(new Term("id", "b"));
|
||||
reader.undeleteAll();
|
||||
reader.deleteDocuments(new Term("id", "b"));
|
||||
reader.close();
|
||||
IndexReader.open(dir,true).close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
private Document createDocument(String id) {
|
||||
static Document createDocument(String id) {
|
||||
Document doc = new Document();
|
||||
doc.add(newField("id", id, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
|
||||
return doc;
|
||||
|
@ -1692,54 +1159,6 @@ public class TestIndexReader extends LuceneTestCase
|
|||
dir.close();
|
||||
}
|
||||
|
||||
// LUCENE-1579: Make sure all SegmentReaders are new when
|
||||
// reopen switches readOnly
|
||||
public void testReopenChangeReadonly() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
|
||||
setMaxBufferedDocs(-1).
|
||||
setMergePolicy(newLogMergePolicy(10))
|
||||
);
|
||||
Document doc = new Document();
|
||||
doc.add(newField("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
|
||||
writer.addDocument(doc);
|
||||
writer.commit();
|
||||
|
||||
// Open reader1
|
||||
IndexReader r = IndexReader.open(dir, false);
|
||||
assertTrue(r instanceof DirectoryReader);
|
||||
IndexReader r1 = getOnlySegmentReader(r);
|
||||
final int[] ints = FieldCache.DEFAULT.getInts(r1, "number");
|
||||
assertEquals(1, ints.length);
|
||||
assertEquals(17, ints[0]);
|
||||
|
||||
// Reopen to readonly w/ no chnages
|
||||
IndexReader r3 = r.reopen(true);
|
||||
assertTrue(((DirectoryReader) r3).readOnly);
|
||||
r3.close();
|
||||
|
||||
// Add new segment
|
||||
writer.addDocument(doc);
|
||||
writer.commit();
|
||||
|
||||
// Reopen reader1 --> reader2
|
||||
IndexReader r2 = r.reopen(true);
|
||||
r.close();
|
||||
assertTrue(((DirectoryReader) r2).readOnly);
|
||||
IndexReader[] subs = r2.getSequentialSubReaders();
|
||||
final int[] ints2 = FieldCache.DEFAULT.getInts(subs[0], "number");
|
||||
r2.close();
|
||||
|
||||
assertTrue(((SegmentReader) subs[0]).readOnly);
|
||||
assertTrue(((SegmentReader) subs[1]).readOnly);
|
||||
assertTrue(ints == ints2);
|
||||
|
||||
writer.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
// LUCENE-1586: getUniqueTermCount
|
||||
public void testUniqueTermCount() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
|
|
|
@ -0,0 +1,374 @@
|
|||
package org.apache.lucene.index;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.MockDirectoryWrapper;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
|
||||
import static org.apache.lucene.index.TestIndexReader.addDoc;
|
||||
import static org.apache.lucene.index.TestIndexReader.addDocumentWithFields;
|
||||
import static org.apache.lucene.index.TestIndexReader.assertTermDocsCount;
|
||||
import static org.apache.lucene.index.TestIndexReader.createDocument;
|
||||
|
||||
public class TestIndexReaderDelete extends LuceneTestCase {
|
||||
private void deleteReaderReaderConflict(boolean optimize) throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
|
||||
Term searchTerm1 = new Term("content", "aaa");
|
||||
Term searchTerm2 = new Term("content", "bbb");
|
||||
Term searchTerm3 = new Term("content", "ccc");
|
||||
|
||||
// add 100 documents with term : aaa
|
||||
// add 100 documents with term : bbb
|
||||
// add 100 documents with term : ccc
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
|
||||
for (int i = 0; i < 100; i++) {
|
||||
addDoc(writer, searchTerm1.text());
|
||||
addDoc(writer, searchTerm2.text());
|
||||
addDoc(writer, searchTerm3.text());
|
||||
}
|
||||
if(optimize)
|
||||
writer.optimize();
|
||||
writer.close();
|
||||
|
||||
// OPEN TWO READERS
|
||||
// Both readers get segment info as exists at this time
|
||||
IndexReader reader1 = IndexReader.open(dir, false);
|
||||
assertEquals("first opened", 100, reader1.docFreq(searchTerm1));
|
||||
assertEquals("first opened", 100, reader1.docFreq(searchTerm2));
|
||||
assertEquals("first opened", 100, reader1.docFreq(searchTerm3));
|
||||
assertTermDocsCount("first opened", reader1, searchTerm1, 100);
|
||||
assertTermDocsCount("first opened", reader1, searchTerm2, 100);
|
||||
assertTermDocsCount("first opened", reader1, searchTerm3, 100);
|
||||
|
||||
IndexReader reader2 = IndexReader.open(dir, false);
|
||||
assertEquals("first opened", 100, reader2.docFreq(searchTerm1));
|
||||
assertEquals("first opened", 100, reader2.docFreq(searchTerm2));
|
||||
assertEquals("first opened", 100, reader2.docFreq(searchTerm3));
|
||||
assertTermDocsCount("first opened", reader2, searchTerm1, 100);
|
||||
assertTermDocsCount("first opened", reader2, searchTerm2, 100);
|
||||
assertTermDocsCount("first opened", reader2, searchTerm3, 100);
|
||||
|
||||
// DELETE DOCS FROM READER 2 and CLOSE IT
|
||||
// delete documents containing term: aaa
|
||||
// when the reader is closed, the segment info is updated and
|
||||
// the first reader is now stale
|
||||
reader2.deleteDocuments(searchTerm1);
|
||||
assertEquals("after delete 1", 100, reader2.docFreq(searchTerm1));
|
||||
assertEquals("after delete 1", 100, reader2.docFreq(searchTerm2));
|
||||
assertEquals("after delete 1", 100, reader2.docFreq(searchTerm3));
|
||||
assertTermDocsCount("after delete 1", reader2, searchTerm1, 0);
|
||||
assertTermDocsCount("after delete 1", reader2, searchTerm2, 100);
|
||||
assertTermDocsCount("after delete 1", reader2, searchTerm3, 100);
|
||||
reader2.close();
|
||||
|
||||
// Make sure reader 1 is unchanged since it was open earlier
|
||||
assertEquals("after delete 1", 100, reader1.docFreq(searchTerm1));
|
||||
assertEquals("after delete 1", 100, reader1.docFreq(searchTerm2));
|
||||
assertEquals("after delete 1", 100, reader1.docFreq(searchTerm3));
|
||||
assertTermDocsCount("after delete 1", reader1, searchTerm1, 100);
|
||||
assertTermDocsCount("after delete 1", reader1, searchTerm2, 100);
|
||||
assertTermDocsCount("after delete 1", reader1, searchTerm3, 100);
|
||||
|
||||
|
||||
// ATTEMPT TO DELETE FROM STALE READER
|
||||
// delete documents containing term: bbb
|
||||
try {
|
||||
reader1.deleteDocuments(searchTerm2);
|
||||
fail("Delete allowed from a stale index reader");
|
||||
} catch (IOException e) {
|
||||
/* success */
|
||||
}
|
||||
|
||||
// RECREATE READER AND TRY AGAIN
|
||||
reader1.close();
|
||||
reader1 = IndexReader.open(dir, false);
|
||||
assertEquals("reopened", 100, reader1.docFreq(searchTerm1));
|
||||
assertEquals("reopened", 100, reader1.docFreq(searchTerm2));
|
||||
assertEquals("reopened", 100, reader1.docFreq(searchTerm3));
|
||||
assertTermDocsCount("reopened", reader1, searchTerm1, 0);
|
||||
assertTermDocsCount("reopened", reader1, searchTerm2, 100);
|
||||
assertTermDocsCount("reopened", reader1, searchTerm3, 100);
|
||||
|
||||
reader1.deleteDocuments(searchTerm2);
|
||||
assertEquals("deleted 2", 100, reader1.docFreq(searchTerm1));
|
||||
assertEquals("deleted 2", 100, reader1.docFreq(searchTerm2));
|
||||
assertEquals("deleted 2", 100, reader1.docFreq(searchTerm3));
|
||||
assertTermDocsCount("deleted 2", reader1, searchTerm1, 0);
|
||||
assertTermDocsCount("deleted 2", reader1, searchTerm2, 0);
|
||||
assertTermDocsCount("deleted 2", reader1, searchTerm3, 100);
|
||||
reader1.close();
|
||||
|
||||
// Open another reader to confirm that everything is deleted
|
||||
reader2 = IndexReader.open(dir, false);
|
||||
assertTermDocsCount("reopened 2", reader2, searchTerm1, 0);
|
||||
assertTermDocsCount("reopened 2", reader2, searchTerm2, 0);
|
||||
assertTermDocsCount("reopened 2", reader2, searchTerm3, 100);
|
||||
reader2.close();
|
||||
|
||||
dir.close();
|
||||
}
|
||||
|
||||
private void deleteReaderWriterConflict(boolean optimize) throws IOException {
|
||||
//Directory dir = new RAMDirectory();
|
||||
Directory dir = newDirectory();
|
||||
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
Term searchTerm2 = new Term("content", "bbb");
|
||||
|
||||
// add 100 documents with term : aaa
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
|
||||
for (int i = 0; i < 100; i++) {
|
||||
addDoc(writer, searchTerm.text());
|
||||
}
|
||||
writer.close();
|
||||
|
||||
// OPEN READER AT THIS POINT - this should fix the view of the
|
||||
// index at the point of having 100 "aaa" documents and 0 "bbb"
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
|
||||
assertEquals("first docFreq", 0, reader.docFreq(searchTerm2));
|
||||
assertTermDocsCount("first reader", reader, searchTerm, 100);
|
||||
assertTermDocsCount("first reader", reader, searchTerm2, 0);
|
||||
|
||||
// add 100 documents with term : bbb
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
|
||||
for (int i = 0; i < 100; i++) {
|
||||
addDoc(writer, searchTerm2.text());
|
||||
}
|
||||
|
||||
// REQUEST OPTIMIZATION
|
||||
// This causes a new segment to become current for all subsequent
|
||||
// searchers. Because of this, deletions made via a previously open
|
||||
// reader, which would be applied to that reader's segment, are lost
|
||||
// for subsequent searchers/readers
|
||||
if(optimize)
|
||||
writer.optimize();
|
||||
writer.close();
|
||||
|
||||
// The reader should not see the new data
|
||||
assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
|
||||
assertEquals("first docFreq", 0, reader.docFreq(searchTerm2));
|
||||
assertTermDocsCount("first reader", reader, searchTerm, 100);
|
||||
assertTermDocsCount("first reader", reader, searchTerm2, 0);
|
||||
|
||||
|
||||
// DELETE DOCUMENTS CONTAINING TERM: aaa
|
||||
// NOTE: the reader was created when only "aaa" documents were in the index
|
||||
int deleted = 0;
|
||||
try {
|
||||
deleted = reader.deleteDocuments(searchTerm);
|
||||
fail("Delete allowed on an index reader with stale segment information");
|
||||
} catch (StaleReaderException e) {
|
||||
/* success */
|
||||
}
|
||||
|
||||
// Re-open index reader and try again. This time it should see
|
||||
// the new data.
|
||||
reader.close();
|
||||
reader = IndexReader.open(dir, false);
|
||||
assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
|
||||
assertEquals("first docFreq", 100, reader.docFreq(searchTerm2));
|
||||
assertTermDocsCount("first reader", reader, searchTerm, 100);
|
||||
assertTermDocsCount("first reader", reader, searchTerm2, 100);
|
||||
|
||||
deleted = reader.deleteDocuments(searchTerm);
|
||||
assertEquals("deleted count", 100, deleted);
|
||||
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
|
||||
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm2));
|
||||
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
|
||||
assertTermDocsCount("deleted termDocs", reader, searchTerm2, 100);
|
||||
reader.close();
|
||||
|
||||
// CREATE A NEW READER and re-test
|
||||
reader = IndexReader.open(dir, false);
|
||||
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm2));
|
||||
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
|
||||
assertTermDocsCount("deleted termDocs", reader, searchTerm2, 100);
|
||||
reader.close();
|
||||
dir.close();
|
||||
}
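
// A minimal sketch of the stale-reader rule exercised above, assuming only the
// helpers already used in this file (newDirectory, newIndexWriterConfig, random,
// the static addDoc import): a non-read-only reader may apply deletes only while
// no newer commit exists; otherwise it throws StaleReaderException and must be
// reopened first.
public void testStaleReaderSketch() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
  addDoc(writer, "aaa");
  writer.close();

  // Snapshot of the current segments_N:
  IndexReader reader = IndexReader.open(dir, false);

  // A second writer commits a newer segments_N, making the reader stale:
  writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
  addDoc(writer, "bbb");
  writer.close();

  try {
    reader.deleteDocuments(new Term("content", "aaa"));
    fail("delete allowed on a stale reader");
  } catch (StaleReaderException e) {
    // expected: the reader must be reopened before deleting
  }
  reader.close();
  dir.close();
}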
|
||||
|
||||
public void testBasicDelete() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
|
||||
IndexWriter writer = null;
|
||||
IndexReader reader = null;
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
|
||||
// add 100 documents with term : aaa
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
writer.setInfoStream(VERBOSE ? System.out : null);
|
||||
for (int i = 0; i < 100; i++) {
|
||||
addDoc(writer, searchTerm.text());
|
||||
}
|
||||
writer.close();
|
||||
|
||||
// OPEN READER AT THIS POINT - this should fix the view of the
|
||||
// index at the point of having 100 "aaa" documents and 0 "bbb"
|
||||
reader = IndexReader.open(dir, false);
|
||||
assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
|
||||
assertTermDocsCount("first reader", reader, searchTerm, 100);
|
||||
reader.close();
|
||||
|
||||
// DELETE DOCUMENTS CONTAINING TERM: aaa
|
||||
int deleted = 0;
|
||||
reader = IndexReader.open(dir, false);
|
||||
deleted = reader.deleteDocuments(searchTerm);
|
||||
assertEquals("deleted count", 100, deleted);
|
||||
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
|
||||
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
|
||||
|
||||
// open a 2nd reader to make sure first reader can
|
||||
// commit its changes (.del) while second reader
|
||||
// is open:
|
||||
IndexReader reader2 = IndexReader.open(dir, false);
|
||||
reader.close();
|
||||
|
||||
// CREATE A NEW READER and re-test
|
||||
reader = IndexReader.open(dir, false);
|
||||
assertEquals("deleted docFreq", 0, reader.docFreq(searchTerm));
|
||||
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
|
||||
reader.close();
|
||||
reader2.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testDeleteReaderReaderConflictUnoptimized() throws IOException {
|
||||
deleteReaderReaderConflict(false);
|
||||
}
|
||||
|
||||
public void testDeleteReaderReaderConflictOptimized() throws IOException {
|
||||
deleteReaderReaderConflict(true);
|
||||
}
|
||||
|
||||
public void testDeleteReaderWriterConflictUnoptimized() throws IOException {
|
||||
deleteReaderWriterConflict(false);
|
||||
}
|
||||
|
||||
public void testDeleteReaderWriterConflictOptimized() throws IOException {
|
||||
deleteReaderWriterConflict(true);
|
||||
}
|
||||
|
||||
public void testMultiReaderDeletes() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
RandomIndexWriter w= new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
|
||||
Document doc = new Document();
|
||||
doc.add(newField("f", "doctor", Field.Store.NO, Field.Index.NOT_ANALYZED));
|
||||
w.addDocument(doc);
|
||||
doc = new Document();
|
||||
w.commit();
|
||||
doc.add(newField("f", "who", Field.Store.NO, Field.Index.NOT_ANALYZED));
|
||||
w.addDocument(doc);
|
||||
IndexReader r = new SlowMultiReaderWrapper(w.getReader());
|
||||
w.close();
|
||||
|
||||
assertNull(r.getDeletedDocs());
|
||||
r.close();
|
||||
|
||||
r = new SlowMultiReaderWrapper(IndexReader.open(dir, false));
|
||||
|
||||
assertNull(r.getDeletedDocs());
|
||||
assertEquals(1, r.deleteDocuments(new Term("f", "doctor")));
|
||||
assertNotNull(r.getDeletedDocs());
|
||||
assertTrue(r.getDeletedDocs().get(0));
|
||||
assertEquals(1, r.deleteDocuments(new Term("f", "who")));
|
||||
assertTrue(r.getDeletedDocs().get(1));
|
||||
r.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testUndeleteAll() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
addDocumentWithFields(writer);
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
reader.deleteDocument(0);
|
||||
reader.deleteDocument(1);
|
||||
reader.undeleteAll();
|
||||
reader.close();
|
||||
reader = IndexReader.open(dir, false);
|
||||
assertEquals(2, reader.numDocs()); // nothing has really been deleted thanks to undeleteAll()
|
||||
reader.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testUndeleteAllAfterClose() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
addDocumentWithFields(writer);
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
reader.deleteDocument(0);
|
||||
reader.close();
|
||||
reader = IndexReader.open(dir, false);
|
||||
reader.undeleteAll();
|
||||
assertEquals(2, reader.numDocs()); // nothing has really been deleted thanks to undeleteAll()
|
||||
reader.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testUndeleteAllAfterCloseThenReopen() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
addDocumentWithFields(writer);
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
reader.deleteDocument(0);
|
||||
reader.close();
|
||||
reader = IndexReader.open(dir, false);
|
||||
reader.undeleteAll();
|
||||
reader.close();
|
||||
reader = IndexReader.open(dir, false);
|
||||
assertEquals(2, reader.numDocs()); // nothing has really been deleted thanks to undeleteAll()
|
||||
reader.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
// LUCENE-1647
|
||||
public void testIndexReaderUnDeleteAll() throws Exception {
|
||||
MockDirectoryWrapper dir = newDirectory();
|
||||
dir.setPreventDoubleWrite(false);
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
writer.addDocument(createDocument("a"));
|
||||
writer.addDocument(createDocument("b"));
|
||||
writer.addDocument(createDocument("c"));
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
reader.deleteDocuments(new Term("id", "a"));
|
||||
reader.flush();
|
||||
reader.deleteDocuments(new Term("id", "b"));
|
||||
reader.undeleteAll();
|
||||
reader.deleteDocuments(new Term("id", "b"));
|
||||
reader.close();
|
||||
IndexReader.open(dir,true).close();
|
||||
dir.close();
|
||||
}
|
||||
}
|
|
@ -0,0 +1,229 @@
|
|||
package org.apache.lucene.index;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.search.DefaultSimilarity;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.apache.lucene.search.Similarity;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.store.MockDirectoryWrapper;
|
||||
import org.apache.lucene.store.RAMDirectory;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util._TestUtil;
|
||||
|
||||
public class TestIndexReaderOnDiskFull extends LuceneTestCase {
|
||||
/**
|
||||
* Make sure if reader tries to commit but hits disk
|
||||
* full that reader remains consistent and usable.
|
||||
*/
|
||||
public void testDiskFull() throws IOException {
|
||||
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
int START_COUNT = 157;
|
||||
int END_COUNT = 144;
|
||||
|
||||
// First build up a starting index:
|
||||
MockDirectoryWrapper startDir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: create initial index");
|
||||
writer.setInfoStream(System.out);
|
||||
}
|
||||
for(int i=0;i<157;i++) {
|
||||
Document d = new Document();
|
||||
d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
|
||||
d.add(newField("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED));
|
||||
writer.addDocument(d);
|
||||
if (0==i%10)
|
||||
writer.commit();
|
||||
}
|
||||
writer.close();
|
||||
|
||||
{
|
||||
IndexReader r = IndexReader.open(startDir);
|
||||
IndexSearcher searcher = newSearcher(r);
|
||||
ScoreDoc[] hits = null;
|
||||
try {
|
||||
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
} catch (IOException e) {
|
||||
e.printStackTrace();
|
||||
fail("exception when init searching: " + e);
|
||||
}
|
||||
searcher.close();
|
||||
r.close();
|
||||
}
|
||||
|
||||
long diskUsage = startDir.getRecomputedActualSizeInBytes();
|
||||
long diskFree = diskUsage+_TestUtil.nextInt(random, 50, 200);
|
||||
|
||||
IOException err = null;
|
||||
|
||||
boolean done = false;
|
||||
boolean gotExc = false;
|
||||
|
||||
// Iterate w/ ever increasing free disk space:
|
||||
while(!done) {
|
||||
MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir));
|
||||
|
||||
// If IndexReader hits disk full, it can write to
|
||||
// the same files again.
|
||||
dir.setPreventDoubleWrite(false);
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
|
||||
// For each disk size, first try to commit against
|
||||
// dir that will hit random IOExceptions & disk
|
||||
// full; after, give it infinite disk space & turn
|
||||
// off random IOExceptions & retry w/ same reader:
|
||||
boolean success = false;
|
||||
|
||||
for(int x=0;x<2;x++) {
|
||||
|
||||
double rate = 0.05;
|
||||
double diskRatio = ((double) diskFree)/diskUsage;
|
||||
long thisDiskFree;
|
||||
String testName;
|
||||
|
||||
if (0 == x) {
|
||||
thisDiskFree = diskFree;
|
||||
if (diskRatio >= 2.0) {
|
||||
rate /= 2;
|
||||
}
|
||||
if (diskRatio >= 4.0) {
|
||||
rate /= 2;
|
||||
}
|
||||
if (diskRatio >= 6.0) {
|
||||
rate = 0.0;
|
||||
}
|
||||
if (VERBOSE) {
|
||||
System.out.println("\ncycle: " + diskFree + " bytes");
|
||||
}
|
||||
testName = "disk full during reader.close() @ " + thisDiskFree + " bytes";
|
||||
} else {
|
||||
thisDiskFree = 0;
|
||||
rate = 0.0;
|
||||
if (VERBOSE) {
|
||||
System.out.println("\ncycle: same writer: unlimited disk space");
|
||||
}
|
||||
testName = "reader re-use after disk full";
|
||||
}
|
||||
|
||||
dir.setMaxSizeInBytes(thisDiskFree);
|
||||
dir.setRandomIOExceptionRate(rate);
|
||||
Similarity sim = new DefaultSimilarity();
|
||||
try {
|
||||
if (0 == x) {
|
||||
int docId = 12;
|
||||
for(int i=0;i<13;i++) {
|
||||
reader.deleteDocument(docId);
|
||||
reader.setNorm(docId, "content", sim.encodeNormValue(2.0f));
|
||||
docId += 12;
|
||||
}
|
||||
}
|
||||
reader.close();
|
||||
success = true;
|
||||
if (0 == x) {
|
||||
done = true;
|
||||
}
|
||||
} catch (IOException e) {
|
||||
if (VERBOSE) {
|
||||
System.out.println(" hit IOException: " + e);
|
||||
e.printStackTrace(System.out);
|
||||
}
|
||||
err = e;
|
||||
gotExc = true;
|
||||
if (1 == x) {
|
||||
e.printStackTrace();
|
||||
fail(testName + " hit IOException after disk space was freed up");
|
||||
}
|
||||
}
|
||||
|
||||
// Finally, verify index is not corrupt, and, if
|
||||
// we succeeded, we see all docs changed, and if
|
||||
// we failed, we see either all docs or no docs
|
||||
// changed (transactional semantics):
|
||||
IndexReader newReader = null;
|
||||
try {
|
||||
newReader = IndexReader.open(dir, false);
|
||||
} catch (IOException e) {
|
||||
e.printStackTrace();
|
||||
fail(testName + ":exception when creating IndexReader after disk full during close: " + e);
|
||||
}
|
||||
/*
|
||||
int result = newReader.docFreq(searchTerm);
|
||||
if (success) {
|
||||
if (result != END_COUNT) {
|
||||
fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + END_COUNT);
|
||||
}
|
||||
} else {
|
||||
// On hitting exception we still may have added
|
||||
// all docs:
|
||||
if (result != START_COUNT && result != END_COUNT) {
|
||||
err.printStackTrace();
|
||||
fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
IndexSearcher searcher = newSearcher(newReader);
|
||||
ScoreDoc[] hits = null;
|
||||
try {
|
||||
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
} catch (IOException e) {
|
||||
e.printStackTrace();
|
||||
fail(testName + ": exception when searching: " + e);
|
||||
}
|
||||
int result2 = hits.length;
|
||||
if (success) {
|
||||
if (result2 != END_COUNT) {
|
||||
fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT);
|
||||
}
|
||||
} else {
|
||||
// On hitting exception we still may have added
|
||||
// all docs:
|
||||
if (result2 != START_COUNT && result2 != END_COUNT) {
|
||||
err.printStackTrace();
|
||||
fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT);
|
||||
}
|
||||
}
|
||||
|
||||
searcher.close();
|
||||
newReader.close();
|
||||
|
||||
if (result2 == END_COUNT) {
|
||||
if (!gotExc)
|
||||
fail("never hit disk full");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
dir.close();
|
||||
|
||||
// Try again with more bytes of free space:
|
||||
diskFree += TEST_NIGHTLY ? _TestUtil.nextInt(random, 5, 20) : _TestUtil.nextInt(random, 50, 200);
|
||||
}
|
||||
|
||||
startDir.close();
|
||||
}
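
// A small sketch of the MockDirectoryWrapper fault-injection knobs driven by the
// loop above, assuming only helpers already used in this file: a byte budget plus
// a random IOException rate are applied to the directory, so any write performed
// by reader.close() may fail and must be retried once the limits are lifted.
public void testFaultInjectionSketch() throws IOException {
  MockDirectoryWrapper dir = newDirectory();
  dir.setPreventDoubleWrite(false); // a retried commit may rewrite the same file
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
  Document d = new Document();
  d.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
  writer.addDocument(d);
  writer.close();

  // Leave only a little headroom and inject random IOExceptions:
  dir.setMaxSizeInBytes(dir.getRecomputedActualSizeInBytes() + 10);
  dir.setRandomIOExceptionRate(0.1);

  IndexReader reader = IndexReader.open(dir, false);
  boolean closed = false;
  try {
    reader.deleteDocument(0);
    reader.close(); // the commit of the .del file may hit an injected failure
    closed = true;
  } catch (IOException e) {
    // expected sometimes; lift the limits and retry with the same reader
  }
  dir.setMaxSizeInBytes(0);          // 0 means no size limit
  dir.setRandomIOExceptionRate(0.0);
  if (!closed) {
    reader.close();
  }
  // Whatever happened, the index must still open cleanly:
  IndexReader.open(dir, false).close();
  dir.close();
}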
|
||||
}
|
|
@ -36,6 +36,7 @@ import org.apache.lucene.document.Field.Store;
|
|||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
|
||||
import org.apache.lucene.search.DefaultSimilarity;
|
||||
import org.apache.lucene.search.FieldCache;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.apache.lucene.search.Similarity;
|
||||
|
@ -49,8 +50,6 @@ import org.apache.lucene.util.LuceneTestCase;
|
|||
import org.apache.lucene.util._TestUtil;
|
||||
|
||||
public class TestIndexReaderReopen extends LuceneTestCase {
|
||||
|
||||
private File indexDir;
|
||||
|
||||
public void testReopen() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
|
@ -150,22 +149,12 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
// at the end of every iteration, commit the index and reopen/recreate the reader.
|
||||
// in each iteration verify the work of previous iteration.
|
||||
// try this once with reopen once recreate, on both RAMDir and FSDir.
|
||||
public void testCommitReopenFS () throws IOException {
|
||||
Directory dir = newFSDirectory(indexDir);
|
||||
doTestReopenWithCommit(random, dir, true);
|
||||
dir.close();
|
||||
}
|
||||
public void testCommitRecreateFS () throws IOException {
|
||||
Directory dir = newFSDirectory(indexDir);
|
||||
doTestReopenWithCommit(random, dir, false);
|
||||
dir.close();
|
||||
}
|
||||
public void testCommitReopenRAM () throws IOException {
|
||||
public void testCommitReopen () throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
doTestReopenWithCommit(random, dir, true);
|
||||
dir.close();
|
||||
}
|
||||
public void testCommitRecreateRAM () throws IOException {
|
||||
public void testCommitRecreate () throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
doTestReopenWithCommit(random, dir, false);
|
||||
dir.close();
|
||||
|
@ -698,7 +687,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
|
||||
public void testThreadSafety() throws Exception {
|
||||
final Directory dir = newDirectory();
|
||||
final int n = 30 * RANDOM_MULTIPLIER;
|
||||
final int n = atLeast(30);
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
for (int i = 0; i < n; i++) {
|
||||
|
@ -1085,13 +1074,6 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
protected abstract IndexReader openReader() throws IOException;
|
||||
protected abstract void modifyIndex(int i) throws IOException;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void setUp() throws Exception {
|
||||
super.setUp();
|
||||
indexDir = _TestUtil.getTempDir("IndexReaderReopen");
|
||||
}
|
||||
|
||||
public void testCloseOrig() throws Throwable {
|
||||
Directory dir = newDirectory();
|
||||
|
@ -1244,4 +1226,52 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
r.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
// LUCENE-1579: Make sure all SegmentReaders are new when
|
||||
// reopen switches readOnly
|
||||
public void testReopenChangeReadonly() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
|
||||
setMaxBufferedDocs(-1).
|
||||
setMergePolicy(newLogMergePolicy(10))
|
||||
);
|
||||
Document doc = new Document();
|
||||
doc.add(newField("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
|
||||
writer.addDocument(doc);
|
||||
writer.commit();
|
||||
|
||||
// Open reader1
|
||||
IndexReader r = IndexReader.open(dir, false);
|
||||
assertTrue(r instanceof DirectoryReader);
|
||||
IndexReader r1 = getOnlySegmentReader(r);
|
||||
final int[] ints = FieldCache.DEFAULT.getInts(r1, "number");
|
||||
assertEquals(1, ints.length);
|
||||
assertEquals(17, ints[0]);
|
||||
|
||||
// Reopen to readonly w/ no changes
|
||||
IndexReader r3 = r.reopen(true);
|
||||
assertTrue(((DirectoryReader) r3).readOnly);
|
||||
r3.close();
|
||||
|
||||
// Add new segment
|
||||
writer.addDocument(doc);
|
||||
writer.commit();
|
||||
|
||||
// Reopen reader1 --> reader2
|
||||
IndexReader r2 = r.reopen(true);
|
||||
r.close();
|
||||
assertTrue(((DirectoryReader) r2).readOnly);
|
||||
IndexReader[] subs = r2.getSequentialSubReaders();
|
||||
final int[] ints2 = FieldCache.DEFAULT.getInts(subs[0], "number");
|
||||
r2.close();
|
||||
|
||||
assertTrue(((SegmentReader) subs[0]).readOnly);
|
||||
assertTrue(((SegmentReader) subs[1]).readOnly);
|
||||
assertTrue(ints == ints2);
|
||||
|
||||
writer.close();
|
||||
dir.close();
|
||||
}
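
// A minimal sketch of the reopen contract relied on throughout this class,
// assuming only the helpers already used here: reopen() returns the very same
// instance while the index is unchanged, and a fresh instance after a commit,
// while the caller keeps ownership of (and must close) the old reader.
public void testReopenContractSketch() throws Exception {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
  writer.addDocument(new Document());
  writer.commit();

  IndexReader r1 = IndexReader.open(dir, true);
  assertSame("no changes -> same instance", r1, r1.reopen());

  writer.addDocument(new Document());
  writer.commit();

  IndexReader r2 = r1.reopen();   // newer commit -> new reader instance
  assertNotSame(r1, r2);
  assertEquals(2, r2.numDocs());
  assertEquals(1, r1.numDocs());  // the old reader keeps its point-in-time view

  r1.close();
  r2.close();
  writer.close();
  dir.close();
}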
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
|
@ -0,0 +1,668 @@
|
|||
package org.apache.lucene.index;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.Reader;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.analysis.MockFixedLengthPayloadFilter;
|
||||
import org.apache.lucene.analysis.MockTokenizer;
|
||||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.MockDirectoryWrapper;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util._TestUtil;
|
||||
|
||||
public class TestIndexWriterCommit extends LuceneTestCase {
|
||||
/*
|
||||
* Simple test for "commit on close": open writer then
|
||||
* add a bunch of docs, making sure reader does not see
|
||||
* these docs until writer is closed.
|
||||
*/
|
||||
public void testCommitOnClose() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
for (int i = 0; i < 14; i++) {
|
||||
TestIndexWriter.addDoc(writer);
|
||||
}
|
||||
writer.close();
|
||||
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
IndexSearcher searcher = new IndexSearcher(dir, false);
|
||||
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
assertEquals("first number of hits", 14, hits.length);
|
||||
searcher.close();
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
for(int i=0;i<3;i++) {
|
||||
for(int j=0;j<11;j++) {
|
||||
TestIndexWriter.addDoc(writer);
|
||||
}
|
||||
searcher = new IndexSearcher(dir, false);
|
||||
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
|
||||
searcher.close();
|
||||
assertTrue("reader should have still been current", reader.isCurrent());
|
||||
}
|
||||
|
||||
// Now, close the writer:
|
||||
writer.close();
|
||||
assertFalse("reader should not be current now", reader.isCurrent());
|
||||
|
||||
searcher = new IndexSearcher(dir, false);
|
||||
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
assertEquals("reader did not see changes after writer was closed", 47, hits.length);
|
||||
searcher.close();
|
||||
reader.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
/*
|
||||
* Simple test for "commit on close": open writer, then
|
||||
* add a bunch of docs, making sure reader does not see
|
||||
* them until writer has closed. Then instead of
|
||||
* closing the writer, call abort and verify reader sees
|
||||
* nothing was added. Then verify we can open the index
|
||||
* and add docs to it.
|
||||
*/
|
||||
public void testCommitOnCloseAbort() throws IOException {
|
||||
MockDirectoryWrapper dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10));
|
||||
for (int i = 0; i < 14; i++) {
|
||||
TestIndexWriter.addDoc(writer);
|
||||
}
|
||||
writer.close();
|
||||
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
IndexSearcher searcher = new IndexSearcher(dir, false);
|
||||
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
assertEquals("first number of hits", 14, hits.length);
|
||||
searcher.close();
|
||||
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))
|
||||
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
|
||||
for(int j=0;j<17;j++) {
|
||||
TestIndexWriter.addDoc(writer);
|
||||
}
|
||||
// Delete all docs:
|
||||
writer.deleteDocuments(searchTerm);
|
||||
|
||||
searcher = new IndexSearcher(dir, false);
|
||||
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
|
||||
searcher.close();
|
||||
|
||||
// Now, close the writer:
|
||||
writer.rollback();
|
||||
|
||||
TestIndexWriter.assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");
|
||||
|
||||
searcher = new IndexSearcher(dir, false);
|
||||
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
assertEquals("saw changes after writer.abort", 14, hits.length);
|
||||
searcher.close();
|
||||
|
||||
// Now make sure we can re-open the index, add docs,
|
||||
// and all is good:
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))
|
||||
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
|
||||
|
||||
// On abort, writer in fact may write to the same
|
||||
// segments_N file:
|
||||
dir.setPreventDoubleWrite(false);
|
||||
|
||||
for(int i=0;i<12;i++) {
|
||||
for(int j=0;j<17;j++) {
|
||||
TestIndexWriter.addDoc(writer);
|
||||
}
|
||||
searcher = new IndexSearcher(dir, false);
|
||||
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
|
||||
searcher.close();
|
||||
}
|
||||
|
||||
writer.close();
|
||||
searcher = new IndexSearcher(dir, false);
|
||||
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
assertEquals("didn't see changes after close", 218, hits.length);
|
||||
searcher.close();
|
||||
|
||||
dir.close();
|
||||
}
|
||||
|
||||
/*
|
||||
* Verify that a writer with "commit on close" indeed
|
||||
* cleans up the temp segments created after opening
|
||||
* that are not referenced by the starting segments
|
||||
* file. We check this by using MockDirectoryWrapper to
|
||||
* measure max temp disk space used.
|
||||
*/
|
||||
public void testCommitOnCloseDiskUsage() throws IOException {
|
||||
MockDirectoryWrapper dir = newDirectory();
|
||||
Analyzer analyzer;
|
||||
if (random.nextBoolean()) {
|
||||
// no payloads
|
||||
analyzer = new Analyzer() {
|
||||
@Override
|
||||
public TokenStream tokenStream(String fieldName, Reader reader) {
|
||||
return new MockTokenizer(reader, MockTokenizer.WHITESPACE, true);
|
||||
}
|
||||
};
|
||||
} else {
|
||||
// fixed length payloads
|
||||
final int length = random.nextInt(200);
|
||||
analyzer = new Analyzer() {
|
||||
@Override
|
||||
public TokenStream tokenStream(String fieldName, Reader reader) {
|
||||
return new MockFixedLengthPayloadFilter(random,
|
||||
new MockTokenizer(reader, MockTokenizer.WHITESPACE, true),
|
||||
length);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
IndexWriter writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).
|
||||
setMaxBufferedDocs(10).
|
||||
setReaderPooling(false).
|
||||
setMergePolicy(newLogMergePolicy(10))
|
||||
);
|
||||
for(int j=0;j<30;j++) {
|
||||
TestIndexWriter.addDocWithIndex(writer, j);
|
||||
}
|
||||
writer.close();
|
||||
dir.resetMaxUsedSizeInBytes();
|
||||
|
||||
dir.setTrackDiskUsage(true);
|
||||
long startDiskUsage = dir.getMaxUsedSizeInBytes();
|
||||
writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer)
|
||||
.setOpenMode(OpenMode.APPEND).
|
||||
setMaxBufferedDocs(10).
|
||||
setMergeScheduler(new SerialMergeScheduler()).
|
||||
setReaderPooling(false).
|
||||
setMergePolicy(newLogMergePolicy(10))
|
||||
|
||||
);
|
||||
for(int j=0;j<1470;j++) {
|
||||
TestIndexWriter.addDocWithIndex(writer, j);
|
||||
}
|
||||
long midDiskUsage = dir.getMaxUsedSizeInBytes();
|
||||
dir.resetMaxUsedSizeInBytes();
|
||||
writer.optimize();
|
||||
writer.close();
|
||||
|
||||
IndexReader.open(dir, true).close();
|
||||
|
||||
long endDiskUsage = dir.getMaxUsedSizeInBytes();
|
||||
|
||||
// Ending index is 50X as large as starting index; due
|
||||
// to 3X disk usage normally we allow 150X max
|
||||
// transient usage. If something is wrong w/ deleter
|
||||
// and it doesn't delete intermediate segments then it
|
||||
// will exceed this 150X:
|
||||
// System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
|
||||
assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage + " max=" + (startDiskUsage*150),
|
||||
midDiskUsage < 150*startDiskUsage);
|
||||
assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage + " max=" + (startDiskUsage*150),
|
||||
endDiskUsage < 150*startDiskUsage);
|
||||
dir.close();
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Verify that calling optimize when writer is open for
|
||||
* "commit on close" works correctly both for rollback()
|
||||
* and close().
|
||||
*/
|
||||
public void testCommitOnCloseOptimize() throws IOException {
|
||||
MockDirectoryWrapper dir = newDirectory();
|
||||
// Must disable throwing exc on double-write: this
|
||||
// test uses IW.rollback which easily results in
|
||||
// writing to same file more than once
|
||||
dir.setPreventDoubleWrite(false);
|
||||
IndexWriter writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
|
||||
setMaxBufferedDocs(10).
|
||||
setMergePolicy(newLogMergePolicy(10))
|
||||
);
|
||||
for(int j=0;j<17;j++) {
|
||||
TestIndexWriter.addDocWithIndex(writer, j);
|
||||
}
|
||||
writer.close();
|
||||
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
|
||||
writer.optimize();
|
||||
|
||||
if (VERBOSE) {
|
||||
writer.setInfoStream(System.out);
|
||||
}
|
||||
|
||||
// Open a reader before closing (committing) the writer:
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
|
||||
// Reader should see index as unoptimized at this
|
||||
// point:
|
||||
assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
|
||||
reader.close();
|
||||
|
||||
// Abort the writer:
|
||||
writer.rollback();
|
||||
TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after optimize");
|
||||
|
||||
// Open a reader after aborting writer:
|
||||
reader = IndexReader.open(dir, true);
|
||||
|
||||
// Reader should still see index as unoptimized:
|
||||
assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
|
||||
reader.close();
|
||||
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: do real optimize");
|
||||
}
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
|
||||
if (VERBOSE) {
|
||||
writer.setInfoStream(System.out);
|
||||
}
|
||||
writer.optimize();
|
||||
writer.close();
|
||||
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: writer closed");
|
||||
}
|
||||
TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after optimize");
|
||||
|
||||
// Open a reader after the writer has optimized and closed:
|
||||
reader = IndexReader.open(dir, true);
|
||||
|
||||
// Reader should now see index as optimized:
|
||||
assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized());
|
||||
reader.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
// LUCENE-2095: make sure with multiple threads commit
|
||||
// doesn't return until all changes are in fact in the
|
||||
// index
|
||||
public void testCommitThreadSafety() throws Throwable {
|
||||
final int NUM_THREADS = 5;
|
||||
final double RUN_SEC = 0.5;
|
||||
final Directory dir = newDirectory();
|
||||
final RandomIndexWriter w = new RandomIndexWriter(random, dir, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
|
||||
_TestUtil.reduceOpenFiles(w.w);
|
||||
w.commit();
|
||||
final AtomicBoolean failed = new AtomicBoolean();
|
||||
Thread[] threads = new Thread[NUM_THREADS];
|
||||
final long endTime = System.currentTimeMillis()+((long) (RUN_SEC*1000));
|
||||
for(int i=0;i<NUM_THREADS;i++) {
|
||||
final int finalI = i;
|
||||
threads[i] = new Thread() {
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
final Document doc = new Document();
|
||||
IndexReader r = IndexReader.open(dir);
|
||||
Field f = newField("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
|
||||
doc.add(f);
|
||||
int count = 0;
|
||||
do {
|
||||
if (failed.get()) break;
|
||||
for(int j=0;j<10;j++) {
|
||||
final String s = finalI + "_" + String.valueOf(count++);
|
||||
f.setValue(s);
|
||||
w.addDocument(doc);
|
||||
w.commit();
|
||||
IndexReader r2 = r.reopen();
|
||||
assertTrue(r2 != r);
|
||||
r.close();
|
||||
r = r2;
|
||||
assertEquals("term=f:" + s + "; r=" + r, 1, r.docFreq(new Term("f", s)));
|
||||
}
|
||||
} while(System.currentTimeMillis() < endTime);
|
||||
r.close();
|
||||
} catch (Throwable t) {
|
||||
failed.set(true);
|
||||
throw new RuntimeException(t);
|
||||
}
|
||||
}
|
||||
};
|
||||
threads[i].start();
|
||||
}
|
||||
for(int i=0;i<NUM_THREADS;i++) {
|
||||
threads[i].join();
|
||||
}
|
||||
assertFalse(failed.get());
|
||||
w.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
// LUCENE-1044: test writer.commit() when ac=false
|
||||
public void testForceCommit() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
|
||||
IndexWriter writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
|
||||
setMaxBufferedDocs(2).
|
||||
setMergePolicy(newLogMergePolicy(5))
|
||||
);
|
||||
writer.commit();
|
||||
|
||||
for (int i = 0; i < 23; i++)
|
||||
TestIndexWriter.addDoc(writer);
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
assertEquals(0, reader.numDocs());
|
||||
writer.commit();
|
||||
IndexReader reader2 = reader.reopen();
|
||||
assertEquals(0, reader.numDocs());
|
||||
assertEquals(23, reader2.numDocs());
|
||||
reader.close();
|
||||
|
||||
for (int i = 0; i < 17; i++)
|
||||
TestIndexWriter.addDoc(writer);
|
||||
assertEquals(23, reader2.numDocs());
|
||||
reader2.close();
|
||||
reader = IndexReader.open(dir, true);
|
||||
assertEquals(23, reader.numDocs());
|
||||
reader.close();
|
||||
writer.commit();
|
||||
|
||||
reader = IndexReader.open(dir, true);
|
||||
assertEquals(40, reader.numDocs());
|
||||
reader.close();
|
||||
writer.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testFutureCommit() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
|
||||
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
|
||||
Document doc = new Document();
|
||||
w.addDocument(doc);
|
||||
|
||||
// commit to "first"
|
||||
Map<String,String> commitData = new HashMap<String,String>();
|
||||
commitData.put("tag", "first");
|
||||
w.commit(commitData);
|
||||
|
||||
// commit to "second"
|
||||
w.addDocument(doc);
|
||||
commitData.put("tag", "second");
|
||||
w.commit(commitData);
|
||||
w.close();
|
||||
|
||||
// open "first" with IndexWriter
|
||||
IndexCommit commit = null;
|
||||
for(IndexCommit c : IndexReader.listCommits(dir)) {
|
||||
if (c.getUserData().get("tag").equals("first")) {
|
||||
commit = c;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
assertNotNull(commit);
|
||||
|
||||
w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE).setIndexCommit(commit));
|
||||
|
||||
assertEquals(1, w.numDocs());
|
||||
|
||||
// commit IndexWriter to "third"
|
||||
w.addDocument(doc);
|
||||
commitData.put("tag", "third");
|
||||
w.commit(commitData);
|
||||
w.close();
|
||||
|
||||
// make sure "second" commit is still there
|
||||
commit = null;
|
||||
for(IndexCommit c : IndexReader.listCommits(dir)) {
|
||||
if (c.getUserData().get("tag").equals("second")) {
|
||||
commit = c;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
assertNotNull(commit);
|
||||
|
||||
IndexReader r = IndexReader.open(commit, true);
|
||||
assertEquals(2, r.numDocs());
|
||||
r.close();
|
||||
|
||||
// open "second", w/ writeable IndexReader & commit
|
||||
r = IndexReader.open(commit, NoDeletionPolicy.INSTANCE, false);
|
||||
assertEquals(2, r.numDocs());
|
||||
r.deleteDocument(0);
|
||||
r.deleteDocument(1);
|
||||
commitData.put("tag", "fourth");
|
||||
r.commit(commitData);
|
||||
r.close();
|
||||
|
||||
// make sure "third" commit is still there
|
||||
commit = null;
|
||||
for(IndexCommit c : IndexReader.listCommits(dir)) {
|
||||
if (c.getUserData().get("tag").equals("third")) {
|
||||
commit = c;
|
||||
break;
|
||||
}
|
||||
}
|
||||
assertNotNull(commit);
|
||||
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testNoCommits() throws Exception {
|
||||
// Tests that if we don't call commit(), the directory has 0 commits. This has
|
||||
// changed since LUCENE-2386, where before IW would always commit on a fresh
|
||||
// new index.
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
try {
|
||||
IndexReader.listCommits(dir);
|
||||
fail("listCommits should have thrown an exception over empty index");
|
||||
} catch (IndexNotFoundException e) {
|
||||
// that's expected !
|
||||
}
|
||||
// Closing with no changes should still generate a commit, because it's a new index.
|
||||
writer.close();
|
||||
assertEquals("expected 1 commits!", 1, IndexReader.listCommits(dir).size());
|
||||
dir.close();
|
||||
}
|
||||
|
||||
// LUCENE-1274: test writer.prepareCommit()
|
||||
public void testPrepareCommit() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
|
||||
IndexWriter writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
|
||||
setMaxBufferedDocs(2).
|
||||
setMergePolicy(newLogMergePolicy(5))
|
||||
);
|
||||
writer.commit();
|
||||
|
||||
for (int i = 0; i < 23; i++)
|
||||
TestIndexWriter.addDoc(writer);
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
assertEquals(0, reader.numDocs());
|
||||
|
||||
writer.prepareCommit();
|
||||
|
||||
IndexReader reader2 = IndexReader.open(dir, true);
|
||||
assertEquals(0, reader2.numDocs());
|
||||
|
||||
writer.commit();
|
||||
|
||||
IndexReader reader3 = reader.reopen();
|
||||
assertEquals(0, reader.numDocs());
|
||||
assertEquals(0, reader2.numDocs());
|
||||
assertEquals(23, reader3.numDocs());
|
||||
reader.close();
|
||||
reader2.close();
|
||||
|
||||
for (int i = 0; i < 17; i++)
|
||||
TestIndexWriter.addDoc(writer);
|
||||
|
||||
assertEquals(23, reader3.numDocs());
|
||||
reader3.close();
|
||||
reader = IndexReader.open(dir, true);
|
||||
assertEquals(23, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
writer.prepareCommit();
|
||||
|
||||
reader = IndexReader.open(dir, true);
|
||||
assertEquals(23, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
writer.commit();
|
||||
reader = IndexReader.open(dir, true);
|
||||
assertEquals(40, reader.numDocs());
|
||||
reader.close();
|
||||
writer.close();
|
||||
dir.close();
|
||||
}
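
// A minimal sketch of the two-phase commit pattern exercised above, assuming
// only the helpers already used in this file: prepareCommit() makes pending
// changes durable but still invisible to newly opened readers; only the
// subsequent commit() (or rollback(), tested below) completes or discards them.
public void testTwoPhaseCommitSketch() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
  writer.commit();                 // empty baseline commit

  TestIndexWriter.addDoc(writer);
  writer.prepareCommit();          // phase 1: flushed and fsync'd, not yet visible

  IndexReader r = IndexReader.open(dir, true);
  assertEquals(0, r.numDocs());    // readers still see only the baseline commit
  r.close();

  writer.commit();                 // phase 2: publish the prepared commit
  r = IndexReader.open(dir, true);
  assertEquals(1, r.numDocs());
  r.close();

  writer.close();
  dir.close();
}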
|
||||
|
||||
// LUCENE-1274: test writer.prepareCommit()
|
||||
public void testPrepareCommitRollback() throws IOException {
|
||||
MockDirectoryWrapper dir = newDirectory();
|
||||
dir.setPreventDoubleWrite(false);
|
||||
|
||||
IndexWriter writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
|
||||
setMaxBufferedDocs(2).
|
||||
setMergePolicy(newLogMergePolicy(5))
|
||||
);
|
||||
writer.commit();
|
||||
|
||||
for (int i = 0; i < 23; i++)
|
||||
TestIndexWriter.addDoc(writer);
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
assertEquals(0, reader.numDocs());
|
||||
|
||||
writer.prepareCommit();
|
||||
|
||||
IndexReader reader2 = IndexReader.open(dir, true);
|
||||
assertEquals(0, reader2.numDocs());
|
||||
|
||||
writer.rollback();
|
||||
|
||||
IndexReader reader3 = reader.reopen();
|
||||
assertEquals(0, reader.numDocs());
|
||||
assertEquals(0, reader2.numDocs());
|
||||
assertEquals(0, reader3.numDocs());
|
||||
reader.close();
|
||||
reader2.close();
|
||||
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
for (int i = 0; i < 17; i++)
|
||||
TestIndexWriter.addDoc(writer);
|
||||
|
||||
assertEquals(0, reader3.numDocs());
|
||||
reader3.close();
|
||||
reader = IndexReader.open(dir, true);
|
||||
assertEquals(0, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
writer.prepareCommit();
|
||||
|
||||
reader = IndexReader.open(dir, true);
|
||||
assertEquals(0, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
writer.commit();
|
||||
reader = IndexReader.open(dir, true);
|
||||
assertEquals(17, reader.numDocs());
|
||||
reader.close();
|
||||
writer.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
// LUCENE-1274
|
||||
public void testPrepareCommitNoChanges() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
writer.prepareCommit();
|
||||
writer.commit();
|
||||
writer.close();
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
assertEquals(0, reader.numDocs());
|
||||
reader.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
// LUCENE-1382
|
||||
public void testCommitUserData() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
|
||||
for(int j=0;j<17;j++)
|
||||
TestIndexWriter.addDoc(w);
|
||||
w.close();
|
||||
|
||||
assertEquals(0, IndexReader.getCommitUserData(dir).size());
|
||||
|
||||
IndexReader r = IndexReader.open(dir, true);
|
||||
// commit(Map) never called for this index
|
||||
assertEquals(0, r.getCommitUserData().size());
|
||||
r.close();
|
||||
|
||||
w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
|
||||
for(int j=0;j<17;j++)
|
||||
TestIndexWriter.addDoc(w);
|
||||
Map<String,String> data = new HashMap<String,String>();
|
||||
data.put("label", "test1");
|
||||
w.commit(data);
|
||||
w.close();
|
||||
|
||||
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
|
||||
|
||||
r = IndexReader.open(dir, true);
|
||||
assertEquals("test1", r.getCommitUserData().get("label"));
|
||||
r.close();
|
||||
|
||||
w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
w.optimize();
|
||||
w.close();
|
||||
|
||||
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
|
||||
|
||||
dir.close();
|
||||
}
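
// A minimal sketch of the commit-user-data round trip shown above, assuming only
// the helpers already used in this file; the key/value pair is purely illustrative.
// The map handed to commit(Map) travels with that commit point and can be read
// back statically from the Directory or from any reader opened on it.
public void testCommitUserDataSketch() throws IOException {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
  TestIndexWriter.addDoc(w);

  Map<String,String> data = new HashMap<String,String>();
  data.put("checkpoint", "42");    // hypothetical user data
  w.commit(data);
  w.close();

  assertEquals("42", IndexReader.getCommitUserData(dir).get("checkpoint"));

  IndexReader r = IndexReader.open(dir, true);
  assertEquals("42", r.getCommitUserData().get("checkpoint"));
  r.close();
  dir.close();
}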
|
||||
}
|
|
@ -18,11 +18,21 @@ package org.apache.lucene.index;
|
|||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.Reader;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.analysis.MockTokenizer;
|
||||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.Field.Index;
|
||||
import org.apache.lucene.document.Field.Store;
|
||||
import org.apache.lucene.document.Field.TermVector;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
|
@ -860,4 +870,79 @@ public class TestIndexWriterDelete extends LuceneTestCase {
|
|||
modifier.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testDeleteAllSlowly() throws Exception {
|
||||
final Directory dir = newDirectory();
|
||||
RandomIndexWriter w = new RandomIndexWriter(random, dir);
|
||||
final int NUM_DOCS = atLeast(1000);
|
||||
final List<Integer> ids = new ArrayList<Integer>(NUM_DOCS);
|
||||
for(int id=0;id<NUM_DOCS;id++) {
|
||||
ids.add(id);
|
||||
}
|
||||
Collections.shuffle(ids, random);
|
||||
for(int id : ids) {
|
||||
Document doc = new Document();
|
||||
doc.add(newField("id", ""+id, Field.Index.NOT_ANALYZED));
|
||||
w.addDocument(doc);
|
||||
}
|
||||
Collections.shuffle(ids, random);
|
||||
int upto = 0;
|
||||
while(upto < ids.size()) {
|
||||
final int left = ids.size() - upto;
|
||||
final int inc = Math.min(left, _TestUtil.nextInt(random, 1, 20));
|
||||
final int limit = upto + inc;
|
||||
while(upto < limit) {
|
||||
w.deleteDocuments(new Term("id", ""+ids.get(upto++)));
|
||||
}
|
||||
final IndexReader r = w.getReader();
|
||||
assertEquals(NUM_DOCS - upto, r.numDocs());
|
||||
r.close();
|
||||
}
|
||||
|
||||
w.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testIndexingThenDeleting() throws Exception {
|
||||
final Random r = random;
|
||||
Directory dir = newDirectory();
|
||||
// note this test explicitly disables payloads
|
||||
final Analyzer analyzer = new Analyzer() {
|
||||
@Override
|
||||
public TokenStream tokenStream(String fieldName, Reader reader) {
|
||||
return new MockTokenizer(reader, MockTokenizer.WHITESPACE, true);
|
||||
}
|
||||
};
|
||||
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setRAMBufferSizeMB(1.0).setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH).setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH));
|
||||
w.setInfoStream(VERBOSE ? System.out : null);
|
||||
Document doc = new Document();
|
||||
doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
|
||||
int num = atLeast(3);
|
||||
for (int iter = 0; iter < num; iter++) {
|
||||
int count = 0;
|
||||
|
||||
final boolean doIndexing = r.nextBoolean();
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: iter doIndexing=" + doIndexing);
|
||||
}
|
||||
if (doIndexing) {
|
||||
// Add docs until a flush is triggered
|
||||
final int startFlushCount = w.getFlushCount();
|
||||
while(w.getFlushCount() == startFlushCount) {
|
||||
w.addDocument(doc);
|
||||
count++;
|
||||
}
|
||||
} else {
|
||||
// Delete docs until a flush is triggered
|
||||
final int startFlushCount = w.getFlushCount();
|
||||
while(w.getFlushCount() == startFlushCount) {
|
||||
w.deleteDocuments(new Term("foo", ""+count));
|
||||
count++;
|
||||
}
|
||||
}
|
||||
assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 3000);
|
||||
}
|
||||
w.close();
|
||||
dir.close();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -236,7 +236,6 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
|
|||
assertEquals(count, count2);
|
||||
r2.close();
|
||||
|
||||
_TestUtil.checkIndex(dir);
|
||||
dir.close();
|
||||
}
|
||||
|
||||
|
@ -287,7 +286,6 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
|
|||
assertEquals(count, count2);
|
||||
r2.close();
|
||||
|
||||
_TestUtil.checkIndex(dir);
|
||||
dir.close();
|
||||
}
|
||||
|
||||
|
@ -349,7 +347,6 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
|
|||
// expected
|
||||
}
|
||||
w.close();
|
||||
_TestUtil.checkIndex(dir);
|
||||
dir.close();
|
||||
}
|
||||
|
||||
|
@ -933,7 +930,8 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
|
|||
addDoc(w);
|
||||
w.close();
|
||||
|
||||
for(int i=0;i<200;i++) {
|
||||
int iter = TEST_NIGHTLY ? 200 : 20;
|
||||
for(int i=0;i<iter;i++) {
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: iter " + i);
|
||||
}
|
||||
|
@ -1225,7 +1223,8 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
|
|||
FailOnTermVectors[] failures = new FailOnTermVectors[] {
|
||||
new FailOnTermVectors(FailOnTermVectors.AFTER_INIT_STAGE),
|
||||
new FailOnTermVectors(FailOnTermVectors.INIT_STAGE), };
|
||||
for (int j = 0; j < 3 * RANDOM_MULTIPLIER; j++) {
|
||||
int num = atLeast(3);
|
||||
for (int j = 0; j < num; j++) {
|
||||
for (FailOnTermVectors failure : failures) {
|
||||
MockDirectoryWrapper dir = newDirectory();
|
||||
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
|
||||
|
|
|
@@ -104,9 +104,12 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
         dir,
         newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
             setMaxBufferedDocs(10).
-            setMergePolicy(newLogMergePolicy())
+            setMergePolicy(newLogMergePolicy()).
+            setMergeScheduler(new SerialMergeScheduler())
     );
+
+    writer.setInfoStream(VERBOSE ? System.out : null);

     for (int i = 0; i < 250; i++) {
       addDoc(writer);
       checkInvariants(writer);
@ -19,6 +19,9 @@ import org.apache.lucene.store.Directory;
|
|||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.Field.Index;
|
||||
import org.apache.lucene.document.Field.Store;
|
||||
import org.apache.lucene.document.Field.TermVector;
|
||||
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
|
||||
|
@ -113,4 +116,187 @@ public class TestIndexWriterMerging extends LuceneTestCase
|
|||
}
|
||||
writer.close();
|
||||
}
|
||||
|
||||
// LUCENE-325: test expungeDeletes, when 2 singular merges
|
||||
// are required
|
||||
public void testExpungeDeletes() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random))
|
||||
.setMaxBufferedDocs(2).setRAMBufferSizeMB(
|
||||
IndexWriterConfig.DISABLE_AUTO_FLUSH));
|
||||
writer.setInfoStream(VERBOSE ? System.out : null);
|
||||
Document document = new Document();
|
||||
|
||||
document = new Document();
|
||||
Field storedField = newField("stored", "stored", Field.Store.YES,
|
||||
Field.Index.NO);
|
||||
document.add(storedField);
|
||||
Field termVectorField = newField("termVector", "termVector",
|
||||
Field.Store.NO, Field.Index.NOT_ANALYZED,
|
||||
Field.TermVector.WITH_POSITIONS_OFFSETS);
|
||||
document.add(termVectorField);
|
||||
for(int i=0;i<10;i++)
|
||||
writer.addDocument(document);
|
||||
writer.close();
|
||||
|
||||
IndexReader ir = IndexReader.open(dir, false);
|
||||
assertEquals(10, ir.maxDoc());
|
||||
assertEquals(10, ir.numDocs());
|
||||
ir.deleteDocument(0);
|
||||
ir.deleteDocument(7);
|
||||
assertEquals(8, ir.numDocs());
|
||||
ir.close();
|
||||
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
|
||||
assertEquals(8, writer.numDocs());
|
||||
assertEquals(10, writer.maxDoc());
|
||||
writer.expungeDeletes();
|
||||
assertEquals(8, writer.numDocs());
|
||||
writer.close();
|
||||
ir = IndexReader.open(dir, true);
|
||||
assertEquals(8, ir.maxDoc());
|
||||
assertEquals(8, ir.numDocs());
|
||||
ir.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
// LUCENE-325: test expungeDeletes, when many adjacent merges are required
|
||||
public void testExpungeDeletes2() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
|
||||
setMaxBufferedDocs(2).
|
||||
setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
|
||||
setMergePolicy(newLogMergePolicy(50))
|
||||
);
|
||||
|
||||
Document document = new Document();
|
||||
|
||||
document = new Document();
|
||||
Field storedField = newField("stored", "stored", Store.YES,
|
||||
Index.NO);
|
||||
document.add(storedField);
|
||||
Field termVectorField = newField("termVector", "termVector",
|
||||
Store.NO, Index.NOT_ANALYZED,
|
||||
TermVector.WITH_POSITIONS_OFFSETS);
|
||||
document.add(termVectorField);
|
||||
for(int i=0;i<98;i++)
|
||||
writer.addDocument(document);
|
||||
writer.close();
|
||||
|
||||
IndexReader ir = IndexReader.open(dir, false);
|
||||
assertEquals(98, ir.maxDoc());
|
||||
assertEquals(98, ir.numDocs());
|
||||
for(int i=0;i<98;i+=2)
|
||||
ir.deleteDocument(i);
|
||||
assertEquals(49, ir.numDocs());
|
||||
ir.close();
|
||||
|
||||
writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
|
||||
setMergePolicy(newLogMergePolicy(3))
|
||||
);
|
||||
assertEquals(49, writer.numDocs());
|
||||
writer.expungeDeletes();
|
||||
writer.close();
|
||||
ir = IndexReader.open(dir, true);
|
||||
assertEquals(49, ir.maxDoc());
|
||||
assertEquals(49, ir.numDocs());
|
||||
ir.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
// LUCENE-325: test expungeDeletes without waiting, when
|
||||
// many adjacent merges are required
|
||||
public void testExpungeDeletes3() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
|
||||
setMaxBufferedDocs(2).
|
||||
setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
|
||||
setMergePolicy(newLogMergePolicy(50))
|
||||
);
|
||||
|
||||
Document document = new Document();
|
||||
|
||||
document = new Document();
|
||||
Field storedField = newField("stored", "stored", Field.Store.YES,
|
||||
Field.Index.NO);
|
||||
document.add(storedField);
|
||||
Field termVectorField = newField("termVector", "termVector",
|
||||
Field.Store.NO, Field.Index.NOT_ANALYZED,
|
||||
Field.TermVector.WITH_POSITIONS_OFFSETS);
|
||||
document.add(termVectorField);
|
||||
for(int i=0;i<98;i++)
|
||||
writer.addDocument(document);
|
||||
writer.close();
|
||||
|
||||
IndexReader ir = IndexReader.open(dir, false);
|
||||
assertEquals(98, ir.maxDoc());
|
||||
assertEquals(98, ir.numDocs());
|
||||
for(int i=0;i<98;i+=2)
|
||||
ir.deleteDocument(i);
|
||||
assertEquals(49, ir.numDocs());
|
||||
ir.close();
|
||||
|
||||
writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).
|
||||
setMergePolicy(newLogMergePolicy(3))
|
||||
);
|
||||
writer.expungeDeletes(false);
|
||||
writer.close();
|
||||
ir = IndexReader.open(dir, true);
|
||||
assertEquals(49, ir.maxDoc());
|
||||
assertEquals(49, ir.numDocs());
|
||||
ir.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
// Just intercepts all merges & verifies that we are never
|
||||
// merging a segment with >= 20 (maxMergeDocs) docs
|
||||
private class MyMergeScheduler extends MergeScheduler {
|
||||
@Override
|
||||
synchronized public void merge(IndexWriter writer)
|
||||
throws CorruptIndexException, IOException {
|
||||
|
||||
while(true) {
|
||||
MergePolicy.OneMerge merge = writer.getNextMerge();
|
||||
if (merge == null) {
|
||||
break;
|
||||
}
|
||||
for(int i=0;i<merge.segments.size();i++) {
|
||||
assert merge.segments.get(i).docCount < 20;
|
||||
}
|
||||
writer.merge(merge);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {}
|
||||
}
|
||||
|
||||
// LUCENE-1013
|
||||
public void testSetMaxMergeDocs() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriterConfig conf = newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random))
|
||||
.setMergeScheduler(new MyMergeScheduler()).setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy());
|
||||
LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
|
||||
lmp.setMaxMergeDocs(20);
|
||||
lmp.setMergeFactor(2);
|
||||
IndexWriter iw = new IndexWriter(dir, conf);
|
||||
iw.setInfoStream(VERBOSE ? System.out : null);
|
||||
Document document = new Document();
|
||||
document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
|
||||
Field.TermVector.YES));
|
||||
for(int i=0;i<177;i++)
|
||||
iw.addDocument(document);
|
||||
iw.close();
|
||||
dir.close();
|
||||
}
|
||||
}
|
||||
|
|
|
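The LUCENE-325 tests above exercise expungeDeletes() from several angles. A minimal companion sketch of the same pattern, written against the pre-4.0 test API that appears in this commit (newDirectory(), MockAnalyzer, a non-read-only IndexReader for deletions); it is illustrative only, not part of the commit:

package org.apache.lucene.index;

import java.io.IOException;

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;

public class TestExpungeDeletesSketch extends LuceneTestCase {
  public void testReclaimsDeletedDocs() throws IOException {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
    Document doc = new Document();
    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.NOT_ANALYZED));
    for (int i = 0; i < 10; i++) {
      writer.addDocument(doc);
    }
    writer.close();

    // delete two documents through a non-read-only reader
    IndexReader ir = IndexReader.open(dir, false);
    ir.deleteDocument(0);
    ir.deleteDocument(5);
    ir.close();

    // expungeDeletes() merges away the segments that still carry deletions
    writer = new IndexWriter(dir, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
    assertEquals(8, writer.numDocs());   // live documents
    assertEquals(10, writer.maxDoc());   // deleted documents still occupy slots
    writer.expungeDeletes();
    assertEquals(8, writer.numDocs());
    writer.close();

    ir = IndexReader.open(dir, true);
    assertEquals(8, ir.maxDoc());        // slots reclaimed after the merge
    ir.close();
    dir.close();
  }
}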
@ -114,7 +114,6 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
|
|||
|
||||
// Make sure reader can open the index:
|
||||
IndexReader.open(dir, true).close();
|
||||
_TestUtil.checkIndex(dir);
|
||||
}
|
||||
|
||||
dir.close();
|
||||
|
@ -491,7 +490,6 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
|
|||
w.addDocument(doc);
|
||||
w.close();
|
||||
|
||||
_TestUtil.checkIndex(dir);
|
||||
dir.close();
|
||||
}
|
||||
|
||||
|
|
|
@ -28,6 +28,7 @@ import java.util.ArrayList;
|
|||
import java.util.List;
|
||||
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.MockDirectoryWrapper;
|
||||
import org.apache.lucene.util.Constants;
|
||||
import org.apache.lucene.util._TestUtil;
|
||||
|
||||
|
@ -63,7 +64,7 @@ public class TestIndexWriterOnJRECrash extends TestNRTThreads {
|
|||
}
|
||||
} else {
|
||||
// we are the fork, setup a crashing thread
|
||||
final int crashTime = _TestUtil.nextInt(random, 500, 4000);
|
||||
final int crashTime = TEST_NIGHTLY ? _TestUtil.nextInt(random, 500, 4000) : _TestUtil.nextInt(random, 300, 1000);
|
||||
Thread t = new Thread() {
|
||||
@Override
|
||||
public void run() {
|
||||
|
@ -123,7 +124,8 @@ public class TestIndexWriterOnJRECrash extends TestNRTThreads {
|
|||
*/
|
||||
public boolean checkIndexes(File file) throws IOException {
|
||||
if (file.isDirectory()) {
|
||||
Directory dir = newFSDirectory(file);
|
||||
MockDirectoryWrapper dir = newFSDirectory(file);
|
||||
dir.setCheckIndexOnClose(false); // don't double-checkindex
|
||||
if (IndexReader.indexExists(dir)) {
|
||||
if (VERBOSE) {
|
||||
System.err.println("Checking index: " + file);
|
||||
|
|
|
@ -0,0 +1,215 @@
|
|||
package org.apache.lucene.index;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.Field.Index;
|
||||
import org.apache.lucene.document.Field.Store;
|
||||
import org.apache.lucene.document.Field.TermVector;
|
||||
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.MockDirectoryWrapper;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util._TestUtil;
|
||||
|
||||
public class TestIndexWriterOptimize extends LuceneTestCase {
|
||||
public void testOptimizeMaxNumSegments() throws IOException {
|
||||
|
||||
MockDirectoryWrapper dir = newDirectory();
|
||||
|
||||
final Document doc = new Document();
|
||||
doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
|
||||
final int incrMin = TEST_NIGHTLY ? 15 : 40;
|
||||
for(int numDocs=10;numDocs<500;numDocs += _TestUtil.nextInt(random, incrMin, 5*incrMin)) {
|
||||
LogDocMergePolicy ldmp = new LogDocMergePolicy();
|
||||
ldmp.setMinMergeDocs(1);
|
||||
ldmp.setMergeFactor(5);
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random))
|
||||
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2).setMergePolicy(
|
||||
ldmp));
|
||||
for(int j=0;j<numDocs;j++)
|
||||
writer.addDocument(doc);
|
||||
writer.close();
|
||||
|
||||
SegmentInfos sis = new SegmentInfos();
|
||||
sis.read(dir);
|
||||
final int segCount = sis.size();
|
||||
|
||||
ldmp = new LogDocMergePolicy();
|
||||
ldmp.setMergeFactor(5);
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
|
||||
new MockAnalyzer(random)).setMergePolicy(ldmp));
|
||||
writer.optimize(3);
|
||||
writer.close();
|
||||
|
||||
sis = new SegmentInfos();
|
||||
sis.read(dir);
|
||||
final int optSegCount = sis.size();
|
||||
|
||||
if (segCount < 3)
|
||||
assertEquals(segCount, optSegCount);
|
||||
else
|
||||
assertEquals(3, optSegCount);
|
||||
}
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testOptimizeMaxNumSegments2() throws IOException {
|
||||
MockDirectoryWrapper dir = newDirectory();
|
||||
|
||||
final Document doc = new Document();
|
||||
doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
|
||||
|
||||
LogDocMergePolicy ldmp = new LogDocMergePolicy();
|
||||
ldmp.setMinMergeDocs(1);
|
||||
ldmp.setMergeFactor(4);
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random))
|
||||
.setMaxBufferedDocs(2).setMergePolicy(ldmp).setMergeScheduler(new ConcurrentMergeScheduler()));
|
||||
|
||||
for(int iter=0;iter<10;iter++) {
|
||||
for(int i=0;i<19;i++)
|
||||
writer.addDocument(doc);
|
||||
|
||||
writer.commit();
|
||||
writer.waitForMerges();
|
||||
writer.commit();
|
||||
|
||||
SegmentInfos sis = new SegmentInfos();
|
||||
sis.read(dir);
|
||||
|
||||
final int segCount = sis.size();
|
||||
|
||||
writer.optimize(7);
|
||||
writer.commit();
|
||||
writer.waitForMerges();
|
||||
|
||||
sis = new SegmentInfos();
|
||||
sis.read(dir);
|
||||
final int optSegCount = sis.size();
|
||||
|
||||
if (segCount < 7)
|
||||
assertEquals(segCount, optSegCount);
|
||||
else
|
||||
assertEquals(7, optSegCount);
|
||||
}
|
||||
writer.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
  /**
   * Make sure optimize doesn't use more than 4X the
   * starting index size as its temporary free space
   * (the limit the assertion below actually enforces).
   */
  public void testOptimizeTempSpaceUsage() throws IOException {
|
||||
|
||||
MockDirectoryWrapper dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10).setMergePolicy(newLogMergePolicy()));
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: config1=" + writer.getConfig());
|
||||
}
|
||||
|
||||
for(int j=0;j<500;j++) {
|
||||
TestIndexWriter.addDocWithIndex(writer, j);
|
||||
}
|
||||
final int termIndexInterval = writer.getConfig().getTermIndexInterval();
|
||||
// force one extra segment w/ different doc store so
|
||||
// we see the doc stores get merged
|
||||
writer.commit();
|
||||
TestIndexWriter.addDocWithIndex(writer, 500);
|
||||
writer.close();
|
||||
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: start disk usage");
|
||||
}
|
||||
long startDiskUsage = 0;
|
||||
String[] files = dir.listAll();
|
||||
for(int i=0;i<files.length;i++) {
|
||||
startDiskUsage += dir.fileLength(files[i]);
|
||||
if (VERBOSE) {
|
||||
System.out.println(files[i] + ": " + dir.fileLength(files[i]));
|
||||
}
|
||||
}
|
||||
|
||||
dir.resetMaxUsedSizeInBytes();
|
||||
dir.setTrackDiskUsage(true);
|
||||
|
||||
    // Important to use the same term index interval; otherwise a
    // smaller one here could increase the disk usage and
    // cause a false failure:
    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND).setTermIndexInterval(termIndexInterval).setMergePolicy(newLogMergePolicy()));
|
||||
writer.setInfoStream(VERBOSE ? System.out : null);
|
||||
writer.optimize();
|
||||
writer.close();
|
||||
long maxDiskUsage = dir.getMaxUsedSizeInBytes();
|
||||
assertTrue("optimize used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (4*startDiskUsage) + " (= 4X starting usage)",
|
||||
maxDiskUsage <= 4*startDiskUsage);
|
||||
dir.close();
|
||||
}
|
||||
|
||||
  // Test calling optimize(false), whereby optimize is kicked
  // off but we don't wait for it to finish (writer.close(),
  // however, does wait)
  public void testBackgroundOptimize() throws IOException {
|
||||
|
||||
Directory dir = newDirectory();
|
||||
for(int pass=0;pass<2;pass++) {
|
||||
IndexWriter writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
|
||||
setOpenMode(OpenMode.CREATE).
|
||||
setMaxBufferedDocs(2).
|
||||
setMergePolicy(newLogMergePolicy(51))
|
||||
);
|
||||
Document doc = new Document();
|
||||
doc.add(newField("field", "aaa", Store.NO, Index.NOT_ANALYZED));
|
||||
for(int i=0;i<100;i++)
|
||||
writer.addDocument(doc);
|
||||
writer.optimize(false);
|
||||
|
||||
if (0 == pass) {
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
assertTrue(reader.isOptimized());
|
||||
reader.close();
|
||||
} else {
|
||||
// Get another segment to flush so we can verify it is
|
||||
// NOT included in the optimization
|
||||
writer.addDocument(doc);
|
||||
writer.addDocument(doc);
|
||||
writer.close();
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
assertTrue(!reader.isOptimized());
|
||||
reader.close();
|
||||
|
||||
SegmentInfos infos = new SegmentInfos();
|
||||
infos.read(dir);
|
||||
assertEquals(2, infos.size());
|
||||
}
|
||||
}
|
||||
|
||||
dir.close();
|
||||
}
|
||||
}
|
|
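TestIndexWriterOptimize above checks that optimize(maxNumSegments) leaves at most that many segments. A condensed sketch of that contract, assuming the same pre-4.0 API used in the tests above (writer.optimize(int), SegmentInfos.read); names are invented and the class is not part of the commit:

package org.apache.lucene.index;

import java.io.IOException;

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;

public class TestOptimizeMaxSegmentsSketch extends LuceneTestCase {
  public void testOptimizeDownToThreeSegments() throws IOException {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
    Document doc = new Document();
    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
    for (int i = 0; i < 50; i++) {
      writer.addDocument(doc);   // small flushes produce many segments
    }
    writer.optimize(3);          // merge down to at most 3 segments
    writer.close();

    SegmentInfos sis = new SegmentInfos();
    sis.read(dir);
    assertTrue("expected at most 3 segments, got " + sis.size(), sis.size() <= 3);
    dir.close();
  }
}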
@ -658,7 +658,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
|
|||
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);
|
||||
|
||||
int num = 100 * RANDOM_MULTIPLIER;
|
||||
int num = atLeast(100);
|
||||
for (int i = 0; i < num; i++) {
|
||||
writer.addDocument(createDocument(i, "test", 4));
|
||||
}
|
||||
|
@ -821,7 +821,6 @@ public class TestIndexWriterReader extends LuceneTestCase {
|
|||
|
||||
writer.close();
|
||||
|
||||
_TestUtil.checkIndex(dir1);
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
|
@ -908,7 +907,6 @@ public class TestIndexWriterReader extends LuceneTestCase {
|
|||
assertEquals(0, excs.size());
|
||||
writer.close();
|
||||
|
||||
_TestUtil.checkIndex(dir1);
|
||||
r.close();
|
||||
dir1.close();
|
||||
}
|
||||
|
|
|
@ -0,0 +1,337 @@
|
|||
package org.apache.lucene.index;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.Random;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.CharsRef;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.UnicodeUtil;
|
||||
|
||||
public class TestIndexWriterUnicode extends LuceneTestCase {
|
||||
|
||||
final String[] utf8Data = new String[] {
|
||||
// unpaired low surrogate
|
||||
"ab\udc17cd", "ab\ufffdcd",
|
||||
"\udc17abcd", "\ufffdabcd",
|
||||
"\udc17", "\ufffd",
|
||||
"ab\udc17\udc17cd", "ab\ufffd\ufffdcd",
|
||||
"\udc17\udc17abcd", "\ufffd\ufffdabcd",
|
||||
"\udc17\udc17", "\ufffd\ufffd",
|
||||
|
||||
// unpaired high surrogate
|
||||
"ab\ud917cd", "ab\ufffdcd",
|
||||
"\ud917abcd", "\ufffdabcd",
|
||||
"\ud917", "\ufffd",
|
||||
"ab\ud917\ud917cd", "ab\ufffd\ufffdcd",
|
||||
"\ud917\ud917abcd", "\ufffd\ufffdabcd",
|
||||
"\ud917\ud917", "\ufffd\ufffd",
|
||||
|
||||
// backwards surrogates
|
||||
"ab\udc17\ud917cd", "ab\ufffd\ufffdcd",
|
||||
"\udc17\ud917abcd", "\ufffd\ufffdabcd",
|
||||
"\udc17\ud917", "\ufffd\ufffd",
|
||||
"ab\udc17\ud917\udc17\ud917cd", "ab\ufffd\ud917\udc17\ufffdcd",
|
||||
"\udc17\ud917\udc17\ud917abcd", "\ufffd\ud917\udc17\ufffdabcd",
|
||||
"\udc17\ud917\udc17\ud917", "\ufffd\ud917\udc17\ufffd"
|
||||
};
|
||||
|
||||
private int nextInt(int lim) {
|
||||
return random.nextInt(lim);
|
||||
}
|
||||
|
||||
private int nextInt(int start, int end) {
|
||||
return start + nextInt(end-start);
|
||||
}
|
||||
|
||||
private boolean fillUnicode(char[] buffer, char[] expected, int offset, int count) {
|
||||
final int len = offset + count;
|
||||
boolean hasIllegal = false;
|
||||
|
||||
if (offset > 0 && buffer[offset] >= 0xdc00 && buffer[offset] < 0xe000)
|
||||
// Don't start in the middle of a valid surrogate pair
|
||||
offset--;
|
||||
|
||||
for(int i=offset;i<len;i++) {
|
||||
int t = nextInt(6);
|
||||
if (0 == t && i < len-1) {
|
||||
// Make a surrogate pair
|
||||
// High surrogate
|
||||
expected[i] = buffer[i++] = (char) nextInt(0xd800, 0xdc00);
|
||||
// Low surrogate
|
||||
expected[i] = buffer[i] = (char) nextInt(0xdc00, 0xe000);
|
||||
} else if (t <= 1)
|
||||
expected[i] = buffer[i] = (char) nextInt(0x80);
|
||||
else if (2 == t)
|
||||
expected[i] = buffer[i] = (char) nextInt(0x80, 0x800);
|
||||
else if (3 == t)
|
||||
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
|
||||
else if (4 == t)
|
||||
expected[i] = buffer[i] = (char) nextInt(0xe000, 0xffff);
|
||||
else if (5 == t && i < len-1) {
|
||||
// Illegal unpaired surrogate
|
||||
if (nextInt(10) == 7) {
|
||||
if (random.nextBoolean())
|
||||
buffer[i] = (char) nextInt(0xd800, 0xdc00);
|
||||
else
|
||||
buffer[i] = (char) nextInt(0xdc00, 0xe000);
|
||||
expected[i++] = 0xfffd;
|
||||
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
|
||||
hasIllegal = true;
|
||||
} else
|
||||
expected[i] = buffer[i] = (char) nextInt(0x800, 0xd800);
|
||||
} else {
|
||||
expected[i] = buffer[i] = ' ';
|
||||
}
|
||||
}
|
||||
|
||||
return hasIllegal;
|
||||
}
|
||||
|
||||
// both start & end are inclusive
|
||||
private final int getInt(Random r, int start, int end) {
|
||||
return start + r.nextInt(1+end-start);
|
||||
}
|
||||
|
||||
private final String asUnicodeChar(char c) {
|
||||
return "U+" + Integer.toHexString(c);
|
||||
}
|
||||
|
||||
private final String termDesc(String s) {
|
||||
final String s0;
|
||||
assertTrue(s.length() <= 2);
|
||||
if (s.length() == 1) {
|
||||
s0 = asUnicodeChar(s.charAt(0));
|
||||
} else {
|
||||
s0 = asUnicodeChar(s.charAt(0)) + "," + asUnicodeChar(s.charAt(1));
|
||||
}
|
||||
return s0;
|
||||
}
|
||||
|
||||
private void checkTermsOrder(IndexReader r, Set<String> allTerms, boolean isTop) throws IOException {
|
||||
TermsEnum terms = MultiFields.getFields(r).terms("f").iterator();
|
||||
|
||||
BytesRef last = new BytesRef();
|
||||
|
||||
Set<String> seenTerms = new HashSet<String>();
|
||||
|
||||
while(true) {
|
||||
final BytesRef term = terms.next();
|
||||
if (term == null) {
|
||||
break;
|
||||
}
|
||||
|
||||
assertTrue(last.compareTo(term) < 0);
|
||||
last.copy(term);
|
||||
|
||||
final String s = term.utf8ToString();
|
||||
assertTrue("term " + termDesc(s) + " was not added to index (count=" + allTerms.size() + ")", allTerms.contains(s));
|
||||
seenTerms.add(s);
|
||||
}
|
||||
|
||||
if (isTop) {
|
||||
assertTrue(allTerms.equals(seenTerms));
|
||||
}
|
||||
|
||||
// Test seeking:
|
||||
Iterator<String> it = seenTerms.iterator();
|
||||
while(it.hasNext()) {
|
||||
BytesRef tr = new BytesRef(it.next());
|
||||
assertEquals("seek failed for term=" + termDesc(tr.utf8ToString()),
|
||||
TermsEnum.SeekStatus.FOUND,
|
||||
terms.seek(tr));
|
||||
}
|
||||
}
|
||||
|
||||
// LUCENE-510
|
||||
public void testRandomUnicodeStrings() throws Throwable {
|
||||
char[] buffer = new char[20];
|
||||
char[] expected = new char[20];
|
||||
|
||||
BytesRef utf8 = new BytesRef(20);
|
||||
CharsRef utf16 = new CharsRef(20);
|
||||
|
||||
int num = atLeast(100000);
|
||||
for (int iter = 0; iter < num; iter++) {
|
||||
boolean hasIllegal = fillUnicode(buffer, expected, 0, 20);
|
||||
|
||||
UnicodeUtil.UTF16toUTF8(buffer, 0, 20, utf8);
|
||||
if (!hasIllegal) {
|
||||
byte[] b = new String(buffer, 0, 20).getBytes("UTF-8");
|
||||
assertEquals(b.length, utf8.length);
|
||||
for(int i=0;i<b.length;i++)
|
||||
assertEquals(b[i], utf8.bytes[i]);
|
||||
}
|
||||
|
||||
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
|
||||
assertEquals(utf16.length, 20);
|
||||
for(int i=0;i<20;i++)
|
||||
assertEquals(expected[i], utf16.chars[i]);
|
||||
}
|
||||
}
|
||||
|
||||
// LUCENE-510
|
||||
public void testAllUnicodeChars() throws Throwable {
|
||||
|
||||
BytesRef utf8 = new BytesRef(10);
|
||||
CharsRef utf16 = new CharsRef(10);
|
||||
char[] chars = new char[2];
|
||||
for(int ch=0;ch<0x0010FFFF;ch++) {
|
||||
|
||||
if (ch == 0xd800)
|
||||
// Skip invalid code points
|
||||
ch = 0xe000;
|
||||
|
||||
int len = 0;
|
||||
if (ch <= 0xffff) {
|
||||
chars[len++] = (char) ch;
|
||||
} else {
|
||||
chars[len++] = (char) (((ch-0x0010000) >> 10) + UnicodeUtil.UNI_SUR_HIGH_START);
|
||||
chars[len++] = (char) (((ch-0x0010000) & 0x3FFL) + UnicodeUtil.UNI_SUR_LOW_START);
|
||||
}
|
||||
|
||||
UnicodeUtil.UTF16toUTF8(chars, 0, len, utf8);
|
||||
|
||||
String s1 = new String(chars, 0, len);
|
||||
String s2 = new String(utf8.bytes, 0, utf8.length, "UTF-8");
|
||||
assertEquals("codepoint " + ch, s1, s2);
|
||||
|
||||
UnicodeUtil.UTF8toUTF16(utf8.bytes, 0, utf8.length, utf16);
|
||||
assertEquals("codepoint " + ch, s1, new String(utf16.chars, 0, utf16.length));
|
||||
|
||||
byte[] b = s1.getBytes("UTF-8");
|
||||
assertEquals(utf8.length, b.length);
|
||||
for(int j=0;j<utf8.length;j++)
|
||||
assertEquals(utf8.bytes[j], b[j]);
|
||||
}
|
||||
}
|
||||
|
||||
public void testEmbeddedFFFF() throws Throwable {
|
||||
Directory d = newDirectory();
|
||||
IndexWriter w = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
Document doc = new Document();
|
||||
doc.add(newField("field", "a a\uffffb", Field.Store.NO, Field.Index.ANALYZED));
|
||||
w.addDocument(doc);
|
||||
doc = new Document();
|
||||
doc.add(newField("field", "a", Field.Store.NO, Field.Index.ANALYZED));
|
||||
w.addDocument(doc);
|
||||
IndexReader r = w.getReader();
|
||||
assertEquals(1, r.docFreq(new Term("field", "a\uffffb")));
|
||||
r.close();
|
||||
w.close();
|
||||
d.close();
|
||||
}
|
||||
|
||||
// LUCENE-510
|
||||
public void testInvalidUTF16() throws Throwable {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new TestIndexWriter.StringSplitAnalyzer()));
|
||||
Document doc = new Document();
|
||||
|
||||
final int count = utf8Data.length/2;
|
||||
for(int i=0;i<count;i++)
|
||||
doc.add(newField("f" + i, utf8Data[2*i], Field.Store.YES, Field.Index.ANALYZED));
|
||||
w.addDocument(doc);
|
||||
w.close();
|
||||
|
||||
IndexReader ir = IndexReader.open(dir, true);
|
||||
Document doc2 = ir.document(0);
|
||||
for(int i=0;i<count;i++) {
|
||||
assertEquals("field " + i + " was not indexed correctly", 1, ir.docFreq(new Term("f"+i, utf8Data[2*i+1])));
|
||||
assertEquals("field " + i + " is incorrect", utf8Data[2*i+1], doc2.getField("f"+i).stringValue());
|
||||
}
|
||||
ir.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
// Make sure terms, including ones with surrogate pairs,
|
||||
// sort in codepoint sort order by default
|
||||
public void testTermUTF16SortOrder() throws Throwable {
|
||||
Random rnd = random;
|
||||
Directory dir = newDirectory();
|
||||
RandomIndexWriter writer = new RandomIndexWriter(rnd, dir);
|
||||
Document d = new Document();
|
||||
// Single segment
|
||||
Field f = newField("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
|
||||
d.add(f);
|
||||
char[] chars = new char[2];
|
||||
final Set<String> allTerms = new HashSet<String>();
|
||||
|
||||
int num = atLeast(200);
|
||||
for (int i = 0; i < num; i++) {
|
||||
|
||||
final String s;
|
||||
if (rnd.nextBoolean()) {
|
||||
// Single char
|
||||
if (rnd.nextBoolean()) {
|
||||
// Above surrogates
|
||||
chars[0] = (char) getInt(rnd, 1+UnicodeUtil.UNI_SUR_LOW_END, 0xffff);
|
||||
} else {
|
||||
// Below surrogates
|
||||
chars[0] = (char) getInt(rnd, 0, UnicodeUtil.UNI_SUR_HIGH_START-1);
|
||||
}
|
||||
s = new String(chars, 0, 1);
|
||||
} else {
|
||||
// Surrogate pair
|
||||
chars[0] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_HIGH_START, UnicodeUtil.UNI_SUR_HIGH_END);
|
||||
assertTrue(((int) chars[0]) >= UnicodeUtil.UNI_SUR_HIGH_START && ((int) chars[0]) <= UnicodeUtil.UNI_SUR_HIGH_END);
|
||||
chars[1] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_LOW_START, UnicodeUtil.UNI_SUR_LOW_END);
|
||||
s = new String(chars, 0, 2);
|
||||
}
|
||||
allTerms.add(s);
|
||||
f.setValue(s);
|
||||
|
||||
writer.addDocument(d);
|
||||
|
||||
if ((1+i) % 42 == 0) {
|
||||
writer.commit();
|
||||
}
|
||||
}
|
||||
|
||||
IndexReader r = writer.getReader();
|
||||
|
||||
// Test each sub-segment
|
||||
final IndexReader[] subs = r.getSequentialSubReaders();
|
||||
for(int i=0;i<subs.length;i++) {
|
||||
checkTermsOrder(subs[i], allTerms, false);
|
||||
}
|
||||
checkTermsOrder(r, allTerms, true);
|
||||
|
||||
// Test multi segment
|
||||
r.close();
|
||||
|
||||
writer.optimize();
|
||||
|
||||
// Test optimized single segment
|
||||
r = writer.getReader();
|
||||
checkTermsOrder(r, allTerms, true);
|
||||
r.close();
|
||||
|
||||
writer.close();
|
||||
dir.close();
|
||||
}
|
||||
}
|
|
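TestIndexWriterUnicode above builds surrogate pairs by hand from the UNI_SUR_* constants. A small standalone illustration of the same arithmetic using only the JDK; the 0xD800/0xDC00 offsets match the constants used in the test, and the code point is just an example value:

public final class SurrogatePairDemo {
  public static void main(String[] args) {
    int cp = 0x1040F;   // a supplementary code point, like those in the tests above
    // manual encoding, mirroring the UNI_SUR_HIGH_START/UNI_SUR_LOW_START arithmetic
    char high = (char) (((cp - 0x10000) >> 10) + 0xD800);
    char low  = (char) (((cp - 0x10000) & 0x3FF) + 0xDC00);
    String manual = new String(new char[] { high, low });
    String viaJdk = new String(Character.toChars(cp));
    System.out.printf("U+%X -> high=U+%04X low=U+%04X%n", cp, (int) high, (int) low);
    System.out.println("JDK agrees: " + manual.equals(viaJdk)
        + ", round-trips: " + (manual.codePointAt(0) == cp));
  }
}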
@ -25,6 +25,8 @@ import org.apache.lucene.analysis.MockAnalyzer;
|
|||
import org.apache.lucene.document.*;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
|
||||
/**
|
||||
|
@ -33,8 +35,8 @@ import org.apache.lucene.util.LuceneTestCase;
|
|||
*/
|
||||
public class TestLazyBug extends LuceneTestCase {
|
||||
|
||||
public static int NUM_DOCS = 500;
|
||||
public static int NUM_FIELDS = 100;
|
||||
public static int NUM_DOCS = TEST_NIGHTLY ? 500 : 50;
|
||||
public static int NUM_FIELDS = TEST_NIGHTLY ? 100 : 10;
|
||||
|
||||
private static String[] data = new String[] {
|
||||
"now",
|
||||
|
@ -49,6 +51,19 @@ public class TestLazyBug extends LuceneTestCase {
|
|||
private static Set<String> dataset = asSet(data);
|
||||
|
||||
private static String MAGIC_FIELD = "f"+(NUM_FIELDS/3);
|
||||
|
||||
private static Directory directory;
|
||||
|
||||
@BeforeClass
|
||||
public static void beforeClass() throws Exception {
|
||||
directory = makeIndex();
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void afterClass() throws Exception {
|
||||
directory.close();
|
||||
directory = null;
|
||||
}
|
||||
|
||||
private static FieldSelector SELECTOR = new FieldSelector() {
|
||||
public FieldSelectorResult accept(String f) {
|
||||
|
@ -59,7 +74,7 @@ public class TestLazyBug extends LuceneTestCase {
|
|||
}
|
||||
};
|
||||
|
||||
private Directory makeIndex() throws Exception {
|
||||
private static Directory makeIndex() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
try {
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
|
||||
|
@ -72,7 +87,7 @@ public class TestLazyBug extends LuceneTestCase {
|
|||
doc.add(newField("f"+f,
|
||||
data[f % data.length]
|
||||
+ '#' + data[random.nextInt(data.length)],
|
||||
Field.Store.YES,
|
||||
Field.Store.NO,
|
||||
Field.Index.ANALYZED));
|
||||
}
|
||||
writer.addDocument(doc);
|
||||
|
@ -85,8 +100,7 @@ public class TestLazyBug extends LuceneTestCase {
|
|||
}
|
||||
|
||||
public void doTest(int[] docs) throws Exception {
|
||||
Directory dir = makeIndex();
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(directory, true);
|
||||
for (int i = 0; i < docs.length; i++) {
|
||||
Document d = reader.document(docs[i], SELECTOR);
|
||||
d.get(MAGIC_FIELD);
|
||||
|
@ -109,19 +123,18 @@ public class TestLazyBug extends LuceneTestCase {
|
|||
}
|
||||
}
|
||||
reader.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testLazyWorks() throws Exception {
|
||||
doTest(new int[] { 399 });
|
||||
doTest(new int[] { NUM_DOCS-1 });
|
||||
}
|
||||
|
||||
public void testLazyAlsoWorks() throws Exception {
|
||||
doTest(new int[] { 399, 150 });
|
||||
doTest(new int[] { NUM_DOCS-1, NUM_DOCS/2 });
|
||||
}
|
||||
|
||||
public void testLazyBroken() throws Exception {
|
||||
doTest(new int[] { 150, 399 });
|
||||
doTest(new int[] { NUM_DOCS/2, NUM_DOCS-1 });
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
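The TestLazyBug change above moves index creation into @BeforeClass/@AfterClass so one Directory is shared by every test in the class. A minimal, non-Lucene illustration of that JUnit 4 fixture pattern; class and field names are invented for the example:

import static org.junit.Assert.assertNotNull;

import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class StaticFixturePatternTest {
  private static StringBuilder sharedFixture;   // stands in for the shared Directory

  @BeforeClass
  public static void beforeClass() {
    sharedFixture = new StringBuilder("built once for every test in the class");
  }

  @AfterClass
  public static void afterClass() {
    sharedFixture = null;   // drop the reference so nothing leaks past the class run
  }

  @Test
  public void firstTestSeesFixture() {
    assertNotNull(sharedFixture);
  }

  @Test
  public void secondTestSeesSameFixture() {
    assertNotNull(sharedFixture);
  }
}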
@ -70,7 +70,7 @@ public class TestLongPostings extends LuceneTestCase {
|
|||
// randomness (ie same seed will point to same dir):
|
||||
Directory dir = newFSDirectory(_TestUtil.getTempDir("longpostings" + "." + random.nextLong()));
|
||||
|
||||
final int NUM_DOCS = (int) ((TEST_NIGHTLY ? 4e6 : (RANDOM_MULTIPLIER*2e4)) * (1+random.nextDouble()));
|
||||
final int NUM_DOCS = atLeast(2000);
|
||||
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: NUM_DOCS=" + NUM_DOCS);
|
||||
|
@ -145,7 +145,8 @@ public class TestLongPostings extends LuceneTestCase {
|
|||
assertTrue(r.docFreq(new Term("field", s1)) > 0);
|
||||
assertTrue(r.docFreq(new Term("field", s2)) > 0);
|
||||
|
||||
for(int iter=0;iter<1000*RANDOM_MULTIPLIER;iter++) {
|
||||
int num = atLeast(1000);
|
||||
for(int iter=0;iter<num;iter++) {
|
||||
|
||||
final String term;
|
||||
final boolean doS1;
|
||||
|
|
|
@ -27,7 +27,7 @@ public class TestMultiFields extends LuceneTestCase {
|
|||
|
||||
public void testRandom() throws Exception {
|
||||
|
||||
int num = 2 * RANDOM_MULTIPLIER;
|
||||
int num = atLeast(2);
|
||||
for (int iter = 0; iter < num; iter++) {
|
||||
Directory dir = newDirectory();
|
||||
|
||||
|
|
|
@ -43,7 +43,6 @@ import org.apache.lucene.search.Sort;
|
|||
import org.apache.lucene.search.SortField;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.search.TopDocs;
|
||||
import org.apache.lucene.store.FSDirectory;
|
||||
import org.apache.lucene.store.MockDirectoryWrapper;
|
||||
import org.apache.lucene.util.Bits;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
|
@ -104,7 +103,8 @@ public class TestNRTThreads extends LuceneTestCase {
|
|||
|
||||
final LineFileDocs docs = new LineFileDocs(random);
|
||||
final File tempDir = _TestUtil.getTempDir("nrtopenfiles");
|
||||
final MockDirectoryWrapper dir = new MockDirectoryWrapper(random, FSDirectory.open(tempDir));
|
||||
final MockDirectoryWrapper dir = newFSDirectory(tempDir);
|
||||
dir.setCheckIndexOnClose(false); // don't double-checkIndex, we do it ourselves.
|
||||
final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
|
||||
|
||||
if (LuceneTestCase.TEST_NIGHTLY) {
|
||||
|
@ -157,7 +157,7 @@ public class TestNRTThreads extends LuceneTestCase {
|
|||
final int NUM_INDEX_THREADS = 2;
|
||||
final int NUM_SEARCH_THREADS = 3;
|
||||
|
||||
final int RUN_TIME_SEC = LuceneTestCase.TEST_NIGHTLY ? 300 : 5;
|
||||
final int RUN_TIME_SEC = LuceneTestCase.TEST_NIGHTLY ? 300 : RANDOM_MULTIPLIER;
|
||||
|
||||
final AtomicBoolean failed = new AtomicBoolean();
|
||||
final AtomicInteger addCount = new AtomicInteger();
|
||||
|
@ -328,11 +328,11 @@ public class TestNRTThreads extends LuceneTestCase {
|
|||
if (addedField != null) {
|
||||
doc.removeField(addedField);
|
||||
}
|
||||
} catch (Exception exc) {
|
||||
} catch (Throwable t) {
|
||||
System.out.println(Thread.currentThread().getName() + ": hit exc");
|
||||
exc.printStackTrace();
|
||||
t.printStackTrace();
|
||||
failed.set(true);
|
||||
throw new RuntimeException(exc);
|
||||
throw new RuntimeException(t);
|
||||
}
|
||||
}
|
||||
if (VERBOSE) {
|
||||
|
@ -448,6 +448,7 @@ public class TestNRTThreads extends LuceneTestCase {
|
|||
System.out.println(Thread.currentThread().getName() + ": search done");
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
System.out.println(Thread.currentThread().getName() + ": hit exc");
|
||||
failed.set(true);
|
||||
t.printStackTrace(System.out);
|
||||
throw new RuntimeException(t);
|
||||
|
|
|
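The TestNRTThreads hunks above widen the worker thread's catch from Exception to Throwable so assertion failures also set the shared failed flag instead of silently killing the thread. A standalone, non-Lucene illustration of that pattern; the class name is invented for the example:

import java.util.concurrent.atomic.AtomicBoolean;

public final class WorkerFailureFlagDemo {
  public static void main(String[] args) throws InterruptedException {
    final AtomicBoolean failed = new AtomicBoolean();
    Thread worker = new Thread() {
      @Override
      public void run() {
        try {
          assert 1 + 1 == 3 : "boom";        // an Error, not an Exception (run with -ea)
          throw new IllegalStateException(); // fallback if assertions are disabled
        } catch (Throwable t) {              // catching Exception alone would miss AssertionError
          t.printStackTrace();
          failed.set(true);
        }
      }
    };
    worker.start();
    worker.join();
    System.out.println("failed flag seen by main thread: " + failed.get());
  }
}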
@ -143,7 +143,8 @@ public class TestNorms extends LuceneTestCase {
|
|||
}
|
||||
|
||||
private void doTestNorms(Random random, Directory dir) throws IOException {
|
||||
for (int i=0; i<5; i++) {
|
||||
int num = atLeast(1);
|
||||
for (int i=0; i<num; i++) {
|
||||
addDocs(random, dir,12,true);
|
||||
verifyIndex(dir);
|
||||
modifyNormsForF1(dir);
|
||||
|
|
|
@ -64,7 +64,6 @@ public class TestOmitNorms extends LuceneTestCase {
|
|||
writer.optimize();
|
||||
// flush
|
||||
writer.close();
|
||||
_TestUtil.checkIndex(ram);
|
||||
|
||||
SegmentReader reader = getOnlySegmentReader(IndexReader.open(ram, false));
|
||||
FieldInfos fi = reader.fieldInfos();
|
||||
|
@ -121,8 +120,6 @@ public class TestOmitNorms extends LuceneTestCase {
|
|||
// flush
|
||||
writer.close();
|
||||
|
||||
_TestUtil.checkIndex(ram);
|
||||
|
||||
SegmentReader reader = getOnlySegmentReader(IndexReader.open(ram, false));
|
||||
FieldInfos fi = reader.fieldInfos();
|
||||
assertTrue("OmitNorms field bit should be set.", fi.fieldInfo("f1").omitNorms);
|
||||
|
@ -170,8 +167,6 @@ public class TestOmitNorms extends LuceneTestCase {
|
|||
// flush
|
||||
writer.close();
|
||||
|
||||
_TestUtil.checkIndex(ram);
|
||||
|
||||
SegmentReader reader = getOnlySegmentReader(IndexReader.open(ram, false));
|
||||
FieldInfos fi = reader.fieldInfos();
|
||||
assertTrue("OmitNorms field bit should not be set.", !fi.fieldInfo("f1").omitNorms);
|
||||
|
@ -218,7 +213,6 @@ public class TestOmitNorms extends LuceneTestCase {
|
|||
writer.close();
|
||||
|
||||
assertNoNrm(ram);
|
||||
_TestUtil.checkIndex(ram);
|
||||
ram.close();
|
||||
}
|
||||
|
||||
|
|
|
@ -97,7 +97,6 @@ public class TestOmitTf extends LuceneTestCase {
|
|||
writer.optimize();
|
||||
// flush
|
||||
writer.close();
|
||||
_TestUtil.checkIndex(ram);
|
||||
|
||||
SegmentReader reader = getOnlySegmentReader(IndexReader.open(ram, false));
|
||||
FieldInfos fi = reader.fieldInfos();
|
||||
|
@ -153,8 +152,6 @@ public class TestOmitTf extends LuceneTestCase {
|
|||
// flush
|
||||
writer.close();
|
||||
|
||||
_TestUtil.checkIndex(ram);
|
||||
|
||||
SegmentReader reader = getOnlySegmentReader(IndexReader.open(ram, false));
|
||||
FieldInfos fi = reader.fieldInfos();
|
||||
assertTrue("OmitTermFreqAndPositions field bit should be set.", fi.fieldInfo("f1").omitTermFreqAndPositions);
|
||||
|
@ -200,8 +197,6 @@ public class TestOmitTf extends LuceneTestCase {
|
|||
// flush
|
||||
writer.close();
|
||||
|
||||
_TestUtil.checkIndex(ram);
|
||||
|
||||
SegmentReader reader = getOnlySegmentReader(IndexReader.open(ram, false));
|
||||
FieldInfos fi = reader.fieldInfos();
|
||||
assertTrue("OmitTermFreqAndPositions field bit should not be set.", !fi.fieldInfo("f1").omitTermFreqAndPositions);
|
||||
|
@ -245,7 +240,6 @@ public class TestOmitTf extends LuceneTestCase {
|
|||
writer.close();
|
||||
|
||||
assertNoPrx(ram);
|
||||
_TestUtil.checkIndex(ram);
|
||||
ram.close();
|
||||
}
|
||||
|
||||
|
@ -282,7 +276,6 @@ public class TestOmitTf extends LuceneTestCase {
|
|||
writer.optimize();
|
||||
// flush
|
||||
writer.close();
|
||||
_TestUtil.checkIndex(dir);
|
||||
|
||||
/*
|
||||
* Verify the index
|
||||
|
|
|
@ -62,7 +62,6 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase {
|
|||
|
||||
iwOut.optimize();
|
||||
iwOut.close();
|
||||
_TestUtil.checkIndex(rdOut);
|
||||
rdOut.close();
|
||||
rd1.close();
|
||||
rd2.close();
|
||||
|
@ -122,7 +121,6 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase {
|
|||
iwOut.optimize();
|
||||
iwOut.close();
|
||||
|
||||
_TestUtil.checkIndex(rdOut);
|
||||
rdOut.close();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -474,7 +474,7 @@ public class TestPayloads extends LuceneTestCase {
|
|||
|
||||
public void testThreadSafety() throws Exception {
|
||||
final int numThreads = 5;
|
||||
final int numDocs = 50 * RANDOM_MULTIPLIER;
|
||||
final int numDocs = atLeast(50);
|
||||
final ByteArrayPool pool = new ByteArrayPool(numThreads, 5);
|
||||
|
||||
Directory dir = newDirectory();
|
||||
|
@ -615,8 +615,6 @@ public class TestPayloads extends LuceneTestCase {
|
|||
writer.optimize();
|
||||
writer.close();
|
||||
|
||||
_TestUtil.checkIndex(dir);
|
||||
|
||||
dir.close();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -274,7 +274,8 @@ public class TestPerFieldCodecSupport extends LuceneTestCase {
|
|||
Index[] indexValue = new Index[] { Index.ANALYZED, Index.ANALYZED_NO_NORMS,
|
||||
Index.NOT_ANALYZED, Index.NOT_ANALYZED_NO_NORMS };
|
||||
final int docsPerRound = 97;
|
||||
for (int i = 0; i < 5; i++) {
|
||||
int numRounds = atLeast(1);
|
||||
for (int i = 0; i < numRounds; i++) {
|
||||
CodecProvider provider = new CodecProvider();
|
||||
Codec[] codecs = new Codec[] { new StandardCodec(),
|
||||
new SimpleTextCodec(), new MockSepCodec(),
|
||||
|
@ -284,7 +285,8 @@ public class TestPerFieldCodecSupport extends LuceneTestCase {
|
|||
for (Codec codec : codecs) {
|
||||
provider.register(codec);
|
||||
}
|
||||
for (int j = 0; j < 30 * RANDOM_MULTIPLIER; j++) {
|
||||
int num = atLeast(30);
|
||||
for (int j = 0; j < num; j++) {
|
||||
provider.setFieldCodec("" + j, codecs[random.nextInt(codecs.length)].name);
|
||||
}
|
||||
IndexWriterConfig config = newIndexWriterConfig(random,
|
||||
|
@ -294,7 +296,8 @@ public class TestPerFieldCodecSupport extends LuceneTestCase {
|
|||
IndexWriter writer = newWriter(dir, config);
|
||||
for (int j = 0; j < docsPerRound; j++) {
|
||||
final Document doc = new Document();
|
||||
for (int k = 0; k < 30 * RANDOM_MULTIPLIER; k++) {
|
||||
num = atLeast(30);
|
||||
for (int k = 0; k < num; k++) {
|
||||
Field field = newField("" + k, _TestUtil
|
||||
.randomRealisticUnicodeString(random, 128), indexValue[random
|
||||
.nextInt(indexValue.length)]);
|
||||
|
@ -308,7 +311,6 @@ public class TestPerFieldCodecSupport extends LuceneTestCase {
|
|||
writer.commit();
|
||||
assertEquals((i + 1) * docsPerRound, writer.maxDoc());
|
||||
writer.close();
|
||||
_TestUtil.checkIndex(dir, provider);
|
||||
}
|
||||
dir.close();
|
||||
}
|
||||
|
|
|
@ -36,7 +36,7 @@ public class TestRollingUpdates extends LuceneTestCase {
|
|||
final LineFileDocs docs = new LineFileDocs(random);
|
||||
|
||||
final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
final int SIZE = 200 * RANDOM_MULTIPLIER;
|
||||
final int SIZE = atLeast(20);
|
||||
int id = 0;
|
||||
IndexReader r = null;
|
||||
final int numUpdates = (int) (SIZE * (2+random.nextDouble()));
|
||||
|
@ -82,9 +82,8 @@ public class TestRollingUpdates extends LuceneTestCase {
|
|||
for (int r = 0; r < 3; r++) {
|
||||
final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
|
||||
final int SIZE = 200 * RANDOM_MULTIPLIER;
|
||||
final int numUpdates = (int) (SIZE * (2 + random.nextDouble()));
|
||||
int numThreads = 3 + random.nextInt(12);
|
||||
final int numUpdates = atLeast(20);
|
||||
int numThreads = _TestUtil.nextInt(random, 2, 6);
|
||||
IndexingThread[] threads = new IndexingThread[numThreads];
|
||||
for (int i = 0; i < numThreads; i++) {
|
||||
threads[i] = new IndexingThread(docs, w, numUpdates);
|
||||
|
@ -97,6 +96,7 @@ public class TestRollingUpdates extends LuceneTestCase {
|
|||
|
||||
w.close();
|
||||
}
|
||||
|
||||
IndexReader open = IndexReader.open(dir);
|
||||
assertEquals(1, open.numDocs());
|
||||
open.close();
|
||||
|
@ -123,9 +123,10 @@ public class TestRollingUpdates extends LuceneTestCase {
|
|||
Document doc = new Document();// docs.nextDoc();
|
||||
doc.add(newField("id", "test", Index.NOT_ANALYZED));
|
||||
writer.updateDocument(new Term("id", "test"), doc);
|
||||
if (random.nextInt(10) == 0) {
|
||||
if (open == null)
|
||||
if (random.nextInt(3) == 0) {
|
||||
if (open == null) {
|
||||
open = IndexReader.open(writer, true);
|
||||
}
|
||||
IndexReader reader = open.reopen();
|
||||
if (reader != open) {
|
||||
open.close();
|
||||
|
@ -134,11 +135,12 @@ public class TestRollingUpdates extends LuceneTestCase {
|
|||
assertEquals("iter: " + i + " numDocs: "+ open.numDocs() + " del: " + open.numDeletedDocs() + " max: " + open.maxDoc(), 1, open.numDocs());
|
||||
}
|
||||
}
|
||||
open.close();
|
||||
if (open != null) {
|
||||
open.close();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
fail(e.getMessage());
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
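TestRollingUpdates above obtains a near-real-time reader from the writer and reopen()s it as updates arrive. A compact sketch of that pattern, assuming the same pre-4.0 API used in the test (IndexReader.open(writer, true), reopen()); it is illustrative only, not part of the commit:

package org.apache.lucene.index;

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;

public class TestNrtReopenSketch extends LuceneTestCase {
  public void testReopenAfterUpdate() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
    Document doc = new Document();
    doc.add(newField("id", "test", Field.Store.NO, Field.Index.NOT_ANALYZED));
    writer.addDocument(doc);

    IndexReader open = IndexReader.open(writer, true);   // NRT reader, deletes applied
    assertEquals(1, open.numDocs());

    writer.updateDocument(new Term("id", "test"), doc);  // replace the document
    IndexReader reader = open.reopen();
    if (reader != open) {   // reopen() returns a new instance only when the index changed
      open.close();
      open = reader;
    }
    assertEquals(1, open.numDocs());

    open.close();
    writer.close();
    dir.close();
  }
}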
@@ -0,0 +1,82 @@
package org.apache.lucene.index;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.io.Reader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;

public class TestSameTokenSamePosition extends LuceneTestCase {

  /**
   * Attempt to reproduce an assertion error that happens
   * only with the trunk version around April 2011.
   */
  public void test() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter riw = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new BugReproAnalyzer()));
    Document doc = new Document();
    doc.add(new Field("eng", "Six drunken" /*This shouldn't matter. */,
                      Field.Store.YES, Field.Index.ANALYZED));
    riw.addDocument(doc);
    riw.close();
    dir.close();
  }
}

final class BugReproAnalyzer extends Analyzer {
  @Override
  public TokenStream tokenStream(String arg0, Reader arg1) {
    return new BugReproAnalyzerTokenizer();
  }
}

final class BugReproAnalyzerTokenizer extends TokenStream {
  private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
  private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
  private final PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
  int tokenCount = 4;
  int nextTokenIndex = 0;
  String terms[] = new String[]{"six", "six", "drunken", "drunken"};
  int starts[] = new int[]{0, 0, 4, 4};
  int ends[] = new int[]{3, 3, 11, 11};
  int incs[] = new int[]{1, 0, 1, 0};

  @Override
  public boolean incrementToken() throws IOException {
    if (nextTokenIndex < tokenCount) {
      termAtt.setEmpty().append(terms[nextTokenIndex]);
      offsetAtt.setOffset(starts[nextTokenIndex], ends[nextTokenIndex]);
      posIncAtt.setPositionIncrement(incs[nextTokenIndex]);
      nextTokenIndex++;
      return true;
    } else {
      return false;
    }
  }
}
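BugReproAnalyzerTokenizer above emits a position increment of 0 to stack a second token on the same position as the previous one. A tiny standalone demo of how those increments translate into token positions, using the same values as the tokenizer:

public final class PositionIncrementDemo {
  public static void main(String[] args) {
    String[] terms = {"six", "six", "drunken", "drunken"};
    int[] incs = {1, 0, 1, 0};   // same increments as BugReproAnalyzerTokenizer above
    int pos = -1;
    for (int i = 0; i < terms.length; i++) {
      pos += incs[i];            // an increment of 0 keeps the token at the same position
      System.out.println(terms[i] + " -> position " + pos);
    }
    // prints: six -> 0, six -> 0, drunken -> 1, drunken -> 1 (two stacked pairs)
  }
}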
@ -40,7 +40,8 @@ public class TestStressAdvance extends LuceneTestCase {
|
|||
doc.add(f);
|
||||
final Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
|
||||
doc.add(idField);
|
||||
for(int id=0;id<5000*RANDOM_MULTIPLIER;id++) {
|
||||
int num = atLeast(5000);
|
||||
for(int id=0;id<num;id++) {
|
||||
if (random.nextInt(4) == 3) {
|
||||
f.setValue("a");
|
||||
aDocs.add(id);
|
||||
|
|
|
@ -27,7 +27,7 @@ public class TestStressIndexing extends LuceneTestCase {
|
|||
private static abstract class TimedThread extends Thread {
|
||||
volatile boolean failed;
|
||||
int count;
|
||||
private static int RUN_TIME_SEC = 1 * RANDOM_MULTIPLIER;
|
||||
private static int RUN_TIME_MSEC = atLeast(1000);
|
||||
private TimedThread[] allThreads;
|
||||
|
||||
abstract public void doWork() throws Throwable;
|
||||
|
@ -38,7 +38,7 @@ public class TestStressIndexing extends LuceneTestCase {
|
|||
|
||||
@Override
|
||||
public void run() {
|
||||
final long stopTime = System.currentTimeMillis() + 1000*RUN_TIME_SEC;
|
||||
final long stopTime = System.currentTimeMillis() + RUN_TIME_MSEC;
|
||||
|
||||
count = 0;
|
||||
|
||||
|
|
|
@ -93,7 +93,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
|
|||
public void testMultiConfig() throws Throwable {
|
||||
// test lots of smaller different params together
|
||||
|
||||
int num = 3 * RANDOM_MULTIPLIER;
|
||||
int num = atLeast(3);
|
||||
for (int i = 0; i < num; i++) { // increase iterations for better testing
|
||||
if (VERBOSE) {
|
||||
System.out.println("\n\nTEST: top iter=" + i);
|
||||
|
|
|
@ -47,14 +47,13 @@ public class TestThreadedOptimize extends LuceneTestCase {
|
|||
failed = true;
|
||||
}
|
||||
|
||||
public void runTest(Random random, Directory directory, MergeScheduler merger) throws Exception {
|
||||
public void runTest(Random random, Directory directory) throws Exception {
|
||||
|
||||
IndexWriter writer = new IndexWriter(
|
||||
directory,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, ANALYZER).
|
||||
setOpenMode(OpenMode.CREATE).
|
||||
setMaxBufferedDocs(2).
|
||||
setMergeScheduler(merger).
|
||||
setMergePolicy(newLogMergePolicy())
|
||||
);
|
||||
|
||||
|
@ -65,8 +64,8 @@ public class TestThreadedOptimize extends LuceneTestCase {
|
|||
|
||||
for(int i=0;i<200;i++) {
|
||||
Document d = new Document();
|
||||
d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
|
||||
d.add(newField("contents", English.intToEnglish(i), Field.Store.NO, Field.Index.ANALYZED));
|
||||
d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
|
||||
d.add(newField("contents", English.intToEnglish(i), Field.Store.NO, Field.Index.ANALYZED_NO_NORMS));
|
||||
writer.addDocument(d);
|
||||
}
|
||||
|
||||
|
@ -86,8 +85,8 @@ public class TestThreadedOptimize extends LuceneTestCase {
|
|||
writerFinal.optimize(false);
|
||||
for(int k=0;k<17*(1+iFinal);k++) {
|
||||
Document d = new Document();
|
||||
d.add(newField("id", iterFinal + "_" + iFinal + "_" + j + "_" + k, Field.Store.YES, Field.Index.NOT_ANALYZED));
|
||||
d.add(newField("contents", English.intToEnglish(iFinal+k), Field.Store.NO, Field.Index.ANALYZED));
|
||||
d.add(newField("id", iterFinal + "_" + iFinal + "_" + j + "_" + k, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
|
||||
d.add(newField("contents", English.intToEnglish(iFinal+k), Field.Store.NO, Field.Index.ANALYZED_NO_NORMS));
|
||||
writerFinal.addDocument(d);
|
||||
}
|
||||
for(int k=0;k<9*(1+iFinal);k++)
|
||||
|
@ -135,8 +134,7 @@ public class TestThreadedOptimize extends LuceneTestCase {
|
|||
*/
|
||||
public void testThreadedOptimize() throws Exception {
|
||||
Directory directory = newDirectory();
|
||||
runTest(random, directory, new SerialMergeScheduler());
|
||||
runTest(random, directory, new ConcurrentMergeScheduler());
|
||||
runTest(random, directory);
|
||||
directory.close();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -66,7 +66,8 @@ public class TestTieredMergePolicy extends LuceneTestCase {
|
|||
}
|
||||
|
||||
public void testPartialOptimize() throws Exception {
|
||||
for(int iter=0;iter<10*RANDOM_MULTIPLIER;iter++) {
|
||||
int num = atLeast(10);
|
||||
for(int iter=0;iter<num;iter++) {
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: iter=" + iter);
|
||||
}
|
||||
|
|
|
@ -42,7 +42,7 @@ public class TestTransactions extends LuceneTestCase {
|
|||
|
||||
private static abstract class TimedThread extends Thread {
|
||||
volatile boolean failed;
|
||||
private static float RUN_TIME_SEC = 0.5f * RANDOM_MULTIPLIER;
|
||||
private static float RUN_TIME_MSEC = atLeast(500);
|
||||
private TimedThread[] allThreads;
|
||||
|
||||
abstract public void doWork() throws Throwable;
|
||||
|
@ -53,7 +53,7 @@ public class TestTransactions extends LuceneTestCase {
|
|||
|
||||
@Override
|
||||
public void run() {
|
||||
final long stopTime = System.currentTimeMillis() + (long) (1000*RUN_TIME_SEC);
|
||||
final long stopTime = System.currentTimeMillis() + (long) (RUN_TIME_MSEC);
|
||||
|
||||
try {
|
||||
do {
|
||||
|
|
|
@ -134,7 +134,7 @@ public class TestSurrogates extends LuceneTestCase {
|
|||
System.out.println("\nTEST: top now seek");
|
||||
}
|
||||
|
||||
int num = 100 * RANDOM_MULTIPLIER;
|
||||
int num = atLeast(100);
|
||||
for (int iter = 0; iter < num; iter++) {
|
||||
|
||||
// pick random field+term
|
||||
|
@ -197,7 +197,7 @@ public class TestSurrogates extends LuceneTestCase {
|
|||
}
|
||||
|
||||
{
|
||||
int num = 100 * RANDOM_MULTIPLIER;
|
||||
int num = atLeast(100);
|
||||
for (int iter = 0; iter < num; iter++) {
|
||||
|
||||
// seek to random spot
|
||||
|
@ -287,7 +287,7 @@ public class TestSurrogates extends LuceneTestCase {
|
|||
|
||||
for(int f=0;f<numField;f++) {
|
||||
String field = "f" + f;
|
||||
final int numTerms = 10000 * RANDOM_MULTIPLIER;
|
||||
final int numTerms = atLeast(1000);
|
||||
|
||||
final Set<String> uniqueTerms = new HashSet<String>();
|
||||
|
||||
|
|
|
@ -67,7 +67,7 @@ public class BaseTestRangeFilter extends LuceneTestCase {
|
|||
static TestIndex unsignedIndexDir;
|
||||
|
||||
static int minId = 0;
|
||||
static int maxId = 10000;
|
||||
static int maxId = atLeast(500);
|
||||
|
||||
static final int intLength = Integer.toString(Integer.MAX_VALUE).length();
|
||||
|
||||
|
@ -115,9 +115,9 @@ public class BaseTestRangeFilter extends LuceneTestCase {
|
|||
/* build an index */
|
||||
|
||||
Document doc = new Document();
|
||||
Field idField = newField(random, "id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
|
||||
Field randField = newField(random, "rand", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
|
||||
Field bodyField = newField(random, "body", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
|
||||
Field idField = newField(random, "id", "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
|
||||
Field randField = newField(random, "rand", "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
|
||||
Field bodyField = newField(random, "body", "", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS);
|
||||
doc.add(idField);
|
||||
doc.add(randField);
|
||||
doc.add(bodyField);
|
||||
|
|
|
@ -232,7 +232,7 @@ public class TestBoolean2 extends LuceneTestCase {
|
|||
try {
|
||||
|
||||
// increase number of iterations for more complete testing
|
||||
int num = 50 * RANDOM_MULTIPLIER;
|
||||
int num = atLeast(10);
|
||||
for (int i=0; i<num; i++) {
|
||||
int level = random.nextInt(3);
|
||||
q1 = randBoolQuery(new Random(random.nextLong()), random.nextBoolean(), level, field, vals, null);
|
||||
|
|
|
@@ -24,6 +24,8 @@ import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;

 import java.text.DecimalFormat;
 import java.util.Random;

@@ -32,14 +34,12 @@ import java.util.Random;
  */
 public class TestBooleanMinShouldMatch extends LuceneTestCase {

- private Directory index;
- private IndexReader r;
- private IndexSearcher s;
-
- @Override
- public void setUp() throws Exception {
-   super.setUp();
+ private static Directory index;
+ private static IndexReader r;
+ private static IndexSearcher s;
+
+ @BeforeClass
+ public static void beforeClass() throws Exception {
    String[] data = new String [] {
      "A 1 2 3 4 5 6",
      "Z 4 5 6",

@@ -70,12 +70,14 @@ public class TestBooleanMinShouldMatch extends LuceneTestCase {
    //System.out.println("Set up " + getName());
  }

- @Override
- public void tearDown() throws Exception {
+ @AfterClass
+ public static void afterClass() throws Exception {
    s.close();
+   s = null;
    r.close();
+   r = null;
    index.close();
-   super.tearDown();
+   index = null;
  }

@@ -312,7 +314,7 @@ public class TestBooleanMinShouldMatch extends LuceneTestCase {

    // increase number of iterations for more complete testing
-   int num = 50 * RANDOM_MULTIPLIER;
+   int num = atLeast(10);
    for (int i=0; i<num; i++) {
      int lev = random.nextInt(maxLev);
      final long seed = random.nextLong();

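Several test classes in this diff (TestBooleanMinShouldMatch above, TestPhraseQuery, TestPrefixInBooleanQuery, TestPayloadNearQuery and others below) move index construction from per-test setUp()/tearDown() to per-class JUnit 4 fixtures. A minimal sketch of that shape, with only the JUnit annotations taken as given and all other names illustrative:

```java
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

public class StaticFixtureExample {
  // Shared, expensive state: built once for the whole class instead of once per test.
  private static StringBuilder index;

  @BeforeClass
  public static void beforeClass() throws Exception {
    index = new StringBuilder("built once");
  }

  @AfterClass
  public static void afterClass() throws Exception {
    // Null the static references so the fixture can be garbage-collected once the
    // class is done, mirroring the "x.close(); x = null;" pairs in the diff.
    index = null;
  }

  @Test
  public void testUsesSharedIndex() {
    Assert.assertTrue(index.length() > 0);
  }
}
```

Note that @BeforeClass/@AfterClass methods must be public static, which is why the fields they touch become static in each converted test.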
@@ -39,7 +39,7 @@ public class TestCustomSearcherSort extends LuceneTestCase {
  private IndexReader reader;
  private Query query = null;
  // reduced from 20000 to 2000 to speed up test...
- private final static int INDEX_SIZE = 2000 * RANDOM_MULTIPLIER;
+ private final static int INDEX_SIZE = atLeast(2000);

  /**
   * Create index and query for test cases.

@@ -33,7 +33,7 @@ import java.io.PrintStream;

 public class TestFieldCache extends LuceneTestCase {
  protected IndexReader reader;
- private static final int NUM_DOCS = 1000 * RANDOM_MULTIPLIER;
+ private static final int NUM_DOCS = atLeast(1000);
  private String[] unicodeStrings;
  private Directory directory;

@@ -185,7 +185,8 @@ public class TestFieldCache extends LuceneTestCase {
    }

    // seek the enum around (note this isn't a great test here)
-   for (int i = 0; i < 100 * RANDOM_MULTIPLIER; i++) {
+   int num = atLeast(100);
+   for (int i = 0; i < num; i++) {
      int k = _TestUtil.nextInt(random, 1, nTerms-1);
      BytesRef val1 = termsIndex.lookup(k, val);
      assertEquals(TermsEnum.SeekStatus.FOUND, tenum.seek(val1));

@@ -57,18 +57,21 @@ public class TestFuzzyQuery2 extends LuceneTestCase {
  /** epsilon for score comparisons */
  static final float epsilon = 0.00001f;

+ static int[][] mappings = new int[][] {
+   new int[] { 0x40, 0x41 },
+   new int[] { 0x40, 0x0195 },
+   new int[] { 0x40, 0x0906 },
+   new int[] { 0x40, 0x1040F },
+   new int[] { 0x0194, 0x0195 },
+   new int[] { 0x0194, 0x0906 },
+   new int[] { 0x0194, 0x1040F },
+   new int[] { 0x0905, 0x0906 },
+   new int[] { 0x0905, 0x1040F },
+   new int[] { 0x1040E, 0x1040F }
+ };
  public void testFromTestData() throws Exception {
-   // TODO: randomize!
-   assertFromTestData(new int[] { 0x40, 0x41 });
-   assertFromTestData(new int[] { 0x40, 0x0195 });
-   assertFromTestData(new int[] { 0x40, 0x0906 });
-   assertFromTestData(new int[] { 0x40, 0x1040F });
-   assertFromTestData(new int[] { 0x0194, 0x0195 });
-   assertFromTestData(new int[] { 0x0194, 0x0906 });
-   assertFromTestData(new int[] { 0x0194, 0x1040F });
-   assertFromTestData(new int[] { 0x0905, 0x0906 });
-   assertFromTestData(new int[] { 0x0905, 0x1040F });
-   assertFromTestData(new int[] { 0x1040E, 0x1040F });
+   assertFromTestData(mappings[random.nextInt(mappings.length)]);
  }

  public void assertFromTestData(int codePointTable[]) throws Exception {

@@ -46,7 +46,7 @@ public class TestMultiValuedNumericRangeQuery extends LuceneTestCase {

    DecimalFormat format = new DecimalFormat("00000000000", new DecimalFormatSymbols(Locale.US));

-   int num = 5000 * RANDOM_MULTIPLIER;
+   int num = atLeast(500);
    for (int l = 0; l < num; l++) {
      Document doc = new Document();
      for (int m=0, c=random.nextInt(10); m<=c; m++) {

@@ -60,7 +60,7 @@ public class TestMultiValuedNumericRangeQuery extends LuceneTestCase {
    writer.close();

    IndexSearcher searcher=newSearcher(reader);
-   num = 50 * RANDOM_MULTIPLIER;
+   num = atLeast(50);
    for (int i = 0; i < num; i++) {
      int lower=random.nextInt(Integer.MAX_VALUE);
      int upper=random.nextInt(Integer.MAX_VALUE);

@@ -44,7 +44,7 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
  // shift the starting of the values to the left, to also have negative values:
  private static final int startOffset = - 1 << 15;
  // number of docs to generate for testing
- private static final int noDocs = 10000 * RANDOM_MULTIPLIER;
+ private static final int noDocs = atLeast(5000);

  private static Directory directory = null;
  private static IndexReader reader = null;

@@ -336,7 +336,7 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
  private void testRandomTrieAndClassicRangeQuery(int precisionStep) throws Exception {
    String field="field"+precisionStep;
    int termCountT=0,termCountC=0;
-   int num = 10 * RANDOM_MULTIPLIER;
+   int num = atLeast(10);
    for (int i = 0; i < num; i++) {
      int lower=(int)(random.nextDouble()*noDocs*distance)+startOffset;
      int upper=(int)(random.nextDouble()*noDocs*distance)+startOffset;

@@ -414,7 +414,7 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
  private void testRangeSplit(int precisionStep) throws Exception {
    String field="ascfield"+precisionStep;
    // 10 random tests
-   int num = 10 * RANDOM_MULTIPLIER;
+   int num = atLeast(10);
    for (int i =0; i< num; i++) {
      int lower=(int)(random.nextDouble()*noDocs - noDocs/2);
      int upper=(int)(random.nextDouble()*noDocs - noDocs/2);

@@ -490,7 +490,7 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
    String field="field"+precisionStep;
    // 10 random tests, the index order is ascending,
    // so using a reverse sort field should retun descending documents
-   int num = 10 * RANDOM_MULTIPLIER;
+   int num = atLeast(10);
    for (int i = 0; i < num; i++) {
      int lower=(int)(random.nextDouble()*noDocs*distance)+startOffset;
      int upper=(int)(random.nextDouble()*noDocs*distance)+startOffset;

@@ -41,7 +41,7 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
  // shift the starting of the values to the left, to also have negative values:
  private static final long startOffset = - 1L << 31;
  // number of docs to generate for testing
- private static final int noDocs = 10000 * RANDOM_MULTIPLIER;
+ private static final int noDocs = atLeast(5000);

  private static Directory directory = null;
  private static IndexReader reader = null;

@@ -353,7 +353,7 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
  private void testRandomTrieAndClassicRangeQuery(int precisionStep) throws Exception {
    String field="field"+precisionStep;
    int termCountT=0,termCountC=0;
-   int num = 10 * RANDOM_MULTIPLIER;
+   int num = atLeast(10);
    for (int i = 0; i < num; i++) {
      long lower=(long)(random.nextDouble()*noDocs*distance)+startOffset;
      long upper=(long)(random.nextDouble()*noDocs*distance)+startOffset;

@@ -436,7 +436,7 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
  private void testRangeSplit(int precisionStep) throws Exception {
    String field="ascfield"+precisionStep;
    // 10 random tests
-   int num = 10 * RANDOM_MULTIPLIER;
+   int num = atLeast(10);
    for (int i = 0; i < num; i++) {
      long lower=(long)(random.nextDouble()*noDocs - noDocs/2);
      long upper=(long)(random.nextDouble()*noDocs - noDocs/2);

@@ -522,7 +522,7 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
    String field="field"+precisionStep;
    // 10 random tests, the index order is ascending,
    // so using a reverse sort field should retun descending documents
-   int num = 10 * RANDOM_MULTIPLIER;
+   int num = atLeast(10);
    for (int i = 0; i < num; i++) {
      long lower=(long)(random.nextDouble()*noDocs*distance)+startOffset;
      long upper=(long)(random.nextDouble()*noDocs*distance)+startOffset;

@@ -27,6 +27,8 @@ import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.store.*;
 import org.apache.lucene.util.Version;
 import org.apache.lucene.util._TestUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;

 import java.io.IOException;
 import java.io.Reader;

@@ -45,14 +47,13 @@ public class TestPhraseQuery extends LuceneTestCase {
  /** threshold for comparing floats */
  public static final float SCORE_COMP_THRESH = 1e-6f;

- private IndexSearcher searcher;
- private IndexReader reader;
+ private static IndexSearcher searcher;
+ private static IndexReader reader;
  private PhraseQuery query;
- private Directory directory;
+ private static Directory directory;

- @Override
- public void setUp() throws Exception {
-   super.setUp();
+ @BeforeClass
+ public static void beforeClass() throws Exception {
    directory = newDirectory();
    Analyzer analyzer = new Analyzer() {
      @Override

@@ -87,15 +88,22 @@ public class TestPhraseQuery extends LuceneTestCase {
    writer.close();

    searcher = newSearcher(reader);
  }

+ @Override
+ public void setUp() throws Exception {
+   super.setUp();
+   query = new PhraseQuery();
+ }
+
- @Override
- public void tearDown() throws Exception {
+ @AfterClass
+ public static void afterClass() throws Exception {
    searcher.close();
+   searcher = null;
    reader.close();
+   reader = null;
    directory.close();
-   super.tearDown();
+   directory = null;
  }

  public void testNotCloseEnough() throws Exception {

@@ -606,10 +614,10 @@ public class TestPhraseQuery extends LuceneTestCase {

    Random r = random;

-   int NUM_DOCS = 10 * RANDOM_MULTIPLIER;
+   int NUM_DOCS = atLeast(10);
    for (int i = 0; i < NUM_DOCS; i++) {
      // must be > 4096 so it spans multiple chunks
-     int termCount = _TestUtil.nextInt(r, 10000, 30000);
+     int termCount = atLeast(5000);

      List<String> doc = new ArrayList<String>();

@@ -656,7 +664,7 @@ public class TestPhraseQuery extends LuceneTestCase {
    w.close();

    // now search
-   int num = 100 * RANDOM_MULTIPLIER;
+   int num = atLeast(10);
    for(int i=0;i<num;i++) {
      int docID = r.nextInt(docs.size());
      List<String> doc = docs.get(docID);

@@ -24,6 +24,8 @@ import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;

 /**
  * https://issues.apache.org/jira/browse/LUCENE-1974

@@ -38,53 +40,48 @@
 public class TestPrefixInBooleanQuery extends LuceneTestCase {

  private static final String FIELD = "name";
- private Directory directory;
- private IndexReader reader;
- private IndexSearcher searcher;
+ private static Directory directory;
+ private static IndexReader reader;
+ private static IndexSearcher searcher;

- @Override
- public void setUp() throws Exception {
-   super.setUp();
+ @BeforeClass
+ public static void beforeClass() throws Exception {
    directory = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random, directory);

-   Document doc = new Document();
-   Field field = newField(FIELD, "meaninglessnames", Field.Store.NO,
-       Field.Index.NOT_ANALYZED_NO_NORMS);
-   doc.add(field);
-
    for (int i = 0; i < 5137; ++i) {
+     Document doc = new Document();
+     doc.add(newField(FIELD, "meaninglessnames", Field.Store.YES,
+         Field.Index.NOT_ANALYZED));
      writer.addDocument(doc);
    }
+   {
+     Document doc = new Document();
+     doc.add(newField(FIELD, "tangfulin", Field.Store.YES,
+         Field.Index.NOT_ANALYZED));
+     writer.addDocument(doc);
+   }

-   field.setValue("tangfulin");
-   writer.addDocument(doc);
-
-   field.setValue("meaninglessnames");
    for (int i = 5138; i < 11377; ++i) {
+     Document doc = new Document();
+     doc.add(newField(FIELD, "meaninglessnames", Field.Store.YES,
+         Field.Index.NOT_ANALYZED));
      writer.addDocument(doc);
    }
+   {
+     Document doc = new Document();
+     doc.add(newField(FIELD, "tangfulin", Field.Store.YES,
+         Field.Index.NOT_ANALYZED));
+     writer.addDocument(doc);
+   }

-   field.setValue("tangfulin");
-   writer.addDocument(doc);

    reader = writer.getReader();
    searcher = newSearcher(reader);
    writer.close();
  }

- @Override
- public void tearDown() throws Exception {
+ @AfterClass
+ public static void afterClass() throws Exception {
    searcher.close();
+   searcher = null;
    reader.close();
+   reader = null;
    directory.close();
-   super.tearDown();
+   directory = null;
  }

  public void testPrefixQuery() throws Exception {

@@ -59,7 +59,7 @@ public class TestPrefixRandom extends LuceneTestCase {
    // we generate aweful prefixes: good for testing.
    // but for preflex codec, the test can be very slow, so use less iterations.
    final String codec = CodecProvider.getDefault().getFieldCodec("field");
-   int num = codec.equals("PreFlex") ? 200 * RANDOM_MULTIPLIER : 2000 * RANDOM_MULTIPLIER;
+   int num = codec.equals("PreFlex") ? 200 * RANDOM_MULTIPLIER : atLeast(2000);
    for (int i = 0; i < num; i++) {
      field.setValue(_TestUtil.randomUnicodeString(random, 10));
      writer.addDocument(doc);

@@ -114,7 +114,7 @@ public class TestPrefixRandom extends LuceneTestCase {

  /** test a bunch of random prefixes */
  public void testPrefixes() throws Exception {
-   int num = 1000 * RANDOM_MULTIPLIER;
+   int num = atLeast(1000);
    for (int i = 0; i < num; i++)
      assertSame(_TestUtil.randomUnicodeString(random, 5));
  }

@@ -33,7 +33,7 @@ import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;

 /**
- * Create an index with terms from 0000-9999.
+ * Create an index with terms from 000-999.
  * Generates random regexps according to simple patterns,
  * and validates the correct number of hits are returned.
  */

@@ -51,11 +51,11 @@ public class TestRegexpRandom extends LuceneTestCase {
        .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));

    Document doc = new Document();
-   Field field = newField("field", "", Field.Store.NO, Field.Index.ANALYZED);
+   Field field = newField("field", "", Field.Store.NO, Field.Index.ANALYZED_NO_NORMS);
    doc.add(field);

-   NumberFormat df = new DecimalFormat("0000", new DecimalFormatSymbols(Locale.ENGLISH));
-   for (int i = 0; i < 10000; i++) {
+   NumberFormat df = new DecimalFormat("000", new DecimalFormatSymbols(Locale.ENGLISH));
+   for (int i = 0; i < 1000; i++) {
      field.setValue(df.format(i));
      writer.addDocument(doc);
    }

@@ -98,56 +98,42 @@ public class TestRegexpRandom extends LuceneTestCase {
  }

  public void testRegexps() throws Exception {
-   int num = 100 * RANDOM_MULTIPLIER;
+   int num = atLeast(1);
    for (int i = 0; i < num; i++) {
-     assertPatternHits("NNNN", 1);
-     assertPatternHits(".NNN", 10);
-     assertPatternHits("N.NN", 10);
-     assertPatternHits("NN.N", 10);
-     assertPatternHits("NNN.", 10);
+     assertPatternHits("NNN", 1);
+     assertPatternHits(".NN", 10);
+     assertPatternHits("N.N", 10);
+     assertPatternHits("NN.", 10);
    }

    num = 10 * RANDOM_MULTIPLIER;
    for (int i = 0; i < num; i++) {
-     assertPatternHits(".{1,2}NN", 100);
-     assertPatternHits("N.{1,2}N", 100);
-     assertPatternHits("NN.{1,2}", 100);
-     assertPatternHits(".{1,3}N", 1000);
-     assertPatternHits("N.{1,3}", 1000);
-     assertPatternHits(".{1,4}", 10000);
+     assertPatternHits(".{1,2}N", 100);
+     assertPatternHits("N.{1,2}", 100);
+     assertPatternHits(".{1,3}", 1000);

-     assertPatternHits("NNN[3-7]", 5);
-     assertPatternHits("NN[2-6][3-7]", 25);
-     assertPatternHits("N[1-5][2-6][3-7]", 125);
-     assertPatternHits("[0-4][3-7][4-8][5-9]", 625);
-     assertPatternHits("[3-7][2-6][0-4]N", 125);
-     assertPatternHits("[2-6][3-7]NN", 25);
-     assertPatternHits("[3-7]NNN", 5);
+     assertPatternHits("NN[3-7]", 5);
+     assertPatternHits("N[2-6][3-7]", 25);
+     assertPatternHits("[1-5][2-6][3-7]", 125);
+     assertPatternHits("[0-4][3-7][4-8]", 125);
+     assertPatternHits("[2-6][0-4]N", 25);
+     assertPatternHits("[2-6]NN", 5);

-     assertPatternHits("NNN.*", 10);
-     assertPatternHits("NN.*", 100);
-     assertPatternHits("N.*", 1000);
-     assertPatternHits(".*", 10000);
+     assertPatternHits("NN.*", 10);
+     assertPatternHits("N.*", 100);
+     assertPatternHits(".*", 1000);

-     assertPatternHits(".*NNN", 10);
-     assertPatternHits(".*NN", 100);
-     assertPatternHits(".*N", 1000);
+     assertPatternHits(".*NN", 10);
+     assertPatternHits(".*N", 100);

-     assertPatternHits("N.*NN", 10);
-     assertPatternHits("NN.*N", 10);
+     assertPatternHits("N.*N", 10);

      // combo of ? and * operators
-     assertPatternHits(".NN.*", 100);
-     assertPatternHits("N.N.*", 100);
-     assertPatternHits("NN..*", 100);
-     assertPatternHits(".N..*", 1000);
-     assertPatternHits("N...*", 1000);
+     assertPatternHits(".N.*", 100);
+     assertPatternHits("N..*", 100);

-     assertPatternHits(".*NN.", 100);
-     assertPatternHits(".*N..", 1000);
-     assertPatternHits(".*...", 10000);
-     assertPatternHits(".*.N.", 1000);
-     assertPatternHits(".*..N", 1000);
+     assertPatternHits(".*N.", 100);
+     assertPatternHits(".*..", 1000);
+     assertPatternHits(".*.N", 100);
    }
  }
 }

@@ -66,7 +66,7 @@ public class TestRegexpRandom2 extends LuceneTestCase {
    Field field = newField("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
    doc.add(field);
    List<String> terms = new ArrayList<String>();
-   int num = 2000 * RANDOM_MULTIPLIER;
+   int num = atLeast(200);
    for (int i = 0; i < num; i++) {
      String s = _TestUtil.randomUnicodeString(random);
      field.setValue(s);

@@ -140,7 +140,7 @@ public class TestRegexpRandom2 extends LuceneTestCase {
  public void testRegexps() throws Exception {
    // we generate aweful regexps: good for testing.
    // but for preflex codec, the test can be very slow, so use less iterations.
-   int num = CodecProvider.getDefault().getFieldCodec("field").equals("PreFlex") ? 100 * RANDOM_MULTIPLIER : 1000 * RANDOM_MULTIPLIER;
+   int num = CodecProvider.getDefault().getFieldCodec("field").equals("PreFlex") ? 100 * RANDOM_MULTIPLIER : atLeast(1000);
    for (int i = 0; i < num; i++) {
      String reg = AutomatonTestUtil.randomRegexp(random);
      assertSame(reg);

@@ -314,9 +314,9 @@ public class TestScorerPerf extends LuceneTestCase {
    // test many small sets... the bugs will be found on boundary conditions
    createDummySearcher();
    validate=true;
-   sets=randBitSets(1000 * RANDOM_MULTIPLIER, 10 * RANDOM_MULTIPLIER);
-   doConjunctions(10000 * RANDOM_MULTIPLIER, 5 * RANDOM_MULTIPLIER);
-   doNestedConjunctions(10000 * RANDOM_MULTIPLIER, 3 * RANDOM_MULTIPLIER, 3 * RANDOM_MULTIPLIER);
+   sets=randBitSets(atLeast(1000), atLeast(10));
+   doConjunctions(atLeast(10000), atLeast(5));
+   doNestedConjunctions(atLeast(10000), atLeast(3), atLeast(3));
    s.close();
    d.close();
  }

@@ -31,9 +31,9 @@ import org.apache.lucene.util.LuceneTestCase;

 public class TestSearchWithThreads extends LuceneTestCase {

- final int NUM_DOCS = 10000;
+ final int NUM_DOCS = atLeast(10000);
  final int NUM_SEARCH_THREADS = 5;
- final int RUN_TIME_MSEC = 1000 * RANDOM_MULTIPLIER;
+ final int RUN_TIME_MSEC = atLeast(1000);

  public void test() throws Exception {
    final Directory dir = newDirectory();

@@ -47,7 +47,7 @@ public class TestSearchWithThreads extends LuceneTestCase {
    final Field body = newField("body", "", Field.Index.ANALYZED);
    doc.add(body);
    final StringBuilder sb = new StringBuilder();
-   for(int docCount=0;docCount<NUM_DOCS*RANDOM_MULTIPLIER;docCount++) {
+   for(int docCount=0;docCount<NUM_DOCS;docCount++) {
      final int numTerms = random.nextInt(10);
      for(int termCount=0;termCount<numTerms;termCount++) {
        sb.append(random.nextBoolean() ? "aaa" : "bbb");

@@ -66,7 +66,7 @@ import org.apache.lucene.util._TestUtil;
 public class TestSort extends LuceneTestCase {
  // true if our codec supports docvalues: true unless codec is preflex (3.x)
  boolean supportsDocValues = CodecProvider.getDefault().getDefaultFieldCodec().equals("PreFlex") == false;
- private static final int NUM_STRINGS = 6000 * RANDOM_MULTIPLIER;
+ private static final int NUM_STRINGS = atLeast(6000);
  private IndexSearcher full;
  private IndexSearcher searchX;
  private IndexSearcher searchY;

@@ -43,7 +43,8 @@ public class TestSubScorerFreqs extends LuceneTestCase {
    RandomIndexWriter w = new RandomIndexWriter(
        random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
    // make sure we have more than one segment occationally
-   for (int i = 0; i < 31 * RANDOM_MULTIPLIER; i++) {
+   int num = atLeast(31);
+   for (int i = 0; i < num; i++) {
      Document doc = new Document();
      doc.add(newField("f", "a b c d b c d c d d", Field.Store.NO,
          Field.Index.ANALYZED));

@@ -142,7 +142,7 @@ public class TestThreadSafe extends LuceneTestCase {
    buildDir(dir1, 15, 5, 2000);

    // do many small tests so the thread locals go away inbetween
-   int num = 10 * RANDOM_MULTIPLIER;
+   int num = atLeast(10);
    for (int i = 0; i < num; i++) {
      ir1 = IndexReader.open(dir1, false);
      doTest(10,10);

@@ -33,7 +33,7 @@ import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;

 /**
- * Create an index with terms from 0000-9999.
+ * Create an index with terms from 000-999.
  * Generates random wildcards according to patterns,
  * and validates the correct number of hits are returned.
  */

@@ -51,11 +51,11 @@ public class TestWildcardRandom extends LuceneTestCase {
        .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));

    Document doc = new Document();
-   Field field = newField("field", "", Field.Store.NO, Field.Index.ANALYZED);
+   Field field = newField("field", "", Field.Store.NO, Field.Index.ANALYZED_NO_NORMS);
    doc.add(field);

-   NumberFormat df = new DecimalFormat("0000", new DecimalFormatSymbols(Locale.ENGLISH));
-   for (int i = 0; i < 10000; i++) {
+   NumberFormat df = new DecimalFormat("000", new DecimalFormatSymbols(Locale.ENGLISH));
+   for (int i = 0; i < 1000; i++) {
      field.setValue(df.format(i));
      writer.addDocument(doc);
    }

@@ -99,48 +99,35 @@ public class TestWildcardRandom extends LuceneTestCase {
  }

  public void testWildcards() throws Exception {;
-   int num = 100 * RANDOM_MULTIPLIER;
+   int num = atLeast(1);
    for (int i = 0; i < num; i++) {
-     assertPatternHits("NNNN", 1);
-     assertPatternHits("?NNN", 10);
-     assertPatternHits("N?NN", 10);
-     assertPatternHits("NN?N", 10);
-     assertPatternHits("NNN?", 10);
+     assertPatternHits("NNN", 1);
+     assertPatternHits("?NN", 10);
+     assertPatternHits("N?N", 10);
+     assertPatternHits("NN?", 10);
    }

    num = 10 * RANDOM_MULTIPLIER;
    for (int i = 0; i < num; i++) {
-     assertPatternHits("??NN", 100);
-     assertPatternHits("N??N", 100);
-     assertPatternHits("NN??", 100);
-     assertPatternHits("???N", 1000);
-     assertPatternHits("N???", 1000);
-     assertPatternHits("????", 10000);
+     assertPatternHits("??N", 100);
+     assertPatternHits("N??", 100);
+     assertPatternHits("???", 1000);

-     assertPatternHits("NNN*", 10);
-     assertPatternHits("NN*", 100);
-     assertPatternHits("N*", 1000);
-     assertPatternHits("*", 10000);
+     assertPatternHits("NN*", 10);
+     assertPatternHits("N*", 100);
+     assertPatternHits("*", 1000);

-     assertPatternHits("*NNN", 10);
-     assertPatternHits("*NN", 100);
-     assertPatternHits("*N", 1000);
+     assertPatternHits("*NN", 10);
+     assertPatternHits("*N", 100);

-     assertPatternHits("N*NN", 10);
-     assertPatternHits("NN*N", 10);
+     assertPatternHits("N*N", 10);

      // combo of ? and * operators
-     assertPatternHits("?NN*", 100);
-     assertPatternHits("N?N*", 100);
-     assertPatternHits("NN?*", 100);
-     assertPatternHits("?N?*", 1000);
-     assertPatternHits("N??*", 1000);
+     assertPatternHits("?N*", 100);
+     assertPatternHits("N?*", 100);

-     assertPatternHits("*NN?", 100);
-     assertPatternHits("*N??", 1000);
-     assertPatternHits("*???", 10000);
-     assertPatternHits("*?N?", 1000);
-     assertPatternHits("*??N", 1000);
+     assertPatternHits("*N?", 100);
+     assertPatternHits("*??", 1000);
+     assertPatternHits("*?N", 100);
    }
  }
 }

@@ -37,7 +37,7 @@ import static org.hamcrest.CoreMatchers.*;

 public class TestEntryCreators extends LuceneTestCase {
  protected IndexReader reader;
- private static final int NUM_DOCS = 500 * RANDOM_MULTIPLIER;
+ private static final int NUM_DOCS = atLeast(500);
  private Directory directory;

  static class NumberTypeTester {

@@ -27,15 +27,14 @@ import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;
-import org.junit.After;
-import org.junit.Before;
+import org.junit.AfterClass;
 import org.junit.Ignore;

 /**
  * Setup for function tests
  */
-@Ignore
-public class FunctionTestSetup extends LuceneTestCase {
+public abstract class FunctionTestSetup extends LuceneTestCase {

  /**
   * Actual score computation order is slightly different than assumptios

@@ -67,32 +66,17 @@ public class FunctionTestSetup extends LuceneTestCase {
      "text for the test, but oh much much safer. ",
  };

- protected Directory dir;
- protected Analyzer anlzr;
+ protected static Directory dir;
+ protected static Analyzer anlzr;

- private final boolean doMultiSegment;
-
- public FunctionTestSetup(boolean doMultiSegment) {
-   this.doMultiSegment = doMultiSegment;
- }
-
- public FunctionTestSetup() {
-   this(false);
- }
-
- @Override
- @After
- public void tearDown() throws Exception {
+ @AfterClass
+ public static void afterClassFunctionTestSetup() throws Exception {
    dir.close();
+   dir = null;
+   anlzr = null;
-   super.tearDown();
  }

- @Override
- @Before
- public void setUp() throws Exception {
-   super.setUp();
+ protected static void createIndex(boolean doMultiSegment) throws Exception {
    if (VERBOSE) {
      System.out.println("TEST: setUp");
    }

@@ -130,7 +114,7 @@ public class FunctionTestSetup extends LuceneTestCase {
    }
  }

- private void addDoc(RandomIndexWriter iw, int i) throws Exception {
+ private static void addDoc(RandomIndexWriter iw, int i) throws Exception {
    Document d = new Document();
    Fieldable f;
    int scoreAndID = i + 1;

@@ -156,7 +140,7 @@ public class FunctionTestSetup extends LuceneTestCase {
  }

  // 17 --> ID00017
- protected String id2String(int scoreAndID) {
+ protected static String id2String(int scoreAndID) {
    String s = "000000000" + scoreAndID;
    int n = ("" + N_DOCS).length() + 3;
    int k = s.length() - n;

@@ -164,17 +148,17 @@ public class FunctionTestSetup extends LuceneTestCase {
  }

  // some text line for regular search
- private String textLine(int docNum) {
+ private static String textLine(int docNum) {
    return DOC_TEXT_LINES[docNum % DOC_TEXT_LINES.length];
  }

  // extract expected doc score from its ID Field: "ID7" --> 7.0
- protected float expectedFieldScore(String docIDFieldVal) {
+ protected static float expectedFieldScore(String docIDFieldVal) {
    return Float.parseFloat(docIDFieldVal.substring(2));
  }

  // debug messages (change DBG to true for anything to print)
- protected void log(Object o) {
+ protected static void log(Object o) {
    if (VERBOSE) {
      System.out.println(o.toString());
    }

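The FunctionTestSetup change just above follows the same move to per-class fixtures: a shared setup class that used to be instantiated per test (and hidden from JUnit with @Ignore) becomes an abstract base class exposing a static createIndex(boolean), which each concrete suite, such as TestCustomScoreQuery and TestFieldScoreQuery below, calls from its own @BeforeClass. A hedged sketch of that arrangement, where only the JUnit annotations are real API and every other name is illustrative:

```java
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

// Abstract so JUnit never tries to run the base class on its own.
abstract class SharedIndexTestBase {
  protected static String dir; // stand-in for the shared Directory/Analyzer state

  protected static void createIndex(boolean doMultiSegment) throws Exception {
    dir = doMultiSegment ? "multi-segment index" : "single-segment index";
  }

  @AfterClass
  public static void afterClassSharedIndexTestBase() throws Exception {
    dir = null; // release the shared fixture between test classes
  }
}

public class SomeFunctionTest extends SharedIndexTestBase {
  @BeforeClass
  public static void beforeClass() throws Exception {
    createIndex(true); // each subclass picks its own index shape
  }

  @Test
  public void testSharedIndexIsAvailable() {
    Assert.assertNotNull(dir);
  }
}
```

Pushing the choice of single- versus multi-segment index into each subclass's @BeforeClass replaces the old pattern of selecting it through the base-class constructor.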
@@ -20,6 +20,7 @@ package org.apache.lucene.search.function;
 import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.queryParser.ParseException;
 import org.apache.lucene.search.*;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import java.io.IOException;
 import java.util.HashMap;

@@ -33,9 +34,9 @@ import org.apache.lucene.index.Term;
  */
 public class TestCustomScoreQuery extends FunctionTestSetup {

- /* @override constructor */
- public TestCustomScoreQuery() {
-   super(true);
+ @BeforeClass
+ public static void beforeClass() throws Exception {
+   createIndex(true);
  }

  /**

@@ -193,7 +194,7 @@ public class TestCustomScoreQuery extends FunctionTestSetup {
    final Query q = new CustomExternalQuery(q1);
    log(q);

-   IndexSearcher s = new IndexSearcher(dir);
+   IndexSearcher s = new IndexSearcher(dir, true);
    TopDocs hits = s.search(q, 1000);
    assertEquals(N_DOCS, hits.totalHits);
    for(int i=0;i<N_DOCS;i++) {

@@ -26,6 +26,7 @@ import org.apache.lucene.search.QueryUtils;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.util.ReaderUtil;
+import org.junit.BeforeClass;
 import org.junit.Test;

 /**

@@ -41,9 +42,9 @@ import org.junit.Test;
  */
 public class TestFieldScoreQuery extends FunctionTestSetup {

- /* @override constructor */
- public TestFieldScoreQuery() {
-   super(true);
+ @BeforeClass
+ public static void beforeClass() throws Exception {
+   createIndex(true);
  }

  /** Test that FieldScoreQuery of Type.BYTE returns docs in expected order. */

@@ -21,6 +21,7 @@ import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.search.*;
 import org.apache.lucene.util.ReaderUtil;
+import org.junit.BeforeClass;
 import org.junit.Test;

 /**

@@ -36,9 +37,9 @@ import org.junit.Test;
  */
 public class TestOrdValues extends FunctionTestSetup {

- /* @override constructor */
- public TestOrdValues() {
-   super(false);
+ @BeforeClass
+ public static void beforeClass() throws Exception {
+   createIndex(false);
  }

  /**

@@ -46,17 +46,19 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.English;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.search.Explanation.IDFExplanation;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;


 public class TestPayloadNearQuery extends LuceneTestCase {
- private IndexSearcher searcher;
- private IndexReader reader;
- private Directory directory;
- private BoostingSimilarityProvider similarityProvider = new BoostingSimilarityProvider();
- private byte[] payload2 = new byte[]{2};
- private byte[] payload4 = new byte[]{4};
+ private static IndexSearcher searcher;
+ private static IndexReader reader;
+ private static Directory directory;
+ private static BoostingSimilarityProvider similarityProvider = new BoostingSimilarityProvider();
+ private static byte[] payload2 = new byte[]{2};
+ private static byte[] payload4 = new byte[]{4};

- private class PayloadAnalyzer extends Analyzer {
+ private static class PayloadAnalyzer extends Analyzer {
    @Override
    public TokenStream tokenStream(String fieldName, Reader reader) {
      TokenStream result = new MockTokenizer(reader, MockTokenizer.SIMPLE, true);

@@ -65,7 +67,7 @@ public class TestPayloadNearQuery extends LuceneTestCase {
    }
  }

- private class PayloadFilter extends TokenFilter {
+ private static class PayloadFilter extends TokenFilter {
    String fieldName;
    int numSeen = 0;
    protected PayloadAttribute payAtt;

@@ -101,9 +103,8 @@ public class TestPayloadNearQuery extends LuceneTestCase {
    return new PayloadNearQuery(clauses, 0, inOrder, function);
  }

- @Override
- public void setUp() throws Exception {
-   super.setUp();
+ @BeforeClass
+ public static void beforeClass() throws Exception {
    directory = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random, directory,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new PayloadAnalyzer())

@@ -123,12 +124,14 @@ public class TestPayloadNearQuery extends LuceneTestCase {
    searcher.setSimilarityProvider(similarityProvider);
  }

- @Override
- public void tearDown() throws Exception {
+ @AfterClass
+ public static void afterClass() throws Exception {
    searcher.close();
+   searcher = null;
    reader.close();
+   reader = null;
    directory.close();
-   super.tearDown();
+   directory = null;
  }

  public void test() throws IOException {

@@ -32,6 +32,8 @@ import org.apache.lucene.search.Query;
 import org.apache.lucene.search.QueryUtils;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;

 public class TestFieldMaskingSpanQuery extends LuceneTestCase {

@@ -43,17 +45,16 @@ public class TestFieldMaskingSpanQuery extends LuceneTestCase {
    return doc;
  }

- protected Field field(String name, String value) {
+ protected static Field field(String name, String value) {
    return newField(name, value, Field.Store.NO, Field.Index.ANALYZED);
  }

- protected IndexSearcher searcher;
- protected Directory directory;
- protected IndexReader reader;
+ protected static IndexSearcher searcher;
+ protected static Directory directory;
+ protected static IndexReader reader;

- @Override
- public void setUp() throws Exception {
-   super.setUp();
+ @BeforeClass
+ public static void beforeClass() throws Exception {
    directory = newDirectory();
    RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));

@@ -115,12 +116,14 @@ public class TestFieldMaskingSpanQuery extends LuceneTestCase {
    searcher = newSearcher(reader);
  }

- @Override
- public void tearDown() throws Exception {
+ @AfterClass
+ public static void afterClass() throws Exception {
    searcher.close();
+   searcher = null;
    reader.close();
+   reader = null;
    directory.close();
-   super.tearDown();
+   directory = null;
  }

  protected void check(SpanQuery q, int[] docs) throws Exception {

Some files were not shown because too many files have changed in this diff.