mirror of https://github.com/apache/lucene.git

commit 961cf395a6

LUCENE-3846: merge trunk

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene3846@1403336 13f79535-47bb-0310-9956-ffa450edef68
@@ -168,7 +168,7 @@
<classpathentry kind="lib" path="solr/contrib/velocity/lib/commons-beanutils-1.7.0.jar"/>
<classpathentry kind="lib" path="solr/contrib/velocity/lib/commons-collections-3.2.1.jar"/>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
<classpathentry kind="lib" path="lucene/test-framework/lib/randomizedtesting-runner-2.0.3.jar"/>
<classpathentry kind="lib" path="lucene/test-framework/lib/randomizedtesting-runner-2.0.4.jar"/>
<classpathentry kind="lib" path="solr/contrib/extraction/lib/apache-mime4j-core-0.7.2.jar"/>
<classpathentry kind="lib" path="solr/contrib/extraction/lib/apache-mime4j-dom-0.7.2.jar"/>
<classpathentry kind="lib" path="solr/contrib/extraction/lib/fontbox-1.7.0.jar"/>

@@ -2,7 +2,7 @@
<library name="JUnit">
<CLASSES>
<root url="jar://$PROJECT_DIR$/lucene/test-framework/lib/junit-4.10.jar!/" />
<root url="jar://$PROJECT_DIR$/lucene/test-framework/lib/randomizedtesting-runner-2.0.3.jar!/" />
<root url="jar://$PROJECT_DIR$/lucene/test-framework/lib/randomizedtesting-runner-2.0.4.jar!/" />
</CLASSES>
<JAVADOC />
<SOURCES />

@@ -15,5 +15,7 @@
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" scope="TEST" module-name="lucene-test-framework" />
<orderEntry type="module" module-name="lucene-core" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" scope="TEST" module-name="analysis-common" />
</component>
</module>

@@ -54,6 +54,17 @@
<artifactId>lucene-core</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>lucene-queries</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>lucene-analyzers-common</artifactId>
<version>${project.version}</version>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<sourceDirectory>${module-path}/src/java</sourceDirectory>

@@ -24,7 +24,7 @@
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-parent</artifactId>
<version>@version@</version>
<relativePath>../pom.xml</relativePath>
<relativePath>../../../pom.xml</relativePath>
</parent>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-core</artifactId>

@@ -434,7 +434,7 @@
<dependency>
<groupId>com.carrotsearch.randomizedtesting</groupId>
<artifactId>randomizedtesting-runner</artifactId>
<version>2.0.3</version>
<version>2.0.4</version>
</dependency>
</dependencies>
</dependencyManagement>
@@ -32,12 +32,17 @@ class FindHyperlinks(HTMLParser):

def __init__(self, baseURL):
HTMLParser.__init__(self)
self.stack = []
self.anchors = set()
self.links = []
self.baseURL = baseURL
self.printed = False

def handle_starttag(self, tag, attrs):
# NOTE: I don't think 'a' should be in here. But try debugging
# NumericRangeQuery.html. (Could be javadocs bug, its a generic type...)
if tag not in ('link', 'meta', 'frame', 'br', 'hr', 'p', 'li', 'img', 'col', 'a'):
self.stack.append(tag)
if tag == 'a':
name = None
href = None

@@ -74,6 +79,18 @@ class FindHyperlinks(HTMLParser):
else:
raise RuntimeError('couldn\'t find an href nor name in link in %s: only got these attrs: %s' % (self.baseURL, attrs))

def handle_endtag(self, tag):
if tag in ('link', 'meta', 'frame', 'br', 'hr', 'p', 'li', 'img', 'col', 'a'):
return

if len(self.stack) == 0:
raise RuntimeError('%s %s:%s: saw </%s> no opening <%s>' % (self.baseURL, self.getpos()[0], self.getpos()[1], tag, self.stack[-1]))

if self.stack[-1] == tag:
self.stack.pop()
else:
raise RuntimeError('%s %s:%s: saw </%s> but expected </%s>' % (self.baseURL, self.getpos()[0], self.getpos()[1], tag, self.stack[-1]))

def printFile(self):
if not self.printed:
print()
@@ -657,11 +657,21 @@ def verifyUnpacked(project, artifact, unpackPath, version, tmpDir):
print(' run tests w/ Java 6...')
run('%s; ant test' % javaExe('1.6'), '%s/test.log' % unpackPath)
run('%s; ant jar' % javaExe('1.6'), '%s/compile.log' % unpackPath)
testDemo(isSrc, version)
testDemo(isSrc, version, '1.6')
# test javadocs
print(' generate javadocs w/ Java 6...')
run('%s; ant javadocs' % javaExe('1.6'), '%s/javadocs.log' % unpackPath)
checkJavadocpath('%s/build/docs' % unpackPath)

print(' run tests w/ Java 7...')
run('%s; ant clean test' % javaExe('1.7'), '%s/test.log' % unpackPath)
run('%s; ant jar' % javaExe('1.7'), '%s/compile.log' % unpackPath)
testDemo(isSrc, version, '1.7')

print(' generate javadocs w/ Java 7...')
run('%s; ant javadocs' % javaExe('1.7'), '%s/javadocs.log' % unpackPath)
checkJavadocpathFull('%s/build/docs' % unpackPath)

else:
os.chdir('solr')
# DISABLED until solr tests consistently pass

@@ -679,8 +689,8 @@ def verifyUnpacked(project, artifact, unpackPath, version, tmpDir):

# test javadocs
print(' generate javadocs w/ Java 7...')
run('%s; ant javadocs' % javaExe('1.7'), '%s/javadocs.log' % unpackPath)
checkJavadocpath('%s/solr/build/docs' % unpackPath, False)
run('%s; ant clean javadocs' % javaExe('1.7'), '%s/javadocs.log' % unpackPath)
checkJavadocpathFull('%s/solr/build/docs' % unpackPath, False)

print(' test solr example w/ Java 6...')
run('%s; ant clean example' % javaExe('1.6'), '%s/antexample.log' % unpackPath)

@@ -699,7 +709,8 @@ def verifyUnpacked(project, artifact, unpackPath, version, tmpDir):
checkAllJARs(os.getcwd(), project, version)

if project == 'lucene':
testDemo(isSrc, version)
testDemo(isSrc, version, '1.6')
testDemo(isSrc, version, '1.7')

else:
checkSolrWAR('%s/example/webapps/solr.war' % unpackPath, version)

@@ -819,6 +830,9 @@ def testSolrExample(unpackPath, javaPath, isSrc):

os.chdir('..')

# the weaker check: we can use this on java6 for some checks,
# but its generated HTML is hopelessly broken so we cannot run
# the link checking that checkJavadocpathFull does.
def checkJavadocpath(path, failOnMissing=True):
# check for level='package'
# we fail here if its screwed up

@@ -831,11 +845,20 @@ def checkJavadocpath(path, failOnMissing=True):
# raise RuntimeError('javadoc problems')
print('\n***WARNING***: javadocs want to fail!\n')

# full checks
def checkJavadocpathFull(path, failOnMissing=True):
# check for missing, etc
checkJavadocpath(path, failOnMissing)

# also validate html/check for broken links
if checkJavadocLinks.checkAll(path):
raise RuntimeError('broken javadocs links found!')

def testDemo(isSrc, version):
print(' test demo...')
def testDemo(isSrc, version, jdk):
if os.path.exists('index'):
shutil.rmtree('index') # nuke any index from any previous iteration

print(' test demo with %s...' % jdk)
sep = ';' if cygwin else ':'
if isSrc:
cp = 'build/core/classes/java{0}build/demo/classes/java{0}build/analysis/common/classes/java{0}build/queryparser/classes/java'.format(sep)

@@ -843,8 +866,8 @@ def testDemo(isSrc, version):
else:
cp = 'core/lucene-core-{0}.jar{1}demo/lucene-demo-{0}.jar{1}analysis/common/lucene-analyzers-common-{0}.jar{1}queryparser/lucene-queryparser-{0}.jar'.format(version, sep)
docsDir = 'docs'
run('%s; java -cp "%s" org.apache.lucene.demo.IndexFiles -index index -docs %s' % (javaExe('1.6'), cp, docsDir), 'index.log')
run('%s; java -cp "%s" org.apache.lucene.demo.SearchFiles -index index -query lucene' % (javaExe('1.6'), cp), 'search.log')
run('%s; java -cp "%s" org.apache.lucene.demo.IndexFiles -index index -docs %s' % (javaExe(jdk), cp, docsDir), 'index.log')
run('%s; java -cp "%s" org.apache.lucene.demo.SearchFiles -index index -query lucene' % (javaExe(jdk), cp), 'search.log')
reMatchingDocs = re.compile('(\d+) total matching documents')
m = reMatchingDocs.search(open('search.log', encoding='UTF-8').read())
if m is None:
@@ -12,6 +12,10 @@
# 2. The pathname of the Ant build script to be built.
# 3. The pathname of common-build.xml, which will be imported
# in the Ant build script to be built.
# 4. Whether to prompt for credentials, rather than consulting
# settings.xml: boolean, e.g. "true" or "false"
# 5. The ID of the target repository
# 6. The URL to the target repository
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with

@@ -33,33 +37,72 @@ use strict;
use warnings;
use File::Basename;
use File::Find;
use Cwd 'abs_path';
use File::Path qw(make_path);

my $num_artifacts = 0;
my $maven_dist_dir = $ARGV[0];
my $maven_dist_dir = abs_path($ARGV[0]);
my $output_build_xml_file = $ARGV[1];
my $common_build_xml = $ARGV[2];
my $m2_credentials_prompt = $ARGV[3];
my $m2_repository_id = $ARGV[4];
my $m2_repository_url = $ARGV[5];
if ($^O eq 'cygwin') { # Make sure Cygwin Perl can find the output path
$output_build_xml_file = `cygpath -u "$output_build_xml_file"`;
$output_build_xml_file =~ s/\s+$//; # Trim trailing whitespace
$output_build_xml_file =~ s/^\s+//; # Trim leading whitespace
}
my ($output_file, $output_dir) = fileparse($output_build_xml_file);

my @basepaths = ();
my $grandparent_pom = '';
my @parent_poms = ();
sub find_poms;
File::Find::find({follow => 1, wanted => \&find_poms}, $maven_dist_dir);

my $parent_pom_targets = '';
if (@parent_poms) {
$parent_pom_targets = "<parent-poms>\n";
if ($grandparent_pom) {
$parent_pom_targets .= qq! <artifact:pom id="grandparent" file="$grandparent_pom"/>\n!;
}
my $n = 0;
for my $parent_pom (@parent_poms) {
$parent_pom_targets .= qq! <artifact:pom id="parent.$n" file="$parent_pom"/>\n!;
++$n;
}
$parent_pom_targets .= " </parent-poms>\n";
}

make_path($output_dir);
open my $output_build_xml, ">$output_build_xml_file"
or die "ERROR opening '$ARGV[1]' for writing: $!";

print $output_build_xml qq!<?xml version="1.0"?>
<project>
<project xmlns:artifact="antlib:org.apache.maven.artifact.ant">
<import file="${common_build_xml}"/>

<target name="stage-maven" depends="install-maven-tasks">
<sequential>
!;

sub wanted;
my $credentials = '';
if ($m2_credentials_prompt !~ /\A(?s:f(?:alse)?|no?)\z/) {
print $output_build_xml qq!
<input message="Enter $m2_repository_id username: >" addproperty="m2.repository.username"/>
<echo>WARNING: ON SOME PLATFORMS YOUR PASSPHRASE WILL BE ECHOED BACK\!\!\!\!\!</echo>
<input message="Enter $m2_repository_id password: >" addproperty="m2.repository.password">
<handler type="secure"/>
</input>\n!;

File::Find::find({follow => 1, wanted => \&wanted}, $maven_dist_dir);
$credentials = q!<credentials>
<authentication username="${m2.repository.username}" password="${m2.repository.password}"/>
</credentials>!;
}

for my $basepath (@basepaths) {
output_deploy_stanza($basepath);
}

print $output_build_xml q!
</sequential>

@@ -72,7 +115,7 @@ close $output_build_xml;
print "Wrote '$output_build_xml_file' to stage $num_artifacts Maven artifacts.\n";
exit;

sub wanted {
sub find_poms {
/^(.*)\.pom\z/s && do {
my $pom_dir = $File::Find::dir;
if ($^O eq 'cygwin') { # Output windows-style paths on Windows

@@ -83,6 +126,18 @@ sub wanted {
my $basefile = $_;
$basefile =~ s/\.pom\z//;
my $basepath = "$pom_dir/$basefile";
push @basepaths, $basepath;

if ($basefile =~ /grandparent/) {
$grandparent_pom = "$basepath.pom";
} elsif ($basefile =~ /parent/) {
push @parent_poms, "$basepath.pom";
}
}
}

sub output_deploy_stanza {
my $basepath = shift;
my $pom_file = "$basepath.pom";
my $jar_file = "$basepath.jar";
my $war_file = "$basepath.war";

@@ -90,14 +145,17 @@ sub wanted {
if (-f $war_file) {
print $output_build_xml qq!
<m2-deploy pom.xml="${pom_file}" jar.file="${war_file}">
$parent_pom_targets
<artifact-attachments>
<attach file="${pom_file}.asc" type="pom.asc"/>
<attach file="${war_file}.asc" type="war.asc"/>
</artifact-attachments>
$credentials
</m2-deploy>\n!;
} elsif (-f $jar_file) {
print $output_build_xml qq!
<m2-deploy pom.xml="${pom_file}" jar.file="${jar_file}">
$parent_pom_targets
<artifact-attachments>
<attach file="${basepath}-sources.jar" classifier="sources"/>
<attach file="${basepath}-javadoc.jar" classifier="javadoc"/>

@@ -106,16 +164,18 @@ sub wanted {
<attach file="${basepath}-sources.jar.asc" classifier="sources" type="jar.asc"/>
<attach file="${basepath}-javadoc.jar.asc" classifier="javadoc" type="jar.asc"/>
</artifact-attachments>
$credentials
</m2-deploy>\n!;
} else {
print $output_build_xml qq!
<m2-deploy pom.xml="${pom_file}">
$parent_pom_targets
<artifact-attachments>
<attach file="${pom_file}.asc" type="pom.asc"/>
</artifact-attachments>
$credentials
</m2-deploy>\n!;
}

++$num_artifacts;
};
}
@@ -35,7 +35,7 @@ New Features
output for a single input. UpToTwoPositiveIntsOutputs was moved
from lucene/core to lucene/misc. (Mike McCandless)

* LUCENE-3842: New AnalyzingCompletionLookup, for doing auto-suggest
* LUCENE-3842: New AnalyzingSuggester, for doing auto-suggest
using an analyzer. This can create powerful suggesters: if the analyzer
remove stop words then "ghost chr..." could suggest "The Ghost of
Christmas Past"; if SynonymFilter is used to map wifi and wireless

@@ -83,6 +83,9 @@ Bug Fixes
romaji even for out-of-vocabulary kana cases (e.g. half-width forms).
(Robert Muir)

* LUCENE-4504: Fix broken sort comparator in ValueSource.getSortField,
used when sorting by a function query. (Tom Shally via Robert Muir)

Optimizations

* LUCENE-4443: Lucene41PostingsFormat no longer writes unnecessary offsets

@@ -114,6 +117,10 @@ Optimizations

Build

* Upgrade randomized testing to version 2.0.4: avoid hangs on shutdown
hooks hanging forever by calling Runtime.halt() in addition to
Runtime.exit() after a short delay to allow graceful shutdown (Dawid Weiss)

* LUCENE-4451: Memory leak per unique thread caused by
RandomizedContext.contexts static map. Upgrade randomized testing
to version 2.0.2 (Mike McCandless, Dawid Weiss)
@@ -20,11 +20,6 @@
<TITLE>org.apache.lucene.analysis.payloads</TITLE>
</HEAD>
<BODY>
<DIV>Provides various convenience classes for creating payloads on Tokens.
</DIV>
<DIV> </DIV>
<DIV align="center">
Copyright © 2007 <A HREF="http://www.apache.org">Apache Software Foundation</A>
</DIV>
Provides various convenience classes for creating payloads on Tokens.
</BODY>
</HTML>

@@ -16,32 +16,10 @@
limitations under the License.
-->
<HTML>
<!--
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
--><HEAD>
<HEAD>
<TITLE>org.apache.lucene.analysis.sinks</TITLE>
</HEAD>
<BODY>
<DIV>Implementations of the SinkTokenizer that might be useful.
</DIV>
<DIV> </DIV>
<DIV align="center">
Copyright © 2007 <A HREF="http://www.apache.org">Apache Software Foundation</A>
</DIV>
Implementations of the SinkTokenizer that might be useful.
</BODY>
</HTML>
@@ -120,7 +120,7 @@ public final class ClassicTokenizer extends Tokenizer {
}

private void init(Version matchVersion) {
this.scanner = new ClassicTokenizerImpl(input);
this.scanner = new ClassicTokenizerImpl(null); // best effort NPE if you dont call reset
}

// this tokenizer generates three attributes:

@@ -134,7 +134,7 @@ public final class StandardTokenizer extends Tokenizer {
}

private final void init(Version matchVersion) {
this.scanner = new StandardTokenizerImpl(input);
this.scanner = new StandardTokenizerImpl(null); // best effort NPE if you dont call reset
}

// this tokenizer generates three attributes:

@@ -98,7 +98,7 @@ public final class UAX29URLEmailTokenizer extends Tokenizer {
*/
public UAX29URLEmailTokenizer(Version matchVersion, Reader input) {
super(input);
this.scanner = getScannerFor(matchVersion, input);
this.scanner = getScannerFor(matchVersion);
}

/**

@@ -106,7 +106,7 @@ public final class UAX29URLEmailTokenizer extends Tokenizer {
*/
public UAX29URLEmailTokenizer(Version matchVersion, AttributeSource source, Reader input) {
super(source, input);
this.scanner = getScannerFor(matchVersion, input);
this.scanner = getScannerFor(matchVersion);
}

/**

@@ -114,11 +114,11 @@ public final class UAX29URLEmailTokenizer extends Tokenizer {
*/
public UAX29URLEmailTokenizer(Version matchVersion, AttributeFactory factory, Reader input) {
super(factory, input);
this.scanner = getScannerFor(matchVersion, input);
this.scanner = getScannerFor(matchVersion);
}

private static StandardTokenizerInterface getScannerFor(Version matchVersion, Reader input) {
return new UAX29URLEmailTokenizerImpl(input);
private static StandardTokenizerInterface getScannerFor(Version matchVersion) {
return new UAX29URLEmailTokenizerImpl(null); // best effort NPE if you dont call reset
}

// this tokenizer generates three attributes:

@@ -143,7 +143,7 @@ public final class WikipediaTokenizer extends Tokenizer {
*/
public WikipediaTokenizer(Reader input, int tokenOutput, Set<String> untokenizedTypes) {
super(input);
this.scanner = new WikipediaTokenizerImpl(input);
this.scanner = new WikipediaTokenizerImpl(null); // best effort NPE if you dont call reset
init(tokenOutput, untokenizedTypes);
}

@@ -156,7 +156,7 @@ public final class WikipediaTokenizer extends Tokenizer {
*/
public WikipediaTokenizer(AttributeFactory factory, Reader input, int tokenOutput, Set<String> untokenizedTypes) {
super(factory, input);
this.scanner = new WikipediaTokenizerImpl(input);
this.scanner = new WikipediaTokenizerImpl(null); // best effort NPE if you dont call reset
init(tokenOutput, untokenizedTypes);
}

@@ -169,7 +169,7 @@ public final class WikipediaTokenizer extends Tokenizer {
*/
public WikipediaTokenizer(AttributeSource source, Reader input, int tokenOutput, Set<String> untokenizedTypes) {
super(source, input);
this.scanner = new WikipediaTokenizerImpl(input);
this.scanner = new WikipediaTokenizerImpl(null); // best effort NPE if you dont call reset
init(tokenOutput, untokenizedTypes);
}
@@ -52,9 +52,12 @@ public class TestElision extends BaseTokenStreamTestCase {
private List<String> filter(TokenFilter filter) throws IOException {
List<String> tas = new ArrayList<String>();
CharTermAttribute termAtt = filter.getAttribute(CharTermAttribute.class);
filter.reset();
while (filter.incrementToken()) {
tas.add(termAtt.toString());
}
filter.end();
filter.close();
return tas;
}

@@ -227,7 +227,7 @@ public final class JapaneseTokenizer extends Tokenizer {
outputCompounds = false;
break;
}
buffer.reset(input);
buffer.reset(null); // best effort NPE consumers that don't call reset()

resetState();

@@ -62,12 +62,16 @@ public class TestMorfologikAnalyzer extends BaseTokenStreamTestCase {
ts_1.reset();
ts_1.incrementToken();
assertEquals("first stream", "liście", termAtt_1.toString());
ts_1.end();
ts_1.close();

TokenStream ts_2 = a.tokenStream("dummy", new StringReader("danych"));
CharTermAttribute termAtt_2 = ts_2.getAttribute(CharTermAttribute.class);
ts_2.reset();
ts_2.incrementToken();
assertEquals("second stream", "dany", termAtt_2.toString());
ts_2.end();
ts_2.close();
}

/** Test stemming of mixed-case tokens. */

@@ -110,6 +114,7 @@ public class TestMorfologikAnalyzer extends BaseTokenStreamTestCase {
public final void testPOSAttribute() throws IOException {
TokenStream ts = getTestAnalyzer().tokenStream("dummy", new StringReader("liście"));

ts.reset();
assertPOSToken(ts, "liście",
"subst:sg:acc:n2",
"subst:sg:nom:n2",

@@ -127,6 +132,8 @@ public class TestMorfologikAnalyzer extends BaseTokenStreamTestCase {
assertPOSToken(ts, "lista",
"subst:sg:dat:f",
"subst:sg:loc:f");
ts.end();
ts.close();
}

/** blast some random strings through the analyzer */
@@ -41,12 +41,12 @@ import org.apache.lucene.analysis.util.TokenFilterFactory;
* <p>
* This takes one required argument, "encoder", and the rest are optional:
* <dl>
* <dt>encoder<dd> required, one of "DoubleMetaphone", "Metaphone", "Soundex", "RefinedSoundex", "Caverphone" (v2.0),
* <dt>encoder</dt><dd> required, one of "DoubleMetaphone", "Metaphone", "Soundex", "RefinedSoundex", "Caverphone" (v2.0),
* or "ColognePhonetic" (case insensitive). If encoder isn't one of these, it'll be resolved as a class name either by
* itself if it already contains a '.' or otherwise as in the same package as these others.
* <dt>inject<dd> (default=true) add tokens to the stream with the offset=0
* <dt>maxCodeLength<dd>The maximum length of the phonetic codes, as defined by the encoder. If an encoder doesn't
* support this then specifying this is an error.
* itself if it already contains a '.' or otherwise as in the same package as these others.</dd>
* <dt>inject</dt><dd> (default=true) add tokens to the stream with the offset=0</dd>
* <dt>maxCodeLength</dt><dd>The maximum length of the phonetic codes, as defined by the encoder. If an encoder doesn't
* support this then specifying this is an error.</dd>
* </dl>
*
* <pre class="prettyprint" >
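For context on the arguments described in the javadoc above, a minimal sketch of how this filter factory is typically wired into a Solr field type; the field type name and the rest of the analyzer chain are illustrative assumptions, not part of this commit:

    <fieldType name="text_phonetic" class="solr.TextField">
      <analyzer>
        <tokenizer class="solr.StandardTokenizerFactory"/>
        <!-- "encoder" is the one required argument; inject="true" (the default) also keeps the original tokens -->
        <filter class="solr.PhoneticFilterFactory" encoder="DoubleMetaphone" inject="true"/>
      </analyzer>
    </fieldType>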
@@ -19,11 +19,7 @@
<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
</head>
<body>
<div>
SmartChineseAnalyzer Hidden Markov Model package.
</div>
<div>
@lucene.experimental
</div>
</body>
</html>

@@ -20,12 +20,8 @@
<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
</head>
<body>
<div>
Analyzer for Simplified Chinese, which indexes words.
</div>
<div>
@lucene.experimental
</div>
<div>
Three analyzers are provided for Chinese, each of which treats Chinese text in a different way.
<ul>

@@ -24,7 +24,7 @@ import java.util.Locale;
import java.util.Map;

/**
* Parser for trec doc content, invoked on doc text excluding <DOC> and <DOCNO>
* Parser for trec doc content, invoked on doc text excluding <DOC> and <DOCNO>
* which are handled in TrecContentSource. Required to be stateless and hence thread safe.
*/
public abstract class TrecDocParser {

@@ -20,8 +20,8 @@
<TITLE>Benchmarking Lucene By Tasks</TITLE>
</HEAD>
<BODY>
<DIV>
Benchmarking Lucene By Tasks.
<DIV>
<p>
This package provides "task based" performance benchmarking of Lucene.
One can use the predefined benchmarks, or create new ones.

@@ -251,7 +251,7 @@ The following is an informal description of the supported syntax.
fixed, so for deletion in loops it is better to use the
<code>doc.delete.step</code> property.
</li>
<li><b>SetProp</b> takes a <code>name,value<code> mandatory param,
<li><b>SetProp</b> takes a <code>name,value</code> mandatory param,
',' used as a separator.
</li>
<li><b>SearchTravRetTask</b> and <b>SearchTravTask</b> take a numeric

@@ -20,9 +20,10 @@
<TITLE>Lucene Benchmarking Package</TITLE>
</HEAD>
<BODY>
The benchmark contribution contains tools for benchmarking Lucene using standard, freely available corpora.
<DIV>
<p/>
The benchmark contribution contains tools for benchmarking Lucene using standard, freely available corpora. ANT will
<p/>
ANT will
download the corpus automatically, place it in a temp directory and then unpack it to the working.dir directory specified in the build.
The temp directory
and working directory can be safely removed after a run. However, the next time the task is run, it will need to download the files again.
@@ -231,6 +231,13 @@

<!-- we check for broken links across all documentation -->
<target name="-documentation-lint" if="documentation-lint.supported" depends="documentation">
<echo message="checking for broken html..."/>
<jtidy-macro>
<!-- NOTE: must currently exclude deprecated-list due to a javadocs bug (as of 1.7.0_09)
javadocs generates invalid XML if you deprecate a method that takes a parameter
with a generic type -->
<fileset dir="build/docs" includes="**/*.html" excludes="**/deprecated-list.html"/>
</jtidy-macro>
<echo message="Checking for broken links..."/>
<check-broken-links dir="build/docs"/>
<echo message="Checking for missing docs..."/>
@@ -1,5 +1,3 @@
package org.apache.lucene.classification;

/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with

@@ -16,24 +14,39 @@ package org.apache.lucene.classification;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.classification;

/**
* The result of a call to {@link Classifier#assignClass(String)} holding an assigned class and a score.
* @lucene.experimental
*/
public class ClassificationResult {

private String assignedClass;
private double score;

/**
* Constructor
* @param assignedClass the class <code>String</code> assigned by a {@link Classifier}
* @param score the score for the assignedClass as a <code>double</code>
*/
public ClassificationResult(String assignedClass, double score) {
this.assignedClass = assignedClass;
this.score = score;
}

/**
* retrieve the result class
* @return a <code>String</code> representing an assigned class
*/
public String getAssignedClass() {
return assignedClass;
}

/**
* retrieve the result score
* @return a <code>double</code> representing a result score
*/
public double getScore() {
return score;
}

@@ -1,5 +1,3 @@
package org.apache.lucene.classification;

/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with

@@ -16,6 +14,7 @@ package org.apache.lucene.classification;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.classification;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.AtomicReader;

@@ -1,5 +1,3 @@
package org.apache.lucene.classification;

/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with

@@ -16,6 +14,7 @@ package org.apache.lucene.classification;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.classification;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.AtomicReader;

@@ -43,6 +42,10 @@ public class KNearestNeighborClassifier implements Classifier {
private IndexSearcher indexSearcher;
private int k;

/**
* Create a {@link Classifier} using kNN algorithm
* @param k the number of neighbors to analyze as an <code>int</code>
*/
public KNearestNeighborClassifier(int k) {
this.k = k;
}

@@ -1,5 +1,3 @@
package org.apache.lucene.classification;

/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with

@@ -16,6 +14,7 @@ package org.apache.lucene.classification;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.classification;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;

@@ -1,5 +1,3 @@
package org.apache.lucene.classification;

/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with

@@ -16,6 +14,7 @@ package org.apache.lucene.classification;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.classification;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;

@@ -31,7 +30,7 @@ import org.junit.Before;
/**
* Base class for testing {@link Classifier}s
*/
public class ClassificationTestBase extends LuceneTestCase {
public abstract class ClassificationTestBase extends LuceneTestCase {

private RandomIndexWriter indexWriter;
private String textFieldName;

@@ -1,5 +1,3 @@
package org.apache.lucene.classification;

/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with

@@ -16,6 +14,7 @@ package org.apache.lucene.classification;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.classification;

import org.apache.lucene.analysis.MockAnalyzer;
import org.junit.Test;

@@ -1,5 +1,3 @@
package org.apache.lucene.classification;

/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with

@@ -16,6 +14,7 @@ package org.apache.lucene.classification;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.classification;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
@@ -76,6 +76,16 @@ public abstract class AbstractTestCompressionMode extends LuceneTestCase {
return Arrays.copyOfRange(bytes.bytes, bytes.offset, bytes.offset + bytes.length);
}

static byte[] copyCompressedData(Uncompressor uncompressor, byte[] compressed) throws IOException {
GrowableByteArrayDataOutput out = new GrowableByteArrayDataOutput(compressed.length);
uncompressor.copyCompressedData(new ByteArrayDataInput(compressed), out);
return Arrays.copyOf(out.bytes, out.length);
}

byte[] copyCompressedData(byte[] compressed) throws IOException {
return copyCompressedData(mode.newUncompressor(), compressed);
}

public void testUncompress() throws IOException {
final byte[] uncompressed = randomArray();
final byte[] compressed = compress(uncompressed);

@@ -103,9 +113,47 @@ public abstract class AbstractTestCompressionMode extends LuceneTestCase {
public void testCopyCompressedData() throws IOException {
final byte[] uncompressed = randomArray();
final byte[] compressed = compress(uncompressed);
GrowableByteArrayDataOutput out = new GrowableByteArrayDataOutput(uncompressed.length);
mode.newUncompressor().copyCompressedData(new ByteArrayDataInput(compressed), out);
assertArrayEquals(compressed, Arrays.copyOf(out.bytes, out.length));
assertArrayEquals(compressed, copyCompressedData(compressed));
}

public void test(byte[] uncompressed) throws IOException {
final byte[] compressed = compress(uncompressed);
final byte[] restored = uncompress(compressed);
assertEquals(uncompressed.length, restored.length);
assertArrayEquals(compressed, copyCompressedData(compressed));
}

public void testEmptySequence() throws IOException {
test(new byte[0]);
}

public void testShortSequence() throws IOException {
test(new byte[] { (byte) random().nextInt(256) });
}

public void testIncompressible() throws IOException {
final byte[] uncompressed = new byte[RandomInts.randomIntBetween(random(), 20, 256)];
for (int i = 0; i < uncompressed.length; ++i) {
uncompressed[i] = (byte) i;
}
test(uncompressed);
}

// for LZ compression

public void testShortLiteralsAndMatchs() throws IOException {
// literals and matchs lengths <= 15
final byte[] uncompressed = "1234562345673456745678910123".getBytes("UTF-8");
test(uncompressed);
}

public void testLongLiteralsAndMatchs() throws IOException {
// literals and matchs length > 16
final byte[] uncompressed = new byte[RandomInts.randomIntBetween(random(), 300, 1024)];
for (int i = 0; i < uncompressed.length; ++i) {
uncompressed[i] = (byte) i;
}
test(uncompressed);
}

}
@@ -181,6 +181,7 @@
<makeurl file="${maven.dist.dir}" property="m2.repository.url" validate="false"/>
<property name="m2.repository.private.key" value="${user.home}/.ssh/id_dsa"/>
<property name="m2.repository.id" value="local"/>
<property name="m2.credentials.prompt" value="true"/>

<property name="jflex.home" location="${common.dir}"/>

@@ -273,10 +274,11 @@
<contains string="${java.vm.name}" substring="jrockit" casesensitive="false"/>
</or>
<or>
<equals arg1="${ant.java.version}" arg2="1.6"/>
<equals arg1="${ant.java.version}" arg2="1.7"/>
<equals arg1="${ant.java.version}" arg2="1.8"/>
</or>
<!-- TODO: Fix this! For now only run this on 64bit, because jTIDY OOMs with default heap size: -->
<contains string="${os.arch}" substring="64"/>
</and>
</condition>

@@ -457,32 +459,19 @@

<macrodef name="m2-deploy" description="Builds a Maven artifact">
<element name="artifact-attachments" optional="yes"/>
<element name="parent-poms" optional="yes"/>
<element name="credentials" optional="yes"/>
<attribute name="pom.xml"/>
<attribute name="jar.file" default="${build.dir}/${final.name}.jar"/>
<sequential>
<artifact:install-provider artifactId="wagon-ssh" version="1.0-beta-7"/>
<parent-poms/>
<artifact:pom id="maven.project" file="@{pom.xml}"/>
<artifact:deploy file="@{jar.file}">
<artifact-attachments/>
<remoteRepository id="${m2.repository.id}" url="${m2.repository.url}"/>
<pom refid="maven.project"/>
</artifact:deploy>
</sequential>
</macrodef>

<macrodef name="m2-deploy-with-pom-template" description="Builds a Maven artifact given a POM template">
<attribute name="pom.xml"/>
<attribute name="jar.file"/>
<sequential>
<copy file="@{pom.xml}" tofile="${maven.build.dir}/pom.xml" overwrite="true">
<filterset begintoken="@" endtoken="@">
<filter token="version" value="${version}"/>
</filterset>
</copy>
<artifact:install-provider artifactId="wagon-ssh" version="1.0-beta-7"/>
<artifact:pom id="maven.project" file="${maven.build.dir}/pom.xml" />
<artifact:deploy file="@{jar.file}">
<remoteRepository id="${m2.repository.id}" url="${m2.repository.url}"/>
<remoteRepository id="${m2.repository.id}" url="${m2.repository.url}">
<credentials/>
</remoteRepository>
<pom refid="maven.project"/>
</artifact:deploy>
</sequential>

@@ -1389,14 +1378,25 @@ ${tests-output}/junit4-*.suites - per-JVM executed suites
<sequential>
<property name="output.build.xml" location="${build.dir}/stage_maven_build.xml"/>
<property name="dev-tools.scripts.dir" value="../dev-tools/scripts"/>
<exec dir="." executable="${perl.exe}" failonerror="true" outputproperty="stage.maven.script.output">
<exec dir="." executable="${perl.exe}" failonerror="false" outputproperty="stage.maven.script.output"
resultproperty="stage.maven.script.success">
<arg value="-CSD"/>
<arg value="${dev-tools.scripts.dir}/write.stage.maven.build.xml.pl"/>
<arg value="${maven.dist.dir}"/> <!-- Maven distribution artifacts directory -->
<arg value="${output.build.xml}"/> <!-- Ant build file to be written -->
<arg value="${common.dir}/common-build.xml"/> <!-- Imported from the ant file to be written -->
<arg value="${m2.credentials.prompt}"/>
<arg value="${m2.repository.id}"/>
<arg value="${m2.repository.url}"/>
</exec>
<echo message="${stage.maven.script.output}"/>
<fail message="maven stage script failed!">
<condition>
<not>
<equals arg1="${stage.maven.script.success}" arg2="0"/>
</not>
</condition>
</fail>
</sequential>
<echo>Invoking target stage-maven in ${output.build.xml} now...</echo>
<ant target="stage-maven" antfile="${output.build.xml}" inheritall="false">

@@ -1564,6 +1564,26 @@ ${tests-output}/junit4-*.suites - per-JVM executed suites
</sequential>
</macrodef>

<!-- TODO: if we make a custom ant task, we can give better
errors and stuff here, and not make a stupid temp dir -->
<macrodef name="jtidy-macro">
<element name="nested" implicit="yes" optional="yes"/>
<sequential>
<ivy:cachepath organisation="net.sf.jtidy" module="jtidy" revision="r938"
log="download-only" inline="true" conf="master" type="jar" pathid="jtidy.classpath" />
<taskdef name="tidy" classname="org.w3c.tidy.ant.JTidyTask" classpathref="jtidy.classpath"/>
<delete dir="${common.dir}/build/jtidy_tmp" quiet="true"/>
<echo message="Checking for broken html (such as invalid tags)..." taskname="jtidy"/>
<tidy failonerror="true" destdir="${common.dir}/build/jtidy_tmp">
<nested/>
<parameter name="input-encoding" value="UTF-8" />
<parameter name="only-errors" value="true" />
<parameter name="show-warnings" value="false" />
</tidy>
<delete dir="${common.dir}/build/jtidy_tmp" quiet="true"/>
</sequential>
</macrodef>

<property name="failonjavadocwarning" value="true"/>
<macrodef name="invoke-javadoc">
<element name="sources" optional="yes"/>

@@ -1604,7 +1624,7 @@ ${tests-output}/junit4-*.suites - per-JVM executed suites
<link offline="true" packagelistLoc="${javadoc.dir}"/>
<link offline="true" href="${javadoc.link}" packagelistLoc="${javadoc.packagelist.dir}/java6"/>
<bottom><![CDATA[
<address>Copyright © ${year} Apache Software Foundation. All Rights Reserved.</address>
<i>Copyright © ${year} Apache Software Foundation. All Rights Reserved.</i>
<script src='{@docRoot}/prettify.js' type='text/javascript'></script>
<script type='text/javascript'>
(function(){

@@ -1786,6 +1806,7 @@ ${tests-output}/junit4-*.suites - per-JVM executed suites
<available property="gpg.input.handler" classname="org.apache.tools.ant.input.SecureInputHandler"
value="org.apache.tools.ant.input.SecureInputHandler"/>
<!--else:--><property name="gpg.input.handler" value="org.apache.tools.ant.input.DefaultInputHandler"/>
<echo>WARNING: ON SOME PLATFORMS YOUR PASSPHRASE WILL BE ECHOED BACK!!!!!</echo>
<input message="Enter GPG keystore password: >" addproperty="gpg.passphrase">
<handler classname="${gpg.input.handler}" />
</input>
@@ -27,20 +27,21 @@ Lucene 4.0 file format.
<ul>
<li><a href="#Introduction">Introduction</a></li>
<li><a href="#Definitions">Definitions</a>
<ul>
<li><a href="#Inverted_Indexing">Inverted Indexing</a></li>
<li><a href="#Types_of_Fields">Types of Fields</a></li>
<li><a href="#Segments">Segments</a></li>
<li><a href="#Document_Numbers">Document Numbers</a></li>
</ul>
<ul>
<li><a href="#Inverted_Indexing">Inverted Indexing</a></li>
<li><a href="#Types_of_Fields">Types of Fields</a></li>
<li><a href="#Segments">Segments</a></li>
<li><a href="#Document_Numbers">Document Numbers</a></li>
</ul>
</li>
<li><a href="#Overview">Index Structure Overview</a></li>
<li><a href="#File_Naming">File Naming</a></li>
<li><a href="#file-names">Summary of File Extensions</a></li>
<ul>
<li><a href="#Lock_File">Lock File</a></li>
<li><a href="#History">History</a></li>
<li><a href="#Limitations">Limitations</a></li>
<ul>
<li><a href="#Lock_File">Lock File</a></li>
<li><a href="#History">History</a></li>
<li><a href="#Limitations">Limitations</a></li>
</ul>
</ul>
</div>
<a name="Introduction"></a>

@@ -27,20 +27,21 @@ Lucene 4.1 file format.
<ul>
<li><a href="#Introduction">Introduction</a></li>
<li><a href="#Definitions">Definitions</a>
<ul>
<li><a href="#Inverted_Indexing">Inverted Indexing</a></li>
<li><a href="#Types_of_Fields">Types of Fields</a></li>
<li><a href="#Segments">Segments</a></li>
<li><a href="#Document_Numbers">Document Numbers</a></li>
</ul>
<ul>
<li><a href="#Inverted_Indexing">Inverted Indexing</a></li>
<li><a href="#Types_of_Fields">Types of Fields</a></li>
<li><a href="#Segments">Segments</a></li>
<li><a href="#Document_Numbers">Document Numbers</a></li>
</ul>
</li>
<li><a href="#Overview">Index Structure Overview</a></li>
<li><a href="#File_Naming">File Naming</a></li>
<li><a href="#file-names">Summary of File Extensions</a></li>
<ul>
<li><a href="#Lock_File">Lock File</a></li>
<li><a href="#History">History</a></li>
<li><a href="#Limitations">Limitations</a></li>
<ul>
<li><a href="#Lock_File">Lock File</a></li>
<li><a href="#History">History</a></li>
<li><a href="#Limitations">Limitations</a></li>
</ul>
</ul>
</div>
<a name="Introduction"></a>
@@ -31,7 +31,7 @@ import org.apache.lucene.util.BytesRef;
// TODO: we need to break out separate StoredField...

/** Represents a single field for indexing. IndexWriter
* consumes Iterable<IndexableField> as a document.
* consumes Iterable<IndexableField> as a document.
*
* @lucene.experimental */

@@ -83,7 +83,7 @@ public abstract class DocIdSet {
* should be cached without copying it into a BitSet. The default is to return
* <code>false</code>. If you have an own <code>DocIdSet</code> implementation
* that does its iteration very effective and fast without doing disk I/O,
* override this method and return <code>true</here>.
* override this method and return <code>true</code>.
*/
public boolean isCacheable() {
return false;

@@ -289,7 +289,7 @@ public class SearcherLifetimeManager implements Closeable {
* should still call {@link #release} after they are
* done.
*
* <p><b>NOTE: you must ensure no other threads are
* <p><b>NOTE</b>: you must ensure no other threads are
* calling {@link #record} while you call close();
* otherwise it's possible not all searcher references
* will be freed. */
@@ -20,6 +20,10 @@ package org.apache.lucene.index;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.codecs.compressing.CompressingCodec;
import org.apache.lucene.codecs.compressing.CompressingStoredFieldsFormat;
import org.apache.lucene.codecs.compressing.CompressingStoredFieldsIndex;
import org.apache.lucene.codecs.compressing.CompressionMode;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;

@@ -27,12 +31,13 @@ import org.apache.lucene.document.TextField;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.store.BaseDirectoryWrapper;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TimeUnits;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;

import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
import com.carrotsearch.randomizedtesting.generators.RandomInts;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;

/**
* Test indexes ~82M docs with 26 terms each, so you get > Integer.MAX_VALUE terms/docs pairs

@@ -49,13 +54,25 @@ public class Test2BPostings extends LuceneTestCase {
((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
}

IndexWriter w = new IndexWriter(dir,
new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
.setRAMBufferSizeMB(256.0)
.setMergeScheduler(new ConcurrentMergeScheduler())
.setMergePolicy(newLogMergePolicy(false, 10))
.setOpenMode(IndexWriterConfig.OpenMode.CREATE));
.setOpenMode(IndexWriterConfig.OpenMode.CREATE);

if (iwc.getCodec() instanceof CompressingCodec) {
CompressingStoredFieldsFormat fmt = (CompressingStoredFieldsFormat) ((CompressingCodec) iwc.getCodec()).storedFieldsFormat();
// NOTE: copied from CompressingCodec.randomInstance(), but fixed to not
// use any memory index ... maybe we can instead add
// something like CompressingMemory to the
// SuppressCodecs list...?:
final CompressionMode mode = RandomPicks.randomFrom(random(), CompressionMode.values());
final int chunkSize = RandomInts.randomIntBetween(random(), 1, 500);
iwc.setCodec(new CompressingCodec(mode, chunkSize, CompressingStoredFieldsIndex.DISK_DOC));
}

IndexWriter w = new IndexWriter(dir, iwc);

MergePolicy mp = w.getConfig().getMergePolicy();
if (mp instanceof LogByteSizeMergePolicy) {
@@ -134,6 +134,7 @@ table.code_description td {
<li class="toc_first"><a href="#optimizations">Optimizations</a></li>
<li class="toc_first"><a href="#concurrent_indexing_search">Concurrent Indexing and Search</a></li>
</ol>
</div>

<h1 class="section"><a name="intro">Introduction</a></h1>
<p>

@@ -59,7 +59,7 @@ import org.apache.lucene.util.encoding.IntEncoder;
* DirectoryReader reader = DirectoryReader.open(oldDir);
* IndexWriterConfig conf = new IndexWriterConfig(VER, ANALYZER);
* IndexWriter writer = new IndexWriter(newDir, conf);
* List<AtomicReaderContext> leaves = reader.leaves();
* List<AtomicReaderContext> leaves = reader.leaves();
* AtomicReader wrappedLeaves[] = new AtomicReader[leaves.size()];
* for (int i = 0; i < leaves.size(); i++) {
* wrappedLeaves[i] = new OrdinalMappingAtomicReader(leaves.get(i).reader(), ordmap);

@@ -32,7 +32,7 @@ import org.apache.lucene.facet.index.attributes.CategoryAttribute;
* <P>
* A CategoryAttributesStream object can be reused for producing more than one
* stream. To do that, the user should cause the underlying
* Iterable<CategoryAttribute> object to return a new set of categories, and
* Iterable<CategoryAttribute> object to return a new set of categories, and
* then call {@link #reset()} to allow this stream to be used again.
*
* @lucene.experimental

@@ -47,7 +47,7 @@ import java.io.OutputStream;
* manner: <code><pre class="prettyprint">
* IntEncoder fourFlags =
* new SortingEncoderFilter(new UniqueValuesIntEncoder(new DGapIntEncoder(new FlagsIntEncoderImpl())));
* </code></pre>
* </pre></code>
*
* @lucene.experimental
*/
@@ -1 +0,0 @@
c9b5b280935fda44bb2c905572cfab0192879bcb

@@ -0,0 +1 @@
6feed9f7b79b3a9fa2cf52a2ac171e87a261de56

@@ -1 +0,0 @@
c31bc570c1e2f7584a09aa4853de7f3e3785a7ef

@@ -0,0 +1 @@
aefbd9ebaae63716d9182fcce221ec4bb2528dfc
@@ -192,7 +192,7 @@ public abstract class ValueSource {
if (docValue < value) {
return -1;
} else if (docValue > value) {
return -1;
return 1;
} else {
return 0;
}
@@ -0,0 +1,99 @@
package org.apache.lucene.queries.function;

/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

import java.io.IOException;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.queries.function.valuesource.IntFieldSource;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;

/** Test that functionquery's getSortField() actually works */
public class TestFunctionQuerySort extends LuceneTestCase {

public void testSearchAfterWhenSortingByFunctionValues() throws IOException {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, null);
iwc.setMergePolicy(newLogMergePolicy()); // depends on docid order
RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);

Document doc = new Document();
Field field = new StringField("value", "", Field.Store.YES);
doc.add(field);

// Save docs unsorted (decreasing value n, n-1, ...)
final int NUM_VALS = 5;
for (int val = NUM_VALS; val > 0; val--) {
field.setStringValue(Integer.toString(val));
writer.addDocument(doc);
}

// Open index
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(reader);

// Get ValueSource from FieldCache
IntFieldSource src = new IntFieldSource("value");
// ...and make it a sort criterion
SortField sf = src.getSortField(false).rewrite(searcher);
Sort orderBy = new Sort(sf);

// Get hits sorted by our FunctionValues (ascending values)
Query q = new MatchAllDocsQuery();
TopDocs hits = searcher.search(q, Integer.MAX_VALUE, orderBy);
assertEquals(NUM_VALS, hits.scoreDocs.length);
// Verify that sorting works in general
int i = 0;
for (ScoreDoc hit : hits.scoreDocs) {
int valueFromDoc = Integer.parseInt(reader.document(hit.doc).get("value"));
assertEquals(++i, valueFromDoc);
}

// Now get hits after hit #2 using IS.searchAfter()
int afterIdx = 1;
FieldDoc afterHit = (FieldDoc) hits.scoreDocs[afterIdx];
hits = searcher.searchAfter(afterHit, q, Integer.MAX_VALUE, orderBy);

// Expected # of hits: NUM_VALS - 2
assertEquals(NUM_VALS - (afterIdx + 1), hits.scoreDocs.length);

// Verify that hits are actually "after"
int afterValue = ((Double) afterHit.fields[0]).intValue();
for (ScoreDoc hit : hits.scoreDocs) {
int val = Integer.parseInt(reader.document(hit.doc).get("value"));
assertTrue(afterValue <= val);
assertFalse(hit.doc == afterHit.doc);
}
reader.close();
dir.close();
}
}
@@ -99,6 +99,7 @@ import org.apache.lucene.util.fst.Util;
*
* <li> Lookups with the empty string return no results
* instead of all results.
* </ul>
*
* @lucene.experimental
*/
@@ -32,8 +32,8 @@
<dependency org="org.apache.ant" name="ant" rev="1.8.2" transitive="false" />

<dependency org="junit" name="junit" rev="4.10" transitive="false" conf="default->*;junit4-stdalone->*" />
<dependency org="com.carrotsearch.randomizedtesting" name="junit4-ant" rev="2.0.3" transitive="false" conf="default->*;junit4-stdalone->*" />
<dependency org="com.carrotsearch.randomizedtesting" name="randomizedtesting-runner" rev="2.0.3" transitive="false" conf="default->*;junit4-stdalone->*" />
<dependency org="com.carrotsearch.randomizedtesting" name="junit4-ant" rev="2.0.4" transitive="false" conf="default->*;junit4-stdalone->*" />
<dependency org="com.carrotsearch.randomizedtesting" name="randomizedtesting-runner" rev="2.0.4" transitive="false" conf="default->*;junit4-stdalone->*" />

<exclude org="*" ext="*" matcher="regexp" type="${ivy.exclude.types}"/>
</dependencies>
@@ -49,6 +49,12 @@ New Features
  underlying analyzed form used for suggestions is separate from the returned
  text. (Robert Muir)

* SOLR-3985: ExternalFileField caches can be reloaded on firstSearcher/
  newSearcher events using the ExternalFileFieldReloader (Alan Woodward)

* SOLR-3911: Make Directory and DirectoryFactory first class so that the majority
  of Solr's features work with any custom implementations. (Mark Miller)

Optimizations
----------------------
@@ -88,6 +94,41 @@ Bug Fixes
* SOLR-3961: Fixed error using LimitTokenCountFilterFactory
  (Jack Krupansky, hossman)

* SOLR-3933: Distributed commits are not guaranteed to be ordered within a
  request. (Mark Miller)

* SOLR-3939: An empty or just replicated index cannot become the leader of a
  shard after a leader goes down. (Joel Bernstein, yonik, Mark Miller)

* SOLR-3971: A collection that is created with numShards=1 turns into a
  numShards=2 collection after starting up a second core and not specifying
  numShards. (Mark Miller)

* SOLR-3988: Fixed SolrTestCaseJ4.adoc(SolrInputDocument) to respect
  field and document boosts (hossman)

* SOLR-3981: Fixed bug that resulted in document boosts being compounded in
  <copyField/> destination fields. (hossman)

* SOLR-3920: Fix server list caching in CloudSolrServer when using more than one
  collection list with the same instance. (Grzegorz Sobczyk, Mark Miller)

* SOLR-3938: prepareCommit command omits commitData causing a failure to trigger
  replication to slaves. (yonik)

* SOLR-3992: QuerySenderListener doesn't populate document cache.
  (Shotaro Kamio, yonik)

* SOLR-3995: Recovery may never finish on SolrCore shutdown if the last reference to
  a SolrCore is closed by the recovery process. (Mark Miller)

* SOLR-3998: Atomic update on uniqueKey field itself causes duplicate document.
  (Eric Spencer, yonik)

* SOLR-4001: In CachingDirectoryFactory#close, if there are still refs for a
  Directory outstanding, we need to wait for them to be released before closing.
  (Mark Miller)

Other Changes
----------------------
@@ -100,6 +141,9 @@ Other Changes
* SOLR-3966: Eliminate superfluous warning from LanguageIdentifierUpdateProcessor
  (Markus Jelsma via hossman)

* SOLR-3932: SolrCmdDistributorTest either takes 3 seconds or 3 minutes.
  (yonik, Mark Miller)

================== 4.0.0 ==================

Versions of Major Components
@@ -526,6 +526,12 @@
<!-- TODO: does solr have any other docs we should check? -->
<!-- TODO: also integrate checkJavaDocs.py, which does more checks -->
<target name="-documentation-lint" if="documentation-lint.supported" depends="documentation">
<jtidy-macro>
<!-- NOTE: must currently exclude deprecated-list due to a javadocs bug (as of 1.7.0_09)
javadocs generates invalid XML if you deprecate a method that takes a parameter
with a generic type -->
<fileset dir="build/docs" includes="**/*.html" excludes="**/deprecated-list.html"/>
</jtidy-macro>
<echo message="Checking for broken links..."/>
<check-broken-links dir="${javadoc.dir}"/>
<echo message="Checking for malformed docs..."/>
@@ -87,7 +87,7 @@ public abstract class Context {

/**
* Returns the VariableResolver used in this entity which can be used to
* resolve the tokens in ${<namespce.name>}
* resolve the tokens in ${<namespce.name>}
*
* @return a VariableResolver instance
* @see org.apache.solr.handler.dataimport.VariableResolver
@@ -32,7 +32,7 @@ import java.util.Properties;
* <p/>
* The datasouce may be configured as follows
* <p/>
* <datasource name="f1" type="FieldReaderDataSource" />
* <datasource name="f1" type="FieldReaderDataSource" />
* <p/>
* The enity which uses this datasource must keep the url value as the variable name url="field-name"
* <p/>
@@ -45,7 +45,7 @@ import org.slf4j.LoggerFactory;
* /a/b/c
* </pre>
* A record is a Map<String,Object> . The key is the provided name
* and the value is a String or a List<String>
* and the value is a String or a List<String>
*
* This class is thread-safe for parsing xml. But adding fields is not
* thread-safe. The recommended usage is to addField() in one thread and
@@ -651,10 +651,10 @@ public class XPathRecordReader {
/**
* @param record The record map. The key is the field name as provided in
* the addField() methods. The value can be a single String (for single
* valued fields) or a List<String> (for multiValued).
* valued fields) or a List<String> (for multiValued).
* @param xpath The forEach XPATH for which this record is being emitted
* If there is any change all parsing will be aborted and the Exception
* is propogated up
* is propagated up
*/
public void handle(Map<String, Object> record, String xpath);
}
@@ -159,7 +159,7 @@ public interface ExtractingParams {
* File format is Java properties format with one key=value per line.
* The key is evaluated as a regex against the file name, and the value is the password
* The rules are evaluated top-bottom, i.e. the first match will be used
* If you want a fallback password to be always used, supply a .*=<defaultmypassword> at the end
* If you want a fallback password to be always used, supply a .*=<defaultmypassword> at the end
*/
public static final String PASSWORD_MAP_FILE = "passwordsFile";
}
@@ -27,7 +27,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
* This class produces a special form of reversed tokens, suitable for
* better handling of leading wildcards. Tokens from the input TokenStream
* are reversed and prepended with a special "reversed" marker character.
* If <code>withOriginal<code> argument is <code>true</code> then first the
* If <code>withOriginal</code> argument is <code>true</code> then first the
* original token is returned, and then the reversed token (with
* <code>positionIncrement == 0</code>) is returned. Otherwise only reversed
* tokens are returned.
@@ -35,6 +35,7 @@ import org.eclipse.jetty.server.Connector;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.bio.SocketConnector;
import org.eclipse.jetty.server.handler.GzipHandler;
import org.eclipse.jetty.server.nio.SelectChannelConnector;
import org.eclipse.jetty.server.session.HashSessionIdManager;
import org.eclipse.jetty.servlet.FilterHolder;
import org.eclipse.jetty.servlet.ServletContextHandler;
@@ -92,6 +93,7 @@ public class JettySolrRunner {
private void init(String solrHome, String context, int port, boolean stopAtShutdown) {
this.context = context;
server = new Server(port);

this.solrHome = solrHome;
this.stopAtShutdown = stopAtShutdown;
server.setStopAtShutdown(stopAtShutdown);
@@ -100,32 +102,45 @@ public class JettySolrRunner {
}
System.setProperty("solr.solr.home", solrHome);
if (System.getProperty("jetty.testMode") != null) {
// SelectChannelConnector connector = new SelectChannelConnector();
// Normal SocketConnector is what solr's example server uses by default
SocketConnector connector = new SocketConnector();
SelectChannelConnector connector = new SelectChannelConnector();
connector.setPort(port);
connector.setReuseAddress(true);
if (!stopAtShutdown) {
connector.setLowResourcesMaxIdleTime(1500);
QueuedThreadPool threadPool = (QueuedThreadPool) connector
.getThreadPool();
if (threadPool != null) {
threadPool.setMaxThreads(10000);
threadPool.setMaxIdleTimeMs(5000);
if (!stopAtShutdown) {
threadPool.setMaxStopTimeMs(100);
}
}

server.setConnectors(new Connector[] {connector});
server.setSessionIdManager(new HashSessionIdManager(new Random()));
} else {
if (!stopAtShutdown) {

for (Connector connector : server.getConnectors()) {
QueuedThreadPool threadPool = null;
if (connector instanceof SocketConnector) {
QueuedThreadPool threadPool = (QueuedThreadPool) ((SocketConnector) connector)
threadPool = (QueuedThreadPool) ((SocketConnector) connector)
.getThreadPool();
}
if (connector instanceof SelectChannelConnector) {
threadPool = (QueuedThreadPool) ((SelectChannelConnector) connector)
.getThreadPool();
}

if (threadPool != null) {
threadPool.setMaxThreads(10000);
threadPool.setMaxIdleTimeMs(5000);
if (!stopAtShutdown) {
threadPool.setMaxStopTimeMs(100);
}
}

}
}

}

// Initialize the servlets
@@ -14,8 +14,10 @@ import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.core.SolrCore;
import org.apache.solr.update.UpdateLog;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -37,7 +39,7 @@ import org.slf4j.LoggerFactory;
*/

public abstract class ElectionContext {

private static Logger log = LoggerFactory.getLogger(ElectionContext.class);
final String electionPath;
final ZkNodeProps leaderProps;
final String id;
@@ -57,7 +59,12 @@ public abstract class ElectionContext {
public void close() {}

public void cancelElection() throws InterruptedException, KeeperException {
try {
zkClient.delete(leaderSeqPath, -1, true);
} catch (NoNodeException e) {
// fine
log.warn("cancelElection did not find election node to remove");
}
}

abstract void runLeaderProcess(boolean weAreReplacement) throws KeeperException, InterruptedException, IOException;
@@ -162,6 +169,10 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
}

log.info("I may be the new leader - try and sync");

UpdateLog ulog = core.getUpdateHandler().getUpdateLog();


// we are going to attempt to be the leader
// first cancel any current recovery
core.getUpdateHandler().getSolrCoreState().cancelRecovery();
@@ -173,6 +184,14 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
success = false;
}

if (!success && ulog.getRecentUpdates().getVersions(1).isEmpty()) {
// we failed sync, but we have no versions - we can't sync in that case
// - we were active
// before, so become leader anyway
log.info("We failed sync, but we have no versions - we can't sync in that case - we were active before, so become leader anyway");
success = true;
}

// if !success but no one else is in active mode,
// we are the leader anyway
// TODO: should we also be leader if there is only one other active?
@@ -220,6 +239,13 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
} catch (Throwable t) {
try {
core = cc.getCore(coreName);
if (core == null) {
cancelElection();
throw new SolrException(ErrorCode.SERVER_ERROR,
"Fatal Error, SolrCore not found:" + coreName + " in "
+ cc.getCoreNames());
}

core.getCoreDescriptor().getCloudDescriptor().isLeader = false;

// we could not publish ourselves as leader - rejoin election
@@ -308,13 +334,7 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
return;
}

log.info("There is a better leader candidate than us - going back into recovery");

try {
zkController.publish(core.getCoreDescriptor(), ZkStateReader.DOWN);
} catch (Throwable t) {
SolrException.log(log, "Error trying to publish down state", t);
}
log.info("There may be a better leader candidate than us - going back into recovery");

cancelElection();

@@ -335,12 +355,15 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
return false;
}

if (core.getCoreDescriptor().getCloudDescriptor().getLastPublished().equals(ZkStateReader.ACTIVE)) {
if (core.getCoreDescriptor().getCloudDescriptor().getLastPublished()
.equals(ZkStateReader.ACTIVE)) {
log.info("My last published State was Active, it's okay to be the leader.");
return true;
}

// TODO: and if no is a good candidate?
log.info("My last published State was "
+ core.getCoreDescriptor().getCloudDescriptor().getLastPublished()
+ ", I won't be the leader.");
// TODO: and if no one is a good candidate?

return false;
}
@@ -84,6 +84,10 @@ public class LeaderElector {

sortSeqs(seqs);
List<Integer> intSeqs = getSeqs(seqs);
if (intSeqs.size() == 0) {
log.warn("Our node is no longer in line to be leader");
return;
}
if (seq <= intSeqs.get(0)) {
// first we delete the node advertising the old leader in case the ephem is still there
try {
@ -17,14 +17,11 @@ package org.apache.solr.cloud;
|
|||
* the License.
|
||||
*/
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Map.Entry;
|
||||
|
||||
import org.apache.noggit.JSONUtil;
|
||||
import org.apache.solr.common.SolrException;
|
||||
import org.apache.solr.common.cloud.ClusterState;
|
||||
import org.apache.solr.common.cloud.ClosableThread;
|
||||
|
@ -210,11 +207,11 @@ public class Overseer {
|
|||
private ClusterState updateState(ClusterState state, final ZkNodeProps message) {
|
||||
final String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
|
||||
final String zkCoreNodeName = message.getStr(ZkStateReader.NODE_NAME_PROP) + "_" + message.getStr(ZkStateReader.CORE_NAME_PROP);
|
||||
final Integer numShards = message.getStr(ZkStateReader.NUM_SHARDS_PROP)!=null?Integer.parseInt(message.getStr(ZkStateReader.NUM_SHARDS_PROP)):null;
|
||||
|
||||
Integer numShards = message.getStr(ZkStateReader.NUM_SHARDS_PROP)!=null?Integer.parseInt(message.getStr(ZkStateReader.NUM_SHARDS_PROP)):null;
|
||||
log.info("Update state numShards={} message={}", numShards, message);
|
||||
//collection does not yet exist, create placeholders if num shards is specified
|
||||
if (!state.getCollections().contains(collection)
|
||||
&& numShards!=null) {
|
||||
boolean collectionExists = state.getCollections().contains(collection);
|
||||
if (!collectionExists && numShards!=null) {
|
||||
state = createCollection(state, collection, numShards);
|
||||
}
|
||||
|
||||
|
@ -227,6 +224,10 @@ public class Overseer {
|
|||
}
|
||||
if(sliceName == null) {
|
||||
//request new shardId
|
||||
if (collectionExists) {
|
||||
// use existing numShards
|
||||
numShards = state.getCollectionStates().get(collection).size();
|
||||
}
|
||||
sliceName = AssignShard.assignShard(collection, state, numShards);
|
||||
}
|
||||
|
||||
|
@ -269,6 +270,8 @@ public class Overseer {
|
|||
}
|
||||
|
||||
private ClusterState createCollection(ClusterState state, String collectionName, int numShards) {
|
||||
log.info("Create collection {} with numShards {}", collectionName, numShards);
|
||||
|
||||
HashPartitioner hp = new HashPartitioner();
|
||||
List<HashPartitioner.Range> ranges = hp.partitionRange(numShards, hp.fullRange());
|
||||
|
||||
|
|
|
@ -313,7 +313,7 @@ public class RecoveryStrategy extends Thread implements ClosableThread {
|
|||
}
|
||||
}
|
||||
|
||||
while (!successfulRecovery && !isInterrupted()) { // don't use interruption or it will close channels though
|
||||
while (!successfulRecovery && !isInterrupted() && !isClosed()) { // don't use interruption or it will close channels though
|
||||
try {
|
||||
CloudDescriptor cloudDesc = core.getCoreDescriptor()
|
||||
.getCloudDescriptor();
|
||||
|
@ -348,7 +348,7 @@ public class RecoveryStrategy extends Thread implements ClosableThread {
|
|||
// System.out.println("Attempting to PeerSync from " + leaderUrl
|
||||
// + " i am:" + zkController.getNodeName());
|
||||
PeerSync peerSync = new PeerSync(core,
|
||||
Collections.singletonList(leaderUrl), ulog.numRecordsToKeep);
|
||||
Collections.singletonList(leaderUrl), ulog.numRecordsToKeep, false, false);
|
||||
peerSync.setStartingVersions(recentVersions);
|
||||
boolean syncSuccess = peerSync.sync();
|
||||
if (syncSuccess) {
|
||||
|
@ -443,7 +443,7 @@ public class RecoveryStrategy extends Thread implements ClosableThread {
|
|||
// Or do a fall off retry...
|
||||
try {
|
||||
|
||||
log.error("Recovery failed - trying again... core=" + coreName);
|
||||
log.error("Recovery failed - trying again... (" + retries + ") core=" + coreName);
|
||||
|
||||
if (isClosed()) {
|
||||
retries = INTERRUPTED;
|
||||
|
@ -451,7 +451,7 @@ public class RecoveryStrategy extends Thread implements ClosableThread {
|
|||
|
||||
retries++;
|
||||
if (retries >= MAX_RETRIES) {
|
||||
if (retries == INTERRUPTED) {
|
||||
if (retries >= INTERRUPTED) {
|
||||
SolrException.log(log, "Recovery failed - interrupted. core="
|
||||
+ coreName);
|
||||
try {
|
||||
|
@ -463,7 +463,7 @@ public class RecoveryStrategy extends Thread implements ClosableThread {
|
|||
}
|
||||
} else {
|
||||
SolrException.log(log,
|
||||
"Recovery failed - max retries exceeded. core=" + coreName);
|
||||
"Recovery failed - max retries exceeded (" + retries + "). core=" + coreName);
|
||||
try {
|
||||
recoveryFailed(core, zkController, baseUrl, coreZkNodeName,
|
||||
core.getCoreDescriptor());
|
||||
|
@ -482,6 +482,7 @@ public class RecoveryStrategy extends Thread implements ClosableThread {
|
|||
try {
|
||||
// start at 1 sec and work up to a couple min
|
||||
double loopCount = Math.min(Math.pow(2, retries), 600);
|
||||
log.info("Wait {} seconds before trying to recover again ({})", loopCount, retries);
|
||||
for (int i = 0; i < loopCount; i++) {
|
||||
if (isClosed()) break; // check if someone closed us
|
||||
Thread.sleep(STARTING_RECOVERY_DELAY);
|
||||
|
|
|
@ -243,7 +243,7 @@ class SolrZkServerProps extends QuorumPeerConfig {
|
|||
|
||||
// called by the modified version of parseProperties
|
||||
// when the myid file is missing.
|
||||
public Long getMySeverId() {
|
||||
public Long getMyServerId() {
|
||||
if (zkRun == null && solrPort == null) return null;
|
||||
|
||||
Map<Long, QuorumPeer.QuorumServer> slist = getServers();
|
||||
|
@ -450,7 +450,7 @@ class SolrZkServerProps extends QuorumPeerConfig {
|
|||
File myIdFile = new File(dataDir, "myid");
|
||||
if (!myIdFile.exists()) {
|
||||
///////////////// ADDED FOR SOLR //////
|
||||
Long myid = getMySeverId();
|
||||
Long myid = getMyServerId();
|
||||
if (myid != null) {
|
||||
serverId = myid;
|
||||
return;
|
||||
|
|
|
@ -176,7 +176,7 @@ public class SyncStrategy {
|
|||
// if we can't reach a replica for sync, we still consider the overall sync a success
|
||||
// TODO: as an assurance, we should still try and tell the sync nodes that we couldn't reach
|
||||
// to recover once more?
|
||||
PeerSync peerSync = new PeerSync(core, syncWith, core.getUpdateHandler().getUpdateLog().numRecordsToKeep, true);
|
||||
PeerSync peerSync = new PeerSync(core, syncWith, core.getUpdateHandler().getUpdateLog().numRecordsToKeep, true, true);
|
||||
return peerSync.sync();
|
||||
}
|
||||
|
||||
|
|
|
@ -194,8 +194,6 @@ public final class ZkController {
|
|||
overseerElector.joinElection(context, true);
|
||||
zkStateReader.createClusterStateWatchersAndUpdate();
|
||||
|
||||
// cc.newCmdDistribExecutor();
|
||||
|
||||
// we have to register as live first to pick up docs in the buffer
|
||||
createEphemeralLiveNode();
|
||||
|
||||
|
@ -308,7 +306,11 @@ public final class ZkController {
|
|||
}
|
||||
|
||||
for (ElectionContext context : electionContexts.values()) {
|
||||
try {
|
||||
context.close();
|
||||
} catch (Throwable t) {
|
||||
log.error("Error closing overseer", t);
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
|
@ -603,7 +605,7 @@ public final class ZkController {
|
|||
recoveryFuture.get(); // NOTE: this could potentially block for
|
||||
// minutes or more!
|
||||
// TODO: public as recovering in the mean time?
|
||||
// TODO: in the future we could do peerync in parallel with recoverFromLog
|
||||
// TODO: in the future we could do peersync in parallel with recoverFromLog
|
||||
} else {
|
||||
log.info("No LogReplay needed for core="+core.getName() + " baseURL=" + baseUrl);
|
||||
}
|
||||
|
@ -781,6 +783,7 @@ public final class ZkController {
|
|||
//System.out.println(Thread.currentThread().getStackTrace()[3]);
|
||||
Integer numShards = cd.getCloudDescriptor().getNumShards();
|
||||
if (numShards == null) { //XXX sys prop hack
|
||||
log.info("numShards not found on descriptor - reading it from system property");
|
||||
numShards = Integer.getInteger(ZkStateReader.NUM_SHARDS_PROP);
|
||||
}
|
||||
|
||||
|
|
|
@ -62,7 +62,9 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
|
|||
protected Map<Directory,List<CloseListener>> closeListeners = new HashMap<Directory,List<CloseListener>>();
|
||||
|
||||
public interface CloseListener {
|
||||
public void onClose();
|
||||
public void postClose();
|
||||
|
||||
public void preClose();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -109,6 +111,18 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
|
|||
synchronized (this) {
|
||||
for (CacheValue val : byDirectoryCache.values()) {
|
||||
try {
|
||||
// if there are still refs out, we have to wait for them
|
||||
int cnt = 0;
|
||||
while(val.refCnt != 0) {
|
||||
wait(100);
|
||||
|
||||
if (cnt++ >= 300) {
|
||||
log.error("Timeout waiting for all directory ref counts to be released");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
assert val.refCnt == 0 : val.refCnt;
|
||||
val.directory.close();
|
||||
} catch (Throwable t) {
|
||||
SolrException.log(log, "Error closing directory", t);
|
||||
|
@ -126,22 +140,33 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
|
|||
throw new IllegalArgumentException("Unknown directory: " + directory
|
||||
+ " " + byDirectoryCache);
|
||||
}
|
||||
|
||||
log.debug("Closing: {}", cacheValue);
|
||||
log.info("Releasing directory:" + cacheValue.path);
|
||||
|
||||
cacheValue.refCnt--;
|
||||
|
||||
if (cacheValue.refCnt == 0 && cacheValue.doneWithDir) {
|
||||
log.info("Closing directory:" + cacheValue.path);
|
||||
directory.close();
|
||||
byDirectoryCache.remove(directory);
|
||||
byPathCache.remove(cacheValue.path);
|
||||
List<CloseListener> listeners = closeListeners.remove(directory);
|
||||
if (listeners != null) {
|
||||
for (CloseListener listener : listeners) {
|
||||
listener.onClose();
|
||||
listener.preClose();
|
||||
}
|
||||
}
|
||||
try {
|
||||
directory.close();
|
||||
} catch (Throwable t) {
|
||||
SolrException.log(log, "Error closing directory", t);
|
||||
}
|
||||
|
||||
if (listeners != null) {
|
||||
for (CloseListener listener : listeners) {
|
||||
listener.postClose();
|
||||
}
|
||||
closeListeners.remove(directory);
|
||||
}
|
||||
|
||||
byDirectoryCache.remove(directory);
|
||||
byPathCache.remove(cacheValue.path);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -178,16 +203,29 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
|
|||
throws IOException {
|
||||
String fullPath = new File(path).getAbsolutePath();
|
||||
synchronized (this) {
|
||||
CacheValue cacheValue = byPathCache.get(fullPath);
|
||||
final CacheValue cacheValue = byPathCache.get(fullPath);
|
||||
Directory directory = null;
|
||||
if (cacheValue != null) {
|
||||
directory = cacheValue.directory;
|
||||
if (forceNew) {
|
||||
cacheValue.doneWithDir = true;
|
||||
|
||||
// we make a quick close attempt,
|
||||
// otherwise this should be closed
|
||||
// when whatever is using it, releases it
|
||||
|
||||
if (cacheValue.refCnt == 0) {
|
||||
try {
|
||||
// the following will decref, so
|
||||
// first incref
|
||||
cacheValue.refCnt++;
|
||||
close(cacheValue.directory);
|
||||
} catch (IOException e) {
|
||||
SolrException.log(log, "Error closing directory", e);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
if (directory == null || forceNew) {
|
||||
|
|
|
@ -34,9 +34,6 @@ import java.util.Locale;
|
|||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.SynchronousQueue;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
|
||||
import javax.xml.parsers.ParserConfigurationException;
|
||||
|
@ -59,7 +56,6 @@ import org.apache.solr.common.SolrException;
|
|||
import org.apache.solr.common.SolrException.ErrorCode;
|
||||
import org.apache.solr.common.cloud.ZkStateReader;
|
||||
import org.apache.solr.common.cloud.ZooKeeperException;
|
||||
import org.apache.solr.common.util.ExecutorUtil;
|
||||
import org.apache.solr.core.SolrXMLSerializer.SolrCoreXMLDef;
|
||||
import org.apache.solr.core.SolrXMLSerializer.SolrXMLDef;
|
||||
import org.apache.solr.handler.admin.CollectionsHandler;
|
||||
|
@ -72,7 +68,6 @@ import org.apache.solr.logging.jul.JulWatcher;
|
|||
import org.apache.solr.schema.IndexSchema;
|
||||
import org.apache.solr.update.SolrCoreState;
|
||||
import org.apache.solr.util.DOMUtil;
|
||||
import org.apache.solr.util.DefaultSolrThreadFactory;
|
||||
import org.apache.solr.util.FileUtils;
|
||||
import org.apache.solr.util.SystemIdResolver;
|
||||
import org.apache.zookeeper.KeeperException;
|
||||
|
@ -760,9 +755,6 @@ public class CoreContainer
|
|||
try {
|
||||
// Make the instanceDir relative to the cores instanceDir if not absolute
|
||||
File idir = new File(dcore.getInstanceDir());
|
||||
if (!idir.isAbsolute()) {
|
||||
idir = new File(solrHome, dcore.getInstanceDir());
|
||||
}
|
||||
String instanceDir = idir.getPath();
|
||||
log.info("Creating SolrCore '{}' using instanceDir: {}",
|
||||
dcore.getName(), instanceDir);
|
||||
|
@ -973,9 +965,6 @@ public class CoreContainer
|
|||
CoreDescriptor cd = core.getCoreDescriptor();
|
||||
|
||||
File instanceDir = new File(cd.getInstanceDir());
|
||||
if (!instanceDir.isAbsolute()) {
|
||||
instanceDir = new File(getSolrHome(), cd.getInstanceDir());
|
||||
}
|
||||
|
||||
log.info("Reloading SolrCore '{}' using instanceDir: {}",
|
||||
cd.getName(), instanceDir.getAbsolutePath());
|
||||
|
@ -1083,7 +1072,9 @@ public class CoreContainer
|
|||
|
||||
synchronized(cores) {
|
||||
SolrCore core = cores.remove( name );
|
||||
if (core != null) {
|
||||
coreToOrigName.remove(core);
|
||||
}
|
||||
return core;
|
||||
}
|
||||
|
||||
|
@ -1283,7 +1274,7 @@ public class CoreContainer
|
|||
|
||||
coreAttribs.put(CORE_NAME, coreName);
|
||||
|
||||
String instanceDir = dcore.getInstanceDir();
|
||||
String instanceDir = dcore.getRawInstanceDir();
|
||||
addCoreProperty(coreAttribs, coreNode, CORE_INSTDIR, instanceDir, null);
|
||||
|
||||
// write config
|
||||
|
|
|
@ -130,10 +130,24 @@ public class CoreDescriptor {
|
|||
}
|
||||
|
||||
/**@return the core instance directory. */
|
||||
public String getInstanceDir() {
|
||||
return instanceDir;
|
||||
public String getRawInstanceDir() {
|
||||
return this.instanceDir;
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @return the core instance directory, prepended with solr_home if not an absolute path.
|
||||
*/
|
||||
public String getInstanceDir() {
|
||||
String instDir = this.instanceDir;
|
||||
if (instDir == null) return null; // No worse than before.
|
||||
|
||||
if (new File(instDir).isAbsolute()) {
|
||||
return SolrResourceLoader.normalizeDir(SolrResourceLoader.normalizeDir(instanceDir));
|
||||
}
|
||||
return SolrResourceLoader.normalizeDir(coreContainer.getSolrHome() +
|
||||
SolrResourceLoader.normalizeDir(instDir));
|
||||
}
|
||||
/**Sets the core configuration resource name. */
|
||||
public void setConfigName(String name) {
|
||||
if (name == null || name.length() == 0)
|
||||
|
|
|
@ -21,8 +21,12 @@ import java.io.Closeable;
|
|||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.solr.common.SolrException;
|
||||
import org.apache.solr.core.CachingDirectoryFactory.CloseListener;
|
||||
import org.apache.solr.util.plugin.NamedListInitializedPlugin;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* Provides access to a Directory implementation. You must release every
|
||||
|
@ -31,6 +35,8 @@ import org.apache.solr.util.plugin.NamedListInitializedPlugin;
|
|||
public abstract class DirectoryFactory implements NamedListInitializedPlugin,
|
||||
Closeable {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(DirectoryFactory.class.getName());
|
||||
|
||||
/**
|
||||
* Indicates a Directory will no longer be used, and when it's ref count
|
||||
* hits 0, it can be closed. On shutdown all directories will be closed
|
||||
|
@ -65,10 +71,33 @@ public abstract class DirectoryFactory implements NamedListInitializedPlugin,
|
|||
*/
|
||||
public abstract boolean exists(String path);
|
||||
|
||||
/**
|
||||
* Removes the Directory's persistent storage.
|
||||
* For example: A file system impl may remove the
|
||||
* on disk directory.
|
||||
* @throws IOException If there is a low-level I/O error.
|
||||
*
|
||||
*/
|
||||
public abstract void remove(Directory dir) throws IOException;
|
||||
|
||||
/**
|
||||
* Override for more efficient moves.
|
||||
*
|
||||
* @throws IOException If there is a low-level I/O error.
|
||||
*/
|
||||
public void move(Directory fromDir, Directory toDir, String fileName) throws IOException {
|
||||
fromDir.copy(toDir, fileName, fileName, IOContext.DEFAULT);
|
||||
fromDir.deleteFile(fileName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the Directory for a given path, using the specified rawLockType.
|
||||
* Will return the same Directory instance for the same path.
|
||||
*
|
||||
* Note: sometimes you might pass null for the rawLockType when
|
||||
* you know the Directory exists and the rawLockType is already
|
||||
* in use.
|
||||
*
|
||||
* @throws IOException If there is a low-level I/O error.
|
||||
*/
|
||||
public abstract Directory get(String path, String rawLockType)
|
||||
|
@ -101,4 +130,58 @@ public abstract class DirectoryFactory implements NamedListInitializedPlugin,
|
|||
*/
|
||||
public abstract void release(Directory directory) throws IOException;
|
||||
|
||||
|
||||
/**
|
||||
* Normalize a given path.
|
||||
*
|
||||
* @param path to normalize
|
||||
* @return normalized path
|
||||
* @throws IOException on io error
|
||||
*/
|
||||
public String normalize(String path) throws IOException {
|
||||
return path;
|
||||
}
|
||||
|
||||
public static long sizeOfDirectory(Directory directory) throws IOException {
|
||||
final String[] files = directory.listAll();
|
||||
long size = 0;
|
||||
|
||||
for (final String file : files) {
|
||||
size += sizeOf(directory, file);
|
||||
if (size < 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
public static long sizeOf(Directory directory, String file) throws IOException {
|
||||
if (!directory.fileExists(file)) {
|
||||
throw new IllegalArgumentException(file + " does not exist");
|
||||
}
|
||||
|
||||
return directory.fileLength(file);
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete the files in the Directory
|
||||
*/
|
||||
public static boolean empty(Directory dir) {
|
||||
boolean isSuccess = true;
|
||||
String contents[];
|
||||
try {
|
||||
contents = dir.listAll();
|
||||
if (contents != null) {
|
||||
for (String file : contents) {
|
||||
dir.deleteFile(file);
|
||||
}
|
||||
}
|
||||
} catch (IOException e) {
|
||||
SolrException.log(log, "Error deleting files from Directory", e);
|
||||
isSuccess = false;
|
||||
}
|
||||
return isSuccess;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -0,0 +1,56 @@
|
|||
package org.apache.solr.core;
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.store.Directory;
|
||||
|
||||
/**
|
||||
* Directory provider for implementations that do not persist over reboots.
|
||||
*
|
||||
*/
|
||||
public abstract class EphemeralDirectoryFactory extends CachingDirectoryFactory {
|
||||
|
||||
@Override
|
||||
public boolean exists(String path) {
|
||||
String fullPath = new File(path).getAbsolutePath();
|
||||
synchronized (this) {
|
||||
CacheValue cacheValue = byPathCache.get(fullPath);
|
||||
Directory directory = null;
|
||||
if (cacheValue != null) {
|
||||
directory = cacheValue.directory;
|
||||
}
|
||||
if (directory == null) {
|
||||
return false;
|
||||
} else {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void remove(Directory dir) throws IOException {
|
||||
// ram dir does not persist its dir anywhere
|
||||
}
|
||||
|
||||
@Override
|
||||
public String normalize(String path) throws IOException {
|
||||
return path;
|
||||
}
|
||||
}
|
|
@ -39,7 +39,7 @@ import java.io.IOException;
|
|||
* </ul>
|
||||
*
|
||||
**/
|
||||
public class MMapDirectoryFactory extends CachingDirectoryFactory {
|
||||
public class MMapDirectoryFactory extends StandardDirectoryFactory {
|
||||
private transient static Logger log = LoggerFactory.getLogger(MMapDirectoryFactory.class);
|
||||
boolean unmapHack;
|
||||
private int maxChunk;
|
||||
|
|
|
@ -27,11 +27,11 @@ import java.io.IOException;
|
|||
* Factory to instantiate {@link org.apache.lucene.store.NIOFSDirectory}
|
||||
*
|
||||
**/
|
||||
public class NIOFSDirectoryFactory extends CachingDirectoryFactory {
|
||||
public class NIOFSDirectoryFactory extends StandardDirectoryFactory {
|
||||
|
||||
@Override
|
||||
protected Directory create(String path) throws IOException {
|
||||
|
||||
return new NIOFSDirectory(new File(path));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -19,6 +19,7 @@ package org.apache.solr.core;
|
|||
|
||||
import org.apache.solr.request.SolrQueryRequest;
|
||||
import org.apache.solr.request.SolrRequestInfo;
|
||||
import org.apache.solr.response.ResultContext;
|
||||
import org.apache.solr.search.SolrIndexSearcher;
|
||||
import org.apache.solr.search.DocList;
|
||||
import org.apache.solr.search.DocIterator;
|
||||
|
@ -68,6 +69,9 @@ public class QuerySenderListener extends AbstractSolrEventListener {
|
|||
NamedList values = rsp.getValues();
|
||||
for (int i=0; i<values.size(); i++) {
|
||||
Object o = values.getVal(i);
|
||||
if (o instanceof ResultContext) {
|
||||
o = ((ResultContext)o).docs;
|
||||
}
|
||||
if (o instanceof DocList) {
|
||||
DocList docs = (DocList)o;
|
||||
for (DocIterator iter = docs.iterator(); iter.hasNext();) {
|
||||
|
|
|
@ -17,7 +17,6 @@
|
|||
|
||||
package org.apache.solr.core;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.store.Directory;
|
||||
|
@ -26,28 +25,11 @@ import org.apache.lucene.store.RAMDirectory;
|
|||
/**
|
||||
* Factory to instantiate {@link org.apache.lucene.store.RAMDirectory}
|
||||
*/
|
||||
public class RAMDirectoryFactory extends StandardDirectoryFactory {
|
||||
public class RAMDirectoryFactory extends EphemeralDirectoryFactory {
|
||||
|
||||
@Override
|
||||
protected Directory create(String path) throws IOException {
|
||||
return new RAMDirectory();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean exists(String path) {
|
||||
String fullPath = new File(path).getAbsolutePath();
|
||||
synchronized (this) {
|
||||
CacheValue cacheValue = byPathCache.get(fullPath);
|
||||
Directory directory = null;
|
||||
if (cacheValue != null) {
|
||||
directory = cacheValue.directory;
|
||||
}
|
||||
if (directory == null) {
|
||||
return false;
|
||||
} else {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -189,8 +189,8 @@ public final class RequestHandlers {
|
|||
|
||||
|
||||
/**
|
||||
* The <code>LazyRequestHandlerWrapper</core> wraps any {@link SolrRequestHandler}.
|
||||
* Rather then instanciate and initalize the handler on startup, this wrapper waits
|
||||
* The <code>LazyRequestHandlerWrapper</code> wraps any {@link SolrRequestHandler}.
|
||||
* Rather then instantiate and initialize the handler on startup, this wrapper waits
|
||||
* until it is actually called. This should only be used for handlers that are
|
||||
* unlikely to be used in the normal lifecycle.
|
||||
*
|
||||
|
|
|
@ -27,10 +27,11 @@ import java.io.IOException;
|
|||
* Factory to instantiate {@link org.apache.lucene.store.SimpleFSDirectory}
|
||||
*
|
||||
**/
|
||||
public class SimpleFSDirectoryFactory extends CachingDirectoryFactory {
|
||||
public class SimpleFSDirectoryFactory extends StandardDirectoryFactory {
|
||||
|
||||
@Override
|
||||
protected Directory create(String path) throws IOException {
|
||||
return new SimpleFSDirectory(new File(path));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -24,6 +24,8 @@ import org.apache.lucene.index.IndexDeletionPolicy;
|
|||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.search.BooleanQuery;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.apache.lucene.store.LockObtainFailedException;
|
||||
import org.apache.solr.cloud.CloudDescriptor;
|
||||
import org.apache.solr.common.SolrException;
|
||||
|
@ -84,8 +86,9 @@ import org.slf4j.LoggerFactory;
|
|||
import org.xml.sax.SAXException;
|
||||
|
||||
import javax.xml.parsers.ParserConfigurationException;
|
||||
|
||||
import java.io.EOFException;
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.Writer;
|
||||
|
@ -215,12 +218,7 @@ public final class SolrCore implements SolrInfoMBean {
|
|||
}
|
||||
|
||||
public String getIndexDir() {
|
||||
synchronized (searcherLock) {
|
||||
if (_searcher == null)
|
||||
return dataDir + "index/";
|
||||
SolrIndexSearcher searcher = _searcher.get();
|
||||
return searcher.getIndexDir() == null ? dataDir + "index/" : searcher.getIndexDir();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
@ -234,23 +232,55 @@ public final class SolrCore implements SolrInfoMBean {
|
|||
*/
|
||||
public String getNewIndexDir() {
|
||||
String result = dataDir + "index/";
|
||||
File propsFile = new File(dataDir + "index.properties");
|
||||
if (propsFile.exists()) {
|
||||
Properties p = new Properties();
|
||||
InputStream is = null;
|
||||
Directory dir = null;
|
||||
try {
|
||||
dir = getDirectoryFactory().get(getDataDir(), null);
|
||||
if (dir.fileExists("index.properties")){
|
||||
final IndexInput input = dir.openInput("index.properties", IOContext.DEFAULT);
|
||||
|
||||
final InputStream is = new InputStream() {
|
||||
|
||||
@Override
|
||||
public int read() throws IOException {
|
||||
byte next;
|
||||
try {
|
||||
next = input.readByte();
|
||||
} catch (EOFException e) {
|
||||
return -1;
|
||||
}
|
||||
return next;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
super.close();
|
||||
input.close();
|
||||
}
|
||||
};
|
||||
|
||||
try {
|
||||
is = new FileInputStream(propsFile);
|
||||
p.load(is);
|
||||
} catch (IOException e) {
|
||||
/*no op*/
|
||||
|
||||
String s = p.getProperty("index");
|
||||
if (s != null && s.trim().length() > 0) {
|
||||
result = dataDir + s;
|
||||
}
|
||||
|
||||
} catch (Exception e) {
|
||||
log.error("Unable to load index.properties", e);
|
||||
} finally {
|
||||
IOUtils.closeQuietly(is);
|
||||
}
|
||||
String s = p.getProperty("index");
|
||||
if (s != null && s.trim().length() > 0) {
|
||||
File tmp = new File(dataDir + s);
|
||||
if (tmp.exists() && tmp.isDirectory())
|
||||
result = dataDir + s;
|
||||
}
|
||||
} catch (IOException e) {
|
||||
SolrException.log(log, "", e);
|
||||
} finally {
|
||||
|
||||
try {
|
||||
getDirectoryFactory().release(dir);
|
||||
} catch (IOException e) {
|
||||
SolrException.log(log, "", e);
|
||||
}
|
||||
}
|
||||
if (!result.equals(lastNewIndexDir)) {
|
||||
|
@ -365,6 +395,11 @@ public final class SolrCore implements SolrInfoMBean {
|
|||
return responseWriters.put(name, responseWriter);
|
||||
}
|
||||
|
||||
public SolrCore reload(SolrCore prev) throws IOException,
|
||||
ParserConfigurationException, SAXException {
|
||||
return reload(prev.getResourceLoader(), prev);
|
||||
}
|
||||
|
||||
public SolrCore reload(SolrResourceLoader resourceLoader, SolrCore prev) throws IOException,
|
||||
ParserConfigurationException, SAXException {
|
||||
// TODO - what if indexwriter settings have changed
|
||||
|
@ -379,6 +414,7 @@ public final class SolrCore implements SolrInfoMBean {
|
|||
|
||||
SolrCore core = new SolrCore(getName(), getDataDir(), config,
|
||||
schema, coreDescriptor, updateHandler, prev);
|
||||
core.solrDelPolicy = this.solrDelPolicy;
|
||||
return core;
|
||||
}
|
||||
|
||||
|
@ -393,9 +429,11 @@ public final class SolrCore implements SolrInfoMBean {
|
|||
DirectoryFactory dirFactory;
|
||||
PluginInfo info = solrConfig.getPluginInfo(DirectoryFactory.class.getName());
|
||||
if (info != null) {
|
||||
log.info(info.className);
|
||||
dirFactory = getResourceLoader().newInstance(info.className, DirectoryFactory.class);
|
||||
dirFactory.init(info.initArgs);
|
||||
} else {
|
||||
log.info("solr.NRTCachingDirectoryFactory");
|
||||
dirFactory = new NRTCachingDirectoryFactory();
|
||||
}
|
||||
// And set it
|
||||
|
@ -417,8 +455,8 @@ public final class SolrCore implements SolrInfoMBean {
|
|||
// protect via synchronized(SolrCore.class)
|
||||
private static Set<String> dirs = new HashSet<String>();
|
||||
|
||||
void initIndex(boolean reload) {
|
||||
try {
|
||||
void initIndex(boolean reload) throws IOException {
|
||||
|
||||
String indexDir = getNewIndexDir();
|
||||
boolean indexExists = getDirectoryFactory().exists(indexDir);
|
||||
boolean firstTime;
|
||||
|
@ -430,19 +468,28 @@ public final class SolrCore implements SolrInfoMBean {
|
|||
initIndexReaderFactory();
|
||||
|
||||
if (indexExists && firstTime && !reload) {
|
||||
// to remove locks, the directory must already exist... so we create it
|
||||
// if it didn't exist already...
|
||||
Directory dir = directoryFactory.get(indexDir, getSolrConfig().indexConfig.lockType);
|
||||
if (dir != null) {
|
||||
|
||||
Directory dir = directoryFactory.get(indexDir,
|
||||
getSolrConfig().indexConfig.lockType);
|
||||
try {
|
||||
if (IndexWriter.isLocked(dir)) {
|
||||
if (removeLocks) {
|
||||
log.warn(logid + "WARNING: Solr index directory '{}' is locked. Unlocking...", indexDir);
|
||||
log.warn(
|
||||
logid
|
||||
+ "WARNING: Solr index directory '{}' is locked. Unlocking...",
|
||||
indexDir);
|
||||
IndexWriter.unlock(dir);
|
||||
} else {
|
||||
log.error(logid + "Solr index directory '{}' is locked. Throwing exception", indexDir);
|
||||
throw new LockObtainFailedException("Index locked for write for core " + name);
|
||||
log.error(logid
|
||||
+ "Solr index directory '{}' is locked. Throwing exception",
|
||||
indexDir);
|
||||
throw new LockObtainFailedException(
|
||||
"Index locked for write for core " + name);
|
||||
}
|
||||
|
||||
directoryFactory.release(dir);
|
||||
}
|
||||
} finally {
|
||||
directoryFactory.release(dir);
|
||||
}
|
||||
}
|
||||
|
@ -456,9 +503,7 @@ public final class SolrCore implements SolrInfoMBean {
|
|||
writer.close();
|
||||
}
|
||||
|
||||
} catch (IOException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/** Creates an instance by trying a constructor that accepts a SolrCore before
|
||||
|
@ -636,11 +681,17 @@ public final class SolrCore implements SolrInfoMBean {
|
|||
|
||||
booleanQueryMaxClauseCount();
|
||||
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
|
||||
try {
|
||||
|
||||
initListeners();
|
||||
|
||||
if (updateHandler == null) {
|
||||
initDeletionPolicy();
|
||||
}
|
||||
|
||||
this.codec= initCodec(solrConfig, schema);
|
||||
this.codec = initCodec(solrConfig, schema);
|
||||
|
||||
if (updateHandler == null) {
|
||||
initDirectoryFactory();
|
||||
|
@ -658,20 +709,17 @@ public final class SolrCore implements SolrInfoMBean {
|
|||
initValueSourceParsers();
|
||||
initTransformerFactories();
|
||||
|
||||
this.searchComponents = Collections.unmodifiableMap(loadSearchComponents());
|
||||
this.searchComponents = Collections
|
||||
.unmodifiableMap(loadSearchComponents());
|
||||
|
||||
// Processors initialized before the handlers
|
||||
updateProcessorChains = loadUpdateProcessorChains();
|
||||
reqHandlers = new RequestHandlers(this);
|
||||
reqHandlers.initHandlersFromConfig( solrConfig );
|
||||
|
||||
reqHandlers.initHandlersFromConfig(solrConfig);
|
||||
|
||||
// Handle things that should eventually go away
|
||||
initDeprecatedSupport();
|
||||
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
|
||||
try {
|
||||
// cause the executor to stall so firstSearcher events won't fire
|
||||
// until after inform() has been called for all components.
|
||||
// searchExecutor must be single-threaded for this to work
|
||||
|
@ -697,12 +745,14 @@ public final class SolrCore implements SolrInfoMBean {
|
|||
}
|
||||
}
|
||||
|
||||
// Open the searcher *before* the update handler so we don't end up opening
|
||||
// Open the searcher *before* the update handler so we don't end up
|
||||
// opening
|
||||
// one in the middle.
|
||||
// With lockless commits in Lucene now, this probably shouldn't be an issue anymore
|
||||
// With lockless commits in Lucene now, this probably shouldn't be an
|
||||
// issue anymore
|
||||
|
||||
try {
|
||||
getSearcher(false,false,null,true);
|
||||
getSearcher(false, false, null, true);
|
||||
} finally {
|
||||
newReaderCreator = null;
|
||||
if (iwRef != null) iwRef.decref();
|
||||
|
@ -721,8 +771,8 @@ public final class SolrCore implements SolrInfoMBean {
|
|||
infoRegistry.put("updateHandler", this.updateHandler);
|
||||
|
||||
// Finally tell anyone who wants to know
|
||||
resourceLoader.inform( resourceLoader );
|
||||
resourceLoader.inform( this ); // last call before the latch is released.
|
||||
resourceLoader.inform(resourceLoader);
|
||||
resourceLoader.inform(this); // last call before the latch is released.
|
||||
} catch (Throwable e) {
|
||||
latch.countDown();//release the latch, otherwise we block trying to do the close. This should be fine, since counting down on a latch of 0 is still fine
|
||||
//close down the searcher and any other resources, if it exists, as this is not recoverable
|
||||
|
@ -911,12 +961,15 @@ public final class SolrCore implements SolrInfoMBean {
|
|||
SolrException.log(log,e);
|
||||
}
|
||||
|
||||
|
||||
try {
|
||||
if (updateHandler instanceof IndexWriterCloser) {
|
||||
decrefSolrCoreState((IndexWriterCloser)updateHandler);
|
||||
decrefSolrCoreState((IndexWriterCloser) updateHandler);
|
||||
} else {
|
||||
decrefSolrCoreState(null);
|
||||
}
|
||||
} catch (Throwable e) {
|
||||
SolrException.log(log, e);
|
||||
}
|
||||
|
||||
try {
|
||||
searcherExecutor.shutdown();
|
||||
|
@ -948,6 +1001,7 @@ public final class SolrCore implements SolrInfoMBean {
|
|||
SolrException.log(log,e);
|
||||
}
|
||||
|
||||
if (solrCoreState != null) { // bad startup case
|
||||
synchronized (solrCoreState) {
|
||||
if (solrCoreStateRefCnt == 0) {
|
||||
try {
|
||||
|
@ -957,6 +1011,7 @@ public final class SolrCore implements SolrInfoMBean {
|
|||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if( closeHooks != null ) {
|
||||
|
@ -1271,13 +1326,13 @@ public final class SolrCore implements SolrInfoMBean {
|
|||
openSearcherLock.lock();
|
||||
try {
|
||||
String newIndexDir = getNewIndexDir();
|
||||
File indexDirFile = null;
|
||||
File newIndexDirFile = null;
|
||||
String indexDirFile = null;
|
||||
String newIndexDirFile = null;
|
||||
|
||||
// if it's not a normal near-realtime update, check that paths haven't changed.
|
||||
if (!nrt) {
|
||||
indexDirFile = new File(getIndexDir()).getCanonicalFile();
|
||||
newIndexDirFile = new File(newIndexDir).getCanonicalFile();
|
||||
indexDirFile = getDirectoryFactory().normalize(getIndexDir());
|
||||
newIndexDirFile = getDirectoryFactory().normalize(newIndexDir);
|
||||
}
|
||||
|
||||
synchronized (searcherLock) {
|
||||
|
@ -1305,6 +1360,7 @@ public final class SolrCore implements SolrInfoMBean {
|
|||
} else {
|
||||
// verbose("start reopen without writer, reader=", currentReader);
|
||||
newReader = DirectoryReader.openIfChanged(currentReader);
|
||||
|
||||
// verbose("reopen result", newReader);
|
||||
}
|
||||
|
||||
|
|
|
@ -132,7 +132,11 @@ public class SolrDeletionPolicy implements IndexDeletionPolicy, NamedListInitial
|
|||
synchronized (this) {
|
||||
long maxCommitAgeTimeStamp = -1L;
|
||||
IndexCommit newest = commits.get(commits.size() - 1);
|
||||
log.info("newest commit = " + newest.getGeneration());
|
||||
try {
|
||||
log.info("newest commit = " + newest.getGeneration() + newest.getFileNames().toString());
|
||||
} catch (IOException e1) {
|
||||
throw new RuntimeException();
|
||||
}
|
||||
|
||||
int singleSegKept = (newest.getSegmentCount() == 1) ? 1 : 0;
|
||||
int totalKept = 1;
|
||||
|
|
|
@ -19,6 +19,7 @@ package org.apache.solr.core;
|
|||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.FSDirectory;
|
||||
|
||||
|
@ -26,6 +27,9 @@ import org.apache.lucene.store.FSDirectory;
|
|||
* Directory provider which mimics original Solr
|
||||
* {@link org.apache.lucene.store.FSDirectory} based behavior.
|
||||
*
|
||||
* File based DirectoryFactory implementations generally extend
|
||||
* this class.
|
||||
*
|
||||
*/
|
||||
public class StandardDirectoryFactory extends CachingDirectoryFactory {
|
||||
|
||||
|
@ -33,4 +37,41 @@ public class StandardDirectoryFactory extends CachingDirectoryFactory {
|
|||
protected Directory create(String path) throws IOException {
|
||||
return FSDirectory.open(new File(path));
|
||||
}
|
||||
|
||||
@Override
|
||||
public String normalize(String path) throws IOException {
|
||||
return new File(path).getCanonicalPath();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void remove(Directory dir) throws IOException {
|
||||
CacheValue val = byDirectoryCache.get(dir);
|
||||
if (val == null) {
|
||||
throw new NullPointerException("Unknown directory " + dir);
|
||||
}
|
||||
File dirFile = new File(val.path);
|
||||
FileUtils.deleteDirectory(dirFile);
|
||||
}
|
||||
|
||||
/**
|
||||
* Override for more efficient moves.
|
||||
*
|
||||
* @throws IOException
|
||||
* If there is a low-level I/O error.
|
||||
*/
|
||||
public void move(Directory fromDir, Directory toDir, String fileName)
|
||||
throws IOException {
|
||||
if (fromDir instanceof FSDirectory && toDir instanceof FSDirectory) {
|
||||
File dir1 = ((FSDirectory) fromDir).getDirectory();
|
||||
File dir2 = ((FSDirectory) toDir).getDirectory();
|
||||
File indexFileInTmpDir = new File(dir1, fileName);
|
||||
File indexFileInIndex = new File(dir2, fileName);
|
||||
boolean success = indexFileInTmpDir.renameTo(indexFileInIndex);
|
||||
if (success) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
super.move(fromDir, toDir, fileName);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -119,7 +119,7 @@ import org.slf4j.LoggerFactory;
|
|||
* </li>
|
||||
* <li><code>http://.../ping?action=status</code>
|
||||
* - returns a status code indicating if the healthcheck file exists
|
||||
* ("<code>enabled</code>") or not ("<code>disabled<code>")
|
||||
* ("<code>enabled</code>") or not ("<code>disabled</code>")
|
||||
* </li>
|
||||
* </ul>
|
||||
*
|
||||
|
|
|
@ -38,11 +38,13 @@ import java.util.zip.Adler32;
|
|||
import java.util.zip.Checksum;
|
||||
import java.util.zip.DeflaterOutputStream;
|
||||
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.commons.io.IOUtils;
|
||||
import org.apache.lucene.index.IndexCommit;
|
||||
import org.apache.lucene.index.IndexDeletionPolicy;
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.apache.solr.common.SolrException;
|
||||
import org.apache.solr.common.SolrException.ErrorCode;
|
||||
import org.apache.solr.common.params.CommonParams;
|
||||
|
@ -53,6 +55,7 @@ import org.apache.solr.common.util.NamedList;
|
|||
import org.apache.solr.common.util.SimpleOrderedMap;
|
||||
import org.apache.solr.common.util.StrUtils;
|
||||
import org.apache.solr.core.CloseHook;
|
||||
import org.apache.solr.core.DirectoryFactory;
|
||||
import org.apache.solr.core.IndexDeletionPolicyWrapper;
|
||||
import org.apache.solr.core.SolrCore;
|
||||
import org.apache.solr.core.SolrDeletionPolicy;
|
||||
|
@ -204,9 +207,6 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
rsp.add(STATUS,ERR_STATUS);
|
||||
rsp.add("message","No slave configured");
|
||||
}
|
||||
} else if (command.equals(CMD_FILE_CHECKSUM)) {
|
||||
// this command is not used by anyone
|
||||
getFileChecksum(solrParams, rsp);
|
||||
} else if (command.equals(CMD_SHOW_COMMITS)) {
|
||||
rsp.add(CMD_SHOW_COMMITS, getCommits());
|
||||
} else if (command.equals(CMD_DETAILS)) {
|
||||
|
@ -239,30 +239,6 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
return l;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the checksum of a file
|
||||
*/
|
||||
private void getFileChecksum(SolrParams solrParams, SolrQueryResponse rsp) {
|
||||
Checksum checksum = new Adler32();
|
||||
File dir = new File(core.getIndexDir());
|
||||
rsp.add(CHECKSUM, getCheckSums(solrParams.getParams(FILE), dir, checksum));
|
||||
dir = new File(core.getResourceLoader().getConfigDir());
|
||||
rsp.add(CONF_CHECKSUM, getCheckSums(solrParams.getParams(CONF_FILE_SHORT), dir, checksum));
|
||||
}
|
||||
|
||||
private Map<String, Long> getCheckSums(String[] files, File dir, Checksum checksum) {
|
||||
Map<String, Long> checksumMap = new HashMap<String, Long>();
|
||||
if (files == null || files.length == 0)
|
||||
return checksumMap;
|
||||
for (String file : files) {
|
||||
File f = new File(dir, file);
|
||||
Long checkSumVal = getCheckSum(checksum, f);
|
||||
if (checkSumVal != null)
|
||||
checksumMap.put(file, checkSumVal);
|
||||
}
|
||||
return checksumMap;
|
||||
}
|
||||
|
||||
static Long getCheckSum(Checksum checksum, File f) {
|
||||
FileInputStream fis = null;
|
||||
checksum.reset();
|
||||
|
@ -343,15 +319,22 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
}
|
||||
|
||||
/**
|
||||
* This method adds an Object of FileStream to the resposnse . The FileStream implements a custom protocol which is
|
||||
* This method adds an Object of FileStream to the response . The FileStream implements a custom protocol which is
|
||||
* understood by SnapPuller.FileFetcher
|
||||
*
|
||||
* @see org.apache.solr.handler.SnapPuller.FileFetcher
|
||||
* @see org.apache.solr.handler.SnapPuller.LocalFsFileFetcher
|
||||
* @see org.apache.solr.handler.SnapPuller.DirectoryFileFetcher
|
||||
*/
|
||||
private void getFileStream(SolrParams solrParams, SolrQueryResponse rsp) {
|
||||
ModifiableSolrParams rawParams = new ModifiableSolrParams(solrParams);
|
||||
rawParams.set(CommonParams.WT, FILE_STREAM);
|
||||
rsp.add(FILE_STREAM, new FileStream(solrParams));
|
||||
|
||||
String cfileName = solrParams.get(CONF_FILE_SHORT);
|
||||
if (cfileName != null) {
|
||||
rsp.add(FILE_STREAM, new LocalFsFileStream(solrParams));
|
||||
} else {
|
||||
rsp.add(FILE_STREAM, new DirectoryFileStream(solrParams));
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
|
@ -372,21 +355,29 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
// reserve the index commit for some time
|
||||
core.getDeletionPolicy().setReserveDuration(gen, reserveCommitDuration);
|
||||
List<Map<String, Object>> result = new ArrayList<Map<String, Object>>();
|
||||
Directory dir = null;
|
||||
try {
|
||||
//get all the files in the commit
|
||||
//use a set to workaround possible Lucene bug which returns same file name multiple times
|
||||
// get all the files in the commit
|
||||
// use a set to workaround possible Lucene bug which returns same file
|
||||
// name multiple times
|
||||
Collection<String> files = new HashSet<String>(commit.getFileNames());
|
||||
dir = core.getDirectoryFactory().get(core.getNewIndexDir(), null);
|
||||
try {
|
||||
|
||||
for (String fileName : files) {
|
||||
if(fileName.endsWith(".lock")) continue;
|
||||
File file = new File(core.getIndexDir(), fileName);
|
||||
Map<String, Object> fileMeta = getFileInfo(file);
|
||||
if (fileName.endsWith(".lock")) continue;
|
||||
Map<String,Object> fileMeta = new HashMap<String,Object>();
|
||||
fileMeta.put(NAME, fileName);
|
||||
fileMeta.put(SIZE, dir.fileLength(fileName));
|
||||
result.add(fileMeta);
|
||||
}
|
||||
} finally {
|
||||
core.getDirectoryFactory().release(dir);
|
||||
}
|
||||
} catch (IOException e) {
|
||||
rsp.add("status", "unable to get file names for given index generation");
|
||||
rsp.add("exception", e);
|
||||
LOG.warn("Unable to get file names for indexCommit generation: "
|
||||
+ gen, e);
|
||||
LOG.error("Unable to get file names for indexCommit generation: " + gen, e);
|
||||
}
|
||||
rsp.add(CMD_GET_FILE_LIST, result);
|
||||
if (confFileNameAlias.size() < 1 || core.getCoreDescriptor().getCoreContainer().isZooKeeperAware())
|
||||
|
@ -444,7 +435,6 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
Map<String, Object> map = new HashMap<String, Object>();
|
||||
map.put(NAME, name);
|
||||
map.put(SIZE, size);
|
||||
map.put(LAST_MODIFIED, lastmodified);
|
||||
map.put(CHECKSUM, checksum);
|
||||
return map;
|
||||
}
|
||||
|
@ -474,18 +464,19 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
}
|
||||
|
||||
long getIndexSize() {
|
||||
return FileUtils.sizeOfDirectory(new File(core.getIndexDir()));
|
||||
Directory dir;
|
||||
long size = 0;
|
||||
try {
|
||||
dir = core.getDirectoryFactory().get(core.getIndexDir(), null);
|
||||
try {
|
||||
size = DirectoryFactory.sizeOfDirectory(dir);
|
||||
} finally {
|
||||
core.getDirectoryFactory().release(dir);
|
||||
}
|
||||
|
||||
/**
|
||||
* Collects the details such as name, size ,lastModified of a file
|
||||
*/
|
||||
private Map<String, Object> getFileInfo(File file) {
|
||||
Map<String, Object> fileMeta = new HashMap<String, Object>();
|
||||
fileMeta.put(NAME, file.getName());
|
||||
fileMeta.put(SIZE, file.length());
|
||||
fileMeta.put(LAST_MODIFIED, file.lastModified());
|
||||
return fileMeta;
|
||||
} catch (IOException e) {
|
||||
SolrException.log(LOG, "IO error while trying to get the size of the Directory", e);
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
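DirectoryFactory.sizeOfDirectory is referenced above but its body is not shown in this hunk; a straightforward version, assuming it simply sums the reported length of every file in the Directory, might look like the sketch below.

// Sketch only: a plausible shape for DirectoryFactory.sizeOfDirectory, summing
// Directory#fileLength over Directory#listAll. The actual helper in the patch may differ.
public static long sizeOfDirectory(Directory directory) throws IOException {
  long size = 0;
  for (String file : directory.listAll()) {
    size += directory.fileLength(file);
  }
  return size;
}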
@ -885,7 +876,8 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
}
|
||||
|
||||
// reboot the writer on the new index
|
||||
core.getUpdateHandler().newIndexWriter(true);
|
||||
// TODO: perhaps this is no longer necessary then?
|
||||
// core.getUpdateHandler().newIndexWriter(true);
|
||||
|
||||
} catch (IOException e) {
|
||||
LOG.warn("Unable to get IndexCommit on startup", e);
|
||||
|
@ -936,7 +928,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
private void registerFileStreamResponseWriter() {
|
||||
core.registerResponseWriter(FILE_STREAM, new BinaryQueryResponseWriter() {
|
||||
public void write(OutputStream out, SolrQueryRequest request, SolrQueryResponse resp) throws IOException {
|
||||
FileStream stream = (FileStream) resp.getValues().get(FILE_STREAM);
|
||||
DirectoryFileStream stream = (DirectoryFileStream) resp.getValues().get(FILE_STREAM);
|
||||
stream.write(out);
|
||||
}
|
||||
|
||||
|
@ -1009,19 +1001,113 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
};
|
||||
}
|
||||
|
||||
private class FileStream {
|
||||
private SolrParams params;
|
||||
private class DirectoryFileStream {
|
||||
protected SolrParams params;
|
||||
|
||||
private FastOutputStream fos;
|
||||
protected FastOutputStream fos;
|
||||
|
||||
private Long indexGen;
|
||||
private IndexDeletionPolicyWrapper delPolicy;
|
||||
protected Long indexGen;
|
||||
protected IndexDeletionPolicyWrapper delPolicy;
|
||||
|
||||
public FileStream(SolrParams solrParams) {
|
||||
public DirectoryFileStream(SolrParams solrParams) {
|
||||
params = solrParams;
|
||||
delPolicy = core.getDeletionPolicy();
|
||||
}
|
||||
|
||||
public void write(OutputStream out) throws IOException {
|
||||
String fileName = params.get(FILE);
|
||||
String cfileName = params.get(CONF_FILE_SHORT);
|
||||
String sOffset = params.get(OFFSET);
|
||||
String sLen = params.get(LEN);
|
||||
String compress = params.get(COMPRESSION);
|
||||
String sChecksum = params.get(CHECKSUM);
|
||||
String sGen = params.get(GENERATION);
|
||||
if (sGen != null) indexGen = Long.parseLong(sGen);
|
||||
if (Boolean.parseBoolean(compress)) {
|
||||
fos = new FastOutputStream(new DeflaterOutputStream(out));
|
||||
} else {
|
||||
fos = new FastOutputStream(out);
|
||||
}
|
||||
|
||||
int packetsWritten = 0;
|
||||
IndexInput in = null;
|
||||
try {
|
||||
long offset = -1;
|
||||
int len = -1;
|
||||
// check if checksum is requested
|
||||
boolean useChecksum = Boolean.parseBoolean(sChecksum);
|
||||
if (sOffset != null) offset = Long.parseLong(sOffset);
|
||||
if (sLen != null) len = Integer.parseInt(sLen);
|
||||
if (fileName == null && cfileName == null) {
|
||||
// no file name given; do nothing
|
||||
writeNothing();
|
||||
}
|
||||
|
||||
RefCounted<SolrIndexSearcher> sref = core.getSearcher();
|
||||
Directory dir;
|
||||
try {
|
||||
SolrIndexSearcher searcher = sref.get();
|
||||
dir = searcher.getIndexReader().directory();
|
||||
} finally {
|
||||
sref.decref();
|
||||
}
|
||||
in = dir.openInput(fileName, IOContext.READONCE);
|
||||
// if an offset is given, move the pointer to that point
|
||||
if (offset != -1) in.seek(offset);
|
||||
byte[] buf = new byte[(len == -1 || len > PACKET_SZ) ? PACKET_SZ : len];
|
||||
Checksum checksum = null;
|
||||
if (useChecksum) checksum = new Adler32();
|
||||
|
||||
long filelen = dir.fileLength(fileName);
|
||||
while (true) {
|
||||
offset = offset == -1 ? 0 : offset;
|
||||
int read = (int) Math.min(buf.length, filelen - offset);
|
||||
in.readBytes(buf, offset == -1 ? 0 : (int) offset, read);
|
||||
|
||||
fos.writeInt((int) read);
|
||||
if (useChecksum) {
|
||||
checksum.reset();
|
||||
checksum.update(buf, 0, read);
|
||||
fos.writeLong(checksum.getValue());
|
||||
}
|
||||
fos.write(buf, 0, read);
|
||||
fos.flush();
|
||||
if (indexGen != null && (packetsWritten % 5 == 0)) {
|
||||
// after every 5 packets reserve the commitpoint for some time
|
||||
delPolicy.setReserveDuration(indexGen, reserveCommitDuration);
|
||||
}
|
||||
packetsWritten++;
|
||||
if (read != buf.length) {
|
||||
writeNothing();
|
||||
fos.close();
|
||||
break;
|
||||
}
|
||||
}
|
||||
} catch (IOException e) {
|
||||
LOG.warn("Exception while writing response for params: " + params, e);
|
||||
} finally {
|
||||
if (in != null) {
|
||||
in.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Used to write a marker for EOF
|
||||
*/
|
||||
protected void writeNothing() throws IOException {
|
||||
fos.writeInt(0);
|
||||
fos.flush();
|
||||
}
|
||||
}
|
||||
|
||||
private class LocalFsFileStream extends DirectoryFileStream {
|
||||
|
||||
public LocalFsFileStream(SolrParams solrParams) {
|
||||
super(solrParams);
|
||||
}
|
||||
|
||||
public void write(OutputStream out) throws IOException {
|
||||
String fileName = params.get(FILE);
|
||||
String cfileName = params.get(CONF_FILE_SHORT);
|
||||
|
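The write loop in DirectoryFileStream above frames the response as length-prefixed packets, each optionally preceded by an Adler32 checksum, and terminates the stream with a zero-length packet (writeNothing). A hedged, stand-alone sketch of a reader for that framing, independent of SnapPuller's fetcher classes:

// Illustrative reader for the wt=filestream framing:
// [int length][long Adler32 checksum, only when requested][length bytes] ... [int 0]
static void readFileStream(DataInputStream in, OutputStream dest, boolean withChecksum)
    throws IOException {
  Checksum checksum = new Adler32();
  byte[] buf = new byte[1024 * 1024];
  while (true) {
    int len = in.readInt();
    if (len <= 0) return;                       // zero-length packet marks EOF
    long expected = withChecksum ? in.readLong() : -1L;
    if (buf.length < len) buf = new byte[len];
    in.readFully(buf, 0, len);
    if (withChecksum) {
      checksum.reset();
      checksum.update(buf, 0, len);
      if (checksum.getValue() != expected) {
        throw new IOException("checksum mismatch in filestream packet");
      }
    }
    dest.write(buf, 0, len);
  }
}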
@ -1053,13 +1139,10 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
}
|
||||
|
||||
File file = null;
|
||||
if (cfileName != null) {
|
||||
|
||||
// if it is a conf file, read from the config directory
|
||||
file = new File(core.getResourceLoader().getConfigDir(), cfileName);
|
||||
} else {
|
||||
// else read from the index directory
|
||||
file = new File(core.getIndexDir(), fileName);
|
||||
}
|
||||
|
||||
if (file.exists() && file.canRead()) {
|
||||
inputStream = new FileInputStream(file);
|
||||
FileChannel channel = inputStream.getChannel();
|
||||
|
@ -1103,15 +1186,6 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
IOUtils.closeQuietly(inputStream);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Used to write a marker for EOF
|
||||
*/
|
||||
private void writeNothing() throws IOException {
|
||||
fos.writeInt(0);
|
||||
fos.flush();
|
||||
}
|
||||
}
|
||||
|
||||
public static final String MASTER_URL = "masterUrl";
|
||||
|
@ -1132,8 +1206,6 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
|
||||
public static final String CMD_GET_FILE = "filecontent";
|
||||
|
||||
public static final String CMD_FILE_CHECKSUM = "filechecksum";
|
||||
|
||||
public static final String CMD_DISABLE_POLL = "disablepoll";
|
||||
|
||||
public static final String CMD_DISABLE_REPL = "disablereplication";
|
||||
|
@ -1158,8 +1230,6 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
|
||||
public static final String SIZE = "size";
|
||||
|
||||
public static final String LAST_MODIFIED = "lastmodified";
|
||||
|
||||
public static final String CONF_FILE_SHORT = "cf";
|
||||
|
||||
public static final String CHECKSUM = "checksum";
|
||||
|
|
|
@ -16,11 +16,67 @@
|
|||
*/
|
||||
package org.apache.solr.handler;
|
||||
|
||||
import static org.apache.solr.handler.ReplicationHandler.ALIAS;
|
||||
import static org.apache.solr.handler.ReplicationHandler.CHECKSUM;
|
||||
import static org.apache.solr.handler.ReplicationHandler.CMD_DETAILS;
|
||||
import static org.apache.solr.handler.ReplicationHandler.CMD_GET_FILE;
|
||||
import static org.apache.solr.handler.ReplicationHandler.CMD_GET_FILE_LIST;
|
||||
import static org.apache.solr.handler.ReplicationHandler.CMD_INDEX_VERSION;
|
||||
import static org.apache.solr.handler.ReplicationHandler.COMMAND;
|
||||
import static org.apache.solr.handler.ReplicationHandler.COMPRESSION;
|
||||
import static org.apache.solr.handler.ReplicationHandler.CONF_FILES;
|
||||
import static org.apache.solr.handler.ReplicationHandler.CONF_FILE_SHORT;
|
||||
import static org.apache.solr.handler.ReplicationHandler.EXTERNAL;
|
||||
import static org.apache.solr.handler.ReplicationHandler.FILE;
|
||||
import static org.apache.solr.handler.ReplicationHandler.FILE_STREAM;
|
||||
import static org.apache.solr.handler.ReplicationHandler.GENERATION;
|
||||
import static org.apache.solr.handler.ReplicationHandler.INTERNAL;
|
||||
import static org.apache.solr.handler.ReplicationHandler.MASTER_URL;
|
||||
import static org.apache.solr.handler.ReplicationHandler.NAME;
|
||||
import static org.apache.solr.handler.ReplicationHandler.OFFSET;
|
||||
import static org.apache.solr.handler.ReplicationHandler.SIZE;
|
||||
|
||||
import java.io.EOFException;
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.nio.channels.FileChannel;
|
||||
import java.text.SimpleDateFormat;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Date;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
import java.util.zip.Adler32;
|
||||
import java.util.zip.Checksum;
|
||||
import java.util.zip.InflaterInputStream;
|
||||
|
||||
import org.apache.commons.io.IOUtils;
|
||||
import org.apache.http.client.HttpClient;
|
||||
import org.apache.lucene.index.IndexCommit;
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.apache.lucene.store.IndexOutput;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.HttpClientUtil;
|
||||
|
@ -31,35 +87,22 @@ import org.apache.solr.common.params.CommonParams;
|
|||
import org.apache.solr.common.params.ModifiableSolrParams;
|
||||
import org.apache.solr.common.params.SolrParams;
|
||||
import org.apache.solr.common.util.FastInputStream;
|
||||
import org.apache.solr.util.DefaultSolrThreadFactory;
|
||||
import org.apache.solr.util.FileUtils;
|
||||
import org.apache.solr.common.util.NamedList;
|
||||
import org.apache.solr.core.CachingDirectoryFactory.CloseListener;
|
||||
import org.apache.solr.core.SolrCore;
|
||||
import org.apache.solr.core.DirectoryFactory;
|
||||
import org.apache.solr.core.IndexDeletionPolicyWrapper;
|
||||
import static org.apache.solr.handler.ReplicationHandler.*;
|
||||
|
||||
import org.apache.solr.core.SolrCore;
|
||||
import org.apache.solr.handler.ReplicationHandler.FileInfo;
|
||||
import org.apache.solr.request.LocalSolrQueryRequest;
|
||||
import org.apache.solr.request.SolrQueryRequest;
|
||||
import org.apache.solr.search.SolrIndexSearcher;
|
||||
import org.apache.solr.update.CommitUpdateCommand;
|
||||
import org.apache.solr.util.DefaultSolrThreadFactory;
|
||||
import org.apache.solr.util.FileUtils;
|
||||
import org.apache.solr.util.RefCounted;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.*;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.nio.channels.FileChannel;
|
||||
import java.text.SimpleDateFormat;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.*;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
import java.util.zip.Adler32;
|
||||
import java.util.zip.Checksum;
|
||||
import java.util.zip.InflaterInputStream;
|
||||
|
||||
/**
|
||||
* <p/> Provides functionality for downloading changed index files and config files, plus a timer for scheduling fetches from the
|
||||
* master. </p>
|
||||
|
@ -96,7 +139,9 @@ public class SnapPuller {
|
|||
|
||||
private volatile Map<String, Object> currentFile;
|
||||
|
||||
private volatile FileFetcher fileFetcher;
|
||||
private volatile DirectoryFileFetcher dirFileFetcher;
|
||||
|
||||
private volatile LocalFsFileFetcher localFileFetcher;
|
||||
|
||||
private volatile ExecutorService fsyncService;
|
||||
|
||||
|
@ -247,9 +292,12 @@ public class SnapPuller {
|
|||
* @return true on success, false if slave is already in sync
|
||||
* @throws IOException if an exception occurs
|
||||
*/
|
||||
boolean fetchLatestIndex(SolrCore core, boolean forceReplication) throws IOException, InterruptedException {
|
||||
boolean fetchLatestIndex(final SolrCore core, boolean forceReplication) throws IOException, InterruptedException {
|
||||
successfulInstall = false;
|
||||
replicationStartTime = System.currentTimeMillis();
|
||||
Directory tmpIndexDir = null;
|
||||
Directory indexDir = null;
|
||||
boolean deleteTmpIdxDir = true;
|
||||
try {
|
||||
//get the current 'replicateable' index version in the master
|
||||
NamedList response = null;
|
||||
|
@ -318,28 +366,34 @@ public class SnapPuller {
|
|||
// if the generation of the master is older than that of the slave, it means they are not compatible to be copied
|
||||
// then a new index directory needs to be created and all the files copied
|
||||
boolean isFullCopyNeeded = IndexDeletionPolicyWrapper.getCommitTimestamp(commit) >= latestVersion || forceReplication;
|
||||
File tmpIndexDir = createTempindexDir(core);
|
||||
if (isIndexStale()) {
|
||||
|
||||
String tmpIdxDirName = "index." + new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date());
|
||||
String tmpIndex = createTempindexDir(core, tmpIdxDirName);
|
||||
|
||||
tmpIndexDir = core.getDirectoryFactory().get(tmpIndex, null);
|
||||
|
||||
// make sure it's the newest known index dir...
|
||||
indexDir = core.getDirectoryFactory().get(core.getNewIndexDir(), null);
|
||||
Directory oldDirectory = null;
|
||||
|
||||
try {
|
||||
|
||||
if (isIndexStale(indexDir)) {
|
||||
isFullCopyNeeded = true;
|
||||
}
|
||||
LOG.info("Starting download to " + tmpIndexDir + " fullCopy=" + isFullCopyNeeded);
|
||||
successfulInstall = false;
|
||||
boolean deleteTmpIdxDir = true;
|
||||
|
||||
// make sure it's the newest known index dir...
|
||||
final File indexDir = new File(core.getNewIndexDir());
|
||||
Directory oldDirectory = null;
|
||||
try {
|
||||
downloadIndexFiles(isFullCopyNeeded, tmpIndexDir, latestGeneration);
|
||||
LOG.info("Total time taken for download : " + ((System.currentTimeMillis() - replicationStartTime) / 1000) + " secs");
|
||||
Collection<Map<String, Object>> modifiedConfFiles = getModifiedConfFiles(confFilesToDownload);
|
||||
if (!modifiedConfFiles.isEmpty()) {
|
||||
downloadConfFiles(confFilesToDownload, latestGeneration);
|
||||
if (isFullCopyNeeded) {
|
||||
successfulInstall = modifyIndexProps(tmpIndexDir.getName());
|
||||
successfulInstall = modifyIndexProps(tmpIdxDirName);
|
||||
deleteTmpIdxDir = false;
|
||||
} else {
|
||||
successfulInstall = copyIndexFiles(tmpIndexDir, indexDir);
|
||||
successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);
|
||||
}
|
||||
if (successfulInstall) {
|
||||
LOG.info("Configuration files are modified, core will be reloaded");
|
||||
|
@ -349,7 +403,7 @@ public class SnapPuller {
|
|||
} else {
|
||||
terminateAndWaitFsyncService();
|
||||
if (isFullCopyNeeded) {
|
||||
successfulInstall = modifyIndexProps(tmpIndexDir.getName());
|
||||
successfulInstall = modifyIndexProps(tmpIdxDirName);
|
||||
deleteTmpIdxDir = false;
|
||||
RefCounted<IndexWriter> iw = core.getUpdateHandler().getSolrCoreState().getIndexWriter(core);
|
||||
try {
|
||||
|
@ -358,7 +412,7 @@ public class SnapPuller {
|
|||
iw.decref();
|
||||
}
|
||||
} else {
|
||||
successfulInstall = copyIndexFiles(tmpIndexDir, indexDir);
|
||||
successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);
|
||||
}
|
||||
if (successfulInstall) {
|
||||
logReplicationTimeAndConfFiles(modifiedConfFiles, successfulInstall);
|
||||
|
@ -367,12 +421,23 @@ public class SnapPuller {
|
|||
|
||||
if (isFullCopyNeeded) {
|
||||
// we have to do this before commit
|
||||
final Directory freezeIndexDir = indexDir;
|
||||
core.getDirectoryFactory().addCloseListener(oldDirectory, new CloseListener(){
|
||||
|
||||
@Override
|
||||
public void onClose() {
|
||||
LOG.info("removing old index directory " + indexDir);
|
||||
delTree(indexDir);
|
||||
public void preClose() {
|
||||
LOG.info("removing old index files " + freezeIndexDir);
|
||||
DirectoryFactory.empty(freezeIndexDir);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void postClose() {
|
||||
LOG.info("removing old index directory " + freezeIndexDir);
|
||||
try {
|
||||
core.getDirectoryFactory().remove(freezeIndexDir);
|
||||
} catch (IOException e) {
|
||||
SolrException.log(LOG, "Error removing directory " + freezeIndexDir, e);
|
||||
}
|
||||
}
|
||||
|
||||
});
|
||||
|
@ -400,21 +465,39 @@ public class SnapPuller {
|
|||
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Index fetch failed : ", e);
|
||||
} finally {
|
||||
if (deleteTmpIdxDir) {
|
||||
LOG.info("removing temporary index download directory " + tmpIndexDir);
|
||||
delTree(tmpIndexDir);
|
||||
LOG.info("removing temporary index download directory files " + tmpIndexDir);
|
||||
DirectoryFactory.empty(tmpIndexDir);
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
try {
|
||||
if (!successfulInstall) {
|
||||
logReplicationTimeAndConfFiles(null, successfulInstall);
|
||||
}
|
||||
filesToDownload = filesDownloaded = confFilesDownloaded = confFilesToDownload = null;
|
||||
replicationStartTime = 0;
|
||||
fileFetcher = null;
|
||||
if (fsyncService != null && !fsyncService.isShutdown()) fsyncService.shutdownNow();
|
||||
dirFileFetcher = null;
|
||||
localFileFetcher = null;
|
||||
if (fsyncService != null && !fsyncService.isShutdown()) fsyncService
|
||||
.shutdownNow();
|
||||
fsyncService = null;
|
||||
stop = false;
|
||||
fsyncException = null;
|
||||
} finally {
|
||||
if (tmpIndexDir != null) {
|
||||
core.getDirectoryFactory().release(tmpIndexDir);
|
||||
}
|
||||
if (deleteTmpIdxDir && tmpIndexDir != null) {
|
||||
try {
|
||||
core.getDirectoryFactory().remove(tmpIndexDir);
|
||||
} catch (IOException e) {
|
||||
SolrException.log(LOG, "Error removing directory " + tmpIndexDir, e);
|
||||
}
|
||||
}
|
||||
if (indexDir != null) {
|
||||
core.getDirectoryFactory().release(indexDir);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -535,7 +618,7 @@ public class SnapPuller {
|
|||
SolrQueryRequest req = new LocalSolrQueryRequest(solrCore,
|
||||
new ModifiableSolrParams());
|
||||
// reboot the writer on the new index and get a new searcher
|
||||
solrCore.getUpdateHandler().newIndexWriter(isFullCopyNeeded);
|
||||
solrCore.getUpdateHandler().newIndexWriter(isFullCopyNeeded, false);
|
||||
|
||||
try {
|
||||
// first try to open an NRT searcher so that the new
|
||||
|
@ -567,11 +650,9 @@ public class SnapPuller {
|
|||
/**
|
||||
* All the files are copied to a temp dir first
|
||||
*/
|
||||
private File createTempindexDir(SolrCore core) {
|
||||
String tmpIdxDirName = "index." + new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date());
|
||||
private String createTempindexDir(SolrCore core, String tmpIdxDirName) {
|
||||
File tmpIdxDir = new File(core.getDataDir(), tmpIdxDirName);
|
||||
tmpIdxDir.mkdirs();
|
||||
return tmpIdxDir;
|
||||
return tmpIdxDir.toString();
|
||||
}
|
||||
|
||||
private void reloadCore() {
|
||||
|
@ -599,9 +680,9 @@ public class SnapPuller {
|
|||
}
|
||||
for (Map<String, Object> file : confFilesToDownload) {
|
||||
String saveAs = (String) (file.get(ALIAS) == null ? file.get(NAME) : file.get(ALIAS));
|
||||
fileFetcher = new FileFetcher(tmpconfDir, file, saveAs, true, latestGeneration);
|
||||
localFileFetcher = new LocalFsFileFetcher(tmpconfDir, file, saveAs, true, latestGeneration);
|
||||
currentFile = file;
|
||||
fileFetcher.fetchFile();
|
||||
localFileFetcher.fetchFile();
|
||||
confFilesDownloaded.add(new HashMap<String, Object>(file));
|
||||
}
|
||||
// this is called before copying the files to the original conf dir
|
||||
|
@ -617,22 +698,30 @@ public class SnapPuller {
|
|||
* Download the index files. If a new index is needed, download all the files.
|
||||
*
|
||||
* @param downloadCompleteIndex is it a fresh index copy
|
||||
* @param tmpIdxDir the directory to which files need to be downloadeed to
|
||||
* @param tmpIndexDir the directory to which files need to be downloaded
|
||||
* @param latestGeneration the version number
|
||||
*/
|
||||
private void downloadIndexFiles(boolean downloadCompleteIndex, File tmpIdxDir, long latestGeneration) throws Exception {
|
||||
private void downloadIndexFiles(boolean downloadCompleteIndex,
|
||||
Directory tmpIndexDir, long latestGeneration) throws Exception {
|
||||
String indexDir = solrCore.getIndexDir();
|
||||
for (Map<String, Object> file : filesToDownload) {
|
||||
File localIndexFile = new File(indexDir, (String) file.get(NAME));
|
||||
if (!localIndexFile.exists() || downloadCompleteIndex) {
|
||||
fileFetcher = new FileFetcher(tmpIdxDir, file, (String) file.get(NAME), false, latestGeneration);
|
||||
|
||||
// it's okay to use null for lock factory since we know this dir will exist
|
||||
Directory dir = solrCore.getDirectoryFactory().get(indexDir, null);
|
||||
try {
|
||||
for (Map<String,Object> file : filesToDownload) {
|
||||
if (!dir.fileExists((String) file.get(NAME)) || downloadCompleteIndex) {
|
||||
dirFileFetcher = new DirectoryFileFetcher(tmpIndexDir, file,
|
||||
(String) file.get(NAME), false, latestGeneration);
|
||||
currentFile = file;
|
||||
fileFetcher.fetchFile();
|
||||
filesDownloaded.add(new HashMap<String, Object>(file));
|
||||
dirFileFetcher.fetchFile();
|
||||
filesDownloaded.add(new HashMap<String,Object>(file));
|
||||
} else {
|
||||
LOG.info("Skipping download for " + localIndexFile);
|
||||
LOG.info("Skipping download for " + file.get(NAME));
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
solrCore.getDirectoryFactory().release(dir);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -640,13 +729,12 @@ public class SnapPuller {
|
|||
* not compatible (stale).
|
||||
*
|
||||
* @return true if the index is stale and we need to download a fresh copy, false otherwise.
|
||||
* @throws IOException if a low-level I/O error occurs
|
||||
*/
|
||||
private boolean isIndexStale() {
|
||||
private boolean isIndexStale(Directory dir) throws IOException {
|
||||
for (Map<String, Object> file : filesToDownload) {
|
||||
File localIndexFile = new File(solrCore.getIndexDir(), (String) file
|
||||
.get(NAME));
|
||||
if (localIndexFile.exists()
|
||||
&& localIndexFile.length() != (Long) file.get(SIZE)) {
|
||||
if (dir.fileExists((String) file.get(NAME))
|
||||
&& dir.fileLength((String) file.get(NAME)) != (Long) file.get(SIZE)) {
|
||||
// file exists and size is different, therefore we must assume
|
||||
// corrupted index
|
||||
return true;
|
||||
|
@ -659,52 +747,31 @@ public class SnapPuller {
|
|||
* Copy a file by the File#renameTo() method. If it fails, it is considered a failure
|
||||
* <p/>
|
||||
*/
|
||||
private boolean copyAFile(File tmpIdxDir, File indexDir, String fname, List<String> copiedfiles) {
|
||||
File indexFileInTmpDir = new File(tmpIdxDir, fname);
|
||||
File indexFileInIndex = new File(indexDir, fname);
|
||||
boolean success = indexFileInTmpDir.renameTo(indexFileInIndex);
|
||||
if(!success){
|
||||
private boolean moveAFile(Directory tmpIdxDir, Directory indexDir, String fname, List<String> copiedfiles) {
|
||||
boolean success = false;
|
||||
try {
|
||||
LOG.error("Unable to move index file from: " + indexFileInTmpDir
|
||||
+ " to: " + indexFileInIndex + " Trying to do a copy");
|
||||
FileUtils.copyFile(indexFileInTmpDir,indexFileInIndex);
|
||||
success = true;
|
||||
} catch (FileNotFoundException e) {
|
||||
if (!indexDir.exists()) {
|
||||
File parent = indexDir.getParentFile();
|
||||
String[] children = null;
|
||||
if (parent != null) {
|
||||
children = parent.list();
|
||||
if (indexDir.fileExists(fname)) {
|
||||
return true;
|
||||
}
|
||||
LOG.error("The index directory does not exist: " + indexDir.getAbsolutePath()
|
||||
+ " dirs found: " + (children == null ? "none could be found" : Arrays.asList(children)));
|
||||
}
|
||||
LOG.error("Unable to copy index file from: " + indexFileInTmpDir
|
||||
+ " to: " + indexFileInIndex , e);
|
||||
} catch (IOException e) {
|
||||
LOG.error("Unable to copy index file from: " + indexFileInTmpDir
|
||||
+ " to: " + indexFileInIndex , e);
|
||||
}
|
||||
}
|
||||
|
||||
if (!success) {
|
||||
for (String f : copiedfiles) {
|
||||
File indexFile = new File(indexDir, f);
|
||||
if (indexFile.exists())
|
||||
indexFile.delete();
|
||||
}
|
||||
delTree(tmpIdxDir);
|
||||
SolrException.log(LOG, "could not check if a file exists", e);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
try {
|
||||
solrCore.getDirectoryFactory().move(tmpIdxDir, indexDir, fname);
|
||||
success = true;
|
||||
} catch (IOException e) {
|
||||
SolrException.log(LOG, "Could not move file", e);
|
||||
}
|
||||
return success;
|
||||
}
|
||||
|
||||
/**
|
||||
* Copy all index files from the temp index dir to the actual index. The segments_N file is copied last.
|
||||
*/
|
||||
private boolean copyIndexFiles(File tmpIdxDir, File indexDir) {
|
||||
private boolean moveIndexFiles(Directory tmpIdxDir, Directory indexDir) {
|
||||
String segmentsFile = null;
|
||||
List<String> copiedfiles = new ArrayList<String>();
|
||||
List<String> movedfiles = new ArrayList<String>();
|
||||
for (Map<String, Object> f : filesDownloaded) {
|
||||
String fname = (String) f.get(NAME);
|
||||
// the segments file must be copied last
|
||||
|
@ -716,12 +783,12 @@ public class SnapPuller {
|
|||
segmentsFile = fname;
|
||||
continue;
|
||||
}
|
||||
if (!copyAFile(tmpIdxDir, indexDir, fname, copiedfiles)) return false;
|
||||
copiedfiles.add(fname);
|
||||
if (!moveAFile(tmpIdxDir, indexDir, fname, movedfiles)) return false;
|
||||
movedfiles.add(fname);
|
||||
}
|
||||
//copy the segments file last
|
||||
if (segmentsFile != null) {
|
||||
if (!copyAFile(tmpIdxDir, indexDir, segmentsFile, copiedfiles)) return false;
|
||||
if (!moveAFile(tmpIdxDir, indexDir, segmentsFile, movedfiles)) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
@ -759,23 +826,63 @@ public class SnapPuller {
|
|||
*/
|
||||
private boolean modifyIndexProps(String tmpIdxDirName) {
|
||||
LOG.info("New index installed. Updating index properties... index="+tmpIdxDirName);
|
||||
File idxprops = new File(solrCore.getDataDir() + "index.properties");
|
||||
Properties p = new Properties();
|
||||
if (idxprops.exists()) {
|
||||
InputStream is = null;
|
||||
Directory dir = null;
|
||||
try {
|
||||
dir = solrCore.getDirectoryFactory().get(solrCore.getDataDir(), null);
|
||||
if (dir.fileExists("index.properties")){
|
||||
final IndexInput input = dir.openInput("index.properties", IOContext.DEFAULT);
|
||||
|
||||
final InputStream is = new InputStream() {
|
||||
|
||||
@Override
|
||||
public int read() throws IOException {
|
||||
byte next;
|
||||
try {
|
||||
next = input.readByte();
|
||||
} catch (EOFException e) {
|
||||
return -1;
|
||||
}
|
||||
return next;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
super.close();
|
||||
input.close();
|
||||
}
|
||||
};
|
||||
|
||||
try {
|
||||
is = new FileInputStream(idxprops);
|
||||
p.load(is);
|
||||
} catch (Exception e) {
|
||||
LOG.error("Unable to load index.properties");
|
||||
LOG.error("Unable to load index.properties", e);
|
||||
} finally {
|
||||
IOUtils.closeQuietly(is);
|
||||
}
|
||||
}
|
||||
p.put("index", tmpIdxDirName);
|
||||
FileOutputStream os = null;
|
||||
try {
|
||||
os = new FileOutputStream(idxprops);
|
||||
dir.deleteFile("index.properties");
|
||||
} catch (IOException e) {
|
||||
// no problem
|
||||
}
|
||||
final IndexOutput out = dir.createOutput("index.properties", IOContext.DEFAULT);
|
||||
p.put("index", tmpIdxDirName);
|
||||
OutputStream os = null;
|
||||
try {
|
||||
os = new OutputStream() {
|
||||
|
||||
@Override
|
||||
public void write(int b) throws IOException {
|
||||
out.writeByte((byte) b);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
super.close();
|
||||
out.close();
|
||||
}
|
||||
};
|
||||
p.store(os, "index properties");
|
||||
} catch (Exception e) {
|
||||
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
|
||||
|
@ -784,6 +891,19 @@ public class SnapPuller {
|
|||
IOUtils.closeQuietly(os);
|
||||
}
|
||||
return true;
|
||||
|
||||
} catch (IOException e1) {
|
||||
throw new RuntimeException(e1);
|
||||
} finally {
|
||||
if (dir != null) {
|
||||
try {
|
||||
solrCore.getDirectoryFactory().release(dir);
|
||||
} catch (IOException e) {
|
||||
SolrException.log(LOG, "", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private final Map<String, FileInfo> confFileInfoCache = new HashMap<String, FileInfo>();
|
||||
|
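modifyIndexProps above records the name of the live index directory under the "index" key of index.properties, now written through the Directory abstraction. As a complement, a hedged sketch of how a reader could resolve that name again; the buffering and error handling here are illustrative only.

// Sketch only: resolve the active index directory name from index.properties.
Directory dataDir = solrCore.getDirectoryFactory().get(solrCore.getDataDir(), null);
try {
  String indexDirName = "index";                          // default when no properties file
  if (dataDir.fileExists("index.properties")) {
    IndexInput in = dataDir.openInput("index.properties", IOContext.DEFAULT);
    try {
      byte[] bytes = new byte[(int) in.length()];
      in.readBytes(bytes, 0, bytes.length);
      Properties p = new Properties();
      p.load(new java.io.ByteArrayInputStream(bytes));
      indexDirName = p.getProperty("index", indexDirName);
    } finally {
      in.close();
    }
  }
  // the live index then lives under <dataDir>/<indexDirName>
} finally {
  solrCore.getDirectoryFactory().release(dataDir);
}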
@ -821,12 +941,7 @@ public class SnapPuller {
|
|||
return nameVsFile.isEmpty() ? Collections.EMPTY_LIST : nameVsFile.values();
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete the directory tree recursively
|
||||
*/
|
||||
static boolean delTree(File dir) {
|
||||
if (dir == null || !dir.exists())
|
||||
return false;
|
||||
boolean isSuccess = true;
|
||||
File contents[] = dir.listFiles();
|
||||
if (contents != null) {
|
||||
|
@ -902,9 +1017,10 @@ public class SnapPuller {
|
|||
return tmp == null ? Collections.EMPTY_LIST : new ArrayList<Map<String, Object>>(tmp);
|
||||
}
|
||||
|
||||
// TODO: currently does not reflect conf files
|
||||
Map<String, Object> getCurrentFile() {
|
||||
Map<String, Object> tmp = currentFile;
|
||||
FileFetcher tmpFileFetcher = fileFetcher;
|
||||
DirectoryFileFetcher tmpFileFetcher = dirFileFetcher;
|
||||
if (tmp == null)
|
||||
return null;
|
||||
tmp = new HashMap<String, Object>(tmp);
|
||||
|
@ -933,9 +1049,255 @@ public class SnapPuller {
|
|||
/**
|
||||
* The class acts as a client for ReplicationHandler.FileStream. It understands the protocol of wt=filestream
|
||||
*
|
||||
* @see org.apache.solr.handler.ReplicationHandler.FileStream
|
||||
* @see org.apache.solr.handler.ReplicationHandler.DirectoryFileStream
|
||||
*/
|
||||
private class FileFetcher {
|
||||
private class DirectoryFileFetcher {
|
||||
boolean includeChecksum = true;
|
||||
|
||||
Directory copy2Dir;
|
||||
|
||||
String fileName;
|
||||
|
||||
String saveAs;
|
||||
|
||||
long size;
|
||||
|
||||
long bytesDownloaded = 0;
|
||||
|
||||
byte[] buf = new byte[1024 * 1024];
|
||||
|
||||
Checksum checksum;
|
||||
|
||||
int errorCount = 0;
|
||||
|
||||
private boolean isConf;
|
||||
|
||||
private boolean aborted = false;
|
||||
|
||||
private Long indexGen;
|
||||
|
||||
private IndexOutput outStream;
|
||||
|
||||
DirectoryFileFetcher(Directory tmpIndexDir, Map<String, Object> fileDetails, String saveAs,
|
||||
boolean isConf, long latestGen) throws IOException {
|
||||
this.copy2Dir = tmpIndexDir;
|
||||
this.fileName = (String) fileDetails.get(NAME);
|
||||
this.size = (Long) fileDetails.get(SIZE);
|
||||
this.isConf = isConf;
|
||||
this.saveAs = saveAs;
|
||||
|
||||
indexGen = latestGen;
|
||||
|
||||
outStream = copy2Dir.createOutput(saveAs, IOContext.DEFAULT);
|
||||
|
||||
if (includeChecksum)
|
||||
checksum = new Adler32();
|
||||
}
|
||||
|
||||
/**
|
||||
* The main method which downloads file
|
||||
*/
|
||||
void fetchFile() throws Exception {
|
||||
try {
|
||||
while (true) {
|
||||
final FastInputStream is = getStream();
|
||||
int result;
|
||||
try {
|
||||
//fetch packets one by one in a single request
|
||||
result = fetchPackets(is);
|
||||
if (result == 0 || result == NO_CONTENT) {
|
||||
|
||||
return;
|
||||
}
|
||||
// if there is an error, continue from the point where it broke
|
||||
} finally {
|
||||
IOUtils.closeQuietly(is);
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
cleanup();
|
||||
// if cleanup succeeds, the file was downloaded fully; do an fsync
|
||||
fsyncService.submit(new Runnable(){
|
||||
public void run() {
|
||||
try {
|
||||
copy2Dir.sync(Collections.singleton(saveAs));
|
||||
} catch (IOException e) {
|
||||
fsyncException = e;
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
private int fetchPackets(FastInputStream fis) throws Exception {
|
||||
byte[] intbytes = new byte[4];
|
||||
byte[] longbytes = new byte[8];
|
||||
try {
|
||||
while (true) {
|
||||
if (stop) {
|
||||
stop = false;
|
||||
aborted = true;
|
||||
throw new ReplicationHandlerException("User aborted replication");
|
||||
}
|
||||
long checkSumServer = -1;
|
||||
fis.readFully(intbytes);
|
||||
//read the size of the packet
|
||||
int packetSize = readInt(intbytes);
|
||||
if (packetSize <= 0) {
|
||||
LOG.warn("No content recieved for file: " + currentFile);
|
||||
return NO_CONTENT;
|
||||
}
|
||||
if (buf.length < packetSize)
|
||||
buf = new byte[packetSize];
|
||||
if (checksum != null) {
|
||||
//read the checksum
|
||||
fis.readFully(longbytes);
|
||||
checkSumServer = readLong(longbytes);
|
||||
}
|
||||
//then read the packet of bytes
|
||||
fis.readFully(buf, 0, packetSize);
|
||||
//compare the checksum as sent from the master
|
||||
if (includeChecksum) {
|
||||
checksum.reset();
|
||||
checksum.update(buf, 0, packetSize);
|
||||
long checkSumClient = checksum.getValue();
|
||||
if (checkSumClient != checkSumServer) {
|
||||
LOG.error("Checksum not matched between client and server for: " + currentFile);
|
||||
// if the checksum is wrong there is a problem; return for retry
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
//if everything is fine, write down the packet to the file
|
||||
writeBytes(packetSize);
|
||||
bytesDownloaded += packetSize;
|
||||
if (bytesDownloaded >= size)
|
||||
return 0;
|
||||
//errorcount is always set to zero after a successful packet
|
||||
errorCount = 0;
|
||||
}
|
||||
} catch (ReplicationHandlerException e) {
|
||||
throw e;
|
||||
} catch (Exception e) {
|
||||
LOG.warn("Error in fetching packets ", e);
|
||||
// for any failure, increment the error count
|
||||
errorCount++;
|
||||
// if it fails for the same packet MAX_RETRIES times, fail and bail out
|
||||
if (errorCount > MAX_RETRIES) {
|
||||
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
|
||||
"Fetch failed for file:" + fileName, e);
|
||||
}
|
||||
return ERR;
|
||||
}
|
||||
}
|
||||
|
||||
protected void writeBytes(int packetSize) throws IOException {
|
||||
outStream.writeBytes(buf, 0, packetSize);
|
||||
}
|
||||
|
||||
/**
|
||||
* The web container flushes the data only after its buffer fills, so all data has to be read with readFully();
|
||||
* otherwise it fails. So read everything as bytes and then extract an integer from it.
|
||||
*/
|
||||
private int readInt(byte[] b) {
|
||||
return (((b[0] & 0xff) << 24) | ((b[1] & 0xff) << 16)
|
||||
| ((b[2] & 0xff) << 8) | (b[3] & 0xff));
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Same as above but to read longs from a byte array
|
||||
*/
|
||||
private long readLong(byte[] b) {
|
||||
return (((long) (b[0] & 0xff)) << 56) | (((long) (b[1] & 0xff)) << 48)
|
||||
| (((long) (b[2] & 0xff)) << 40) | (((long) (b[3] & 0xff)) << 32)
|
||||
| (((long) (b[4] & 0xff)) << 24) | ((b[5] & 0xff) << 16)
|
||||
| ((b[6] & 0xff) << 8) | ((b[7] & 0xff));
|
||||
|
||||
}
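The two helpers above decode big-endian values by hand because the bytes arrive via readFully. An equivalent formulation, noted here only as a design aside, would lean on java.nio.ByteBuffer, whose default byte order is already big-endian:

// Equivalent decoding via ByteBuffer (sketch, not part of the patch):
private int readInt(byte[] b) {
  return ByteBuffer.wrap(b).getInt();
}

private long readLong(byte[] b) {
  return ByteBuffer.wrap(b).getLong();
}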
|
||||
|
||||
/**
|
||||
* cleanup everything
|
||||
*/
|
||||
private void cleanup() {
|
||||
try {
|
||||
outStream.close();
|
||||
} catch (Exception e) {/* noop */
|
||||
LOG.error("Error closing the file stream: "+ this.saveAs ,e);
|
||||
}
|
||||
if (bytesDownloaded != size) {
|
||||
//if the download is not complete then
|
||||
//delete the file being downloaded
|
||||
try {
|
||||
copy2Dir.deleteFile(saveAs);
|
||||
} catch (Exception e) {
|
||||
LOG.error("Error deleting file in cleanup" + e.getMessage());
|
||||
}
|
||||
// if the failure is due to a user abort, this returns normally; otherwise an exception is thrown
|
||||
if (!aborted)
|
||||
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
|
||||
"Unable to download " + fileName + " completely. Downloaded "
|
||||
+ bytesDownloaded + "!=" + size);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Open a new stream using HttpClient
|
||||
*/
|
||||
FastInputStream getStream() throws IOException {
|
||||
SolrServer s = new HttpSolrServer(masterUrl, myHttpClient, null); //XXX use shardhandler
|
||||
ModifiableSolrParams params = new ModifiableSolrParams();
|
||||
|
||||
// the method is command=filecontent
|
||||
params.set(COMMAND, CMD_GET_FILE);
|
||||
params.set(GENERATION, Long.toString(indexGen));
|
||||
params.set(CommonParams.QT, "/replication");
|
||||
//add the version to download. This is used to reserve the download
|
||||
if (isConf) {
|
||||
//set cf instead of file for config file
|
||||
params.set(CONF_FILE_SHORT, fileName);
|
||||
} else {
|
||||
params.set(FILE, fileName);
|
||||
}
|
||||
if (useInternal) {
|
||||
params.set(COMPRESSION, "true");
|
||||
}
|
||||
//use checksum
|
||||
if (this.includeChecksum) {
|
||||
params.set(CHECKSUM, true);
|
||||
}
|
||||
//wt=filestream this is a custom protocol
|
||||
params.set(CommonParams.WT, FILE_STREAM);
|
||||
// This happens when a failure triggers a retry: the offset=<sizedownloaded> ensures that
|
||||
// the server starts from the offset
|
||||
if (bytesDownloaded > 0) {
|
||||
params.set(OFFSET, Long.toString(bytesDownloaded));
|
||||
}
|
||||
|
||||
|
||||
NamedList response;
|
||||
InputStream is = null;
|
||||
try {
|
||||
QueryRequest req = new QueryRequest(params);
|
||||
response = s.request(req);
|
||||
is = (InputStream) response.get("stream");
|
||||
if(useInternal) {
|
||||
is = new InflaterInputStream(is);
|
||||
}
|
||||
return new FastInputStream(is);
|
||||
} catch (Throwable t) {
|
||||
//close stream on error
|
||||
IOUtils.closeQuietly(is);
|
||||
throw new IOException("Could not download file '" + fileName + "'", t);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* The class acts as a client for ReplicationHandler.FileStream. It understands the protocol of wt=filestream
|
||||
*
|
||||
* @see org.apache.solr.handler.ReplicationHandler.LocalFsFileStream
|
||||
*/
|
||||
private class LocalFsFileFetcher {
|
||||
boolean includeChecksum = true;
|
||||
|
||||
private File copy2Dir;
|
||||
|
@ -944,7 +1306,7 @@ public class SnapPuller {
|
|||
|
||||
String saveAs;
|
||||
|
||||
long size, lastmodified;
|
||||
long size;
|
||||
|
||||
long bytesDownloaded = 0;
|
||||
|
||||
|
@ -966,16 +1328,15 @@ public class SnapPuller {
|
|||
|
||||
private Long indexGen;
|
||||
|
||||
FileFetcher(File dir, Map<String, Object> fileDetails, String saveAs,
|
||||
// TODO: could do more code sharing with DirectoryFileFetcher
|
||||
LocalFsFileFetcher(File dir, Map<String, Object> fileDetails, String saveAs,
|
||||
boolean isConf, long latestGen) throws IOException {
|
||||
this.copy2Dir = dir;
|
||||
this.fileName = (String) fileDetails.get(NAME);
|
||||
this.size = (Long) fileDetails.get(SIZE);
|
||||
this.isConf = isConf;
|
||||
this.saveAs = saveAs;
|
||||
if(fileDetails.get(LAST_MODIFIED) != null){
|
||||
lastmodified = (Long)fileDetails.get(LAST_MODIFIED);
|
||||
}
|
||||
|
||||
indexGen = latestGen;
|
||||
|
||||
this.file = new File(copy2Dir, saveAs);
|
||||
|
@ -1007,10 +1368,6 @@ public class SnapPuller {
|
|||
//fetch packets one by one in a single request
|
||||
result = fetchPackets(is);
|
||||
if (result == 0 || result == NO_CONTENT) {
|
||||
// if the file is downloaded properly set the
|
||||
// timestamp same as that in the server
|
||||
if (file.exists() && lastmodified > 0)
|
||||
file.setLastModified(lastmodified);
|
||||
return;
|
||||
}
|
||||
// if there is an error, continue from the point where it broke
|
||||
|
|
|
@ -17,9 +17,6 @@
|
|||
package org.apache.solr.handler;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.text.SimpleDateFormat;
|
||||
import java.util.ArrayList;
|
||||
|
@ -31,12 +28,13 @@ import java.util.Locale;
|
|||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.apache.commons.io.IOUtils;
|
||||
import org.apache.lucene.index.IndexCommit;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.FSDirectory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.Lock;
|
||||
import org.apache.lucene.store.SimpleFSLockFactory;
|
||||
import org.apache.solr.common.util.NamedList;
|
||||
import org.apache.solr.core.IndexDeletionPolicyWrapper;
|
||||
import org.apache.solr.core.SolrCore;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
@ -57,7 +55,7 @@ public class SnapShooter {
|
|||
solrCore = core;
|
||||
if (location == null) snapDir = core.getDataDir();
|
||||
else {
|
||||
File base = new File(core.getCoreDescriptor().getInstanceDir());
|
||||
File base = new File(core.getCoreDescriptor().getRawInstanceDir());
|
||||
snapDir = org.apache.solr.util.FileUtils.resolvePath(base, location).getAbsolutePath();
|
||||
File dir = new File(snapDir);
|
||||
if (!dir.exists()) dir.mkdirs();
|
||||
|
@ -101,8 +99,14 @@ public class SnapShooter {
|
|||
return;
|
||||
}
|
||||
Collection<String> files = indexCommit.getFileNames();
|
||||
FileCopier fileCopier = new FileCopier(solrCore.getDeletionPolicy(), indexCommit);
|
||||
fileCopier.copyFiles(files, snapShotDir);
|
||||
FileCopier fileCopier = new FileCopier();
|
||||
|
||||
Directory dir = solrCore.getDirectoryFactory().get(solrCore.getIndexDir(), null);
|
||||
try {
|
||||
fileCopier.copyFiles(dir, files, snapShotDir);
|
||||
} finally {
|
||||
solrCore.getDirectoryFactory().release(dir);
|
||||
}
|
||||
|
||||
details.add("fileCount", files.size());
|
||||
details.add("status", "success");
|
||||
|
@ -169,82 +173,34 @@ public class SnapShooter {
|
|||
|
||||
|
||||
private class FileCopier {
|
||||
private static final int DEFAULT_BUFFER_SIZE = 32768;
|
||||
private byte[] buffer = new byte[DEFAULT_BUFFER_SIZE];
|
||||
private IndexCommit indexCommit;
|
||||
private IndexDeletionPolicyWrapper delPolicy;
|
||||
|
||||
public FileCopier(IndexDeletionPolicyWrapper delPolicy, IndexCommit commit) {
|
||||
this.delPolicy = delPolicy;
|
||||
this.indexCommit = commit;
|
||||
}
|
||||
|
||||
public void copyFiles(Collection<String> files, File destDir) throws IOException {
|
||||
for (String indexFile : files) {
|
||||
File source = new File(solrCore.getIndexDir(), indexFile);
|
||||
copyFile(source, new File(destDir, source.getName()), true);
|
||||
}
|
||||
}
|
||||
|
||||
public void copyFile(File source, File destination, boolean preserveFileDate)
|
||||
throws IOException {
|
||||
// check source exists
|
||||
if (!source.exists()) {
|
||||
String message = "File " + source + " does not exist";
|
||||
throw new FileNotFoundException(message);
|
||||
}
|
||||
|
||||
public void copyFiles(Directory sourceDir, Collection<String> files,
|
||||
File destDir) throws IOException {
|
||||
// does destinations directory exist ?
|
||||
if (destination.getParentFile() != null
|
||||
&& !destination.getParentFile().exists()) {
|
||||
destination.getParentFile().mkdirs();
|
||||
if (destDir != null && !destDir.exists()) {
|
||||
destDir.mkdirs();
|
||||
}
|
||||
|
||||
FSDirectory dir = FSDirectory.open(destDir);
|
||||
try {
|
||||
for (String indexFile : files) {
|
||||
copyFile(sourceDir, indexFile, new File(destDir, indexFile), dir);
|
||||
}
|
||||
} finally {
|
||||
dir.close();
|
||||
}
|
||||
}
|
||||
|
||||
public void copyFile(Directory sourceDir, String indexFile, File destination, Directory destDir)
|
||||
throws IOException {
|
||||
|
||||
// make sure we can write to destination
|
||||
if (destination.exists() && !destination.canWrite()) {
|
||||
String message = "Unable to open file " + destination + " for writing.";
|
||||
throw new IOException(message);
|
||||
}
|
||||
|
||||
FileInputStream input = null;
|
||||
FileOutputStream output = null;
|
||||
try {
|
||||
input = new FileInputStream(source);
|
||||
output = new FileOutputStream(destination);
|
||||
|
||||
int count = 0;
|
||||
int n = 0;
|
||||
int rcnt = 0;
|
||||
while (-1 != (n = input.read(buffer))) {
|
||||
output.write(buffer, 0, n);
|
||||
count += n;
|
||||
rcnt++;
|
||||
/***
|
||||
// reserve every 4.6875 MB
|
||||
if (rcnt == 150) {
|
||||
rcnt = 0;
|
||||
delPolicy.setReserveDuration(indexCommit.getVersion(), reserveTime);
|
||||
}
|
||||
***/
|
||||
}
|
||||
} finally {
|
||||
try {
|
||||
IOUtils.closeQuietly(input);
|
||||
} finally {
|
||||
IOUtils.closeQuietly(output);
|
||||
}
|
||||
}
|
||||
|
||||
if (source.length() != destination.length()) {
|
||||
String message = "Failed to copy full contents from " + source + " to "
|
||||
+ destination;
|
||||
throw new IOException(message);
|
||||
}
|
||||
|
||||
if (preserveFileDate) {
|
||||
// file copy should preserve file date
|
||||
destination.setLastModified(source.lastModified());
|
||||
}
|
||||
sourceDir.copy(destDir, indexFile, indexFile, IOContext.DEFAULT);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -610,20 +610,26 @@ public class CoreAdminHandler extends RequestHandlerBase {
|
|||
|
||||
@Override
|
||||
public void postClose(SolrCore core) {
|
||||
File dataDir = new File(core.getIndexDir());
|
||||
File[] files = dataDir.listFiles();
|
||||
if (files != null) {
|
||||
for (File file : files) {
|
||||
if (!file.delete()) {
|
||||
log.error(file.getAbsolutePath()
|
||||
+ " could not be deleted on core unload");
|
||||
Directory dir = null;
|
||||
try {
|
||||
dir = core.getDirectoryFactory().get(core.getIndexDir(), null);
|
||||
core.getDirectoryFactory().remove(dir);
|
||||
core.getDirectoryFactory().doneWithDirectory(dir);
|
||||
} catch (IOException e) {
|
||||
throw new RuntimeException(e);
|
||||
} finally {
|
||||
if (dir != null) {
|
||||
try {
|
||||
core.getDirectoryFactory().release(dir);
|
||||
} catch (IOException e) {
|
||||
log.error("IOException trying to release directory", e);
|
||||
}
|
||||
}
|
||||
if (!dataDir.delete()) log.error(dataDir.getAbsolutePath()
|
||||
+ " could not be deleted on core unload");
|
||||
} else {
|
||||
log.error(dataDir.getAbsolutePath()
|
||||
+ " could not be deleted on core unload");
|
||||
}
|
||||
try {
|
||||
core.getDirectoryFactory().remove(dir);
|
||||
} catch (IOException e) {
|
||||
log.error("IOException trying to remove directory", e);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
@ -668,7 +674,16 @@ public class CoreAdminHandler extends RequestHandlerBase {
|
|||
});
|
||||
}
|
||||
} finally {
|
||||
if (core != null) core.close();
|
||||
// it's important that we try and cancel recovery
|
||||
// before we close here - else we might close the
|
||||
// core *in* recovery and end up locked in recovery
|
||||
// waiting for recovery to be cancelled
|
||||
if (core != null) {
|
||||
if (coreContainer.getZkController() != null) {
|
||||
core.getSolrCoreState().cancelRecovery();
|
||||
}
|
||||
core.close();
|
||||
}
|
||||
}
|
||||
return coreContainer.isPersistent();
|
||||
|
||||
|
@ -996,7 +1011,19 @@ public class CoreAdminHandler extends RequestHandlerBase {
|
|||
}
|
||||
|
||||
private long getIndexSize(SolrCore core) {
|
||||
return FileUtils.sizeOfDirectory(new File(core.getIndexDir()));
|
||||
Directory dir;
|
||||
long size = 0;
|
||||
try {
|
||||
dir = core.getDirectoryFactory().get(core.getIndexDir(), null);
|
||||
try {
|
||||
size = DirectoryFactory.sizeOfDirectory(dir);
|
||||
} finally {
|
||||
core.getDirectoryFactory().release(dir);
|
||||
}
|
||||
} catch (IOException e) {
|
||||
SolrException.log(log, "IO error while trying to get the size of the Directory", e);
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
protected static String normalizePath(String path) {
|
||||
|
|
|
@ -19,6 +19,7 @@ package org.apache.solr.handler.admin;
|
|||
|
||||
import java.io.DataInputStream;
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStreamReader;
|
||||
import java.lang.management.ManagementFactory;
|
||||
import java.lang.management.OperatingSystemMXBean;
|
||||
|
@ -104,7 +105,13 @@ public class SystemInfoHandler extends RequestHandlerBase
|
|||
dirs.add( "cwd" , new File( System.getProperty("user.dir")).getAbsolutePath() );
|
||||
dirs.add( "instance", new File( core.getResourceLoader().getInstanceDir() ).getAbsolutePath() );
|
||||
dirs.add( "data", new File( core.getDataDir() ).getAbsolutePath() );
|
||||
dirs.add( "index", new File( core.getIndexDir() ).getAbsolutePath() );
|
||||
dirs.add( "dirimpl", core.getDirectoryFactory().getClass().getName());
|
||||
try {
|
||||
dirs.add( "index", core.getDirectoryFactory().normalize(core.getIndexDir()) );
|
||||
} catch (IOException e) {
|
||||
log.warn("Problem getting the normalized index directory path", e);
|
||||
dirs.add( "index", "N/A" );
|
||||
}
|
||||
info.add( "directory", dirs );
|
||||
return info;
|
||||
}
|
||||
|
|
|
@@ -525,7 +525,7 @@ public class RealTimeGetComponent extends SearchComponent

boolean cantReachIsSuccess = rb.req.getParams().getBool("cantReachIsSuccess", false);

PeerSync peerSync = new PeerSync(rb.req.getCore(), replicas, nVersions, cantReachIsSuccess);
PeerSync peerSync = new PeerSync(rb.req.getCore(), replicas, nVersions, cantReachIsSuccess, true);
boolean success = peerSync.sync();

// TODO: more complex response?

@@ -16,18 +16,16 @@
*/
package org.apache.solr.schema;

import org.apache.lucene.index.StorableField;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.SortField;
import org.apache.lucene.index.GeneralField;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.StorableField;
import org.apache.solr.search.function.FileFloatSource;
import org.apache.solr.search.QParser;
import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.common.SolrException;
import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.search.QParser;
import org.apache.solr.search.function.FileFloatSource;

import java.util.Map;
import java.io.IOException;
import java.util.Map;

/** Get values from an external file instead of the index.
*

@@ -55,7 +53,7 @@ import java.io.IOException;
* <p/>The external file may be sorted or unsorted by the key field, but it will be substantially slower (untested) if it isn't sorted.
* <p/>Fields of this type may currently only be used as a ValueSource in a FunctionQuery.
*
*
* @see ExternalFileFieldReloader
*/
public class ExternalFileField extends FieldType {
private FieldType ftype;

@@ -94,10 +92,26 @@ public class ExternalFileField extends FieldType {

@Override
public ValueSource getValueSource(SchemaField field, QParser parser) {
// default key field to unique key
SchemaField keyField = keyFieldName==null ? schema.getUniqueKeyField() : schema.getField(keyFieldName);
return new FileFloatSource(field, keyField, defVal, parser);
return getFileFloatSource(field, parser.getReq().getCore().getDataDir());
}

/**
* Get a FileFloatSource for the given field, looking in datadir for the relevant file
* @param field the field to get a source for
* @param datadir the data directory in which to look for the external file
* @return a FileFloatSource
*/
public FileFloatSource getFileFloatSource(SchemaField field, String datadir) {
// Because the float source uses a static cache, all source objects will
// refer to the same data.
return new FileFloatSource(field, getKeyField(), defVal, datadir);
}

// If no key field is defined, we use the unique key field
private SchemaField getKeyField() {
return keyFieldName == null ?
schema.getUniqueKeyField() :
schema.getField(keyFieldName);
}

}

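A hedged sketch of how custom code might use the new getFileFloatSource(field, datadir) hook introduced above; the wrapper class and the example field name are illustrative only and do not come from the commit.

import org.apache.solr.core.SolrCore;
import org.apache.solr.schema.ExternalFileField;
import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.search.function.FileFloatSource;

public class ExternalFileFieldSketch {
  // Resolve the FileFloatSource backing a field, or null if the field is not an ExternalFileField.
  static FileFloatSource sourceFor(SolrCore core, String fieldName) {
    SchemaField field = core.getSchema().getField(fieldName); // e.g. "popularity" (hypothetical name)
    FieldType type = field.getType();
    if (type instanceof ExternalFileField) {
      // The float source uses a static cache keyed on the field and data directory,
      // so a source obtained here sees the same data as the query-time sources.
      return ((ExternalFileField) type).getFileFloatSource(field, core.getDataDir());
    }
    return null;
  }
}
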
@@ -0,0 +1,86 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.solr.schema;

import org.apache.lucene.index.IndexReader;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.AbstractSolrEventListener;
import org.apache.solr.core.SolrCore;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.search.function.FileFloatSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.List;

/**
* An event listener to reload ExternalFileFields for new searchers.
*
* Opening a new IndexSearcher will invalidate the internal caches used by
* {@link ExternalFileField}. By default, these caches are reloaded lazily
* by the first search that uses them. For large external files, this can
* slow down searches unacceptably.
*
* To reload the caches when the searcher is first opened, set up event
* listeners in your solrconfig.xml:
*
* <pre>
* <listener event="newSearcher" class="org.apache.solr.schema.ExternalFileFieldReloader"/>
* <listener event="firstSearcher" class="org.apache.solr.schema.ExternalFileFieldReloader"/>
* </pre>
*
* The caches will be reloaded for all ExternalFileFields in your schema after
* each commit.
*/
public class ExternalFileFieldReloader extends AbstractSolrEventListener {

private IndexSchema schema;
private String datadir;
private List<FileFloatSource> fieldSources = new ArrayList<FileFloatSource>();

private static final Logger log = LoggerFactory.getLogger(ExternalFileFieldReloader.class);

public ExternalFileFieldReloader(SolrCore core) {
super(core);
schema = core.getSchema();
datadir = core.getDataDir();
}

@Override
public void init(NamedList args) {
for (SchemaField field : schema.getFields().values()) {
FieldType type = field.getType();
if (type instanceof ExternalFileField) {
ExternalFileField eff = (ExternalFileField) type;
fieldSources.add(eff.getFileFloatSource(field, datadir));
log.info("Adding ExternalFileFieldReloader listener for field {}", field.getName());
}
}
}

@Override
public void newSearcher(SolrIndexSearcher newSearcher, SolrIndexSearcher currentSearcher) {
// We need to reload the caches for the new searcher
IndexReader reader = newSearcher.getIndexReader();
for (FileFloatSource fieldSource : fieldSources) {
fieldSource.refreshCache(reader);
}
}
}

@@ -41,6 +41,7 @@ import org.apache.lucene.store.NRTCachingDirectory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.OpenBitSet;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;

@@ -77,7 +78,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
private static Logger log = LoggerFactory.getLogger(SolrIndexSearcher.class);
private final SolrCore core;
private final IndexSchema schema;
private String indexDir;

private boolean debug = log.isDebugEnabled();

private final String name;

@@ -148,8 +149,6 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
directoryFactory.incRef(dir);
}

this.indexDir = getIndexDir(dir);

this.closeReader = closeReader;
setSimilarity(schema.getSimilarity());

@@ -273,7 +272,11 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
// super.close();
// can't use super.close() since it just calls reader.close() and that may only be called once
// per reader (even if incRef() was previously called).
try {
if (closeReader) reader.decRef();
} catch (Throwable t) {
SolrException.log(log, "Problem dec ref'ing reader", t);
}

for (SolrCache cache : cacheList) {
cache.close();

@@ -409,12 +412,6 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
// }
// }

/**
* @return the indexDir on which this searcher is opened
*/
public String getIndexDir() {
return indexDir;
}

/* ********************** Document retrieval *************************/

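The close() change above leans on Lucene's reference counting rather than calling reader.close(), which may only be invoked once per reader. A minimal sketch of that sharing pattern, assuming an already-open DirectoryReader supplied by the caller:

import java.io.IOException;

import org.apache.lucene.index.DirectoryReader;

public class ReaderRefCountSketch {
  // A second consumer borrows the reader via incRef/decRef instead of close().
  static void useSharedReader(DirectoryReader reader) throws IOException {
    reader.incRef();   // take an extra reference
    try {
      // ... run searches against the reader ...
    } finally {
      reader.decRef(); // release it; the reader is actually closed only
                       // when the reference count reaches zero
    }
  }
}
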
@@ -34,8 +34,8 @@ import org.slf4j.LoggerFactory;
* Plugin for lucene/contrib Surround query parser, bringing SpanQuery support
* to Solr
*
* <queryParser name="surround"
* class="org.apache.solr.search.SurroundQParserPlugin" />
* <queryParser name="surround"
* class="org.apache.solr.search.SurroundQParserPlugin" />
*
* Examples of query syntax can be found in lucene/queryparser/docs/surround
*

@@ -16,24 +16,7 @@
*/
package org.apache.solr.search.function;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.WeakHashMap;

import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.*;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.docvalues.FloatDocValues;

@@ -47,29 +30,45 @@ import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.search.QParser;
import org.apache.solr.update.processor.UpdateRequestProcessor;
import org.apache.solr.util.VersionedFile;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.*;

/**
* Obtains float field values from an external file.
*
* @see org.apache.solr.schema.ExternalFileField
* @see org.apache.solr.schema.ExternalFileFieldReloader
*/

public class FileFloatSource extends ValueSource {

private SchemaField field;
private final SchemaField keyField;
private final float defVal;

private final String dataDir;

public FileFloatSource(SchemaField field, SchemaField keyField, float defVal, QParser parser) {
private static final Logger log = LoggerFactory.getLogger(FileFloatSource.class);

/**
* Creates a new FileFloatSource
* @param field the source's SchemaField
* @param keyField the field to use as a key
* @param defVal the default value to use if a field has no entry in the external file
* @param datadir the directory in which to look for the external file
*/
public FileFloatSource(SchemaField field, SchemaField keyField, float defVal, String datadir) {
this.field = field;
this.keyField = keyField;
this.defVal = defVal;
this.dataDir = parser.getReq().getCore().getDataDir();
this.dataDir = datadir;
}

@Override

@@ -118,10 +117,26 @@ public class FileFloatSource extends ValueSource {

}

/**
* Remove all cached entries. Values are lazily loaded next time getValues() is
* called.
*/
public static void resetCache(){
floatCache.resetCache();
}

/**
* Refresh the cache for an IndexReader. The new values are loaded in the background
* and then swapped in, so queries against the cache should not block while the reload
* is happening.
* @param reader the IndexReader whose cache needs refreshing
*/
public void refreshCache(IndexReader reader) {
log.info("Refreshing FlaxFileFloatSource cache for field {}", this.field.getName());
floatCache.refresh(reader, new Entry(this));
log.info("FlaxFileFloatSource cache for field {} reloaded", this.field.getName());
}

private final float[] getCachedFloats(IndexReader reader) {
return (float[])floatCache.get(reader, new Entry(this));
}

@@ -139,6 +154,18 @@ public class FileFloatSource extends ValueSource {

protected abstract Object createValue(IndexReader reader, Object key);

public void refresh(IndexReader reader, Object key) {
Object refreshedValues = createValue(reader, key);
synchronized (readerCache) {
Map innerCache = (Map) readerCache.get(reader);
if (innerCache == null) {
innerCache = new HashMap();
readerCache.put(reader, innerCache);
}
innerCache.put(key, refreshedValues);
}
}

public Object get(IndexReader reader, Object key) {
Map innerCache;
Object value;

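The refresh() method added to the cache above builds the new value outside the lock and only synchronizes the swap, which is why refreshCache() does not block concurrent queries. A generic, simplified restatement of that pattern (lazy population and the Entry key type from the diff are deliberately left out, and the class name is illustrative):

import java.util.HashMap;
import java.util.Map;
import java.util.WeakHashMap;

// Compute-then-swap cache refresh: the expensive load happens with no lock held,
// so readers only ever wait for the brief map update.
public abstract class RefreshableCacheSketch<R, K, V> {

  private final Map<R, Map<K, V>> readerCache = new WeakHashMap<R, Map<K, V>>();

  // Expensive value construction, e.g. parsing a large external file.
  protected abstract V createValue(R reader, K key);

  public void refresh(R reader, K key) {
    V refreshed = createValue(reader, key);  // built outside the lock
    synchronized (readerCache) {             // only the swap is synchronized
      Map<K, V> inner = readerCache.get(reader);
      if (inner == null) {
        inner = new HashMap<K, V>();
        readerCache.put(reader, inner);
      }
      inner.put(key, refreshed);
    }
  }

  public V get(R reader, K key) {
    synchronized (readerCache) {
      Map<K, V> inner = readerCache.get(reader);
      return inner == null ? null : inner.get(key);
    }
  }
}
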
@@ -493,20 +493,24 @@ public class SolrDispatchFilter implements Filter
* filter into a larger web application.
*
* For example, if web.xml specifies:
*
* <pre class="prettyprint">
* {@code
* <filter-mapping>
* <filter-name>SolrRequestFilter</filter-name>
* <url-pattern>/xxx/*</url-pattern>
* </filter-mapping>
* </filter-mapping>}
* </pre>
*
* Make sure to set the PathPrefix to "/xxx" either with this function
* or in web.xml.
*
* <pre class="prettyprint">
* {@code
* <init-param>
* <param-name>path-prefix</param-name>
* <param-value>/xxx</param-value>
* </init-param>
*
* </init-param>}
* </pre>
*/
public void setPathPrefix(String pathPrefix) {
this.pathPrefix = pathPrefix;

@@ -44,7 +44,7 @@ public final class DefaultSolrCoreState extends SolrCoreState implements Recover

private volatile boolean recoveryRunning;
private RecoveryStrategy recoveryStrat;
private boolean closed = false;
private volatile boolean closed = false;

private RefCounted<IndexWriter> refCntWriter;

@@ -113,7 +113,7 @@ public final class DefaultSolrCoreState extends SolrCoreState implements Recover
}

@Override
public synchronized void newIndexWriter(SolrCore core, boolean rollback) throws IOException {
public synchronized void newIndexWriter(SolrCore core, boolean rollback, boolean forceNewDir) throws IOException {
log.info("Creating new IndexWriter...");
String coreName = core.getName();
synchronized (writerPauseLock) {

@@ -148,7 +148,7 @@ public final class DefaultSolrCoreState extends SolrCoreState implements Recover
}
}
}
indexWriter = createMainIndexWriter(core, "DirectUpdateHandler2", true);
indexWriter = createMainIndexWriter(core, "DirectUpdateHandler2", forceNewDir);
log.info("New IndexWriter is ready to be used.");
// we need to null this so it picks up the new writer next get call
refCntWriter = null;

@@ -162,7 +162,7 @@ public final class DefaultSolrCoreState extends SolrCoreState implements Recover

@Override
public synchronized void rollbackIndexWriter(SolrCore core) throws IOException {
newIndexWriter(core, true);
newIndexWriter(core, true, true);
}

protected SolrIndexWriter createMainIndexWriter(SolrCore core, String name, boolean forceNewDirectory) throws IOException {

@@ -45,7 +45,6 @@ import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;

@@ -447,7 +446,11 @@ public class DirectUpdateHandler2 extends UpdateHandler implements SolrCoreState
log.info("start "+cmd);
RefCounted<IndexWriter> iw = solrCoreState.getIndexWriter(core);
try {
iw.get().prepareCommit();
final Map<String,String> commitData = new HashMap<String,String>();
commitData.put(SolrIndexWriter.COMMIT_TIME_MSEC_KEY,
String.valueOf(System.currentTimeMillis()));

iw.get().prepareCommit(commitData);
} finally {
iw.decref();
}

@@ -600,8 +603,8 @@ public class DirectUpdateHandler2 extends UpdateHandler implements SolrCoreState
}

@Override
public void newIndexWriter(boolean rollback) throws IOException {
solrCoreState.newIndexWriter(core, rollback);
public void newIndexWriter(boolean rollback, boolean forceNewDir) throws IOException {
solrCoreState.newIndexWriter(core, rollback, forceNewDir);
}

/**

@@ -238,8 +238,6 @@ public class DocumentBuilder {
SchemaField sfield = schema.getFieldOrNull(name);
boolean used = false;

float boost = field.getBoost();
boolean applyBoost = sfield != null && sfield.indexed() && !sfield.omitNorms();

// Make sure it has the correct number
if( sfield!=null && !sfield.multiValued() && field.getValueCount() > 1 ) {

@@ -248,17 +246,18 @@ public class DocumentBuilder {
sfield.getName() + ": " +field.getValue() );
}

if (applyBoost == false && boost != 1.0F) {
float fieldBoost = field.getBoost();
boolean applyBoost = sfield != null && sfield.indexed() && !sfield.omitNorms();

if (applyBoost == false && fieldBoost != 1.0F) {
throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,
"ERROR: "+getID(doc, schema)+"cannot set an index-time boost, unindexed or norms are omitted for field " +
sfield.getName() + ": " +field.getValue() );
}

// Lucene no longer has a native docBoost, so we have to multiply
// it ourselves (do this after the applyBoost error check so we don't
// give an error on fields that don't support boost just because of a
// docBoost)
boost *= docBoost;
// it ourselves
float compoundBoost = fieldBoost * docBoost;

// load each field value
boolean hasField = false;

@@ -270,16 +269,20 @@ public class DocumentBuilder {
hasField = true;
if (sfield != null) {
used = true;
addField(out, sfield, v, applyBoost ? boost : 1f);
addField(out, sfield, v, applyBoost ? compoundBoost : 1f);
}

// Check if we should copy this field to any other fields.
// Check if we should copy this field value to any other fields.
// This could happen whether it is explicit or not.
List<CopyField> copyFields = schema.getCopyFieldsList(name);
for (CopyField cf : copyFields) {
SchemaField destinationField = cf.getDestination();

final boolean destHasValues =
(null != out.getField(destinationField.getName()));

// check if the copy field is a multivalued or not
if (!destinationField.multiValued() && out.getField(destinationField.getName()) != null) {
if (!destinationField.multiValued() && destHasValues) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"ERROR: "+getID(doc, schema)+"multiple values encountered for non multiValued copy field " +
destinationField.getName() + ": " + v);

@@ -292,14 +295,23 @@ public class DocumentBuilder {
if( val instanceof String && cf.getMaxChars() > 0 ) {
val = cf.getLimitedValue((String)val);
}
addField(out, destinationField, val, destinationField.indexed() && !destinationField.omitNorms() ? boost : 1F);

// we can't copy any boost unless the dest field is
// indexed & !omitNorms, but which boost we copy depends
// on wether the dest field already contains values (we
// don't want to apply the compounded docBoost more then once)
final float destBoost =
(destinationField.indexed() && !destinationField.omitNorms()) ?
(destHasValues ? fieldBoost : compoundBoost) : 1.0F;

addField(out, destinationField, val, destBoost);
}

// The boost for a given field is the product of the
// The final boost for a given field named is the product of the
// *all* boosts on values of that field.
// For multi-valued fields, we only want to set the boost on the
// first field.
boost = 1.0f;
fieldBoost = compoundBoost = 1.0f;
}
}
catch( SolrException ex ) {

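The boost bookkeeping above reduces to one rule: the first value written to an indexed, norms-bearing destination carries fieldBoost * docBoost, later values carry only fieldBoost, and unindexed or omitNorms destinations get no boost at all. A hedged restatement of that rule as a standalone helper (the class and method names are illustrative, not part of the commit):

public class CopyFieldBoostSketch {
  // Boost to apply to one copied value, following the selection logic in DocumentBuilder above.
  static float destBoost(float fieldBoost, float docBoost,
                         boolean destIndexed, boolean destOmitNorms,
                         boolean destHasValues) {
    if (!destIndexed || destOmitNorms) {
      return 1.0f;                             // boosts only take effect when norms are kept
    }
    float compoundBoost = fieldBoost * docBoost;
    // apply the document boost only once per destination field
    return destHasValues ? fieldBoost : compoundBoost;
  }
}
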
@@ -19,6 +19,7 @@ package org.apache.solr.update;

import java.io.IOException;
import java.net.ConnectException;
import java.net.SocketException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;

@@ -79,6 +80,7 @@ public class PeerSync {
private long ourLowThreshold; // 20th percentile
private long ourHighThreshold; // 80th percentile
private boolean cantReachIsSuccess;
private boolean getNoVersionsIsSuccess;
private static final HttpClient client;
static {
ModifiableSolrParams params = new ModifiableSolrParams();

@@ -129,14 +131,15 @@ public class PeerSync {
}

public PeerSync(SolrCore core, List<String> replicas, int nUpdates) {
this(core, replicas, nUpdates, false);
this(core, replicas, nUpdates, false, true);
}

public PeerSync(SolrCore core, List<String> replicas, int nUpdates, boolean cantReachIsSuccess) {
public PeerSync(SolrCore core, List<String> replicas, int nUpdates, boolean cantReachIsSuccess, boolean getNoVersionsIsSuccess) {
this.replicas = replicas;
this.nUpdates = nUpdates;
this.maxUpdates = nUpdates;
this.cantReachIsSuccess = cantReachIsSuccess;
this.getNoVersionsIsSuccess = getNoVersionsIsSuccess;


uhandler = core.getUpdateHandler();

@@ -301,7 +304,7 @@ public class PeerSync {
Throwable solrException = ((SolrServerException) srsp.getException())
.getRootCause();
if (solrException instanceof ConnectException || solrException instanceof ConnectTimeoutException
|| solrException instanceof NoHttpResponseException) {
|| solrException instanceof NoHttpResponseException || solrException instanceof SocketException) {
log.warn(msg() + " couldn't connect to " + srsp.getShardAddress() + ", counting as success");

return true;

@@ -343,7 +346,7 @@ public class PeerSync {
log.info(msg() + " Received " + otherVersions.size() + " versions from " + sreq.shards[0] );

if (otherVersions.size() == 0) {
return true;
return getNoVersionsIsSuccess;
}

boolean completeList = otherVersions.size() < nUpdates; // do we have their complete list of updates?

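For context, a hedged sketch of how the new five-argument PeerSync constructor is meant to be called (RealTimeGetComponent above passes true for getNoVersionsIsSuccess); the replica URL, the update count, and the wrapper class are placeholders, not code from the commit:

import java.util.Arrays;
import java.util.List;

import org.apache.solr.core.SolrCore;
import org.apache.solr.update.PeerSync;

public class PeerSyncSketch {
  // Try to reconcile recent updates with one replica.
  static boolean syncFromPeer(SolrCore core) {
    List<String> replicas = Arrays.asList("http://host1:8983/solr/collection1"); // placeholder URL
    int nUpdates = 100;                     // how many recent versions to compare (placeholder)
    boolean cantReachIsSuccess = false;     // an unreachable peer still fails the sync
    boolean getNoVersionsIsSuccess = true;  // a peer reporting no versions counts as success
    PeerSync peerSync = new PeerSync(core, replicas, nUpdates,
        cantReachIsSuccess, getNoVersionsIsSuccess);
    return peerSync.sync();
  }
}
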
@@ -101,7 +101,6 @@ public class SolrCmdDistributor {

public void finish() {

// piggyback on any outstanding adds or deletes if possible.
flushAdds(1);
flushDeletes(1);

@@ -150,6 +149,12 @@ public class SolrCmdDistributor {

public void distribCommit(CommitUpdateCommand cmd, List<Node> nodes,
ModifiableSolrParams params) throws IOException {

// make sure we are ordered
flushAdds(1);
flushDeletes(1);


// Wait for all outstanding responses to make sure that a commit
// can't sneak in ahead of adds or deletes we already sent.
// We could do this on a per-server basis, but it's more complex

@@ -163,7 +168,7 @@ public class SolrCmdDistributor {

addCommit(ureq, cmd);

log.info("Distrib commit to:" + nodes);
log.info("Distrib commit to:" + nodes + " params:" + params);

for (Node node : nodes) {
submit(ureq, node);