mirror of https://github.com/apache/lucene.git

commit 8a5a4a6317
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/lucene-solr

build.xml
@@ -125,177 +125,7 @@
   </target>

   <target name="-validate-source-patterns" unless="disable.source-patterns" depends="resolve-groovy,rat-sources-typedef">
-    <!-- check that there are no @author javadoc tags, tabs, svn keywords, javadoc-style licenses, or nocommits: -->
-    <property name="validate.baseDir" location="."/>
-    <groovy taskname="source-patterns" classpathref="rat.classpath"><![CDATA[
-      import org.apache.tools.ant.BuildException;
-      import org.apache.tools.ant.Project;
-      import org.apache.rat.Defaults;
-      import org.apache.rat.document.impl.FileDocument;
-      import org.apache.rat.api.MetaData;
-
-      def extensions = [
-        'java', 'jflex', 'py', 'pl', 'g4', 'jj', 'html', 'js',
-        'css', 'xml', 'xsl', 'vm', 'sh', 'cmd', 'bat', 'policy',
-        'properties', 'mdtext',
-        'template', 'adoc', 'json',
-      ];
-      def invalidPatterns = [
-        (~$/@author\b/$) : '@author javadoc tag',
-        (~$/(?i)\bno(n|)commit\b/$) : 'nocommit',
-        (~$/\bTOOD:/$) : 'TOOD instead TODO',
-        (~$/\t/$) : 'tabs instead spaces',
-        (~$/\Q/**\E((?:\s)|(?:\*))*\Q{@inheritDoc}\E((?:\s)|(?:\*))*\Q*/\E/$) : '{@inheritDoc} on its own is unnecessary',
-        (~$/\$$(?:LastChanged)?Date\b/$) : 'svn keyword',
-        (~$/\$$(?:(?:LastChanged)?Revision|Rev)\b/$) : 'svn keyword',
-        (~$/\$$(?:LastChangedBy|Author)\b/$) : 'svn keyword',
-        (~$/\$$(?:Head)?URL\b/$) : 'svn keyword',
-        (~$/\$$Id\b/$) : 'svn keyword',
-        (~$/\$$Header\b/$) : 'svn keyword',
-        (~$/\$$Source\b/$) : 'svn keyword',
-        (~$/^\uFEFF/$) : 'UTF-8 byte order mark'
-      ];
-
-      def baseDir = properties['validate.baseDir'];
-      def baseDirLen = baseDir.length() + 1;
-
-      def found = 0;
-      def violations = new TreeSet();
-      def reportViolation = { f, name ->
-        task.log(name + ': ' + f.toString().substring(baseDirLen).replace(File.separatorChar, (char)'/'), Project.MSG_ERR);
-        violations.add(name);
-        found++;
-      }
-
-      def javadocsPattern = ~$/(?sm)^\Q/**\E(.*?)\Q*/\E/$;
-      def javaCommentPattern = ~$/(?sm)^\Q/*\E(.*?)\Q*/\E/$;
-      def xmlCommentPattern = ~$/(?sm)\Q<!--\E(.*?)\Q-->\E/$;
-      def lineSplitter = ~$/[\r\n]+/$;
-      def singleLineSplitter = ~$/\n\r?/$;
-      def licenseMatcher = Defaults.createDefaultMatcher();
-      def validLoggerPattern = ~$/(?s)\b(private\s|static\s|final\s){3}+\s*Logger\s+\p{javaJavaIdentifierStart}+\s+=\s+\QLoggerFactory.getLogger(MethodHandles.lookup().lookupClass());\E/$;
-      def packagePattern = ~$/(?m)^\s*package\s+org\.apache.*;/$;
-      def xmlTagPattern = ~$/(?m)\s*<[a-zA-Z].*/$;
-      def sourceHeaderPattern = ~$/\[source\b.*/$;
-      def blockBoundaryPattern = ~$/----\s*/$;
-      def blockTitlePattern = ~$/\..*/$;
-      def unescapedSymbolPattern = ~$/(?<=[^\\]|^)([-=]>|<[-=])/$; // SOLR-10883
-
-      def isLicense = { matcher, ratDocument ->
-        licenseMatcher.reset();
-        return lineSplitter.split(matcher.group(1)).any{ licenseMatcher.match(ratDocument, it) };
-      }
-
-      def checkLicenseHeaderPrecedes = { f, description, contentPattern, commentPattern, text, ratDocument ->
-        def contentMatcher = contentPattern.matcher(text);
-        if (contentMatcher.find()) {
-          def contentStartPos = contentMatcher.start();
-          def commentMatcher = commentPattern.matcher(text);
-          while (commentMatcher.find()) {
-            if (isLicense(commentMatcher, ratDocument)) {
-              if (commentMatcher.start() < contentStartPos) {
-                break; // This file is all good, so break loop: license header precedes 'description' definition
-              } else {
-                reportViolation(f, description+' declaration precedes license header');
-              }
-            }
-          }
-        }
-      }
-
-      def checkMockitoAssume = { f, text ->
-        if (text.contains("mockito") && !text.contains("assumeWorkingMockito()")) {
-          reportViolation(f, 'File uses Mockito but has no assumeWorkingMockito() call');
-        }
-      }
-
-      def checkForUnescapedSymbolSubstitutions = { f, text ->
-        def inCodeBlock = false;
-        def underSourceHeader = false;
-        def lineNumber = 0;
-        singleLineSplitter.split(text).each {
-          ++lineNumber;
-          if (underSourceHeader) { // This line is either a single source line, or the boundary of a code block
-            inCodeBlock = blockBoundaryPattern.matcher(it).matches();
-            if ( ! blockTitlePattern.matcher(it).matches()) {
-              underSourceHeader = false;
-            }
-          } else {
-            if (inCodeBlock) {
-              inCodeBlock = ! blockBoundaryPattern.matcher(it).matches();
-            } else {
-              underSourceHeader = sourceHeaderPattern.matcher(it).lookingAt();
-              if ( ! underSourceHeader) {
-                def unescapedSymbolMatcher = unescapedSymbolPattern.matcher(it);
-                if (unescapedSymbolMatcher.find()) {
-                  reportViolation(f, 'Unescaped symbol "' + unescapedSymbolMatcher.group(1) + '" on line #' + lineNumber);
-                }
-              }
-            }
-          }
-        }
-      }
-
-      ant.fileScanner{
-        fileset(dir: baseDir){
-          extensions.each{
-            include(name: 'lucene/**/*.' + it)
-            include(name: 'solr/**/*.' + it)
-            include(name: 'dev-tools/**/*.' + it)
-            include(name: '*.' + it)
-          }
-          // TODO: For now we don't scan txt files, so we
-          // check licenses in top-level folders separately:
-          include(name: '*.txt')
-          include(name: '*/*.txt')
-          // excludes:
-          exclude(name: '**/build/**')
-          exclude(name: '**/dist/**')
-          exclude(name: 'lucene/benchmark/work/**')
-          exclude(name: 'lucene/benchmark/temp/**')
-          exclude(name: '**/CheckLoggingConfiguration.java')
-          exclude(name: 'build.xml') // ourselves :-)
-        }
-      }.each{ f ->
-        task.log('Scanning file: ' + f, Project.MSG_VERBOSE);
-        def text = f.getText('UTF-8');
-        invalidPatterns.each{ pattern,name ->
-          if (pattern.matcher(text).find()) {
-            reportViolation(f, name);
-          }
-        }
-        def javadocsMatcher = javadocsPattern.matcher(text);
-        def ratDocument = new FileDocument(f);
-        while (javadocsMatcher.find()) {
-          if (isLicense(javadocsMatcher, ratDocument)) {
-            reportViolation(f, String.format(Locale.ENGLISH, 'javadoc-style license header [%s]',
-              ratDocument.getMetaData().value(MetaData.RAT_URL_LICENSE_FAMILY_NAME)));
-          }
-        }
-        if (f.name.endsWith('.java')) {
-          if (text.contains('org.slf4j.LoggerFactory')) {
-            if (!validLoggerPattern.matcher(text).find()) {
-              reportViolation(f, 'invalid logging pattern [not private static final, uses static class name]');
-            }
-          }
-          checkLicenseHeaderPrecedes(f, 'package', packagePattern, javaCommentPattern, text, ratDocument);
-          if (f.name.contains("Test")) {
-            checkMockitoAssume(f, text);
-          }
-        }
-        if (f.name.endsWith('.xml') || f.name.endsWith('.xml.template')) {
-          checkLicenseHeaderPrecedes(f, '<tag>', xmlTagPattern, xmlCommentPattern, text, ratDocument);
-        }
-        if (f.name.endsWith('.adoc')) {
-          checkForUnescapedSymbolSubstitutions(f, text);
-        }
-      };
-
-      if (found) {
-        throw new BuildException(String.format(Locale.ENGLISH, 'Found %d violations in source files (%s).',
-          found, violations.join(', ')));
-      }
-    ]]></groovy>
+    <groovy taskname="source-patterns" classpathref="rat.classpath" src="${common.dir}/tools/src/groovy/check-source-patterns.groovy"/>
   </target>

   <target name="rat-sources" description="Runs rat across all sources and tests" depends="common.rat-sources">
@@ -410,31 +240,7 @@
   </target>

   <target name="run-maven-build" depends="get-maven-poms,install-maven-tasks,resolve-groovy" description="Runs the Maven build using automatically generated POMs">
-    <groovy><![CDATA[
-      import groovy.xml.NamespaceBuilder;
-      import org.apache.tools.ant.Project;
-      def userHome = properties['user.home'], commonDir = properties['common.dir'];
-      def propPrefix = '-mvn.inject.'; int propPrefixLen = propPrefix.length();
-      def subProject = project.createSubProject();
-      project.copyUserProperties(subProject);
-      subProject.initProperties();
-      new AntBuilder(subProject).sequential{
-        property(file: userHome+'/lucene.build.properties', prefix: propPrefix);
-        property(file: userHome+'/build.properties', prefix: propPrefix);
-        property(file: commonDir+'/build.properties', prefix: propPrefix);
-      };
-      def cmdlineProps = subProject.properties
-        .findAll{ k, v -> k.startsWith(propPrefix) }
-        .collectEntries{ k, v -> [k.substring(propPrefixLen), v] };
-      cmdlineProps << project.userProperties.findAll{ k, v -> !k.startsWith('ant.') };
-      def artifact = NamespaceBuilder.newInstance(ant, 'antlib:org.apache.maven.artifact.ant');
-      task.log('Running Maven with props: ' + cmdlineProps.toString(), Project.MSG_INFO);
-      artifact.mvn(pom: properties['maven-build-dir']+'/pom.xml', mavenVersion: properties['maven-version'], failonerror: true, fork: true) {
-        cmdlineProps.each{ k, v -> arg(value: '-D' + k + '=' + v) };
-        arg(value: '-fae');
-        arg(value: 'install');
-      };
-    ]]></groovy>
+    <groovy src="${common.dir}/tools/src/groovy/run-maven-build.groovy"/>
   </target>

   <target name="remove-maven-artifacts" description="Removes all Lucene/Solr Maven artifacts from the local repository">
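(Usage sketch, not part of the commit: since the script forwards every Ant user property that does not start with 'ant.' to the forked Maven process as a -D argument, properties given on the Ant command line reach the Maven build, e.g.

      ant run-maven-build -Dtests.seed=C0FFEE

where tests.seed is only an illustrative property name.)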
@@ -667,48 +473,7 @@ File | Project Structure | Platform Settings | SDKs):
   <ivy:cachepath xmlns:ivy="antlib:org.apache.ivy.ant"
     organisation="org.eclipse.jgit" module="org.eclipse.jgit" revision="${jgit-version}"
     inline="true" conf="default" transitive="true" pathid="jgit.classpath"/>
-  <groovy taskname="wc-checker" classpathref="jgit.classpath"><![CDATA[
-    import org.apache.tools.ant.BuildException;
-    import org.apache.tools.ant.Project;
-    import org.eclipse.jgit.api.Git;
-    import org.eclipse.jgit.api.Status;
-    import org.eclipse.jgit.lib.Repository;
-    import org.eclipse.jgit.storage.file.FileRepositoryBuilder;
-    import org.eclipse.jgit.errors.*;
-
-    def setProjectPropertyFromSet(prop, set) {
-      if (set) {
-        properties[prop] = '* ' + set.join(properties['line.separator'] + '* ');
-      }
-    };
-
-    try {
-      task.log('Initializing working copy...', Project.MSG_INFO);
-      final Repository repository = new FileRepositoryBuilder()
-        .setWorkTree(project.getBaseDir())
-        .setMustExist(true)
-        .build();
-
-      task.log('Checking working copy status...', Project.MSG_INFO);
-      final Status status = new Git(repository).status().call();
-      if (!status.isClean()) {
-        final SortedSet unversioned = new TreeSet(), modified = new TreeSet();
-        status.properties.each{ prop, val ->
-          if (val instanceof Set) {
-            if (prop in ['untracked', 'untrackedFolders', 'missing']) {
-              unversioned.addAll(val);
-            } else if (prop != 'ignoredNotInIndex') {
-              modified.addAll(val);
-            }
-          }
-        };
-        setProjectPropertyFromSet('wc.unversioned.files', unversioned);
-        setProjectPropertyFromSet('wc.modified.files', modified);
-      }
-    } catch (RepositoryNotFoundException | NoWorkTreeException | NotSupportedException e) {
-      task.log('WARNING: Development directory is not a valid GIT checkout! Disabling checks...', Project.MSG_WARN);
-    }
-  ]]></groovy>
+  <groovy taskname="wc-checker" classpathref="jgit.classpath" src="${common.dir}/tools/src/groovy/check-working-copy.groovy"/>
   <fail if="wc.unversioned.files"
     message="Source checkout is dirty (unversioned/missing files) after running tests!!! Offending files:${line.separator}${wc.unversioned.files}"/>
   <fail message="Source checkout is modified!!! Offending files:${line.separator}${wc.modified.files}">
@@ -726,7 +491,7 @@ File | Project Structure | Platform Settings | SDKs):
     <wc-checker failonmodifications="${is.jenkins.build}"/>
   </target>

-  <target name="run-clover" description="Runs all tests to measure coverage and generates report (pass &quot;ANT_OPTS=-Xmx1536M&quot; as environment)" depends="clean">
+  <target name="run-clover" description="Runs all tests to measure coverage and generates report (pass &quot;ANT_OPTS=-Xmx2G&quot; as environment)" depends="clean">
     <antcall inheritAll="false">
       <param name="run.clover" value="true"/>
       <!-- must be 1, as clover does not like parallel test runs: -->
@@ -759,14 +524,21 @@ File | Project Structure | Platform Settings | SDKs):
     <fileset dir="." id="clover.test.result.files">
       <include name="*/build/**/test/TEST-*.xml"/>
     </fileset>
+    <fileset dir="." id="clover.test.src.files">
+      <include name="**/src/test/**/*.java"/>
+      <!-- test framework source files are all test code: -->
+      <include name="*/test-framework/src/**/*.java"/>
+    </fileset>
     <clover-report projectName="Apache Lucene/Solr">
       <current outfile="${clover.report.dir}" title="Apache Lucene/Solr ${version}" numThreads="0">
         <format type="html" filter="assert"/>
         <testresults refid="clover.test.result.files"/>
+        <testsources refid="clover.test.src.files"/>
       </current>
       <current outfile="${clover.report.dir}/clover.xml" title="Apache Lucene/Solr ${version}">
         <format type="xml" filter="assert"/>
         <testresults refid="clover.test.result.files"/>
+        <testsources refid="clover.test.src.files"/>
       </current>
     </clover-report>
     <echo>You can find the merged Lucene/Solr Clover report in '${clover.report.dir}'.</echo>

@@ -116,6 +116,13 @@ D. How to use Maven to build Lucene/Solr

      ant clean-maven-build

+5. Please keep in mind that this is just a minimal Maven build. The resulting
+   artifacts are not the same as those created by the native Ant-based build.
+   It should be fine to enable Lucene builds in several Maven-based IDEs,
+   but should never be used for Lucene/Solr production usage, as they may lack
+   optimized class files (e.g., Java 9 MR-JAR support). To install Lucene/Solr
+   in your local repository, see instructions above.
+

 Some example Maven commands you can use after you perform the above
 preparatory steps:
@@ -128,6 +135,11 @@ D. How to use Maven to build Lucene/Solr
    After compiling and packaging, but before installing each module's
    artifact, the above command will also run all the module's tests.

+   The resulting artifacts are not the same as those created by the native
+   Ant-based build. They should never be used for Lucene/Solr production
+   usage, as they may lack optimized class files (e.g., Java 9 MR-JAR
+   support).
+
 - Compile, package, and install all binary artifacts to your local
   repository, without running any tests:

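(Illustration only — the concrete command is cut off by the hunk boundary above; with a standard Maven setup, a skip-tests install would be:

      mvn -DskipTests install

run against the POMs generated in the preparatory steps.)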
@@ -141,6 +141,15 @@ Improvements
   classes to use the optimized variants through the MR-JAR mechanism.
   (Uwe Schindler, Robert Muir, Adrien Grand, Mike McCandless)

+* LUCENE-8127: Speed up rewriteNoScoring when there are no MUST clauses.
+  (Michael Braun via Adrien Grand)
+
+* LUCENE-8152: Improve consumption of doc-value iterators. (Horatiu Lazu via
+  Adrien Grand)
+
+* LUCENE-8033: FieldInfos now always use a dense encoding. (Mayya Sharipova
+  via Adrien Grand)
+
 Bug Fixes

 * LUCENE-8077: Fixed bug in how CheckIndex verifies doc-value iterators.

@@ -189,6 +198,17 @@ Other
 * LUCENE-8155: Add back support in smoke tester to run against later Java versions.
   (Uwe Schindler)

+* LUCENE-8169: Migrated build to use OpenClover 4.2.1 for checking code coverage.
+  (Uwe Schindler)
+
+* LUCENE-8170: Improve OpenClover reports (separate test from production code);
+  enable coverage reports inside test-frameworks. (Uwe Schindler)
+
+Build
+
+* LUCENE-8168: Moved Groovy scripts in build files to separate files.
+  Update Groovy to 2.4.13. (Uwe Schindler)
+
 ======================= Lucene 7.2.1 =======================

 Bug Fixes

@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package conf;
-
-public class ConfLoader {
-  // don't mind me, I load .alg files
-}

@@ -38,8 +38,6 @@ import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks;

-import conf.ConfLoader;
-
 /** Test very simply that perf tasks are parses as expected. */
 @SuppressSysoutChecks(bugUrl = "very noisy")
 public class TestPerfTasksParse extends LuceneTestCase {

@@ -114,7 +112,7 @@ public class TestPerfTasksParse extends LuceneTestCase {
   public void testParseExamples() throws Exception {
     // hackedy-hack-hack
     boolean foundFiles = false;
-    final Path examplesDir = Paths.get(ConfLoader.class.getResource(".").toURI());
+    final Path examplesDir = Paths.get(getClass().getResource("/conf").toURI());
     try (DirectoryStream<Path> stream = Files.newDirectoryStream(examplesDir, "*.alg")) {
       for (Path path : stream) {
         Config config = new Config(Files.newBufferedReader(path, StandardCharsets.UTF_8));

@@ -353,8 +353,6 @@

     <!-- Exclude javadoc package-list files under licenses incompatible with the ASL -->
     <delete dir="${src.export.dir}/tools/javadoc/java8"/>
-    <!-- Exclude clover license files incompatible with the ASL -->
-    <delete dir="${src.export.dir}/tools/clover"/>

     <!-- because we only package the "lucene/" folder, we have to adjust dir to work on: -->
     <property name="local.src.export.dir" location="${src.export.dir}/lucene"/>

@@ -139,11 +139,11 @@
       <istrue value="${tests.asserts}"/>
   </condition>

-  <condition property="tests.heapsize" value="768M" else="512M">
+  <condition property="tests.heapsize" value="1024M" else="512M">
     <isset property="run.clover"/>
   </condition>

-  <condition property="tests.clover.args" value="-XX:ReservedCodeCacheSize=128m -XX:MaxPermSize=192m" else="">
+  <condition property="tests.clover.args" value="-XX:ReservedCodeCacheSize=192m -Dclover.pertest.coverage=off" else="">
     <isset property="run.clover"/>
   </condition>

@@ -234,7 +234,6 @@

   <property name="filtered.pom.templates.dir" location="${common.dir}/build/poms"/>

-  <property name="clover.license.path" location="${common.dir}/tools/clover/clover.license"/>
   <property name="clover.db.dir" location="${common.dir}/build/clover/db"/>
   <property name="clover.report.dir" location="${common.dir}/build/clover/reports"/>

@@ -266,7 +265,7 @@
   </propertyset>

   <patternset id="lucene.local.src.package.patterns"
-    excludes="**/pom.xml,**/*.iml,**/*.jar,build/**,dist/**,benchmark/work/**,benchmark/temp/**,tools/javadoc/java8/**,tools/clover/**"
+    excludes="**/pom.xml,**/*.iml,**/*.jar,build/**,dist/**,benchmark/work/**,benchmark/temp/**,tools/javadoc/java8/**"
   />

   <!-- Default exclude sources and javadoc jars from Ivy fetch to save time and bandwidth -->
@@ -568,12 +567,12 @@
     <loadproperties prefix="ivyversions" srcFile="${common.dir}/ivy-versions.properties"/>
     <ivy:cachepath organisation="org.ow2.asm" module="asm-commons" revision="${ivyversions./org.ow2.asm/asm-commons}"
       inline="true" conf="default" transitive="true" log="download-only" pathid="asm.classpath"/>
-    <groovy classpathref="asm.classpath" src="${common.dir}/tools/src/groovy/patch-mrjar-classes.groovy"/>
+    <groovy taskname="patch-cls" classpathref="asm.classpath" src="${common.dir}/tools/src/groovy/patch-mrjar-classes.groovy"/>
     <touch file="${build.dir}/patch-mrjar.stamp"/>
   </target>

   <target name="-mrjar-check" depends="patch-mrjar-classes">
-    <zipfileset id="mrjar-patched-files" prefix="META-INF/versions/9" dir="${build.dir}/classes/java9"/>
+    <zipfileset id="mrjar-patched-files" prefix="META-INF/versions/9" dir="${build.dir}/classes/java9" erroronmissingdir="false"/>
     <condition property="has-mrjar-patched-files">
       <resourcecount refid="mrjar-patched-files" when="greater" count="0" />
     </condition>
@@ -1568,58 +1567,7 @@ ${tests-output}/junit4-*.suites - per-JVM executed suites
       <not><isreference refid="junit.classpath"/></not>
     </condition>
   </fail>
-  <groovy taskname="beaster"><![CDATA[
-    import org.apache.tools.ant.BuildException;
-    import org.apache.tools.ant.BuildLogger;
-    import org.apache.tools.ant.Project;
-
-    int iters = (properties['beast.iters'] ?: '1') as int;
-    if (iters <= 1) {
-      throw new BuildException("Please give -Dbeast.iters with an int value > 1.");
-    }
-
-    def antcall = project.createTask('antcall');
-    antcall.with {
-      target = '-test';
-      inheritAll = true;
-      inheritRefs = true;
-      createParam().with {
-        name = "tests.isbeasting";
-        value = "true";
-      };
-    };
-
-    (1..iters).each { i ->
-      task.log('Beast round: ' + i, Project.MSG_INFO);
-      try {
-        // disable verbose build logging:
-        project.buildListeners.each { listener ->
-          if (listener instanceof BuildLogger) {
-            listener.messageOutputLevel = Project.MSG_WARN;
-          }
-        };
-
-        antcall.execute();
-
-      } catch (BuildException be) {
-        def logFile = new File(properties["junit.output.dir"], "tests-failures.txt");
-        if (logFile.exists()) {
-          logFile.eachLine("UTF-8", { line ->
-            task.log(line, Project.MSG_ERR);
-          });
-        }
-        throw be;
-      } finally {
-        // restore build logging (unfortunately there is no way to get the original logging level (write-only property):
-        project.buildListeners.each { listener ->
-          if (listener instanceof BuildLogger) {
-            listener.messageOutputLevel = Project.MSG_INFO;
-          }
-        };
-      }
-    };
-    task.log('Beasting finished.', Project.MSG_INFO);
-  ]]></groovy>
+  <groovy taskname="beaster" src="${common.dir}/tools/src/groovy/run-beaster.groovy"/>
   </target>

   <target name="-check-totals" if="tests.totals.toplevel" depends="resolve-groovy">
@@ -1663,27 +1611,8 @@ ${tests-output}/junit4-*.suites - per-JVM executed suites
   <target name="clover" depends="-clover.disable,-clover.load,-clover.classpath,-clover.setup"/>

   <target name="-clover.load" depends="ivy-availability-check,ivy-configure" if="run.clover" unless="clover.loaded">
-    <available file="${clover.license.path}" property="clover.license.available" />
-    <fail unless="clover.license.available"><![CDATA[.
-
-#########################################################################
-Atlassian Clover License not found!
-
-Current License path: ${clover.license.path}
-
-To use Atlassian Clover with Lucene build, you need a proper license
-and let the system property 'clover.license.path' point to it.
-You can pass it to ANT with:
-
-$ ant -Dclover.license.path=/path/to/clover.license -Drun.clover=true ...
-
-Apache Lucene/Solr source checkouts from Git already contain the
-file, but source distributions cannot because of legal reasons.
-#########################################################################
-
-]]></fail>
-    <echo>Code coverage with Atlassian Clover enabled.</echo>
-    <ivy:cachepath organisation="com.atlassian.clover" module="clover" revision="4.0.4"
+    <echo>Code coverage with OpenClover enabled.</echo>
+    <ivy:cachepath organisation="org.openclover" module="clover" revision="4.2.1"
       inline="true" conf="master" pathid="clover.classpath"/>
     <taskdef resource="cloverlib.xml" classpathref="clover.classpath" />
     <mkdir dir="${clover.db.dir}"/>
@@ -1700,11 +1629,9 @@ ${tests-output}/junit4-*.suites - per-JVM executed suites

   <target name="-clover.setup" if="run.clover">
     <clover-setup initString="${clover.db.dir}/coverage.db" encoding="${build.encoding}">
-      <fileset dir="${src.dir}" erroronmissingdir="no">
-        <include name="org/apache/**/*.java" />
-      </fileset>
+      <fileset dir="${src.dir}" erroronmissingdir="no"/>
       <testsources dir="${tests.src.dir}" erroronmissingdir="no">
-        <include name="org/apache/**/*.java" />
+        <exclude name="**/TestOpenNLP*Factory.java"/><!-- https://bitbucket.org/openclover/clover/issues/59 -->
       </testsources>
     </clover-setup>
   </target>
@@ -2484,7 +2411,7 @@ ${ant.project.name}.test.dependencies=${test.classpath.list}

   <!-- GROOVY scripting engine for ANT tasks -->
   <target name="resolve-groovy" unless="groovy.loaded" depends="ivy-availability-check,ivy-configure">
-    <ivy:cachepath organisation="org.codehaus.groovy" module="groovy-all" revision="2.4.12"
+    <ivy:cachepath organisation="org.codehaus.groovy" module="groovy-all" revision="2.4.13"
       inline="true" conf="default" type="jar" transitive="true" pathid="groovy.classpath"/>
     <taskdef name="groovy"
       classname="org.codehaus.groovy.ant.Groovy"
@@ -2567,48 +2494,7 @@ ${ant.project.name}.test.dependencies=${test.classpath.list}
       <ivy:dependency org="com.vladsch.flexmark" name="flexmark-ext-autolink" rev="${flexmark.version}" conf="default" />
       <ivy:dependency org="com.vladsch.flexmark" name="flexmark-ext-abbreviation" rev="${flexmark.version}" conf="default" />
     </ivy:cachepath>
-    <groovy classpathref="markdown.classpath"><![CDATA[
-      import org.apache.tools.ant.AntTypeDefinition;
-      import org.apache.tools.ant.ComponentHelper;
-      import org.apache.tools.ant.filters.TokenFilter.ChainableReaderFilter;
-      import com.vladsch.flexmark.ast.Node;
-      import com.vladsch.flexmark.ast.Heading;
-      import com.vladsch.flexmark.html.HtmlRenderer;
-      import com.vladsch.flexmark.parser.Parser;
-      import com.vladsch.flexmark.parser.ParserEmulationProfile;
-      import com.vladsch.flexmark.util.html.Escaping;
-      import com.vladsch.flexmark.util.options.MutableDataSet;
-      import com.vladsch.flexmark.ext.abbreviation.AbbreviationExtension;
-      import com.vladsch.flexmark.ext.autolink.AutolinkExtension;
-
-      public final class MarkdownFilter extends ChainableReaderFilter {
-        @Override
-        public String filter(String markdownSource) {
-          MutableDataSet options = new MutableDataSet();
-          options.setFrom(ParserEmulationProfile.MARKDOWN);
-          options.set(Parser.EXTENSIONS, [ AbbreviationExtension.create(), AutolinkExtension.create() ]);
-          options.set(HtmlRenderer.RENDER_HEADER_ID, true);
-          options.set(HtmlRenderer.MAX_TRAILING_BLANK_LINES, 0);
-          Node parsed = Parser.builder(options).build().parse(markdownSource);
-
-          StringBuilder html = new StringBuilder('<html>\n<head>\n');
-          CharSequence title = parsed.getFirstChildAny(Heading.class)?.getText();
-          if (title != null) {
-            html.append('<title>').append(Escaping.escapeHtml(title, false)).append('</title>\n');
-          }
-          html.append('<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">\n')
-            .append('</head>\n<body>\n');
-          HtmlRenderer.builder(options).build().render(parsed, html);
-          html.append('</body>\n</html>\n');
-          return html;
-        }
-      }
-
-      AntTypeDefinition t = new AntTypeDefinition();
-      t.setName('markdownfilter');
-      t.setClass(MarkdownFilter.class);
-      ComponentHelper.getComponentHelper(project).addDataTypeDefinition(t);
-    ]]></groovy>
+    <groovy classpathref="markdown.classpath" src="${common.dir}/tools/src/groovy/install-markdown-filter.groovy"/>
     <property name="markdown.loaded" value="true"/>
   </target>

@@ -25,8 +25,8 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
+import java.util.Arrays;
+import java.util.List;

 import org.apache.lucene.util.ArrayUtil;

@@ -45,8 +45,7 @@ public class FieldInfos implements Iterable<FieldInfo> {
   private final boolean hasPointValues;

   // used only by fieldInfo(int)
-  private final FieldInfo[] byNumberTable; // contiguous
-  private final SortedMap<Integer,FieldInfo> byNumberMap; // sparse
+  private final FieldInfo[] byNumber;

   private final HashMap<String,FieldInfo> byName = new HashMap<>();
   private final Collection<FieldInfo> values; // for an unmodifiable iterator
@@ -64,15 +63,22 @@ public class FieldInfos implements Iterable<FieldInfo> {
     boolean hasDocValues = false;
     boolean hasPointValues = false;

-    TreeMap<Integer, FieldInfo> byNumber = new TreeMap<>();
+    int size = 0; // number of elements in byNumberTemp, number of used array slots
+    FieldInfo[] byNumberTemp = new FieldInfo[10]; // initial array capacity of 10
     for (FieldInfo info : infos) {
       if (info.number < 0) {
         throw new IllegalArgumentException("illegal field number: " + info.number + " for field " + info.name);
       }
-      FieldInfo previous = byNumber.put(info.number, info);
+      size = info.number >= size ? info.number+1 : size;
+      if (info.number >= byNumberTemp.length){ //grow array
+        byNumberTemp = ArrayUtil.grow(byNumberTemp, info.number + 1);
+      }
+      FieldInfo previous = byNumberTemp[info.number];
       if (previous != null) {
         throw new IllegalArgumentException("duplicate field numbers: " + previous.name + " and " + info.name + " have: " + info.number);
       }
+      byNumberTemp[info.number] = info;
+
       previous = byName.put(info.name, info);
       if (previous != null) {
         throw new IllegalArgumentException("duplicate field names: " + previous.number + " and " + info.number + " have: " + info.name);
@@ -96,25 +102,16 @@ public class FieldInfos implements Iterable<FieldInfo> {
     this.hasNorms = hasNorms;
     this.hasDocValues = hasDocValues;
     this.hasPointValues = hasPointValues;
-    Integer max = byNumber.isEmpty() ? null : byNumber.lastKey();

-    // Only usee TreeMap in the very sparse case (< 1/16th of the numbers are used),
-    // because TreeMap uses ~ 64 (32 bit JVM) or 120 (64 bit JVM w/o compressed oops)
-    // overall bytes per entry, but array uses 4 (32 bit JMV) or 8
-    // (64 bit JVM w/o compressed oops):
-    if (max != null && max < ArrayUtil.MAX_ARRAY_LENGTH && max < 16L*byNumber.size()) {
-      // Pull infos into an arraylist to avoid holding a reference to the TreeMap
-      values = Collections.unmodifiableCollection(new ArrayList<>(byNumber.values()));
-      byNumberMap = null;
-      byNumberTable = new FieldInfo[max+1];
-      for (Map.Entry<Integer,FieldInfo> entry : byNumber.entrySet()) {
-        byNumberTable[entry.getKey()] = entry.getValue();
-      }
-    } else {
-      byNumberMap = byNumber;
-      values = Collections.unmodifiableCollection(byNumber.values());
-      byNumberTable = null;
-    }
+    List<FieldInfo> valuesTemp = new ArrayList<>();
+    byNumber = new FieldInfo[size];
+    for(int i=0; i<size; i++){
+      byNumber[i] = byNumberTemp[i];
+      if (byNumberTemp[i] != null) {
+        valuesTemp.add(byNumberTemp[i]);
+      }
+    }
+    values = Collections.unmodifiableCollection(Arrays.asList(valuesTemp.toArray(new FieldInfo[0])));
   }

   /** Returns true if any fields have freqs */
@@ -192,14 +189,10 @@ public class FieldInfos implements Iterable<FieldInfo> {
     if (fieldNumber < 0) {
       throw new IllegalArgumentException("Illegal field number: " + fieldNumber);
     }
-    if (byNumberTable != null) {
-      if (fieldNumber >= byNumberTable.length) {
-        return null;
-      }
-      return byNumberTable[fieldNumber];
-    } else {
-      return byNumberMap.get(fieldNumber);
+    if (fieldNumber >= byNumber.length) {
+      return null;
     }
+    return byNumber[fieldNumber];
   }

   static final class FieldDimensions {

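(A sketch of the resulting lookup path, not code from the commit: after LUCENE-8033 the by-number lookup is a bounds-checked array access in every case, never a TreeMap probe. The standalone helper below is hypothetical and only mirrors the accessor above.

      // Hypothetical mirror of the new dense lookup; unused field numbers
      // hold null slots, so absent fields simply come back as null.
      static FieldInfo lookup(FieldInfo[] byNumber, int fieldNumber) {
        if (fieldNumber < 0) {
          throw new IllegalArgumentException("Illegal field number: " + fieldNumber);
        }
        return fieldNumber >= byNumber.length ? null : byNumber[fieldNumber];
      }
)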
@@ -249,7 +249,9 @@ public abstract class FilteredTermsEnum extends TermsEnum {
         case END:
           // we are supposed to end the enum
           return null;
-        // NO: we just fall through and iterate again
+        case NO:
+          // we just iterate again
+          break;
       }
     }
   }

@@ -183,6 +183,9 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
   }

   private BooleanQuery rewriteNoScoring() {
+    if (clauseSets.get(Occur.MUST).size() == 0) {
+      return this;
+    }
     BooleanQuery.Builder newQuery = new BooleanQuery.Builder();
     newQuery.setMinimumNumberShouldMatch(getMinimumNumberShouldMatch());
     for (BooleanClause clause : clauses) {

@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.index;
+
+
+import java.util.Iterator;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestFieldInfos extends LuceneTestCase {
+
+  public void testFieldInfos() throws Exception{
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
+        .setMergePolicy(NoMergePolicy.INSTANCE));
+
+    Document d1 = new Document();
+    for (int i = 0; i < 15; i++) {
+      d1.add(new StringField("f" + i, "v" + i, Field.Store.YES));
+    }
+    writer.addDocument(d1);
+    writer.commit();
+
+    Document d2 = new Document();
+    d2.add(new StringField("f0", "v0", Field.Store.YES));
+    d2.add(new StringField("f15", "v15", Field.Store.YES));
+    d2.add(new StringField("f16", "v16", Field.Store.YES));
+    writer.addDocument(d2);
+    writer.commit();
+
+    Document d3 = new Document();
+    writer.addDocument(d3);
+    writer.close();
+
+    SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
+    assertEquals(3, sis.size());
+
+    FieldInfos fis1 = IndexWriter.readFieldInfos(sis.info(0));
+    FieldInfos fis2 = IndexWriter.readFieldInfos(sis.info(1));
+    FieldInfos fis3 = IndexWriter.readFieldInfos(sis.info(2));
+
+    // testing dense FieldInfos
+    Iterator<FieldInfo> it = fis1.iterator();
+    int i = 0;
+    while(it.hasNext()) {
+      FieldInfo fi = it.next();
+      assertEquals(i, fi.number);
+      assertEquals("f" + i , fi.name);
+      assertEquals("f" + i, fis1.fieldInfo(i).name); //lookup by number
+      assertEquals("f" + i, fis1.fieldInfo("f" + i).name); //lookup by name
+      i++;
+    }
+
+    // testing sparse FieldInfos
+    assertEquals("f0", fis2.fieldInfo(0).name); //lookup by number
+    assertEquals("f0", fis2.fieldInfo("f0").name); //lookup by name
+    assertNull(fis2.fieldInfo(1));
+    assertNull(fis2.fieldInfo("f1"));
+    assertEquals("f15", fis2.fieldInfo(15).name);
+    assertEquals("f15", fis2.fieldInfo("f15").name);
+    assertEquals("f16", fis2.fieldInfo(16).name);
+    assertEquals("f16", fis2.fieldInfo("f16").name);
+
+    // testing empty FieldInfos
+    assertNull(fis3.fieldInfo(0)); //lookup by number
+    assertNull(fis3.fieldInfo("f0")); //lookup by name
+    assertEquals(0, fis3.size());
+    Iterator<FieldInfo> it3 = fis3.iterator();
+    assertFalse(it3.hasNext());
+    dir.close();
+  }
+
+}

@@ -77,10 +77,7 @@ final class GlobalOrdinalsCollector implements Collector {

       @Override
       public void collect(int doc) throws IOException {
-        if (doc > docTermOrds.docID()) {
-          docTermOrds.advance(doc);
-        }
-        if (doc == docTermOrds.docID()) {
+        if (docTermOrds.advanceExact(doc)) {
           long segmentOrd = docTermOrds.ordValue();
           long globalOrd = segmentOrdToGlobalOrdLookup.get(segmentOrd);
           collectedOrds.set(globalOrd);

@@ -102,10 +99,7 @@ final class GlobalOrdinalsCollector implements Collector {

       @Override
       public void collect(int doc) throws IOException {
-        if (doc > docTermOrds.docID()) {
-          docTermOrds.advance(doc);
-        }
-        if (doc == docTermOrds.docID()) {
+        if (docTermOrds.advanceExact(doc)) {
           collectedOrds.set(docTermOrds.ordValue());
         }
       }

@@ -182,11 +182,7 @@ final class GlobalOrdinalsQuery extends Query {

       @Override
       public boolean matches() throws IOException {
-        int docID = approximation.docID();
-        if (docID > values.docID()) {
-          values.advance(docID);
-        }
-        if (docID == values.docID()) {
+        if (values.advanceExact(approximation.docID())) {
           final long segmentOrd = values.ordValue();
           final long globalOrd = segmentOrdToGlobalOrdLookup.get(segmentOrd);
           if (foundOrds.get(globalOrd)) {

@@ -220,14 +216,8 @@ final class GlobalOrdinalsQuery extends Query {

       @Override
       public boolean matches() throws IOException {
-        int docID = approximation.docID();
-        if (docID > values.docID()) {
-          values.advance(docID);
-        }
-        if (docID == values.docID()) {
-          if (foundOrds.get(values.ordValue())) {
-            return true;
-          }
+        if (values.advanceExact(approximation.docID()) && foundOrds.get(values.ordValue())) {
+          return true;
         }
         return false;
       }

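(The same refactoring recurs in every hunk above and below: the manual advance-then-compare sequence on a doc-values iterator is collapsed into a single advanceExact call, which positions the iterator on the target document and reports whether it has a value there. A minimal before/after sketch, with dv and consume() as hypothetical stand-ins:

      // Before: advance manually, then re-check where the iterator landed.
      if (doc > dv.docID()) {
        dv.advance(doc);
      }
      if (doc == dv.docID()) {
        consume(dv.ordValue());
      }

      // After: one call does both steps.
      if (dv.advanceExact(doc)) {
        consume(dv.ordValue());
      }
)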
@@ -113,10 +113,7 @@ abstract class GlobalOrdinalsWithScoreCollector implements Collector {

       @Override
       public void collect(int doc) throws IOException {
-        if (doc > docTermOrds.docID()) {
-          docTermOrds.advance(doc);
-        }
-        if (doc == docTermOrds.docID()) {
+        if (docTermOrds.advanceExact(doc)) {
           final int globalOrd = (int) segmentOrdToGlobalOrdLookup.get(docTermOrds.ordValue());
           collectedOrds.set(globalOrd);
           float existingScore = scores.getScore(globalOrd);

@@ -145,10 +142,7 @@ abstract class GlobalOrdinalsWithScoreCollector implements Collector {

       @Override
       public void collect(int doc) throws IOException {
-        if (doc > docTermOrds.docID()) {
-          docTermOrds.advance(doc);
-        }
-        if (doc == docTermOrds.docID()) {
+        if (docTermOrds.advanceExact(doc)) {
           int segmentOrd = docTermOrds.ordValue();
           collectedOrds.set(segmentOrd);
           float existingScore = scores.getScore(segmentOrd);

@@ -258,10 +252,7 @@ abstract class GlobalOrdinalsWithScoreCollector implements Collector {

       @Override
       public void collect(int doc) throws IOException {
-        if (doc > docTermOrds.docID()) {
-          docTermOrds.advance(doc);
-        }
-        if (doc == docTermOrds.docID()) {
+        if (docTermOrds.advanceExact(doc)) {
           final int globalOrd = (int) segmentOrdToGlobalOrdLookup.get(docTermOrds.ordValue());
           collectedOrds.set(globalOrd);
           occurrences.increment(globalOrd);

@@ -276,10 +267,7 @@ abstract class GlobalOrdinalsWithScoreCollector implements Collector {

       @Override
       public void collect(int doc) throws IOException {
-        if (doc > docTermOrds.docID()) {
-          docTermOrds.advance(doc);
-        }
-        if (doc == docTermOrds.docID()) {
+        if (docTermOrds.advanceExact(doc)) {
           int segmentOrd = docTermOrds.ordValue();
           collectedOrds.set(segmentOrd);
           occurrences.increment(segmentOrd);

@@ -191,11 +191,7 @@ final class GlobalOrdinalsWithScoreQuery extends Query {

       @Override
       public boolean matches() throws IOException {
-        int docID = approximation.docID();
-        if (docID > values.docID()) {
-          values.advance(docID);
-        }
-        if (docID == values.docID()) {
+        if (values.advanceExact(approximation.docID())) {
           final long segmentOrd = values.ordValue();
           final int globalOrd = (int) segmentOrdToGlobalOrdLookup.get(segmentOrd);
           if (collector.match(globalOrd)) {

@@ -229,11 +225,7 @@ final class GlobalOrdinalsWithScoreQuery extends Query {

       @Override
       public boolean matches() throws IOException {
-        int docID = approximation.docID();
-        if (docID > values.docID()) {
-          values.advance(docID);
-        }
-        if (docID == values.docID()) {
+        if (values.advanceExact(approximation.docID())) {
           final int segmentOrd = values.ordValue();
           if (collector.match(segmentOrd)) {
             score = collector.score(segmentOrd);

@@ -199,10 +199,7 @@ public final class JoinUtil {

       @Override
       public void collect(int doc) throws IOException {
-        if (doc > sortedNumericDocValues.docID()) {
-          sortedNumericDocValues.advance(doc);
-        }
-        if (doc == sortedNumericDocValues.docID()) {
+        if (sortedNumericDocValues.advanceExact(doc)) {
           for (int i = 0; i < sortedNumericDocValues.docValueCount(); i++) {
             long value = sortedNumericDocValues.nextValue();
             joinValues.add(value);

@@ -246,15 +243,9 @@ public final class JoinUtil {
       @Override
       public void collect(int doc) throws IOException {
         assert docsInOrder(doc);
-        int dvDocID = numericDocValues.docID();
-        if (dvDocID < doc) {
-          dvDocID = numericDocValues.advance(doc);
-        }
-        long value;
-        if (dvDocID == doc) {
+        long value = 0;
+        if (numericDocValues.advanceExact(doc)) {
           value = numericDocValues.longValue();
-        } else {
-          value = 0;
         }
         joinValues.add(value);
         if (needsScore) {

@@ -84,11 +84,8 @@ abstract class TermsCollector<DV> extends DocValuesTermsCollector<DV> {

     @Override
     public void collect(int doc) throws IOException {
-      if (docValues.docID() < doc) {
-        docValues.advance(doc);
-      }
       BytesRef term;
-      if (docValues.docID() == doc) {
+      if (docValues.advanceExact(doc)) {
         term = docValues.binaryValue();
       } else {
         term = new BytesRef(BytesRef.EMPTY_BYTES);

@@ -96,11 +96,8 @@ abstract class TermsWithScoreCollector<DV> extends DocValuesTermsCollector<DV>

     @Override
     public void collect(int doc) throws IOException {
-      if (docValues.docID() < doc) {
-        docValues.advance(doc);
-      }
       BytesRef value;
-      if (docValues.docID() == doc) {
+      if (docValues.advanceExact(doc)) {
         value = docValues.binaryValue();
       } else {
         value = new BytesRef(BytesRef.EMPTY_BYTES);

@@ -155,11 +152,8 @@ abstract class TermsWithScoreCollector<DV> extends DocValuesTermsCollector<DV>

     @Override
     public void collect(int doc) throws IOException {
-      if (docValues.docID() < doc) {
-        docValues.advance(doc);
-      }
       BytesRef value;
-      if (docValues.docID() == doc) {
+      if (docValues.advanceExact(doc)) {
         value = docValues.binaryValue();
       } else {
         value = new BytesRef(BytesRef.EMPTY_BYTES);

@@ -207,10 +201,7 @@ abstract class TermsWithScoreCollector<DV> extends DocValuesTermsCollector<DV>

     @Override
     public void collect(int doc) throws IOException {
-      if (doc > docValues.docID()) {
-        docValues.advance(doc);
-      }
-      if (doc == docValues.docID()) {
+      if (docValues.advanceExact(doc)) {
        long ord;
         while ((ord = docValues.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
           int termID = collectedTerms.add(docValues.lookupOrd(ord));

@@ -255,10 +246,7 @@ abstract class TermsWithScoreCollector<DV> extends DocValuesTermsCollector<DV>

     @Override
     public void collect(int doc) throws IOException {
-      if (doc > docValues.docID()) {
-        docValues.advance(doc);
-      }
-      if (doc == docValues.docID()) {
+      if (docValues.advanceExact(doc)) {
         long ord;
         while ((ord = docValues.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
           int termID = collectedTerms.add(docValues.lookupOrd(ord));

@@ -1471,11 +1471,8 @@ public class TestJoinUtil extends LuceneTestCase {

         @Override
         public void collect(int doc) throws IOException {
-          if (doc > terms.docID()) {
-            terms.advance(doc);
-          }
           final BytesRef joinValue;
-          if (doc == terms.docID()) {
+          if (terms.advanceExact(doc)) {
             joinValue = terms.binaryValue();
           } else {
             // missing;

@@ -1540,11 +1537,8 @@ public class TestJoinUtil extends LuceneTestCase {

         @Override
         public void collect(int doc) throws IOException {
-          if (doc > terms.docID()) {
-            terms.advance(doc);
-          }
           final BytesRef joinValue;
-          if (doc == terms.docID()) {
+          if (terms.advanceExact(doc)) {
             joinValue = terms.binaryValue();
           } else {
             // missing;

@@ -92,6 +92,8 @@ public class TestDocValuesFieldSources extends LuceneTestCase {
           vals[i] = (long) random().nextInt((int) PackedInts.maxValue(bitsPerValue));
           f.setLongValue((Long) vals[i]);
           break;
+        default:
+          throw new AssertionError();
       }
       iw.addDocument(document);
       if (random().nextBoolean() && i % 10 == 9) {

@@ -150,6 +152,8 @@ public class TestDocValuesFieldSources extends LuceneTestCase {
         case SORTED_NUMERIC:
           assertEquals(((Number) expected).longValue(), values.longVal(i));
           break;
+        default:
+          throw new AssertionError();
       }
     }
   }

@@ -40,6 +40,9 @@
   <target name="-check-forbidden-tests"/>
   <target name="-check-forbidden-sysout"/>

+  <!-- disable clover -->
+  <target name="-clover.setup" if="run.clover"/>
+
   <!--
    Specialize compile-core to not depend on clover, to exclude a
    classpath reference when compiling, and to not attempt to copy

@@ -1,108 +0,0 @@
-################################################
-DOCUMENTATION ABOUT ATLASSIAN CLOVER LICENSE
-################################################
-
-From: sales@atlassian.com [mailto:sales@atlassian.com]
-Sent: Wednesday, September 25, 2013 5:09 PM
-To: uschindler@apache.org
-Cc: npellow@atlassian.com
-Subject: RE: Fwd: New Clover License for Apache - {844535}
-
-Hey Uwe,
-
-Thanks for getting back.
-I found the license using the key you provided - thanks for that! I've renewed the license for another year and added you as a technical contact.
-
-Cheers,
-Amber
-
-Clover 100+ Machines: Open Source License Apache 27 Jun 2014
-
-SEN: SEN-2304226
-Technical Contact: Mike Mccandless (mikemccand@apache.org), Uwe Schindler (uschindler@apache.org)
-Billing Contact: Mike Mccandless (mikemccand@apache.org)
-Licensed To: Apache
-
-History:
-27 Jun 2012 Purchase of Clover 100+ Machines: Open Source License
-27 Jun 2013 Maintenance Renewal
-
----------------------------------------------------------------------------------
-
-From: Nicholas Muldoon <nmuldoon@atlassian.com>
-To: Ted Yu <yuzhihong@gmail.com>
-Cc: Enis Soztutar <enis@hortonworks.com>, Todd Lipcon <todd@cloudera.com>
-Content-Type: multipart/alternative; boundary=047d7b10cff34cee0f04c376df70
-X-Gm-Message-State: ALoCoQmzowRKrtL3txnON+W+U2vn7gwwWSn/U5dvchuTV0Nn3xaMoAojvbPal5TBkC6foBnuHPWU
-
---047d7b10cff34cee0f04c376df70
-Content-Type: text/plain; charset=ISO-8859-1
-
-Hi Ted,
-
-Please find a renewed license below - Clover 3 compatible. Should you have
-any difficulty please let me know.
-
-[license]
-
-Thanks Ted, have a great day, thanks for your support,
-Nicholas
-
-
-On Fri, Dec 18, 2009 at 1:33 AM, Nicholas Muldoon <nmuldoon@atlassian.com> wrote:
----------------------------------------------------------------------------------
-
-Hi,
-Atlassian are excited to be presenting Apache with a site license for Clover
-2.6.
-
-This Clover license can be used for any code that is under an org.apache
-package. Further, this license can be used by any developer on their machine
-in conjunction with our Eclipse or IntelliJ plugins for development on an
-org.apache project.
-
-[license]
-
-
-On Sat, Dec 19, 2009 at 10:38 PM, Nick Pellow <npellow@atlassian.com> wrote:
----------------------------------------------------------------------------------
-Hi Mike,
-
-That would be great if you could forward this to committers@apache.org.
-The license is available to anyone working on the org.apache.* be it
-in IDEA/Eclipse/Ant/Maven locally, or on a central build server.
-
-Since the license will only instrument and report coverage on
-org.apache packages, please mention that it is fine to commit this
-license to each project if it makes running builds easier. ie just
-check out the project and run with Clover, without the need for the
-extra step of locating and installing the clover license.
-
-Cheers,
-Nick
-
-> On 19/12/2009, at 1:11 AM, Michael McCandless wrote:
->
->> Woops, I meant "The only restriction is that it will only test
->> coverage of packages under org.apache", below.
->>
->> Mike
->>
->> On Fri, Dec 18, 2009 at 9:05 AM, Michael McCandless
->> <lucene@mikemccandless.com> wrote:
->>>
->>> Since this generous offer extends beyond Lucene...
->>>
->>> I'd like to forward this to committers@apache.org, pointing to where
->>> the license is available
->>>
->>> (https://svn.apache.org/repos/private/committers/donated-licenses/cl
->>> over/2.6.x), explaining that Lucene upgraded (providing the link to
->>> our coverage report), etc.
->>>
->>> But I wanted to confirm with you all first: is this OK? This
->>> license may be used by anyone? The only restriction is that it will
->>> only test coverage of packages under org.apache.lucene?
->>>
->>> I can draft something up and run it by you all first, if this makes
->>> sense...

@@ -1,5 +0,0 @@
-nRQNWtwTDKNQARUudFdtDsicMRPIUONPsMvQXbqMkHLoFB
-mi2Kv8XgEIr0AKkpEyRKsl2L2KSZDO6fk1Eio5G7RKZiUc
-OoroXsoPoMPqOQrNrPRTtnNpPOMmMOnnOoTsXVsTvXXvQp
-moNnmqmUUnqmptmqummmmmUUnqmptmqummmmmUU1mXZebU
-Unmmmm

@@ -0,0 +1,189 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/** Task script that is called by Ant's build.xml file:
 * Checks that there are no @author javadoc tags, tabs,
 * svn keywords, javadoc-style licenses, or nocommits.
 */

import org.apache.tools.ant.BuildException;
import org.apache.tools.ant.Project;
import org.apache.rat.Defaults;
import org.apache.rat.document.impl.FileDocument;
import org.apache.rat.api.MetaData;

def extensions = [
  'java', 'jflex', 'py', 'pl', 'g4', 'jj', 'html', 'js',
  'css', 'xml', 'xsl', 'vm', 'sh', 'cmd', 'bat', 'policy',
  'properties', 'mdtext', 'groovy',
  'template', 'adoc', 'json',
];
def invalidPatterns = [
  (~$/@author\b/$) : '@author javadoc tag',
  (~$/(?i)\bno(n|)commit\b/$) : 'nocommit',
  (~$/\bTOOD:/$) : 'TOOD instead TODO',
  (~$/\t/$) : 'tabs instead spaces',
  (~$/\Q/**\E((?:\s)|(?:\*))*\Q{@inheritDoc}\E((?:\s)|(?:\*))*\Q*/\E/$) : '{@inheritDoc} on its own is unnecessary',
  (~$/\$$(?:LastChanged)?Date\b/$) : 'svn keyword',
  (~$/\$$(?:(?:LastChanged)?Revision|Rev)\b/$) : 'svn keyword',
  (~$/\$$(?:LastChangedBy|Author)\b/$) : 'svn keyword',
  (~$/\$$(?:Head)?URL\b/$) : 'svn keyword',
  (~$/\$$Id\b/$) : 'svn keyword',
  (~$/\$$Header\b/$) : 'svn keyword',
  (~$/\$$Source\b/$) : 'svn keyword',
  (~$/^\uFEFF/$) : 'UTF-8 byte order mark'
];

def baseDir = properties['basedir'];
def baseDirLen = baseDir.length() + 1;

def found = 0;
def violations = new TreeSet();
def reportViolation = { f, name ->
  task.log(name + ': ' + f.toString().substring(baseDirLen).replace(File.separatorChar, (char)'/'), Project.MSG_ERR);
  violations.add(name);
  found++;
}

def javadocsPattern = ~$/(?sm)^\Q/**\E(.*?)\Q*/\E/$;
def javaCommentPattern = ~$/(?sm)^\Q/*\E(.*?)\Q*/\E/$;
def xmlCommentPattern = ~$/(?sm)\Q<!--\E(.*?)\Q-->\E/$;
def lineSplitter = ~$/[\r\n]+/$;
def singleLineSplitter = ~$/\n\r?/$;
def licenseMatcher = Defaults.createDefaultMatcher();
def validLoggerPattern = ~$/(?s)\b(private\s|static\s|final\s){3}+\s*Logger\s+\p{javaJavaIdentifierStart}+\s+=\s+\QLoggerFactory.getLogger(MethodHandles.lookup().lookupClass());\E/$;
def packagePattern = ~$/(?m)^\s*package\s+org\.apache.*;/$;
def xmlTagPattern = ~$/(?m)\s*<[a-zA-Z].*/$;
def sourceHeaderPattern = ~$/\[source\b.*/$;
def blockBoundaryPattern = ~$/----\s*/$;
def blockTitlePattern = ~$/\..*/$;
def unescapedSymbolPattern = ~$/(?<=[^\\]|^)([-=]>|<[-=])/$; // SOLR-10883

def isLicense = { matcher, ratDocument ->
  licenseMatcher.reset();
  return lineSplitter.split(matcher.group(1)).any{ licenseMatcher.match(ratDocument, it) };
}

def checkLicenseHeaderPrecedes = { f, description, contentPattern, commentPattern, text, ratDocument ->
  def contentMatcher = contentPattern.matcher(text);
  if (contentMatcher.find()) {
    def contentStartPos = contentMatcher.start();
    def commentMatcher = commentPattern.matcher(text);
    while (commentMatcher.find()) {
      if (isLicense(commentMatcher, ratDocument)) {
        if (commentMatcher.start() < contentStartPos) {
          break; // This file is all good, so break loop: license header precedes 'description' definition
        } else {
          reportViolation(f, description+' declaration precedes license header');
        }
      }
    }
  }
}

def checkMockitoAssume = { f, text ->
  if (text.contains("mockito") && !text.contains("assumeWorkingMockito()")) {
    reportViolation(f, 'File uses Mockito but has no assumeWorkingMockito() call');
  }
}

def checkForUnescapedSymbolSubstitutions = { f, text ->
  def inCodeBlock = false;
  def underSourceHeader = false;
  def lineNumber = 0;
  singleLineSplitter.split(text).each {
    ++lineNumber;
    if (underSourceHeader) { // This line is either a single source line, or the boundary of a code block
      inCodeBlock = blockBoundaryPattern.matcher(it).matches();
      if ( ! blockTitlePattern.matcher(it).matches()) {
        underSourceHeader = false;
      }
    } else {
      if (inCodeBlock) {
        inCodeBlock = ! blockBoundaryPattern.matcher(it).matches();
      } else {
        underSourceHeader = sourceHeaderPattern.matcher(it).lookingAt();
        if ( ! underSourceHeader) {
          def unescapedSymbolMatcher = unescapedSymbolPattern.matcher(it);
          if (unescapedSymbolMatcher.find()) {
            reportViolation(f, 'Unescaped symbol "' + unescapedSymbolMatcher.group(1) + '" on line #' + lineNumber);
          }
        }
      }
    }
  }
}

ant.fileScanner{
  fileset(dir: baseDir){
    extensions.each{
      include(name: 'lucene/**/*.' + it)
      include(name: 'solr/**/*.' + it)
      include(name: 'dev-tools/**/*.' + it)
      include(name: '*.' + it)
    }
    // TODO: For now we don't scan txt files, so we
    // check licenses in top-level folders separately:
    include(name: '*.txt')
    include(name: '*/*.txt')
    // excludes:
    exclude(name: '**/build/**')
    exclude(name: '**/dist/**')
    exclude(name: 'lucene/benchmark/work/**')
    exclude(name: 'lucene/benchmark/temp/**')
    exclude(name: '**/CheckLoggingConfiguration.java')
    exclude(name: 'lucene/tools/src/groovy/check-source-patterns.groovy') // ourselves :-)
  }
}.each{ f ->
  task.log('Scanning file: ' + f, Project.MSG_VERBOSE);
  def text = f.getText('UTF-8');
  invalidPatterns.each{ pattern,name ->
    if (pattern.matcher(text).find()) {
      reportViolation(f, name);
    }
  }
  def javadocsMatcher = javadocsPattern.matcher(text);
  def ratDocument = new FileDocument(f);
  while (javadocsMatcher.find()) {
    if (isLicense(javadocsMatcher, ratDocument)) {
      reportViolation(f, String.format(Locale.ENGLISH, 'javadoc-style license header [%s]',
          ratDocument.getMetaData().value(MetaData.RAT_URL_LICENSE_FAMILY_NAME)));
    }
  }
  if (f.name.endsWith('.java')) {
    if (text.contains('org.slf4j.LoggerFactory')) {
      if (!validLoggerPattern.matcher(text).find()) {
        reportViolation(f, 'invalid logging pattern [not private static final, uses static class name]');
      }
    }
    checkLicenseHeaderPrecedes(f, 'package', packagePattern, javaCommentPattern, text, ratDocument);
    if (f.name.contains("Test")) {
      checkMockitoAssume(f, text);
    }
  }
  if (f.name.endsWith('.xml') || f.name.endsWith('.xml.template')) {
    checkLicenseHeaderPrecedes(f, '<tag>', xmlTagPattern, xmlCommentPattern, text, ratDocument);
  }
  if (f.name.endsWith('.adoc')) {
    checkForUnescapedSymbolSubstitutions(f, text);
  }
};

if (found) {
  throw new BuildException(String.format(Locale.ENGLISH, 'Found %d violations in source files (%s).',
      found, violations.join(', ')));
}
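The heart of the script above is nothing more than per-file regex matching. A minimal standalone sketch of the same idea in plain Java (the file path argument and the reduced pattern set are illustrative only, not part of the commit):

```java
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Map;
import java.util.regex.Pattern;

public class SourcePatternCheck {
  // Same idea as the invalidPatterns map above, in plain Java regex syntax.
  private static final Map<Pattern, String> INVALID = Map.of(
      Pattern.compile("@author\\b"), "@author javadoc tag",
      Pattern.compile("(?i)\\bno(n|)commit\\b"), "nocommit",
      Pattern.compile("\\t"), "tabs instead spaces");

  public static void main(String[] args) throws Exception {
    String text = Files.readString(Path.of(args[0])); // file to scan
    INVALID.forEach((pattern, name) -> {
      if (pattern.matcher(text).find()) {
        System.err.println(name + ": " + args[0]); // report violation
      }
    });
  }
}
```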
@@ -0,0 +1,61 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/** Task script that is called by Ant's build.xml file:
 * Checks GIT working copy for unversioned or modified files.
 */

import org.apache.tools.ant.BuildException;
import org.apache.tools.ant.Project;
import org.eclipse.jgit.api.Git;
import org.eclipse.jgit.api.Status;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.storage.file.FileRepositoryBuilder;
import org.eclipse.jgit.errors.*;

def setProjectPropertyFromSet = { prop, set ->
  if (set) {
    properties[prop] = '* ' + set.join(properties['line.separator'] + '* ');
  }
};

try {
  task.log('Initializing working copy...', Project.MSG_INFO);
  final Repository repository = new FileRepositoryBuilder()
    .setWorkTree(project.getBaseDir())
    .setMustExist(true)
    .build();

  task.log('Checking working copy status...', Project.MSG_INFO);
  final Status status = new Git(repository).status().call();
  if (!status.isClean()) {
    final SortedSet unversioned = new TreeSet(), modified = new TreeSet();
    status.properties.each{ prop, val ->
      if (val instanceof Set) {
        if (prop in ['untracked', 'untrackedFolders', 'missing']) {
          unversioned.addAll(val);
        } else if (prop != 'ignoredNotInIndex') {
          modified.addAll(val);
        }
      }
    };
    setProjectPropertyFromSet('wc.unversioned.files', unversioned);
    setProjectPropertyFromSet('wc.modified.files', modified);
  }
} catch (RepositoryNotFoundException | NoWorkTreeException | NotSupportedException e) {
  task.log('WARNING: Development directory is not a valid GIT checkout! Disabling checks...', Project.MSG_WARN);
}
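For context, the JGit calls the script relies on can be exercised outside Ant as well. A minimal sketch (the command-line argument handling is illustrative, not from the commit):

```java
import org.eclipse.jgit.api.Git;
import org.eclipse.jgit.api.Status;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.storage.file.FileRepositoryBuilder;

import java.io.File;

public class WorkingCopyCheck {
  public static void main(String[] args) throws Exception {
    // Open the checkout the same way the script does (work tree, not .git dir).
    Repository repository = new FileRepositoryBuilder()
        .setWorkTree(new File(args.length > 0 ? args[0] : "."))
        .setMustExist(true)
        .build();
    try (Git git = new Git(repository)) {
      Status status = git.status().call();
      // JGit exposes the same sets the script folds into its two properties:
      System.out.println("clean:     " + status.isClean());
      System.out.println("untracked: " + status.getUntracked());
      System.out.println("modified:  " + status.getModified());
      System.out.println("missing:   " + status.getMissing());
    }
  }
}
```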
@@ -0,0 +1,61 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/** Task script that is called by Ant's common-build.xml file:
 * Installs markdown filter into Ant.
 */

import org.apache.tools.ant.AntTypeDefinition;
import org.apache.tools.ant.ComponentHelper;
import org.apache.tools.ant.filters.TokenFilter.ChainableReaderFilter;
import com.vladsch.flexmark.ast.Node;
import com.vladsch.flexmark.ast.Heading;
import com.vladsch.flexmark.html.HtmlRenderer;
import com.vladsch.flexmark.parser.Parser;
import com.vladsch.flexmark.parser.ParserEmulationProfile;
import com.vladsch.flexmark.util.html.Escaping;
import com.vladsch.flexmark.util.options.MutableDataSet;
import com.vladsch.flexmark.ext.abbreviation.AbbreviationExtension;
import com.vladsch.flexmark.ext.autolink.AutolinkExtension;

public final class MarkdownFilter extends ChainableReaderFilter {
  @Override
  public String filter(String markdownSource) {
    MutableDataSet options = new MutableDataSet();
    options.setFrom(ParserEmulationProfile.MARKDOWN);
    options.set(Parser.EXTENSIONS, [ AbbreviationExtension.create(), AutolinkExtension.create() ]);
    options.set(HtmlRenderer.RENDER_HEADER_ID, true);
    options.set(HtmlRenderer.MAX_TRAILING_BLANK_LINES, 0);
    Node parsed = Parser.builder(options).build().parse(markdownSource);

    StringBuilder html = new StringBuilder('<html>\n<head>\n');
    CharSequence title = parsed.getFirstChildAny(Heading.class)?.getText();
    if (title != null) {
      html.append('<title>').append(Escaping.escapeHtml(title, false)).append('</title>\n');
    }
    html.append('<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">\n')
      .append('</head>\n<body>\n');
    HtmlRenderer.builder(options).build().render(parsed, html);
    html.append('</body>\n</html>\n');
    return html;
  }
}

AntTypeDefinition t = new AntTypeDefinition();
t.setName('markdownfilter');
t.setClass(MarkdownFilter.class);
ComponentHelper.getComponentHelper(project).addDataTypeDefinition(t);
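The flexmark-java parse/render pair used by the filter can be tried in isolation. A minimal sketch against the same API surface imported above (the sample Markdown string is illustrative only):

```java
import com.vladsch.flexmark.ast.Node;
import com.vladsch.flexmark.html.HtmlRenderer;
import com.vladsch.flexmark.parser.Parser;
import com.vladsch.flexmark.parser.ParserEmulationProfile;
import com.vladsch.flexmark.util.options.MutableDataSet;

public class MarkdownToHtml {
  public static void main(String[] args) {
    MutableDataSet options = new MutableDataSet();
    options.setFrom(ParserEmulationProfile.MARKDOWN);  // classic Markdown rules
    options.set(HtmlRenderer.RENDER_HEADER_ID, true);  // emit id="" on headings
    Parser parser = Parser.builder(options).build();
    HtmlRenderer renderer = HtmlRenderer.builder(options).build();
    Node document = parser.parse("# Title\n\nSome *emphasized* text.");
    System.out.println(renderer.render(document));     // HTML fragment
  }
}
```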
@@ -15,6 +15,11 @@
 * limitations under the License.
 */

/** Task script that is called by Ant's common-build.xml file:
 * Patches Java 8 class files to replace method signatures by
 * native Java 9 optimized ones (to be placed in MR-JAR).
 */

import org.apache.tools.ant.Project;

import org.objectweb.asm.ClassReader;

@@ -27,6 +32,11 @@ def mappings = [
  'org/apache/lucene/util/FutureArrays': 'java/util/Arrays',
];

if (properties['run.clover'] != null) {
  task.log("Disabled class file remapping for Java 9, because Clover code coverage is enabled.", Project.MSG_INFO);
  return;
}

File inputDir = new File(properties['build.dir'], 'classes/java');
File outputDir = new File(properties['build.dir'], 'classes/java9');
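The mapping entry above feeds an ASM-based rewrite of class files. A minimal sketch of that kind of rewriting, using standard ASM remapping utilities (this is an illustration of the technique, not the script's actual implementation; file arguments are hypothetical):

```java
import org.objectweb.asm.ClassReader;
import org.objectweb.asm.ClassWriter;
import org.objectweb.asm.commons.ClassRemapper;
import org.objectweb.asm.commons.SimpleRemapper;

import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Map;

public class RemapOneClass {
  public static void main(String[] args) throws Exception {
    byte[] input = Files.readAllBytes(Path.of(args[0]));  // compiled .class file
    // Rewrite references to the Java 8 shim onto the real JDK class:
    SimpleRemapper remapper = new SimpleRemapper(
        Map.of("org/apache/lucene/util/FutureArrays", "java/util/Arrays"));
    ClassReader reader = new ClassReader(input);
    ClassWriter writer = new ClassWriter(0);
    reader.accept(new ClassRemapper(writer, remapper), 0);
    Files.write(Path.of(args[1]), writer.toByteArray());  // patched copy
  }
}
```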
@@ -0,0 +1,71 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/** Task script that is called by Ant's common-build.xml file:
 * Runs test beaster.
 */

import org.apache.tools.ant.BuildException;
import org.apache.tools.ant.BuildLogger;
import org.apache.tools.ant.Project;

int iters = (properties['beast.iters'] ?: '1') as int;
if (iters <= 1) {
  throw new BuildException("Please give -Dbeast.iters with an int value > 1.");
}

def antcall = project.createTask('antcall');
antcall.with {
  target = '-test';
  inheritAll = true;
  inheritRefs = true;
  createParam().with {
    name = "tests.isbeasting";
    value = "true";
  };
};

(1..iters).each { i ->
  task.log('Beast round: ' + i, Project.MSG_INFO);
  try {
    // disable verbose build logging:
    project.buildListeners.each { listener ->
      if (listener instanceof BuildLogger) {
        listener.messageOutputLevel = Project.MSG_WARN;
      }
    };

    antcall.execute();

  } catch (BuildException be) {
    def logFile = new File(properties["junit.output.dir"], "tests-failures.txt");
    if (logFile.exists()) {
      logFile.eachLine("UTF-8", { line ->
        task.log(line, Project.MSG_ERR);
      });
    }
    throw be;
  } finally {
    // restore build logging (unfortunately there is no way to get the original logging level, a write-only property):
    project.buildListeners.each { listener ->
      if (listener instanceof BuildLogger) {
        listener.messageOutputLevel = Project.MSG_INFO;
      }
    };
  }
};
task.log('Beasting finished.', Project.MSG_INFO);
@@ -0,0 +1,49 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/** Task script that is called by Ant's build.xml file:
 * Runs maven build from within Ant after creating POMs.
 */

import groovy.xml.NamespaceBuilder;
import org.apache.tools.ant.Project;

def userHome = properties['user.home'], commonDir = properties['common.dir'];
def propPrefix = '-mvn.inject.'; int propPrefixLen = propPrefix.length();

def subProject = project.createSubProject();
project.copyUserProperties(subProject);
subProject.initProperties();
new AntBuilder(subProject).sequential{
  property(file: userHome+'/lucene.build.properties', prefix: propPrefix);
  property(file: userHome+'/build.properties', prefix: propPrefix);
  property(file: commonDir+'/build.properties', prefix: propPrefix);
};

def cmdlineProps = subProject.properties
  .findAll{ k, v -> k.startsWith(propPrefix) }
  .collectEntries{ k, v -> [k.substring(propPrefixLen), v] };
cmdlineProps << project.userProperties.findAll{ k, v -> !k.startsWith('ant.') };

def artifact = NamespaceBuilder.newInstance(ant, 'antlib:org.apache.maven.artifact.ant');

task.log('Running Maven with props: ' + cmdlineProps.toString(), Project.MSG_INFO);
artifact.mvn(pom: properties['maven-build-dir']+'/pom.xml', mavenVersion: properties['maven-version'], failonerror: true, fork: true) {
  cmdlineProps.each{ k, v -> arg(value: '-D' + k + '=' + v) };
  arg(value: '-fae');
  arg(value: 'install');
};
@@ -197,6 +197,14 @@ Bug Fixes

* SOLR-11931: Fix contrib/ltr custom inner class feature/normaliser/model persistence. (Christine Poerschke)

* SOLR-10261: In case of in-place updates, failure in leader to follower replica update request now puts the
  follower replica into leader-initiated-recovery (Ishan Chattopadhyaya, Steve Rowe)

* SOLR-11898: ConcurrentModificationException when calling org.apache.solr.core.SolrInfoBean.getMetricsSnapshot
  (Jeff Miller via Erick Erickson)

* SOLR-11950: Allow CLUSTERSTATUS "shard" parameter to accept comma (,) delimited list (Chris Ulicny via Jason Gerlowski)

Optimizations
----------------------
@@ -488,8 +488,6 @@

    <!-- Exclude javadoc package-list files under licenses incompatible with the ASL -->
    <delete dir="${src.export.dir}/lucene/tools/javadoc/java8"/>
    <!-- Exclude clover license files incompatible with the ASL -->
    <delete dir="${src.export.dir}/lucene/tools/clover"/>

    <build-changes changes.src.file="${src.export.dir}/solr/CHANGES.txt"
                   changes.target.dir="${src.export.dir}/solr/docs/changes"
@@ -162,7 +162,7 @@
  <target name="validate" depends="compile-tools">
  </target>

  <target name="init-dist" >
  <target name="init-dist" depends="resolve-groovy">
    <mkdir dir="${build.dir}"/>
    <mkdir dir="${package.dir}"/>
    <mkdir dir="${dist}"/>
@@ -170,7 +170,8 @@
  </target>

  <target name="prep-lucene-jars"
          depends="jar-lucene-core, jar-backward-codecs, jar-analyzers-phonetic, jar-analyzers-kuromoji, jar-codecs,jar-expressions, jar-suggest, jar-highlighter, jar-memory,
          depends="resolve-groovy,
                   jar-lucene-core, jar-backward-codecs, jar-analyzers-phonetic, jar-analyzers-kuromoji, jar-codecs,jar-expressions, jar-suggest, jar-highlighter, jar-memory,
                   jar-misc, jar-spatial-extras, jar-spatial3d, jar-grouping, jar-queries, jar-queryparser, jar-join, jar-sandbox, jar-classification">
    <property name="solr.deps.compiled" value="true"/>
  </target>
@@ -115,7 +115,10 @@ public class ClusterStatus {
      }
    }
    if (shard != null) {
      requestedShards.add(shard);
      String[] paramShards = shard.split(",");
      for (String paramShard : paramShards) {
        requestedShards.add(paramShard);
      }
    }

    if (clusterStateCollection.getStateFormat() > 1) {
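The behavioral change here (SOLR-11950) is small but worth spelling out: the raw parameter value used to go into the requested-shard set as a single key, so a comma-delimited value matched nothing. A minimal sketch of the new handling (standalone, values illustrative):

```java
import java.util.HashSet;
import java.util.Set;

public class ShardParamDemo {
  public static void main(String[] args) {
    String shard = "shard1,shard2"; // e.g. CLUSTERSTATUS ...&shard=shard1,shard2
    Set<String> requestedShards = new HashSet<>();
    // Before: requestedShards.add(shard) stored "shard1,shard2" as one name.
    // After: each comma-separated shard name is requested individually.
    for (String paramShard : shard.split(",")) {
      requestedShards.add(paramShard);
    }
    System.out.println(requestedShards); // [shard1, shard2]
  }
}
```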
@@ -591,10 +591,7 @@ public class ExpandComponent extends SearchComponent implements PluginInfoInitialized
        ord = -1;
      }
    } else {
      if (globalDoc > docValues.docID()) {
        docValues.advance(globalDoc);
      }
      if (globalDoc == docValues.docID()) {
      if (docValues.advanceExact(globalDoc)) {
        ord = docValues.ordValue();
      } else {
        ord = -1;

@@ -664,12 +661,8 @@ public class ExpandComponent extends SearchComponent implements PluginInfoInitialized

      @Override
      public void collect(int docId) throws IOException {
        int valuesDocID = docValues.docID();
        if (valuesDocID < docId) {
          valuesDocID = docValues.advance(docId);
        }
        long value;
        if (valuesDocID == docId) {
        if (docValues.advanceExact(docId)) {
          value = docValues.longValue();
        } else {
          value = 0;

@@ -739,6 +732,8 @@ public class ExpandComponent extends SearchComponent implements PluginInfoInitialized
          return Float.toString(Float.intBitsToFloat((int)val));
        case DOUBLE:
          return Double.toString(Double.longBitsToDouble(val));
        case DATE:
          break;
      }
    }
    throw new IllegalArgumentException("FieldType must be INT,LONG,FLOAT,DOUBLE found " + fieldType);
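The same mechanical rewrite repeats across the following hunks: the explicit docID()/advance() dance on a doc-values iterator collapses into a single advanceExact(target) call, which positions the iterator and reports in one step whether the target document has a value. A minimal side-by-side sketch (hypothetical helper class; the iterator and doc arguments stand in for the fields used in the hunks):

```java
import org.apache.lucene.index.NumericDocValues;

import java.io.IOException;

final class DocValuesIdioms {
  /** Old idiom: advance manually, then compare positions to detect a missing value. */
  static long oldIdiom(NumericDocValues values, int doc) throws IOException {
    int valuesDocID = values.docID();
    if (valuesDocID < doc) {
      valuesDocID = values.advance(doc);
    }
    return (valuesDocID == doc) ? values.longValue() : 0; // 0 when the doc has no value
  }

  /** New idiom: advanceExact() positions the iterator and tests membership in one call. */
  static long newIdiom(NumericDocValues values, int doc) throws IOException {
    return values.advanceExact(doc) ? values.longValue() : 0;
  }
}
```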
@@ -196,6 +196,9 @@ public class RangeFacetProcessor extends SimpleFacets {
          intervals.set(includeBefore ? 1 : 0, new IntervalFacets.FacetInterval(sf, range.lower, range.upper,
              range.includeLower, range.includeUpper, FacetRangeOther.BETWEEN.toString()));
          break;
        case ALL:
        case NONE:
          break;
      }
    }
    continue;
@@ -80,10 +80,10 @@ public class SortableTextField extends TextField {
      }
    }

    // by the time our init() is called, super.setArgs has already removed & procesesd any explicit
    // by the time our init() is called, super.setArgs has already removed & processed any explicit
    // "docValues=foo" or useDocValuesAsStored=bar args...
    //  - If the user explicitly said docValues=false, we want to respect that and not change it.
    //  - if the user didn't explicit specify anything, then we want to implicitly *default* docValues=true
    //  - if the user didn't explicitly specify anything, then we want to implicitly *default* docValues=true
    //  - The inverse is true for useDocValuesAsStored=true:
    //    - if explict, then respect it; else implicitly default to useDocValuesAsStored=false
    // ...lucky for us, setArgs preserved info about explicitly set true|false properties...
@@ -560,20 +560,14 @@ public class CollapsingQParserPlugin extends QParserPlugin {
      int ord = -1;
      if(this.ordinalMap != null) {
        //Handle ordinalMapping case
        if (contextDoc > segmentValues.docID()) {
          segmentValues.advance(contextDoc);
        }
        if (contextDoc == segmentValues.docID()) {
        if (segmentValues.advanceExact(contextDoc)) {
          ord = (int)segmentOrdinalMap.get(segmentValues.ordValue());
        } else {
          ord = -1;
        }
      } else {
        //Handle top Level FieldCache or Single Segment Case
        if (globalDoc > segmentValues.docID()) {
          segmentValues.advance(globalDoc);
        }
        if (globalDoc == segmentValues.docID()) {
        if (segmentValues.advanceExact(globalDoc)) {
          ord = segmentValues.ordValue();
        } else {
          ord = -1;

@@ -680,18 +674,12 @@ public class CollapsingQParserPlugin extends QParserPlugin {
      int ord = -1;
      if(this.ordinalMap != null) {
        //Handle ordinalMapping case
        if (contextDoc > segmentValues.docID()) {
          segmentValues.advance(contextDoc);
        }
        if (contextDoc == segmentValues.docID()) {
        if (segmentValues.advanceExact(contextDoc)) {
          ord = (int)segmentOrdinalMap.get(segmentValues.ordValue());
        }
      } else {
        //Handle top Level FieldCache or Single Segment Case
        if (docId > segmentValues.docID()) {
          segmentValues.advance(docId);
        }
        if (docId == segmentValues.docID()) {
        if (segmentValues.advanceExact(docId)) {
          ord = segmentValues.ordValue();
        }
      }

@@ -786,14 +774,8 @@ public class CollapsingQParserPlugin extends QParserPlugin {

    @Override
    public void collect(int contextDoc) throws IOException {

      int collapseDocID = collapseValues.docID();
      if (collapseDocID < contextDoc) {
        collapseDocID = collapseValues.advance(contextDoc);
      }

      int collapseValue;
      if (collapseDocID == contextDoc) {
      if (collapseValues.advanceExact(contextDoc)) {
        collapseValue = (int) collapseValues.longValue();
      } else {
        collapseValue = 0;

@@ -889,12 +871,8 @@ public class CollapsingQParserPlugin extends QParserPlugin {
      }

      int contextDoc = globalDoc-currentDocBase;
      int valuesDocID = collapseValues.docID();
      if (valuesDocID < contextDoc) {
        valuesDocID = collapseValues.advance(contextDoc);
      }
      int collapseValue;
      if (valuesDocID == contextDoc) {
      if (collapseValues.advanceExact(contextDoc)) {
        collapseValue = (int) collapseValues.longValue();
      } else {
        collapseValue = 0;

@@ -1015,17 +993,11 @@ public class CollapsingQParserPlugin extends QParserPlugin {
      int globalDoc = contextDoc+this.docBase;
      int ord = -1;
      if(this.ordinalMap != null) {
        if (contextDoc > segmentValues.docID()) {
          segmentValues.advance(contextDoc);
        }
        if (contextDoc == segmentValues.docID()) {
        if (segmentValues.advanceExact(contextDoc)) {
          ord = (int)segmentOrdinalMap.get(segmentValues.ordValue());
        }
      } else {
        if (globalDoc > segmentValues.docID()) {
          segmentValues.advance(globalDoc);
        }
        if (globalDoc == segmentValues.docID()) {
        if (segmentValues.advanceExact(globalDoc)) {
          ord = segmentValues.ordValue();
        }
      }

@@ -1085,18 +1057,12 @@ public class CollapsingQParserPlugin extends QParserPlugin {
      int ord = -1;
      if(this.ordinalMap != null) {
        //Handle ordinalMapping case
        if (contextDoc > segmentValues.docID()) {
          segmentValues.advance(contextDoc);
        }
        if (contextDoc == segmentValues.docID()) {
        if (segmentValues.advanceExact(contextDoc)) {
          ord = (int) segmentOrdinalMap.get(segmentValues.ordValue());
        }
      } else {
        //Handle top Level FieldCache or Single Segment Case
        if (globalDoc > segmentValues.docID()) {
          segmentValues.advance(globalDoc);
        }
        if (globalDoc == segmentValues.docID()) {
        if (segmentValues.advanceExact(globalDoc)) {
          ord = segmentValues.ordValue();
        }
      }

@@ -1197,13 +1163,8 @@ public class CollapsingQParserPlugin extends QParserPlugin {
    }

    public void collect(int contextDoc) throws IOException {
      int collapseDocID = collapseValues.docID();
      if (collapseDocID < contextDoc) {
        collapseDocID = collapseValues.advance(contextDoc);
      }

      int collapseKey;
      if (collapseDocID == contextDoc) {
      if (collapseValues.advanceExact(contextDoc)) {
        collapseKey = (int) collapseValues.longValue();
      } else {
        collapseKey = 0;

@@ -1249,13 +1210,8 @@ public class CollapsingQParserPlugin extends QParserPlugin {
      int contextDoc = globalDoc-currentDocBase;

      if(this.needsScores){
        int collapseDocID = collapseValues.docID();
        if (collapseDocID < contextDoc) {
          collapseDocID = collapseValues.advance(contextDoc);
        }

        int collapseValue;
        if (collapseDocID == contextDoc) {
        if (collapseValues.advanceExact(contextDoc)) {
          collapseValue = (int) collapseValues.longValue();
        } else {
          collapseValue = 0;

@@ -1637,13 +1593,8 @@ public class CollapsingQParserPlugin extends QParserPlugin {
        return;
      }

      int valuesDocID = minMaxValues.docID();
      if (valuesDocID < contextDoc) {
        valuesDocID = minMaxValues.advance(contextDoc);
      }

      int currentVal;
      if (valuesDocID == contextDoc) {
      if (minMaxValues.advanceExact(contextDoc)) {
        currentVal = (int) minMaxValues.longValue();
      } else {
        currentVal = 0;

@@ -1729,13 +1680,8 @@ public class CollapsingQParserPlugin extends QParserPlugin {
        return;
      }

      int valuesDocID = minMaxValues.docID();
      if (valuesDocID < contextDoc) {
        valuesDocID = minMaxValues.advance(contextDoc);
      }

      int currentMinMax;
      if (valuesDocID == contextDoc) {
      if (minMaxValues.advanceExact(contextDoc)) {
        currentMinMax = (int) minMaxValues.longValue();
      } else {
        currentMinMax = 0;

@@ -1822,13 +1768,8 @@ public class CollapsingQParserPlugin extends QParserPlugin {
        return;
      }

      int valuesDocID = minMaxVals.docID();
      if (valuesDocID < contextDoc) {
        valuesDocID = minMaxVals.advance(contextDoc);
      }

      long currentVal;
      if (valuesDocID == contextDoc) {
      if (minMaxVals.advanceExact(contextDoc)) {
        currentVal = minMaxVals.longValue();
      } else {
        currentVal = 0;

@@ -2229,13 +2170,8 @@ public class CollapsingQParserPlugin extends QParserPlugin {
        return;
      }

      int valuesDocID = minMaxVals.docID();
      if (valuesDocID < contextDoc) {
        valuesDocID = minMaxVals.advance(contextDoc);
      }

      int currentVal;
      if (valuesDocID == contextDoc) {
      if (minMaxVals.advanceExact(contextDoc)) {
        currentVal = (int) minMaxVals.longValue();
      } else {
        currentVal = 0;

@@ -2341,13 +2277,8 @@ public class CollapsingQParserPlugin extends QParserPlugin {
        return;
      }

      int valuesDocID = minMaxVals.docID();
      if (valuesDocID < contextDoc) {
        valuesDocID = minMaxVals.advance(contextDoc);
      }

      int minMaxVal;
      if (valuesDocID == contextDoc) {
      if (minMaxVals.advanceExact(contextDoc)) {
        minMaxVal = (int) minMaxVals.longValue();
      } else {
        minMaxVal = 0;
@@ -25,7 +25,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.lang.invoke.MethodHandles;
import java.util.HashSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

@@ -60,7 +60,7 @@ public class FastLRUCache<K, V> extends SolrCacheBase implements SolrCache<K,V>
  private long maxRamBytes;

  private MetricsMap cacheMap;
  private Set<String> metricNames = new HashSet<>();
  private Set<String> metricNames = ConcurrentHashMap.newKeySet();
  private MetricRegistry registry;

  @Override
@@ -125,12 +125,8 @@ public class IGainTermsQParserPlugin extends QParserPlugin {
      public void collect(int doc) throws IOException {
        super.collect(doc);
        ++count;
        int valuesDocID = leafOutcomeValue.docID();
        if (valuesDocID < doc) {
          valuesDocID = leafOutcomeValue.advance(doc);
        }
        int value;
        if (valuesDocID == doc) {
        if (leafOutcomeValue.advanceExact(doc)) {
          value = (int) leafOutcomeValue.longValue();
        } else {
          value = 0;
@@ -17,7 +17,7 @@
package org.apache.solr.search;

import java.lang.invoke.MethodHandles;
import java.util.HashSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

@@ -65,7 +65,7 @@ public class LFUCache<K, V> implements SolrCache<K, V> {
  private int showItems = 0;
  private Boolean timeDecay = true;
  private MetricsMap cacheMap;
  private Set<String> metricNames = new HashSet<>();
  private Set<String> metricNames = ConcurrentHashMap.newKeySet();
  private MetricRegistry registry;

  @Override
@@ -19,7 +19,7 @@ package org.apache.solr.search;
import java.lang.invoke.MethodHandles;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;

@@ -87,7 +87,7 @@ public class LRUCache<K,V> extends SolrCacheBase implements SolrCache<K,V>, Accountable
  private Map<K,V> map;
  private String description="LRU Cache";
  private MetricsMap cacheMap;
  private Set<String> metricNames = new HashSet<>();
  private Set<String> metricNames = ConcurrentHashMap.newKeySet();
  private MetricRegistry registry;

  private long maxRamBytes = Long.MAX_VALUE;
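The three cache classes above all swap `new HashSet<>()` for `ConcurrentHashMap.newKeySet()`, the fix for the ConcurrentModificationException noted in SOLR-11898: the new set is a view backed by a ConcurrentHashMap, so one thread may add names while another iterates a metrics snapshot. A minimal sketch of the difference (the metric names and thread setup are illustrative only):

```java
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class ConcurrentKeySetDemo {
  public static void main(String[] args) throws InterruptedException {
    // Safe for concurrent add/iterate; a plain HashSet could throw
    // ConcurrentModificationException under the same access pattern.
    Set<String> metricNames = ConcurrentHashMap.newKeySet();
    Thread writer = new Thread(() -> {
      for (int i = 0; i < 10_000; i++) {
        metricNames.add("metric-" + i);
      }
    });
    writer.start();
    long seen = metricNames.stream().count(); // weakly consistent snapshot
    writer.join();
    System.out.println("saw " + seen + " of " + metricNames.size());
  }
}
```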
@@ -79,6 +79,8 @@ public class PointMerger {
      case DOUBLE:
        seg = new DoubleSeg(pv, capacity);
        break;
      case DATE:
        break;
    }
    int count = seg.setNextValue();
    if (count >= 0) {
@@ -150,12 +150,8 @@ public class TextLogisticRegressionQParserPlugin extends QParserPlugin {
      }

      public void collect(int doc) throws IOException{
        int valuesDocID = leafOutcomeValue.docID();
        if (valuesDocID < doc) {
          valuesDocID = leafOutcomeValue.advance(doc);
        }
        int outcome;
        if (valuesDocID == doc) {
        if (leafOutcomeValue.advanceExact(doc)) {
          outcome = (int) leafOutcomeValue.longValue();
        } else {
          outcome = 0;
@@ -387,10 +387,7 @@ class FacetFieldProcessorByHashDV extends FacetFieldProcessor {

    @Override
    public void collect(int segDoc) throws IOException {
      if (segDoc > values.docID()) {
        values.advance(segDoc);
      }
      if (segDoc == values.docID()) {
      if (values.advanceExact(segDoc)) {
        long l = values.nextValue(); // This document must have at least one value
        collectValFirstPhase(segDoc, l);
        for (int i = 1; i < values.docValueCount(); i++) {

@@ -418,10 +415,7 @@ class FacetFieldProcessorByHashDV extends FacetFieldProcessor {

    @Override
    public void collect(int segDoc) throws IOException {
      if (segDoc > values.docID()) {
        values.advance(segDoc);
      }
      if (segDoc == values.docID()) {
      if (values.advanceExact(segDoc)) {
        collectValFirstPhase(segDoc, values.longValue());
      }
    }
@@ -335,10 +335,7 @@ public class MinMaxAgg extends SimpleAggValueSource {

    @Override
    public void collect(int doc, int slotNum) throws IOException {
      if (doc > subDv.docID()) {
        subDv.advance(doc);
      }
      if (doc == subDv.docID()) {
      if (subDv.advanceExact(doc)) {
        int segOrd = subDv.ordValue();
        int ord = toGlobal==null ? segOrd : (int)toGlobal.get(segOrd);
        if ((ord - slotOrd[slotNum]) * minmax < 0 || slotOrd[slotNum]==MISSING) {
@@ -71,10 +71,7 @@ class UniqueMultiDvSlotAcc extends UniqueSlotAcc {

    @Override
    public void collect(int doc, int slotNum) throws IOException {
      if (doc > subDv.docID()) {
        subDv.advance(doc);
      }
      if (doc == subDv.docID()) {
      if (subDv.advanceExact(doc)) {

        int segOrd = (int) subDv.nextOrd();
        assert segOrd >= 0;
@@ -388,6 +388,11 @@ public class SolrDispatchFilter extends BaseSolrFilter {
        case FORWARD:
          request.getRequestDispatcher(call.getPath()).forward(request, response);
          break;
        case ADMIN:
        case PROCESS:
        case REMOTEQUERY:
        case RETURN:
          break;
      }
    } finally {
      call.destroy();
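This hunk, like the UninvertingReader and XMLResponseParser hunks below, only spells out what was previously an implicit default: every remaining enum constant gets an explicit case falling through to a shared break, so the switch is exhaustive and a newly added constant surfaces as a "missing case" warning rather than silently doing nothing. A generic sketch of the pattern (the Action enum and dispatch method are hypothetical):

```java
public class ExhaustiveSwitchDemo {
  enum Action { FORWARD, ADMIN, PROCESS, REMOTEQUERY, RETURN }

  static void dispatch(Action action) {
    switch (action) {
      case FORWARD:
        System.out.println("forwarding");
        break;
      case ADMIN:          // listed explicitly instead of relying on an
      case PROCESS:        // absent default clause: adding a new constant
      case REMOTEQUERY:    // now triggers a compiler/static-analysis warning
      case RETURN:
        break;
    }
  }

  public static void main(String[] args) {
    dispatch(Action.FORWARD);
  }
}
```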
@@ -309,6 +309,14 @@ public class UninvertingReader extends FilterLeafReader {
        case LEGACY_FLOAT: return FieldCache.DEFAULT.getNumerics(in, field, FieldCache.LEGACY_FLOAT_PARSER);
        case LEGACY_LONG: return FieldCache.DEFAULT.getNumerics(in, field, FieldCache.LEGACY_LONG_PARSER);
        case LEGACY_DOUBLE: return FieldCache.DEFAULT.getNumerics(in, field, FieldCache.LEGACY_DOUBLE_PARSER);
        case BINARY:
        case SORTED:
        case SORTED_SET_BINARY:
        case SORTED_SET_DOUBLE:
        case SORTED_SET_FLOAT:
        case SORTED_SET_INTEGER:
        case SORTED_SET_LONG:
          break;
      }
    }
    return null;

@@ -359,6 +367,17 @@ public class UninvertingReader extends FilterLeafReader {
          return FieldCache.DEFAULT.getDocTermOrds(in, field, FieldCache.INT64_TERM_PREFIX);
        case SORTED_SET_BINARY:
          return FieldCache.DEFAULT.getDocTermOrds(in, field, null);
        case BINARY:
        case LEGACY_DOUBLE:
        case LEGACY_FLOAT:
        case LEGACY_INTEGER:
        case LEGACY_LONG:
        case DOUBLE_POINT:
        case FLOAT_POINT:
        case INTEGER_POINT:
        case LONG_POINT:
        case SORTED:
          break;
      }
    }
    return null;
@@ -26,7 +26,6 @@ import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClient; // jdoc
import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.ModifiableSolrParams;

@@ -279,7 +278,24 @@ public class SolrCmdDistributor implements Closeable {
    try (HttpSolrClient client = new HttpSolrClient.Builder(req.node.getUrl()).withHttpClient(clients.getHttpClient()).build()) {
      client.request(req.uReq);
    } catch (Exception e) {
      throw new SolrException(ErrorCode.SERVER_ERROR, "Failed synchronous update on shard " + req.node + " update: " + req.uReq , e);
      try {
        // if false, then the node is probably not "live" anymore
        // and we do not need to send a recovery message
        Throwable rootCause = SolrException.getRootCause(e);
        log.error("Setting up to try to start recovery on replica {}", req.node.getUrl(), rootCause);
        req.cmd.getReq().getCore().getCoreContainer().getZkController().ensureReplicaInLeaderInitiatedRecovery(
            req.cmd.getReq().getCore().getCoreContainer(),
            req.node.getCollection(),
            req.node.getShardId(),
            req.node.getNodeProps(),
            req.cmd.getReq().getCore().getCoreDescriptor(),
            false /* forcePublishState */
        );
      } catch (Exception exc) {
        Throwable setLirZnodeFailedCause = SolrException.getRootCause(exc);
        log.error("Leader failed to set replica " +
            req.node.getUrl() + " state to DOWN due to: " + setLirZnodeFailedCause, setLirZnodeFailedCause);
      }
    }

    return;
@@ -289,6 +289,30 @@
    </analyzer>
  </fieldType>


  <!-- SortableTextField generally functions exactly like TextField,
       except that it supports, and by default uses, docValues for sorting (or faceting)
       on the first 1024 characters of the original field values (which is configurable).

       This makes it a bit more useful than TextField in many situations, but the trade-off
       is that it takes up more space on disk; which is why it's not used in place of TextField
       for every fieldType in this _default schema.
  -->
  <dynamicField name="*_txt_sort" type="text_gen_sort" indexed="true" stored="true"/>
  <fieldType name="text_gen_sort" class="solr.SortableTextField" positionIncrementGap="100" multiValued="true">
    <analyzer type="index">
      <tokenizer class="solr.StandardTokenizerFactory"/>
      <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
      <filter class="solr.LowerCaseFilterFactory"/>
    </analyzer>
    <analyzer type="query">
      <tokenizer class="solr.StandardTokenizerFactory"/>
      <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
      <filter class="solr.LowerCaseFilterFactory"/>
    </analyzer>
  </fieldType>

  <!-- A text field with defaults appropriate for English: it tokenizes with StandardTokenizer,
       removes English stop words (lang/stopwords_en.txt), down cases, protects words from protwords.txt, and
       finally applies Porter's stemming. The query time analyzer also applies synonyms from synonyms.txt. -->
@@ -1,41 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;

public class SecureRandomAlgorithmTesterApp {
  public static void main(String[] args) throws NoSuchAlgorithmException {
    String algorithm = args[0];
    String method = args[1];
    int amount = Integer.valueOf(args[2]);
    SecureRandom secureRandom;
    if(algorithm.equals("default"))
      secureRandom = new SecureRandom();
    else
      secureRandom = SecureRandom.getInstance(algorithm);
    System.out.println("Algorithm:" + secureRandom.getAlgorithm());
    switch(method) {
      case "seed": secureRandom.generateSeed(amount); break;
      case "bytes": secureRandom.nextBytes(new byte[amount]); break;
      case "long": secureRandom.nextLong(); break;
      case "int": secureRandom.nextInt(); break;
      default: throw new IllegalArgumentException("Not supported random function: " + method);
    }
    System.out.println("SecureRandom function invoked");
  }
}
@@ -83,6 +83,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
    clusterStatusNoCollection();
    clusterStatusWithCollection();
    clusterStatusWithCollectionAndShard();
    clusterStatusWithCollectionAndMultipleShards();
    clusterStatusWithRouteKey();
    clusterStatusAliasTest();
    clusterStatusRolesTest();

@@ -122,6 +123,29 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
    }
  }

  private void clusterStatusWithCollectionAndMultipleShards() throws IOException, SolrServerException {
    try (CloudSolrClient client = createCloudClient(null)) {
      final CollectionAdminRequest.ClusterStatus request = new CollectionAdminRequest.ClusterStatus();
      request.setCollectionName(COLLECTION_NAME);
      request.setShardName(SHARD1 + "," + SHARD2);

      NamedList<Object> rsp = request.process(client).getResponse();
      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
      assertNotNull("Cluster state should not be null", cluster);
      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
      assertNotNull("Collections should not be null in cluster state", collections);
      assertNotNull(collections.get(COLLECTION_NAME));
      assertEquals(1, collections.size());
      Map<String, Object> collection = (Map<String, Object>) collections.get(COLLECTION_NAME);
      Map<String, Object> shardStatus = (Map<String, Object>) collection.get("shards");
      assertEquals(2, shardStatus.size());
      Map<String, Object> firstSelectedShardStatus = (Map<String, Object>) shardStatus.get(SHARD1);
      assertNotNull(firstSelectedShardStatus);
      Map<String, Object> secondSelectedShardStatus = (Map<String, Object>) shardStatus.get(SHARD2);
      assertNotNull(secondSelectedShardStatus);
    }
  }


  private void listCollection() throws IOException, SolrServerException {
    try (CloudSolrClient client = createCloudClient(null)) {
@@ -207,6 +207,7 @@ public class TestCollapseQParserPlugin extends SolrTestCaseJ4 {
  }

  @Test
  @AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/SOLR-11974")
  public void testStringCollapse() throws Exception {
    for (final String hint : new String[] {"", " hint="+CollapsingQParserPlugin.HINT_TOP_FC}) {
      testCollapseQueries("group_s", hint, false);
@@ -693,12 +693,8 @@ public class TestRankQueryPlugin extends QParserPlugin {
      public void setScorer(Scorer scorer) throws IOException {}

      public void collect(int doc) throws IOException {
        int valuesDocID = values.docID();
        if (valuesDocID < doc) {
          valuesDocID = values.advance(doc);
        }
        long value;
        if (valuesDocID == doc) {
        if (values.advanceExact(doc)) {
          value = values.longValue();
        } else {
          value = 0;
@@ -289,6 +289,30 @@
    </analyzer>
  </fieldType>


  <!-- SortableTextField generally functions exactly like TextField,
       except that it supports, and by default uses, docValues for sorting (or faceting)
       on the first 1024 characters of the original field values (which is configurable).

       This makes it a bit more useful than TextField in many situations, but the trade-off
       is that it takes up more space on disk; which is why it's not used in place of TextField
       for every fieldType in this _default schema.
  -->
  <dynamicField name="*_txt_sort" type="text_gen_sort" indexed="true" stored="true"/>
  <fieldType name="text_gen_sort" class="solr.SortableTextField" positionIncrementGap="100" multiValued="true">
    <analyzer type="index">
      <tokenizer class="solr.StandardTokenizerFactory"/>
      <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
      <filter class="solr.LowerCaseFilterFactory"/>
    </analyzer>
    <analyzer type="query">
      <tokenizer class="solr.StandardTokenizerFactory"/>
      <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
      <filter class="solr.LowerCaseFilterFactory"/>
    </analyzer>
  </fieldType>

  <!-- A text field with defaults appropriate for English: it tokenizes with StandardTokenizer,
       removes English stop words (lang/stopwords_en.txt), down cases, protects words from protwords.txt, and
       finally applies Porter's stemming. The query time analyzer also applies synonyms from synonyms.txt. -->
@@ -134,7 +134,7 @@
  <field name="pre" type="preanalyzed" indexed="true" stored="true"/>
  <field name="sku" type="text_en_splitting_tight" indexed="true" stored="true" omitNorms="true"/>
  <field name="name" type="text_general" indexed="true" stored="true"/>
  <field name="manu" type="text_general" indexed="true" stored="true" omitNorms="true"/>
  <field name="manu" type="text_gen_sort" indexed="true" stored="true" omitNorms="true" multiValued="false"/>
  <field name="cat" type="string" indexed="true" stored="true" multiValued="true"/>
  <field name="features" type="text_general" indexed="true" stored="true" multiValued="true"/>
  <field name="includes" type="text_general" indexed="true" stored="true" termVectors="true" termPositions="true" termOffsets="true" />
@@ -154,11 +154,11 @@
       "content_type": From the HTTP headers of incoming stream
       "resourcename": From SolrCell request param resource.name
  -->
  <field name="title" type="text_general" indexed="true" stored="true" multiValued="true"/>
  <field name="subject" type="text_general" indexed="true" stored="true"/>
  <field name="title" type="text_gen_sort" indexed="true" stored="true" multiValued="true"/>
  <field name="subject" type="text_gen_sort" indexed="true" stored="true" multiValued="false"/>
  <field name="description" type="text_general" indexed="true" stored="true"/>
  <field name="comments" type="text_general" indexed="true" stored="true"/>
  <field name="author" type="text_general" indexed="true" stored="true"/>
  <field name="author" type="text_gen_sort" indexed="true" stored="true" multiValued="false"/>
  <field name="keywords" type="text_general" indexed="true" stored="true"/>
  <field name="category" type="text_general" indexed="true" stored="true"/>
  <field name="resourcename" type="text_general" indexed="true" stored="true"/>
@@ -424,6 +424,28 @@
    </analyzer>
  </fieldType>

  <!-- SortableTextField generally functions exactly like TextField,
       except that it supports, and by default uses, docValues for sorting (or faceting)
       on the first 1024 characters of the original field values (which is configurable).

       This makes it a bit more useful than TextField in many situations, but the trade-off
       is that it takes up more space on disk; which is why it's not used in place of TextField
       for every fieldType in this _default schema.
  -->
  <fieldType name="text_gen_sort" class="solr.SortableTextField" positionIncrementGap="100" multiValued="true">
    <analyzer type="index">
      <tokenizer class="solr.StandardTokenizerFactory"/>
      <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
      <filter class="solr.LowerCaseFilterFactory"/>
    </analyzer>
    <analyzer type="query">
      <tokenizer class="solr.StandardTokenizerFactory"/>
      <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
      <filter class="solr.LowerCaseFilterFactory"/>
    </analyzer>
  </fieldType>

  <!-- A text field with defaults appropriate for English: it
       tokenizes with StandardTokenizer, removes English stop words
       (lang/stopwords_en.txt), down cases, protects words from protwords.txt, and
@@ -252,6 +252,15 @@ public class XMLResponseParser extends ResponseParser
        case ARR: nl.add( name, readArray( parser ) ); depth--; continue;
        case RESULT: nl.add( name, readDocuments( parser ) ); depth--; continue;
        case DOC: nl.add( name, readDocument( parser ) ); depth--; continue;
        case BOOL:
        case DATE:
        case DOUBLE:
        case FLOAT:
        case INT:
        case LONG:
        case NULL:
        case STR:
          break;
      }
      throw new XMLStreamException( "branch element not handled!", parser.getLocation() );
    }

@@ -316,6 +325,15 @@ public class XMLResponseParser extends ResponseParser
        case ARR: vals.add( readArray( parser ) ); depth--; continue;
        case RESULT: vals.add( readDocuments( parser ) ); depth--; continue;
        case DOC: vals.add( readDocument( parser ) ); depth--; continue;
        case BOOL:
        case DATE:
        case DOUBLE:
        case FLOAT:
        case INT:
        case LONG:
        case NULL:
        case STR:
          break;
      }
      throw new XMLStreamException( "branch element not handled!", parser.getLocation() );
    }
@@ -37,7 +37,7 @@
    </ant>
  </target>

  <target name="compile-core" depends="resolve, compile-solr-core, compile-test-framework">
  <target name="compile-core" depends="resolve, clover, compile-solr-core, compile-test-framework">
    <!-- TODO: why does test-framework override compile-core to use this special classpath? -->
    <compile srcdir="${src.dir}" destdir="${build.dir}/classes/java">
      <classpath refid="test.base.classpath"/>

@@ -49,9 +49,6 @@
    </copy>
  </target>

  <!-- redefine the clover setup, because we dont want to run clover for the test-framework -->
  <target name="-clover.setup" if="run.clover"/>

  <!-- redefine the forbidden apis for tests, as we check ourselves -->
  <target name="-check-forbidden-tests" depends="-init-forbidden-apis,compile-core">
    <forbidden-apis suppressAnnotation="**.SuppressForbidden" signaturesFile="${common.dir}/tools/forbiddenApis/tests.txt" classpathref="forbidden-apis.allclasses.classpath">