mirror of
https://github.com/apache/archiva.git
synced 2025-02-06 18:19:02 +00:00
Search now returns good results. We removed content indexing and instead added specific fields to search. The query now uses AND rather than the QueryParser's default OR, narrowing results Google-style.
git-svn-id: https://svn.apache.org/repos/asf/archiva/branches/archiva-search-improvements@723612 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent
2d3c706e11
commit
d53eab3b32
@ -159,10 +159,8 @@ public void processFile( String path )
|
||||
FileContentRecord record = new FileContentRecord();
|
||||
try
|
||||
{
|
||||
File file = new File( repositoryDir, path );
|
||||
record.setRepositoryId( this.repository.getId() );
|
||||
record.setFilename( path );
|
||||
record.setContents( FileUtils.readFileToString( file, null ) );
|
||||
|
||||
// Test for possible artifact reference syntax.
|
||||
try
|
||||
@ -179,10 +177,6 @@ public void processFile( String path )
|
||||
|
||||
index.modifyRecord( record );
|
||||
}
|
||||
catch ( IOException e )
|
||||
{
|
||||
triggerConsumerError( READ_CONTENT, "Unable to read file contents: " + e.getMessage() );
|
||||
}
|
||||
catch ( RepositoryIndexException e )
|
||||
{
|
||||
triggerConsumerError( INDEX_ERROR, "Unable to index file contents: " + e.getMessage() );
|
||||
|
@ -23,6 +23,8 @@
|
||||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.analysis.standard.StandardAnalyzer;
|
||||
import org.apache.maven.archiva.indexer.lucene.analyzers.FilenamesTokenizer;
|
||||
import org.apache.maven.archiva.indexer.lucene.analyzers.ArtifactIdTokenizer;
|
||||
import org.apache.maven.archiva.indexer.lucene.analyzers.GroupIdTokenizer;
|
||||
|
||||
import java.io.Reader;
|
||||
|
||||
@ -42,6 +44,16 @@ public TokenStream tokenStream( String field, Reader reader )
|
||||
return new FilenamesTokenizer( reader );
|
||||
}
|
||||
|
||||
if ( FileContentKeys.ARTIFACTID.equals( field ))
|
||||
{
|
||||
return new ArtifactIdTokenizer(reader);
|
||||
}
|
||||
|
||||
if ( FileContentKeys.GROUPID.equals( field ) )
|
||||
{
|
||||
return new GroupIdTokenizer(reader);
|
||||
}
|
||||
|
||||
return STANDARD.tokenStream( field, reader );
|
||||
}
|
||||
}
|
||||
|
@ -37,7 +37,6 @@
|
||||
public class FileContentConverter
|
||||
implements LuceneEntryConverter
|
||||
{
|
||||
|
||||
public Document convert( LuceneRepositoryContentRecord record )
|
||||
{
|
||||
if ( !( record instanceof FileContentRecord ) )
|
||||
@ -62,9 +61,8 @@ public Document convert( LuceneRepositoryContentRecord record )
|
||||
doc.addFieldTokenized( ArtifactKeys.TYPE, filecontent.getArtifact().getType() );
|
||||
doc.addFieldUntokenized( ArtifactKeys.CLASSIFIER, filecontent.getArtifact().getClassifier() );
|
||||
}
|
||||
|
||||
|
||||
doc.addFieldTokenized( FileContentKeys.FILENAME, filecontent.getFilename() );
|
||||
doc.addFieldTokenized( FileContentKeys.CONTENT, filecontent.getContents() );
|
||||
|
||||
return doc.getDocument();
|
||||
}
|
||||
@ -91,7 +89,6 @@ public LuceneRepositoryContentRecord convert( Document document )
|
||||
|
||||
// Filecontent Specifics
|
||||
record.setFilename( document.get( FileContentKeys.FILENAME ) );
|
||||
record.setContents( document.get( FileContentKeys.CONTENT ) );
|
||||
|
||||
return record;
|
||||
}
|
||||
|
@ -43,8 +43,17 @@ public FileContentHandlers()
|
||||
{
|
||||
analyzer = new FileContentAnalyzer();
|
||||
converter = new FileContentConverter();
|
||||
queryParser = new MultiFieldQueryParser( new String[] { FileContentKeys.FILENAME, FileContentKeys.CONTENT },
|
||||
analyzer );
|
||||
queryParser = new MultiFieldQueryParser( new String[] {
|
||||
FileContentKeys.FILENAME,
|
||||
FileContentKeys.ARTIFACTID,
|
||||
FileContentKeys.GROUPID,
|
||||
FileContentKeys.ARTIFACTID_EXACT,
|
||||
FileContentKeys.GROUPID_EXACT,
|
||||
FileContentKeys.VERSION,
|
||||
FileContentKeys.VERSION_EXACT},
|
||||
analyzer );
|
||||
//We prefer the narrowing approach to search results.
|
||||
queryParser.setDefaultOperator(MultiFieldQueryParser.Operator.AND);
|
||||
}
|
||||
|
||||
public String getId()
|
||||
|
@ -32,6 +32,4 @@ public class FileContentKeys
|
||||
public static final String ID = "filecontent";
|
||||
|
||||
public static final String FILENAME = "filename";
|
||||
|
||||
public static final String CONTENT = "content";
|
||||
}
|
||||
|
@ -39,8 +39,6 @@ public class FileContentRecord
|
||||
*/
|
||||
private ArchivaArtifact artifact;
|
||||
|
||||
private String contents;
|
||||
|
||||
public String getRepositoryId()
|
||||
{
|
||||
return repositoryId;
|
||||
@ -51,16 +49,6 @@ public void setRepositoryId( String repositoryId )
|
||||
this.repositoryId = repositoryId;
|
||||
}
|
||||
|
||||
public String getContents()
|
||||
{
|
||||
return contents;
|
||||
}
|
||||
|
||||
public void setContents( String contents )
|
||||
{
|
||||
this.contents = contents;
|
||||
}
|
||||
|
||||
public String getPrimaryKey()
|
||||
{
|
||||
return repositoryId + ":" + filename;
|
||||
|
@ -0,0 +1,45 @@
|
||||
package org.apache.maven.archiva.indexer.lucene.analyzers;
|
||||
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
import java.io.Reader;
|
||||
import org.apache.lucene.analysis.CharTokenizer;
|
||||
|
||||
/**
|
||||
* Lucene Tokenizer for {@link ArtifactKeys#ARTIFACTID} fields.
|
||||
*/
|
||||
public class ArtifactIdTokenizer extends CharTokenizer
|
||||
{
|
||||
public ArtifactIdTokenizer( Reader reader )
|
||||
{
|
||||
super( reader );
|
||||
}
|
||||
|
||||
/**
|
||||
* Break on "-" for "atlassian-plugins-core"
|
||||
* @param c
|
||||
* @return
|
||||
*/
|
||||
@Override
|
||||
protected boolean isTokenChar(char c)
|
||||
{
|
||||
return (c != '-');
|
||||
}
|
||||
}
|
@ -152,7 +152,6 @@ public void testQuickSearchArtifactBytecodeSearch()
|
||||
FileContentRecord record = new FileContentRecord();
|
||||
record.setRepositoryId( "repo1.mirror" );
|
||||
record.setArtifact( artifact );
|
||||
record.setContents( "org.apache.archiva:archiva-test:1.0:jar org.apache.archiva.test.MyClassName" );
|
||||
record.setFilename( "archiva-test-1.0.jar" );
|
||||
|
||||
results.addHit( record );
|
||||
@ -198,7 +197,6 @@ public void testQuickSearchArtifactRegularSearch()
|
||||
FileContentRecord record = new FileContentRecord();
|
||||
record.setRepositoryId( "repo1.mirror" );
|
||||
record.setArtifact( artifact );
|
||||
record.setContents( "org.apache.archiva:archiva-test:1.0:jar" );
|
||||
record.setFilename( "archiva-test-1.0.jar" );
|
||||
|
||||
results.addHit( record );
|
||||
|
Loading…
x
Reference in New Issue
Block a user