Try to fix travis

parent 54a63ba8d7
commit e176d9a4b7
.travis.yml
@@ -6,7 +6,7 @@ jdk:
 - oraclejdk8
 env:
   global:
-  - MAVEN_OPTS="-XX:MaxPermSize=512m -Xmx2g"
+  - MAVEN_OPTS="-Xmx1500m"
 
 cache:
   directories:
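Note: on Java 8 the permanent generation no longer exists, so -XX:MaxPermSize is ignored (the JVM only prints a warning about the unrecognized flag); dropping it loses nothing, and the smaller -Xmx presumably keeps the Maven build within the memory limits of the Travis CI build environment.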
@@ -36,7 +36,6 @@ import javax.persistence.PersistenceContextType;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.lang3.Validate;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.highlight.Formatter;
 import org.apache.lucene.search.highlight.Highlighter;
@@ -53,11 +52,13 @@ import org.springframework.transaction.annotation.Transactional;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 
+import ca.uhn.fhir.jpa.entity.ResourceIndexedSearchParamString;
 import ca.uhn.fhir.jpa.entity.ResourceTable;
 import ca.uhn.fhir.model.api.IQueryParameterType;
 import ca.uhn.fhir.model.dstu.resource.BaseResource;
 import ca.uhn.fhir.model.primitive.IdDt;
 import ca.uhn.fhir.rest.param.StringParam;
+import ca.uhn.fhir.rest.param.TokenParam;
 import ca.uhn.fhir.rest.server.Constants;
 import ca.uhn.fhir.rest.server.exceptions.InternalErrorException;
 import ca.uhn.fhir.rest.server.exceptions.InvalidRequestException;
@@ -68,11 +69,11 @@ public class FhirSearchDao extends BaseHapiFhirDao<IBaseResource> implements ISe
 	@PersistenceContext(type = PersistenceContextType.TRANSACTION)
 	private EntityManager myEntityManager;
 
-	private void addTextSearch(QueryBuilder qb, BooleanJunction<?> bool, List<List<? extends IQueryParameterType>> contentAndTerms, String field) {
-		if (contentAndTerms == null) {
+	private void addTextSearch(QueryBuilder theQueryBuilder, BooleanJunction<?> theBoolean, List<List<? extends IQueryParameterType>> theTerms, String theFieldName) {
+		if (theTerms == null) {
 			return;
 		}
-		for (List<? extends IQueryParameterType> nextAnd : contentAndTerms) {
+		for (List<? extends IQueryParameterType> nextAnd : theTerms) {
 			Set<String> terms = new HashSet<String>();
 			for (IQueryParameterType nextOr : nextAnd) {
 				StringParam nextOrString = (StringParam) nextOr;
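Note: the parameter renames (qb → theQueryBuilder, bool → theBoolean, contentAndTerms → theTerms, field → theFieldName) appear to bring addTextSearch() in line with the theXxx argument-naming convention that doSearch() below already follows.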
@@ -83,21 +84,46 @@ public class FhirSearchDao extends BaseHapiFhirDao<IBaseResource> implements ISe
 			}
 			if (terms.isEmpty() == false) {
 				String joinedTerms = StringUtils.join(terms, ' ');
-				bool.must(qb.keyword().onField(field).matching(joinedTerms).createQuery());
+				theBoolean.must(theQueryBuilder.keyword().onField(theFieldName).matching(joinedTerms).createQuery());
 			}
 		}
 	}
 
 	private List<Long> doSearch(String theResourceName, SearchParameterMap theParams, Long theReferencingPid) {
 		FullTextEntityManager em = org.hibernate.search.jpa.Search.getFullTextEntityManager(myEntityManager);
 
+		/*
+		 * Handle textual params
+		 */
+		for (String nextParamName : theParams.keySet()) {
+			for (List<? extends IQueryParameterType> nextAndList : theParams.get(nextParamName)) {
+				for (Iterator<? extends IQueryParameterType> orIterator = nextAndList.iterator(); orIterator.hasNext();) {
+					IQueryParameterType nextParam = orIterator.next();
+					if (nextParam instanceof TokenParam) {
+						TokenParam nextTokenParam = (TokenParam) nextParam;
+						if (nextTokenParam.isText()) {
+							QueryBuilder qb = em.getSearchFactory().buildQueryBuilder().forEntity(ResourceIndexedSearchParamString.class).get();
+							BooleanJunction<?> bool = qb.bool();
+
+							bool.must(qb.keyword().onField("myParamsString").matching(nextTokenParam.getValue()).createQuery());
+						}
+					}
+				}
+			}
+		}
+
 		QueryBuilder qb = em.getSearchFactory().buildQueryBuilder().forEntity(ResourceTable.class).get();
 
 		BooleanJunction<?> bool = qb.bool();
 
+		/*
+		 * Handle _content parameter (resource body content)
+		 */
 		List<List<? extends IQueryParameterType>> contentAndTerms = theParams.remove(Constants.PARAM_CONTENT);
 		addTextSearch(qb, bool, contentAndTerms, "myContentText");
 
+		/*
+		 * Handle _text parameter (resource narrative content)
+		 */
 		List<List<? extends IQueryParameterType>> textAndTerms = theParams.remove(Constants.PARAM_TEXT);
 		addTextSearch(qb, bool, textAndTerms, "myNarrativeText");
 
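For context, doSearch() follows the Hibernate Search JPA query-DSL pattern: wrap the JPA EntityManager in a FullTextEntityManager, obtain a QueryBuilder for an indexed entity, AND clauses together in a BooleanJunction, and execute the resulting Lucene query as a JPA query. A minimal self-contained sketch of that pattern, assuming a hypothetical indexed entity MyEntity with one analyzed field myText (both names invented for illustration):

import java.util.List;

import javax.persistence.Entity;
import javax.persistence.EntityManager;
import javax.persistence.Id;

import org.hibernate.search.annotations.Analyze;
import org.hibernate.search.annotations.Field;
import org.hibernate.search.annotations.Indexed;
import org.hibernate.search.jpa.FullTextEntityManager;
import org.hibernate.search.jpa.Search;
import org.hibernate.search.query.dsl.BooleanJunction;
import org.hibernate.search.query.dsl.QueryBuilder;

@Entity
@Indexed
class MyEntity { // hypothetical indexed entity, for illustration only
	@Id
	Long id;

	@Field(analyze = Analyze.YES)
	String myText;
}

class FullTextSearchSketch {
	@SuppressWarnings("unchecked")
	static List<MyEntity> search(EntityManager theEntityManager, String theTerms) {
		// Wrap the JPA EntityManager, as doSearch() does above
		FullTextEntityManager ftem = Search.getFullTextEntityManager(theEntityManager);

		// Build a query against the indexed fields of the entity
		QueryBuilder qb = ftem.getSearchFactory().buildQueryBuilder().forEntity(MyEntity.class).get();
		BooleanJunction<?> bool = qb.bool();

		// Each must() clause ANDs another match requirement into the junction
		bool.must(qb.keyword().onField("myText").matching(theTerms).createQuery());

		// Execute the assembled Lucene query through JPA
		return ftem.createFullTextQuery(bool.createQuery(), MyEntity.class).getResultList();
	}
}

The diff above applies that same chain twice: once against ResourceIndexedSearchParamString for token parameters flagged as text, and once against ResourceTable for the _content and _text parameters.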
@@ -32,15 +32,79 @@ import org.apache.commons.lang3.builder.EqualsBuilder;
 import org.apache.commons.lang3.builder.HashCodeBuilder;
 import org.apache.commons.lang3.builder.ToStringBuilder;
 import org.apache.commons.lang3.builder.ToStringStyle;
+import org.apache.lucene.analysis.core.LowerCaseFilterFactory;
+import org.apache.lucene.analysis.core.StopFilterFactory;
+import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilterFactory;
+import org.apache.lucene.analysis.ngram.EdgeNGramFilterFactory;
+import org.apache.lucene.analysis.ngram.NGramFilterFactory;
+import org.apache.lucene.analysis.pattern.PatternTokenizerFactory;
+import org.apache.lucene.analysis.phonetic.PhoneticFilterFactory;
+import org.apache.lucene.analysis.snowball.SnowballPorterFilterFactory;
+import org.apache.lucene.analysis.standard.StandardFilterFactory;
+import org.apache.lucene.analysis.standard.StandardTokenizerFactory;
+import org.hibernate.search.annotations.Analyze;
+import org.hibernate.search.annotations.Analyzer;
+import org.hibernate.search.annotations.AnalyzerDef;
+import org.hibernate.search.annotations.AnalyzerDefs;
+import org.hibernate.search.annotations.ContainedIn;
+import org.hibernate.search.annotations.Field;
+import org.hibernate.search.annotations.Fields;
+import org.hibernate.search.annotations.Parameter;
+import org.hibernate.search.annotations.Store;
+import org.hibernate.search.annotations.TokenFilterDef;
+import org.hibernate.search.annotations.TokenizerDef;
 
 //@formatter:off
 @Embeddable
 @Entity
 @Table(name = "HFJ_SPIDX_STRING"/* , indexes= {@Index(name="IDX_SP_STRING", columnList="SP_VALUE_NORMALIZED")} */)
 @org.hibernate.annotations.Table(appliesTo = "HFJ_SPIDX_STRING", indexes = {
 	@org.hibernate.annotations.Index(name = "IDX_SP_STRING", columnNames = { "RES_TYPE", "SP_NAME", "SP_VALUE_NORMALIZED" })
 })
+@AnalyzerDefs({
+	@AnalyzerDef(name = "autocompleteEdgeAnalyzer",
+		tokenizer = @TokenizerDef(factory = PatternTokenizerFactory.class, params= {
+			@Parameter(name="pattern", value="(.*)"),
+			@Parameter(name="group", value="1")
+		}),
+		filters = {
+			@TokenFilterDef(factory = LowerCaseFilterFactory.class),
+			@TokenFilterDef(factory = StopFilterFactory.class),
+			@TokenFilterDef(factory = EdgeNGramFilterFactory.class, params = {
+				@Parameter(name = "minGramSize", value = "3"),
+				@Parameter(name = "maxGramSize", value = "50")
+			}),
+		}),
+	@AnalyzerDef(name = "autocompletePhoneticAnalyzer",
+		tokenizer = @TokenizerDef(factory=StandardTokenizerFactory.class),
+		filters = {
+			@TokenFilterDef(factory=StandardFilterFactory.class),
+			@TokenFilterDef(factory=StopFilterFactory.class),
+			@TokenFilterDef(factory=PhoneticFilterFactory.class, params = {
+				@Parameter(name="encoder", value="DoubleMetaphone")
+			}),
+			@TokenFilterDef(factory=SnowballPorterFilterFactory.class, params = {
+				@Parameter(name="language", value="English")
+			})
+		}),
+	@AnalyzerDef(name = "autocompleteNGramAnalyzer",
+		tokenizer = @TokenizerDef(factory = StandardTokenizerFactory.class),
+		filters = {
+			@TokenFilterDef(factory = WordDelimiterFilterFactory.class),
+			@TokenFilterDef(factory = LowerCaseFilterFactory.class),
+			@TokenFilterDef(factory = NGramFilterFactory.class, params = {
+				@Parameter(name = "minGramSize", value = "3"),
+				@Parameter(name = "maxGramSize", value = "20")
+			}),
+		}),
+	@AnalyzerDef(name = "standardAnalyzer",
+		tokenizer = @TokenizerDef(factory = StandardTokenizerFactory.class),
+		filters = {
+			@TokenFilterDef(factory = LowerCaseFilterFactory.class),
+		}) // Def
+}
+)
 //@formatter:on
 public class ResourceIndexedSearchParamString extends BaseResourceIndexedSearchParam {
 
 	/*
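Taken together, these definitions index a string several ways for type-ahead and fuzzy matching: autocompleteEdgeAnalyzer keeps the whole value as a single token (the "(.*)" pattern with group 1), lower-cases it, and emits leading edge n-grams of 3 to 50 characters, so "john" is indexed as "joh" and "john"; autocompleteNGramAnalyzer splits into words and emits all 3-to-20-character n-grams of each word; autocompletePhoneticAnalyzer indexes DoubleMetaphone codes plus Snowball stems, so "Jon" and "John" can match phonetically; and standardAnalyzer is a plain tokenized, lower-cased index for ordinary keyword matching.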
@@ -51,6 +115,12 @@ public class ResourceIndexedSearchParamString extends BaseResourceIndexedSearchP
 	private static final long serialVersionUID = 1L;
 
 	@Column(name = "SP_VALUE_EXACT", length = MAX_LENGTH, nullable = true)
+	@Fields({
+		@Field(name = "myValueText", index = org.hibernate.search.annotations.Index.YES, store = Store.YES, analyze = Analyze.YES, analyzer = @Analyzer(definition = "standardAnalyzer")),
+		@Field(name = "myValueTextEdgeNGram", index = org.hibernate.search.annotations.Index.YES, store = Store.NO, analyze = Analyze.YES, analyzer = @Analyzer(definition = "autocompleteEdgeAnalyzer")),
+		@Field(name = "myValueTextNGram", index = org.hibernate.search.annotations.Index.YES, store = Store.NO, analyze = Analyze.YES, analyzer = @Analyzer(definition = "autocompleteNGramAnalyzer")),
+		@Field(name = "myValueTextPhonetic", index = org.hibernate.search.annotations.Index.YES, store = Store.NO, analyze = Analyze.YES, analyzer = @Analyzer(definition = "autocompletePhoneticAnalyzer"))
+	})
 	private String myValueExact;
 
 	@Column(name = "SP_VALUE_NORMALIZED", length = MAX_LENGTH, nullable = true)
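A sketch of how these per-analyzer fields could be targeted at query time, reusing the QueryBuilder pattern from FhirSearchDao above (the field names come from the @Fields annotation just shown; the wrapper method is invented for illustration):

import org.apache.lucene.search.Query;
import org.hibernate.search.jpa.FullTextEntityManager;
import org.hibernate.search.query.dsl.QueryBuilder;

import ca.uhn.fhir.jpa.entity.ResourceIndexedSearchParamString;

class AutocompleteQuerySketch {
	// Match a type-ahead prefix against the edge n-gram field, and let the
	// phonetic field also catch "sounds-like" spellings of the same input.
	static Query buildAutocompleteQuery(FullTextEntityManager theFtem, String thePrefix) {
		QueryBuilder qb = theFtem.getSearchFactory().buildQueryBuilder()
				.forEntity(ResourceIndexedSearchParamString.class).get();
		return qb.keyword()
				.onField("myValueTextEdgeNGram") // analyzed with autocompleteEdgeAnalyzer
				.andField("myValueTextPhonetic") // analyzed with autocompletePhoneticAnalyzer
				.matching(thePrefix)
				.createQuery();
	}
}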
@@ -48,7 +48,6 @@ import org.apache.lucene.analysis.ngram.EdgeNGramFilterFactory;
 import org.apache.lucene.analysis.ngram.NGramFilterFactory;
 import org.apache.lucene.analysis.pattern.PatternTokenizerFactory;
 import org.apache.lucene.analysis.phonetic.PhoneticFilterFactory;
-import org.apache.lucene.analysis.shingle.ShingleFilterFactory;
 import org.apache.lucene.analysis.snowball.SnowballPorterFilterFactory;
 import org.apache.lucene.analysis.standard.StandardFilterFactory;
 import org.apache.lucene.analysis.standard.StandardTokenizerFactory;
@@ -86,11 +85,6 @@ import ca.uhn.fhir.rest.server.exceptions.UnprocessableEntityException;
 			@Parameter(name="group", value="1")
 		}),
 		filters = {
-			// @TokenFilterDef(factory = PatternReplaceFilterFactory.class, params = {
-			// @Parameter(name = "pattern",value = "([^a-zA-Z0-9\\.])"),
-			// @Parameter(name = "replacement", value = " "),
-			// @Parameter(name = "replace", value = "all")
-			// }),
 			@TokenFilterDef(factory = LowerCaseFilterFactory.class),
 			@TokenFilterDef(factory = StopFilterFactory.class),
 			@TokenFilterDef(factory = EdgeNGramFilterFactory.class, params = {
@@ -104,7 +98,7 @@ import ca.uhn.fhir.rest.server.exceptions.UnprocessableEntityException;
 			@TokenFilterDef(factory=StandardFilterFactory.class),
 			@TokenFilterDef(factory=StopFilterFactory.class),
 			@TokenFilterDef(factory=PhoneticFilterFactory.class, params = {
-				@Parameter(name="encoder", value="DoubleMetaphone")
+				@Parameter(name="encoder", value="DoubleMetaphone")
 			}),
 			@TokenFilterDef(factory=SnowballPorterFilterFactory.class, params = {
 				@Parameter(name="language", value="English")
@@ -119,22 +113,11 @@ import ca.uhn.fhir.rest.server.exceptions.UnprocessableEntityException;
 				@Parameter(name = "minGramSize", value = "3"),
 				@Parameter(name = "maxGramSize", value = "20")
 			}),
-			// @TokenFilterDef(factory = PatternReplaceFilterFactory.class, params = {
-			// @Parameter(name = "pattern",value = "([^a-zA-Z0-9\\.])"),
-			// @Parameter(name = "replacement", value = " "),
-			// @Parameter(name = "replace", value = "all")
-			// })
 		}),
 	@AnalyzerDef(name = "standardAnalyzer",
 		tokenizer = @TokenizerDef(factory = StandardTokenizerFactory.class),
 		filters = {
-			// @TokenFilterDef(factory = WordDelimiterFilterFactory.class),
 			@TokenFilterDef(factory = LowerCaseFilterFactory.class),
-			// @TokenFilterDef(factory = PatternReplaceFilterFactory.class, params = {
-			// @Parameter(name = "pattern", value = "([^a-zA-Z0-9\\.])"),
-			// @Parameter(name = "replacement", value = " "),
-			// @Parameter(name = "replace", value = "all")
-			// })
 		}) // Def
 	}
 )