Merge branch 'master' into 2975-attribution
commit 86ab77446e
@@ -282,7 +282,7 @@ public abstract class BaseApp {
    }

    private Optional<BaseCommand> parseCommand(String[] theArgs) {
-      Optional<BaseCommand> commandOpt = getNextCommand(theArgs);
+      Optional<BaseCommand> commandOpt = getNextCommand(theArgs, 0);

       if (! commandOpt.isPresent()) {
          String message = "Unrecognized command: " + ansi().bold().fg(Ansi.Color.RED) + theArgs[0] + ansi().boldOff().fg(Ansi.Color.WHITE);
@@ -294,8 +294,8 @@ public abstract class BaseApp {
       return commandOpt;
    }

-   private Optional<BaseCommand> getNextCommand(String[] theArgs) {
-      return ourCommands.stream().filter(cmd -> cmd.getCommandName().equals(theArgs[0])).findFirst();
+   private Optional<BaseCommand> getNextCommand(String[] theArgs, int thePosition) {
+      return ourCommands.stream().filter(cmd -> cmd.getCommandName().equals(theArgs[thePosition])).findFirst();
    }

    private void processHelp(String[] theArgs) {
@@ -303,7 +303,7 @@ public abstract class BaseApp {
          logUsage();
          return;
       }
-      Optional<BaseCommand> commandOpt = getNextCommand(theArgs);
+      Optional<BaseCommand> commandOpt = getNextCommand(theArgs, 1);
       if (! commandOpt.isPresent()) {
          String message = "Unknown command: " + theArgs[1];
          System.err.println(message);
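Note (illustrative, not part of the diff): the three hunks above thread a position index through the command lookup, so `smileutil help <command>` resolves the command name at index 1 instead of always reading index 0. A minimal, self-contained sketch of that lookup behaviour, using placeholder command names rather than the real HAPI command classes:

```java
import java.util.List;
import java.util.Optional;

// Hypothetical stand-in for BaseApp's command lookup; only the indexing logic is shown.
public class CommandLookupSketch {

   private static final List<String> COMMANDS = List.of("create-package", "upload-terminology");

   // Mirrors getNextCommand(String[] theArgs, int thePosition): match the token at the given index.
   static Optional<String> getNextCommand(String[] args, int position) {
      if (position >= args.length) {
         return Optional.empty();
      }
      return COMMANDS.stream().filter(cmd -> cmd.equals(args[position])).findFirst();
   }

   public static void main(String[] args) {
      // "smileutil create-package ..." -> the command is the first token (index 0)
      System.out.println(getNextCommand(new String[]{"create-package"}, 0));         // Optional[create-package]
      // "smileutil help create-package" -> the command follows "help" (index 1)
      System.out.println(getNextCommand(new String[]{"help", "create-package"}, 1)); // Optional[create-package]
   }
}
```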
@@ -0,0 +1,31 @@
+package ca.uhn.fhir.cli;
+
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+
+public class BaseAppTest {
+
+   private final PrintStream standardOut = System.out;
+   private final ByteArrayOutputStream outputStreamCaptor = new ByteArrayOutputStream();
+
+   @BeforeEach
+   public void setUp() {
+      System.setOut(new PrintStream(outputStreamCaptor));
+   }
+
+   @AfterEach
+   public void tearDown() {
+      System.setOut(standardOut);
+   }
+
+   @Test
+   public void testHelpOption() {
+      App.main(new String[]{"help", "create-package"});
+      assertThat(outputStreamCaptor.toString().trim(), containsString("Usage"));
+   }
+}
@@ -0,0 +1,4 @@
+---
+type: fix
+issue: 2973
+title: "CLI `smileutil help {command}` returned `Unknown command` when it should have returned the usage of `command`. This has been corrected."
@@ -0,0 +1,3 @@
+---
+type: fix
+title: "Fixed a bug where creating two identical tags in parallel entries of a batch bundle would fail."
@@ -31,12 +31,11 @@ In addition, the Elasticsearch client service, `ElasticsearchSvcImpl` will need
 ```java
 @Bean()
 public ElasticsearchSvcImpl elasticsearchSvc() {
-	String elasticsearchHost = "localhost";
-	String elasticsearchUserId = "elastic";
+	String elasticsearchHost = "localhost:9200";
+	String elasticsearchUsername = "elastic";
 	String elasticsearchPassword = "changeme";
-	int elasticsearchPort = 9301;

-	return new ElasticsearchSvcImpl(elasticsearchHost, elasticsearchPort, elasticsearchUserId, elasticsearchPassword);
+	return new ElasticsearchSvcImpl(elasticsearchHost, elasticsearchUsername, elasticsearchPassword);
 }
 ```
@@ -366,8 +366,8 @@ public abstract class BaseTransactionProcessor {
       IBase nextRequestEntry = null;
       for (int i=0; i<requestEntriesSize; i++ ) {
          nextRequestEntry = requestEntries.get(i);
-         BundleTask bundleTask = new BundleTask(completionLatch, theRequestDetails, responseMap, i, nextRequestEntry, theNestedMode);
-         getTaskExecutor().execute(bundleTask);
+         RetriableBundleTask retriableBundleTask = new RetriableBundleTask(completionLatch, theRequestDetails, responseMap, i, nextRequestEntry, theNestedMode);
+         getTaskExecutor().execute(retriableBundleTask);
       }

       // waiting for all tasks to be completed
@@ -1683,7 +1683,7 @@ public abstract class BaseTransactionProcessor {
       return theStatusCode + " " + defaultString(Constants.HTTP_STATUS_NAMES.get(theStatusCode));
    }

-   public class BundleTask implements Runnable {
+   public class RetriableBundleTask implements Runnable {

       private final CountDownLatch myCompletedLatch;
       private final RequestDetails myRequestDetails;
@@ -1691,51 +1691,73 @@ public abstract class BaseTransactionProcessor {
       private final Map<Integer, Object> myResponseMap;
       private final int myResponseOrder;
       private final boolean myNestedMode;
+      private BaseServerResponseException myLastSeenException;

-      protected BundleTask(CountDownLatch theCompletedLatch, RequestDetails theRequestDetails, Map<Integer, Object> theResponseMap, int theResponseOrder, IBase theNextReqEntry, boolean theNestedMode) {
+      protected RetriableBundleTask(CountDownLatch theCompletedLatch, RequestDetails theRequestDetails, Map<Integer, Object> theResponseMap, int theResponseOrder, IBase theNextReqEntry, boolean theNestedMode) {
          this.myCompletedLatch = theCompletedLatch;
         this.myRequestDetails = theRequestDetails;
         this.myNextReqEntry = theNextReqEntry;
         this.myResponseMap = theResponseMap;
         this.myResponseOrder = theResponseOrder;
         this.myNestedMode = theNestedMode;
+         this.myLastSeenException = null;
       }

+      private void processBatchEntry() {
+         IBaseBundle subRequestBundle = myVersionAdapter.createBundle(org.hl7.fhir.r4.model.Bundle.BundleType.TRANSACTION.toCode());
+         myVersionAdapter.addEntry(subRequestBundle, myNextReqEntry);
+
+         IBaseBundle nextResponseBundle = processTransactionAsSubRequest(myRequestDetails, subRequestBundle, "Batch sub-request", myNestedMode);
+
+         IBase subResponseEntry = (IBase) myVersionAdapter.getEntries(nextResponseBundle).get(0);
+         myResponseMap.put(myResponseOrder, subResponseEntry);
+
+         /*
+          * If the individual entry didn't have a resource in its response, bring the sub-transaction's OperationOutcome across so the client can see it
+          */
+         if (myVersionAdapter.getResource(subResponseEntry) == null) {
+            IBase nextResponseBundleFirstEntry = (IBase) myVersionAdapter.getEntries(nextResponseBundle).get(0);
+            myResponseMap.put(myResponseOrder, nextResponseBundleFirstEntry);
+         }
+      }
+
+      private boolean processBatchEntryWithRetry() {
+         int maxAttempts = 3;
+         for (int attempt = 1;; attempt++) {
+            try {
+               processBatchEntry();
+               return true;
+            } catch (BaseServerResponseException e) {
+               // If we catch a known and structured exception from HAPI, just fail.
+               myLastSeenException = e;
+               return false;
+            } catch (Throwable t) {
+               myLastSeenException = new InternalErrorException(t);
+               // If we have caught a non-tag-storage failure we are unfamiliar with, or we have exceeded max attempts, exit.
+               if (!DaoFailureUtil.isTagStorageFailure(t) || attempt >= maxAttempts) {
+                  ourLog.error("Failure during BATCH sub transaction processing", t);
+                  return false;
+               }
+            }
+         }
+      }
+
       @Override
       public void run() {
-         BaseServerResponseExceptionHolder caughtEx = new BaseServerResponseExceptionHolder();
-         try {
-            IBaseBundle subRequestBundle = myVersionAdapter.createBundle(org.hl7.fhir.r4.model.Bundle.BundleType.TRANSACTION.toCode());
-            myVersionAdapter.addEntry(subRequestBundle, myNextReqEntry);
-
-            IBaseBundle nextResponseBundle = processTransactionAsSubRequest(myRequestDetails, subRequestBundle, "Batch sub-request", myNestedMode);
-
-            IBase subResponseEntry = (IBase) myVersionAdapter.getEntries(nextResponseBundle).get(0);
-            myResponseMap.put(myResponseOrder, subResponseEntry);
-
-            /*
-             * If the individual entry didn't have a resource in its response, bring the sub-transaction's OperationOutcome across so the client can see it
-             */
-            if (myVersionAdapter.getResource(subResponseEntry) == null) {
-               IBase nextResponseBundleFirstEntry = (IBase) myVersionAdapter.getEntries(nextResponseBundle).get(0);
-               myResponseMap.put(myResponseOrder, nextResponseBundleFirstEntry);
-            }
-
-         } catch (BaseServerResponseException e) {
-            caughtEx.setException(e);
-         } catch (Throwable t) {
-            ourLog.error("Failure during BATCH sub transaction processing", t);
-            caughtEx.setException(new InternalErrorException(t));
+         boolean success = processBatchEntryWithRetry();
+         if (!success) {
+            populateResponseMapWithLastSeenException();
          }
-
-         if (caughtEx.getException() != null) {
-            // add exception to the response map
-            myResponseMap.put(myResponseOrder, caughtEx);
-         }

          // checking for the parallelism
-         ourLog.debug("processing bacth for {} is completed", myVersionAdapter.getEntryRequestUrl(myNextReqEntry));
+         ourLog.debug("processing batch for {} is completed", myVersionAdapter.getEntryRequestUrl(myNextReqEntry));
          myCompletedLatch.countDown();
       }

+      private void populateResponseMapWithLastSeenException() {
+         BaseServerResponseExceptionHolder caughtEx = new BaseServerResponseExceptionHolder();
+         caughtEx.setException(myLastSeenException);
+         myResponseMap.put(myResponseOrder, caughtEx);
+      }
+
    }
 }
@@ -0,0 +1,18 @@
+package ca.uhn.fhir.jpa.dao;
+
+import org.apache.commons.lang3.StringUtils;
+
+/**
+ * Utility class to help identify classes of failure.
+ */
+public class DaoFailureUtil {
+
+   public static boolean isTagStorageFailure(Throwable t) {
+      if (StringUtils.isBlank(t.getMessage())) {
+         return false;
+      } else {
+         String msg = t.getMessage().toLowerCase();
+         return msg.contains("hfj_tag_def") || msg.contains("hfj_res_tag");
+      }
+   }
+}
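For illustration only (this snippet is not part of the commit): the helper above matches purely on the exception message, case-insensitively, and treats blank or missing messages as non-retriable. The messages below are made up.

```java
import ca.uhn.fhir.jpa.dao.DaoFailureUtil;

// Hypothetical demonstration of DaoFailureUtil.isTagStorageFailure().
public class DaoFailureUtilSketch {
   public static void main(String[] args) {
      // A constraint failure mentioning one of the tag tables is considered retriable.
      System.out.println(DaoFailureUtil.isTagStorageFailure(
         new RuntimeException("could not execute statement: unique constraint violated on HFJ_TAG_DEF"))); // true
      // Unrelated failures, and exceptions without a message, are not.
      System.out.println(DaoFailureUtil.isTagStorageFailure(new RuntimeException("deadlock detected")));    // false
      System.out.println(DaoFailureUtil.isTagStorageFailure(new RuntimeException()));                       // false
   }
}
```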
@@ -25,6 +25,7 @@ import ca.uhn.fhir.interceptor.api.IInterceptorBroadcaster;
 import ca.uhn.fhir.interceptor.api.Pointcut;
 import ca.uhn.fhir.jpa.api.model.ResourceVersionConflictResolutionStrategy;
 import ca.uhn.fhir.jpa.dao.BaseHapiFhirDao;
+import ca.uhn.fhir.jpa.dao.DaoFailureUtil;
 import ca.uhn.fhir.rest.api.server.RequestDetails;
 import ca.uhn.fhir.rest.api.server.storage.TransactionDetails;
 import ca.uhn.fhir.rest.server.exceptions.InternalErrorException;
@@ -93,10 +94,9 @@ public class HapiTransactionService {
             * known to the system already, they'll both try to create a row in HFJ_TAG_DEF,
             * which is the tag definition table. In that case, a constraint error will be
             * thrown by one of the client threads, so we auto-retry in order to avoid
-            * annopying spurious failures for the client.
+            * annoying spurious failures for the client.
             */
-            if (e.getMessage().contains("HFJ_TAG_DEF") || e.getMessage().contains("hfj_tag_def") ||
-                  e.getMessage().contains("HFJ_RES_TAG") || e.getMessage().contains("hfj_res_tag")) {
+            if (DaoFailureUtil.isTagStorageFailure(e)) {
               maxRetries = 3;
            }

@@ -1087,7 +1087,7 @@ public class SearchCoordinatorSvcImpl implements ISearchCoordinatorSvc {
          ourLog.trace("Performing count");
          ISearchBuilder sb = newSearchBuilder();
          Iterator<Long> countIterator = sb.createCountQuery(myParams, mySearch.getUuid(), myRequest, myRequestPartitionId);
-         Long count = countIterator.hasNext() ? countIterator.next() : 0;
+         Long count = countIterator.hasNext() ? countIterator.next() : 0L;
          ourLog.trace("Got count {}", count);

          TransactionTemplate txTemplate = new TransactionTemplate(myManagedTxManager);
@@ -37,6 +37,7 @@ import org.hibernate.search.backend.elasticsearch.cfg.ElasticsearchIndexSettings;
 import org.hibernate.search.mapper.orm.schema.management.SchemaManagementStrategyName;
 import org.slf4j.Logger;

+import javax.annotation.Nullable;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Properties;
@@ -49,13 +50,13 @@ import static org.slf4j.LoggerFactory.getLogger;
 * FHIR JPA server. This class also injects a starter template into the ES cluster.
 */
 public class ElasticsearchHibernatePropertiesBuilder {
    private static final Logger ourLog = getLogger(ElasticsearchHibernatePropertiesBuilder.class);

-   private IndexStatus myRequiredIndexStatus = IndexStatus.YELLOW.YELLOW;
+   private IndexStatus myRequiredIndexStatus = IndexStatus.YELLOW;
    private SchemaManagementStrategyName myIndexSchemaManagementStrategy = SchemaManagementStrategyName.CREATE;

-   private String myRestUrl;
+   private String myHosts;
    private String myUsername;
    private String myPassword;
    private long myIndexManagementWaitTimeoutMillis = 10000L;
@@ -77,11 +78,8 @@ public class ElasticsearchHibernatePropertiesBuilder {

       // the below properties are used for ElasticSearch integration
       theProperties.put(BackendSettings.backendKey(BackendSettings.TYPE), "elasticsearch");
-
-
       theProperties.put(BackendSettings.backendKey(ElasticsearchIndexSettings.ANALYSIS_CONFIGURER), HapiElasticsearchAnalysisConfigurer.class.getName());
-
-      theProperties.put(BackendSettings.backendKey(ElasticsearchBackendSettings.HOSTS), myRestUrl);
+      theProperties.put(BackendSettings.backendKey(ElasticsearchBackendSettings.HOSTS), myHosts);
       theProperties.put(BackendSettings.backendKey(ElasticsearchBackendSettings.PROTOCOL), myProtocol);

       if (StringUtils.isNotBlank(myUsername)) {
@@ -102,8 +100,7 @@ public class ElasticsearchHibernatePropertiesBuilder {
       //This tells elasticsearch to use our custom index naming strategy.
       theProperties.put(BackendSettings.backendKey(ElasticsearchBackendSettings.LAYOUT_STRATEGY), IndexNamePrefixLayoutStrategy.class.getName());

-      injectStartupTemplate(myProtocol, myRestUrl, myUsername, myPassword);
-
+      injectStartupTemplate(myProtocol, myHosts, myUsername, myPassword);
    }

    public ElasticsearchHibernatePropertiesBuilder setRequiredIndexStatus(IndexStatus theRequiredIndexStatus) {
@@ -111,11 +108,8 @@ public class ElasticsearchHibernatePropertiesBuilder {
       return this;
    }

-   public ElasticsearchHibernatePropertiesBuilder setRestUrl(String theRestUrl) {
-      if (theRestUrl.contains("://")) {
-         throw new ConfigurationException("Elasticsearch URL cannot include a protocol, that is a separate property. Remove http:// or https:// from this URL.");
-      }
-      myRestUrl = theRestUrl;
+   public ElasticsearchHibernatePropertiesBuilder setHosts(String hosts) {
+      myHosts = hosts;
       return this;
    }

@@ -150,18 +144,13 @@ public class ElasticsearchHibernatePropertiesBuilder {
    * TODO GGG HS: In HS6.1, we should have a native way of performing index settings manipulation at bootstrap time, so this should
    * eventually be removed in favour of whatever solution they come up with.
    */
-   void injectStartupTemplate(String theProtocol, String theHostAndPort, String theUsername, String thePassword) {
+   void injectStartupTemplate(String theProtocol, String theHosts, @Nullable String theUsername, @Nullable String thePassword) {
       PutIndexTemplateRequest ngramTemplate = new PutIndexTemplateRequest("ngram-template")
          .patterns(Arrays.asList("*resourcetable-*", "*termconcept-*"))
         .settings(Settings.builder().put("index.max_ngram_diff", 50));

-      int colonIndex = theHostAndPort.indexOf(":");
-      String host = theHostAndPort.substring(0, colonIndex);
-      Integer port = Integer.valueOf(theHostAndPort.substring(colonIndex + 1));
-      String qualifiedHost = theProtocol + "://" + host;
-
       try {
-         RestHighLevelClient elasticsearchHighLevelRestClient = ElasticsearchRestClientFactory.createElasticsearchHighLevelRestClient(qualifiedHost, port, theUsername, thePassword);
+         RestHighLevelClient elasticsearchHighLevelRestClient = ElasticsearchRestClientFactory.createElasticsearchHighLevelRestClient(theProtocol, theHosts, theUsername, thePassword);
          ourLog.info("Adding starter template for large ngram diffs");
          AcknowledgedResponse acknowledgedResponse = elasticsearchHighLevelRestClient.indices().putTemplate(ngramTemplate, RequestOptions.DEFAULT);
          assert acknowledgedResponse.isAcknowledged();
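A sketch of how the reworked builder is now driven (the values below are illustrative placeholders; the fragment assumes the HAPI and Hibernate Search classes visible in the hunks above are on the classpath, and the same call chain appears in `TestR4ConfigWithElasticSearch` further down):

```java
// Hosts are a comma-separated "host:port" list with no scheme; the protocol is a separate property.
Properties props = new Properties();
new ElasticsearchHibernatePropertiesBuilder()
   .setProtocol("http")
   .setHosts("localhost:9200,localhost:9201")   // would throw ConfigurationException if "http://..." were used
   .setUsername("elastic")                      // credentials may be blank when security is disabled
   .setPassword("changeme")
   .setRequiredIndexStatus(IndexStatus.YELLOW)
   .setIndexSchemaManagementStrategy(SchemaManagementStrategyName.CREATE)
   .setIndexManagementWaitTimeoutMillis(10000)
   .apply(props);
```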
@@ -20,6 +20,8 @@ package ca.uhn.fhir.jpa.search.lastn;
 * #L%
 */

+import ca.uhn.fhir.context.ConfigurationException;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.http.Header;
 import org.apache.http.HttpHost;
 import org.apache.http.auth.AuthScope;
@@ -27,45 +29,51 @@ import org.apache.http.auth.UsernamePasswordCredentials;
 import org.apache.http.client.CredentialsProvider;
 import org.apache.http.impl.client.BasicCredentialsProvider;
 import org.apache.http.message.BasicHeader;
+import org.elasticsearch.client.Node;
 import org.elasticsearch.client.RestClient;
 import org.elasticsearch.client.RestClientBuilder;
 import org.elasticsearch.client.RestHighLevelClient;

+import javax.annotation.Nullable;
+import java.util.Arrays;
+import java.util.List;
+import java.util.stream.Collectors;
+
 public class ElasticsearchRestClientFactory {

-   private static String determineScheme(String theHostname) {
-      int schemeIdx = theHostname.indexOf("://");
-      if (schemeIdx > 0) {
-         return theHostname.substring(0, schemeIdx);
-      } else {
-         return "http";
-      }
-   }
-
-   private static String stripHostOfScheme(String theHostname) {
-      int schemeIdx = theHostname.indexOf("://");
-      if (schemeIdx > 0) {
-         return theHostname.substring(schemeIdx + 3);
-      } else {
-         return theHostname;
-      }
-   }
-
-   static public RestHighLevelClient createElasticsearchHighLevelRestClient(String theHostname, int thePort, String theUsername, String thePassword) {
-      final CredentialsProvider credentialsProvider =
-         new BasicCredentialsProvider();
-      credentialsProvider.setCredentials(AuthScope.ANY,
-         new UsernamePasswordCredentials(theUsername, thePassword));
-      RestClientBuilder clientBuilder = RestClient.builder(
-         new HttpHost(stripHostOfScheme(theHostname), thePort, determineScheme(theHostname)))
-         .setHttpClientConfigCallback(httpClientBuilder -> httpClientBuilder
-            .setDefaultCredentialsProvider(credentialsProvider));
-
-      Header[] defaultHeaders = new Header[]{new BasicHeader("Content-Type", "application/json")};
-      clientBuilder.setDefaultHeaders(defaultHeaders);
-
-      return new RestHighLevelClient(clientBuilder);
-
-   }
+   static public RestHighLevelClient createElasticsearchHighLevelRestClient(
+      String protocol, String hosts, @Nullable String theUsername, @Nullable String thePassword) {
+
+      if (hosts.contains("://")) {
+         throw new ConfigurationException("Elasticsearch URLs cannot include a protocol, that is a separate property. Remove http:// or https:// from this URL.");
+      }
+      String[] hostArray = hosts.split(",");
+      List<Node> clientNodes = Arrays.stream(hostArray)
+         .map(String::trim)
+         .filter(s -> s.contains(":"))
+         .map(h -> {
+            int colonIndex = h.indexOf(":");
+            String host = h.substring(0, colonIndex);
+            int port = Integer.parseInt(h.substring(colonIndex + 1));
+            return new Node(new HttpHost(host, port, protocol));
+         })
+         .collect(Collectors.toList());
+      if (hostArray.length != clientNodes.size()) {
+         throw new ConfigurationException("Elasticsearch URLs have to contain ':' as a host:port separator. Example: localhost:9200,localhost:9201,localhost:9202");
+      }
+
+      RestClientBuilder clientBuilder = RestClient.builder(clientNodes.toArray(new Node[0]));
+      if (StringUtils.isNotBlank(theUsername) && StringUtils.isNotBlank(thePassword)) {
+         final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
+         credentialsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials(theUsername, thePassword));
+         clientBuilder.setHttpClientConfigCallback(httpClientBuilder -> httpClientBuilder
+            .setDefaultCredentialsProvider(credentialsProvider));
+      }
+      Header[] defaultHeaders = new Header[]{new BasicHeader("Content-Type", "application/json")};
+      clientBuilder.setDefaultHeaders(defaultHeaders);
+
+      return new RestHighLevelClient(clientBuilder);
+   }
 }
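For orientation (an illustrative fragment, not part of the commit): after this change the factory's entry point takes the protocol and a comma-separated `host:port` list, and the credentials are optional. The host names below are placeholders.

```java
// Connect to a local two-node cluster over HTTP without authentication.
RestHighLevelClient client = ElasticsearchRestClientFactory.createElasticsearchHighLevelRestClient(
   "http", "localhost:9200,localhost:9201", null, null);

// With basic authentication.
RestHighLevelClient securedClient = ElasticsearchRestClientFactory.createElasticsearchHighLevelRestClient(
   "https", "es.example.com:9200", "elastic", "changeme");
```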
@@ -68,11 +68,11 @@ import org.elasticsearch.search.aggregations.bucket.terms.ParsedTerms;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
 import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
 import org.elasticsearch.search.aggregations.metrics.ParsedTopHits;
-import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.search.sort.SortOrder;
 import org.springframework.beans.factory.annotation.Autowired;

+import javax.annotation.Nullable;
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.InputStreamReader;
@@ -125,13 +125,13 @@ public class ElasticsearchSvcImpl implements IElasticsearchSvc {
    private PartitionSettings myPartitionSettings;

    //This constructor used to inject a dummy partitionsettings in test.
-   public ElasticsearchSvcImpl(PartitionSettings thePartitionSetings, String theHostname, int thePort, String theUsername, String thePassword) {
-      this(theHostname, thePort, theUsername, thePassword);
+   public ElasticsearchSvcImpl(PartitionSettings thePartitionSetings, String theHostname, @Nullable String theUsername, @Nullable String thePassword) {
+      this(theHostname, theUsername, thePassword);
       this.myPartitionSettings = thePartitionSetings;
    }

-   public ElasticsearchSvcImpl(String theHostname, int thePort, String theUsername, String thePassword) {
-      myRestHighLevelClient = ElasticsearchRestClientFactory.createElasticsearchHighLevelRestClient(theHostname, thePort, theUsername, thePassword);
+   public ElasticsearchSvcImpl(String theHostname, @Nullable String theUsername, @Nullable String thePassword) {
+      myRestHighLevelClient = ElasticsearchRestClientFactory.createElasticsearchHighLevelRestClient("http", theHostname, theUsername, thePassword);

       try {
          createObservationIndexIfMissing();
@@ -128,7 +128,7 @@ public class ElasticsearchWithPrefixConfig {
          .settings(Settings.builder().put("index.max_ngram_diff", 50));

       try {
-         RestHighLevelClient elasticsearchHighLevelRestClient = ElasticsearchRestClientFactory.createElasticsearchHighLevelRestClient("http://" + host, httpPort, "", "");
+         RestHighLevelClient elasticsearchHighLevelRestClient = ElasticsearchRestClientFactory.createElasticsearchHighLevelRestClient("http", host + ":" + httpPort, "", "");
          AcknowledgedResponse acknowledgedResponse = elasticsearchHighLevelRestClient.indices().putTemplate(ngramTemplate, RequestOptions.DEFAULT);
          assert acknowledgedResponse.isAcknowledged();
       } catch (IOException theE) {
@@ -41,7 +41,7 @@ public class TestR4ConfigWithElasticSearch extends TestR4Config {
          .setIndexSchemaManagementStrategy(SchemaManagementStrategyName.CREATE)
          .setIndexManagementWaitTimeoutMillis(10000)
          .setRequiredIndexStatus(IndexStatus.YELLOW)
-         .setRestUrl(host + ":" + httpPort)
+         .setHosts(host + ":" + httpPort)
          .setProtocol("http")
          .setUsername("")
          .setPassword("")
@@ -21,7 +21,7 @@ public class TestR4ConfigWithElasticsearchClient extends TestR4ConfigWithElasticSearch {
    public ElasticsearchSvcImpl myElasticsearchSvc() {
       int elasticsearchPort = elasticContainer().getMappedPort(9200);
       String host = elasticContainer().getHost();
-      return new ElasticsearchSvcImpl(host, elasticsearchPort, "", "");
+      return new ElasticsearchSvcImpl(host + ":" + elasticsearchPort, null, null);
    }

    @PreDestroy
@@ -33,7 +33,7 @@ public class ElasticsearchPrefixTest {
    public void test() throws IOException {
       //Given
       RestHighLevelClient elasticsearchHighLevelRestClient = ElasticsearchRestClientFactory.createElasticsearchHighLevelRestClient(
-         "http://" + elasticsearchContainer.getHost(), elasticsearchContainer.getMappedPort(9200), "", "");
+         "http", elasticsearchContainer.getHost() + ":" + elasticsearchContainer.getMappedPort(9200), "", "");

       //When
       RestClient lowLevelClient = elasticsearchHighLevelRestClient.getLowLevelClient();
@@ -10,6 +10,7 @@ import ca.uhn.fhir.jpa.model.entity.ResourceIndexedSearchParamString;
 import ca.uhn.fhir.jpa.model.entity.ResourceTable;
 import ca.uhn.fhir.jpa.model.entity.ResourceTag;
+import ca.uhn.fhir.jpa.model.entity.TagTypeEnum;
 import ca.uhn.fhir.jpa.partition.SystemRequestDetails;
 import ca.uhn.fhir.jpa.provider.SystemProviderDstu2Test;
 import ca.uhn.fhir.jpa.searchparam.SearchParameterMap;
 import ca.uhn.fhir.model.api.ResourceMetadataKeyEnum;
@@ -249,6 +249,45 @@ public class ResourceProviderR4BundleTest extends BaseResourceProviderR4Test {
       assertEquals(ids.get(4), bundleEntries.get(6).getResource().getIdElement().toUnqualifiedVersionless().getValueAsString());

    }

+   @Test
+   public void testTagCacheWorksWithBatchMode() {
+      Bundle input = new Bundle();
+      input.setType(BundleType.BATCH);
+
+      Patient p = new Patient();
+      p.setId("100");
+      p.setGender(AdministrativeGender.MALE);
+      p.addIdentifier().setSystem("urn:foo").setValue("A");
+      p.addName().setFamily("Smith");
+      p.getMeta().addTag().setSystem("mysystem").setCode("mycode");
+      input.addEntry().setResource(p).getRequest().setMethod(HTTPVerb.POST);
+
+      Patient p2 = new Patient();
+      p2.setId("200");
+      p2.setGender(AdministrativeGender.MALE);
+      p2.addIdentifier().setSystem("urn:foo").setValue("A");
+      p2.addName().setFamily("Smith");
+      p2.getMeta().addTag().setSystem("mysystem").setCode("mycode");
+      input.addEntry().setResource(p2).getRequest().setMethod(HTTPVerb.POST);
+
+      Patient p3 = new Patient();
+      p3.setId("pat-300");
+      p3.setGender(AdministrativeGender.MALE);
+      p3.addIdentifier().setSystem("urn:foo").setValue("A");
+      p3.addName().setFamily("Smith");
+      p3.getMeta().addTag().setSystem("mysystem").setCode("mycode");
+      input.addEntry().setResource(p3).getRequest().setMethod(HTTPVerb.PUT).setUrl("Patient/pat-300");
+
+      Bundle output = myClient.transaction().withBundle(input).execute();
+      output.getEntry().stream()
+         .map(BundleEntryComponent::getResponse)
+         .map(Bundle.BundleEntryResponseComponent::getStatus)
+         .forEach(statusCode -> {
+            assertEquals("201 Created", statusCode);
+         });
+   }
+
    private List<String> createPatients(int count) {
       List<String> ids = new ArrayList<String>();
@@ -23,17 +23,11 @@ class ElasticsearchHibernatePropertiesBuilderTest {

    ElasticsearchHibernatePropertiesBuilder myPropertiesBuilder = spy(ElasticsearchHibernatePropertiesBuilder.class);

-   @BeforeEach
-   public void prepMocks() {
-      //ensures we don't try to reach out to a real ES server on apply.
-      doNothing().when(myPropertiesBuilder).injectStartupTemplate(any(), any(), any(), any());
-   }
-
    @Test
-   public void testRestUrlCannotContainProtocol() {
+   public void testHostsCannotContainProtocol() {
       String host = "localhost:9200";
       String protocolHost = "https://" + host;
-      String failureMessage = "Elasticsearch URL cannot include a protocol, that is a separate property. Remove http:// or https:// from this URL.";
+      String failureMessage = "Elasticsearch URLs cannot include a protocol, that is a separate property. Remove http:// or https:// from this URL.";

       myPropertiesBuilder
          .setProtocol("https")
@@ -42,19 +36,42 @@ class ElasticsearchHibernatePropertiesBuilderTest {

       //SUT
       try {
-         myPropertiesBuilder.setRestUrl(protocolHost);
+         myPropertiesBuilder.setHosts(protocolHost)
+            .apply(new Properties());
          fail();
       } catch (ConfigurationException e ) {
          assertThat(e.getMessage(), is(equalTo(failureMessage)));
       }

+      doNothing().when(myPropertiesBuilder).injectStartupTemplate(any(), any(), any(), any());
       Properties properties = new Properties();
       myPropertiesBuilder
-         .setRestUrl(host)
+         .setHosts(host)
          .apply(properties);

       assertThat(properties.getProperty(BackendSettings.backendKey(ElasticsearchBackendSettings.HOSTS)), is(equalTo(host)));

    }

+   @Test
+   public void testHostsValueValidation() {
+      String host = "localhost_9200,localhost:9201,localhost:9202";
+      String failureMessage = "Elasticsearch URLs have to contain ':' as a host:port separator. Example: localhost:9200,localhost:9201,localhost:9202";
+
+      myPropertiesBuilder
+         .setProtocol("https")
+         .setHosts(host)
+         .setUsername("whatever")
+         .setPassword("whatever");
+
+      //SUT
+      try {
+         myPropertiesBuilder
+            .apply(new Properties());
+         fail();
+      } catch (ConfigurationException e ) {
+         assertThat(e.getMessage(), is(equalTo(failureMessage)));
+      }
+   }
+
 }
@@ -68,7 +68,7 @@ public class LastNElasticsearchSvcMultipleObservationsIT {
    public void before() throws IOException {
       PartitionSettings partitionSettings = new PartitionSettings();
       partitionSettings.setPartitioningEnabled(false);
-      elasticsearchSvc = new ElasticsearchSvcImpl(partitionSettings, elasticsearchContainer.getHost(), elasticsearchContainer.getMappedPort(9200), "", "");
+      elasticsearchSvc = new ElasticsearchSvcImpl(partitionSettings, elasticsearchContainer.getHost() + ":" + elasticsearchContainer.getMappedPort(9200), null, null);

       if (!indexLoaded) {
          createMultiplePatientsAndObservations();
@@ -97,7 +97,7 @@ public class LastNElasticsearchSvcSingleObservationIT {
    public void before() {
       PartitionSettings partitionSettings = new PartitionSettings();
       partitionSettings.setPartitioningEnabled(false);
-      elasticsearchSvc = new ElasticsearchSvcImpl(partitionSettings, elasticsearchContainer.getHost(), elasticsearchContainer.getMappedPort(9200), "", "");
+      elasticsearchSvc = new ElasticsearchSvcImpl(partitionSettings, elasticsearchContainer.getHost() + ":" + elasticsearchContainer.getMappedPort(9200), "", "");
    }

    @AfterEach