Work on docs
This commit is contained in:
parent
e05ea676e9
commit
2bcb1c9c1c
|
@ -77,6 +77,16 @@
|
|||
<version>4.1.0-SNAPSHOT</version>
|
||||
<scope>compile</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.jdom</groupId>
|
||||
<artifactId>jdom2</artifactId>
|
||||
<version>2.0.6</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.slf4j</groupId>
|
||||
<artifactId>slf4j-simple</artifactId>
|
||||
<version>1.7.28</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
<build>
|
||||
|
|
|
@ -0,0 +1,92 @@
|
|||
/*-
|
||||
* #%L
|
||||
* HAPI FHIR - Docs
|
||||
* %%
|
||||
* Copyright (C) 2014 - 2019 University Health Network
|
||||
* %%
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
* #L%
|
||||
*/
|
||||
|
||||
import org.jdom2.Content;
|
||||
import org.jdom2.Element;
|
||||
import org.jdom2.Namespace;
|
||||
import org.jdom2.Text;
|
||||
import org.jdom2.input.DOMBuilder;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.xml.sax.SAXException;
|
||||
|
||||
import javax.xml.parsers.DocumentBuilder;
|
||||
import javax.xml.parsers.DocumentBuilderFactory;
|
||||
import javax.xml.parsers.ParserConfigurationException;
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* This is just here to force a javadoc to be built in order to keep
|
||||
* Maven Central happy
|
||||
*/
|
||||
public class ChangelogMigrator {
|
||||
|
||||
private static final Logger ourLog = LoggerFactory.getLogger(ChangelogMigrator.class);
|
||||
private static final Namespace NS = Namespace.getNamespace( "http://maven.apache.org/changes/1.0.0");
|
||||
|
||||
public static void main(String[] args) throws ParserConfigurationException, IOException, SAXException {
|
||||
|
||||
org.jdom2.Document document = null;
|
||||
DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
|
||||
//If want to make namespace aware.
|
||||
//factory.setNamespaceAware(true);
|
||||
DocumentBuilder documentBuilder = factory.newDocumentBuilder();
|
||||
org.w3c.dom.Document w3cDocument = documentBuilder.parse(new File("src/changes/changes.xml"));
|
||||
document = new DOMBuilder().build(w3cDocument);
|
||||
|
||||
int actionCount = 0;
|
||||
int releaseCount = 0;
|
||||
|
||||
|
||||
Element docElement = document.getRootElement();
|
||||
Element bodyElement = docElement.getChild("body", NS);
|
||||
List<Element> releases = bodyElement.getChildren("release", NS);
|
||||
for (Element nextRelease : releases) {
|
||||
String version = nextRelease.getAttributeValue("version");
|
||||
String date = nextRelease.getAttributeValue("date");
|
||||
String description = nextRelease.getAttributeValue("description");
|
||||
ourLog.info("Found release {} - {} - {}", version, date, description);
|
||||
releaseCount++;
|
||||
|
||||
for (Element nextAction : nextRelease.getChildren("action", NS)) {
|
||||
StringBuilder contentBuilder = new StringBuilder();
|
||||
for (Content nextContents : nextAction.getContent()) {
|
||||
if (nextContents instanceof Text) {
|
||||
String text = ((Text) nextContents).getTextNormalize();
|
||||
contentBuilder.append(text);
|
||||
} else {
|
||||
throw new IllegalStateException("Unknown type: " + nextContents.getClass());
|
||||
}
|
||||
}
|
||||
|
||||
actionCount++;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
ourLog.info("Found {} releases and {} actions", releaseCount, actionCount);
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
|
@ -1,25 +0,0 @@
|
|||
/*-
|
||||
* #%L
|
||||
* HAPI FHIR - Docs
|
||||
* %%
|
||||
* Copyright (C) 2014 - 2019 University Health Network
|
||||
* %%
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
* #L%
|
||||
*/
|
||||
/**
|
||||
* This is just here to force a javadoc to be built in order to keep
|
||||
* Maven Central happy
|
||||
*/
|
||||
public class DocsMarker {
|
||||
}
|
|
@ -0,0 +1,66 @@
|
|||
# Logging
|
||||
|
||||
Java has an abundance of logging frameworks, none of which are perfect. Many libraries depend on one or more of these frameworks but also have dependencies who depend on a different one. These dependencies can cause conflicts and be very irritating to solve.
|
||||
|
||||
## Quick Start: Using Logback
|
||||
|
||||
If you don't want to spend much time worrying about logging, it's probably easiest to just include the [Logback](http://logback.qos.ch/) JAR along with your application.
|
||||
|
||||
Logback is a powerful and flexible framework. To configure it, simply include a "logback.xml" file on your classpath. The following contents may be placed in this file to simply log at a suitable level to the console:
|
||||
|
||||
```xml
|
||||
<configuration scan="true" scanPeriod="30 seconds">
|
||||
|
||||
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
|
||||
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
|
||||
<level>INFO</level>
|
||||
</filter>
|
||||
<encoder>
|
||||
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} [%file:%line] %msg%n</pattern>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<root level="INFO">
|
||||
<appender-ref ref="STDOUT" />
|
||||
</root>
|
||||
|
||||
</configuration>
|
||||
```
|
||||
|
||||
For more detail on how logging can be configured, see the following section.
|
||||
|
||||
# Configuring HAPI's Logging - SLF4j
|
||||
|
||||
<img src="/hapi-fhir/docs/images/hapi-fhir-logging.svg" width="723" height="273" alt="Logging arch diagram" align="right"/>
|
||||
|
||||
HAPI uses [SLF4j](http://www.slf4j.org/) for all internal logging. SLF4j is a *logging facade* framework, meaning that it doesn't actually handle log output (i.e. it isn't actually writing log lines to disk) but rather it is able to delegate that task to any of a number of underlying frameworks (e.g. log4j, logback, JDK logging, etc.)
|
||||
|
||||
This means that in order to successfully log anything, you will need to
|
||||
add two (or three) dependency JARs to your application:
|
||||
|
||||
* **slf4j-api-vXX.jar**: This is the SLF4j API and is necessary for HAPI to function
|
||||
* An actual logging implementation, as well as its SLF4j binding. For example:
|
||||
* The recommended logging framework to use is Logback. Logback is absolutely not necessary for HAPI to function correctly, but it has a number of nice features and is a good default choice. To use logback, you would include `logback-vXX.jar`.
|
||||
* If you wanted to use log4j you would include `log4j-vXX.jar` as well as `slf4j-log4j-vXX.jar`. Log4j is a mature framework that is very widely used.
|
||||
* If you wanted to use JDK logging (aka java.util.Logging) you would include `slf4j-jdk14-vXX.jar`. JDK logging is included with Java but is not particularly full featured compared to many other frameworks.
|
||||
|
||||
## Commons-Logging
|
||||
|
||||
<img src="/hapi-fhir/docs/images/hapi-fhir-logging-complete.svg" width="614" height="153" alt="Logging arch diagram" align="right"/>
|
||||
|
||||
Note that HAPI's client uses Apache HttpComponents Client internally, and that library uses Apache Commons Logging as a logging facade. The recommended approach to using HAPI is to not include any commons-logging JAR in your application, but rather to include a copy of jcl-over-slf4j-vXX.jar. This JAR will simulate commons-logging, but will redirect its logging statements to the same target as SLF4j has been configured to.
|
||||
|
||||
The diagram at the right shows the chain of command for logging under this scheme.
|
||||
|
||||
Note that some popular libraries (e.g. Spring Framework) also use commons-logging for logging. As such they may include a commons-logging JAR automatically as a transitive dependency in Maven. If you are using jcl-over-slf4j and it isn't working correctly, it is often worth checking the list of JARs included in your application to see whether commons-logging has also been added. It can then be specifically excluded in Maven.
|
||||
|
||||
<br clear="all"/>
|
||||
|
||||
# Client Payload Logging
|
||||
|
||||
To enable detailed logging of client requests and responses (what URL is being requested, what headers and payload are being received, etc.), an interceptor may be added to the client which logs each transaction. See [Logging Requests and Responses](./doc_rest_client_interceptor.html#req_resp_logging) for more information.
|
||||
|
||||
# Server Request Logging
|
||||
|
||||
To enable detailed logging of server requests and responses, an interceptor may be added to the server which logs each transaction. See [Logging Server Requests](./doc_rest_server_interceptor.html#Logging) for more information.
|
||||
|
|
@ -4,18 +4,26 @@ page.introduction.table_of_contents=Table of Contents
|
|||
page.introduction.introduction=Introduction
|
||||
|
||||
section.model.title=Working With The FHIR Model
|
||||
page.model.working_with_resources=Working With Resources
|
||||
page.model.profiles_and_extensions=Profiles and Extensions
|
||||
page.model.converter=Version Converters
|
||||
page.model.custom_structures=Custom Structures
|
||||
|
||||
section.server.title=Server
|
||||
section.server.title=Plain Server
|
||||
page.server.cors=CORS
|
||||
|
||||
section.android.title=Android Support
|
||||
section.jpa.title=JPA Server
|
||||
section.jpa.jpa_server=JPA Server
|
||||
|
||||
section.interceptors.title=Interceptors
|
||||
page.interceptors.interceptors=Interceptors Overview
|
||||
|
||||
section.android.title=Android
|
||||
page.android.client=Android Client
|
||||
|
||||
section.tools.title=Tools
|
||||
page.tools.hapi_fhir_cli=Command Line Interface (CLI) Tool
|
||||
|
||||
section.appendix.title=Appendix
|
||||
page.appendix.logging=Logging
|
||||
page.appendix.changelog=Changelog
|
||||
|
|
|
@ -0,0 +1,9 @@
|
|||
# Interceptors: Overview
|
||||
|
||||
HAPI FHIR 3.8.0 introduced a new interceptor framework that is used across the entire library. In previous versions of HAPI FHIR, a "Server Interceptor" framework existed and a separate "Client Interceptor" framework existed. These have now been combined into a single unified (and very powerful) framework.
|
||||
|
||||
Interceptor classes may "hook into" various points in the processing chain in both the client and the server.
|
||||
|
||||
```java
|
||||
{{snippet:classpath:/ca/uhn/hapi/fhir/docs/NewInterceptors.java|resource}}
|
||||
```
|
|
@ -0,0 +1,221 @@
|
|||
# JPA Server
|
||||
|
||||
The HAPI FHIR [RestfulServer](/hapi-fhir/docs/server/restful_server.html) module can be used to create a FHIR server endpoint against an arbitrary data source, which could be a database of your own design, an existing clinical system, a set of files, or anything else you come up with.
|
||||
|
||||
HAPI also provides a persistence module which can be used to provide a complete RESTful server implementation, backed by a database of your choosing. This module uses the [JPA 2.0](http://en.wikipedia.org/wiki/Java_Persistence_API) API to store data in a database without depending on any specific database technology.
|
||||
|
||||
**Important Note:** This implementation uses a fairly simple table design, with a single table being used to hold resource bodies (which are stored as CLOBs, optionally GZipped to save space) and a set of tables to hold search indexes, tags, history details, etc. This design is only one of many possible ways
|
||||
of designing a FHIR server so it is worth considering whether it is appropriate for the problem you are trying to solve.
|
||||
|
||||
# Getting Started
|
||||
|
||||
The recommended way to get started with HAPI's JPA server module is
|
||||
to begin with the starter project. This project can be found at the following link: [https://github.com/hapifhir/hapi-fhir-jpaserver-starter](https://github.com/hapifhir/hapi-fhir-jpaserver-starter)
|
||||
|
||||
This project is a fully contained FHIR server, supporting all standard operations (read/create/delete/etc). It bundles an embedded instance of the <a href="http://h2database.com">H2 Java Database</a> so that the server can run without depending on any external database, but it can also be configured to use an installation of Oracle, Postgres, etc.
|
||||
|
||||
# Configuration Options
|
||||
|
||||
## External/Absolute Resource References
|
||||
|
||||
Clients may sometimes post resources to your server that contain absolute resource references. For example, consider the following resource:
|
||||
|
||||
```xml
|
||||
|
||||
<Patient xmlns="http://hl7.org/fhir">
|
||||
<id value="patient-infant-01"/>
|
||||
<name>
|
||||
<use value="official"/>
|
||||
<family value="Miller"/>
|
||||
<given value="Samuel"/>
|
||||
</name>
|
||||
<managingOrganization>
|
||||
<reference value="http://example.com/fhir/Organization/123"/>
|
||||
</managingOrganization>
|
||||
</Patient>
|
||||
```
|
||||
|
||||
By default, the server will reject this reference, as only local references are permitted by the server. This can be changed however.
|
||||
|
||||
If you want the server to recognize that this URL is actually a local reference (i.e. because the server will be deployed to the base URL `http://example.com/fhir/`) you can configure the server to recognize this URL via the following DaoConfig setting:
|
||||
|
||||
```java
|
||||
@Bean
|
||||
public DaoConfig daoConfig() {
|
||||
DaoConfig retVal = new DaoConfig();
|
||||
// ... other config ...
|
||||
retVal.getTreatBaseUrlsAsLocal().add("http://example.com/fhir/");
|
||||
return retVal;
|
||||
}
|
||||
```
|
||||
|
||||
On the other hand, if you want the server to be configurable to allow remote references, you can set this with the configuration below. Using the `setAllowExternalReferences` means that it will be possible to search for references that refer to these external references.
|
||||
|
||||
```java
|
||||
@Bean
|
||||
public DaoConfig daoConfig() {
|
||||
DaoConfig retVal = new DaoConfig();
|
||||
// Allow external references
|
||||
retVal.setAllowExternalReferences(true);
|
||||
|
||||
// If you are allowing external references, it is recommended to
|
||||
// also tell the server which references actually will be local
|
||||
retVal.getTreatBaseUrlsAsLocal().add("http://mydomain.com/fhir");
|
||||
return retVal;
|
||||
}
|
||||
```
|
||||
|
||||
## Logical References
|
||||
|
||||
In some cases, you may have references which are <i>Logical References</i>,
|
||||
which means that they act as an identifier and not necessarily as a literal
|
||||
web address.
|
||||
|
||||
A common use for logical references is in references to conformance resources, such as ValueSets, StructureDefinitions, etc. For example, you might refer to the ValueSet `http://hl7.org/fhir/ValueSet/quantity-comparator` from your own resources. In this case, you are not necessarily telling the server that this is a real address that it should resolve, but rather that this is an identifier for a ValueSet where `ValueSet.url` has the given URI/URL.
|
||||
|
||||
HAPI can be configured to treat certain URI/URL patterns as logical by using the DaoConfig#setTreatReferencesAsLogical property (see [JavaDoc](/hapi-fhir/apidocs/hapi-fhir-jpaserver-base/ca/uhn/fhir/jpa/dao/DaoConfig.html#setTreatReferencesAsLogical-java.util.Set-)).
|
||||
|
||||
For example:
|
||||
|
||||
```java
|
||||
// Treat specific URL as logical
|
||||
myDaoConfig.getTreatReferencesAsLogical().add("http://mysystem.com/ValueSet/cats-and-dogs");
|
||||
|
||||
// Treat all references with given prefix as logical
|
||||
myDaoConfig.getTreatReferencesAsLogical().add("http://mysystem.com/mysystem-vs-*");
|
||||
```
|
||||
|
||||
# Search Result Caching
|
||||
|
||||
By default, search results will be cached for one minute. This means that if a client performs a search for <code>Patient?name=smith</code> and gets back 500 results, if a client performs the same search within 60000 milliseconds the previously loaded search results will be returned again. This also means that any new Patient resources named "Smith" within the last minute will not be reflected in the results.
|
||||
|
||||
Under many normal scenarios this is an acceptable performance tradeoff, but in some cases it is not. If you want to disable caching, you have two options:
|
||||
|
||||
### Globally Disable / Change Caching Timeout
|
||||
|
||||
You can change the global cache using the following setting:
|
||||
|
||||
```java
|
||||
myDaoConfig.setReuseCachedSearchResultsForMillis(null);
|
||||
```
|
||||
|
||||
### Disable Cache at the Request Level
|
||||
|
||||
Clients can selectively disable caching for an individual request using the Cache-Control header:
|
||||
|
||||
```http
|
||||
Cache-Control: no-cache
|
||||
```
|
||||
|
||||
### Disable Paging at the Request Level
|
||||
|
||||
If the client knows that they will only want a small number of results (for example, a UI containing 20 results is being shown and the client knows that they will never load the next page of results) the client
|
||||
may also use the <code>no-store</code> directive along with a HAPI FHIR extension called <code>max-results</code> in order to specify that only the given number of results should be fetched. This directive disabled paging entirely for the request and causes the request to return immediately when the given number of results is found. This can cause a noticeable performance improvement in some cases.
|
||||
|
||||
```http
|
||||
Cache-Control: no-store, max-results=20
|
||||
```
|
||||
|
||||
# Architecture
|
||||
|
||||
<img src="/hapi-fhir/docs/images/jpa_architecture.png" alt="Architecture" align="right"/>
|
||||
|
||||
The HAPI JPA Server has the following components:
|
||||
|
||||
* **Resource Providers:** A RESTful server [Resource Provider](/hapi-fhir/docs/server/restful_server.html#resource_providers) is provided for each resource type in a given release of FHIR. Each resource provider implements a [@Search](/hapi-fhir/apidocs/hapi-fhir-base/ca/uhn/fhir/rest/annotation/Search.html) method implementing the complete set of search parameters defined in the FHIR specification for the given resource type.
|
||||
|
||||
The resource providers also extend a superclass which implements all of the other FHIR methods, such as Read, Create, Delete, etc.
|
||||
|
||||
Note that these resource providers are generated as a part of the HAPI build process, so they are not checked into Git. The resource providers do not actually implement any of the logic in searching, updating, etc. They simply receive the incoming HTTP calls (via the RestfulServer) and pass along the incoming requests to the DAOs.
|
||||
|
||||
* **HAPI DAOs:** The DAOs actually implement all of the database business logic relating to the storage, indexing, and retrieval of FHIR resources, using the underlying JPA API.
|
||||
|
||||
* **Hibernate:** The HAPI JPA Server uses the JPA library, implemented by Hibernate. No Hibernate specific features are used, so the library should also work with other providers (e.g. Eclipselink) but it is not tested regularly with them.
|
||||
|
||||
* **Database:** The RESTful server uses an embedded Derby database, but can be configured to talk to [any database supported by Hibernate](https://developer.jboss.org/wiki/SupportedDatabases2?_sscc=t).
|
||||
|
||||
# Additional Information
|
||||
|
||||
* [This page](https://www.openhealthhub.org/t/hapi-terminology-server-uk-snomed-ct-import/592) has information on loading national editions (UK specifically) of SNOMED CT files into the database.
|
||||
|
||||
<a name="upgrading"/>
|
||||
|
||||
# Upgrading HAPI FHIR JPA
|
||||
|
||||
HAPI FHIR JPA is a constantly evolving product, with new features being added to each new version of the library. As a result, it is generally necessary to execute a database migration as a part of an upgrade to HAPI FHIR.
|
||||
|
||||
When upgrading the JPA server from one version of HAPI FHIR to a newer version, often there will be changes to the database schema. The **Migrate Database** command can be used to perform a migration from one version to the next.
|
||||
|
||||
Note that this feature was added in HAPI FHIR 3.5.0. It is not able to migrate from versions prior to HAPI FHIR 3.4.0. **Please make a backup of your database before running this command!**
|
||||
|
||||
The following example shows how to use the migrator utility to migrate between two versions.
|
||||
|
||||
```bash
|
||||
./hapi-fhir-cli migrate-database -d DERBY_EMBEDDED -u "jdbc:derby:directory:target/jpaserver_derby_files;create=true" -n "" -p "" -f V3_4_0 -t V3_5_0
|
||||
```
|
||||
|
||||
You may use the following command to get detailed help on the options:
|
||||
|
||||
```bash
|
||||
./hapi-fhir-cli help migrate-database
|
||||
```
|
||||
|
||||
Note the arguments:
|
||||
|
||||
* `-d [dialect]` – This indicates the database dialect to use. See the detailed help for a list of options
|
||||
* `-f [version]` – The version to migrate from
|
||||
* `-t [version]` – The version to migrate to
|
||||
|
||||
# Oracle Support
|
||||
|
||||
Note that the Oracle JDBC drivers are not distributed in the Maven Central repository, so they are not included in HAPI FHIR. In order to use this command with an Oracle database, you will need to invoke the CLI as follows:
|
||||
|
||||
```bash
|
||||
java -cp hapi-fhir-cli.jar ca.uhn.fhir.cli.App migrate-database -d ORACLE_12C -u "[url]" -n "[username]" -p "[password]" -f V3_4_0 -t V3_5_0
|
||||
```
|
||||
|
||||
## Migrating 3.4.0 to 3.5.0+
|
||||
|
||||
As of HAPI FHIR 3.5.0 a new mechanism for creating the JPA index tables (HFJ_SPIDX_xxx) has been implemented. This new mechanism uses hashes in place of large multi-column indexes. This improves both lookup times as well as required storage space. This change also paves the way for future ability to provide efficient multi-tenant searches (which is not yet implemented but is planned as an incremental improvement).
|
||||
|
||||
This change is not a lightweight change however, as it requires a rebuild of the index tables in order to generate the hashes. This can take a long time on databases that already have a large amount of data.
|
||||
|
||||
As a result, in HAPI FHIR JPA 3.6.0, an efficient way of upgrading existing databases was added. Under this new scheme, columns for the hashes are added but values are not calculated initially, database indexes are not modified on the HFJ_SPIDX_xxx tables, and the previous columns are still used for searching as was the case in HAPI FHIR JPA 3.4.0.
|
||||
|
||||
In order to perform a migration using this functionality, the following steps should be followed:
|
||||
|
||||
* Stop your running HAPI FHIR JPA instance (and remember to make a backup of your database before proceeding with any changes!)
|
||||
* Modify your `DaoConfig` to specify that hash-based searches should not be used, using the following setting: `myDaoConfig.setDisableHashBasedSearches(true);`
|
||||
* Make sure that you have your JPA settings configured to not automatically create database indexes and columns using the following setting in your JPA Properties: `extraProperties.put("hibernate.hbm2ddl.auto", "none");`
|
||||
* Run the database migrator command, including the entry `-x no-migrate-350-hashes` on the command line. For example:
|
||||
|
||||
```
|
||||
./hapi-fhir-cli migrate-database -d DERBY_EMBEDDED -u "jdbc:derby:directory:target/jpaserver_derby_files;create=true" -n "" -p "" -f V3_4_0 -t V3_6_0 -x no-migrate-350-hashes
|
||||
```
|
||||
|
||||
* Rebuild and start your HAPI FHIR JPA server. At this point you should have a working HAPI FHIR JPA 3.6.0 server that is still using HAPI FHIR 3.4.0 search indexes. Search hashes will be generated for any newly created or updated data but existing data will have null hashes.
|
||||
* With the system running, request a complete reindex of the data in the database using
|
||||
an HTTP request such as the following: `POST /$mark-all-resources-for-reindexing`. Note that this is a custom operation built into the HAPI FHIR JPA server. It should be secured in a real deployment, so Authentication is likely required for this call.
|
||||
* You can track the reindexing process by watching your server logs, but also by using the following SQL executed directly against your database:
|
||||
|
||||
```sql
|
||||
SELECT * FROM HFJ_RES_REINDEX_JOB
|
||||
```
|
||||
|
||||
* When this query no longer returns any rows, the reindexing process is complete.
|
||||
* At this time, HAPI FHIR should be stopped once again in order to convert it to using the hash based indexes.
|
||||
* Modify your `DaoConfig` to specify that hash-based searches are used, using the following setting (this is the default setting, so it could also simply be omitted): `myDaoConfig.setDisableHashBasedSearches(false);`
|
||||
* Execute the migrator tool again, this time omitting the flag option, e.g.
|
||||
|
||||
```bash
|
||||
./hapi-fhir-cli migrate-database -d DERBY_EMBEDDED -u "jdbc:derby:directory:target/jpaserver_derby_files;create=true" -n "" -p "" -f V3_4_0 -t V3_6_0
|
||||
```
|
||||
* Rebuild, and start HAPI FHIR JPA again.
|
||||
|
||||
# Cascading Deletes
|
||||
|
||||
An interceptor called `CascadingDeleteInterceptor` may be registered against the Server. When this interceptor is enabled, cascading deletes may be performed using either of the following:
|
||||
|
||||
* The request may include the following parameter: `_cascade=delete`
|
||||
* The request may include the following header: `X-Cascade: delete`
|
||||
|
|
@ -38,7 +38,7 @@ Undeclared extensions can also be added to datatypes (composite or primitive).
|
|||
{{snippet:classpath:/ca/uhn/hapi/fhir/docs/ExtensionsDstu3.java|resourceStringExtension}}
|
||||
```
|
||||
|
||||
## Sub-Extensions
|
||||
# Sub-Extensions
|
||||
|
||||
Extensions may also have child extensions as their content, instead of a datatype. This is done by adding a child undeclared extension to the parent extension.
|
||||
|
||||
|
@ -54,7 +54,7 @@ Extensions may also have child extensions as their content, instead of a datatyp
|
|||
{{snippet:classpath:/ca/uhn/hapi/fhir/docs/ExtensionsDstu3.java|subExtension}}
|
||||
```
|
||||
|
||||
## Retrieving Extension Values
|
||||
# Retrieving Extension Values
|
||||
|
||||
HAPI provides a few ways of accessing extension values in resources which are received from other sources (i.e. downloaded by a client).
|
||||
|
||||
|
|
|
@ -0,0 +1,56 @@
|
|||
# Working with Resources
|
||||
|
||||
Every resource type defined by FHIR has a corresponding class, which contains a number of getters and setters for the basic properties of that resource.
|
||||
|
||||
HAPI tries to make populating objects easier, by providing lots of convenience methods. For example, the Observation resource has an "issued" property which is of the FHIR "instant" type (a system time with either seconds or milliseconds precision). There are methods to use the actual FHIR datatype, but also convenience methods which use built-in Java types.
|
||||
|
||||
```java
|
||||
{{snippet:classpath:/ca/uhn/hapi/fhir/docs/FhirDataModel.java|datatypes}}
|
||||
```
|
||||
|
||||
# Navigating Structures
|
||||
|
||||
Most HAPI structures provide getters that automatically create child objects on access. This means it is simple to navigate complex structures without needing to worry about instantiating child objects.
|
||||
|
||||
```java
|
||||
{{snippet:classpath:/ca/uhn/hapi/fhir/docs/FhirDataModel.java|nonNull}}
|
||||
```
|
||||
|
||||
## Coded/Enumerated Values
|
||||
|
||||
There are many places in the FHIR specification where a "coded" string is used. This means that a code must be chosen from a list of allowable values.
|
||||
|
||||
### Closed Valuesets / Codes
|
||||
|
||||
The FHIR specification defines a number of "closed" ValueSets, such as
|
||||
the one used for [Patient.gender](http://hl7.org/fhir/valueset-administrative-gender.html). These valuesets must either be empty, or be populated with a value drawn from the list of allowable values defined by FHIR. HAPI provides special typesafe Enums to help in dealing with these fields.
|
||||
|
||||
```java
|
||||
{{snippet:classpath:/ca/uhn/hapi/fhir/docs/FhirDataModel.java|codes}}
|
||||
```
|
||||
|
||||
# Convenience Methods
|
||||
|
||||
The FHIR data model is rich enough to meet common use cases, but sometimes that richness adds complexity. For example, a Patient may have multiple names (a preferred name, a nickname, etc.) and each of those names may have multiple last names, multiple prefixes, etc.
|
||||
|
||||
The example below shows populating a name entry for a Patient. Note the use of the StringDt type, which encapsulates a regular String, but allows for extensions to be added.
|
||||
|
||||
```java
|
||||
{{snippet:classpath:/ca/uhn/hapi/fhir/docs/FhirDataModel.java|namesHard}}
|
||||
```
|
||||
|
||||
HAPI also provides for simple setters that use Java primitive types and can be chained, leading to much simpler code.
|
||||
|
||||
```java
|
||||
{{snippet:classpath:/ca/uhn/hapi/fhir/docs/FhirDataModel.java|namesEasy}}
|
||||
```
|
||||
|
||||
# Examples
|
||||
|
||||
## Populating an Observation Resource
|
||||
|
||||
The following example shows how to create an observation resource containing a numeric datatype.
|
||||
|
||||
```java
|
||||
{{snippet:classpath:/ca/uhn/hapi/fhir/docs/FhirDataModel.java|observation}}
|
||||
```
|
|
@ -1,166 +1,168 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<document xmlns="http://maven.apache.org/XDOC/2.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">
|
||||
|
||||
<properties>
|
||||
<title>Data Model</title>
|
||||
<author email="jamesagnew@users.sourceforge.net">James Agnew</author>
|
||||
</properties>
|
||||
|
||||
<body>
|
||||
|
||||
<!-- The body of the document contains a number of sections -->
|
||||
<section name="Working with Resources">
|
||||
|
||||
<p>
|
||||
Every resource type defined by FHIR has a corresponding
|
||||
class, which contains a number of getters and setters for
|
||||
the basic properties of that resource.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
HAPI tries to make populating objects easier, by providing lots of
|
||||
convenience methods. For example, the Observation resource has an
|
||||
"issued" property which is of the FHIR "instant" type (a system time with
|
||||
either seconds or milliseconds precision). There are methods to
|
||||
use the actual FHIR datatype, but also convenience methods which
|
||||
use built-in Java types.
|
||||
</p>
|
||||
|
||||
<macro name="snippet">
|
||||
<param name="id" value="datatypes" />
|
||||
<param name="file" value="examples/src/main/java/example/FhirDataModel.java" />
|
||||
</macro>
|
||||
|
||||
<subsection name="Navigating Structures">
|
||||
|
||||
<p>
|
||||
Most HAPI structures provide getters that automatically create
|
||||
child objects on access. This means it is simple to navigate
|
||||
complex structures without needing to worry about instantiating
|
||||
child objects.
|
||||
</p>
|
||||
|
||||
<macro name="snippet">
|
||||
<param name="id" value="nonNull" />
|
||||
<param name="file" value="examples/src/main/java/example/FhirDataModel.java" />
|
||||
</macro>
|
||||
|
||||
</subsection>
|
||||
|
||||
<subsection name="Coded/Enumerated Values">
|
||||
|
||||
<p>
|
||||
There are many places in the FHIR specification where a "coded" string is
|
||||
used. This means that a code must be chosen from a list of allowable values.
|
||||
</p>
|
||||
|
||||
<h4>Closed Valuesets / Codes</h4>
|
||||
|
||||
<p>
|
||||
The FHIR specification defines a number of "closed" ValueSets, such as
|
||||
the one used for
|
||||
<!-- TODO: replace the link below with a non GitHub link once DSTU2 is balloted -->
|
||||
<a href="http://hl7-fhir.github.io/administrative-gender.html">Patient.gender</a>
|
||||
(<i>note that this field was not a closed ValueSet in DSTU1 but is as of DSTU2</i>).
|
||||
These valuesets must either be empty, or be populated with a value drawn from
|
||||
the list of allowable values defined by FHIR. HAPI provides special typesafe
|
||||
Enums to help in dealing with these fields.
|
||||
</p>
|
||||
|
||||
<macro name="snippet">
|
||||
<param name="id" value="codes" />
|
||||
<param name="file" value="examples/src/main/java/example/FhirDataModel.java" />
|
||||
</macro>
|
||||
|
||||
<h4>Open Valuesets / CodeableConcepts</h4>
|
||||
|
||||
<p>
|
||||
The FHIR specification also defines a number of "open" ValueSets, such as
|
||||
the one used for
|
||||
<a href="http://hl7.org/implement/standards/fhir/valueset-marital-status.html">Patient.maritalStatus</a>.
|
||||
These fields may define a set of allowable codes, but also allow you to
|
||||
use your own codes instead if none of the given codes matches your needs. This
|
||||
is called an <a href="http://hl7.org/implement/standards/fhir/terminologies.html#incomplete">incomplete binding</a>.
|
||||
Some fields may even define a set of codes that serve as nothing more than
|
||||
an example as to the type of codes that would be used there. This is known as
|
||||
an <a href="http://hl7.org/implement/standards/fhir/terminologies.html#example">example binding</a>.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
For these fields, a CodeableConcept datatype is generally used by the
|
||||
FHIR specification. This datatype allows multiple "codings", which
|
||||
are a code and codesystem pair, optionally with a display name as well.
|
||||
The following example shows how to interact with this type.
|
||||
</p>
|
||||
|
||||
<macro name="snippet">
|
||||
<param name="id" value="codeableConcepts" />
|
||||
<param name="file" value="examples/src/main/java/example/FhirDataModel.java" />
|
||||
</macro>
|
||||
|
||||
<p>
|
||||
HAPI also provides typesafe enums to help in working with CodeableConcept
|
||||
fields.
|
||||
</p>
|
||||
|
||||
<macro name="snippet">
|
||||
<param name="id" value="codeableConceptEnums" />
|
||||
<param name="file" value="examples/src/main/java/example/FhirDataModel.java" />
|
||||
</macro>
|
||||
|
||||
</subsection>
|
||||
|
||||
<subsection name="Convenience Methods">
|
||||
|
||||
<p>
|
||||
The FHIR data model is rich enough to meet common use cases, but sometimes
|
||||
that richness adds complexity. For example, a Patient may have multiple names
|
||||
(a preferred name, a nickname, etc.) and each of those names may have multiple
|
||||
last names, multiple prefixes, etc.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
The example below shows populating a name entry for a Patient. Note the
|
||||
use of the StringDt type, which encapsulates a regular String, but allows for
|
||||
extensions to be added.
|
||||
</p>
|
||||
|
||||
<macro name="snippet">
|
||||
<param name="id" value="namesHard" />
|
||||
<param name="file" value="examples/src/main/java/example/FhirDataModel.java" />
|
||||
</macro>
|
||||
|
||||
<p>
|
||||
HAPI also provides for simple setters that use Java primitive types
|
||||
and can be chained, leading to much simpler code.
|
||||
</p>
|
||||
|
||||
<macro name="snippet">
|
||||
<param name="id" value="namesEasy" />
|
||||
<param name="file" value="examples/src/main/java/example/FhirDataModel.java" />
|
||||
</macro>
|
||||
|
||||
</subsection>
|
||||
|
||||
</section>
|
||||
|
||||
<section name="Examples">
|
||||
|
||||
<subsection name="Populating an Observation Resource">
|
||||
|
||||
<p>
|
||||
The following example shows how to create an observation resource containing
|
||||
a numeric datatype.
|
||||
</p>
|
||||
<macro name="snippet">
|
||||
<param name="id" value="observation" />
|
||||
<param name="file" value="examples/src/main/java/example/FhirDataModel.java" />
|
||||
</macro>
|
||||
|
||||
</subsection>
|
||||
|
||||
</section>
|
||||
|
||||
</body>
|
||||
|
||||
</document>
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<document xmlns="http://maven.apache.org/XDOC/2.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">
|
||||
|
||||
<!-- This has been migrated -->
|
||||
|
||||
<properties>
|
||||
<title>Data Model</title>
|
||||
<author email="jamesagnew@users.sourceforge.net">James Agnew</author>
|
||||
</properties>
|
||||
|
||||
<body>
|
||||
|
||||
<!-- The body of the document contains a number of sections -->
|
||||
<section name="Working with Resources">
|
||||
|
||||
<p>
|
||||
Every resource type defined by FHIR has a corresponding
|
||||
class, which contains a number of getters and setters for
|
||||
the basic properties of that resource.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
HAPI tries to make populating objects easier, by providing lots of
|
||||
convenience methods. For example, the Observation resource has an
|
||||
"issued" property which is of the FHIR "instant" type (a system time with
|
||||
either seconds or milliseconds precision). There are methods to
|
||||
use the actual FHIR datatype, but also convenience methods which
|
||||
use built-in Java types.
|
||||
</p>
|
||||
|
||||
<macro name="snippet">
|
||||
<param name="id" value="datatypes" />
|
||||
<param name="file" value="examples/src/main/java/example/FhirDataModel.java" />
|
||||
</macro>
|
||||
|
||||
<subsection name="Navigating Structures">
|
||||
|
||||
<p>
|
||||
Most HAPI structures provide getters that automatically create
|
||||
child objects on access. This means it is simple to navigate
|
||||
complex structures without needing to worry about instantiating
|
||||
child objects.
|
||||
</p>
|
||||
|
||||
<macro name="snippet">
|
||||
<param name="id" value="nonNull" />
|
||||
<param name="file" value="examples/src/main/java/example/FhirDataModel.java" />
|
||||
</macro>
|
||||
|
||||
</subsection>
|
||||
|
||||
<subsection name="Coded/Enumerated Values">
|
||||
|
||||
<p>
|
||||
There are many places in the FHIR specification where a "coded" string is
|
||||
used. This means that a code must be chosen from a list of allowable values.
|
||||
</p>
|
||||
|
||||
<h4>Closed Valuesets / Codes</h4>
|
||||
|
||||
<p>
|
||||
The FHIR specification defines a number of "closed" ValueSets, such as
|
||||
the one used for
|
||||
<!-- TODO: replace the link below with a non GitHub link once DSTU2 is balloted -->
|
||||
<a href="http://hl7-fhir.github.io/administrative-gender.html">Patient.gender</a>
|
||||
(<i>note that this field was not a closed ValueSet in DSTU1 but is as of DSTU2</i>).
|
||||
These valuesets must either be empty, or be populated with a value drawn from
|
||||
the list of allowable values defined by FHIR. HAPI provides special typesafe
|
||||
Enums to help in dealing with these fields.
|
||||
</p>
|
||||
|
||||
<macro name="snippet">
|
||||
<param name="id" value="codes" />
|
||||
<param name="file" value="examples/src/main/java/example/FhirDataModel.java" />
|
||||
</macro>
|
||||
|
||||
<h4>Open Valuesets / CodeableConcepts</h4>
|
||||
|
||||
<p>
|
||||
The FHIR specification also defines a number of "open" ValueSets, such as
|
||||
the one used for
|
||||
<a href="http://hl7.org/implement/standards/fhir/valueset-marital-status.html">Patient.maritalStatus</a>.
|
||||
These fields may define a set of allowable codes, but also allow you to
|
||||
use your own codes instead if none of the given codes matches your needs. This
|
||||
is called an <a href="http://hl7.org/implement/standards/fhir/terminologies.html#incomplete">incomplete binding</a>.
|
||||
Some fields may even define a set of codes that serve as nothing more than
|
||||
an example as to the type of codes that would be used there. This is known as
|
||||
an <a href="http://hl7.org/implement/standards/fhir/terminologies.html#example">example binding</a>.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
For these fields, a CodeableConcept datatype is generally used by the
|
||||
FHIR specification. This datatype allows multiple "codings", which
|
||||
are a code and codesystem pair, optionally with a display name as well.
|
||||
The following example shows how to interact with this type.
|
||||
</p>
|
||||
|
||||
<macro name="snippet">
|
||||
<param name="id" value="codeableConcepts" />
|
||||
<param name="file" value="examples/src/main/java/example/FhirDataModel.java" />
|
||||
</macro>
|
||||
|
||||
<p>
|
||||
HAPI also provides typesafe enums to help in working with CodeableConcept
|
||||
fields.
|
||||
</p>
|
||||
|
||||
<macro name="snippet">
|
||||
<param name="id" value="codeableConceptEnums" />
|
||||
<param name="file" value="examples/src/main/java/example/FhirDataModel.java" />
|
||||
</macro>
|
||||
|
||||
</subsection>
|
||||
|
||||
<subsection name="Convenience Methods">
|
||||
|
||||
<p>
|
||||
The FHIR data model is rich enough to meet common use cases, but sometimes
|
||||
that richness adds complexity. For example, a Patient may have multiple names
|
||||
(a preferred name, a nickname, etc.) and each of those names may have multiple
|
||||
last names, multiple prefixes, etc.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
The example below shows populating a name entry for a Patient. Note the
|
||||
use of the StringDt type, which encapsulates a regular String, but allows for
|
||||
extensions to be added.
|
||||
</p>
|
||||
|
||||
<macro name="snippet">
|
||||
<param name="id" value="namesHard" />
|
||||
<param name="file" value="examples/src/main/java/example/FhirDataModel.java" />
|
||||
</macro>
|
||||
|
||||
<p>
|
||||
HAPI also provides for simple setters that use Java primitive types
|
||||
and can be chained, leading to much simpler code.
|
||||
</p>
|
||||
|
||||
<macro name="snippet">
|
||||
<param name="id" value="namesEasy" />
|
||||
<param name="file" value="examples/src/main/java/example/FhirDataModel.java" />
|
||||
</macro>
|
||||
|
||||
</subsection>
|
||||
|
||||
</section>
|
||||
|
||||
<section name="Examples">
|
||||
|
||||
<subsection name="Populating an Observation Resource">
|
||||
|
||||
<p>
|
||||
The following example shows how to create an observation resource containing
|
||||
a numeric datatype.
|
||||
</p>
|
||||
<macro name="snippet">
|
||||
<param name="id" value="observation" />
|
||||
<param name="file" value="examples/src/main/java/example/FhirDataModel.java" />
|
||||
</macro>
|
||||
|
||||
</subsection>
|
||||
|
||||
</section>
|
||||
|
||||
</body>
|
||||
|
||||
</document>
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<document xmlns="http://maven.apache.org/XDOC/2.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">
|
||||
|
||||
<!-- This has been migrated -->
|
||||
|
||||
<properties>
|
||||
<title>Interceptors</title>
|
||||
<author email="jamesagnew@gmail.com">James Agnew</author>
|
||||
|
|
|
@ -2,6 +2,8 @@
|
|||
<document xmlns="http://maven.apache.org/XDOC/2.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">
|
||||
|
||||
<!-- Content has been migrated -->
|
||||
|
||||
<properties>
|
||||
<title>JPA Server</title>
|
||||
<author email="jamesagnew@users.sourceforge.net">James Agnew</author>
|
||||
|
|
|
@ -1,154 +1,156 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<document xmlns="http://maven.apache.org/XDOC/2.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">
|
||||
|
||||
<properties>
|
||||
<title>Logging</title>
|
||||
<author email="jamesagnew@users.sourceforge.net">James Agnew</author>
|
||||
</properties>
|
||||
|
||||
<body>
|
||||
|
||||
<section name="Logging">
|
||||
|
||||
<p>
|
||||
Java has an abundance of logging frameworks, none of which are perfect. Many libraries
|
||||
depend on one or more of these frameworks but also have dependencies that depend on a
|
||||
different one. These dependencies can cause conflicts and be very irritating to solve.
|
||||
</p>
|
||||
|
||||
<subsection name="Quick Start: Using Logback">
|
||||
|
||||
<p>
|
||||
If you don't want to spend much time worrying about logging, it's probably
|
||||
easiest to just include the <a href="http://logback.qos.ch/">Logback</a>
|
||||
JAR along with your application.
|
||||
</p>
|
||||
<p>
|
||||
Logback is a powerful and flexible framework. To configure it, simply
|
||||
include a "logback.xml" file on your classpath. The following contents
|
||||
may be placed in this file to simply log at a suitable level
|
||||
to the console:
|
||||
</p>
|
||||
<source><![CDATA[<configuration scan="true" scanPeriod="30 seconds">
|
||||
|
||||
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
|
||||
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
|
||||
<level>INFO</level>
|
||||
</filter>
|
||||
<encoder>
|
||||
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} [%file:%line] %msg%n</pattern>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<root level="INFO">
|
||||
<appender-ref ref="STDOUT" />
|
||||
</root>
|
||||
|
||||
</configuration>]]></source>
|
||||
|
||||
<p>
|
||||
For more detail on how logging can be configured, see the
|
||||
following section.
|
||||
</p>
|
||||
|
||||
</subsection>
|
||||
|
||||
<subsection name="Configuring HAPI's Logging - SLF4j">
|
||||
|
||||
<img src="svg/hapi-fhir-logging.svg" width="723" height="273" alt="Logging arch diagram" align="right"/>
|
||||
|
||||
<p>
|
||||
HAPI uses
|
||||
<a href="http://www.slf4j.org/">SLF4j</a>
|
||||
for all internal logging. SLF4j is a "logging facade" framework, meaning
|
||||
that it doesn't actually handle log output (i.e. it isn't actually writing log lines
|
||||
to disk) but rather it is able to delegate that task to any of a number of
|
||||
underlying frameworks (e.g. log4j, logback, JDK logging, etc.)
|
||||
</p>
|
||||
|
||||
<p>
|
||||
This means that in order to successfully log anything, you will need to
|
||||
add two (or three) dependency JARs to your application:
|
||||
</p>
|
||||
<ul>
|
||||
<li><b>slf4j-api-vXX.jar</b>: This is the SLF4j API and is necessary for HAPI to function</li>
|
||||
<li>
|
||||
An actual logging implementation, as well as its SLF4j binding. For example:
|
||||
<ul>
|
||||
<li>
|
||||
The recommended logging framework to use is Logback. Logback is absolutely
|
||||
not necessary for HAPI to function correctly, but it has a number of nice features
|
||||
and is a good default choice. To use logback, you would include
|
||||
<code><b>logback-vXX.jar</b></code>.
|
||||
</li>
|
||||
<li>
|
||||
If you wanted to use log4j you would include <code><b>log4j-vXX.jar</b></code>
|
||||
as well as <code><b>slf4j-log4j-vXX.jar</b></code>. Log4j is a mature
|
||||
framework that is very widely used.
|
||||
</li>
|
||||
<li>
|
||||
If you wanted to use JDK logging (aka java.util.logging) you would include
|
||||
<code><b>slf4j-jdk14-vXX.jar</b></code>. JDK logging is included with
|
||||
Java but is not particularly full featured compared to many other frameworks.
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
</subsection>
|
||||
|
||||
<subsection name="Commons-Logging">
|
||||
|
||||
<img src="svg/hapi-fhir-logging-complete.svg" width="614" height="153" alt="Logging arch diagram" align="right"/>
|
||||
|
||||
<p>
|
||||
Note that HAPI's client uses Apache HttpComponents Client internally, and that
|
||||
library uses Apache Commons Logging as a logging facade. The recommended approach to
|
||||
using HAPI is to not include any commons-logging JAR in your application, but rather to
|
||||
include a copy of jcl-over-slf4j-vXX.jar. This JAR will simulate commons-logging,
|
||||
but will redirect its logging statements to the same target as SLF4j has been
|
||||
configured to.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
The diagram at the right shows the chain of command for logging under this scheme.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Note that some popular libraries (e.g. Spring Framework) also use commons-logging
|
||||
for logging. As such they may include a commons-logging JAR automatically as
|
||||
a transitive dependency in Maven. If you are using jcl-over-slf4j and it isn't
|
||||
working correctly, it is often worth checking the list of JARs included in your
|
||||
application to see whether commons-logging has also been added. It can then be specifically
|
||||
excluded in Maven.
|
||||
</p>
|
||||
|
||||
<br clear="all"/>
|
||||
|
||||
</subsection>
|
||||
|
||||
</section>
|
||||
|
||||
<section name="Client Payload Logging">
|
||||
|
||||
<p>
|
||||
To enable detailed logging of client requests and responses (what URL is being requested, what headers and payload
|
||||
are being received, etc.), an interceptor may be added to the client which logs each transaction. See
|
||||
<a href="./doc_rest_client_interceptor.html#req_resp_logging">Logging Requests and Responses</a> for more information.
|
||||
</p>
|
||||
|
||||
</section>
|
||||
|
||||
<section name="Server Request Logging">
|
||||
|
||||
<p>
|
||||
To enable detailed logging of server requests and responses,
|
||||
an interceptor may be added to the server which logs each transaction. See
|
||||
<a href="./doc_rest_server_interceptor.html#Logging">Logging Server Requests</a> for more information.
|
||||
</p>
|
||||
|
||||
</section>
|
||||
|
||||
</body>
|
||||
|
||||
</document>
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<document xmlns="http://maven.apache.org/XDOC/2.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">
|
||||
|
||||
<!-- Contents have been migrated -->
|
||||
|
||||
<properties>
|
||||
<title>Logging</title>
|
||||
<author email="jamesagnew@users.sourceforge.net">James Agnew</author>
|
||||
</properties>
|
||||
|
||||
<body>
|
||||
|
||||
<section name="Logging">
|
||||
|
||||
<p>
|
||||
Java has an abundance of logging frameworks, none of which are perfect. Many libraries
|
||||
depend on one or more of these frameworks but also have dependencies that depend on a
|
||||
different one. These dependencies can cause conflicts and be very irritating to solve.
|
||||
</p>
|
||||
|
||||
<subsection name="Quick Start: Using Logback">
|
||||
|
||||
<p>
|
||||
If you don't want to spend much time worrying about logging, it's probably
|
||||
easiest to just include the <a href="http://logback.qos.ch/">Logback</a>
|
||||
JAR along with your application.
|
||||
</p>
|
||||
<p>
|
||||
Logback is a powerful and flexible framework. To configure it, simply
|
||||
include a "logback.xml" file on your classpath. The following contents
|
||||
may be placed in this file to simply log at a suitable level
|
||||
to the console:
|
||||
</p>
|
||||
<source><![CDATA[<configuration scan="true" scanPeriod="30 seconds">
|
||||
|
||||
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
|
||||
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
|
||||
<level>INFO</level>
|
||||
</filter>
|
||||
<encoder>
|
||||
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} [%file:%line] %msg%n</pattern>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<root level="INFO">
|
||||
<appender-ref ref="STDOUT" />
|
||||
</root>
|
||||
|
||||
</configuration>]]></source>
|
||||
|
||||
<p>
|
||||
For more detail on how logging can be configured, see the
|
||||
following section.
|
||||
</p>
|
||||
|
||||
</subsection>
|
||||
|
||||
<subsection name="Configuring HAPI's Logging - SLF4j">
|
||||
|
||||
<img src="svg/hapi-fhir-logging.svg" width="723" height="273" alt="Logging arch diagram" align="right"/>
|
||||
|
||||
<p>
|
||||
HAPI uses
|
||||
<a href="http://www.slf4j.org/">SLF4j</a>
|
||||
for all internal logging. SLF4j is a "logging facade" framework, meaning
|
||||
that it doesn't actually handle log output (i.e. it isn't actually writing log lines
|
||||
to disk) but rather it is able to delegate that task to any of a number of
|
||||
underlying frameworks (e.g. log4j, logback, JDK logging, etc.)
|
||||
</p>
|
||||
|
||||
<p>
|
||||
This means that in order to successfully log anything, you will need to
|
||||
add two (or three) dependency JARs to your application:
|
||||
</p>
|
||||
<ul>
|
||||
<li><b>slf4j-api-vXX.jar</b>: This is the SLF4j API and is necessary for HAPI to function</li>
|
||||
<li>
|
||||
An actual logging implementation, as well as its SLF4j binding. For example:
|
||||
<ul>
|
||||
<li>
|
||||
The recommended logging framework to use is Logback. Logback is absolutely
|
||||
not necessary for HAPI to function correctly, but it has a number of nice features
|
||||
and is a good default choice. To use logback, you would include
|
||||
<code><b>logback-vXX.jar</b></code>.
|
||||
</li>
|
||||
<li>
|
||||
If you wanted to use log4j you would include <code><b>log4j-vXX.jar</b></code>
|
||||
as well as <code><b>slf4j-log4j-vXX.jar</b></code>. Log4j is a mature
|
||||
framework that is very widely used.
|
||||
</li>
|
||||
<li>
|
||||
If you wanted to use JDK logging (aka java.util.logging) you would include
|
||||
<code><b>slf4j-jdk14-vXX.jar</b></code>. JDK logging is included with
|
||||
Java but is not particularly full featured compared to many other frameworks.
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
</subsection>
|
||||
|
||||
<subsection name="Commons-Logging">
|
||||
|
||||
<img src="svg/hapi-fhir-logging-complete.svg" width="614" height="153" alt="Logging arch diagram" align="right"/>
|
||||
|
||||
<p>
|
||||
Note that HAPI's client uses Apache HttpComponents Client internally, and that
|
||||
library uses Apache Commons Logging as a logging facade. The recommended approach to
|
||||
using HAPI is to not include any commons-logging JAR in your application, but rather to
|
||||
include a copy of jcl-over-slf4j-vXX.jar. This JAR will simulate commons-logging,
|
||||
but will redirect its logging statements to the same target as SLF4j has been
|
||||
configured to.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
The diagram at the right shows the chain of command for logging under this scheme.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Note that some popular libraries (e.g. Spring Framework) also use commons-logging
|
||||
for logging. As such they may include a commons-logging JAR automatically as
|
||||
a transitive dependency in Maven. If you are using jcl-over-slf4j and it isn't
|
||||
working correctly, it is often worth checking the list of JARs included in your
|
||||
application to see whether commons-logging has also been added. It can then be specifically
|
||||
excluded in Maven.
|
||||
</p>
|
||||
|
||||
<br clear="all"/>
|
||||
|
||||
</subsection>
|
||||
|
||||
</section>
|
||||
|
||||
<section name="Client Payload Logging">
|
||||
|
||||
<p>
|
||||
To enable detailed logging of client requests and responses (what URL is being requested, what headers and payload
|
||||
are being received, etc.), an interceptor may be added to the client which logs each transaction. See
|
||||
<a href="./doc_rest_client_interceptor.html#req_resp_logging">Logging Requests and Responses</a> for more information.
|
||||
</p>
|
||||
|
||||
</section>
|
||||
|
||||
<section name="Server Request Logging">
|
||||
|
||||
<p>
|
||||
To enable detailed logging of server requests and responses,
|
||||
an interceptor may be added to the server which logs each transaction. See
|
||||
<a href="./doc_rest_server_interceptor.html#Logging">Logging Server Requests</a> for more information.
|
||||
</p>
|
||||
|
||||
</section>
|
||||
|
||||
</body>
|
||||
|
||||
</document>
|
||||
|
|
Loading…
Reference in New Issue