Merge branch 'master' into HHH-12866-support-for-update-of-in-oracle
This commit is contained in:
commit
ea35641033
|
@ -0,0 +1,7 @@
|
|||
# Reclaim disk space, otherwise we only have 13 GB free at the start of a job
|
||||
|
||||
docker rmi node:10 node:12 mcr.microsoft.com/azure-pipelines/node8-typescript:latest
|
||||
# That is 18 GB
|
||||
sudo rm -rf /usr/share/dotnet
|
||||
# That is 1.2 GB
|
||||
sudo rm -rf /usr/share/swift
|
|
@ -0,0 +1,3 @@
|
|||
---
|
||||
jira:
|
||||
projectKey: "HHH"
|
|
@ -0,0 +1,120 @@
|
|||
# The main CI of Hibernate ORM is https://ci.hibernate.org/job/hibernate-orm-6.0-h2-main/.
|
||||
# However, Hibernate ORM builds run on GitHub actions regularly
|
||||
# to check that it still works and can be used in GitHub forks.
|
||||
# See https://docs.github.com/en/free-pro-team@latest/actions
|
||||
# for more information about GitHub actions.
|
||||
|
||||
name: Hibernate ORM build
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- 'master'
|
||||
- 'wip/6.0'
|
||||
pull_request:
|
||||
branches:
|
||||
- 'master'
|
||||
- 'wip/6.0'
|
||||
jobs:
|
||||
build:
|
||||
name: Java 8
|
||||
runs-on: ubuntu-latest
|
||||
# We want to know the test results of all matrix entries
|
||||
continue-on-error: true
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
# When GitHub Actions supports it: https://github.com/actions/toolkit/issues/399
|
||||
# We will use the experimental flag as indicator whether a failure should cause a workflow failure
|
||||
include:
|
||||
- rdbms: h2
|
||||
experimental: false
|
||||
- rdbms: derby
|
||||
experimental: true
|
||||
- rdbms: mariadb
|
||||
experimental: true
|
||||
- rdbms: postgresql
|
||||
experimental: true
|
||||
- rdbms: oracle
|
||||
experimental: true
|
||||
- rdbms: db2
|
||||
experimental: true
|
||||
- rdbms: mssql
|
||||
experimental: true
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Reclaim Disk Space
|
||||
run: .github/ci-prerequisites.sh
|
||||
- name: Set up Java 8
|
||||
uses: actions/setup-java@v1
|
||||
with:
|
||||
java-version: 1.8
|
||||
- name: Get year/month for cache key
|
||||
id: get-date
|
||||
run: |
|
||||
echo "::set-output name=yearmonth::$(/bin/date -u "+%Y-%m")"
|
||||
shell: bash
|
||||
- name: Cache Maven local repository
|
||||
uses: actions/cache@v2
|
||||
id: cache-maven
|
||||
with:
|
||||
path: |
|
||||
~/.m2/repository
|
||||
~/.gradle/caches/
|
||||
~/.gradle/wrapper/
|
||||
# refresh cache every month to avoid unlimited growth
|
||||
key: maven-localrepo-${{ steps.get-date.outputs.yearmonth }}
|
||||
- name: Run build script
|
||||
env:
|
||||
RDBMS: ${{ matrix.rdbms }}
|
||||
run: ./ci/build-github.sh
|
||||
shell: bash
|
||||
- name: Upload test reports (if Gradle failed)
|
||||
uses: actions/upload-artifact@v2
|
||||
if: failure()
|
||||
with:
|
||||
name: test-reports-java8-${{ matrix.rdbms }}
|
||||
path: |
|
||||
./**/target/reports/tests/
|
||||
./**/target/reports/checkstyle/
|
||||
- name: Omit produced artifacts from build cache
|
||||
run: ./ci/before-cache.sh
|
||||
build11:
|
||||
name: Java 11
|
||||
runs-on: ubuntu-latest
|
||||
# We want to know the test results of all matrix entries
|
||||
continue-on-error: true
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up Java 11
|
||||
uses: actions/setup-java@v1
|
||||
with:
|
||||
java-version: 11
|
||||
- name: Get year/month for cache key
|
||||
id: get-date
|
||||
run: |
|
||||
echo "::set-output name=yearmonth::$(/bin/date -u "+%Y-%m")"
|
||||
shell: bash
|
||||
- name: Cache Maven local repository
|
||||
uses: actions/cache@v2
|
||||
id: cache-maven
|
||||
with:
|
||||
path: |
|
||||
~/.m2/repository
|
||||
~/.gradle/caches/
|
||||
~/.gradle/wrapper/
|
||||
# refresh cache every month to avoid unlimited growth
|
||||
key: maven-localrepo-${{ steps.get-date.outputs.yearmonth }}
|
||||
- name: Run build script
|
||||
run: ./ci/build-github.sh
|
||||
shell: bash
|
||||
- name: Upload test reports (if Gradle failed)
|
||||
uses: actions/upload-artifact@v2
|
||||
if: failure()
|
||||
with:
|
||||
name: test-reports-java11
|
||||
path: |
|
||||
./**/target/reports/tests/
|
||||
./**/target/reports/checkstyle/
|
||||
- name: Omit produced artifacts from build cache
|
||||
run: ./ci/before-cache.sh
|
|
@ -1,8 +1,9 @@
|
|||
# Typically *NIX text editors, by default, append '~' to files on saving to make backups
|
||||
*~
|
||||
|
||||
# Gradle work directory
|
||||
# Gradle work directory and caches
|
||||
.gradle
|
||||
.gradletasknamecache
|
||||
|
||||
# Build output directies
|
||||
/target
|
||||
|
|
59
.travis.yml
59
.travis.yml
|
@ -1,17 +1,56 @@
|
|||
dist: trusty
|
||||
language: java
|
||||
|
||||
jdk:
|
||||
- oraclejdk8
|
||||
install:
|
||||
- ./gradlew assemble
|
||||
script:
|
||||
- travis_wait 45 ./gradlew check
|
||||
- ./ci/build-travis.sh
|
||||
before_cache:
|
||||
- rm -f $HOME/.gradle/caches/modules-2/modules-2.lock
|
||||
- rm -fr $HOME/.gradle/caches/*/plugin-resolution/
|
||||
- rm -f $HOME/.gradle/caches/*/fileHashes/fileHashes.bin
|
||||
- rm -f $HOME/.gradle/caches/*/fileHashes/fileHashes.lock
|
||||
- ./ci/before-cache.sh
|
||||
cache:
|
||||
directories:
|
||||
- $HOME/.gradle/caches/
|
||||
- $HOME/.gradle/wrapper/
|
||||
matrix:
|
||||
fast_finish: true
|
||||
include:
|
||||
- env: RDBMS=h2
|
||||
jdk: oraclejdk8
|
||||
sudo: required
|
||||
# - env: RDBMS=derby
|
||||
# jdk: oraclejdk8
|
||||
# sudo: required
|
||||
# - env: RDBMS=mariadb
|
||||
# jdk: oraclejdk8
|
||||
# sudo: true
|
||||
# services:
|
||||
# - docker
|
||||
# - env: RDBMS=postgresql
|
||||
# jdk: oraclejdk8
|
||||
# sudo: true
|
||||
# services:
|
||||
# - docker
|
||||
# - env: RDBMS=oracle
|
||||
# jdk: oraclejdk8
|
||||
# sudo: true
|
||||
# services:
|
||||
# - docker
|
||||
# - env: RDBMS=db2
|
||||
# jdk: oraclejdk8
|
||||
# sudo: true
|
||||
# services:
|
||||
# - docker
|
||||
# - env: RDBMS=mssql
|
||||
# jdk: oraclejdk8
|
||||
# sudo: true
|
||||
# services:
|
||||
# - docker
|
||||
- env: JDK=11
|
||||
install:
|
||||
- curl -L -o install-jdk.sh https://github.com/sormuras/bach/raw/master/install-jdk.sh
|
||||
- source ./install-jdk.sh --target ./openjdk11 --url https://github.com/AdoptOpenJDK/openjdk11-binaries/releases/download/jdk-11.0.9%2B11.1/OpenJDK11U-jdk_x64_linux_hotspot_11.0.9_11.tar.gz
|
||||
allow_failures:
|
||||
# - env: RDBMS=derby
|
||||
# - env: RDBMS=mariadb
|
||||
# - env: RDBMS=postgresql
|
||||
# - env: RDBMS=oracle
|
||||
# - env: RDBMS=db2
|
||||
# - env: RDBMS=mssql
|
||||
- env: JDK=11
|
|
@ -21,19 +21,18 @@ While we try to keep requirements for contributing to a minimum, there are a few
|
|||
we ask that you mind.
|
||||
|
||||
For code contributions, these guidelines include:
|
||||
* respect the project code style - find templates for [Eclipse](https://community.jboss.org/docs/DOC-16649)
|
||||
and [IntelliJ IDEA](https://community.jboss.org/docs/DOC-15468)
|
||||
* respect the project code style - find templates for [IntelliJ IDEA](https://community.jboss.org/docs/DOC-15468) or [Eclipse](https://community.jboss.org/docs/DOC-16649)
|
||||
* have a corresponding JIRA issue and the key for this JIRA issue should be used in the commit message
|
||||
* have a set of appropriate tests. For bug reports, the tests reproduce the initial reported bug
|
||||
and illustrates that the solution actually fixes the bug. For features/enhancements, the
|
||||
tests illustrate the feature working as intended. In both cases the tests are incorporated into
|
||||
the project to protect against regressions.
|
||||
the project to protect against regressions
|
||||
* if applicable, documentation is updated to reflect the introduced changes
|
||||
* the code compiles and the tests pass (`./gradlew clean build`)
|
||||
|
||||
For documentation contributions, mainly just respect the project code style, especially in regards
|
||||
to use of tabs - as mentioned above, code style templates are available for both Eclipse and IntelliJ
|
||||
IDEA IDEs. Ideally these contributions would also have a corresponding JIRA issue, although this
|
||||
to use of tabs - as mentioned above, code style templates are available for both IntelliJ IDEA and Eclipse
|
||||
IDEs. Ideally these contributions would also have a corresponding JIRA issue, although this
|
||||
is less necessary for documentation contributions.
|
||||
|
||||
|
||||
|
@ -42,12 +41,12 @@ is less necessary for documentation contributions.
|
|||
If you are just getting started with Git, GitHub and/or contributing to Hibernate via
|
||||
GitHub there are a few pre-requisite steps to follow:
|
||||
|
||||
* Make sure you have a [Hibernate JIRA account](https://hibernate.atlassian.net)
|
||||
* Make sure you have a [GitHub account](https://github.com/signup/free)
|
||||
* [Fork](https://help.github.com/articles/fork-a-repo) the Hibernate repository. As discussed in
|
||||
* make sure you have a [Hibernate JIRA account](https://hibernate.atlassian.net)
|
||||
* make sure you have a [GitHub account](https://github.com/signup/free)
|
||||
* [fork](https://help.github.com/articles/fork-a-repo) the Hibernate repository. As discussed in
|
||||
the linked page, this also includes:
|
||||
* [Set](https://help.github.com/articles/set-up-git) up your local git install
|
||||
* Clone your fork
|
||||
* [set up your local git install](https://help.github.com/articles/set-up-git)
|
||||
* clone your fork
|
||||
* See the wiki pages for setting up your IDE, whether you use
|
||||
[IntelliJ IDEA](https://community.jboss.org/wiki/ContributingToHibernateUsingIntelliJ)
|
||||
or [Eclipse](https://community.jboss.org/wiki/ContributingToHibernateUsingEclipse)<sup>(1)</sup>.
|
||||
|
@ -59,7 +58,7 @@ Create a [topic branch](http://git-scm.com/book/en/Git-Branching-Branching-Workf
|
|||
on which you will work. The convention is to incorporate the JIRA issue key in the name of this branch,
|
||||
although this is more of a mnemonic strategy than a hard-and-fast rule - but doing so helps:
|
||||
* remember what each branch is for
|
||||
* isolate the work from other contributions you may be working on.
|
||||
* isolate the work from other contributions you may be working on
|
||||
|
||||
_If there is not already a JIRA issue covering the work you want to do, create one._
|
||||
|
||||
|
@ -69,17 +68,17 @@ on the JIRA HHH-123 : `git checkout -b HHH-123 master`
|
|||
|
||||
## Code
|
||||
|
||||
Do yo thing!
|
||||
Do your thing!
|
||||
|
||||
|
||||
## Commit
|
||||
|
||||
* Make commits of logical units.
|
||||
* Be sure to use the JIRA issue key in the commit message. This is how JIRA will pick
|
||||
up the related commits and display them on the JIRA issue.
|
||||
* Make sure you have added the necessary tests for your changes.
|
||||
* Run _all_ the tests to assure nothing else was accidentally broken.
|
||||
* Make sure your source does not violate the checkstyles.
|
||||
* make commits of logical units
|
||||
* be sure to **use the JIRA issue key** in the commit message. This is how JIRA will pick
|
||||
up the related commits and display them on the JIRA issue
|
||||
* make sure you have added the necessary tests for your changes
|
||||
* run _all_ the tests to assure nothing else was accidentally broken
|
||||
* make sure your source does not violate the _checkstyles_
|
||||
|
||||
_Prior to committing, if you want to pull in the latest upstream changes (highly
|
||||
appreciated btw), please use rebasing rather than merging. Merging creates
|
||||
|
@ -87,10 +86,9 @@ appreciated btw), please use rebasing rather than merging. Merging creates
|
|||
|
||||
## Submit
|
||||
|
||||
* Push your changes to the topic branch in your fork of the repository.
|
||||
* Initiate a [pull request](http://help.github.com/articles/creating-a-pull-request)
|
||||
* Update the JIRA issue, adding a comment including a link to the created pull request
|
||||
_if the JIRA key was not used in the commit message_.
|
||||
* push your changes to the topic branch in your fork of the repository
|
||||
* initiate a [pull request](http://help.github.com/articles/creating-a-pull-request)
|
||||
* update the JIRA issue by providing the PR link in the **Pull Request** column on the right
|
||||
|
||||
|
||||
It is important that this topic branch on your fork:
|
||||
|
@ -98,9 +96,9 @@ It is important that this topic branch on your fork:
|
|||
* be isolated to just the work on this one JIRA issue, or multiple issues if they are
|
||||
related and also fixed/implemented by this work. The main point is to not push
|
||||
commits for more than one PR to a single branch - GitHub PRs are linked to
|
||||
a branch rather than specific commits.
|
||||
a branch rather than specific commits
|
||||
* remain until the PR is closed. Once the underlying branch is deleted the corresponding
|
||||
PR will be closed, if not already, and the changes will be lost.
|
||||
PR will be closed, if not already, and the changes will be lost
|
||||
|
||||
# Notes
|
||||
<sup>(1)</sup> Gradle `eclipse` plugin is no longer supported, so the recommended way to import the project in your IDE is with the proper IDE tools/plugins. Don't try to run `./gradlew clean eclipse --refresh-dependencies` from the command line as you'll get an error because `eclipse` no longer exists
|
||||
|
|
18
README.md
18
README.md
|
@ -2,14 +2,14 @@
|
|||
|
||||
|
||||
Hibernate ORM is a library providing Object/Relational Mapping (ORM) support
|
||||
to applications, libraries and frameworks.
|
||||
to applications, libraries, and frameworks.
|
||||
|
||||
It also provides an implementation of the JPA specification, which is the standard Java specification for ORM.
|
||||
|
||||
This is the repository of its source code: see [Hibernate.org](http://hibernate.org/orm/) for additional information.
|
||||
|
||||
[![Build Status](http://ci.hibernate.org/job/hibernate-orm-master-h2-main/badge/icon)](http://ci.hibernate.org/job/hibernate-orm-master-h2-main/)
|
||||
|
||||
[![Language grade: Java](https://img.shields.io/lgtm/grade/java/g/hibernate/hibernate-orm.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/hibernate/hibernate-orm/context:java)
|
||||
|
||||
Building from sources
|
||||
=========
|
||||
|
@ -23,8 +23,8 @@ Gradle.
|
|||
|
||||
Contributors should read the [Contributing Guide](CONTRIBUTING.md).
|
||||
|
||||
See the guides for setting up [IntelliJ](https://developer.jboss.org/wiki/ContributingToHibernateUsingIntelliJ) or
|
||||
[Eclipse](https://developer.jboss.org/wiki/ContributingToHibernateUsingEclipse) as your development environment.
|
||||
See the guides for setting up [IntelliJ](http://hibernate.org/community/contribute/intellij-idea/) or
|
||||
[Eclipse](https://hibernate.org/community/contribute/eclipse-ide/) as your development environment.
|
||||
|
||||
Check out the _Getting Started_ section in CONTRIBUTING.md for getting started working on Hibernate source.
|
||||
|
||||
|
@ -46,7 +46,7 @@ particular that are indispensable:
|
|||
|
||||
* [Gradle User Guide](https://docs.gradle.org/current/userguide/userguide_single.html) is a typical user guide in that
|
||||
it follows a topical approach to describing all of the capabilities of Gradle.
|
||||
* [Gradle DSL Guide](https://docs.gradle.org/current/dsl/index.html) is quite unique and excellent in quickly
|
||||
* [Gradle DSL Guide](https://docs.gradle.org/current/dsl/index.html) is unique and excellent in quickly
|
||||
getting up to speed on certain aspects of Gradle.
|
||||
|
||||
|
||||
|
@ -54,7 +54,7 @@ Using the Gradle Wrapper
|
|||
------------------------
|
||||
|
||||
For contributors who do not otherwise use Gradle and do not want to install it, Gradle offers a very cool
|
||||
features called the wrapper. It lets you run Gradle builds without a previously installed Gradle distro in
|
||||
feature called the wrapper. It lets you run Gradle builds without a previously installed Gradle distro in
|
||||
a zero-conf manner. Hibernate configures the Gradle wrapper for you. If you would rather use the wrapper and
|
||||
not install Gradle (or to make sure you use the version of Gradle intended for older builds) you would just use
|
||||
the command `gradlew` (or `gradlew.bat`) rather than `gradle` (or `gradle.bat`) in the following discussions.
|
||||
|
@ -80,7 +80,7 @@ sub-project and execute that task if the sub-project defines it. To execute a t
|
|||
either:
|
||||
|
||||
1. `cd` into that module directory and execute the task
|
||||
2. name the "task path". For example, in order to run the tests for the _hibernate-core_ module from the root directory you could say `gradle hibernate-core:test`
|
||||
2. name the "task path". For example, to run the tests for the _hibernate-core_ module from the root directory you could say `gradle hibernate-core:test`
|
||||
|
||||
Common Java related tasks
|
||||
------------------------
|
||||
|
@ -115,7 +115,7 @@ Coming soon...
|
|||
Using "profiles"
|
||||
------------------------
|
||||
|
||||
The Hibernate build defines a number of database testing "profiles" in `databases.gradle`. These
|
||||
The Hibernate build defines several database testing "profiles" in `databases.gradle`. These
|
||||
profiles can be activated by name using the `db` build property which can be passed either as
|
||||
a JVM system prop (`-D`) or as a Gradle project property (`-P`). Examples below use the Gradle
|
||||
project property approach.
|
||||
|
@ -127,7 +127,7 @@ Use the following command:
|
|||
|
||||
gradle clean compile -Pdb=pgsql
|
||||
|
||||
_*NOTE : If you are running tests against a JDBC driver that is not available via Maven central (generally due to license nonsense - Oracle, DB2, etc) be sure to add these drivers to your local Maven repo cache (~/.m2/repository) or (better) add it to a personal Maven repo server*_
|
||||
_*NOTE: If you are running tests against a JDBC driver that is not available via Maven central be sure to add these drivers to your local Maven repo cache (~/.m2/repository) or (better) add it to a personal Maven repo server*_
|
||||
|
||||
Running database-specific tests from the IDE using "profiles"
|
||||
-------------------------------------------------------------
|
||||
|
|
64
build.gradle
64
build.gradle
|
@ -1,5 +1,3 @@
|
|||
import org.apache.tools.ant.filters.ReplaceTokens
|
||||
|
||||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
|
@ -11,34 +9,33 @@ buildscript {
|
|||
repositories {
|
||||
jcenter()
|
||||
mavenCentral()
|
||||
maven {
|
||||
name "jboss-snapshots"
|
||||
url "http://snapshots.jboss.org/maven2/"
|
||||
}
|
||||
}
|
||||
dependencies {
|
||||
classpath 'org.hibernate.build.gradle:gradle-maven-publish-auth:2.0.1'
|
||||
classpath 'org.hibernate.build.gradle:hibernate-matrix-testing:2.0.0.Final'
|
||||
classpath 'org.hibernate.build.gradle:hibernate-matrix-testing:3.0.0.Final'
|
||||
classpath 'org.hibernate.build.gradle:version-injection-plugin:1.0.0'
|
||||
classpath 'org.hibernate.build.gradle:gradle-xjc-plugin:1.0.2.Final'
|
||||
classpath 'gradle.plugin.com.github.lburgazzoli:gradle-karaf-plugin:0.1.1'
|
||||
classpath 'gradle.plugin.com.github.lburgazzoli:gradle-karaf-plugin:0.5.1'
|
||||
classpath 'org.asciidoctor:asciidoctor-gradle-plugin:1.5.7'
|
||||
classpath 'com.jfrog.bintray.gradle:gradle-bintray-plugin:1.8.3'
|
||||
classpath 'de.thetaphi:forbiddenapis:2.5'
|
||||
classpath 'com.jfrog.bintray.gradle:gradle-bintray-plugin:1.8.4'
|
||||
classpath 'de.thetaphi:forbiddenapis:3.0.1'
|
||||
}
|
||||
}
|
||||
|
||||
plugins {
|
||||
id 'com.gradle.build-scan' version '1.9'
|
||||
id 'me.champeau.buildscan-recipes' version '0.1.7'
|
||||
id 'me.champeau.buildscan-recipes' version '0.2.3'
|
||||
id 'org.hibernate.build.xjc' version '2.0.1' apply false
|
||||
id 'org.hibernate.build.maven-repo-auth' version '3.0.3' apply false
|
||||
id 'biz.aQute.bnd' version '5.1.1' apply false
|
||||
}
|
||||
|
||||
allprojects {
|
||||
repositories {
|
||||
mavenCentral()
|
||||
maven {
|
||||
name "jboss-snapshots"
|
||||
url "http://snapshots.jboss.org/maven2/"
|
||||
//Allow loading additional dependencies from a local path;
|
||||
//useful to load JDBC drivers which can not be distributed in public.
|
||||
if (System.env['ADDITIONAL_REPO'] != null) {
|
||||
flatDir {
|
||||
dirs "${System.env.ADDITIONAL_REPO}"
|
||||
}
|
||||
}
|
||||
}
|
||||
apply plugin: 'idea'
|
||||
|
@ -58,18 +55,21 @@ task release {
|
|||
"the fact that subprojects will appropriately define a release task " +
|
||||
"themselves if they have any release-related activities to perform"
|
||||
|
||||
// Force to release with JDK 8. Releasing with JDK 11 is not supported yet:
|
||||
// - the hibernate-orm-modules tests do not run due to an issue with the ASM version currently used by Gradle
|
||||
doFirst {
|
||||
if ( !JavaVersion.current().isJava8() ) {
|
||||
throw new IllegalStateException( "Please use JDK 8 to perform the release." )
|
||||
def javaVersionsInUse = [gradle.ext.javaVersions.main.compiler, gradle.ext.javaVersions.main.release,
|
||||
gradle.ext.javaVersions.test.compiler, gradle.ext.javaVersions.test.release,
|
||||
gradle.ext.javaVersions.test.launcher].toSet()
|
||||
// Force to release with JDK 8. It used to not work on JDK11 because of the hibernate-orm-modules module,
|
||||
// but this limitation might be resolved now that this module has been deleted?
|
||||
if ( javaVersionsInUse != [JavaLanguageVersion.of( 8 )].toSet() ) {
|
||||
throw new IllegalStateException( "Please use JDK 8 to perform the release. Currently using: ${javaVersionsInUse}" )
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
task publish {
|
||||
description = "The task performed when we want to just publish maven artifacts. Relies on " +
|
||||
"the fact that subprojects will have a task named pubappropriately define a release task " +
|
||||
"the fact that subprojects will appropriately define a release task " +
|
||||
"themselves if they have any release-related activities to perform"
|
||||
}
|
||||
|
||||
|
@ -87,29 +87,25 @@ task ciBuild {
|
|||
|
||||
|
||||
wrapper {
|
||||
gradleVersion = '4.8.1'
|
||||
// To upgrade the version of gradle used in the wrapper, run:
|
||||
// ./gradlew wrapper --gradle-version NEW_VERSION
|
||||
distributionType = Wrapper.DistributionType.ALL
|
||||
}
|
||||
|
||||
|
||||
buildScan {
|
||||
licenseAgreementUrl = 'https://gradle.com/terms-of-service'
|
||||
licenseAgree = 'yes'
|
||||
termsOfServiceUrl = 'https://gradle.com/terms-of-service'
|
||||
termsOfServiceAgree = 'yes'
|
||||
}
|
||||
|
||||
buildScanRecipes {
|
||||
recipe 'git-commit', baseUrl: 'https://github.com/hibernate/hibernate-orm/tree'
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
//idea {
|
||||
// project {
|
||||
// jdkName = baselineJavaVersion
|
||||
// languageLevel = baselineJavaVersion
|
||||
// jdkName = gradle.ext.baselineJavaVersion
|
||||
// languageLevel = gradle.ext.baselineJavaVersion
|
||||
//
|
||||
// vcs = 'Git'
|
||||
// }
|
||||
|
|
|
@ -7,15 +7,6 @@
|
|||
repositories {
|
||||
mavenCentral()
|
||||
jcenter()
|
||||
|
||||
maven {
|
||||
name 'jboss-nexus'
|
||||
url "http://repository.jboss.org/nexus/content/groups/public/"
|
||||
}
|
||||
maven {
|
||||
name "jboss-snapshots"
|
||||
url "http://snapshots.jboss.org/maven2/"
|
||||
}
|
||||
}
|
||||
|
||||
apply plugin: "groovy"
|
||||
|
@ -27,5 +18,11 @@ dependencies {
|
|||
compile localGroovy()
|
||||
|
||||
compile 'org.hibernate.build.gradle:gradle-animalSniffer-plugin:1.0.1.Final'
|
||||
compile 'org.hibernate.build.gradle:hibernate-matrix-testing:2.0.0.Final'
|
||||
compile 'org.hibernate.build.gradle:hibernate-matrix-testing:3.0.0.Final'
|
||||
}
|
||||
|
||||
tasks.withType( GroovyCompile ) {
|
||||
options.encoding = 'UTF-8'
|
||||
sourceCompatibility = 8
|
||||
targetCompatibility = 8
|
||||
}
|
||||
|
|
820
changelog.txt
820
changelog.txt
|
@ -3,6 +3,825 @@ Hibernate 5 Changelog
|
|||
|
||||
Note: Please refer to JIRA to learn more about each issue.
|
||||
|
||||
Changes in 5.4.14.Final (April 6, 2020)
|
||||
------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
https://hibernate.atlassian.net/projects/HHH/versions/31836/tab/release-report-done
|
||||
|
||||
** Bug
|
||||
* [HHH-13886] - ColumnDefinition broken for audit mappings
|
||||
* [HHH-13889] - Case Select in Criteria API does not bind literals using parameters
|
||||
* [HHH-13929] - ClassCastException on use of PersistenceUtilHelper when entities use Enhanced Proxies
|
||||
|
||||
** Task
|
||||
* [HHH-13685] - Upgrade to Gradle 5
|
||||
* [HHH-13689] - Replace uses of the deprecated osgi plugin with the biz.aQute.bnd plugin
|
||||
* [HHH-13925] - Upgrade to Gradle 6.3
|
||||
|
||||
** Improvement
|
||||
* [HHH-13930] - Improve ByteBuddyProxyHelper to allow defining proxy classes without actually loading the class
|
||||
* [HHH-13934] - GraalVM native-image metadata needs to register class metadata antlr.CommonToken
|
||||
* [HHH-13935] - Allow subclasses of StandardServiceRegistryBuilder to initialize a custom list of StandardServiceInitiator(s)
|
||||
|
||||
|
||||
Changes in 5.4.13.Final (March 26, 2020)
|
||||
------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
https://hibernate.atlassian.net/projects/HHH/versions/31829/tab/release-report-done
|
||||
|
||||
** Bug
|
||||
* [HHH-13322] - Sequence increment is not correctly determined
|
||||
* [HHH-13619] - size() does not work properly as select expression
|
||||
* [HHH-13711] - H2 dialect not accurate for drop table since version 1.4.200
|
||||
* [HHH-13870] - Gradle plugin causes compile task to be always out of date
|
||||
* [HHH-13875] - Optional one-to-one does not always join the associated entity table when querying
|
||||
* [HHH-13876] - Fix an obvious bug in StandardStack implementation
|
||||
* [HHH-13891] - ProxyFactory should not be built if any ID or property getter/setter methods are final
|
||||
* [HHH-13910] - MySQL57Dialect selected by automatic dialect resolution when using MySQL 8.0 database
|
||||
|
||||
** New Feature
|
||||
* [HHH-13799] - JPA Criteria API support for Hibernate Spatial
|
||||
|
||||
** Task
|
||||
* [HHH-13874] - Deprecate relevant methods that are supposed to be removed in v6.0
|
||||
|
||||
** Improvement
|
||||
* [HHH-13103] - Allow Hibernate Types to get access to the current configuration properties using constructor injection
|
||||
* [HHH-13853] - Pass the merged Integration settings and Persistence Unit properties to buildBootstrapServiceRegistry
|
||||
* [HHH-13855] - Remove unnecessary declaration of JtaManager in HibernatePersistenceProviderAdaptor
|
||||
* [HHH-13872] - Make the Java Stream close the underlying ScrollableResultsIterator upon calling a terminal operation
|
||||
* [HHH-13873] - IdTableHelper can skip opening a connection when there's no statements to execute
|
||||
* [HHH-13878] - Increase the scope of some methods to make them accessible outside of Hibernate ORM
|
||||
* [HHH-13879] - Slow query log should use System#nanoTime not System#currentTimeMillis
|
||||
* [HHH-13897] - ResultSetProcessingContextImpl: no need to clear collections before discarding the reference to them
|
||||
|
||||
|
||||
Changes in 5.4.12.Final (February 13, 2020)
|
||||
------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
https://hibernate.atlassian.net/projects/HHH/versions/31827/tab/release-report-done
|
||||
|
||||
** Bug
|
||||
* [HHH-13858] - Fix Oracle failing tests
|
||||
* [HHH-13859] - NPE on scanning for entities in a project having module-info.class resources
|
||||
|
||||
** New Feature
|
||||
* [HHH-13861] - Expose the doWork() and doReturningWork() APIs on StatelessSession as well
|
||||
* [HHH-13863] - Introduce a module to distribute some helpers useful to compile Hibernate ORM to GraalVM native images
|
||||
|
||||
** Improvement
|
||||
* [HHH-13864] - Cosmetic change of format when reporting version number
|
||||
|
||||
|
||||
Changes in 5.4.11.Final (February 07, 2020)
|
||||
------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
https://hibernate.atlassian.net/projects/HHH/versions/31818/tab/release-report-done
|
||||
|
||||
** Bug
|
||||
* [HHH-6615] - int type in Revision number
|
||||
* [HHH-6686] - JPQL operator "is empty" failes for @ElementCollection
|
||||
* [HHH-10844] - Resolve columnDefinition to appropriate sql-type for audit mappings
|
||||
* [HHH-13373] - Hibernate report query hibernate_sequence table error in spring-boot application starting on a multi-database mariadb server
|
||||
* [HHH-13456] - ForeignGenerator Throws ClassCastException When Using StatelessSession
|
||||
* [HHH-13472] - Error creating hibernate_sequence in MariaDB 10.3
|
||||
* [HHH-13644] - NullPointerException when calling StoredProcedureQuery.getResultStream() instead of StoredProcedureQuery.getResultList()
|
||||
* [HHH-13677] - org.hibernate.flushMode property not applied
|
||||
* [HHH-13704] - Make sure javassist is really an optional dependency
|
||||
* [HHH-13752] - Delete doesn't work when many-to-many uses non-primary key for join table
|
||||
* [HHH-13759] - Bytecode enhancement fails for an embedded field in a MappedSuperclass
|
||||
* [HHH-13760] - Envers tries to use relationship's entity as value for column instead of numeric identifier (cast class exception happens) for LAZY @ManyToOne
|
||||
* [HHH-13770] - Envers - modified flag column value set to null from 5.4.7 onwards
|
||||
* [HHH-13780] - Allow NamedQuery to set hint QueryHints.PASS_DISTINCT_THROUGH
|
||||
* [HHH-13783] - org.hibernate.MappingException: The increment size of the sequence is set to [10] in the entity mapping while … size is [1]
|
||||
* [HHH-13792] - L2 entity cache is evicted prior to committing transaction for HQL/native updates
|
||||
* [HHH-13796] - Missing from clause in query from BinaryLogicOperatorNode row value constructor translation
|
||||
* [HHH-13804] - HibernateProxy might need to be instantiated even with build-time enhancement
|
||||
* [HHH-13806] - CoreMessageLogger#unableToLoadCommand is not printing the cause of the error
|
||||
* [HHH-13808] - Incorrect String format in log
|
||||
* [HHH-13831] - Replaced listener is not called when EventListenerGroup#fireEventOnEachListener is called
|
||||
|
||||
** Task
|
||||
* [HHH-13726] - Extract org.hibernate.internal.SessionFactoryImpl#prepareEventListeners from SessionFactoryImpl
|
||||
* [HHH-13767] - Remove mention of Oracle and DB2 not being in MC
|
||||
* [HHH-13821] - Update to Byte Buddy 1.10.7
|
||||
* [HHH-13822] - OSGi integration tests need to be able to download dependencies from Maven Central using HTTPS
|
||||
* [HHH-13823] - Various visibility changes to help prototyping of Hibernate RX
|
||||
* [HHH-13833] - Byte Buddy enhancer should use ASM7 opcodes to improve compatibility with code compiled for Java 11
|
||||
* [HHH-13837] - Initialize the Hibernate VERSION as a real constant
|
||||
* [HHH-13838] - Allow extension of PersistenceXmlParser
|
||||
* [HHH-13849] - Convert ProxyFactoryFactory and BytecodeProvider into a Service
|
||||
|
||||
** Improvement
|
||||
* [HHH-8776] - Ability for JPA entity-graphs to handle non-lazy attributes as lazy
|
||||
* [HHH-11958] - Apply QueryHints.HINT_READONLY to load operations
|
||||
* [HHH-12856] - Upgrade DB2400 dialect to use the DB2 for i improvements
|
||||
* [HHH-13390] - Upgrade JPA MetaModel Generator (jpamodelgen) to support Gradle Incremental Compile
|
||||
* [HHH-13800] - Correct some typos in the javadocs of hibernate-core module
|
||||
* [HHH-13802] - fix javadoc warnings in 'hibernate-core'
|
||||
* [HHH-13809] - Various improvements in the user guides
|
||||
* [HHH-13830] - Fixing typo on the build task description
|
||||
* [HHH-13832] - Optimise setting of default Flush Mode on a newly created Session
|
||||
* [HHH-13850] - Clear the BytecodeProvider caches both after SessionFactory creation and stop
|
||||
* [HHH-13851] - Rework initialization of ProxyFactoryFactory to move responsibility out of PojoEntityTuplizer
|
||||
* [HHH-13854] - Allow extensions of StandardServiceRegistryBuilder to ignore Environment variables
|
||||
|
||||
|
||||
Changes in 5.4.10.Final (December 05, 2019)
|
||||
------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
https://hibernate.atlassian.net/projects/HHH/versions/31811/tab/release-report-done
|
||||
|
||||
** Bug
|
||||
* [HHH-9301] - Group by on alias doesn't replace alias
|
||||
* [HHH-12895] - Extra LEFT JOIN generated with @ManyToOne and @JoinTable when projecting on main entity id
|
||||
* [HHH-13355] - StaleStateException for updates to optional secondary table using saveOrUpdate
|
||||
* [HHH-13365] - Entities in joined subclass table are not inserted with batch size > 0 using sequence-identity ID generator
|
||||
* [HHH-13608] - Oracle8iDialect should use CASE_INSENSITIVE pattern matching when checking the statement type
|
||||
* [HHH-13722] - ArrayStoreException in Constraint.generateName
|
||||
* [HHH-13737] - Add debug logging and a test case for HHH-13433
|
||||
* [HHH-13742] - Missing from clause with joined inheritance property in association subquery
|
||||
* [HHH-13758] - Limit Handler for SQL server doesn't work with CTE queries with strings literals
|
||||
* [HHH-13764] - Annotations are ignored during enhancement if they are on the getter instead of the field
|
||||
|
||||
** Task
|
||||
* [HHH-13739] - Upgrade to Agroal 1.7
|
||||
* [HHH-13761] - Debug logging of JPA compliance settings didn't log the value of the settings
|
||||
* [HHH-13762] - Update vibur-dbcp dependency to 25.0
|
||||
|
||||
** Improvement
|
||||
* [HHH-8091] - Hibernate produces SQL - "in ()" - which is invalid in at least Oracle, MySQL and Postgres
|
||||
* [HHH-13755] - Update Hibernate Gradle Plugin example in the documentation
|
||||
|
||||
|
||||
Changes in 5.4.9.Final (November 14, 2019)
|
||||
------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
https://hibernate.atlassian.net/projects/HHH/versions/31806/tab/release-report-done
|
||||
|
||||
** Bug
|
||||
* [HHH-12030] - Symbol$TypeVariableSymbol cannot be cast to TypeElement
|
||||
* [HHH-13307] - On release of batch it still contained JDBC statements using JTA
|
||||
* [HHH-13433] - EntityManager.find() should only check for roll-back-only condition if there is an active JTA transaction, otherwise ORM should throw convert( e, lockOptions )
|
||||
* [HHH-13614] - Allow the IntegratorProvider to be supplied via its FQN in the JPA persistence.xml
|
||||
* [HHH-13670] - Missing from clause in query with joined inheritance, regression in 5.4.5
|
||||
* [HHH-13687] - TenantSchemaResolver not called in integration test after upgrade from 5.4.4 to >=5.4.5
|
||||
* [HHH-13690] - Multi-tenancy supporting session factories can not be created
|
||||
* [HHH-13698] - Hibernate does not recognize MySQL 8 error code 3572 as PessimisticLockException
|
||||
* [HHH-13700] - Configuration property CONNECTION_PROVIDER_DISABLES_AUTOCOMMIT should not be passed to the JDBC connection properties
|
||||
* [HHH-13705] - Enhancement as Proxy with inline dirty checking - flush of an @ManyToOne with an Embedded value having not null properties causes PropertyValueException
|
||||
* [HHH-13710] - Wrong tenant-identifier in Envers temporary session
|
||||
* [HHH-13712] - inheritance - select count query is not working with inheritance
|
||||
* [HHH-13727] - h2 database with DATABASE_TO_UPPER=false throws org.h2.jdbc.JdbcSQLSyntaxErrorException: Table "sequences" not found
|
||||
|
||||
** Task
|
||||
* [HHH-13730] - Upgrade to Classmate 1.4.0
|
||||
* [HHH-13731] - Upgrade to Classmate 1.5.1
|
||||
* [HHH-13733] - Upgrade to Jandex 2.1.1.Final
|
||||
|
||||
** Improvement
|
||||
* [HHH-13654] - Avoid clearing of collections when closing StatefulPersistenceContext
|
||||
* [HHH-13723] - Hint sizing of ArrayList in ResultSetProcessingContextImpl
|
||||
|
||||
|
||||
Changes in 5.4.8.Final (October 28, 2019)
|
||||
------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
https://hibernate.atlassian.net/projects/HHH/versions/31804/tab/release-report-done
|
||||
|
||||
** Bug
|
||||
* [HHH-12965] - Hibernate Envers Audit tables are created with foreign key with the entity. Because of this I am not able to delete any entries from the entity tables.
|
||||
* [HHH-13446] - java.lang.VerifyError from compile-time enhanced @Entity
|
||||
* [HHH-13651] - NPE on flushing when ElementCollection field contains null element
|
||||
* [HHH-13695] - DDL export forgets to close a Statement
|
||||
* [HHH-13696] - Multiple OSGi bundles initializing concurrently would overlap classloaders
|
||||
|
||||
** Improvement
|
||||
* [HHH-13686] - Upgrade to Agroal 1.6
|
||||
|
||||
|
||||
Changes in 5.4.7.Final (October 21, 2019)
|
||||
------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
https://hibernate.atlassian.net/projects/HHH/versions/31799/tab/release-report-done
|
||||
|
||||
** Bug
|
||||
* [HHH-4235] - MapBinder.createFormulatedValue() does not honor DB schema name when creating query
|
||||
* [HHH-13633] - Bugs join-fetching a collection when scrolling with a stateless session using enhancement as proxy
|
||||
* [HHH-13634] - PersistenceContext can get cleared before load completes using StatelessSessionImpl
|
||||
* [HHH-13640] - Uninitialized HibernateProxy mapped as NO_PROXY gets initialized when reloaded with enhancement-as-proxy enabled
|
||||
* [HHH-13653] - Uninitialized entity does not get initialized when a setter is called with enhancement-as-proxy enabled
|
||||
* [HHH-13655] - Envers Map<Enum, Integer> causes NullPointerException when mapped with @MapKeyEnumerated since Hibernate 5.4.6
|
||||
* [HHH-13663] - Session#setHibernateFlushMode() method not callable without an active transaction
|
||||
* [HHH-13665] - Selecting an entity annotated with @Immutable but not with @Cachable causes a NPE when use_reference_entries is enabled
|
||||
* [HHH-13672] - The temporary PersistenceContext of a StatelessSession is not cleared after a refresh operation
|
||||
* [HHH-13675] - Optimize PersistentBag.groupByEqualityHash()
|
||||
|
||||
** New Feature
|
||||
* [HHH-10398] - _MOD columns not named correctly when using custom column names
|
||||
|
||||
** Task
|
||||
* [HHH-13680] - Upgrade to Byte Buddy 1.10.2
|
||||
* [HHH-13681] - Upgrade to Byteman 4.0.8
|
||||
|
||||
** Improvement
|
||||
* [HHH-12858] - integration overrides during JPA bootstrap ought to override all logically related settings
|
||||
* [HHH-13432] - Have EntityManagerFactory expose persistence.xml `jta-data-source` element as a `javax.persistence.nonJtaDataSource` property
|
||||
* [HHH-13660] - Reduce allocation costs of IdentityMaps used by ResultSetProcessingContextImpl
|
||||
* [HHH-13662] - Avoid initializing XmlMappingBinderAccess when no XML mappings are defined
|
||||
* [HHH-13666] - AssertionFailure: Exception releasing cache locks upon After/BeforeTransactionCompletionProcess failure
|
||||
* [HHH-13673] - Cryptic error when providing import.sql file without a terminal char at the end of each line
|
||||
|
||||
|
||||
Changes in 5.4.6.Final (September 30, 2019)
|
||||
------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
https://hibernate.atlassian.net/projects/HHH/versions/31794/tab/release-report-done
|
||||
|
||||
** Bug
|
||||
* [HHH-11797] - Envers Map<Enum, Entity> not auditing correctly
|
||||
* [HHH-13493] - For a native query, the SessionImpl class does not call applyQuerySettingsAndHints
|
||||
* [HHH-13597] - Building DatabaseInformation fails on H2 without DATABASE_TO_UPPER
|
||||
* [HHH-13625] - After upgrading to 5.4.5, it's no longer possible to bootstrap Hibernate if the org.hibernate.cfg LOG is set to DEBUG
|
||||
* [HHH-13645] - StatsNamedContainer#getOrCompute throws NullPointerException when computed value is null
|
||||
|
||||
** Improvement
|
||||
* [HHH-13130] - Provide Gradle-based bytecode enhancement as a task separate from the compileJava task
|
||||
|
||||
|
||||
Changes in 5.4.5.Final (September 17, 2019)
|
||||
------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
https://hibernate.atlassian.net/projects/HHH/versions/31779/tab/release-report-done
|
||||
|
||||
** Bug
|
||||
* [HHH-13259] - StackOverflowError from StringHelper
|
||||
* [HHH-13466] - ClassCastException when changing a Collection association to a Set if @PreUpdate listener exists
|
||||
* [HHH-13544] - Restore logged warning on jdbc code mapping issue in NationalizedTypeMappings
|
||||
* [HHH-13550] - Fix Oracle failure for test added by HHH-13424
|
||||
* [HHH-13551] - StrategyRegistrationProvider does not properly handle implementations from different classloader
|
||||
* [HHH-13554] - QueryAndSQLTest.testNativeQueryWithFormulaAttributeWithoutAlias() fails on Oracle, MSSQL, Sybase, DB2, MariaDB
|
||||
* [HHH-13555] - FetchGraphTest, MergeProxyTest and ProxyDeletionTest fail due to ConstraintViolationException
|
||||
* [HHH-13556] - Tests doing dynamic fetch scrolling a collection fail on DB2
|
||||
* [HHH-13557] - LocalTimeTest#writeThenNativeRead and OffsetTimeTest#writeThenNativeRead tests are failing on SQL Server
|
||||
* [HHH-13558] - InstantTest, LocalDateTimeTest, OffsetDateTimeTest, ZonedDateTimeTest failing on Sybase for year 1600
|
||||
* [HHH-13564] - Envers - Getting NPE while reading revisions of entity with @EmbeddedId composite key located in parent @MappedSuperclass
|
||||
* [HHH-13569] - org.hibernate.test.annotations.embedded.EmbeddedTest failures on Sybase
|
||||
* [HHH-13570] - Test failures due to Sybase not supporting UPDATE statement with WITH(NOWAIT)
|
||||
* [HHH-13571] - Test failures due to cross joined table out of scope of a subsequent JOIN on Sybase
|
||||
* [HHH-13573] - Test failure due to Sybase not supporting cascade delete on foreign key definitions
|
||||
* [HHH-13574] - SybaseASE does not support PARTITION BY
|
||||
* [HHH-13577] - LockTest.testContendedPessimisticLock and StatementIsClosedAfterALockExceptionTest.testStatementIsClosed tests fail on Sybase
|
||||
* [HHH-13580] - LocalTimeTest#writeThenNativeRead* and OffsetTimeTest#writeThenNativeRead* failing on MySQL
|
||||
* [HHH-13581] - LocalTimeTest#writeThenRead* and OffsetTimeTest#writeThenRead* failing on MariaDB
|
||||
* [HHH-13582] - LocalDateTest failures on MySQL
|
||||
* [HHH-13586] - ClassCastException when using a single region name for both entity and query results
|
||||
* [HHH-13590] - TransientObjectException merging a non-proxy association to a HibernateProxy
|
||||
* [HHH-13592] - AutoFlushEvent#isFlushRequired is always false
|
||||
* [HHH-13607] - Exception thrown while flushing uninitialized enhanced proxy with immutable natural ID
|
||||
* [HHH-13611] - Restore EntityMetamodel constructor to take SessionFactoryImplementor argument instead of PersisterCreationContext.
|
||||
* [HHH-13616] - Enable the hibernate-orm-modules test for JDK 11
|
||||
* [HHH-13621] - Exception if spaces after value of javax.persistence.schema-generation.scripts.action in hibernate.properties
|
||||
|
||||
** New Feature
|
||||
* [HHH-13249] - Introduce an option to Log slow queries instead of all queries
|
||||
|
||||
** Task
|
||||
* [HHH-13525] - Make test SessionDelegatorBaseImplTest more resilient to previously existing alias definition
|
||||
* [HHH-13526] - Optimise ResourceRegistryStandardImpl#release
|
||||
* [HHH-13527] - Performance regression in org.hibernate.stat.internal.StatisticsImpl
|
||||
* [HHH-13528] - Invoke afterStatements only at the end of releasing all statements for a batch
|
||||
* [HHH-13529] - Performance regression in org.hibernate.engine.spi.SessionFactoryImplementor#getDialect
|
||||
* [HHH-13531] - Some more opportunities to reuse the constants pool in AliasConstantsHelper
|
||||
* [HHH-13534] - AbstractLoadPlanBasedLoader never needs a List of AfterLoadAction
|
||||
* [HHH-13546] - Make the sessionFactory field in StatisticsImpl required
|
||||
* [HHH-13549] - Cleanup dead code in StringHelper
|
||||
* [HHH-13552] - CollectionType needs a direct reference to its Persister
|
||||
* [HHH-13553] - Fix test failures on SAP HANA
|
||||
* [HHH-13561] - Do not retrieve the same ActionQueue multiple times
|
||||
* [HHH-13562] - List of TransactionObserver for JdbcResourceLocalTransactionCoordinatorImpl should be lazily initialized
|
||||
* [HHH-13563] - ResultSetReturnImpl is looking up JdbcServices on each construction
|
||||
* [HHH-13565] - Improve Session opening efficiency
|
||||
* [HHH-13568] - Instances of NaturalIdXrefDelegate should be lazily initialized if possible
|
||||
* [HHH-13605] - InstantTest, OffsetDateTimeTest, ZonedDateTimeTest fail for MariaDB on CI
|
||||
* [HHH-13606] - LocalDateTimeTest fails for HANA on CI
|
||||
* [HHH-13622] - Upgrade the WildFly Transaction Client to 1.1.7.Final
|
||||
|
||||
** Improvement
|
||||
* [HHH-13133] - Print message about 'successfully enhanced class' as debug in Maven enhancement plugin
|
||||
* [HHH-13412] - Move hibernate.connection description out of c3p0 section
|
||||
* [HHH-13512] - Avoid allocating an array in org.hibernate.internal.util.StringHelper#unquote(String[], Dialect) if there are no changes to be applied
|
||||
* [HHH-13521] - Avoid excessive validation of enabled filters
|
||||
* [HHH-13522] - Optimise LoadQueryInfluencers by making maps lazily initialized
|
||||
* [HHH-13523] - StatementPreparerImpl should not need to retrieve the JDBCService as often
|
||||
* [HHH-13524] - Remove unused fields xref,unassociatedResultSets from JdbcCoordinatorImpl
|
||||
* [HHH-13541] - ExceptionConverter instance in AbstractSharedSessionContract should be lazily initialized
|
||||
* [HHH-13548] - Since SessionOwner is deprecated several fields in SessionImpl can be removed
|
||||
* [HHH-13576] - Invoking tracef() or debugf() w/o an array of parameters actually allocates an empty Object[]
|
||||
* [HHH-13579] - Cleanup of resources in ResourceRegistryStandardImpl allocates many Iterators
|
||||
* [HHH-13584] - Reduce ServiceRegistry lookups in LocalConnectionAccess in SessionFactory
|
||||
* [HHH-13585] - Duplicate resource release in PessimisticReadSelectLockingStrategy
|
||||
* [HHH-13587] - Initialize selected collections of StatefulPersistenceContext lazily
|
||||
* [HHH-13588] - MySQL Dialect: missed functions: weight_string, to_base64, from_base64, regexp_replace, regexp_instr, regexp_substr
|
||||
* [HHH-13589] - Minor memory allocation improvements in ActionQueue
|
||||
* [HHH-13591] - Replaces simple uses of array iteration with a corresponding for-each loop
|
||||
* [HHH-13594] - ResourceRegistryStandardImpl#release could avoid allocating a capturing lambda
|
||||
* [HHH-13599] - Avoid ArrayList allocation in JtaTransactionCoordinatorImp in common scenario
|
||||
* [HHH-13600] - Avoid allocation of capturing lambdas in ParameterTranslationsImpl and AbstractDomainDataRegion
|
||||
|
||||
** Deprecation
|
||||
* [HHH-13595] - Deprecate ConnectionObserver
|
||||
|
||||
|
||||
Changes in 5.4.4.Final (July 29, 2019)
|
||||
------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
https://hibernate.atlassian.net/projects/HHH/versions/31774/tab/release-report-done
|
||||
|
||||
** Bug
|
||||
* [HHH-12642] - Lazy enhanced entity as relationship is always loaded in a criteria query
|
||||
* [HHH-13357] - OffsetTimeTest fails using TimeAsTimestampRemappingH2Dialect in non-GMT European time zones
|
||||
* [HHH-13379] - Regression of Instant serialization
|
||||
* [HHH-13409] - Hibernate ORM does not detect services provided by libraries in the module path
|
||||
* [HHH-13424] - Table nullability should not depend on JpaCompliance.isJpaCacheComplianceEnabled()
|
||||
* [HHH-13443] - Build failing to parse *.properties file attributes containing trailing space
|
||||
* [HHH-13454] - org.hibernate.orm.test.query.criteria.BasicCriteriaExecutionTests fails on Oracle
|
||||
* [HHH-13455] - Enabling Enhancement as a Proxy causes IllegalStateException when using Javassist
|
||||
* [HHH-13459] - Unit test lock up when they run on PostgreSQL
|
||||
* [HHH-13460] - FetchGraphTest is failing on MariaDB
|
||||
* [HHH-13463] - Hibernate has a dependency on plexus-utils:3.0.1 that is vulnerable to CVE-2017-1000487 with a CVSS of 7.5
|
||||
* [HHH-13492] - OptimisticLockException after locking, refreshing, and updating an entity
|
||||
* [HHH-13500] - Subquery of DefaultAuditStrategy results in a wrong revision
|
||||
* [HHH-13505] - NullPointerException thrown by StatisticsImpl#getCacheRegionStatistics
|
||||
* [HHH-13514] - Calling the wrong method inside SessionDelegatorBaseImpl#createStoredProcedureQuery
|
||||
|
||||
** New Feature
|
||||
* [HHH-11147] - Allow enhanced entities to be returned in a completely uninitialized state
|
||||
* [HHH-13154] - Add support for JPA criteria on stateless sessions
|
||||
|
||||
** Task
|
||||
* [HHH-13415] - Improve build compatibility with JDK11.0.3
|
||||
* [HHH-13461] - Style and formatting fixes: CollectionEntry
|
||||
* [HHH-13504] - Upgrade ByteBuddy to 1.9.11
|
||||
* [HHH-13513] - Partial revert of string interning introduced by HHH-3924
|
||||
* [HHH-13520] - Deprecate mutators on SqlStatementLogger
|
||||
|
||||
** Improvement
|
||||
* [HHH-11032] - Improve performance of PersistentBag.equalsSnapshot
|
||||
* [HHH-13226] - Typo in some configuration properties (HBM2DDL vs HBM2DLL)
|
||||
* [HHH-13303] - Fix some alerts from LGTM
|
||||
* [HHH-13428] - Minor cleanup of build scripts
|
||||
* [HHH-13429] - Upgrade WildFly provisioning plugin to version 0.0.11
|
||||
* [HHH-13442] - CollectionType#getCollection() method improvements
|
||||
* [HHH-13444] - Remove ignored EntityMode field from CollectionKey
|
||||
* [HHH-13447] - Minimize number of EventListenerRegistry lookups within a Session use
|
||||
* [HHH-13448] - Avoid retrieving PRE_LOAD and POST_LOAD Event listeners within the inner loops of TwoPhaseLoad
|
||||
* [HHH-13450] - Do not compute the full role name of a collection unless necessary
|
||||
* [HHH-13451] - Logging typo in CascadingActions causing significant allocations
|
||||
* [HHH-13452] - Missing log level guard on formatting in DefaultPersistEventListener#entityIsDeleted
|
||||
* [HHH-13453] - Optimise CascadingActions for the most likely case
|
||||
* [HHH-13458] - Update Hibernate's custom IdentityMap to better match its use
|
||||
* [HHH-13462] - Introduce a fastpath for SessionImpl#fireLoad to be used by internal loops
|
||||
* [HHH-13465] - Allow inlined access to the PersistenceContext for internal methods
|
||||
* [HHH-13467] - Make average BatchFetchQueue consume less memory
|
||||
* [HHH-13471] - Avoid invoking delayedAfterCompletion() multiple times from the same SessionImpl method
|
||||
* [HHH-13475] - SessionImpl#applyQuerySettingsAndHints should not rely on defensive copies to just read properties
|
||||
* [HHH-13476] - Micro-optimisations of TwoPhaseLoad#getOverridingEager
|
||||
* [HHH-13477] - Make heavily invoked method final: EventListenerGroupImpl#listeners()
|
||||
* [HHH-13478] - Various low hanging fruits identified by CPU flame graphs
|
||||
* [HHH-13494] - LobTypeMappings should not use a Bounded ConcurrentHashmap
|
||||
* [HHH-13495] - NationalizedTypeMappings should not use a Bounded ConcurrentHashmap
|
||||
* [HHH-13496] - Apply some minor improvements identified via static code analysis tools
|
||||
* [HHH-13508] - Reuse alias names generated by BasicLoader#generateSuffixes
|
||||
* [HHH-13511] - Remove old org.hibernate.loader.DefaultEntityAliases#intern
|
||||
|
||||
|
||||
Changes in 5.4.3.Final (May 30, 2019)
|
||||
------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
https://hibernate.atlassian.net/projects/HHH/versions/31762/tab/release-report-done
|
||||
|
||||
** Bug
|
||||
* [HHH-13343] - Bytecode enhancement using ByteBuddy fails when the class is not available from the provided ClassLoader
|
||||
* [HHH-13364] - Query.getSingleResult and getResultList() throw PessimisticLockException when pessimistic lock fails with timeout
|
||||
* [HHH-13418] - Revert log level caching in static fields
|
||||
|
||||
** Task
|
||||
* [HHH-13416] - Unguarded debug message being rendered in org.hibernate.engine.internal.Collections.processReachableCollection
|
||||
* [HHH-13419] - Support building javadoc with JDK 11.0.3
|
||||
* [HHH-13421] - Disable OSGi testing for JDK 11+
|
||||
|
||||
|
||||
|
||||
Changes in 5.4.2.Final (March 21, 2019)
|
||||
------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
https://hibernate.atlassian.net/projects/HHH/versions/31748/tab/release-report-done
|
||||
|
||||
** Bug
|
||||
* [HHH-13194] - Some methods returning org.hibernate.query.Query are not defined for StatelessSession
|
||||
* [HHH-13216] - Criteria query doesn't handle BigDecimal correctly
|
||||
* [HHH-13217] - Don't throw exception if both @MappedSuperclass and @Inheritance are used
|
||||
* [HHH-13219] - The sentence "The chapter 6 (e.g. Criteria API)" should be "The chapter 6 (i.e., Criteria API)"
|
||||
* [HHH-13225] - Fix minor version detection in BasicDialectResolver
|
||||
* [HHH-13227] - UnsupportedOperationException when programmatically overriding hibernate.ejb.cfgfile
|
||||
* [HHH-13228] - The modification of a @OneToOne with @MapsId property is silently ignored during a merge operation
|
||||
* [HHH-13229] - Sequences in MariaDB doesn't work on existing sequence
|
||||
* [HHH-13230] - The AvailableSettings.HBM2DDL_HALT_ON_ERROR setting does not accept String values
|
||||
* [HHH-13233] - Eager loading via EntityGraph doesn't work with subgraph
|
||||
* [HHH-13236] - @Column insertable and updatable on ElementCollection items' fields are ignored when generating statements
|
||||
* [HHH-13239] - The query hint javax.persistence.lock.timeout doesn't work correctly on HANA
|
||||
* [HHH-13241] - Constraint violation when deleting entites in bi-directional, lazy OneToMany association with bytecode enhancement
|
||||
* [HHH-13244] - setting hibernate.jpa.compliance.proxy=true and org.hibernate debug level to DEBUG breaks hibernate
|
||||
* [HHH-13256] - Fix the fieldsPreUpdateNeeded property index allocation in AbstractEntityPersister#update
|
||||
* [HHH-13262] - javax.persistence.TransactionRequiredException: Executing an update/delete query
|
||||
* [HHH-13265] - Remove double semicolon
|
||||
* [HHH-13266] - LocalDateTime values are wrong around 1900 (caused by JDK-8061577)
|
||||
* [HHH-13269] - Embeddable collection regression due to HHH-11544
|
||||
* [HHH-13277] - HibernateMethodLookupDispatcher - Issue with Security Manager
|
||||
* [HHH-13281] - java.lang.ClassCastException: org.hibernate.internal.SessionImpl cannot be cast to org.hibernate.ejb.HibernateEntityManager
|
||||
* [HHH-13285] - ClassCastException: org.dom4j.DocumentFactory cannot be cast to org.dom4j.DocumentFactory after dom4j update
|
||||
* [HHH-13300] - query.getSingleResult() throws org.hibernate.NonUniqueResultException instead of javax.persistence.NonUniqueResultException
|
||||
* [HHH-13309] - Extended bean managers implementing the new interface are not correctly detected
|
||||
* [HHH-13324] - NullPointerException if security manager is set after startup
|
||||
* [HHH-13326] - Transaction passed to Hibernate Interceptor methods is null when JTA is used
|
||||
|
||||
** New Feature
|
||||
* [HHH-13202] - Add support for PostgreSQL "GENERATED BY DEFAULT AS IDENTITY"
|
||||
|
||||
** Task
|
||||
* [HHH-13232] - Upgrade ByteBuddy to 1.9.8
|
||||
* [HHH-13238] - Reuse static logger in QueryTranslatorFactoryInitiator
|
||||
* [HHH-13254] - Upgrade Agroal to 1.4
|
||||
* [HHH-13258] - ASTQueryTranslatorFactory.createQueryTranslator undocumented Parameter
|
||||
* [HHH-13271] - Javadoc build failures on JDK 12
|
||||
* [HHH-13272] - Upgrade to Byte Buddy 1.9.10
|
||||
* [HHH-13275] - Re-introduce usage of net.bytebuddy.experimental=true when testing on JDK > 11
|
||||
* [HHH-13304] - MySQLDialect shouldn't access System Properties
|
||||
* [HHH-13305] - Deprecate public static helpers returning the current Dialect
|
||||
* [HHH-13306] - Remove verbose logging from the standard ExceptionMapper
|
||||
* [HHH-13332] - Upgrade to c3p0 0.9.5.3
|
||||
|
||||
** Improvement
|
||||
* [HHH-6190] - Leverage JmxService to expose management and monitoring capabilities.
|
||||
* [HHH-10148] - SQLServer2012SpatialDialect
|
||||
* [HHH-13220] - In the ByteBuddy enhancer, avoid creating a PersistentAttributeTransformer if the class is not enhanced
|
||||
* [HHH-13257] - Support mysql8 spatial
|
||||
|
||||
** Patch
|
||||
* [HHH-13330] - Minor typo in ManagedBeanRegistryInitiator
|
||||
|
||||
Changes in 5.4.1.Final (January 19, 2019)
|
||||
------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
https://hibernate.atlassian.net/projects/HHH/versions/31726/tab/release-report-done
|
||||
|
||||
** Bug
|
||||
* [HHH-11807] - Enhanced entity delete not cascaded for some case
|
||||
* [HHH-12555] - Merging a blob on an entity results in a class cast exception
|
||||
* [HHH-13059] - OneToMany with referencedColumnName returns too many entities
|
||||
* [HHH-13068] - "order_inserts = true" causes FK Violation when inserting Self Referential Entity with Single_Table Inherited Entities
|
||||
* [HHH-13080] - ManyToMany List update with detached entities throws EntityExistsException
|
||||
* [HHH-13084] - Querying entity with non-ID property named 'id' fails if entity has an IdClass composite key
|
||||
* [HHH-13094] - Setting @Any.fetch to FetchType.EAGER doesn't work
|
||||
* [HHH-13104] - Oracle 12c / SAP Hana insert fails when entity contains only an identity-based column.
|
||||
* [HHH-13114] - Query "select count(h) from Human h" fails if a subclass has a non-Id property named "id"
|
||||
* [HHH-13129] - Cascaded merge fails for detached bytecode-enhanced entity with uninitialized ToOne
|
||||
* [HHH-13138] - Work around class loading issues so that bytecode enhanced tests can run as expected
|
||||
* [HHH-13145] - Generated metamodel class can't be compiled.
|
||||
* [HHH-13146] - Hibernate Ehcache no longer supports the `net.sf.ehcache.hibernate.cache_lock_timeout` configuration property
|
||||
* [HHH-13151] - TreatedRoot misses fetches in query
|
||||
* [HHH-13153] - No content in 15.1. Query API of User Guide
|
||||
* [HHH-13160] - Polymorphic query for InheritanceType.TABLE_PER_CLASS is not using UNION ALL
|
||||
* [HHH-13163] - Fix DDLWithoutCallbackTest#testRangeChecksGetApplied which fails on MariaDB
|
||||
* [HHH-13164] - Detecting transient state of mandatory toOne relations is broken
|
||||
* [HHH-13167] - When omitting the OTHERWISE clause in a CASE expression built with Criteria API, Hibernate throws a NullPointerException
|
||||
* [HHH-13169] - Table alias used instead of exact table name in multitable update query
|
||||
* [HHH-13172] - Log a warning instead of throwing an Exception when @AttributeOverride is used in conjunction with inheritance
|
||||
* [HHH-13175] - Eager subsequent-select fails when EntityGraph is specified for find operation
|
||||
* [HHH-13184] - Oracle dialect detection does not return latest dialect in the default case
|
||||
* [HHH-13189] - org.hibernate.Query#setParameter(String, Object) is extremely slow
|
||||
* [HHH-13191] - LazyInitializationException when Envers persists audit data that uses a proxy with JPA_PROXY_COMPLIANCE enabled under JTA
|
||||
* [HHH-13192] - Select alias in Criteria API seems to bleed into where condition
|
||||
* [HHH-13199] - NullPointerException when using case on select clause using JPA Criteria API
|
||||
|
||||
** New Feature
|
||||
* [HHH-13204] - Introduce a configuration flag to skip processing of XML mapping metadata
|
||||
* [HHH-13209] - Experimental feature: Allow for ServiceRegistry "suspend and restore"
|
||||
|
||||
** Task
|
||||
* [HHH-13197] - Reduce bootstrap log verbosity
|
||||
* [HHH-13198] - Introduce a global configuration flag to disable JPA callbacks
|
||||
* [HHH-13210] - Don't log about running a script of type ScriptSourceInputNonExistentImpl
|
||||
* [HHH-13211] - Reduce logging verbosity of QueryTranslatorFactoryInitiator
|
||||
|
||||
** Improvement
|
||||
* [HHH-12878] - StaleStateException does not log out the stale object or the statement that was executed
|
||||
* [HHH-13162] - Upgrade MySQL and MariaDB Dialects as they support UNION ALL
|
||||
* [HHH-13165] - Don't use confusing "this" in the User Guide admonition blocks
|
||||
* [HHH-13181] - Reduce bootstrap log verbosity
|
||||
* [HHH-13186] - MariaDB dialect detection does not return the latest by default
|
||||
* [HHH-13206] - Apply dialect checks from both method and class level.
|
||||
|
||||
|
||||
|
||||
Changes in 5.4.0.Final (December 12, 2018)
|
||||
------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
https://hibernate.atlassian.net/projects/HHH/versions/31738/tab/release-report-done
|
||||
|
||||
** Bug
|
||||
* [HHH-13110] - @PreUpdate method on a Embeddable null on the parent caused NullPointerException
|
||||
* [HHH-13141] - Could not fetch the SequenceInformation from the database when using SQL Server with a case-sensitive collation
|
||||
* [HHH-13147] - DelayedPostInsertIdentifier handling broken since 5.3.0
|
||||
|
||||
** New Feature
|
||||
* [HHH-13083] - Add st_makeenvelope to the supported PostGIS functions in hibernate-spatial
|
||||
|
||||
** Task
|
||||
* [HHH-13095] - Document how to use arithmetic expressions in CASE statements
|
||||
* [HHH-13096] - Document that composite identifier cannot use auto-generated properties
|
||||
|
||||
** Improvement
|
||||
* [HHH-10778] - Add support for non-public AttributeConverter implementations
|
||||
* [HHH-13144] - Move the doInAutoCommit utility to TransactionUtil
|
||||
* [HHH-13156] - Enhance the @AnyMetaDef annotation section with more details about the optimal placement
|
||||
|
||||
|
||||
|
||||
Changes in 5.4.0.CR2 (November 29, 2018)
|
||||
------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
https://hibernate.atlassian.net/projects/HHH/versions/31729/tab/release-report-done
|
||||
|
||||
** Bug
|
||||
* [HHH-13107] - JtaWithStatementsBatchTest fails on Oracle
|
||||
* [HHH-13111] - Subquery cannot occur in select clause anymore for Criteria API
|
||||
* [HHH-13112] - Proxies on entity types in the default package lead to MappingException with JDK9+
|
||||
* [HHH-13116] - Jaxb-runtime transitive dependencies are ignored when building a Maven project on JDK11 with a dependency to Hibernate ORM
|
||||
* [HHH-13118] - Use the provided class loader even when using the default URI
|
||||
* [HHH-13128] - Missing jaxb-runtime dependency for hibernate-jpamodelgen
|
||||
|
||||
** Task
|
||||
* [HHH-13043] - Upgrade to JAXB 2.3
|
||||
|
||||
** Improvement
|
||||
* [HHH-13102] - Document how catalog and schema attributes need to be applied based on the underlying DB capabilities
|
||||
* [HHH-13115] - Document how to define timezone per tenant when using Multitenant Database
|
||||
* [HHH-13124] - Document the CachingRegionFactory resolution algorithm has changed
|
||||
* [HHH-13125] - Remove the Javadoc links in the User Guide pointing internal classes
|
||||
* [HHH-13127] - Document JAXB dependencies should be added for using hibernate-jpamodelgen in Eclipse IDE
|
||||
|
||||
|
||||
|
||||
Changes in 5.4.0.CR1 (November 15, 2018)
|
||||
------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
https://hibernate.atlassian.net/projects/HHH/versions/31691/tab/release-report-done
|
||||
|
||||
** Bug
|
||||
* [HHH-7686] - org.hibernate.proxy.map.MapProxy loses all important state on serialization
|
||||
* [HHH-8805] - [SchemaUpdate] javax.persistence.ForeignKey doesn't respect ConstraintMode.NO_CONSTRAINT
|
||||
* [HHH-10201] - Hibernate does not audit @ElementCollection when revision_on_collection_change is false
|
||||
* [HHH-10603] - ORA-00932: inconsistent datatypes: expected - got BLOB after HHH-10345 with Oracle12cDialect
|
||||
* [HHH-10891] - Exception at bootstrap when @Any is inside an @Embeddable object
|
||||
* [HHH-11096] - @CreationTimestamp doesn't works with @Column(nullable=false)
|
||||
* [HHH-11209] - NullPointerException in EntityType.replace() with a PersistentBag
|
||||
* [HHH-11771] - @Id annotation ignored with @MappedSuperclass inheritance
|
||||
* [HHH-12200] - Docs mention outdated APIs
|
||||
* [HHH-12320] - @JoinColumn's foreign key custom name does not work with @MapsId
|
||||
* [HHH-12425] - LazyInitializationIssue when enhancement enableDirtyTracking is enabled after session is committed
|
||||
* [HHH-12436] - Attempted to assign id from null one-to-one property
|
||||
* [HHH-12492] - JPA delete query generated has missing table alias and thus incorrect semantics
|
||||
* [HHH-12542] - WildFly integration test, HibernateNativeAPINaturalIdTestCase, fails when security manager is enabled
|
||||
* [HHH-12666] - Add an option for restoring 5.1 native exception handling
|
||||
* [HHH-12675] - @Table(Inverse=true) ignored for SecondaryTables in JoinedSubclassEntityPersister
|
||||
* [HHH-12695] - Incompatibility in return value for org.hibernate.procedure.ParameterRegistration.getType() 5.1 vs 5.3
|
||||
* [HHH-12718] - Entity changes in @PreUpdate callback are not persisted when lazy loading is active for more than one field
|
||||
* [HHH-12720] - LazyInitializationException with hibernate.enable_lazy_load_no_trans
|
||||
* [HHH-12740] - Subselect fetching doesn't work when multiLoad was used
|
||||
* [HHH-12753] - org.hibernate.envers.test.integration.collection.StringMapNationalizedLobTest fails with DB2
|
||||
* [HHH-12768] - TimeAndTimestampTest fails with SQL Server and MYSQL
|
||||
* [HHH-12771] - Caused by: java.lang.UnsupportedOperationException: Cache provider [org.hibernate.cache.ehcache.internal.EhcacheRegionFactory@3271ec2a] does not support `transactional` access
|
||||
* [HHH-12776] - NullPointerException when executing native query on an Audited Entity
|
||||
* [HHH-12779] - Revert HHH-12670 - Allows native SQL queries that take a given resultClass to map the result set to the required type
|
||||
* [HHH-12781] - Update Javassist dependency to 3.23.1
|
||||
* [HHH-12784] - Javassist support broken by HHH-12760
|
||||
* [HHH-12786] - Deleting an entity leads to NullPointerException in ByteBuddy proxy
|
||||
* [HHH-12787] - SessionJdbcBatchTest hangs with DB2
|
||||
* [HHH-12791] - ComponentTuplizer generates a LOT of proxy classes when using Bytebuddy as bytecode provider
|
||||
* [HHH-12795] - Setting FlushMode to manual for a @NamedQuery is ignored
|
||||
* [HHH-12797] - Fix cache modes relationships table layout in the documentation
|
||||
* [HHH-12798] - Nested spatial functions are not rendered correctly on SAP HANA
|
||||
* [HHH-12800] - TuplizerInstantiatesByteBuddySubclassTest uses ByteBuddy operation unsafe with JDK 11
|
||||
* [HHH-12802] - Hibernate does not throw an exception when more than one entity is loaded with the same ID
|
||||
* [HHH-12815] - LocalDateCustomSessionLevelTimeZoneTest fails with mysql 5.5 and 5.7
|
||||
* [HHH-12822] - Skip "case when" tests requiring casts for DB2
|
||||
* [HHH-12823] - CompositeIdTest.testDistinctCountOfEntityWithCompositeId fails on databases that don't support tuple distinct counts because it expects wrong exception
|
||||
* [HHH-12824] - ASTParserLoadingTest.testComponentNullnessChecks fail with DB2 because it uses legacy-style query parameter
|
||||
* [HHH-12825] - CriteriaHQLAlignmentTest.testCountReturnValues fails on databases that don't support tuple distinct counts because it expects wrong exception
|
||||
* [HHH-12826] - Persist cascade of collection fails when orphan removal enabled with flush mode commit.
|
||||
* [HHH-12827] - NUMERIC column type is not handled correctly on DB2
|
||||
* [HHH-12829] - Invalid references to outdated EhCache classes
|
||||
* [HHH-12832] - SchemaUpdateHaltOnErrorTest and SchemaMigratorHaltOnErrorTest fail with DB2
|
||||
* [HHH-12833] - UniqueConstraintDropTest fails with DB2
|
||||
* [HHH-12834] - org.hibernate.envers.test.integration.collection.StringMapNationalizedLobTest fails with Sybase
|
||||
* [HHH-12835] - Wrong assertion in BatchFetchQueueHelper
|
||||
* [HHH-12838] - AndNationalizedTests fails with DB2
|
||||
* [HHH-12839] - EntityProxySerializationTest fails with oracle
|
||||
* [HHH-12842] - Non-optional OneToOne relation can't be lazy loaded anymore
|
||||
* [HHH-12843] - CreateDeleteTest and FlushIdGenTest fail with ORA-00936 on oracle
|
||||
* [HHH-12844] - HbmWithIdentityTest fails with ORA-00936 on oracle
|
||||
* [HHH-12846] - Merge cascade of collection fails when orphan removal enabled with flush mode commit
|
||||
* [HHH-12847] - NullPointerException in FetchStyleLoadPlanBuildingAssociationVisitationStrategy::adjustJoinFetchIfNeeded
|
||||
* [HHH-12848] - UpgradeSkipLockedTest, PessimisticReadSkipLockedTest and OracleFollowOnLockingTest fail with Oracle12c
|
||||
* [HHH-12849] - QuotedIdentifierTest fails with ORA-04043 on Oracle12c
|
||||
* [HHH-12850] - null values for columns mapped as "Boolean" cause exception when saving entity with SAP Sql Anywhere jdbc4
|
||||
* [HHH-12851] - ConverterTest fails with SQL Server depending on collation
|
||||
* [HHH-12861] - SchemaUpdate doesn't work with Sybase
|
||||
* [HHH-12863] - SchemaUpdateTest should be skipped with Sybase
|
||||
* [HHH-12868] - Using CacheConcurrencyStrategy.NONE leads to a NPE when trying to load an entity
|
||||
* [HHH-12869] - SingletonEhcacheRegionFactory initialization fails
|
||||
* [HHH-12871] - Metamodel contains managed types related to dynamic-map entities that have been excluded.
|
||||
* [HHH-12875] - Class level where="..." clause in hbm.xml mappings is not enforced on collections of that class
|
||||
* [HHH-12880] - LockModeTest hangs indefinitely with Sybase due to HHH-12847
|
||||
* [HHH-12882] - Where clauses mapped on collections and entities need parentheses when used in conjunction
|
||||
* [HHH-12883] - NaturalIdDereferenceTest fails with Sybase
|
||||
* [HHH-12890] - Fix link to JPA Metamodel generator documentation
|
||||
* [HHH-12903] - CommitFlushCollectionTest fails when running on Oracle.
|
||||
* [HHH-12905] - Passing null as parameter is not allowed even when enablePassingNulls() has been called
|
||||
* [HHH-12906] - Statistics.getCollectionRoleNames() reports incorrect value
|
||||
* [HHH-12913] - AuditJoinTable does not work when specified in an AuditOverride annotation.
|
||||
* [HHH-12915] - Concurrency issue within org.hibernate.internal.SessionFactoryImpl
|
||||
* [HHH-12920] - AbstractCachedDomainDataAccess.clearCache() throws MissingFormatArgumentException at DEBUG level
|
||||
* [HHH-12921] - Hibernate Connection Pool Validation Thread should be defined as a daemon Thread
|
||||
* [HHH-12927] - Immutable warning issued if immutable entities are referenced but not changed during update
|
||||
* [HHH-12931] - Revert HHH-12542 as it introduces some issues with the security manager
|
||||
* [HHH-12932] - Add privileged blocks in ByteBuddyState initialization
|
||||
* [HHH-12933] - Generate_statistics grows QueryStatistics ConcurrentHashMap indefinitely
|
||||
* [HHH-12934] - Exception handling documentation does not apply only to "Session-per-application anti-pattern"
|
||||
* [HHH-12935] - Constraint and AuxiliaryDatabaseObject export identifiers are not qualified by schema or catalog
|
||||
* [HHH-12937] - Where clause for collections of basic, embeddable and "any" elements is ignored when mapped using hbm.xml
|
||||
* [HHH-12939] - Database name not quoted at schema update
|
||||
* [HHH-12944] - MultiIdentifierLoadAccess ignores the 2nd level cache
|
||||
* [HHH-12945] - MapJoin#entry() does not seem to work.
|
||||
* [HHH-12955] - hibernate.hbm2ddl.auto option 'create-only' not recognized
|
||||
* [HHH-12963] - HANA dialect doesn't set internal configuration for use_unicode_string_types correctly
|
||||
* [HHH-12964] - Upgrade to dom4j 2.1.1
|
||||
* [HHH-12966] - Make statistics serializable to allow JMX access
|
||||
* [HHH-12968] - Flush is not flushing inserts for inherited tables before a select within a transaction
|
||||
* [HHH-12973] - Inconsistent identity generation when using the default @SequenceGenerator with a database sequence having the increment size of 1
|
||||
* [HHH-12975] - Foreign key constraint cannot be disabled with @JoinColumn(foreignKey=@ForeignKey(NO_CONSTRAINT)) in some cases
|
||||
* [HHH-12979] - Setting hibernate.javax.cache.uri property value as relative path causes an error
|
||||
* [HHH-12990] - JPA Model generator does not work in Java 9+
|
||||
* [HHH-12992] - ClassCastException for an audited ordered collection
|
||||
* [HHH-12995] - Querying DECIMAL columns via Double fields can lead to precision loss on SAP HANA
|
||||
* [HHH-13000] - Association with -ToOne relation with JoinTable can't be refreshed on entity with PESSIMISTIC_LOCK
|
||||
* [HHH-13001] - NPE rendering nested criteria expressions
|
||||
* [HHH-13012] - JDBC default connection release mode does not match documentation
|
||||
* [HHH-13027] - org.hibernate.ejb.HibernatePersistence can no longer be used as a persistence provider name
|
||||
* [HHH-13040] - MetadataBuilderContributor should be applied before MetadataBuildingProcess.prepare
|
||||
* [HHH-13042] - DelayedPostInsertIdentifier cannot be cast to class java.lang.Integer
|
||||
* [HHH-13044] - MapsId and flushMode commit causes PersistenceException
|
||||
* [HHH-13045] - Accessing id property of association within element collection in query leads to exception
|
||||
* [HHH-13050] - On release of batch it still contained JDBC statements logged; unable to release batch statement
|
||||
* [HHH-13053] - DelayedPostInsertIdentifier EXCEPTION on flush of identity created PARENT that has sequence created CHILD with cascade, when batch_update is true
|
||||
* [HHH-13060] - Throw an exception when two entities are declared with the same name
|
||||
* [HHH-13062] - Migrate User Guide links from Java EE 7 to 8
|
||||
* [HHH-13064] - Documentation of Lock and LockModeType is on two columns instead of 3
|
||||
* [HHH-13070] - callbackRegistry is not injected to event listener when appended to listener group using REPLACE_ORIGINAL duplication strategy
|
||||
* [HHH-13076] - Hibernate “Transaction already active” behaviour with custom transaction manager
|
||||
* [HHH-13097] - Hibernate enhancer is superslow after upgrade to latest 5.3 or 5.4-SNAPSHOT
|
||||
|
||||
** New Feature
|
||||
* [HHH-12857] - Support the security manager with ByteBuddy as bytecode provider
|
||||
|
||||
** Task
|
||||
* [HHH-10782] - Add a comment about what you can expect from a query plan cache cleanup
|
||||
* [HHH-12730] - User types built using 5.1 are not binary compatible with 5.3
|
||||
* [HHH-12751] - Remove the hibernate-infinispan OSGi feature as it's now provided by Infinispan
|
||||
* [HHH-12762] - No longer needing to use port-offset in WildFly integration tests
|
||||
* [HHH-12774] - JARs missing from the distribution ZIP
|
||||
* [HHH-12785] - Test Javassist support
|
||||
* [HHH-12788] - Enable mockito-inline for the Agroal integration module
|
||||
* [HHH-12789] - Upgrade to Mockito 2.19.0
|
||||
* [HHH-12793] - Upgrade Karaf, pax-exam and reenable the OSGi tests
|
||||
* [HHH-12799] - Enforce version alignment of Mockito and ByteBuddy dependencies
|
||||
* [HHH-12801] - Error message in SqlFunctionMetadataBuilderContributorIllegalClassArgumentTest differs with JDK 11
|
||||
* [HHH-12803] - Upgrade ByteBuddy to 1.8.13
|
||||
* [HHH-12805] - Upgrade Mockito to 2.19.1
|
||||
* [HHH-12807] - Disable the hibernate-orm-modules tests for JDK 11
|
||||
* [HHH-12808] - Upgrade Gradle to 4.8.1
|
||||
* [HHH-12809] - Use an HTTP link for the Javadoc link to our Bean Validation documentation
|
||||
* [HHH-12813] - Disable Asciidoclet in Javadoc generation
|
||||
* [HHH-12816] - Enable the experimental features of ByteBuddy when building with JDK 11
|
||||
* [HHH-12820] - Merge the migration guides in the code base
|
||||
* [HHH-12828] - ScannerTests#testGetBytesFromInputStream() is not stable enough
|
||||
* [HHH-12840] - Simplify implementation of LocalObjectUuidHelper
|
||||
* [HHH-12841] - DriverManagerConnectionProviderImpl: implement lazy Thread starting and fix visibility guarantees
|
||||
* [HHH-12877] - Upgrade ByteBuddy to 1.8.15
|
||||
* [HHH-12884] - Upgrade to Gradle 4.9
|
||||
* [HHH-12887] - Remove direct dependency on ASM
|
||||
* [HHH-12894] - Simplify code of StandardSessionFactoryServiceInitiators
|
||||
* [HHH-12898] - Enable integration tests for Oracle Standard Edition Two 12.1.0.2.v12 on the AWS build slaves
|
||||
* [HHH-12899] - Enable integration tests for MS SQL Server on the AWS build slaves
|
||||
* [HHH-12901] - Enable loading of additional JDBC drivers from a local path
|
||||
* [HHH-12904] - Removing some dead code in InFlightMetadataCollectorImpl and MetadataImpl
|
||||
* [HHH-12909] - Upgrade ByteBuddy to 1.8.17
|
||||
* [HHH-12911] - Removing unused field from org.hibernate.mapping.Collection
|
||||
* [HHH-12914] - Avoid need to create a java.lang.reflect.Proxy to implement org.hibernate.engine.jdbc.BlobProxy
|
||||
* [HHH-12947] - Remove need for BootstrapContext where it's unused
|
||||
* [HHH-12948] - Allow using a custom SessionFactoryOptionsBuilder to create a SessionFactoryBuilderImpl
|
||||
* [HHH-12949] - Upgrade to Gradle 4.10
|
||||
* [HHH-12950] - Use the Annotation Processor specific scopes of Gradle
|
||||
* [HHH-12954] - Refactor boot/model/relational/Database to avoid holding references to MetadataBuildingOptions
|
||||
* [HHH-13006] - Upgrade to Gradle 4.10.2
|
||||
* [HHH-13007] - No longer use net.bytebuddy.experimental=true when testing on JDK11
|
||||
* [HHH-13014] - Expose bytecode common utilities as SPI
|
||||
* [HHH-13015] - Optimise loading of EntityCopyObserver implementation
|
||||
* [HHH-13018] - Upgrade to Hibernate Commons Annotations 5.0.5.Final
|
||||
* [HHH-13026] - Documentation: fixing link to Infinispan documentation section regarding Hibernate 2LC
|
||||
* [HHH-13028] - Make ASTPrinter caches use less memory
|
||||
* [HHH-13029] - Avoid static singletons to hold on Strategy Registration Implementations
|
||||
* [HHH-13030] - Provide a`hibernate-orm-modules` tested version for WildFly 14.0.x
|
||||
* [HHH-13033] - Upgrade ByteBuddy to 1.9.1
|
||||
* [HHH-13034] - Upgrade Hibernate Validator to 6.0.13.Final
|
||||
* [HHH-13035] - Upgrade Agroal to 1.2
|
||||
* [HHH-13036] - Upgrade HikariCP to 3.2.0
|
||||
* [HHH-13037] - Upgrade Vibur to 22.2
|
||||
* [HHH-13038] - Upgrade ByteBuddy to 1.9.2
|
||||
* [HHH-13039] - Upgrade Agroal to 1.3
|
||||
* [HHH-13047] - Deprecate Environment#verifyProperties
|
||||
* [HHH-13048] - Allow for parallel processing of entity enhancements via ByteBuddy
|
||||
* [HHH-13074] - Upgrade ByteBuddy to 1.9.3
|
||||
* [HHH-13075] - Upgrade Javassist to 3.24.0-GA
|
||||
* [HHH-13091] - Upgrade Ehcache 2 to 2.10.6 and Ehcache 3 to 3.6.1
|
||||
* [HHH-13092] - Upgrade Hibernate Commons Annotations to 5.1.0.Final
|
||||
* [HHH-13099] - Update to Byte Buddy 1.9.4
|
||||
* [HHH-13100] - All custom implementation of Byte Buddy "Implementation" s should have a proper equals and hashcode
|
||||
* [HHH-13101] - Document hibernate.id.disable_delayed_identity_inserts in user guide and migration guide
|
||||
|
||||
** Improvement
|
||||
* [HHH-9038] - Use "cascade constraints" when dropping tables for HSQLDB
|
||||
* [HHH-9241] - Allow declaring non-java.util Collection interfaces
|
||||
* [HHH-12144] - Support JTS version 1.16.0
|
||||
* [HHH-12196] - Sybase Dialect not supporting max result - paging
|
||||
* [HHH-12349] - User Guide documentation for @Filter is too verbose
|
||||
* [HHH-12361] - In the User Guide, omit constructors and equals/hashCode for brevity
|
||||
* [HHH-12368] - java.sql.SQLFeatureNotSupportedException in LobCreatorBuilderImpl
|
||||
* [HHH-12608] - Add the ST_DWithin() function in DB2 Spatial Dialect
|
||||
* [HHH-12653] - Throw MappingException if both @MappedSuperclass and @Inheritance are used
|
||||
* [HHH-12713] - Make EntityGraph creation more convenient
|
||||
* [HHH-12763] - Log which JtaPlatform implementation is used at startup on info level.
|
||||
* [HHH-12770] - HHH-12770 - Document @NotFound(action = NotFoundAction.IGNORE) and FetchType.LAZY behavior
|
||||
* [HHH-12775] - Avoid join on property access mapped by natural id
|
||||
* [HHH-12778] - BasicProxyFactoryImpl.getProxy() swallows exception
|
||||
* [HHH-12804] - No need to mock Map in CollectionBinderTest
|
||||
* [HHH-12811] - @UpdateTimestamp and @CreationTimestamp missing @Target annotation and breaking in Kotlin
|
||||
* [HHH-12830] - Improve error output with transaction issues
|
||||
* [HHH-12855] - Add query plan compilation statistics
|
||||
* [HHH-12872] - Reduce memory consumption of XSD schema validations
|
||||
* [HHH-12879] - Remove unnecessary constants in StandardDialectResolver
|
||||
* [HHH-12892] - Fix spelling issues in the User Guide
|
||||
* [HHH-12896] - Minor memory improvements in HQLQueryPlan
|
||||
* [HHH-12907] - Avoid garbage collection pressure when creating proxies with ByteBuddy
|
||||
* [HHH-12917] - Interning of strings for Filter definitions
|
||||
* [HHH-12918] - Interning of strings for Formula and Column exctraction templates
|
||||
* [HHH-12919] - Interning of strings for EntityReferenceAliases
|
||||
* [HHH-12922] - Hibernate Connection Pool Validation Thread should have a name
|
||||
* [HHH-12925] - The JDBC Type to Hibernate matching is limited to the Dialect only instead of considering the Type contributions as well
|
||||
* [HHH-12928] - Remove vestigial 'naturalIdTypes' field
|
||||
* [HHH-12929] - Add AtomikosJtaPlatform implementation
|
||||
* [HHH-12946] - Include JAXB as a dependency as it's not provided by JDK 11
|
||||
* [HHH-12952] - Document the hibernate.statistics.query_max_size behavior and explain its implications
|
||||
* [HHH-12957] - Calling custom functions in JPQL with the function() is missing in the documentation
|
||||
* [HHH-12961] - The links in the Javadoc of the SAP HANA dialects don't work
|
||||
* [HHH-12962] - Document how to tune the query plan cache size
|
||||
* [HHH-12974] - Document @OnDelete behavior in regards to disabling the Persistence Context entity removal cascading event
|
||||
* [HHH-12977] - Update latest dialect for MySQL
|
||||
* [HHH-12978] - Enum value binding is not logged by BasicBinder
|
||||
* [HHH-12982] - Generify Hibernate#unproxy
|
||||
* [HHH-12989] - Support heterogeneous collections passed to JPA's (criteria) Expression#in
|
||||
* [HHH-13003] - Skip processing of empty resource names defined for hibernate.hbm2ddl.import_files
|
||||
* [HHH-13005] - Upgrade ByteBuddy to 1.9.0
|
||||
* [HHH-13009] - No documentation for "hibernate.javax.cache.cache_manager"
|
||||
* [HHH-13011] - Add option enabling/disabling use of an entity's mapped where-clause when loading collections of that entity
|
||||
* [HHH-13017] - Exception on Service stop isn't logging the full stack
|
||||
* [HHH-13020] - When proxying an entity having a private default constructor, the log message is not very clear about the problem
|
||||
* [HHH-13021] - Add support for SAP HANA full-text search functions
|
||||
* [HHH-13022] - Make OSGi integration work on JDK11
|
||||
* [HHH-13025] - Rename MetadataContext#getEmbeddableTypeMap to getEmbeddableTypeSet
|
||||
* [HHH-13051] - Structure of audit-tables should be extensible
|
||||
* [HHH-13056] - Fix typo in documentation: "listner" should be "listener"
|
||||
* [HHH-13057] - Prevent Byte Buddy's Advice helper to need reloading many resources from the ClassLoader
|
||||
* [HHH-13061] - Introduce a 'none' option for BytecodeProvider implementation choice
|
||||
* [HHH-13069] - Update the links to JBoss Nexus to use the direct repository over https
|
||||
* [HHH-13071] - Remove deprecated call from gradle plugin
|
||||
* [HHH-13078] - Not choosing the right dialect for SQL Anywhere 17
|
||||
* [HHH-13090] - Allow to use specific cache implementations in Ehcache cache provider
|
||||
|
||||
|
||||
|
||||
Changes in 5.3.2.final (July 5, 2018)
|
||||
------------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
|
@ -1249,6 +2068,7 @@ https://hibernate.atlassian.net/projects/HHH/versions/23950
|
|||
* [HHH-10863] - Improve consistency of how we call implicitNamingStrategy.determineBasicColumnName with element collections
|
||||
* [HHH-10865] - Join table of lazy loading many-to-many relation not saved when lazy initialization bytecode enhancement is active
|
||||
* [HHH-10870] - Parameter lookup for Iterable fails on Criteria API
|
||||
* [HHH-10874] - @Where annotation is not processed with "Extra-lazy" loading for bidirectional collections
|
||||
* [HHH-10876] - DefaultIdentifierGeneratorFactory does not consider the hibernate.id.new_generator_mappings setting
|
||||
* [HHH-10885] - JPA Native Queries with ordinal parameter are zero based
|
||||
* [HHH-10886] - The follow-on-locking Oracle mechanism should consider UNION as well
|
||||
|
|
|
@ -0,0 +1,7 @@
|
|||
#! /bin/bash
|
||||
|
||||
rm -f $HOME/.gradle/caches/modules-2/modules-2.lock
|
||||
rm -fr $HOME/.gradle/caches/*/plugin-resolution/
|
||||
rm -f $HOME/.gradle/caches/*/fileHashes/fileHashes.bin
|
||||
rm -f $HOME/.gradle/caches/*/fileHashes/fileHashes.lock
|
||||
rm -fr $HOME/.m2/repository/org/hibernate
|
|
@ -0,0 +1,23 @@
|
|||
#! /bin/bash
|
||||
|
||||
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
|
||||
|
||||
java -version
|
||||
|
||||
if [ "$RDBMS" == 'mysql' ]; then
|
||||
bash $DIR/../docker_db.sh mysql_5_7
|
||||
elif [ "$RDBMS" == 'mysql8' ]; then
|
||||
bash $DIR/../docker_db.sh mysql_8_0
|
||||
elif [ "$RDBMS" == 'mariadb' ]; then
|
||||
bash $DIR/../docker_db.sh mariadb
|
||||
elif [ "$RDBMS" == 'postgresql' ]; then
|
||||
bash $DIR/../docker_db.sh postgresql_9_5
|
||||
elif [ "$RDBMS" == 'db2' ]; then
|
||||
bash $DIR/../docker_db.sh db2
|
||||
elif [ "$RDBMS" == 'oracle' ]; then
|
||||
bash $DIR/../docker_db.sh oracle
|
||||
elif [ "$RDBMS" == 'mssql' ]; then
|
||||
bash $DIR/../docker_db.sh mssql
|
||||
fi
|
||||
|
||||
exec bash $DIR/build.sh
|
|
@ -0,0 +1,27 @@
|
|||
#! /bin/bash
|
||||
|
||||
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
|
||||
|
||||
java -version
|
||||
|
||||
if [ "$RDBMS" == 'mysql' ]; then
|
||||
sudo service mysql stop
|
||||
bash $DIR/../docker_db.sh mysql_5_7
|
||||
elif [ "$RDBMS" == 'mysql8' ]; then
|
||||
sudo service mysql stop
|
||||
bash $DIR/../docker_db.sh mysql_8_0
|
||||
elif [ "$RDBMS" == 'mariadb' ]; then
|
||||
sudo service mysql stop
|
||||
bash $DIR/../docker_db.sh mariadb
|
||||
elif [ "$RDBMS" == 'postgresql' ]; then
|
||||
sudo service postgres stop
|
||||
bash $DIR/../docker_db.sh postgresql_9_5
|
||||
elif [ "$RDBMS" == 'db2' ]; then
|
||||
bash $DIR/../docker_db.sh db2
|
||||
elif [ "$RDBMS" == 'oracle' ]; then
|
||||
bash $DIR/../docker_db.sh oracle
|
||||
elif [ "$RDBMS" == 'mssql' ]; then
|
||||
bash $DIR/../docker_db.sh mssql
|
||||
fi
|
||||
|
||||
exec bash $DIR/build.sh
|
|
@ -0,0 +1,18 @@
|
|||
#! /bin/bash
|
||||
|
||||
goal=
|
||||
if [ "$RDBMS" == "derby" ]; then
|
||||
goal="-Pdb=derby"
|
||||
elif [ "$RDBMS" == "mariadb" ]; then
|
||||
goal="-Pdb=mariadb_ci"
|
||||
elif [ "$RDBMS" == "postgresql" ]; then
|
||||
goal="-Pdb=pgsql_ci"
|
||||
elif [ "$RDBMS" == "oracle" ]; then
|
||||
goal="-Pdb=oracle_ci"
|
||||
elif [ "$RDBMS" == "db2" ]; then
|
||||
goal="-Pdb=db2_ci"
|
||||
elif [ "$RDBMS" == "mssql" ]; then
|
||||
goal="-Pdb=mssql_ci"
|
||||
fi
|
||||
|
||||
exec ./gradlew check ${goal} -Plog-test-progress=true --stacktrace
|
|
@ -0,0 +1,14 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
jdbcDependency 'org.postgresql:postgresql:42.2.8'
|
|
@ -0,0 +1,25 @@
|
|||
#
|
||||
# Hibernate, Relational Persistence for Idiomatic Java
|
||||
#
|
||||
# License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
# See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
#
|
||||
|
||||
hibernate.dialect org.hibernate.dialect.CockroachDB192Dialect
|
||||
hibernate.connection.driver_class org.postgresql.Driver
|
||||
hibernate.connection.url jdbc:postgresql://localhost:26257/defaultdb?sslmode=disable
|
||||
hibernate.connection.username root
|
||||
hibernate.connection.password
|
||||
|
||||
hibernate.connection.pool_size 5
|
||||
|
||||
hibernate.show_sql false
|
||||
hibernate.format_sql true
|
||||
|
||||
hibernate.max_fetch_depth 5
|
||||
|
||||
hibernate.cache.region_prefix hibernate.test
|
||||
hibernate.cache.region.factory_class org.hibernate.testing.cache.CachingRegionFactory
|
||||
|
||||
hibernate.service.allow_crawling=false
|
||||
hibernate.session.events.log=true
|
|
@ -4,4 +4,5 @@
|
|||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
jdbcDependency "com.oracle.jdbc:ojdbc6:11.1.0.7.0"
|
||||
|
||||
jdbcDependency 'com.ibm.db2:jcc:11.5.4.0'
|
|
@ -0,0 +1,26 @@
|
|||
#
|
||||
# Hibernate, Relational Persistence for Idiomatic Java
|
||||
#
|
||||
# License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
# See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
#
|
||||
|
||||
hibernate.dialect org.hibernate.dialect.DB2Dialect
|
||||
hibernate.connection.driver_class com.ibm.db2.jcc.DB2Driver
|
||||
hibernate.connection.url jdbc:db2://localhost:50000/orm_test
|
||||
hibernate.connection.username orm_test
|
||||
hibernate.connection.password orm_test
|
||||
|
||||
hibernate.connection.pool_size 5
|
||||
|
||||
hibernate.show_sql false
|
||||
hibernate.format_sql true
|
||||
|
||||
hibernate.max_fetch_depth 5
|
||||
|
||||
hibernate.cache.region_prefix hibernate.test
|
||||
hibernate.cache.region.factory_class org.hibernate.testing.cache.CachingRegionFactory
|
||||
|
||||
javax.persistence.validation.mode=NONE
|
||||
hibernate.service.allow_crawling=false
|
||||
hibernate.session.events.log=true
|
|
@ -5,4 +5,4 @@
|
|||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
|
||||
jdbcDependency 'com.sap.cloud.db.jdbc:ngdbc:2.2.16'
|
||||
jdbcDependency 'com.sap.cloud.db.jdbc:ngdbc:2.4.59'
|
|
@ -4,4 +4,4 @@
|
|||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
jdbcDependency 'org.mariadb.jdbc:mariadb-java-client:1.5.7'
|
||||
jdbcDependency 'org.mariadb.jdbc:mariadb-java-client:2.2.4'
|
|
@ -0,0 +1,8 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
|
||||
jdbcDependency 'com.microsoft.sqlserver:mssql-jdbc:6.4.0.jre8'
|
|
@ -0,0 +1,26 @@
|
|||
#
|
||||
# Hibernate, Relational Persistence for Idiomatic Java
|
||||
#
|
||||
# License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
# See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
#
|
||||
|
||||
hibernate.dialect org.hibernate.dialect.SQLServer2012Dialect
|
||||
hibernate.connection.driver_class com.microsoft.sqlserver.jdbc.SQLServerDriver
|
||||
hibernate.connection.url jdbc:sqlserver://hibernate-testing-mssql-express.ccuzkqo3zqzq.us-east-1.rds.amazonaws.com
|
||||
hibernate.connection.username hibernate_orm_test
|
||||
hibernate.connection.password hibernate_orm_test
|
||||
|
||||
hibernate.connection.pool_size 5
|
||||
|
||||
hibernate.show_sql false
|
||||
hibernate.format_sql true
|
||||
|
||||
hibernate.max_fetch_depth 5
|
||||
|
||||
hibernate.cache.region_prefix hibernate.test
|
||||
hibernate.cache.region.factory_class org.hibernate.testing.cache.CachingRegionFactory
|
||||
|
||||
javax.persistence.validation.mode=NONE
|
||||
hibernate.service.allow_crawling=false
|
||||
hibernate.session.events.log=true
|
|
@ -4,4 +4,4 @@
|
|||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
jdbcDependency 'com.oracle.ojdbc:ojdbc7:12.1.0.2.0'
|
||||
jdbcDependency 'com.oracle.database.jdbc:ojdbc8:21.1.0.0'
|
|
@ -6,9 +6,10 @@
|
|||
#
|
||||
|
||||
hibernate.dialect org.hibernate.dialect.Oracle12cDialect
|
||||
hibernate.connection.driver_class oracle.jdbc.driver.OracleDriver
|
||||
hibernate.connection.url jdbc:oracle:thin:@orm-testing.ccuzkqo3zqzq.us-east-1.rds.amazonaws.com:1521:ORCL
|
||||
hibernate.connection.username ormmasteruser
|
||||
hibernate.connection.driver_class oracle.jdbc.OracleDriver
|
||||
hibernate.connection.url jdbc:oracle:thin:@hibernate-testing-oracle-se.ccuzkqo3zqzq.us-east-1.rds.amazonaws.com:1521:ORCL
|
||||
hibernate.connection.username hibernate_orm_test
|
||||
hibernate.connection.password hibernate_orm_test
|
||||
|
||||
hibernate.connection.pool_size 5
|
||||
|
||||
|
|
|
@ -4,4 +4,4 @@
|
|||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
jdbcDependency 'org.postgresql:postgresql:42.2.2'
|
||||
jdbcDependency 'org.postgresql:postgresql:42.2.19'
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
|
||||
hibernate.dialect org.hibernate.dialect.PostgreSQL94Dialect
|
||||
hibernate.connection.driver_class org.postgresql.Driver
|
||||
hibernate.connection.url jdbc:postgresql:hibernate_orm_test
|
||||
hibernate.connection.url jdbc:postgresql://localhost:5432/hibernate_orm_test?preparedStatementCacheQueries=0
|
||||
hibernate.connection.username hibernate_orm_test
|
||||
hibernate.connection.password hibernate_orm_test
|
||||
|
||||
|
|
|
@ -0,0 +1,4 @@
|
|||
.asciidoctor
|
||||
*.png
|
||||
*.html
|
||||
*.pdf
|
|
@ -0,0 +1,424 @@
|
|||
:link-parallelArrays: https://en.wikipedia.org/wiki/Parallel_array["parallel arrays"]
|
||||
= Domain Model Mappings
|
||||
|
||||
:toc2:
|
||||
:toclevels: 3
|
||||
:sectanchors:
|
||||
:numbered:
|
||||
|
||||
Describes Hibernate's handling of domain model metadata
|
||||
|
||||
== Mapping sources
|
||||
|
||||
Mapping sources include `hbm.xml` files, `orm.xml` files and annotated classes. There are
|
||||
other specialized forms of sources, but they all come back to locating annotated classes
|
||||
and XML mappings.
|
||||
|
||||
The main actors in managing the sources include:
|
||||
|
||||
`MetadataSources`::
|
||||
Used to collect mapping sources to be processed together
|
||||
|
||||
`JaxbHbmHibernateMapping`::
|
||||
Main JAXB binding for a `hbm.xml` mapping document
|
||||
|
||||
`Binding`::
|
||||
Represents an XML mapping within the `MetadataSources`. Wraps either a
|
||||
`JaxbHbmHibernateMapping` or DOM `Document` representing a JPA `orm.xml`.
|
||||
|
||||
`MappingBinder`::
|
||||
Responsible for generating `Binding` instances.
|
||||
|
||||
The handling for XML mappings is as follows:
|
||||
|
||||
[plantuml,hbm,png]
|
||||
.hbm.xml processing
|
||||
----
|
||||
@startuml
|
||||
skinparam handwritten true
|
||||
|
||||
Application -> MetadataSources : add(hbm.xml)
|
||||
MetadataSources -> MappingBinder : parse(hbm.xml)
|
||||
MappingBinder -> JAXB : bind(hbm.xml)
|
||||
MappingBinder <- JAXB : JaxbHbmHibernateMapping
|
||||
MetadataSources <- MappingBinder : Binding<JaxbHbmHibernateMapping>
|
||||
|
||||
@enduml
|
||||
----
|
||||
|
||||
[plantuml,orm,png]
|
||||
.orm.xml processing
|
||||
----
|
||||
@startuml
|
||||
skinparam handwritten true
|
||||
|
||||
Application -> MetadataSources : add(orm.xml)
|
||||
MetadataSources -> MappingBinder : parse(orm.xml)
|
||||
MappingBinder -> DOM : bind(orm.xml)
|
||||
MappingBinder <- DOM : Document
|
||||
MetadataSources <- MappingBinder : Binding<Document>
|
||||
|
||||
@enduml
|
||||
----
|
||||
|
||||
NOTE: `MetadataSources` receives XML files without any knowledge of whether the file
|
||||
is a Hibernate mapping (`hbm.xml`) or a JPA mapping (`orm.xml`). `MappingBinder` makes
|
||||
that distinction based on doctype, schema, etc.
|
||||
|
||||
|
||||
== Boot-time metamodel
|
||||
|
||||
The `org.hibernate.mapping` package defines most of the boot-time model.
|
||||
|
||||
|
||||
[plantuml,bootmodel,png]
|
||||
.Boot model actors
|
||||
----
|
||||
@startmindmap
|
||||
skinparam handwritten true
|
||||
+ Boot model
|
||||
++ PersistentClass
|
||||
++ Property
|
||||
++ Value
|
||||
++ IdGenerator
|
||||
++ TypeDef
|
||||
-- Table
|
||||
-- Selectable
|
||||
-- PrimaryKey
|
||||
-- Constraint
|
||||
@endmindmap
|
||||
----
|
||||
|
||||
=== PersistentClass
|
||||
|
||||
Models an entity
|
||||
|
||||
[plantuml,persistentclass,png]
|
||||
.PersistentClass hierarchy
|
||||
----
|
||||
@startuml
|
||||
interface Value
|
||||
|
||||
class Property
|
||||
Property -- Value : value >
|
||||
|
||||
class PersistentClass {
|
||||
entityName : String
|
||||
}
|
||||
PersistentClass *- Property : properties >
|
||||
|
||||
class RootClass {
|
||||
table : Table
|
||||
}
|
||||
|
||||
class JoinedSubclass {
|
||||
table : Table
|
||||
}
|
||||
|
||||
class UnionSubclass {
|
||||
table : Table
|
||||
}
|
||||
|
||||
PersistentClass <|-- RootClass
|
||||
PersistentClass <|-- Subclass
|
||||
Subclass <|-- JoinedSubclass
|
||||
Subclass <|-- SingleTableSubclass
|
||||
Subclass <|-- UnionSubclass
|
||||
@enduml
|
||||
----
|
||||
|
||||
|
||||
=== Value
|
||||
|
||||
Models a value. A value ultimately corresponds to a `org.hibernate.type.Type`. We will discuss
|
||||
this "simple" distinction when we talk about Types in the run-time metamodel section.
|
||||
|
||||
|
||||
[plantuml,value,png]
|
||||
.Value hierarchy
|
||||
----
|
||||
@startuml
|
||||
class SimpleValue
|
||||
note left of SimpleValue : By itself represents\na basic value
|
||||
class OneToMany
|
||||
note top of OneToMany : Used as element descriptor for\none-to-many collections
|
||||
|
||||
Value <|-- KeyValue
|
||||
Value <|-- OneToMany
|
||||
|
||||
KeyValue <|-- SimpleValue
|
||||
|
||||
SimpleValue <|-- DependentValue
|
||||
|
||||
SimpleValue <|-- Component
|
||||
|
||||
SimpleValue <|-- Any
|
||||
|
||||
SimpleValue <|-- ToOne
|
||||
ToOne <|-- ManyToOne
|
||||
ToOne <|-- OneToOne
|
||||
|
||||
Value <|-- Collection
|
||||
Collection <|-- Bag
|
||||
Collection <|-- Set
|
||||
Collection <|-- IdentifierCollection
|
||||
IdentifierCollection <|-- IdentifierBag
|
||||
Collection <|-- IndexedCollection
|
||||
IndexedCollection <|-- List
|
||||
List <|-- Array
|
||||
IndexedCollection <|-- Map
|
||||
|
||||
@enduml
|
||||
----
|
||||
|
||||
|
||||
=== Database model
|
||||
|
||||
[plantuml,db,png]
|
||||
.Database model
|
||||
----
|
||||
@startuml
|
||||
class Identifier
|
||||
Identifier : String text
|
||||
Identifier : boolean quoted
|
||||
|
||||
Selectable <|-- Column
|
||||
Column : Identifider name
|
||||
|
||||
Selectable <|-- Formula
|
||||
Formula : String fragment
|
||||
|
||||
Constraint <|-- PrimaryKey
|
||||
Constraint <|-- UniqueKey
|
||||
Constraint <|-- ForeignKey
|
||||
|
||||
class Table
|
||||
Table : Identifier name
|
||||
Table : Identifier schema
|
||||
Table : Identifier catalog
|
||||
Table : PrimaryKey : primaryKey
|
||||
Table : Selectable : selectables
|
||||
|
||||
class Index
|
||||
class Sequence
|
||||
|
||||
interface Exportable
|
||||
Exportable <|-- Table
|
||||
Exportable <|-- Constraint
|
||||
Exportable <|-- Index
|
||||
Exportable <|-- Sequence
|
||||
Exportable <|-- AuxilaryDatabaseObject
|
||||
|
||||
interface TableOwner
|
||||
TableOwner : Table table
|
||||
|
||||
TableOwner <|-- RootClass
|
||||
TableOwner <|-- JoinedSubclass
|
||||
TableOwner <|-- UnionSubclass
|
||||
@enduml
|
||||
----
|
||||
|
||||
|
||||
=== Transition from sources to boot-time model
|
||||
|
||||
The boot-time metamodel is built iteratively. The general paradigm in this transition is to
|
||||
instantiate one of these boot-time objects which are then populated in multiple later steps (via
|
||||
setters, additions, etc).
|
||||
|
||||
The main actors in this process are `HbmMetadataSourceProcessorImpl` and `AnnotationMetadataSourceProcessorImpl`.
|
||||
|
||||
|
||||
[plantuml,source2boot,png]
|
||||
.Transition sources to boot-time model
|
||||
----
|
||||
@startuml
|
||||
skinparam handwritten true
|
||||
autonumber
|
||||
|
||||
Application -> MetadataBuilder : build()
|
||||
MetadataBuilder -> MetadataBuildingProcess : build()
|
||||
MetadataBuildingProcess -> MetadataSourceProcessor
|
||||
|
||||
MetadataSourceProcessor -> HbmMetadataSourceProcessorImpl : process hbm.xml Bindings
|
||||
|
||||
MetadataSourceProcessor -> AnnotationMetadataSourceProcessorImpl : process annotations + orm.xml Bindings
|
||||
|
||||
MetadataBuilder <- MetadataBuildingProcess : Metadata
|
||||
Application <- MetadataBuilder : Metadata
|
||||
|
||||
@enduml
|
||||
----
|
||||
|
||||
|
||||
== Run-time metamodel
|
||||
|
||||
|
||||
[plantuml,runtimemodel,png]
|
||||
.Run-time model actors
|
||||
----
|
||||
@startmindmap
|
||||
skinparam handwritten true
|
||||
+ Boot model
|
||||
++ EntityPersister
|
||||
++ CollectionPersister
|
||||
++ Tuplizer
|
||||
-- Type
|
||||
-- IdentifierGenerator
|
||||
@endmindmap
|
||||
----
|
||||
|
||||
=== EntityPersister
|
||||
|
||||
Manages persistence of an entity to/from its defined table(s). Maintains flattened
|
||||
state regarding various aspects of the entity's value mappings as {link-parallelArrays}.
|
||||
An entity's value mappings include:
|
||||
* identifier
|
||||
* attribute state
|
||||
* (discriminator)
|
||||
* (version)
|
||||
|
||||
[plantuml,entitypersister,png]
|
||||
.EntityPersister hierarchy
|
||||
----
|
||||
@startuml
|
||||
skinparam handwritten true
|
||||
interface EntityPersister
|
||||
|
||||
abstract class AbstractEntityPersister
|
||||
EntityPersister <|-- AbstractEntityPersister
|
||||
|
||||
AbstractEntityPersister <|-- SingleTableEntityPersister
|
||||
AbstractEntityPersister <|-- JoinedEntityPersister
|
||||
AbstractEntityPersister <|-- UnionEntityPersister
|
||||
@enduml
|
||||
----
|
||||
|
||||
|
||||
=== CollectionPersister
|
||||
|
||||
Manages persistence of a collection to its defined table(s). Maintains flattened
|
||||
state as {link-parallelArrays} regarding various aspects of the value mappings making
|
||||
up the collection. These aspects include:
|
||||
* key -- the FK
|
||||
* element
|
||||
* (identifier) -- @IdBag
|
||||
* (list-index | map-key)
|
||||
|
||||
[plantuml,collectionpersister,png]
|
||||
.CollectionPersister hierarchy
|
||||
----
|
||||
@startuml
|
||||
skinparam handwritten true
|
||||
interface CollectionPersister
|
||||
|
||||
abstract class AbstractCollectionPersister
|
||||
CollectionPersister <|-- CollectionPersister
|
||||
|
||||
AbstractCollectionPersister <|-- BasicCollectionPersister
|
||||
AbstractCollectionPersister <|-- OneToManyCollectionPersister
|
||||
|
||||
note left of BasicCollectionPersister : collection mappings\nwith a collection table
|
||||
@enduml
|
||||
----
|
||||
|
||||
=== Type
|
||||
|
||||
Describes a value mapping which is some form of non-identified state.
|
||||
|
||||
[plantuml,type,png]
|
||||
.Type hierarchy
|
||||
----
|
||||
@startuml
|
||||
skinparam handwritten true
|
||||
|
||||
interface Type
|
||||
|
||||
interface IdentifierType
|
||||
Type <|-- IdentifierType
|
||||
|
||||
interface DiscriminatorType
|
||||
IdentifierType <|-- DiscriminatorType
|
||||
|
||||
interface VersionType
|
||||
Type <|-- VersionType
|
||||
|
||||
interface BasicType
|
||||
Type <|-- BasicType
|
||||
|
||||
interface CompositeType
|
||||
Type <|-- CompositeType
|
||||
CompositeType *- Type : subtypes
|
||||
|
||||
interface AssociationType
|
||||
Type <|-- AssociationType
|
||||
|
||||
interface AnyType {
|
||||
discriminatorType : DiscriminatorType
|
||||
identifierType : IdentifierType
|
||||
}
|
||||
AssociationType <|-- AnyType
|
||||
CompositeType <|-- AnyType
|
||||
|
||||
interface UserType
|
||||
interface CustomType
|
||||
CustomType -- UserType : wrappedUserType
|
||||
Type <|-- CustomType
|
||||
@enduml
|
||||
----
|
||||
|
||||
`IdentifierType`::
|
||||
Specialized Type contract for types that can be used as an identifier
|
||||
|
||||
`DiscriminatorType`::
|
||||
Specialized Type contract for types that can be used as a discriminator
|
||||
|
||||
`VersionType`::
|
||||
Specialized Type contract for types that can be used as a version
|
||||
|
||||
`BasicType`::
|
||||
Mapping to a single column
|
||||
|
||||
`CompositeType`::
|
||||
Mapping to one or more columns
|
||||
|
||||
`AssociationType`::
|
||||
Mapping to an entity association
|
||||
|
||||
`AnyType`::
|
||||
Models a discriminated association which is similar to an association referencing a
|
||||
discriminated-subclass entity in that the mapping involves a discriminator. However,
|
||||
in an ANY mapping the discriminator is on the referring side. This will map to at least
|
||||
2 columns - one for the discriminator plus one or more identifier columns.
|
||||
|
||||
`EntityType`::
|
||||
Models a foreign-key, which "from this side" is a to-one. Could map to a single column or multiple.
|
||||
|
||||
`CollectionType`::
|
||||
Models a foreign-key, which "from this side" is a to-many. Will map to at
|
||||
|
||||
|
||||
=== Transition from boot-time model to run-time model
|
||||
|
||||
This transition involves processing the boot model objects (`PersistentClass`, `Value`, etc) and building
|
||||
the corresponding run-time counterparts (`EntityPersister`, `Type`, etc).
|
||||
|
||||
The main actors in this transition are the `SessionFactory` itself, `MetamodelImplementor` and
|
||||
`TypeConfiguration`:
|
||||
|
||||
|
||||
[plantuml,boot2run,png]
|
||||
.Transition boot-time model to run-time model
|
||||
----
|
||||
@startuml
|
||||
skinparam handwritten true
|
||||
|
||||
Application -> SessionFactoryBuilder : build()
|
||||
SessionFactoryBuilder -> SessionFactory : new
|
||||
SessionFactory -> TypeConfiguration : scope
|
||||
|
||||
...
|
||||
|
||||
@enduml
|
||||
----
|
||||
|
||||
##
|
|
@ -0,0 +1,248 @@
|
|||
#! /bin/bash
|
||||
|
||||
mysql_5_7() {
|
||||
docker rm -f mysql || true
|
||||
docker run --name mysql -e MYSQL_USER=hibernate_orm_test -e MYSQL_PASSWORD=hibernate_orm_test -e MYSQL_DATABASE=hibernate_orm_test -p3306:3306 -d mysql:5.7 --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
|
||||
}
|
||||
|
||||
mysql_8_0() {
|
||||
docker rm -f mysql || true
|
||||
docker run --name mysql -e MYSQL_USER=hibernate_orm_test -e MYSQL_PASSWORD=hibernate_orm_test -e MYSQL_ROOT_PASSWORD=hibernate_orm_test -e MYSQL_DATABASE=hibernate_orm_test -p3306:3306 -d mysql:8.0.21 --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
|
||||
}
|
||||
|
||||
mariadb() {
|
||||
docker rm -f mariadb || true
|
||||
docker run --name mariadb -e MYSQL_USER=hibernate_orm_test -e MYSQL_PASSWORD=hibernate_orm_test -e MYSQL_DATABASE=hibernate_orm_test -e MYSQL_ROOT_PASSWORD=hibernate_orm_test -p3306:3306 -d mariadb:10.5.8 --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
|
||||
}
|
||||
|
||||
postgresql_9_5() {
|
||||
docker rm -f postgres || true
|
||||
docker run --name postgres -e POSTGRES_USER=hibernate_orm_test -e POSTGRES_PASSWORD=hibernate_orm_test -e POSTGRES_DB=hibernate_orm_test -p5432:5432 -d postgres:9.5
|
||||
}
|
||||
|
||||
postgis(){
|
||||
docker rm -f postgis || true
|
||||
docker run --name postgis -e POSTGRES_USER=hibernate_orm_test -e POSTGRES_PASSWORD=hibernate_orm_test -e POSTGRES_DB=hibernate_orm_test -p5432:5432 -d postgis/postgis:11-2.5
|
||||
}
|
||||
|
||||
db2() {
|
||||
docker rm -f db2 || true
|
||||
docker run --name db2 --privileged -e DB2INSTANCE=orm_test -e DB2INST1_PASSWORD=orm_test -e DBNAME=orm_test -e LICENSE=accept -e AUTOCONFIG=false -e ARCHIVE_LOGS=false -e TO_CREATE_SAMPLEDB=false -e REPODB=false -p 50000:50000 -d ibmcom/db2:11.5.5.0
|
||||
# Give the container some time to start
|
||||
OUTPUT=
|
||||
while [[ $OUTPUT != *"INSTANCE"* ]]; do
|
||||
echo "Waiting for DB2 to start..."
|
||||
sleep 10
|
||||
OUTPUT=$(docker logs db2)
|
||||
done
|
||||
docker exec -t db2 su - orm_test bash -c ". /database/config/orm_test/sqllib/db2profile && /database/config/orm_test/sqllib/bin/db2 'connect to orm_test' && /database/config/orm_test/sqllib/bin/db2 'CREATE USER TEMPORARY TABLESPACE usr_tbsp MANAGED BY AUTOMATIC STORAGE'"
|
||||
}
|
||||
|
||||
db2_spatial() {
|
||||
docker rm -f db2spatial || true
|
||||
temp_dir=$(mktemp -d)
|
||||
cat <<EOF >${temp_dir}/ewkt.sql
|
||||
create or replace function db2gse.asewkt(geometry db2gse.st_geometry)
|
||||
returns clob(2G)
|
||||
specific db2gse.asewkt1
|
||||
language sql
|
||||
deterministic
|
||||
no external action
|
||||
reads sql data
|
||||
return 'srid=' || varchar(db2gse.st_srsid(geometry)) || ';' || db2gse.st_astext(geometry)
|
||||
;
|
||||
|
||||
-- Create SQL function to create a geometry from EWKT format
|
||||
create or replace function db2gse.geomfromewkt(instring varchar(32000))
|
||||
returns db2gse.st_geometry
|
||||
specific db2gse.fromewkt1
|
||||
language sql
|
||||
deterministic
|
||||
no external action
|
||||
reads sql data
|
||||
return db2gse.st_geometry(
|
||||
substr(instring,posstr(instring,';')+1, length(instring) - posstr(instring,';')),
|
||||
integer(substr(instring,posstr(instring,'=')+1,posstr(instring,';')-(posstr(instring,'=')+1)))
|
||||
)
|
||||
;
|
||||
-- Create a DB2 transform group to return and accept EWKT
|
||||
CREATE TRANSFORM FOR db2gse.ST_Geometry EWKT (
|
||||
FROM SQL WITH FUNCTION db2gse.asewkt(db2gse.ST_Geometry),
|
||||
TO SQL WITH FUNCTION db2gse.geomfromewkt(varchar(32000)) )
|
||||
;
|
||||
|
||||
-- Redefine the default DB2_PROGRAM to return and accept EWKT instead of WKT
|
||||
DROP TRANSFORM DB2_PROGRAM FOR db2gse.ST_Geometry;
|
||||
CREATE TRANSFORM FOR db2gse.ST_Geometry DB2_PROGRAM (
|
||||
FROM SQL WITH FUNCTION db2gse.asewkt(db2gse.ST_Geometry),
|
||||
TO SQL WITH FUNCTION db2gse.geomfromewkt(varchar(32000)) )
|
||||
;
|
||||
EOF
|
||||
docker run --name db2spatial --privileged -e DB2INSTANCE=orm_test -e DB2INST1_PASSWORD=orm_test -e DBNAME=orm_test -e LICENSE=accept -e AUTOCONFIG=false -e ARCHIVE_LOGS=false -e TO_CREATE_SAMPLEDB=false -e REPODB=false \
|
||||
-v ${temp_dir}:/conf \
|
||||
-p 50000:50000 -d ibmcom/db2:11.5.5.0
|
||||
|
||||
# Give the container some time to start
|
||||
OUTPUT=
|
||||
while [[ $OUTPUT != *"Setup has completed."* ]]; do
|
||||
echo "Waiting for DB2 to start..."
|
||||
sleep 10
|
||||
OUTPUT=$(docker logs db2spatial)
|
||||
done
|
||||
sleep 10
|
||||
echo "Enabling spatial extender"
|
||||
docker exec -t db2spatial su - orm_test bash -c "/database/config/orm_test/sqllib/db2profile && /database/config/orm_test/sqllib/bin/db2se enable_db orm_test"
|
||||
echo "Installing required transform group"
|
||||
docker exec -t db2spatial su - orm_test bash -c "/database/config/orm_test/sqllib/db2profile && /database/config/orm_test/sqllib/bin/db2 'connect to orm_test' && /database/config/orm_test/sqllib/bin/db2 -tvf /conf/ewkt.sql"
|
||||
|
||||
}
|
||||
|
||||
mssql() {
|
||||
docker rm -f mssql || true
|
||||
docker run --name mssql -d -p 1433:1433 -e "SA_PASSWORD=Hibernate_orm_test" -e ACCEPT_EULA=Y microsoft/mssql-server-linux:2017-CU13
|
||||
sleep 5
|
||||
n=0
|
||||
until [ "$n" -ge 5 ]
|
||||
do
|
||||
# We need a database that uses a non-lock based MVCC approach
|
||||
# https://github.com/microsoft/homebrew-mssql-release/issues/2#issuecomment-682285561
|
||||
docker exec mssql bash -c 'echo "create database hibernate_orm_test collate SQL_Latin1_General_CP1_CI_AS; alter database hibernate_orm_test set READ_COMMITTED_SNAPSHOT ON" | /opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P Hibernate_orm_test -i /dev/stdin' && break
|
||||
echo "Waiting for SQL Server to start..."
|
||||
n=$((n+1))
|
||||
sleep 5
|
||||
done
|
||||
if [ "$n" -ge 5 ]; then
|
||||
echo "SQL Server failed to start and configure after 25 seconds"
|
||||
else
|
||||
echo "SQL Server successfully started"
|
||||
fi
|
||||
}
|
||||
|
||||
oracle() {
|
||||
docker rm -f oracle || true
|
||||
# We need to use the defaults
|
||||
# SYSTEM/Oracle18
|
||||
docker run --shm-size=1536m --name oracle -d -p 1521:1521 --ulimit nofile=1048576:1048576 quillbuilduser/oracle-18-xe
|
||||
until [ "`docker inspect -f {{.State.Health.Status}} oracle`" == "healthy" ];
|
||||
do
|
||||
echo "Waiting for Oracle to start..."
|
||||
sleep 10;
|
||||
done
|
||||
echo "Oracle successfully started"
|
||||
# We increase file sizes to avoid online resizes as that requires lots of CPU which is restricted in XE
|
||||
docker exec oracle bash -c "source /home/oracle/.bashrc; bash -c \"
|
||||
cat <<EOF | \$ORACLE_HOME/bin/sqlplus sys/Oracle18@localhost/XE as sysdba
|
||||
alter database tempfile '/opt/oracle/oradata/XE/temp01.dbf' resize 400M;
|
||||
alter database datafile '/opt/oracle/oradata/XE/system01.dbf' resize 1000M;
|
||||
alter database datafile '/opt/oracle/oradata/XE/sysaux01.dbf' resize 600M;
|
||||
alter database datafile '/opt/oracle/oradata/XE/undotbs01.dbf' resize 300M;
|
||||
alter database add logfile group 4 '/opt/oracle/oradata/XE/redo04.log' size 500M reuse;
|
||||
alter database add logfile group 5 '/opt/oracle/oradata/XE/redo05.log' size 500M reuse;
|
||||
alter database add logfile group 6 '/opt/oracle/oradata/XE/redo06.log' size 500M reuse;
|
||||
|
||||
alter system switch logfile;
|
||||
alter system switch logfile;
|
||||
alter system switch logfile;
|
||||
alter system checkpoint;
|
||||
|
||||
alter database drop logfile group 1;
|
||||
alter database drop logfile group 2;
|
||||
alter database drop logfile group 3;
|
||||
alter system set open_cursors=1000 sid='*' scope=both;
|
||||
EOF\""
|
||||
}
|
||||
|
||||
oracle_ee() {
|
||||
docker rm -f oracle || true
|
||||
# We need to use the defaults
|
||||
# sys as sysdba/Oradoc_db1
|
||||
docker run --name oracle -d -p 1521:1521 store/oracle/database-enterprise:12.2.0.1-slim
|
||||
# Give the container some time to start
|
||||
OUTPUT=
|
||||
while [[ $OUTPUT != *"NLS_CALENDAR"* ]]; do
|
||||
echo "Waiting for Oracle to start..."
|
||||
sleep 10
|
||||
OUTPUT=$(docker logs oracle)
|
||||
done
|
||||
echo "Oracle successfully started"
|
||||
# We increase file sizes to avoid online resizes as that requires lots of CPU which is restricted in XE
|
||||
docker exec oracle bash -c "source /home/oracle/.bashrc; \$ORACLE_HOME/bin/sqlplus sys/Oradoc_db1@ORCLCDB as sysdba <<EOF
|
||||
create user c##hibernate_orm_test identified by hibernate_orm_test container=all;
|
||||
grant connect, resource, dba to c##hibernate_orm_test container=all;
|
||||
alter database tempfile '/u02/app/oracle/oradata/ORCL/temp01.dbf' resize 400M;
|
||||
alter database datafile '/u02/app/oracle/oradata/ORCL/system01.dbf' resize 1000M;
|
||||
alter database datafile '/u02/app/oracle/oradata/ORCL/sysaux01.dbf' resize 900M;
|
||||
alter database datafile '/u02/app/oracle/oradata/ORCL/undotbs01.dbf' resize 300M;
|
||||
alter database add logfile group 4 '/u02/app/oracle/oradata/ORCL/redo04.log' size 500M reuse;
|
||||
alter database add logfile group 5 '/u02/app/oracle/oradata/ORCL/redo05.log' size 500M reuse;
|
||||
alter database add logfile group 6 '/u02/app/oracle/oradata/ORCL/redo06.log' size 500M reuse;
|
||||
|
||||
alter system switch logfile;
|
||||
alter system switch logfile;
|
||||
alter system switch logfile;
|
||||
alter system checkpoint;
|
||||
|
||||
alter database drop logfile group 1;
|
||||
alter database drop logfile group 2;
|
||||
alter database drop logfile group 3;
|
||||
alter session set container=ORCLPDB1;
|
||||
alter database datafile '/u02/app/oracle/oradata/ORCLCDB/orclpdb1/system01.dbf' resize 500M;
|
||||
alter database datafile '/u02/app/oracle/oradata/ORCLCDB/orclpdb1/sysaux01.dbf' resize 500M;
|
||||
EOF"
|
||||
}
|
||||
|
||||
hana() {
|
||||
temp_dir=$(mktemp -d)
|
||||
echo '{"master_password" : "H1bernate_test"}' >$temp_dir/password.json
|
||||
chmod 777 -R $temp_dir
|
||||
docker rm -f hana || true
|
||||
docker run -d --name hana -p 39013:39013 -p 39017:39017 -p 39041-39045:39041-39045 -p 1128-1129:1128-1129 -p 59013-59014:59013-59014 \
|
||||
--ulimit nofile=1048576:1048576 \
|
||||
--sysctl kernel.shmmax=1073741824 \
|
||||
--sysctl net.ipv4.ip_local_port_range='40000 60999' \
|
||||
--sysctl kernel.shmmni=524288 \
|
||||
--sysctl kernel.shmall=8388608 \
|
||||
-v $temp_dir:/config \
|
||||
store/saplabs/hanaexpress:2.00.045.00.20200121.1 \
|
||||
--passwords-url file:///config/password.json \
|
||||
--agree-to-sap-license
|
||||
# Give the container some time to start
|
||||
OUTPUT=
|
||||
while [[ $OUTPUT != *"Startup finished"* ]]; do
|
||||
echo "Waiting for HANA to start..."
|
||||
sleep 10
|
||||
OUTPUT=$(docker logs hana)
|
||||
done
|
||||
echo "HANA successfully started"
|
||||
}
|
||||
|
||||
cockroachdb() {
|
||||
docker rm -f cockroach || true
|
||||
docker run -d --name=cockroach -p 26257:26257 -p 8080:8080 cockroachdb/cockroach:v20.2.4 start-single-node --insecure
|
||||
OUTPUT=
|
||||
while [[ $OUTPUT != *"CockroachDB node starting"* ]]; do
|
||||
echo "Waiting for CockroachDB to start..."
|
||||
sleep 10
|
||||
OUTPUT=$(docker logs cockroach)
|
||||
done
|
||||
echo "Cockroachdb successfully started"
|
||||
|
||||
}
|
||||
|
||||
if [ -z ${1} ]; then
|
||||
echo "No db name provided"
|
||||
echo "Provide one of:"
|
||||
echo -e "\tmysql_5_7"
|
||||
echo -e "\tmysql_8_0"
|
||||
echo -e "\tmariadb"
|
||||
echo -e "\tpostgresql_9_5"
|
||||
echo -e "\tdb2"
|
||||
echo -e "\tmssql"
|
||||
echo -e "\toracle"
|
||||
echo -e "\tpostgis"
|
||||
echo -e "\tdb2_spatial"
|
||||
echo -e "\thana"
|
||||
echo -e "\tcockroachdb"
|
||||
else
|
||||
${1}
|
||||
fi
|
|
@ -15,7 +15,7 @@ ext {
|
|||
'hibernate-infinispan',
|
||||
'hibernate-ehcache',
|
||||
'hibernate-java8',
|
||||
'hibernate-orm-modules',
|
||||
'hibernate-integrationtest-java-modules',
|
||||
'release'
|
||||
]
|
||||
}
|
||||
|
@ -40,7 +40,7 @@ dependencies {
|
|||
|
||||
compile( libraries.jpa )
|
||||
compile( project( ':hibernate-core' ) )
|
||||
compile( project( ':hibernate-jpamodelgen' ) )
|
||||
annotationProcessor( project( ':hibernate-jpamodelgen' ) )
|
||||
|
||||
testCompile( 'org.apache.commons:commons-lang3:3.4' )
|
||||
|
||||
|
@ -63,6 +63,7 @@ dependencies {
|
|||
testRuntime( libraries.mariadb )
|
||||
testRuntime( libraries.mssql )
|
||||
testRuntime( libraries.hana )
|
||||
testRuntime( libraries.cockroachdb )
|
||||
|
||||
testCompile( project( ':hibernate-jcache' ) )
|
||||
testRuntime( libraries.ehcache3 )
|
||||
|
@ -128,9 +129,29 @@ task aggregateJavadocs(type: Javadoc) {
|
|||
'http://docs.jboss.org/cdi/api/2.0/',
|
||||
'https://javaee.github.io/javaee-spec/javadocs/'
|
||||
]
|
||||
|
||||
if ( gradle.ext.javaVersions.main.compiler.asInt() >= 11 ) {
|
||||
//The need to set `--source 1.8` applies to all JVMs after 11, and also to 11
|
||||
// but after excluding the first two builds; see also specific comments on
|
||||
// https://bugs.openjdk.java.net/browse/JDK-8212233?focusedCommentId=14245762
|
||||
// For now, let's be compatible with JDK 11.0.3+. We can improve on it if people
|
||||
// complain they cannot build with JDK 11.0.0, 11.0.1 and 11.0.2.
|
||||
logger.lifecycle "Forcing Javadoc in Java 8 compatible mode"
|
||||
options.source = gradle.ext.baselineJavaVersion
|
||||
}
|
||||
|
||||
if ( JavaVersion.current().isJava8Compatible() ) {
|
||||
options.addStringOption( 'Xdoclint:none', '-quiet' )
|
||||
options.addStringOption( 'Xdoclint:none', '-quiet' )
|
||||
|
||||
if ( gradle.ext.javaToolchainEnabled ) {
|
||||
options.setJFlags( getProperty( 'toolchain.javadoc.jvmargs' ).toString().
|
||||
split( ' ' ).toList().findAll( { !it.isEmpty() } ) )
|
||||
}
|
||||
}
|
||||
|
||||
if ( gradle.ext.javaToolchainEnabled ) {
|
||||
// Display version of Java tools
|
||||
doFirst {
|
||||
logger.lifecycle "Aggregating javadoc with '${javadocTool.get().metadata.installationPath}'"
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -223,7 +244,8 @@ task renderUserGuide(type: AsciidoctorTask, group: 'Documentation') {
|
|||
stylesheet: "css/hibernate.css",
|
||||
majorMinorVersion: rootProject.ormVersion.family,
|
||||
fullVersion: rootProject.ormVersion.fullName,
|
||||
docinfo: 'private'
|
||||
docinfo: 'private',
|
||||
jpaJavadocUrlPrefix: "https://javaee.github.io/javaee-spec/javadocs/javax/persistence/"
|
||||
|
||||
resources {
|
||||
from('src/main/asciidoc/userguide/') {
|
||||
|
|
|
@ -11,7 +11,7 @@ It will also delve into the ways third-party integrators and applications can le
|
|||
=== What is a Service?
|
||||
|
||||
A services provides a certain types of functionality, in a pluggable manner.
|
||||
Specifically they are interfaces defining certain functionality and then implementations of those `Service` contract interfaces.
|
||||
Specifically, they are interfaces defining certain functionality and then implementations of those `Service` contract interfaces.
|
||||
The interface is known as the `Service` role; the implementation class is known as the `Service` implementation.
|
||||
The pluggability comes from the fact that the `Service` implementation adheres to contract defined by the interface of the `Service` role and that consumers of the `Service` program to the `Service` role, not the implementation.
|
||||
|
||||
|
|
|
@ -11,7 +11,7 @@ hibernate-spatial:: Hibernate's Spatial/GIS data-type support
|
|||
hibernate-osgi:: Hibernate support for running in OSGi containers.
|
||||
hibernate-agroal:: Integrates the http://agroal.github.io/[Agroal] connection pooling library into Hibernate
|
||||
hibernate-c3p0:: Integrates the http://www.mchange.com/projects/c3p0/[C3P0] connection pooling library into Hibernate
|
||||
hibernate-hikaricp:: Integrates the http://brettwooldridge.github.io/HikariCP/[HikariCP] connection pooling library into Hibernate
|
||||
hibernate-hikaricp:: Integrates the https://github.com/brettwooldridge/HikariCP/[HikariCP] connection pooling library into Hibernate
|
||||
hibernate-vibur:: Integrates the http://www.vibur.org/[Vibur DBCP] connection pooling library into Hibernate
|
||||
hibernate-proxool:: Integrates the http://proxool.sourceforge.net/[Proxool] connection pooling library into Hibernate
|
||||
hibernate-jcache:: Integrates the https://jcp.org/en/jsr/detail?id=107$$[JCache] caching specification into Hibernate,
|
||||
|
|
|
@ -3,8 +3,8 @@
|
|||
[preface]
|
||||
== Preface
|
||||
|
||||
Working with both Object-Oriented software and Relational Databases can be cumbersome and time consuming.
|
||||
Development costs are significantly higher due to a paradigm mismatch between how data is represented in objects
|
||||
Working with both Object-Oriented software and Relational Databases can be cumbersome and time-consuming.
|
||||
Development costs are significantly higher due to a number of "paradigm mismatches" between how data is represented in objects
|
||||
versus relational databases. Hibernate is an Object/Relational Mapping (ORM) solution for Java environments. The
|
||||
term Object/Relational Mapping refers to the technique of mapping data between an object model representation to
|
||||
a relational data model representation. See http://en.wikipedia.org/wiki/Object-relational_mapping for a good
|
||||
|
@ -14,7 +14,10 @@ takes a look at many of the mismatch problems.
|
|||
Although having a strong background in SQL is not required to use Hibernate, having a basic understanding of the
|
||||
concepts can help you understand Hibernate more quickly and fully. An understanding of data modeling principles
|
||||
is especially important. Both http://www.agiledata.org/essays/dataModeling101.html and
|
||||
http://en.wikipedia.org/wiki/Data_modeling are good starting points for understanding these data modeling principles.
|
||||
http://en.wikipedia.org/wiki/Data_modeling are good starting points for understanding these data modeling
|
||||
principles. If you are completely new to database access in Java,
|
||||
https://www.marcobehler.com/guides/a-guide-to-accessing-databases-in-java contains a good overview of the various parts,
|
||||
pieces and options.
|
||||
|
||||
Hibernate takes care of the mapping from Java classes to database tables, and from Java data types to SQL data
|
||||
types. In addition, it provides data query and retrieval facilities. It can significantly reduce development
|
||||
|
@ -32,4 +35,4 @@ representation to a graph of objects.
|
|||
|
||||
See http://hibernate.org/orm/contribute/ for information on getting involved.
|
||||
|
||||
IMPORTANT: The projects and code for the tutorials referenced in this guide are available as link:hibernate-tutorials.zip[]
|
||||
IMPORTANT: The projects and code for the tutorials referenced in this guide are available as link:hibernate-tutorials.zip[]
|
||||
|
|
|
@ -58,7 +58,7 @@ There are other ways to specify configuration properties, including:
|
|||
* Place a file named hibernate.properties in a root directory of the classpath.
|
||||
* Place a file named hibernate.properties in a root directory of the classpath.
|
||||
* Pass an instance of java.util.Properties to `Configuration#setProperties`.
|
||||
* Set System properties using java `-Dproperty=value`.
|
||||
* Set System properties using Java `-Dproperty=value`.
|
||||
* Include `<property/>` elements in `hibernate.cfg.xml`
|
||||
|
||||
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
This guide discusses the process of bootstrapping a Hibernate `org.hibernate.SessionFactory`. It also
|
||||
discusses the ways in which applications and integrators can hook-in to and affect that process. This
|
||||
bootstrapping process is defined in 2 distinct steps. The first step is the building of a ServiceRegistry
|
||||
holding the services Hibernate will need at bootstrap- and run-time. The second step is the building of
|
||||
holding the services Hibernate will need at bootstrap- and runtime. The second step is the building of
|
||||
a Metadata object representing the mapping information for the application's model and its mapping to
|
||||
the database.
|
||||
|
||||
|
|
|
@ -23,11 +23,11 @@ Ultimately all enhancement is handled by the `org.hibernate.bytecode.enhance.spi
|
|||
enhancement can certainly be crafted on top of Enhancer, but that is beyond the scope of this guide. Here we
|
||||
will focus on the means Hibernate already exposes for performing these enhancements.
|
||||
|
||||
=== Run-time enhancement
|
||||
=== Runtime enhancement
|
||||
|
||||
Currently run-time enhancement of the domain model is only supported in managed JPA environments following the JPA defined SPI for performing class transformations.
|
||||
Currently runtime enhancement of the domain model is only supported in managed JPA environments following the JPA defined SPI for performing class transformations.
|
||||
|
||||
Even then, this support is disabled by default. To enable run-time enhancement, specify one of the following configuration properties:
|
||||
Even then, this support is disabled by default. To enable runtime enhancement, specify one of the following configuration properties:
|
||||
|
||||
`*hibernate.enhancer.enableDirtyTracking*` (e.g. `true` or `false` (default value))::
|
||||
Enable dirty tracking feature in runtime bytecode enhancement.
|
||||
|
|
|
@ -167,7 +167,7 @@ appropriate and all classes X, Y, Z, and K.
|
|||
== Usage
|
||||
|
||||
The jar file for the annotation processor can be found in the
|
||||
http://repository.jboss.com/[JBoss Maven repository] under:
|
||||
https://search.maven.org/[Maven Central repository] under:
|
||||
|
||||
====
|
||||
[source, XML]
|
||||
|
@ -182,12 +182,12 @@ http://repository.jboss.com/[JBoss Maven repository] under:
|
|||
====
|
||||
|
||||
Alternatively, it can be found in the ORM distribution bundle on
|
||||
http://sourceforge.net/projects/hibernate/files/hibernate4[SourceForge].
|
||||
https://sourceforge.net/projects/hibernate/files/hibernate-orm/[SourceForge].
|
||||
|
||||
In most cases the annotation processor will automatically run provided
|
||||
the processor jar is added to the build classpath and a JDK >6 is used.
|
||||
the processor jar is added to the build classpath.
|
||||
This happens due to Java's Service Provider contract and the fact
|
||||
the the Hibernate Static Metamodel Generator jar files contains the
|
||||
the Hibernate Static Metamodel Generator jar file contains the
|
||||
file _javax.annotation.processing.Processor_ in the _META-INF/services_ directory.
|
||||
|
||||
The fully qualified name of the processor itself is:
|
||||
|
@ -246,8 +246,8 @@ pass the processor option to the compiler plugin:
|
|||
<plugin>
|
||||
<artifactId>maven-compiler-plugin</artifactId>
|
||||
<configuration>
|
||||
<source>1.6</source>
|
||||
<target>1.6</target>
|
||||
<source>1.8</source>
|
||||
<target>1.8</target>
|
||||
<compilerArguments>
|
||||
<processor>org.hibernate.jpamodelgen.JPAMetaModelEntityProcessor</processor>
|
||||
</compilerArguments>
|
||||
|
@ -273,14 +273,14 @@ plugin as seen in below.
|
|||
<plugin>
|
||||
<artifactId>maven-compiler-plugin</artifactId>
|
||||
<configuration>
|
||||
<source>1.6</source>
|
||||
<target>1.6</target>
|
||||
<source>1.8</source>
|
||||
<target>1.8</target>
|
||||
<compilerArgument>-proc:none</compilerArgument>
|
||||
</configuration>
|
||||
</plugin>
|
||||
----
|
||||
====
|
||||
Once disabled, the http://code.google.com/p/maven-annotation-plugin[maven-processor-plugin]
|
||||
Once disabled, the https://bsorrentino.github.io/maven-annotation-plugin/[maven-processor-plugin]
|
||||
for annotation processing can be used:
|
||||
|
||||
[[maven-processor-plugin]]
|
||||
|
@ -375,6 +375,9 @@ Just check the "Enable annotation processing" option, configure the directory fo
|
|||
generated sources and finally add the Hibernate Static Metamodel Generator and JPA 2 jar
|
||||
files to the factory path.
|
||||
|
||||
If you use JDK 11+, you also need to add the `javax.xml.bind:jaxb-api` and
|
||||
`org.glassfish.jaxb:jaxb-runtime` jars as JAXB is not included in the JDK anymore.
|
||||
|
||||
image:eclipse-annotation-processor-config.png[]
|
||||
|
||||
=== Processor specific options
|
||||
|
|
|
@ -12,7 +12,7 @@ applications can leverage and customize Services and Registries.
|
|||
|
||||
== What is a Service?
|
||||
|
||||
Services provide various types of functionality, in a pluggable manner. Specifically they are interfaces defining
|
||||
Services provide various types of functionality, in a pluggable manner. Specifically, they are interfaces defining
|
||||
certain functionality and then implementations of those service contract interfaces. The interface is known as the
|
||||
service role; the implementation class is known as the service implementation. The pluggability comes from the fact
|
||||
that the service implementation adheres to contract defined by the interface of the service role and that consumers
|
||||
|
|
|
@ -4,321 +4,14 @@
|
|||
|
||||
The http://wildfly.org/[WildFly application server] includes Hibernate ORM as the default JPA provider out of the box.
|
||||
|
||||
This means that you don't need to package Hibernate ORM with the applications you deploy on WildFly,
|
||||
instead the application server will automatically enable Hibernate support if it detects that your application is using JPA.
|
||||
In previous versions of Hibernate ORM, we offered a "feature pack" to enable anyone to use the very latest version in
|
||||
WildFly as soon as a new release of Hibernate ORM was published.
|
||||
|
||||
You can also benefit from these modules when not using JPA, to avoid including Hibernate ORM and all its
|
||||
dependencies into your deployment.
|
||||
This will require activating the module explicitly using a `jboss-deployment-structure.xml` file or a Manifest entry:
|
||||
see https://docs.jboss.org/author/display/WFLY/Class+Loading+in+WildFly[Class Loading in WildFly] for some examples.
|
||||
|
||||
Often a newer version of Hibernate ORM is available than the one coming with a given WildFly release; to make sure
|
||||
you can enjoy the latest version of Hibernate on any reasonably recent WildFly edition we publish _WildFly feature packs_, these can be used with various WildFly provisioning tools to create a custom server with a different
|
||||
version of Hibernate ORM.
|
||||
|
||||
== What is a WildFly feature pack
|
||||
|
||||
WildFly is a runtime built on https://jboss-modules.github.io/jboss-modules/manual/[JBoss Modules]; this is a lightweight and efficient modular classloader which allows the different components of a modern server to be defined as independent modules.
|
||||
|
||||
Hibernate ORM and its components are defined as one such module; this implies you can even have multiple different versions of an Hibernate ORM module in the same runtime while having their classpaths isolated from each other: you can add the very latest Hibernate ORM releases to WildFly without having to remove the existing copy.
|
||||
|
||||
This gives you the flexibility to use the latest version for one of your application with the peace of mind that you won't break other applications requiring a different version of Hibernate. We don't generally recommend to abuse this system but it's often useful to be able for example to upgrade and test one application at a time, rather than having to mandate a new version for multiple services and have to update them all in one shot.
|
||||
|
||||
A feature pack is a zip file containing some XML files which define the structure of the JBoss Module(s) and list the Java "jar" files which will be needed by identifying them via Maven coordinates.
|
||||
|
||||
This has some further benefits:
|
||||
|
||||
- A feature pack is very small as it's just a zipped file with some lines of XML.
|
||||
- In terms of disk space you can build a "thin" server which doesn't actually include a copy of your Maven artifacts but just loads the classes on demand from your local Maven cache.
|
||||
- You still have the option to build a "full" server so that it can be re-distributed without needing to copy a local Maven repository.
|
||||
- When using the provisioning tool you benefit from a composable approach, so N different packs can be combined to form a custom server.
|
||||
- Since the feature pack XML merely lists which artifacts are recommended (rather than including a binary copy) it is easy to override the exact versions; this is ideal to apply micro, urgent fixes.
|
||||
- A feature pack can declare transitive dependencies on other feature packs, so you will automatically be provided all non-optional dependencies of Hibernate ORM.
|
||||
|
||||
It is also interesting to highlight what it is not: differently than most build systems, the focus of JBoss Modules is not on how a project is built but how it should be run.
|
||||
|
||||
An important aspect is that runtime dependencies of a JBoss Module are *not transitive*: so for example if the latest Hibernate ORM requires Byte Buddy version 5 (as an example) while any other module that your application needs depends on Byte Buddy version 6 this will not be a problem.
|
||||
|
||||
Upgrading your applications could not be easier, as you won't have to ensure that all your dependencies are aligned to use the same versions.
|
||||
|
||||
|
||||
== How to get the latest Hibernate ORM feature pack for WildFly
|
||||
|
||||
The feature pack can be downloaded from Maven Central, to facilitate automatic unpacking during your build.
|
||||
Such a feature pack is released whenever any new version of Hibernate ORM is released.
|
||||
|
||||
.Maven identifier for the WildFly feature pack
|
||||
|
||||
====
|
||||
[source, XML]
|
||||
[subs="verbatim,attributes"]
|
||||
----
|
||||
<groupId>org.hibernate</groupId>
|
||||
<artifactId>hibernate-orm-jbossmodules</artifactId>
|
||||
<version>{fullVersion}</version>
|
||||
----
|
||||
====
|
||||
|
||||
Typically you won't download this file directly but you will use either a Maven plugin or a Gradle plugin to build the custom WildFly server.
|
||||
|
||||
== Create a Provisioning Configuration File
|
||||
|
||||
You will need a small XML file to define which feature packs you want to assemble.
|
||||
|
||||
The following example will create a full WildFly server but also include a copy of the latest Hibernate ORM modules:
|
||||
|
||||
|
||||
.Example Provisioning Configuration File
|
||||
====
|
||||
[source, XML]
|
||||
[subs="verbatim,attributes"]
|
||||
----
|
||||
<server-provisioning xmlns="urn:wildfly:server-provisioning:1.1" copy-module-artifacts="true">
|
||||
<feature-packs>
|
||||
<feature-pack
|
||||
groupId="org.hibernate"
|
||||
artifactId="hibernate-orm-jbossmodules"
|
||||
version="${hibernate-orm.version}" />
|
||||
<feature-pack
|
||||
groupId="org.wildfly"
|
||||
artifactId="wildfly-feature-pack"
|
||||
version="${wildfly.version}" />
|
||||
</feature-packs>
|
||||
</server-provisioning>
|
||||
----
|
||||
====
|
||||
|
||||
Of course should you wish your custom server to have more features you can list additional feature packs.
|
||||
|
||||
It is also possible to build a "thin" server by not setting the _copy-module-artifacts_ flag, or you can further customize and filter out things you want removed.
|
||||
|
||||
See https://github.com/wildfly/wildfly-build-tools[the README of the WildFly Build Tools project] on Github for more details.
|
||||
|
||||
Next you can use either the https://github.com/wildfly/wildfly-build-tools[Maven plugin] or the https://plugins.gradle.org/plugin/org.wildfly.build.featurepack[Gradle plugin] to actually create a fresh copy of your custom server.
|
||||
|
||||
== Maven users: invoke the WildFly Provisioning Plugin
|
||||
|
||||
Assuming the previous Provisioning Configuration File is saved as `server-provisioning.xml`, you will just have to refer the plugin to it, pick an output directory name and bind the plugin to the build lifecycle.
|
||||
|
||||
.Example Maven Provisioning
|
||||
====
|
||||
[source, XML]
|
||||
----
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.wildfly.build</groupId>
|
||||
<artifactId>wildfly-server-provisioning-maven-plugin</artifactId>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>server-provisioning</id>
|
||||
<goals>
|
||||
<goal>build</goal>
|
||||
</goals>
|
||||
<phase>compile</phase>
|
||||
<configuration>
|
||||
<config-file>server-provisioning.xml</config-file>
|
||||
<server-name>wildfly-custom</server-name>
|
||||
</configuration>
|
||||
</execution>
|
||||
----
|
||||
====
|
||||
|
||||
==== JPA version override
|
||||
|
||||
With WildFly 12 being built with JavaEE7 in mind, it ships the JPA 2.1 API.
|
||||
|
||||
Hibernate ORM 5.3 requires JPA 2.2, and it is not possible at this time to replace the JPA API using the Maven provisioning plugin so you'll have to apply a "WildFly patch" as well.
|
||||
|
||||
A WildFly patch can be applied from the WildFly CLI; here we show how to automate it all with Maven plugins.
|
||||
|
||||
.Example Maven script to patch the JPA version in WildFly:
|
||||
====
|
||||
[source, XML]
|
||||
----
|
||||
<plugin>
|
||||
<artifactId>maven-dependency-plugin</artifactId>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>fetch-jpa-patch</id>
|
||||
<phase>process-test-resources</phase>
|
||||
<goals>
|
||||
<goal>copy</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<artifactItems>
|
||||
<artifactItem>
|
||||
<groupId>org.hibernate.javax.persistence</groupId>
|
||||
<artifactId>hibernate-jpa-api-2.2-wildflymodules</artifactId>
|
||||
<classifier>wildfly-12.0.0.Final-patch</classifier>
|
||||
<version>1.0.0.Beta2</version>
|
||||
<type>zip</type>
|
||||
<outputDirectory>${project.build.directory}</outputDirectory>
|
||||
<overWrite>true</overWrite>
|
||||
<destFileName>hibernate-jpa-api-2.2-wildflymodules-patch.zip</destFileName>
|
||||
</artifactItem>
|
||||
</artifactItems>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.wildfly.plugins</groupId>
|
||||
<artifactId>wildfly-maven-plugin</artifactId>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>apply-wildfly-jpa22-patch-file</id>
|
||||
<phase>pre-integration-test</phase>
|
||||
<goals>
|
||||
<goal>execute-commands</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<offline>true</offline>
|
||||
<jbossHome>${jbossHome.provisionedPath}</jbossHome>
|
||||
<!-- The CLI script below will fail if the patch was already applied in a previous build -->
|
||||
<fail-on-error>false</fail-on-error>
|
||||
<commands>
|
||||
<command>patch apply --override-all ${project.build.directory}/hibernate-jpa-api-2.2-wildflymodules-patch.zip</command>
|
||||
</commands>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
----
|
||||
====
|
||||
|
||||
== Gradle users: invoke the WildFly Provisioning plugin
|
||||
|
||||
A Gradle plugin is also available, and in this case it will take just a couple of lines.
|
||||
|
||||
Remember when creating a "thin server": the WildFly classloader will not be able to load jars from the local Gradle cache: this might trigger a second download as it looks into local Maven repositories exclusively.
|
||||
Especially if you are developing additional feature packs using Gradle, make sure to publish them into a Maven repository so that WildFly can load them.
|
||||
|
||||
What follows is a full Gradle build script; in contrast to the previous Maven example, which was kept incomplete for brevity, this is a fully working build script. Also, it won't require applying additional patches to replace the JPA version.
|
||||
|
||||
.Example Gradle Provisioning
|
||||
====
|
||||
[source, Groovy]
|
||||
----
|
||||
plugins {
|
||||
id "org.wildfly.build.provision" version '0.0.6'
|
||||
}
|
||||
|
||||
repositories {
|
||||
mavenLocal()
|
||||
mavenCentral()
|
||||
maven {
|
||||
name 'jboss-nexus'
|
||||
url "http://repository.jboss.org/nexus/content/groups/public/"
|
||||
}
|
||||
}
|
||||
|
||||
provision {
|
||||
//Optional destination directory:
|
||||
destinationDir = file("wildfly-custom")
|
||||
|
||||
//Update the JPA API:
|
||||
override( 'org.hibernate.javax.persistence:hibernate-jpa-2.1-api' ) {
|
||||
groupId = 'javax.persistence'
|
||||
artifactId = 'javax.persistence-api'
|
||||
version = '2.2'
|
||||
}
|
||||
configuration = file( 'wildfly-server-provisioning.xml' )
|
||||
//Define variables which need replacing in the provisioning configuration!
|
||||
variables['wildfly.version'] = '12.0.0.Final'
|
||||
variables['hibernate-orm.version'] = '5.3.0.Final'
|
||||
}
|
||||
----
|
||||
====
|
||||
|
||||
You could paste this into a new file named `build.gradle` in an empty directory, then invoke:
|
||||
|
||||
gradle provision
|
||||
|
||||
and you'll have a full WildFly 12.0.0.Final server generated in the `wildfly-custom` subdirectory, including a copy of Hibernate ORM version 5.3.0.Final (in addition to any other version that WildFly normally includes).
|
||||
|
||||
|
||||
==== A note on repositories:
|
||||
|
||||
mavenLocal()::
|
||||
strictly not necessary but will make your builds much faster if you run it more than once.
|
||||
jboss-nexus::
|
||||
This additional repository is required. Most components of WildFly are available in Maven Central but there are some occasional exceptions.
|
||||
|
||||
==== The JPA version override
|
||||
|
||||
The JPA API is a fundamental component of the application server as it is used to integrate with various other standards; at this stage while the feature packs offer some degree of composability it is not yet possible
|
||||
to have additional, independent copies of the JPA API: it needs to be replaced.
|
||||
|
||||
Hibernate ORM 5.3.0 requires JPA 2.2, yet WildFly 12 ships with JPA version 2.1. Luckily this provisioning tool is also able to override any artifact resolution.
|
||||
|
||||
Of course when future versions of WildFly will be based on JPA 2.2, this step might soon no longer be necessary.
|
||||
|
||||
|
||||
== WildFly module identifiers: slots and conventions
|
||||
|
||||
Note that the Hibernate ORM modules coming with WildFly will remain untouched: you can switch between the original version and the new version from the ZIP file as needed as a matter of configuration. Different applications can use different versions.
|
||||
|
||||
The application server identifies modules using a name and a _slot_.
|
||||
By default, the module _org.hibernate:main_ will be used to provide JPA support for given deployments: _main_ is the default slot and represents the copy of Hibernate ORM coming with WildFly itself.
|
||||
|
||||
By convention all modules included with WildFly use the "main" slot, while the modules released by the Hibernate project
|
||||
will use a slot name which matches the version, and also provide an alias to match its "major.minor" version.
|
||||
|
||||
Our suggestion is to depend on the module using the "major.minor" representation, as this simplifies rolling out bugfix
|
||||
releases (micro version updates) of Hibernate ORM without changing application configuration (micro versions are always expected to be backwards compatible and released as bugfix only).
|
||||
|
||||
For example if your application wants to use the latest version of Hibernate ORM version {majorMinorVersion}.x it should declare to use the module _org.hibernate:{majorMinorVersion}_. You can of course decide to use the full version instead for more precise control, in case an application requires a very specific version.
|
||||
|
||||
== Switch to a different Hibernate ORM slot
|
||||
|
||||
In order to use a different module other than the default _org.hibernate:main_ specify the identifier of the module you wish to use via the `jboss.as.jpa.providerModule` property in the _persistence.xml_ file of your application, as in the following example.
|
||||
|
||||
[[wildfly-using-custom-hibernate-orm-version]]
|
||||
.Using an alternative module slot of Hibernate ORM
|
||||
====
|
||||
[source, XML]
|
||||
[subs="verbatim,attributes"]
|
||||
----
|
||||
<persistence xmlns="http://xmlns.jcp.org/xml/ns/persistence"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://xmlns.jcp.org/xml/ns/persistence
|
||||
http://xmlns.jcp.org/xml/ns/persistence/persistence_2_1.xsd"
|
||||
version="2.1" >
|
||||
|
||||
<persistence-unit name="examplePu">
|
||||
|
||||
<!-- ... -->
|
||||
|
||||
<properties>
|
||||
<property name="jboss.as.jpa.providerModule" value="org.hibernate:{majorMinorVersion}"/>
|
||||
</properties>
|
||||
|
||||
<!-- ... -->
|
||||
|
||||
</persistence-unit>
|
||||
</persistence>
|
||||
----
|
||||
====
|
||||
|
||||
Needless to say, this will affect the classpath of your application: if your single application declares multiple
|
||||
persistence units, they should all make a consistent choice!
|
||||
|
||||
This property is documented in the https://docs.jboss.org/author/display/WFLY/JPA+Reference+Guide[WildFly JPA Reference Guide];
|
||||
you might want to check it out as it lists several other useful properties.
|
||||
|
||||
== Limitations of using the custom WildFly modules
|
||||
|
||||
When using the custom modules provided by the feature packs you're going to give up on some of the integration which the application server normally automates.
|
||||
|
||||
For example enabling an Infinispan 2nd level cache is straight forward when using the default Hibernate ORM
|
||||
module, as WildFly will automatically setup the dependency to the Infinispan and clustering components.
|
||||
When using these custom modules such integration will no longer work automatically: you can still
|
||||
enable all normally available features but these will require explicit configuration, as if you were
|
||||
running Hibernate in a different container, or in no container.
|
||||
|
||||
You might be able to get a matching feature pack from the Infinispan or Ehcache projects, you can create a module yourself (after all it's just a simple XML file), or you can just add such additional dependencies in your application as in the old days: modules and feature packs give you some advantages but good old-style jars are also still a viable option.
|
||||
|
||||
Needless to say, those users not interested in having the very latest versions can just use the versions integrated in WildFly and benefit from the library combinations carefully tested by the WildFly team.
|
||||
Unfortunately, since version 5.5 is upgrading to JPA 3.0 and targets integration with components of the Jakarta
|
||||
EE 9 stack, such feature had to be disabled.
|
||||
|
||||
As soon as WildFly releases a Jakarta EE 9 compatible server it might be possible to re-introduce such a feature, but
|
||||
we can't guarantee that we will do this as the server changed the tooling to define such packs.
|
||||
|
||||
As usual, please let us know how important this is for you, and while we'll gladly help to make this happen we might need
|
||||
to rely on volunteers to help by contributing patches, testing it out and providing feedback.
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
== References
|
||||
|
||||
[bibliography]
|
||||
- [[[PoEAA]]] Martin Fowler. Patterns of Enterprise Application Architecture.
|
||||
- [[[PoEAA]]] Martin Fowler. https://www.martinfowler.com/books/eaa.html[Patterns of Enterprise Application Architecture].
|
||||
Addison-Wesley Publishing Company. 2003.
|
||||
- [[[JPwH]]] Christian Bauer & Gavin King. http://www.manning.com/bauer2[Java Persistence with Hibernate]. Manning Publications Co. 2007.
|
||||
- [[[JPwH]]] Christian Bauer & Gavin King. https://www.manning.com/books/java-persistence-with-hibernate-second-edition[Java Persistence with Hibernate, Second Edition]. Manning Publications Co. 2015.
|
||||
|
|
|
@ -30,6 +30,7 @@ include::chapters/multitenancy/MultiTenancy.adoc[]
|
|||
include::chapters/osgi/OSGi.adoc[]
|
||||
include::chapters/envers/Envers.adoc[]
|
||||
include::chapters/portability/Portability.adoc[]
|
||||
include::chapters/statistics/Statistics.adoc[]
|
||||
|
||||
include::appendices/Configurations.adoc[]
|
||||
include::appendices/Annotations.adoc[]
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
[[preface]]
|
||||
== Preface
|
||||
|
||||
Working with both Object-Oriented software and Relational Databases can be cumbersome and time consuming.
|
||||
Working with both Object-Oriented software and Relational Databases can be cumbersome and time-consuming.
|
||||
Development costs are significantly higher due to a paradigm mismatch between how data is represented in objects versus relational databases.
|
||||
Hibernate is an Object/Relational Mapping solution for Java environments.
|
||||
The term http://en.wikipedia.org/wiki/Object-relational_mapping[Object/Relational Mapping] refers to the technique of mapping data from an object model representation to a relational data model representation (and visa versa).
|
||||
The term http://en.wikipedia.org/wiki/Object-relational_mapping[Object/Relational Mapping] refers to the technique of mapping data from an object model representation to a relational data model representation (and vice versa).
|
||||
|
||||
Hibernate not only takes care of the mapping from Java classes to database tables (and from Java data types to SQL data types), but also provides data query and retrieval facilities.
|
||||
It can significantly reduce development time otherwise spent with manual data handling in SQL and JDBC.
|
||||
|
@ -44,9 +44,9 @@ While having a strong background in SQL is not required to use Hibernate, it cer
|
|||
Probably even more important is an understanding of data modeling principles.
|
||||
You might want to consider these resources as a good starting point:
|
||||
|
||||
* http://en.wikipedia.org/wiki/Data_modeling[Data Modeling Wikipedia definition]
|
||||
* http://en.wikipedia.org/wiki/Data_modeling[Data modeling Wikipedia definition]
|
||||
* http://www.agiledata.org/essays/dataModeling101.html[Data Modeling 101]
|
||||
|
||||
Understanding the basics of transactions and design patterns such as _Unit of Work_ <<Bibliography.adoc#PoEAA,PoEAA>> or _Application Transaction_ are important as well.
|
||||
Understanding the basics of transactions and design patterns such as _Unit of Work_ (<<Bibliography.adoc#PoEAA,PoEAA>>) or _Application Transaction_ are important as well.
|
||||
These topics will be discussed in the documentation, but a prior understanding will certainly help.
|
||||
====
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -13,7 +13,7 @@ this feature is not suitable for a production environment.
|
|||
An automated schema migration tool (e.g. https://flywaydb.org/[Flyway], http://www.liquibase.org/[Liquibase]) allows you to use any database-specific DDL feature (e.g. Rules, Triggers, Partitioned Tables).
|
||||
Every migration should have an associated script, which is stored on the Version Control System, along with the application source code.
|
||||
|
||||
When the application is deployed on a production-like QA environment, and the deploy worked as expected, then pushing the deploy to a production environment should be straightforward since the latest schema migration was already tested.
|
||||
When the application is deployed on a production-like QA environment, and the deployment worked as expected, then pushing the deployment to a production environment should be straightforward since the latest schema migration was already tested.
|
||||
|
||||
[TIP]
|
||||
====
|
||||
|
@ -62,7 +62,7 @@ This saves database roundtrips, and so it https://leanpub.com/high-performance-j
|
|||
|
||||
Not only `INSERT` and `UPDATE` statements, but even `DELETE` statements can be batched as well.
|
||||
For `INSERT` and `UPDATE` statements, make sure that you have all the right configuration properties in place, like ordering inserts and updates and activating batching for versioned data.
|
||||
Check out this article for more details on this topic.
|
||||
Check out https://vladmihalcea.com/how-to-batch-insert-and-update-statements-with-hibernate/[this article] for more details on this topic.
|
||||
|
||||
For `DELETE` statements, there is no option to order parent and child statements, so cascading can interfere with the JDBC batching process.
|
||||
|
||||
|
@ -81,7 +81,7 @@ When it comes to identifiers, you can either choose a natural id or a synthetic
|
|||
|
||||
For natural identifiers, the *assigned* identifier generator is the right choice.
|
||||
|
||||
For synthetic keys, the application developer can either choose a randomly generates fixed-size sequence (e.g. UUID) or a natural identifier.
|
||||
For synthetic keys, the application developer can either choose a randomly generated fixed-size sequence (e.g. UUID) or a natural identifier.
|
||||
Natural identifiers are very practical, being more compact than their UUID counterparts, so there are multiple generators to choose from:
|
||||
|
||||
- `IDENTITY`
|
||||
|
@ -127,22 +127,22 @@ On the other hand, the more exotic the association mapping, the better the chanc
|
|||
Therefore, the `@ManyToOne` and the `@OneToOne` child-side association are best to represent a `FOREIGN KEY` relationship.
|
||||
|
||||
The parent-side `@OneToOne` association requires bytecode enhancement
|
||||
so that the association can be loaded lazily. Otherwise, the parent-side is always fetched even if the association is marked with `FetchType.LAZY`.
|
||||
so that the association can be loaded lazily. Otherwise, the parent-side association is always fetched even if the association is marked with `FetchType.LAZY`.
|
||||
|
||||
For this reason, it's best to map `@OneToOne` association using `@MapsId` so that the `PRIMARY KEY` is shared between the child and the parent entities.
|
||||
When using `@MapsId`, the parent-side becomes redundant since the child-entity can be easily fetched using the parent entity identifier.
|
||||
When using `@MapsId`, the parent-side association becomes redundant since the child-entity can be easily fetched using the parent entity identifier.
|
||||
|
||||
For collections, the association can be either:
|
||||
|
||||
- unidirectional
|
||||
- bidirectional
|
||||
|
||||
For unidirectional collections, `Sets` are the best choice because they generate the most efficient SQL statements.
|
||||
Unidirectional `Lists` are less efficient than a `@ManyToOne` association.
|
||||
For unidirectional collections, ``Set``s are the best choice because they generate the most efficient SQL statements.
|
||||
Unidirectional ``List``s are less efficient than a `@ManyToOne` association.
|
||||
|
||||
Bidirectional associations are usually a better choice because the `@ManyToOne` side controls the association.
|
||||
|
||||
Embeddable collections (``@ElementCollection`) are unidirectional associations, hence `Sets` are the most efficient, followed by ordered `Lists`, whereas bags (unordered `Lists`) are the least efficient.
|
||||
Embeddable collections (``@ElementCollection``) are unidirectional associations, hence ``Set``s are the most efficient, followed by ordered ``List``s, whereas bags (unordered ``List``s) are the least efficient.
|
||||
|
||||
The `@ManyToMany` annotation is rarely a good choice because it treats both sides as unidirectional associations.
|
||||
|
||||
|
@ -198,7 +198,7 @@ Prior to JPA, Hibernate used to have all associations as `LAZY` by default.
|
|||
However, when JPA 1.0 specification emerged, it was thought that not all providers would use Proxies. Hence, the `@ManyToOne` and the `@OneToOne` associations are now `EAGER` by default.
|
||||
|
||||
The `EAGER` fetching strategy cannot be overwritten on a per query basis, so the association is always going to be retrieved even if you don't need it.
|
||||
More, if you forget to `JOIN FETCH` an `EAGER` association in a JPQL query, Hibernate will initialize it with a secondary statement, which in turn can lead to N+1 query issues.
|
||||
Moreover, if you forget to `JOIN FETCH` an `EAGER` association in a JPQL query, Hibernate will initialize it with a secondary statement, which in turn can lead to N+1 query issues.
|
||||
====
|
||||
|
||||
So, `EAGER` fetching is to be avoided. For this reason, it's better if all associations are marked as `LAZY` by default.
|
||||
|
@ -208,19 +208,19 @@ There are good and bad ways to treat the `LazyInitializationException`.
|
|||
|
||||
The best way to deal with `LazyInitializationException` is to fetch all the required associations prior to closing the Persistence Context.
|
||||
The `JOIN FETCH` directive is good for `@ManyToOne` and `@OneToOne` associations, and for at most one collection (e.g. `@OneToMany` or `@ManyToMany`).
|
||||
If you need to fetch multiple collections, to avoid a Cartesian Product, you should use secondary queries which are triggered either by navigating the `LAZY` association or by calling `Hibernate#initialize(proxy)` method.
|
||||
If you need to fetch multiple collections, to avoid a Cartesian Product, you should use secondary queries which are triggered either by navigating the `LAZY` association or by calling `Hibernate#initialize(Object proxy)` method.
|
||||
|
||||
[[best-practices-caching]]
|
||||
=== Caching
|
||||
|
||||
Hibernate has two caching layers:
|
||||
|
||||
- the first-level cache (Persistence Context) which is a application-level repeatable reads.
|
||||
- the second-level cache which, unlike application-level caches, it doesn't store entity aggregates but normalized dehydrated entity entries.
|
||||
- the first-level cache (Persistence Context) which provides application-level repeatable reads.
|
||||
- the second-level cache which, unlike application-level caches, doesn't store entity aggregates but normalized dehydrated entity entries.
|
||||
|
||||
The first-level cache is not a caching solution "per se", being more useful for ensuring `READ COMMITTED` isolation level.
|
||||
|
||||
While the first-level cache is short lived, being cleared when the underlying `EntityManager` is closed, the second-level cache is tied to an `EntityManagerFactory`.
|
||||
While the first-level cache is short-lived, being cleared when the underlying `EntityManager` is closed, the second-level cache is tied to an `EntityManagerFactory`.
|
||||
Some second-level caching providers offer support for clusters. Therefore, a node needs only to store a subset of the whole cached data.
|
||||
|
||||
Although the second-level cache can reduce transaction response time since entities are retrieved from the cache rather than from the database,
|
||||
|
@ -229,12 +229,12 @@ and you should consider these alternatives prior to jumping to a second-level ca
|
|||
|
||||
- tuning the underlying database cache so that the working set fits into memory, therefore reducing Disk I/O traffic.
|
||||
- optimizing database statements through JDBC batching, statement caching, indexing can reduce the average response time, therefore increasing throughput as well.
|
||||
- database replication is also a very valuable option to increase read-only transaction throughput
|
||||
- database replication is also a very valuable option to increase read-only transaction throughput.
|
||||
|
||||
After properly tuning the database, to further reduce the average response time and increase the system throughput, application-level caching becomes inevitable.
|
||||
|
||||
Topically, a key-value application-level cache like https://memcached.org/[Memcached] or http://redis.io/[Redis] is a common choice to store data aggregates.
|
||||
If you can duplicate all data in the key-value store, you have the option of taking down the database system for maintenance without completely loosing availability since read-only traffic can still be served from the cache.
|
||||
Typically, a key-value application-level cache like https://memcached.org/[Memcached] or http://redis.io/[Redis] is a common choice to store data aggregates.
|
||||
If you can duplicate all data in the key-value store, you have the option of taking down the database system for maintenance without completely losing availability since read-only traffic can still be served from the cache.
|
||||
|
||||
One of the main challenges of using an application-level cache is ensuring data consistency across entity aggregates.
|
||||
That's where the second-level cache comes to the rescue.
|
||||
|
@ -255,7 +255,7 @@ Both `READ_WRITE` and `TRANSACTIONAL` use write-through caching, while `NONSTRIC
|
|||
For this reason, `NONSTRICT_READ_WRITE` is not very suitable if entities are changed frequently.
|
||||
|
||||
When using clustering, the second-level cache entries are spread across multiple nodes.
|
||||
When using http://blog.infinispan.org/2015/10/hibernate-second-level-cache.html[Infinispan distributed cache], only `READ_WRITE` and `NONSTRICT_READ_WRITE` are available for read-write caches.
|
||||
When using https://infinispan.org/blog/2015/10/01/hibernate-second-level-cache/[Infinispan distributed cache], only `READ_WRITE` and `NONSTRICT_READ_WRITE` are available for read-write caches.
|
||||
Bear in mind that `NONSTRICT_READ_WRITE` offers a weaker consistency guarantee since stale updates are possible.
|
||||
|
||||
[NOTE]
|
||||
|
|
|
@ -5,12 +5,12 @@
|
|||
=== Strategy configurations
|
||||
|
||||
Many configuration settings define pluggable strategies that Hibernate uses for various purposes.
|
||||
The configuration of many of these strategy type settings accept definition in various forms.
|
||||
The documentation of such configuration settings refer here.
|
||||
The configurations of many of these strategy type settings accept definition in various forms.
|
||||
The documentation of such configuration settings refers here.
|
||||
The types of forms available in such cases include:
|
||||
|
||||
short name (if defined)::
|
||||
Certain built-in strategy implementations have a corresponding short name.
|
||||
Certain built-in strategy implementations have a corresponding short name
|
||||
strategy instance::
|
||||
An instance of the strategy implementation to use can be specified
|
||||
strategy Class reference::
|
||||
|
@ -22,17 +22,17 @@ strategy Class name::
|
|||
=== General configuration
|
||||
|
||||
`*hibernate.dialect*` (e.g. `org.hibernate.dialect.PostgreSQL94Dialect`)::
|
||||
The classname of a Hibernate https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/dialect/Dialect.html[`Dialect`] from which Hibernate can generate SQL optimized for a particular relational database.
|
||||
The class name of a Hibernate https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/dialect/Dialect.html[`Dialect`] from which Hibernate can generate SQL optimized for a particular relational database.
|
||||
+
|
||||
In most cases Hibernate can choose the correct https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/dialect/Dialect.html[`Dialect`] implementation based on the JDBC metadata returned by the JDBC driver.
|
||||
In most cases, Hibernate can choose the correct https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/dialect/Dialect.html[`Dialect`] implementation based on the JDBC metadata returned by the JDBC driver.
|
||||
+
|
||||
`*hibernate.current_session_context_class*` (e.g. `jta`, `thread`, `managed`, or a custom class implementing `org.hibernate.context.spi.CurrentSessionContext`)::
|
||||
+
|
||||
Supply a custom strategy for the scoping of the _current_ `Session`.
|
||||
Supplies a custom strategy for the scoping of the _current_ `Session`.
|
||||
+
|
||||
The definition of what exactly _current_ means is controlled by the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/context/spi/CurrentSessionContext.html[`CurrentSessionContext`] implementation in use.
|
||||
+
|
||||
Note that for backwards compatibility, if a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/context/spi/CurrentSessionContext.html[`CurrentSessionContext`] is not configured but JTA is configured this will default to the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/context/internal/JTASessionContext.html[`JTASessionContext`].
|
||||
Note that for backward compatibility, if a `CurrentSessionContext` is not configured but JTA is configured this will default to the `JTASessionContext`.
|
||||
|
||||
[[configurations-jpa-compliance]]
|
||||
=== JPA compliance
|
||||
|
@ -42,10 +42,10 @@ This setting controls if Hibernate `Transaction` should behave as defined by th
|
|||
since it extends the JPA one.
|
||||
|
||||
`*hibernate.jpa.compliance.query*` (e.g. `true` or `false` (default value))::
|
||||
Controls whether Hibernate's handling of `javax.persistence.Query` (JPQL, Criteria and native-query) should strictly follow the JPA spec.
|
||||
Controls whether Hibernate's handling of `javax.persistence.Query` (JPQL, Criteria and native query) should strictly follow the JPA spec.
|
||||
+
|
||||
This includes both in terms of parsing or translating a query as well as calls to the `javax.persistence.Query` methods throwing spec
|
||||
defined exceptions where as Hibernate might not.
|
||||
defined exceptions whereas Hibernate might not.
|
||||
|
||||
`*hibernate.jpa.compliance.list*` (e.g. `true` or `false` (default value))::
|
||||
Controls whether Hibernate should recognize what it considers a "bag" (`org.hibernate.collection.internal.PersistentBag`)
|
||||
|
@ -58,12 +58,12 @@ is just missing (and its defaults will apply).
|
|||
JPA defines specific exceptions upon calling specific methods on `javax.persistence.EntityManager` and `javax.persistence.EntityManagerFactory`
|
||||
objects which have been closed previously.
|
||||
+
|
||||
This setting controls whether the JPA spec defined behavior or the Hibernate behavior will be used.
|
||||
This setting controls whether the JPA spec-defined behavior or the Hibernate behavior will be used.
|
||||
+
|
||||
If enabled, Hibernate will operate in the JPA specified way, throwing exceptions when the spec says it should.
|
||||
|
||||
`*hibernate.jpa.compliance.proxy*` (e.g. `true` or `false` (default value))::
|
||||
The JPA spec says that a `javax.persistence.EntityNotFoundException` should be thrown when accessing an entity Proxy
|
||||
The JPA spec says that a `javax.persistence.EntityNotFoundException` should be thrown when accessing an entity proxy
|
||||
which does not have an associated table row in the database.
|
||||
+
|
||||
Traditionally, Hibernate does not initialize an entity proxy when accessing its identifier since we already know the identifier value,
|
||||
|
@ -77,7 +77,7 @@ The JPA spec says that the scope of TableGenerator and SequenceGenerator names i
|
|||
Traditionally, Hibernate has considered the names locally scoped.
|
||||
+
|
||||
If enabled, the names used by `@TableGenerator` and `@SequenceGenerator` will be considered global so configuring two different generators
|
||||
with the same name will cause a `java.lang.IllegalArgumentException' to be thrown at boot time.
|
||||
with the same name will cause a `java.lang.IllegalArgumentException` to be thrown at boot time.
|
||||
|
||||
[[configurations-database-connection]]
|
||||
=== Database connection properties
|
||||
|
@ -105,17 +105,17 @@ See discussion of `hibernate.connection.provider_disables_autocommit` as well.
|
|||
`*hibernate.connection.provider_disables_autocommit*` (e.g. `true` or `false` (default value))::
|
||||
Indicates a promise by the user that Connections that Hibernate obtains from the configured ConnectionProvider
|
||||
have auto-commit disabled when they are obtained from that provider, whether that provider is backed by
|
||||
a DataSource or some other Connection pooling mechanism. Generally this occurs when:
|
||||
* Hibernate is configured to get Connections from an underlying DataSource, and that DataSource is already configured to disable auto-commit on its managed Connections
|
||||
a DataSource or some other Connection pooling mechanism. Generally, this occurs when:
|
||||
* Hibernate is configured to get Connections from an underlying DataSource, and that DataSource is already configured to disable auto-commit on its managed Connections.
|
||||
* Hibernate is configured to get Connections from a non-DataSource connection pool and that connection pool is already configured to disable auto-commit.
|
||||
For the Hibernate-provided implementation, this will depend on the value of the `hibernate.connection.autocommit` setting.
|
||||
+
|
||||
Hibernate uses this assurance as an opportunity to opt-out of certain operations that may have a performance
|
||||
impact (although this impact is general negligible). Specifically, when a transaction is started via the
|
||||
Hibernate uses this assurance as an opportunity to opt out of certain operations that may have a performance
|
||||
impact (although this impact is generally negligible). Specifically, when a transaction is started via the
|
||||
Hibernate or JPA transaction APIs Hibernate will generally immediately acquire a Connection from the
|
||||
provider and:
|
||||
* check whether the Connection is initially in auto-commit mode via a call to `Connection#getAutoCommit` to know how to clean up the Connection when released.
|
||||
* start a JDBC transaction by calling `Connection#setAutocommit(false)`
|
||||
* start a JDBC transaction by calling `Connection#setAutoCommit(false)`.
|
||||
+
|
||||
We can skip both of those steps if we know that the ConnectionProvider will always return Connections with auto-commit disabled.
|
||||
That is the purpose of this setting. By setting it to `true`, the `Connection` acquisition can be delayed until the first
|
||||
|
@ -129,6 +129,39 @@ from the provider do not, in fact, have auto-commit disabled.
|
|||
Doing so will lead to Hibernate executing SQL operations outside of any JDBC/SQL transaction.
|
||||
====
|
||||
|
||||
`*hibernate.connection.handling_mode*`::
|
||||
Specifies how Hibernate should manage JDBC connections in terms of acquiring and releasing.
|
||||
This configuration property supersedes `*hibernate.connection.acquisition_mode*` and
|
||||
`*hibernate.connection.release_mode*`.
|
||||
+
|
||||
The connection handling mode strategies are defined by the
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/resource/jdbc/spi/PhysicalConnectionHandlingMode.html[`PhysicalConnectionHandlingMode`] enumeration.
|
||||
+
|
||||
The configuration can be either a `PhysicalConnectionHandlingMode` reference or its case-insensitive `String` representation.
|
||||
+
|
||||
For more details about the `PhysicalConnectionHandlingMode` and Hibernate connection handling, check out the
|
||||
<<chapters/jdbc/Database_Access.adoc#database-connection-handling,Connection handling>> section.
|
||||
|
||||
[line-through]#`*hibernate.connection.acquisition_mode*`# (e.g. `immediate`)::
|
||||
[NOTE]
|
||||
====
|
||||
This setting is deprecated. You should use the `*hibernate.connection.handling_mode*` instead.
|
||||
====
|
||||
|
||||
Specifies how Hibernate should acquire JDBC connections. The possible values are given by `org.hibernate.ConnectionAcquisitionMode`.
|
||||
|
||||
Should generally only configure this or `hibernate.connection.release_mode`, not both.
|
||||
|
||||
[line-through]#`*hibernate.connection.release_mode*`# (e.g. `auto` (default value))::
|
||||
[NOTE]
|
||||
====
|
||||
This setting is deprecated. You should use the `*hibernate.connection.handling_mode*` instead.
|
||||
====
|
||||
|
||||
Specifies how Hibernate should release JDBC connections. The possible values are given by the current transaction mode (`after_transaction` for JDBC transactions and `after_statement` for JTA transactions).
|
||||
|
||||
Should generally only configure this or `hibernate.connection.acquisition_mode`, not both.
|
||||
|
||||
`*hibernate.connection.datasource*`::
|
||||
Either a `javax.sql.DataSource` instance or a JNDI name under which to locate the `DataSource`.
|
||||
+
|
||||
|
@ -142,32 +175,22 @@ Names the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/
|
|||
Can reference:
|
||||
+
|
||||
** an instance of `ConnectionProvider`
|
||||
** a `Class<? extends ConnectionProvider` object reference
|
||||
** a `Class<? extends ConnectionProvider>` object reference
|
||||
** a fully qualified name of a class implementing `ConnectionProvider`
|
||||
+
|
||||
|
||||
The term `class` appears in the setting name due to legacy reasons; however it can accept instances.
|
||||
The term `class` appears in the setting name due to legacy reasons. However, it can accept instances.
|
||||
|
||||
`*hibernate.jndi.class*`::
|
||||
Names the JNDI `javax.naming.InitialContext` class.
|
||||
|
||||
`*hibernate.jndi.url*` (e.g. java:global/jdbc/default)::
|
||||
`*hibernate.jndi.url*` (e.g. `java:global/jdbc/default`)::
|
||||
Names the JNDI provider/connection URL.
|
||||
|
||||
`*hibernate.jndi*`::
|
||||
Names a prefix used to define arbitrary JNDI `javax.naming.InitialContext` properties.
|
||||
+
|
||||
These properties are passed along to `javax.naming.InitialContext#InitialContext(java.util.Hashtable)`
|
||||
|
||||
`*hibernate.connection.acquisition_mode*` (e.g. `immediate`)::
|
||||
Specifies how Hibernate should acquire JDBC connections. The possible values are given by `org.hibernate.ConnectionAcquisitionMode`.
|
||||
+
|
||||
Should generally only configure this or `hibernate.connection.release_mode`, not both.
|
||||
|
||||
`*hibernate.connection.release_mode*` (e.g. `auto` (default value))::
|
||||
Specifies how Hibernate should release JDBC connections. The possible values are given by the current transaction mode (`after_transaction` for JDBC transactions and `after_statement` for JTA transactions).
|
||||
+
|
||||
Should generally only configure this or `hibernate.connection.acquisition_mode`, not both.
|
||||
These properties are passed along to `javax.naming.InitialContext#InitialContext(java.util.Hashtable)` method.
|
||||
|
||||
==== Hibernate internal connection pool options
|
||||
|
||||
|
@ -178,7 +201,7 @@ Minimum number of connections for the built-in Hibernate connection pool.
|
|||
Maximum number of connections for the built-in Hibernate connection pool.
|
||||
|
||||
`*hibernate.connection.pool_validation_interval*` (e.g. 30 (default value))::
|
||||
The number of seconds between two consecutive pool validations. During validation, the pool size can increase or decreases based on the connection acquisition request count.
|
||||
The number of seconds between two consecutive pool validations. During validation, the pool size can increase or decrease based on the connection acquisition request count.
|
||||
|
||||
[[configurations-c3p0]]
|
||||
=== c3p0 properties
|
||||
|
@ -196,7 +219,7 @@ The number of seconds between two consecutive pool validations. During validatio
|
|||
Maximum size of C3P0 statement cache. Refers to http://www.mchange.com/projects/c3p0/#maxStatements[c3p0 `maxStatements` setting].
|
||||
|
||||
`*hibernate.c3p0.acquire_increment*` (e.g. 2)::
|
||||
Number of connections acquired at a time when there's no connection available in the pool. Refers to http://www.mchange.com/projects/c3p0/#acquireIncrement[c3p0 `acquireIncrement` setting].
|
||||
The number of connections acquired at a time when there's no connection available in the pool. Refers to http://www.mchange.com/projects/c3p0/#acquireIncrement[c3p0 `acquireIncrement` setting].
|
||||
|
||||
`*hibernate.c3p0.idle_test_period*` (e.g. 5)::
|
||||
Idle time before a C3P0 pooled connection is validated. Refers to http://www.mchange.com/projects/c3p0/#idleConnectionTestPeriod[c3p0 `idleConnectionTestPeriod` setting].
|
||||
|
@ -241,10 +264,25 @@ If true, the value specified by the `generator` attribute of the `@GeneratedValu
|
|||
The default value is `true` meaning that `@GeneratedValue.generator()` will be used as the sequence/table name by default.
|
||||
Users migrating from earlier versions using the legacy `hibernate_sequence` name should disable this setting.
|
||||
|
||||
`*hibernate.ejb.identifier_generator_strategy_provider*` (e.g. fully-qualified class name or an actual the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/jpa/spi/IdentifierGeneratorStrategyProvider.html[`IdentifierGeneratorStrategyProvider`] instance)::
|
||||
`*hibernate.ejb.identifier_generator_strategy_provider*` (e.g. fully-qualified class name or an actual https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/jpa/spi/IdentifierGeneratorStrategyProvider.html[`IdentifierGeneratorStrategyProvider`] instance)::
|
||||
This setting allows you to provide an instance or the class implementing the `org.hibernate.jpa.spi.IdentifierGeneratorStrategyProvider` interface,
|
||||
so you can provide a set of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/id/IdentifierGenerator.html[`IdentifierGenerator`] strategies that allow you to override the Hibernate Core default ones.
|
||||
|
||||
`*hibernate.id.disable_delayed_identity_inserts*` (e.g. `true` or `false` (default value))::
|
||||
If true, inserts that use generated-identifiers (identity/sequences) will never be delayed and will always be inserted immediately.
|
||||
This should be used if you run into any errors with `DelayedPostInsertIdentifier` and should be considered a _temporary_ fix.
|
||||
Please report your mapping that causes the problem to us so we can examine the default algorithm to see if your use case should be included.
|
||||
+
|
||||
The default value is `false` which means Hibernate will use an algorithm to determine if the insert can be delayed or if the insert should be performed immediately.
|
||||
|
||||
`*hibernate.id.sequence.increment_size_mismatch_strategy*` (e.g. `LOG`, `FIX` or `EXCEPTION` (default value))::
|
||||
This setting defines the `org.hibernate.id.SequenceMismatchStrategy` used when
|
||||
Hibernate detects a mismatch between a sequence configuration in an entity mapping
|
||||
and its database sequence object counterpart.
|
||||
+
|
||||
The default value is given by the `org.hibernate.id.SequenceMismatchStrategy#EXCEPTION`,
|
||||
meaning that an Exception is thrown when detecting such a conflict.
|
||||
|
||||
==== Quoting options
|
||||
|
||||
`*hibernate.globally_quoted_identifiers*` (e.g. `true` or `false` (default value))::
|
||||
|
@ -252,7 +290,7 @@ Should all database identifiers be quoted.
|
|||
|
||||
`*hibernate.globally_quoted_identifiers_skip_column_definitions*` (e.g. `true` or `false` (default value))::
|
||||
Assuming `hibernate.globally_quoted_identifiers` is `true`, this allows the global quoting to skip column-definitions as defined by `javax.persistence.Column`,
|
||||
`javax.persistence.JoinColumn`, etc, and while it avoids column-definitions being quoted due to global quoting, they can still be explicitly quoted in the annotation/xml mappings.
|
||||
`javax.persistence.JoinColumn`, etc., and while it avoids column-definitions being quoted due to global quoting, they can still be explicitly quoted in the annotation/xml mappings.
|
||||
|
||||
`*hibernate.auto_quote_keyword*` (e.g. `true` or `false` (default value))::
|
||||
Specifies whether to automatically quote any names that are deemed keywords.
|
||||
|
@ -289,7 +327,7 @@ The following short names are defined for this setting:
|
|||
`legacy-hbm`::: Uses the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/model/naming/ImplicitNamingStrategyLegacyHbmImpl.html[`ImplicitNamingStrategyLegacyHbmImpl`]
|
||||
`component-path`::: Uses the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/model/naming/ImplicitNamingStrategyComponentPathImpl.html[`ImplicitNamingStrategyComponentPathImpl`]
|
||||
+
|
||||
If this property happens to be empty, the fallback is to use `default` strategy.
|
||||
If this property happens to be empty, the fallback is to use the `default` strategy.
|
||||
|
||||
`*hibernate.physical_naming_strategy*` (e.g. `org.hibernate.boot.model.naming.PhysicalNamingStrategyStandardImpl` (default value))::
|
||||
Used to specify the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/model/naming/PhysicalNamingStrategy.html[`PhysicalNamingStrategy`] class to use.
|
||||
|
@ -300,7 +338,7 @@ Used to specify the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/jav
|
|||
Pass an implementation of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/archive/scan/spi/Scanner.html[`Scanner`].
|
||||
By default, https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/archive/scan/internal/StandardScanner.html[`StandardScanner`] is used.
|
||||
+
|
||||
Accepts either:
|
||||
Accepts:
|
||||
+
|
||||
** an actual `Scanner` instance
|
||||
** a reference to a Class that implements `Scanner`
|
||||
|
@ -309,7 +347,7 @@ Accepts either:
|
|||
`*hibernate.archive.interpreter*`::
|
||||
Pass https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/archive/spi/ArchiveDescriptorFactory.html[`ArchiveDescriptorFactory`] to use in the scanning process.
|
||||
+
|
||||
Accepts either:
|
||||
Accepts:
|
||||
+
|
||||
** an actual `ArchiveDescriptorFactory` instance
|
||||
** a reference to a Class that implements `ArchiveDescriptorFactory`
|
||||
|
@ -319,7 +357,7 @@ Accepts either:
|
|||
See information on https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/archive/scan/spi/Scanner.html[`Scanner`] about expected constructor forms.
|
||||
|
||||
`*hibernate.archive.autodetection*` (e.g. `hbm,class` (default value))::
|
||||
Identifies a comma-separate list of values indicating the mapping types we should auto-detect during scanning.
|
||||
Identifies a comma-separated list of values indicating the mapping types we should auto-detect during scanning.
|
||||
+
|
||||
Allowable values include:
|
||||
+
|
||||
|
@ -327,7 +365,7 @@ Allowable values include:
|
|||
`hbm`::: scan `hbm` mapping files (e.g. `hbm.xml`) to extract entity mapping metadata
|
||||
+
|
||||
|
||||
By default both HBM, annotations, and JPA XML mappings are scanned.
|
||||
By default, HBM, annotations, and JPA XML mappings are scanned.
|
||||
+
|
||||
When using JPA, to disable the automatic scanning of all entity classes, the `exclude-unlisted-classes` `persistence.xml` element must be set to true.
|
||||
Therefore, when setting `exclude-unlisted-classes` to true, only the classes that are explicitly declared in the `persistence.xml` configuration files are going to be taken into consideration.
|
||||
|
@ -336,14 +374,14 @@ Therefore, when setting `exclude-unlisted-classes` to true, only the classes tha
|
|||
Used to specify the order in which metadata sources should be processed.
|
||||
Value is a delimited-list whose elements are defined by https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/cfg/MetadataSourceType.html[`MetadataSourceType`].
|
||||
+
|
||||
Default is `hbm,class"`, therefore `hbm.xml` files are processed first, followed by annotations (combined with `orm.xml` mappings).
|
||||
The default is `hbm,class`, therefore `hbm.xml` files are processed first, followed by annotations (combined with `orm.xml` mappings).
|
||||
+
|
||||
When using JPA, the XML mapping overrides a conflicting annotation mapping that targets the same entity attribute.
|
||||
|
||||
==== JDBC-related options
|
||||
|
||||
`*hibernate.use_nationalized_character_data*` (e.g. `true` or `false` (default value))::
|
||||
Enable nationalized character support on all string / clob based attribute ( string, char, clob, text etc ).
|
||||
Enable nationalized character support on all string / clob based attributes (string, char, clob, text, etc.).
|
||||
|
||||
`*hibernate.jdbc.lob.non_contextual_creation*` (e.g. `true` or `false` (default value))::
|
||||
Should we not use contextual LOB creation (aka based on `java.sql.Connection#createBlob()` et al)? The default value for HANA, H2, and PostgreSQL is `true`.
|
||||
|
@ -373,12 +411,12 @@ Such a need is very uncommon and not recommended.
|
|||
==== Misc options
|
||||
|
||||
`*hibernate.create_empty_composites.enabled*` (e.g. `true` or `false` (default value))::
|
||||
Enable instantiation of composite/embeddable objects when all of its attribute values are `null`. The default (and historical) behavior is that a `null` reference will be used to represent the composite when all of its attributes are `null`.
|
||||
Enable instantiation of composite/embeddable objects when all of its attribute values are `null`. The default (and historical) behavior is that a `null` reference will be used to represent the composite when all of its attributes are ``null``s.
|
||||
+
|
||||
This is an experimental feature that has known issues. It should not be used in production until it is stabilized. See Hibernate Jira issue https://hibernate.atlassian.net/browse/HHH-11936[HHH-11936] for details.
|
||||
|
||||
`*hibernate.entity_dirtiness_strategy*` (e.g. fully-qualified class name or an actual `CustomEntityDirtinessStrategy` instance)::
|
||||
Setting to identify a `org.hibernate.CustomEntityDirtinessStrategy` to use.
|
||||
Setting to identify an `org.hibernate.CustomEntityDirtinessStrategy` to use.
|
||||
|
||||
`*hibernate.default_entity_mode*` (e.g. `pojo` (default value) or `dynamic-map`)::
|
||||
Default `EntityMode` for entity representation for all sessions opened from this `SessionFactory`, defaults to `pojo`.
|
||||
|
@ -396,7 +434,7 @@ Enable lazy loading feature in runtime bytecode enhancement. This way, even basi
|
|||
Enable association management feature in runtime bytecode enhancement which automatically synchronizes a bidirectional association when only one side is changed.
|
||||
|
||||
`*hibernate.bytecode.provider*` (e.g. `bytebuddy` (default value))::
|
||||
The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/bytecode/spi/BytecodeProvider.html[`BytecodeProvider`] built-in implementation flavor. Currently, only `bytebuddy` and `javassist` are valid values.
|
||||
The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/bytecode/spi/BytecodeProvider.html[`BytecodeProvider`] built-in implementation flavor. Currently, only `bytebuddy` and `javassist` are valid values; `bytebuddy` is the default and recommended choice; `javassist` will be removed soon.
|
||||
|
||||
`*hibernate.bytecode.use_reflection_optimizer*` (e.g. `true` or `false` (default value))::
|
||||
Should we use reflection optimization? The reflection optimizer implements the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/bytecode/spi/ReflectionOptimizer.html[`ReflectionOptimizer`] interface and improves entity instantiation and property getter/setter calls.
|
||||
|
@ -444,7 +482,7 @@ Should named queries be checked during startup?
|
|||
Global setting for whether `null` parameter bindings should be passed to database procedure/function calls as part of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/procedure/ProcedureCall.html[`ProcedureCall`] handling.
|
||||
Implicitly Hibernate will not pass the `null`, the intention being to allow any default argument values to be applied.
|
||||
+
|
||||
This defines a global setting, which can then be controlled per parameter via `org.hibernate.procedure.ParameterRegistration#enablePassingNulls(boolean)`
|
||||
This defines a global setting, which can then be controlled per parameter via `org.hibernate.procedure.ParameterRegistration#enablePassingNulls(boolean)`.
|
||||
+
|
||||
Values are `true` (pass the NULLs) or `false` (do not pass the NULLs).
|
||||
|
||||
|
@ -460,15 +498,15 @@ Can reference a
|
|||
`StatementInspector` implementation class name (fully-qualified class name).
|
||||
|
||||
`*hibernate.query.validate_parameters*` (e.g. `true` (default value) or `false`)::
|
||||
This configuration property can be used to disable parameters validation performed by `org.hibernate.query.Query#setParameter` when the the Session is bootstrapped via JPA
|
||||
`javax.persistence.EntityManagerFactory`
|
||||
This configuration property can be used to disable parameters validation performed by `org.hibernate.query.Query#setParameter` when the Session is bootstrapped via JPA
|
||||
`javax.persistence.EntityManagerFactory`.
|
||||
|
||||
`*hibernate.criteria.literal_handling_mode*` (e.g. `AUTO` (default value), `BIND` or `INLINE`)::
|
||||
By default, Criteria queries uses bind parameters for any literal that is not a numeric value.
|
||||
By default, Criteria queries use bind parameters for any literal that is not a numeric value.
|
||||
However, to increase the likelihood of JDBC statement caching, you might want to use bind parameters for numeric values too.
|
||||
+
|
||||
The `org.hibernate.query.criteria.LiteralHandlingMode#BIND` mode will use bind variables for any literal value.
|
||||
The `org.hibernate.query.criteria.LiteralHandlingMode#INLINE` mode will inline literal values as-is.
|
||||
The `org.hibernate.query.criteria.LiteralHandlingMode#INLINE` mode will inline literal values as is.
|
||||
+
|
||||
To prevent SQL injection, never use `org.hibernate.query.criteria.LiteralHandlingMode#INLINE` with String variables.
|
||||
Always use constants with the `org.hibernate.query.criteria.LiteralHandlingMode#INLINE` mode.
|
||||
|
@ -481,6 +519,30 @@ Raises an exception when in-memory pagination over collection fetch is about to
|
|||
+
|
||||
Disabled by default. Set to true to enable.
|
||||
|
||||
`*hibernate.query.immutable_entity_update_query_handling_mode*` (e.g. `EXCEPTION` or `WARNING` (default value))::
|
||||
Defines how `Immutable` entities are handled when executing a bulk update query.
|
||||
+
|
||||
By default, the (`ImmutableEntityUpdateQueryHandlingMode#WARNING`) mode is used, meaning that
|
||||
a warning log message is issued when an `@Immutable` entity is to be updated via a bulk update statement.
|
||||
+
|
||||
If the (`ImmutableEntityUpdateQueryHandlingMode#EXCEPTION`) mode is used, then a `HibernateException` is thrown instead.
|
||||
|
||||
`*hibernate.query.in_clause_parameter_padding*` (e.g. `true` or `false` (default value))::
|
||||
By default, the IN clause expands to include all bind parameter values.
|
||||
+
|
||||
However, for database systems supporting execution plan caching,
|
||||
there's a better chance of hitting the cache if the number of possible IN clause parameters lowers.
|
||||
+
|
||||
For this reason, we can expand the bind parameters to power-of-two: 4, 8, 16, 32, 64.
|
||||
This way, an IN clause with 5, 6, or 7 bind parameters will use the 8 IN clause,
|
||||
therefore reusing its execution plan.
|
||||
|
||||
`*hibernate.query.omit_join_of_superclass_tables*` (e.g. `false` or `true` (default value))::
|
||||
When you use `javax.persistence.InheritanceType#JOINED` strategy for inheritance mapping and query
|
||||
a value from an entity, all superclass tables are joined in the query regardless you need them.
|
||||
+
|
||||
With this setting set to true only superclass tables which are really needed are joined.
|
||||
|
||||
==== Multi-table bulk HQL operations
|
||||
|
||||
`*hibernate.hql.bulk_id_strategy*` (e.g. A fully-qualified class name, an instance, or a `Class` object reference)::
|
||||
|
@ -489,21 +551,24 @@ Provide a custom https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javado
|
|||
`*hibernate.hql.bulk_id_strategy.global_temporary.drop_tables*` (e.g. `true` or `false` (default value))::
|
||||
For databases that don't support local tables, but just global ones, this configuration property allows you to DROP the global tables used for multi-table bulk HQL operations when the `SessionFactory` or the `EntityManagerFactory` is closed.
|
||||
|
||||
`*hibernate.hql.bulk_id_strategy.local_temporary.drop_tables*` (e.g. `true` or `false` (default value))::
|
||||
This configuration property allows you to DROP the local temporary tables used for multi-table bulk HQL operations when the `SessionFactory` or the `EntityManagerFactory` is closed. This is useful when testing with a single connection pool against different schemas.
|
||||
|
||||
`*hibernate.hql.bulk_id_strategy.persistent.drop_tables*` (e.g. `true` or `false` (default value))::
|
||||
This configuration property is used by the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/hql/spi/id/persistent/PersistentTableBulkIdStrategy.html[`PersistentTableBulkIdStrategy`], that mimics temporary tables for databases which do not support temporary tables.
|
||||
It follows a pattern similar to the ANSI SQL definition of global temporary table using a "session id" column to segment rows from the various sessions.
|
||||
It follows a pattern similar to the ANSI SQL definition of the global temporary table using a "session id" column to segment rows from the various sessions.
|
||||
+
|
||||
This configuration property allows you to DROP the tables used for multi-table bulk HQL operations when the `SessionFactory` or the `EntityManagerFactory` is closed.
|
||||
|
||||
`*hibernate.hql.bulk_id_strategy.persistent.schema*` (e.g. Database schema name. By default, the `hibernate.default_schema` is used.)::
|
||||
This configuration property is used by the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/hql/spi/id/persistent/PersistentTableBulkIdStrategy.html[`PersistentTableBulkIdStrategy`], that mimics temporary tables for databases which do not support temporary tables.
|
||||
It follows a pattern similar to the ANSI SQL definition of global temporary table using a "session id" column to segment rows from the various sessions.
|
||||
It follows a pattern similar to the ANSI SQL definition of the global temporary table using a "session id" column to segment rows from the various sessions.
|
||||
+
|
||||
This configuration property defines the database schema used for storing the temporary tables used for bulk HQL operations.
|
||||
|
||||
`*hibernate.hql.bulk_id_strategy.persistent.catalog*` (e.g. Database catalog name. By default, the `hibernate.default_catalog` is used.)::
|
||||
This configuration property is used by the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/hql/spi/id/persistent/PersistentTableBulkIdStrategy.html[`PersistentTableBulkIdStrategy`], that mimics temporary tables for databases which do not support temporary tables.
|
||||
It follows a pattern similar to the ANSI SQL definition of global temporary table using a "session id" column to segment rows from the various sessions.
|
||||
It follows a pattern similar to the ANSI SQL definition of the global temporary table using a "session id" column to segment rows from the various sessions.
|
||||
+
|
||||
This configuration property defines the database catalog used for storing the temporary tables used for bulk HQL operations.
|
||||
|
||||
|
@ -515,9 +580,9 @@ Legacy 4.x behavior favored performing pagination in-memory by avoiding the use
|
|||
In 5.x, the limit handler behavior favors performance, thus, if the dialect doesn't support offsets, an exception is thrown instead.
|
||||
|
||||
`*hibernate.query.conventional_java_constants*` (e.g. `true` (default value) or `false`)::
|
||||
Setting which indicates whether or not Java constant follow the https://docs.oracle.com/javase/tutorial/java/nutsandbolts/variables.html[Java Naming conventions].
|
||||
Setting which indicates whether or not Java constants follow the https://docs.oracle.com/javase/tutorial/java/nutsandbolts/variables.html[Java Naming conventions].
|
||||
+
|
||||
Default is `true`.
|
||||
The default is `true`.
|
||||
Existing applications may want to disable this (set it `false`) if non-conventional Java constants are used.
|
||||
However, there is a significant performance overhead for using non-conventional Java constants
|
||||
since Hibernate cannot determine if aliases should be treated as Java constants or not.
|
||||
|
@ -544,19 +609,19 @@ Set this property to `true` if your JDBC driver returns correct row counts from
|
|||
`*hibernate.batch_fetch_style*` (e.g. `LEGACY`(default value))::
|
||||
Names the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/loader/BatchFetchStyle.html[`BatchFetchStyle`] to use.
|
||||
+
|
||||
Can specify either the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/loader/BatchFetchStyle.html[`BatchFetchStyle`] name (insensitively), or a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/loader/BatchFetchStyle.html[`BatchFetchStyle`] instance. `LEGACY}` is the default value.
|
||||
Can specify either the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/loader/BatchFetchStyle.html[`BatchFetchStyle`] name (case insensitively), or a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/loader/BatchFetchStyle.html[`BatchFetchStyle`] instance. `LEGACY` is the default value.
|
||||
|
||||
`*hibernate.jdbc.batch.builder*` (e.g. The fully qualified name of an https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/batch/spi/BatchBuilder.html[`BatchBuilder`] implementation class type or an actual object instance)::
|
||||
`*hibernate.jdbc.batch.builder*` (e.g. the fully qualified name of a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/batch/spi/BatchBuilder.html[`BatchBuilder`] implementation class type or an actual object instance)::
|
||||
Names the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/batch/spi/BatchBuilder.html[`BatchBuilder`] implementation to use.
|
||||
|
||||
[[configurations-database-fetch]]
|
||||
==== Fetching properties
|
||||
|
||||
`*hibernate.max_fetch_depth*` (e.g. A value between `0` and `3`)::
|
||||
Sets a maximum depth for the outer join fetch tree for single-ended associations. A single-ended association is a one-to-one or many-to-one assocation. A value of `0` disables default outer join fetching.
|
||||
`*hibernate.max_fetch_depth*` (e.g. a value between `0` and `3`)::
|
||||
Sets a maximum depth for the outer join fetch tree for single-ended associations. A single-ended association is a one-to-one or many-to-one association. A value of `0` disables default outer join fetching.
|
||||
|
||||
`*hibernate.default_batch_fetch_size*` (e.g. `4`,`8`, or `16`)::
|
||||
Default size for Hibernate Batch fetching of associations (lazily fetched associations can be fetched in batches to prevent N+1 query problems).
|
||||
The default size for Hibernate Batch fetching of associations (lazily fetched associations can be fetched in batches to prevent N+1 query problems).
|
||||
|
||||
`*hibernate.jdbc.fetch_size*` (e.g. `0` or an integer)::
|
||||
A non-zero value determines the JDBC fetch size, by calling `Statement.setFetchSize()`.
|
||||
|
@ -576,7 +641,7 @@ Enable wrapping of JDBC result sets in order to speed up column name lookups for
|
|||
`*hibernate.enable_lazy_load_no_trans*` (e.g. `true` or `false` (default value))::
|
||||
Initialize Lazy Proxies or Collections outside a given Transactional Persistence Context.
|
||||
+
|
||||
Although enabling this configuration can make `LazyInitializationException` go away, it's better to use a fetch plan that guarantees that all properties are properly initialised before the Session is closed.
|
||||
Although enabling this configuration can make `LazyInitializationException` go away, it's better to use a fetch plan that guarantees that all properties are properly initialized before the Session is closed.
|
||||
+
|
||||
In reality, you shouldn't probably enable this setting anyway.
|
||||
|
||||
|
@ -591,6 +656,9 @@ Write all SQL statements to the console. This is an alternative to setting the l
|
|||
`*hibernate.format_sql*` (e.g. `true` or `false` (default value))::
|
||||
Pretty-print the SQL in the log and console.
|
||||
|
||||
`*hibernate.highlight_sql*` (e.g. `true` or `false` (default value))::
|
||||
Colorize the SQL in the console using ANSI escape codes.
|
||||
|
||||
`*hibernate.use_sql_comments*` (e.g. `true` or `false` (default value))::
|
||||
If true, Hibernate generates comments inside the SQL, for easier debugging.
|
||||
|
||||
|
@ -599,7 +667,7 @@ If true, Hibernate generates comments inside the SQL, for easier debugging.
|
|||
`*hibernate.generate_statistics*` (e.g. `true` or `false`)::
|
||||
Causes Hibernate to collect statistics for performance tuning.
|
||||
|
||||
`*hibernate.stats.factory*` (e.g. the fully qualified name of an https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/stat/spi/StatisticsFactory.html[`StatisticsFactory`] implementation or an actual instance)::
|
||||
`*hibernate.stats.factory*` (e.g. the fully qualified name of a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/stat/spi/StatisticsFactory.html[`StatisticsFactory`] implementation or an actual instance)::
|
||||
The `StatisticsFactory` allow you to customize how the Hibernate Statistics are being collected.
|
||||
|
||||
`*hibernate.session.events.log*` (e.g. `true` or `false`)::
|
||||
|
@ -615,7 +683,7 @@ Either a shortcut name (e.g. `jcache`, `ehcache`) or the fully-qualified name of
|
|||
|
||||
`*hibernate.cache.default_cache_concurrency_strategy*`::
|
||||
Setting used to give the name of the default https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/CacheConcurrencyStrategy.html[`CacheConcurrencyStrategy`] to use
|
||||
when either `@javax.persistence.Cacheable` or `@org.hibernate.annotations.Cache`. `@org.hibernate.annotations.Cache` is used to override the global setting.
|
||||
when `@javax.persistence.Cacheable`, `@org.hibernate.annotations.Cache` or `@org.hibernate.annotations.Cache` is used to override the global setting.
|
||||
|
||||
`*hibernate.cache.use_minimal_puts*` (e.g. `true` (default value) or `false`)::
|
||||
Optimizes second-level cache operation to minimize writes, at the cost of more frequent reads. This is most useful for clustered caches and is enabled by default for clustered cache implementations.
|
||||
|
@ -624,10 +692,10 @@ Optimizes second-level cache operation to minimize writes, at the cost of more f
|
|||
Enables the query cache. You still need to set individual queries to be cachable.
|
||||
|
||||
`*hibernate.cache.use_second_level_cache*` (e.g. `true` (default value) or `false`)::
|
||||
Enable/disable the second level cache, which is enabled by default, although the default `RegionFactor` is `NoCachingRegionFactory` (meaning there is no actual caching implementation).
|
||||
Enable/disable the second-level cache, which is enabled by default, although the default `RegionFactor` is `NoCachingRegionFactory` (meaning there is no actual caching implementation).
|
||||
|
||||
`*hibernate.cache.query_cache_factory*` (e.g. Fully-qualified classname)::
|
||||
A custom https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/cache/spi/QueryCacheFactory.html[`QueryCacheFactory`] interface. The default is the built-in `StandardQueryCacheFactory`.
|
||||
`*hibernate.cache.query_cache_factory*` (e.g. fully-qualified class name)::
|
||||
A custom https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/cache/spi/TimestampsCacheFactory.html[`TimestampsCacheFactory`] interface. The default is the built-in `StandardTimestampsCacheFactory`.
|
||||
|
||||
`*hibernate.cache.region_prefix*` (e.g. A string)::
|
||||
A prefix for second-level cache region names.
|
||||
|
@ -639,29 +707,29 @@ Forces Hibernate to store data in the second-level cache in a more human-readabl
|
|||
Enables the automatic eviction of a bi-directional association's collection cache when an element in the `ManyToOne` collection is added/updated/removed without properly managing the change on the `OneToMany` side.
|
||||
|
||||
`*hibernate.cache.use_reference_entries*` (e.g. `true` or `false`)::
|
||||
Optimizes second-level cache operation to store immutable entities (aka "reference") which do not have associations into cache directly, this case, lots of disasseble and deep copy operations can be avoid. Default value of this property is `false`.
|
||||
Optimizes second-level cache operation to store immutable entities (aka "reference") which do not have associations into cache directly. In this case, disassembling and deep copy operations can be avoided. The default value of this property is `false`.
|
||||
|
||||
`*hibernate.ejb.classcache*` (e.g. `hibernate.ejb.classcache.org.hibernate.ejb.test.Item` = `read-write`)::
|
||||
Sets the associated entity class cache concurrency strategy for the designated region. Caching configuration should follow the following pattern `hibernate.ejb.classcache.<fully.qualified.Classname>` usage[, region] where usage is the cache strategy used and region the cache region name.
|
||||
Sets the associated entity class cache concurrency strategy for the designated region. Caching configuration should follow the following pattern `hibernate.ejb.classcache.<fully.qualified.Classname> = usage[, region]` where usage is the cache strategy used and region the cache region name.
|
||||
|
||||
`*hibernate.ejb.collectioncache*` (e.g. `hibernate.ejb.collectioncache.org.hibernate.ejb.test.Item.distributors` = `read-write, RegionName`)::
|
||||
Sets the associated collection cache concurrency strategy for the designated region. Caching configuration should follow the following pattern `hibernate.ejb.collectioncache.<fully.qualified.Classname>.<role>` usage[, region] where usage is the cache strategy used and region the cache region name
|
||||
Sets the associated collection cache concurrency strategy for the designated region. Caching configuration should follow the following pattern `hibernate.ejb.collectioncache.<fully.qualified.Classname>.<role> = usage[, region]` where usage is the cache strategy used and region the cache region name.
|
||||
|
||||
[[configurations-infinispan]]
|
||||
=== Infinispan properties
|
||||
|
||||
For more details about how to customize the Infinispan second-level cache provider, check out the
|
||||
http://infinispan.org/docs/stable/user_guide/user_guide.html#configuration_properties[Infinispan User Guide]
|
||||
https://infinispan.org/docs/stable/titles/integrating/integrating.html#configuration_properties[Infinispan User Guide].
|
||||
|
||||
[[configurations-transactions]]
|
||||
=== Transactions properties
|
||||
|
||||
`*hibernate.transaction.jta.platform*` (e.g. `JBossAS`, `BitronixJtaPlatform`)::
|
||||
Names the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/transaction/jta/platform/spi/JtaPlatform.html[`JtaPlatform`] implementation to use for integrating with JTA systems.
|
||||
Can reference either a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/transaction/jta/platform/spi/JtaPlatform.html[`JtaPlatform`] instance or the name of the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/transaction/jta/platform/spi/JtaPlatform.html[`JtaPlatform`] implementation class
|
||||
Can reference either a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/transaction/jta/platform/spi/JtaPlatform.html[`JtaPlatform`] instance or the name of the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/transaction/jta/platform/spi/JtaPlatform.html[`JtaPlatform`] implementation class.
|
||||
|
||||
`*hibernate.jta.prefer_user_transaction*` (e.g. `true` or `false` (default value))::
|
||||
Should we prefer using the `org.hibernate.engine.transaction.jta.platform.spi.JtaPlatform#retrieveUserTransaction` over using `org.hibernate.engine.transaction.jta.platform.spi.JtaPlatform#retrieveTransactionManager`
|
||||
Should we prefer using the `org.hibernate.engine.transaction.jta.platform.spi.JtaPlatform#retrieveUserTransaction` over using `org.hibernate.engine.transaction.jta.platform.spi.JtaPlatform#retrieveTransactionManager`?
|
||||
|
||||
`*hibernate.transaction.jta.platform_resolver*`::
|
||||
Names the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/transaction/jta/platform/spi/JtaPlatformResolver.html[`JtaPlatformResolver`] implementation to use.
|
||||
|
@ -681,12 +749,12 @@ Causes the session to be closed during the after completion phase of the transac
|
|||
`*hibernate.transaction.coordinator_class*`::
|
||||
Names the implementation of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/resource/transaction/spi/TransactionCoordinatorBuilder.html[`TransactionCoordinatorBuilder`] to use for creating https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/resource/transaction/spi/TransactionCoordinator.html[`TransactionCoordinator`] instances.
|
||||
+
|
||||
Can be a`TransactionCoordinatorBuilder` instance, `TransactionCoordinatorBuilder` implementation `Class` reference, a `TransactionCoordinatorBuilder` implementation class name (fully-qualified name) or a short name.
|
||||
Can be a `TransactionCoordinatorBuilder` instance, `TransactionCoordinatorBuilder` implementation `Class` reference, a `TransactionCoordinatorBuilder` implementation class name (fully-qualified name) or a short name.
|
||||
+
|
||||
The following short names are defined for this setting:
|
||||
+
|
||||
`jdbc`::: Manages transactions via calls to `java.sql.Connection` (default for non-JPA applications)
|
||||
`jta`::: Manages transactions via JTA. See <<chapters/bootstrap/Bootstrap.adoc#bootstrap-jpa-compliant,Java EE bootstrapping>>
|
||||
`jdbc`::: Manages transactions via calls to `java.sql.Connection` (default for non-JPA applications).
|
||||
`jta`::: Manages transactions via JTA. See <<chapters/bootstrap/Bootstrap.adoc#bootstrap-jpa-compliant,Java EE bootstrapping>>.
|
||||
+
|
||||
|
||||
If a JPA application does not provide a setting for `hibernate.transaction.coordinator_class`, Hibernate will
|
||||
|
@ -726,12 +794,12 @@ The multi-tenancy strategy in use.
|
|||
Names a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/connections/spi/MultiTenantConnectionProvider.html[`MultiTenantConnectionProvider`] implementation to use. As `MultiTenantConnectionProvider` is also a service, can be configured directly through the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/registry/StandardServiceRegistryBuilder.html[`StandardServiceRegistryBuilder`].
|
||||
|
||||
`*hibernate.tenant_identifier_resolver*`::
|
||||
Names a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/context/spi/CurrentTenantIdentifierResolver.html[`CurrentTenantIdentifierResolver`] implementation to resolve the resolve the current tenant identifier so that calling `SessionFactory#openSession()` would get a `Session` that's connected to the right tenant.
|
||||
Names a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/context/spi/CurrentTenantIdentifierResolver.html[`CurrentTenantIdentifierResolver`] implementation to resolve the current tenant identifier so that calling `SessionFactory#openSession()` would get a `Session` that's connected to the right tenant.
|
||||
+
|
||||
Can be a `CurrentTenantIdentifierResolver` instance, `CurrentTenantIdentifierResolver` implementation `Class` object reference or a `CurrentTenantIdentifierResolver` implementation class name.
|
||||
|
||||
`*hibernate.multi_tenant.datasource.identifier_for_any*` (e.g. `true` or `false` (default value))::
|
||||
When the `hibernate.connection.datasource` property value is resolved to a `javax.naming.Context` object, this configuration property defines the JNDI name used to locate the `DataSource` used for fetching the initial `Connection` which is used to access to the database metadata of the underlying database(s) (in situations where we do not have a tenant id, like startup processing).
|
||||
When the `hibernate.connection.datasource` property value is resolved to a `javax.naming.Context` object, this configuration property defines the JNDI name used to locate the `DataSource` used for fetching the initial `Connection` which is used to access the database metadata of the underlying database(s) (in situations where we do not have a tenant id, like startup processing).
|
||||
|
||||
[[configurations-hbmddl]]
|
||||
=== Automatic schema generation
|
||||
|
@ -745,8 +813,8 @@ Valid options are defined by the `externalHbm2ddlName` value of the https://docs
|
|||
`drop`::: Database dropping will be generated.
|
||||
`create`::: Database dropping will be generated followed by database creation.
|
||||
`create-drop`::: Drop the schema and recreate it on SessionFactory startup. Additionally, drop the schema on SessionFactory shutdown.
|
||||
`validate`::: Validate the database schema
|
||||
`update`::: Update the database schema
|
||||
`validate`::: Validate the database schema.
|
||||
`update`::: Update the database schema.
|
||||
|
||||
`*javax.persistence.schema-generation.database.action*` (e.g. `none` (default value), `create-only`, `drop`, `create`, `create-drop`, `validate`, and `update`)::
|
||||
Setting to perform `SchemaManagementTool` actions automatically as part of the `SessionFactory` lifecycle.
|
||||
|
@ -767,7 +835,7 @@ Valid options are defined by the `externalJpaName` value of the https://docs.jbo
|
|||
`drop-and-create`::: Database dropping will be generated followed by database creation.
|
||||
|
||||
`*javax.persistence.schema-generation-connection*`::
|
||||
Allows passing a specific `java.sql.Connection` instance to be used by `SchemaManagementTool`
|
||||
Allows passing a specific `java.sql.Connection` instance to be used by `SchemaManagementTool`.
|
||||
|
||||
`*javax.persistence.database-product-name*`::
|
||||
Specifies the name of the database provider in cases where a Connection to the underlying database is not available (aka, mainly in generating scripts).
|
||||
|
@ -788,21 +856,21 @@ Specifies the minor version of the underlying database, as would be returned by
|
|||
This value is used to help more precisely determine how to perform schema generation tasks for the underlying database in cases where `javax.persistence.database-product-name` and `javax.persistence.database-major-version` does not provide enough distinction.
|
||||
|
||||
`*javax.persistence.schema-generation.create-source*`::
|
||||
Specifies whether schema generation commands for schema creation are to be determine based on object/relational mapping metadata, DDL scripts, or a combination of the two.
|
||||
Specifies whether schema generation commands for schema creation are to be determined based on object/relational mapping metadata, DDL scripts, or a combination of the two.
|
||||
See https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/SourceType.html[`SourceType`] for valid set of values.
|
||||
+
|
||||
If no value is specified, a default is assumed as follows:
|
||||
+
|
||||
* if source scripts are specified (per `javax.persistence.schema-generation.create-script-source`), then `scripts` is assumed
|
||||
* if source scripts are specified (per `javax.persistence.schema-generation.create-script-source`), then `script` is assumed
|
||||
* otherwise, `metadata` is assumed
|
||||
|
||||
`*javax.persistence.schema-generation.drop-source*`::
|
||||
Specifies whether schema generation commands for schema dropping are to be determine based on object/relational mapping metadata, DDL scripts, or a combination of the two.
|
||||
Specifies whether schema generation commands for schema dropping are to be determined based on object/relational mapping metadata, DDL scripts, or a combination of the two.
|
||||
See https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/SourceType.html[`SourceType`] for valid set of values.
|
||||
+
|
||||
If no value is specified, a default is assumed as follows:
|
||||
+
|
||||
* if source scripts are specified (per `javax.persistence.schema-generation.create-script-source`), then `scripts` is assumed
|
||||
* if source scripts are specified (per `javax.persistence.schema-generation.drop-script-source`), then the `script` option is assumed
|
||||
* otherwise, `metadata` is assumed
|
||||
|
||||
`*javax.persistence.schema-generation.create-script-source*`::
|
||||
|
@ -821,7 +889,7 @@ For cases where the `javax.persistence.schema-generation.scripts.action` value i
|
|||
|
||||
`*javax.persistence.hibernate.hbm2ddl.import_files*` (e.g. `import.sql` (default value))::
|
||||
Comma-separated names of the optional files containing SQL DML statements executed during the `SessionFactory` creation.
|
||||
File order matters, the statements of a give file are executed before the statements of the following one.
|
||||
File order matters, the statements of a given file are executed before the statements of the following one.
|
||||
+
|
||||
These statements are only executed if the schema is created, meaning that `hibernate.hbm2ddl.auto` is set to `create`, `create-drop`, or `update`.
|
||||
`javax.persistence.schema-generation.create-script-source` / `javax.persistence.schema-generation.drop-script-source` should be preferred.
|
||||
|
@ -834,16 +902,16 @@ A "SQL load script" is a script that performs some database initialization (INSE
|
|||
Reference to the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/hbm2ddl/ImportSqlCommandExtractor.html[`ImportSqlCommandExtractor`] implementation class to use for parsing source/import files as defined by `javax.persistence.schema-generation.create-script-source`,
|
||||
`javax.persistence.schema-generation.drop-script-source` or `hibernate.hbm2ddl.import_files`.
|
||||
+
|
||||
Reference may refer to an instance, a Class implementing `ImportSqlCommandExtractor` of the fully-qualified name of the `ImportSqlCommandExtractor` implementation.
|
||||
Reference may refer to an instance, a Class implementing `ImportSqlCommandExtractor` or the fully-qualified name of the `ImportSqlCommandExtractor` implementation.
|
||||
If the fully-qualified name is given, the implementation must provide a no-arg constructor.
|
||||
+
|
||||
The default value is https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/hbm2ddl/SingleLineSqlCommandExtractor.html[`SingleLineSqlCommandExtractor`].
|
||||
|
||||
`*hibernate.hbm2dll.create_namespaces*` (e.g. `true` or `false` (default value))::
|
||||
Specifies whether to automatically create also the database schema/catalog.
|
||||
`*hibernate.hbm2ddl.create_namespaces*` (e.g. `true` or `false` (default value))::
|
||||
Specifies whether to automatically create the database schema/catalog also.
|
||||
|
||||
`*javax.persistence.create-database-schemas*` (e.g. `true` or `false` (default value))::
|
||||
The JPA variant of `hibernate.hbm2dll.create_namespaces`. Specifies whether the persistence provider is to create the database schema(s) in addition to creating database objects (tables, sequences, constraints, etc).
|
||||
The JPA variant of `hibernate.hbm2ddl.create_namespaces`. Specifies whether the persistence provider is to create the database schema(s) in addition to creating database objects (tables, sequences, constraints, etc).
|
||||
The value of this boolean property should be set to `true` if the persistence provider is to create schemas in the database or to generate DDL that contains "CREATE SCHEMA" commands.
|
||||
+
|
||||
If this property is not supplied (or is explicitly `false`), the provider should not attempt to create database schemas.
|
||||
|
@ -856,21 +924,24 @@ Used to specify the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/jav
|
|||
Setting to choose the strategy used to access the JDBC Metadata.
|
||||
Valid options are defined by the `strategy` value of the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/JdbcMetadaAccessStrategy.html[`JdbcMetadaAccessStrategy`] enum:
|
||||
+
|
||||
`grouped`::: https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/spi/SchemaMigrator.html[`SchemaMigrator`] and https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/spi/SchemaValidator.html[`SchemaValidator`] execute a single `java.sql.DatabaseMetaData#getTables(String, String, String, String[])` call to retrieve all the database table in order to determine if all the `javax.persistence.Entity` have a corresponding mapped database tables.This strategy may require `hibernate.default_schema` and/or `hibernate.default_catalog` to be provided.
|
||||
`grouped`::: https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/spi/SchemaMigrator.html[`SchemaMigrator`] and https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/spi/SchemaValidator.html[`SchemaValidator`] execute a single `java.sql.DatabaseMetaData#getTables(String, String, String, String[])` call to retrieve all the database tables in order to determine if all the ``javax.persistence.Entity``s have a corresponding mapped database table. This strategy may require `hibernate.default_schema` and/or `hibernate.default_catalog` to be provided.
|
||||
`individually`::: https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/spi/SchemaMigrator.html[`SchemaMigrator`] and https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/spi/SchemaValidator.html[`SchemaValidator`] execute one `java.sql.DatabaseMetaData#getTables(String, String, String, String[])` call for each `javax.persistence.Entity` in order to determine if a corresponding database table exists.
|
||||
|
||||
`*hibernate.hbm2ddl.delimiter*` (e.g. `;`)::
|
||||
Identifies the delimiter to use to separate schema management statements in script outputs.
|
||||
|
||||
`*hibernate.schema_management_tool*` (e.g. A schema name)::
|
||||
Used to specify the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/spi/SchemaManagementTool.html[`SchemaManagementTool`] to use for performing schema management. The default is to use https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/internal/HibernateSchemaManagementTool.html[`HibernateSchemaManagementTool`]
|
||||
Used to specify the `SchemaManagementTool` to use for performing schema management. The default is to use `HibernateSchemaManagementTool`.
|
||||
|
||||
`*hibernate.synonyms*` (e.g. `true` or `false` (default value))::
|
||||
If enabled, allows schema update and validation to support synonyms. Due to the possibility that this would return duplicate tables (especially in Oracle), this is disabled by default.
|
||||
|
||||
`*hibernate.hbm2dll.extra_physical_table_types*` (e.g. `BASE TABLE`)::
|
||||
`*hibernate.hbm2ddl.extra_physical_table_types*` (e.g. `BASE TABLE`)::
|
||||
Identifies a comma-separated list of values to specify extra table types, other than the default `TABLE` value, to recognize as defining a physical table by schema update, creation and validation.
|
||||
|
||||
`*hibernate.hbm2ddl.default_constraint_mode*` (`CONSTRAINT` (default value) or `NO_CONSTRAINT`)::
|
||||
Default `javax.persistence.ConstraintMode` for foreign key mapping if `PROVIDER_DEFAULT` strategy used.
|
||||
|
||||
`*hibernate.schema_update.unique_constraint_strategy*` (e.g. `DROP_RECREATE_QUIETLY`, `RECREATE_QUIETLY`, `SKIP`)::
|
||||
Unique columns and unique keys both use unique constraints in most dialects.
|
||||
`SchemaUpdate` needs to create these constraints, but DBs support for finding existing constraints is extremely inconsistent.
|
||||
|
@ -881,7 +952,7 @@ Therefore, the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs
|
|||
`DROP_RECREATE_QUIETLY`::: Default option.
|
||||
Attempts to drop, then (re-)create each unique constraint, ignoring any exceptions being thrown.
|
||||
`RECREATE_QUIETLY`:::
|
||||
Attempts to (re-)create unique constraints, ignoring exceptions thrown if the constraint already existed
|
||||
Attempts to (re-)create unique constraints, ignoring exceptions thrown if the constraint already existed.
|
||||
`SKIP`:::
|
||||
Does not attempt to create unique constraints on a schema update.
|
||||
|
||||
|
@ -894,13 +965,13 @@ Whether the schema migration tool should halt on error, therefore terminating th
|
|||
[[configurations-exception-handling]]
|
||||
=== Exception handling
|
||||
|
||||
`*hibernate.jdbc.sql_exception_converter*` (e.g. Fully-qualified name of class implementing `SQLExceptionConverter`)::
|
||||
`*hibernate.jdbc.sql_exception_converter*` (e.g. fully-qualified name of class implementing `SQLExceptionConverter`)::
|
||||
The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/exception/spi/SQLExceptionConverter.html[`SQLExceptionConverter`] to use for converting `SQLExceptions` to Hibernate's `JDBCException` hierarchy. The default is to use the configured https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/dialect/Dialect.html[`Dialect`]'s preferred `SQLExceptionConverter`.
|
||||
|
||||
`*hibernate.native_exception_handling_51_compliance*` (e.g. `true` or `false` (default value))::
|
||||
Indicates if exception handling for a `SessionFactory` built via Hibernate's native bootstrapping
|
||||
should behave the same as native exception handling in Hibernate ORM 5.1. When set to `true`,
|
||||
`HibernateException` will be not wrapped or converted according to the JPA specification. This
|
||||
`HibernateException` will not be wrapped or converted according to the JPA specification. This
|
||||
setting will be ignored for a `SessionFactory` built via JPA bootstrapping.
|
||||
|
||||
[[configurations-session-events]]
|
||||
|
@ -910,7 +981,7 @@ setting will be ignored for a `SessionFactory` built via JPA bootstrapping.
|
|||
Fully qualified class name implementing the `SessionEventListener` interface.
|
||||
|
||||
`*hibernate.session_factory.interceptor*` (e.g. `org.hibernate.EmptyInterceptor` (default value))::
|
||||
Names a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/Interceptor[`Interceptor`] implementation to be applied to every `Session` created by the current `org.hibernate.SessionFactory`
|
||||
Names an https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/Interceptor[`Interceptor`] implementation to be applied to every `Session` created by the current `org.hibernate.SessionFactory`.
|
||||
+
|
||||
Can reference:
|
||||
+
|
||||
|
@ -923,7 +994,7 @@ Can reference:
|
|||
WARNING: Deprecated setting. Use `hibernate.session_factory.session_scoped_interceptor` instead.
|
||||
|
||||
`*hibernate.session_factory.session_scoped_interceptor*` (e.g. fully-qualified class name or class reference)::
|
||||
Names a `org.hibernate.Interceptor` implementation to be applied to the `org.hibernate.SessionFactory` and propagated to each `Session` created from the `SessionFactory`.
|
||||
Names an `org.hibernate.Interceptor` implementation to be applied to the `org.hibernate.SessionFactory` and propagated to each `Session` created from the `SessionFactory`.
|
||||
+
|
||||
This setting identifies an `Interceptor` implementation that is to be applied to every `Session` opened from the `SessionFactory`,
|
||||
but unlike `hibernate.session_factory.interceptor`, a unique instance of the `Interceptor` is
|
||||
|
@ -933,7 +1004,7 @@ Can reference:
|
|||
+
|
||||
* `Interceptor` instance
|
||||
* `Interceptor` implementation `Class` object reference
|
||||
* `java.util.function.Supplier` instance which is used to retrieve the `Interceptor` instance.
|
||||
* `java.util.function.Supplier` instance which is used to retrieve the `Interceptor` instance
|
||||
+
|
||||
NOTE: Specifically, this setting cannot name an `Interceptor` instance.
|
||||
|
||||
|
@ -999,7 +1070,7 @@ Names the `ClassLoader` used to load user application classes.
|
|||
Names the `ClassLoader` Hibernate should use to perform resource loading.
|
||||
|
||||
`*hibernate.classLoader.hibernate*`::
|
||||
Names the `ClassLoader` responsible for loading Hibernate classes. By default this is the `ClassLoader` that loaded this class.
|
||||
Names the `ClassLoader` responsible for loading Hibernate classes. By default, this is the `ClassLoader` that loaded this class.
|
||||
|
||||
`*hibernate.classLoader.environment*`::
|
||||
Names the `ClassLoader` used when Hibernate is unable to locate classes on the `hibernate.classLoader.application` or `hibernate.classLoader.hibernate`.
|
||||
|
@ -1008,13 +1079,13 @@ Names the `ClassLoader` used when Hibernate is unable to locates classes on the
|
|||
=== Bootstrap properties
|
||||
|
||||
`*hibernate.integrator_provider*` (e.g. The fully qualified name of an https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/jpa/boot/spi/IntegratorProvider.html[`IntegratorProvider`])::
|
||||
Used to define a list of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/integrator/spi/Integrator.html[`Integrator`] which are used during bootstrap process to integrate various services.
|
||||
Used to define a list of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/integrator/spi/Integrator.html[`Integrator`] which is used during the bootstrap process to integrate various services.
|
||||
|
||||
`*hibernate.strategy_registration_provider*` (e.g. The fully qualified name of an https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/jpa/boot/spi/StrategyRegistrationProviderList.html[`StrategyRegistrationProviderList`])::
|
||||
Used to define a list of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/registry/selector/StrategyRegistrationProvider.html[`StrategyRegistrationProvider`] which are used during bootstrap process to provide registrations of strategy selector(s).
|
||||
Used to define a list of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/registry/selector/StrategyRegistrationProvider.html[`StrategyRegistrationProvider`] which is used during the bootstrap process to provide registrations of strategy selector(s).
|
||||
|
||||
`*hibernate.type_contributors*` (e.g. The fully qualified name of an https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/jpa/boot/spi/TypeContributorList.html[`TypeContributorList`])::
|
||||
Used to define a list of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/model/TypeContributor.html[`TypeContributor`] which are used during bootstrap process to contribute types.
|
||||
Used to define a list of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/model/TypeContributor.html[`TypeContributor`] which is used during the bootstrap process to contribute types.
|
||||
|
||||
`*hibernate.persister.resolver*` (e.g. The fully qualified name of a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/persister/spi/PersisterClassResolver.html[`PersisterClassResolver`] or a `PersisterClassResolver` instance)::
|
||||
Used to define an implementation of the `PersisterClassResolver` interface which can be used to customize how an entity or a collection is being persisted.
|
||||
|
@ -1025,14 +1096,14 @@ Like a `PersisterClassResolver`, the `PersisterFactory` can be used to customize
|
|||
`*hibernate.service.allow_crawling*` (e.g. `true` (default value) or `false`)::
|
||||
Crawl all available service bindings for an alternate registration of a given Hibernate `Service`.
|
||||
|
||||
`*hibernate.metadata_builder_contributor*` (e.g. The instance, the class or the fully qualified class name of an https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/jpa/boot/spi/MetadataBuilderContributor.html[`MetadataBuilderContributor`])::
|
||||
Used to define a instance, the class or the fully qualified class name of an https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/jpa/boot/spi/MetadataBuilderContributor.html[`MetadataBuilderContributor`] which can be used to configure the `MetadataBuilder` when bootstrapping via the JPA `EntityManagerFactory`.
|
||||
`*hibernate.metadata_builder_contributor*` (e.g. The instance, the class or the fully qualified class name of a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/jpa/boot/spi/MetadataBuilderContributor.html[`MetadataBuilderContributor`])::
|
||||
Used to define an instance, the class or the fully qualified class name of a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/jpa/boot/spi/MetadataBuilderContributor.html[`MetadataBuilderContributor`] which can be used to configure the `MetadataBuilder` when bootstrapping via the JPA `EntityManagerFactory`.
|
||||
|
||||
[[configurations-misc]]
|
||||
=== Miscellaneous properties
|
||||
|
||||
`*hibernate.dialect_resolvers*`::
|
||||
Names any additional https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/dialect/spi/DialectResolver.html[`DialectResolver`] implementations to register with the standard https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/dialect/spi/DialectFactory.html[`DialectFactory`]
|
||||
Names any additional https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/dialect/spi/DialectResolver.html[`DialectResolver`] implementations to register with the standard https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/dialect/spi/DialectFactory.html[`DialectFactory`].
|
||||
|
||||
`*hibernate.session_factory_name*` (e.g. A JNDI name)::
|
||||
Setting used to name the Hibernate `SessionFactory`.
|
||||
|
@ -1043,7 +1114,7 @@ If `hibernate.session_factory_name_is_jndi` is set to `true`, this is also the n
|
|||
`*hibernate.session_factory_name_is_jndi*` (e.g. `true` (default value) or `false`)::
|
||||
Does the value defined by `hibernate.session_factory_name` represent a JNDI namespace into which the `org.hibernate.SessionFactory` should be bound and made accessible?
|
||||
+
|
||||
Defaults to `true` for backwards compatibility. Set this to `false` if naming a SessionFactory is needed for serialization purposes, but no writable JNDI context exists in the runtime environment or if the user simply does not want JNDI to be used.
|
||||
Defaults to `true` for backward compatibility. Set this to `false` if naming a SessionFactory is needed for serialization purposes, but no writable JNDI context exists in the runtime environment or if the user simply does not want JNDI to be used.
|
||||
|
||||
`*hibernate.ejb.entitymanager_factory_name*` (e.g. By default, the persistence unit name is used, otherwise a randomly generated UUID)::
|
||||
Internally, Hibernate keeps track of all `EntityManagerFactory` instances using the `EntityManagerFactoryRegistry`. The name is used as a key to identify a given `EntityManagerFactory` reference.
|
||||
|
@ -1052,7 +1123,7 @@ Internally, Hibernate keeps track of all `EntityManagerFactory` instances using
|
|||
XML configuration file to use to configure Hibernate.
|
||||
|
||||
`*hibernate.ejb.discard_pc_on_close*` (e.g. `true` or `false` (default value))::
|
||||
If true, the persistence context will be discarded (think `clear()` when the method is called.
|
||||
If true, the persistence context will be discarded (think `clear()` when the method is called).
|
||||
Otherwise, the persistence context will stay alive until the transaction completion: all objects will remain managed, and any change will be synchronized with the database (defaults to false, i.e., wait for transaction completion).
|
||||
|
||||
`*hibernate.ejb.metamodel.population*` (e.g. `enabled` or `disabled`, or `ignoreUnsupported` (default value))::
|
||||
|
@ -1060,8 +1131,8 @@ Setting that indicates whether to build the JPA types.
|
|||
+
|
||||
Accepts three values:
|
||||
+
|
||||
enabled::: Do the build
|
||||
disabled::: Do not do the build
|
||||
enabled::: Do the build.
|
||||
disabled::: Do not do the build.
|
||||
ignoreUnsupported::: Do the build, but ignore any non-JPA features that would otherwise result in a failure (e.g. `@Any` annotation).
|
||||
|
||||
`*hibernate.jpa.static_metamodel.population*` (e.g. `enabled` or `disabled`, or `skipUnsupported` (default value))::
|
||||
|
@ -1069,8 +1140,8 @@ Setting that controls whether we seek out JPA _static metamodel_ classes and pop
|
|||
+
|
||||
Accepts three values:
|
||||
+
|
||||
enabled::: Do the population
|
||||
disabled::: Do not do the population
|
||||
enabled::: Do the population.
|
||||
disabled::: Do not do the population.
|
||||
skipUnsupported::: Do the population, but ignore any non-JPA features that would otherwise result in the population failing (e.g. `@Any` annotation).
|
||||
|
||||
`*hibernate.delay_cdi_access*` (e.g. `true` or `false` (default value))::
|
||||
|
@ -1093,26 +1164,29 @@ true::: allows to flush an update out of a transaction
|
|||
false::: does not allow
|
||||
|
||||
`*hibernate.collection_join_subquery*` (e.g. `true` (default value) or `false`)::
|
||||
Setting which indicates whether or not the new JOINS over collection tables should be rewritten to subqueries.
|
||||
Setting which indicates whether or not the new JOINs over collection tables should be rewritten to subqueries.
|
||||
|
||||
`*hibernate.allow_refresh_detached_entity*` (e.g. `true` (default value when using Hibernate native bootstrapping) or `false` (default value when using JPA bootstrapping))::
|
||||
Setting that allows calling `javax.persistence.EntityManager#refresh(entity)` or `Session#refresh(entity)` on a detached instance even when the `org.hibernate.Session` is obtained from a JPA `javax.persistence.EntityManager`.
|
||||
|
||||
`*hibernate.use_entity_where_clause_for_collections*` (e.g., `true` (default) or `false`)::
|
||||
Setting that controls whether an entity's "where" clause, mapped using `@Where(clause = "...")` or `<entity ... where="...">`, is taken into account when loading one-to-many or many-to-many collections of that type of entity.
|
||||
|
||||
`*hibernate.event.merge.entity_copy_observer*` (e.g. `disallow` (default value), `allow`, `log` (testing purpose only) or fully-qualified class name)::
|
||||
Setting that specifies how Hibernate will respond when multiple representations of the same persistent entity ("entity copy") are detected while merging.
|
||||
+
|
||||
The possible values are:
|
||||
+
|
||||
disallow (the default)::: throws `IllegalStateException` if an entity copy is detected
|
||||
disallow::: throws `IllegalStateException` if an entity copy is detected
|
||||
allow::: performs the merge operation on each entity copy that is detected
|
||||
log::: (provided for testing only) performs the merge operation on each entity copy that is detected and logs information about the entity copies.
|
||||
This setting requires DEBUG logging be enabled for https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/event/internal/EntityCopyAllowedLoggedObserver.html[`EntityCopyAllowedLoggedObserver`].
|
||||
+
|
||||
|
||||
In addition, the application may customize the behavior by providing an implementation of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/event/spi/EntityCopyObserver.html[`EntityCopyObserver`] and setting `hibernate.event.merge.entity_copy_observer` to the class name.
|
||||
When this property is set to `allow` or `log`, Hibernate will merge each entity copy detected while cascading the merge operation.
|
||||
In the process of merging each entity copy, Hibernate will cascade the merge operation from each entity copy to its associations with `cascade=CascadeType.MERGE` or `CascadeType.ALL`.
|
||||
In the process of merging each entity copy, Hibernate will cascade the merge operation from each entity copy to its associations with `cascade = CascadeType.MERGE` or `cascade = CascadeType.ALL`.
|
||||
The entity state resulting from merging an entity copy will be overwritten when another entity copy is merged.
|
||||
+
|
||||
|
||||
For more details, check out the <<chapters/pc/PersistenceContext.adoc#pc-merge-gotchas,Merge gotchas>> section.
|
||||
|
||||
[[configurations-envers]]
|
||||
|
@ -1147,8 +1221,8 @@ Enable or disable the SpecJ proprietary mapping syntax which differs from JPA sp
|
|||
`*hibernate.temp.use_jdbc_metadata_defaults*` (e.g. `true` (default value) or `false`)::
|
||||
This setting is used to control whether we should consult the JDBC metadata to determine certain settings' default values when the database may not be available (mainly in tools usage).
|
||||
|
||||
`*hibernate.connection_provider.injection_data*` (e.g. `java.util.Map`)::
|
||||
Connection provider settings to be injected in the currently configured connection provider.
|
||||
`*hibernate.connection_provider.injection_data*`::
|
||||
Connection provider settings to be injected (a `Map` instance) in the currently configured connection provider.
|
||||
|
||||
`*hibernate.jandex_index*` (e.g. `org.jboss.jandex.Index`)::
|
||||
`*hibernate.jandex_index*`::
|
||||
Names a Jandex `org.jboss.jandex.Index` instance to use.
|
||||
|
|
|
@ -7,9 +7,10 @@ I like to think of `Configuration` as a big pot to which we add a bunch of stuff
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
There are some significant draw backs to this approach which led to its deprecation and the development of the new approach, which is discussed in <<chapters/bootstrap/Bootstrap.adoc#bootstrap-native,Native Bootstrapping>>.
|
||||
There are some significant drawbacks to the legacy bootstrapping mechanism which led to its deprecation and the development of the new approach, which is discussed in <<chapters/bootstrap/Bootstrap.adoc#bootstrap-native,Native Bootstrapping>>.
|
||||
|
||||
`Configuration` is semi-deprecated but still available for use, in a limited form that eliminates these drawbacks.
|
||||
"Under the covers", `Configuration` uses the new bootstrapping code, so the things available there as also available here in terms of auto-discovery.
|
||||
"Under the covers", `Configuration` uses the new bootstrapping code, so the things available there are also available here in terms of auto-discovery.
|
||||
====
|
||||
|
||||
You can obtain the `Configuration` by instantiating it directly.
|
||||
|
@ -19,11 +20,11 @@ You then specify mapping metadata (XML mapping documents, annotated classes) tha
|
|||
----
|
||||
Configuration cfg = new Configuration()
|
||||
// addResource does a classpath resource lookup
|
||||
.addResource("Item.hbm.xml")
|
||||
.addResource("Bid.hbm.xml")
|
||||
.addResource( "Item.hbm.xml" )
|
||||
.addResource( "Bid.hbm.xml" )
|
||||
|
||||
// calls addResource using "/org/hibernate/auction/User.hbm.xml"
|
||||
.addClass(`org.hibernate.auction.User.class`)
|
||||
.addClass( org.hibernate.auction.User.class )
|
||||
|
||||
// parses Address class for mapping annotations
|
||||
.addAnnotatedClass( Address.class )
|
||||
|
@ -31,9 +32,9 @@ Configuration cfg = new Configuration()
|
|||
// reads package-level (package-info.class) annotations in the named package
|
||||
.addPackage( "org.hibernate.auction" )
|
||||
|
||||
.setProperty("hibernate.dialect", "org.hibernate.dialect.H2Dialect")
|
||||
.setProperty("hibernate.connection.datasource", "java:comp/env/jdbc/test")
|
||||
.setProperty("hibernate.order_updates", "true");
|
||||
.setProperty( "hibernate.dialect", "org.hibernate.dialect.H2Dialect" )
|
||||
.setProperty( "hibernate.connection.datasource", "java:comp/env/jdbc/test" )
|
||||
.setProperty( "hibernate.order_updates", "true" );
|
||||
----
|
||||
|
||||
There are other ways to specify Configuration information, including:
|
||||
|
@ -41,11 +42,11 @@ There are other ways to specify Configuration information, including:
|
|||
* Place a file named hibernate.properties in a root directory of the classpath
|
||||
* Pass an instance of java.util.Properties to `Configuration#setProperties`
|
||||
* Via a `hibernate.cfg.xml` file
|
||||
* System properties using java `-Dproperty=value`
|
||||
* System properties using Java `-Dproperty=value`
|
||||
|
||||
== Migration
|
||||
|
||||
Mapping Configuration methods to the corresponding methods in the new APIs..
|
||||
Mapping Configuration methods to the corresponding methods in the new APIs.
|
||||
|
||||
|===
|
||||
|`Configuration#addFile`|`Configuration#addFile`
|
||||
|
|
|
@ -20,13 +20,13 @@ The `Session` is a factory for `Criteria` instances.
|
|||
|
||||
[source,java]
|
||||
----
|
||||
Criteria crit = sess.createCriteria(Cat.class);
|
||||
crit.setMaxResults(50);
|
||||
Criteria crit = sess.createCriteria( Cat.class );
|
||||
crit.setMaxResults( 50 );
|
||||
List cats = crit.list();
|
||||
----
|
||||
|
||||
[[criteria-entity-name]]
|
||||
=== JPA vs Hibernate entity name
|
||||
=== JPA vs. Hibernate entity name
|
||||
|
||||
When using the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/SharedSessionContract.html#createCriteria-java.lang.String-[`Session#createCriteria(String entityName)` or `StatelessSession#createCriteria(String entityName)`],
|
||||
the *entityName* means the fully-qualified name of the underlying entity and not the name denoted by the `name` attribute of the JPA `@Entity` annotation.
|
||||
|
@ -49,10 +49,9 @@ If you provide the JPA entity name to a legacy Criteria query:
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List<Event> events =
|
||||
entityManager.unwrap( Session.class )
|
||||
.createCriteria( "ApplicationEvent" )
|
||||
.list();
|
||||
List<Event> events = entityManager.unwrap( Session.class )
|
||||
.createCriteria( "ApplicationEvent" )
|
||||
.list();
|
||||
----
|
||||
|
||||
Hibernate is going to throw the following `MappingException`:
|
||||
|
@ -66,10 +65,9 @@ On the other hand, the Hibernate entity name (the fully qualified class name) wo
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List<Event> events =
|
||||
entityManager.unwrap( Session.class )
|
||||
.createCriteria( Event.class.getName() )
|
||||
.list();
|
||||
List<Event> events = entityManager.unwrap( Session.class )
|
||||
.createCriteria( Event.class.getName() )
|
||||
.list();
|
||||
----
|
||||
|
||||
For more about this topic, check out the https://hibernate.atlassian.net/browse/HHH-2597[HHH-2597] JIRA issue.
|
||||
|
@ -82,9 +80,9 @@ The class `org.hibernate.criterion.Restrictions` defines factory methods for obt
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.add( Restrictions.like("name", "Fritz%") )
|
||||
.add( Restrictions.between("weight", minWeight, maxWeight) )
|
||||
List cats = sess.createCriteria( Cat.class )
|
||||
.add( Restrictions.like( "name", "Fritz%" ) )
|
||||
.add( Restrictions.between( "weight", minWeight, maxWeight ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -92,18 +90,18 @@ Restrictions can be grouped logically.
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.add( Restrictions.like("name", "Fritz%") )
|
||||
List cats = sess.createCriteria( Cat.class )
|
||||
.add( Restrictions.like( "name", "Fritz%" ) )
|
||||
.add( Restrictions.or(
|
||||
Restrictions.eq( "age", new Integer(0) ),
|
||||
Restrictions.isNull("age")
|
||||
) )
|
||||
Restrictions.isNull( "age" ) )
|
||||
)
|
||||
.list();
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
List cats = sess.createCriteria( Cat.class )
|
||||
.add( Restrictions.in( "name", new String[] { "Fritz", "Izi", "Pk" } ) )
|
||||
.add( Restrictions.disjunction()
|
||||
.add( Restrictions.isNull("age") )
|
||||
|
@ -119,8 +117,10 @@ One of the most useful `Restrictions` allows you to specify SQL directly.
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.add( Restrictions.sqlRestriction("lower({alias}.name) like lower(?)", "Fritz%", Hibernate.STRING) )
|
||||
List cats = sess.createCriteria( Cat.class )
|
||||
.add( Restrictions.sqlRestriction(
|
||||
"lower({alias}.name) like lower(?)", "Fritz%", Hibernate.STRING )
|
||||
)
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -132,8 +132,8 @@ You can create a `Property` by calling `Property.forName()`:
|
|||
[source,java]
|
||||
----
|
||||
|
||||
Property age = Property.forName("age");
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
Property age = Property.forName( "age" );
|
||||
List cats = sess.createCriteria( Cat.class )
|
||||
.add( Restrictions.disjunction()
|
||||
.add( age.isNull() )
|
||||
.add( age.eq( new Integer(0) ) )
|
||||
|
@ -151,35 +151,35 @@ You can order the results using `org.hibernate.criterion.Order`.
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.add( Restrictions.like("name", "F%")
|
||||
.addOrder( Order.asc("name").nulls(NullPrecedence.LAST) )
|
||||
.addOrder( Order.desc("age") )
|
||||
.setMaxResults(50)
|
||||
List cats = sess.createCriteria( Cat.class )
|
||||
.add( Restrictions.like( "name", "F%" ) )
|
||||
.addOrder( Order.asc( "name" ).nulls( NullPrecedence.LAST ) )
|
||||
.addOrder( Order.desc( "age" ) )
|
||||
.setMaxResults( 50 )
|
||||
.list();
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.add( Property.forName("name").like("F%") )
|
||||
.addOrder( Property.forName("name").asc() )
|
||||
.addOrder( Property.forName("age").desc() )
|
||||
.setMaxResults(50)
|
||||
List cats = sess.createCriteria( Cat.class )
|
||||
.add( Property.forName( "name" ).like( "F%" ) )
|
||||
.addOrder( Property.forName( "name" ).asc() )
|
||||
.addOrder( Property.forName( "age" ).desc() )
|
||||
.setMaxResults( 50 )
|
||||
.list();
|
||||
----
|
||||
|
||||
[[criteria-associations]]
|
||||
=== Associations
|
||||
|
||||
By navigating associations using `createCriteria()` you can specify constraints upon related entities:
|
||||
By navigating associations using `createCriteria()`, you can specify constraints upon related entities:
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.add( Restrictions.like("name", "F%") )
|
||||
.createCriteria("kittens")
|
||||
.add( Restrictions.like("name", "F%") )
|
||||
List cats = sess.createCriteria( Cat.class )
|
||||
.add( Restrictions.like( "name", "F%" ) )
|
||||
.createCriteria( "kittens" )
|
||||
.add( Restrictions.like( "name", "F%" ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -189,30 +189,30 @@ There is also an alternate form that is useful in certain circumstances:
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.createAlias("kittens", "kt")
|
||||
.createAlias("mate", "mt")
|
||||
.add( Restrictions.eqProperty("kt.name", "mt.name") )
|
||||
List cats = sess.createCriteria( Cat.class )
|
||||
.createAlias( "kittens", "kt" )
|
||||
.createAlias( "mate", "mt" )
|
||||
.add( Restrictions.eqProperty( "kt.name", "mt.name" ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
(`createAlias()` does not create a new instance of `Criteria`.)
|
||||
Note that `createAlias()` does not create a new instance of `Criteria`.
|
||||
|
||||
The kittens collections held by the `Cat` instances returned by the previous two queries are _not_ pre-filtered by the criteria.
|
||||
If you want to retrieve just the kittens that match the criteria, you must use a `ResultTransformer`.
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.createCriteria("kittens", "kt")
|
||||
.add( Restrictions.eq("name", "F%") )
|
||||
.setResultTransformer(Criteria.ALIAS_TO_ENTITY_MAP)
|
||||
List cats = sess.createCriteria( Cat.class )
|
||||
.createCriteria( "kittens", "kt" )
|
||||
.add( Restrictions.eq( "name", "F%" ) )
|
||||
.setResultTransformer( Criteria.ALIAS_TO_ENTITY_MAP )
|
||||
.list();
|
||||
Iterator iter = cats.iterator();
|
||||
while ( iter.hasNext() ) {
|
||||
Map map = (Map) iter.next();
|
||||
Cat cat = (Cat) map.get(Criteria.ROOT_ALIAS);
|
||||
Cat kitten = (Cat) map.get("kt");
|
||||
Cat cat = (Cat) map.get( Criteria.ROOT_ALIAS );
|
||||
Cat kitten = (Cat) map.get( "kt" );
|
||||
}
|
||||
----
|
||||
|
||||
|
@ -221,20 +221,16 @@ Additionally, you may manipulate the result set using a left outer join:
|
|||
[source]
|
||||
----
|
||||
List cats = session.createCriteria( Cat.class )
|
||||
.createAlias("mate", "mt", Criteria.LEFT_JOIN, Restrictions.like("mt.name", "good%") )
|
||||
.addOrder(Order.asc("mt.age"))
|
||||
.createAlias( "mate", "mt", Criteria.LEFT_JOIN, Restrictions.like( "mt.name", "good%" ) )
|
||||
.addOrder( Order.asc( "mt.age" ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
This will return all of the `Cat`s with a mate whose name starts with "good" ordered by their mate's age, and all cats who do not have a mate.
|
||||
This will return all of the ``Cat``s with a mate whose name starts with "good" ordered by their mate's age, and all cats who do not have a mate.
|
||||
This is useful when there is a need to order or limit in the database prior to returning complex/large result sets,
|
||||
and removes many instances where multiple queries would have to be performed and the results unioned by java in memory.
|
||||
and removes many instances where multiple queries would have to be performed and the results unioned by Java in memory.
|
||||
|
||||
Without this feature, first all of the cats without a mate would need to be loaded in one query.
|
||||
|
||||
A second query would need to retrieve the cats with mates who's name started with "good" sorted by the mates age.
|
||||
|
||||
Thirdly, in memory; the lists would need to be joined manually.
|
||||
Without this feature, firstly all of the cats without a mate would need to be loaded in one query. Then a second query would need to retrieve the cats with mates whose name started with "good" sorted by the mates age. Thirdly, in memory, the lists would need to be joined manually.
|
||||
|
||||
[[criteria-dynamicfetching]]
|
||||
=== Dynamic association fetching
|
||||
|
@ -243,10 +239,10 @@ You can specify association fetching semantics at runtime using `setFetchMode()`
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.add( Restrictions.like("name", "Fritz%") )
|
||||
.setFetchMode("mate", FetchMode.EAGER)
|
||||
.setFetchMode("kittens", FetchMode.EAGER)
|
||||
List cats = sess.createCriteria( Cat.class )
|
||||
.add( Restrictions.like( "name", "Fritz%" ) )
|
||||
.setFetchMode( "mate", FetchMode.EAGER )
|
||||
.setFetchMode( "kittens", FetchMode.EAGER )
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -261,21 +257,21 @@ For example, suppose the `Cat` has a component property `fullName` with sub-prop
|
|||
|
||||
[source]
|
||||
----
|
||||
List cats = session.createCriteria(Cat.class)
|
||||
.add(Restrictions.eq("fullName.lastName", "Cattington"))
|
||||
List cats = session.createCriteria( Cat.class )
|
||||
.add( Restrictions.eq( "fullName.lastName", "Cattington" ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
Note: this does not apply when querying collections of components, for that see below <<criteria-collections>>
|
||||
Note: this does not apply when querying collections of components, for that see <<criteria-collections>> below.
|
||||
|
||||
[[criteria-collections]]
|
||||
=== Collections
|
||||
|
||||
When using criteria against collections, there are two distinct cases.
|
||||
One is if the collection contains entities (eg. `<one-to-many/>` or `<many-to-many/>`) or components (`<composite-element/>` ),
|
||||
One is if the collection contains entities (e.g. `<one-to-many/>` or `<many-to-many/>`) or components (`<composite-element/>` ),
|
||||
and the second is if the collection contains scalar values (`<element/>`).
|
||||
In the first case, the syntax is as given above in the section <<criteria-associations>> where we restrict the `kittens` collection.
|
||||
Essentially we create a `Criteria` object against the collection property and restrict the entity or component properties using that instance.
|
||||
Essentially, we create a `Criteria` object against the collection property and restrict the entity or component properties using that instance.
|
||||
|
||||
For querying a collection of basic values, we still create the `Criteria` object against the collection,
|
||||
but to reference the value, we use the special property "elements".
|
||||
|
@ -283,9 +279,9 @@ For an indexed collection, we can also reference the index property using the sp
|
|||
|
||||
[source]
|
||||
----
|
||||
List cats = session.createCriteria(Cat.class)
|
||||
.createCriteria("nickNames")
|
||||
.add(Restrictions.eq("elements", "BadBoy"))
|
||||
List cats = session.createCriteria( Cat.class )
|
||||
.createCriteria( "nickNames" )
|
||||
.add( Restrictions.eq( "elements", "BadBoy" ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -297,10 +293,10 @@ The class `org.hibernate.criterion.Example` allows you to construct a query crit
|
|||
[source,java]
|
||||
----
|
||||
Cat cat = new Cat();
|
||||
cat.setSex('F');
|
||||
cat.setColor(Color.BLACK);
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.add( Example.create(cat) )
|
||||
cat.setSex( 'F' );
|
||||
cat.setColor( Color.BLACK );
|
||||
List results = session.createCriteria( Cat.class )
|
||||
.add( Example.create( cat ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -311,13 +307,13 @@ You can adjust how the `Example` is applied.
|
|||
|
||||
[source,java]
|
||||
----
|
||||
Example example = Example.create(cat)
|
||||
.excludeZeroes() //exclude zero valued properties
|
||||
.excludeProperty("color") //exclude the property named "color"
|
||||
.ignoreCase() //perform case insensitive string comparisons
|
||||
.enableLike(); //use like for string comparisons
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.add(example)
|
||||
Example example = Example.create( cat )
|
||||
.excludeZeroes() //exclude zero valued properties
|
||||
.excludeProperty( "color" ) //exclude the property named "color"
|
||||
.ignoreCase() //perform case insensitive string comparisons
|
||||
.enableLike(); //use like for string comparisons
|
||||
List results = session.createCriteria( Cat.class )
|
||||
.add( example )
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -325,10 +321,11 @@ You can even use examples to place criteria upon associated objects.
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.add( Example.create(cat) )
|
||||
.createCriteria("mate")
|
||||
.add( Example.create( cat.getMate() ) )
|
||||
List results = session.createCriteria( Cat.class )
|
||||
.add( Example.create( cat ) )
|
||||
.createCriteria( "mate" )
|
||||
.add( Example.create( cat.getMate() )
|
||||
)
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -340,20 +337,20 @@ You can apply a projection to a query by calling `setProjection()`.
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
List results = session.createCriteria( Cat.class )
|
||||
.setProjection( Projections.rowCount() )
|
||||
.add( Restrictions.eq("color", Color.BLACK) )
|
||||
.add( Restrictions.eq( "color", Color.BLACK ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
List results = session.createCriteria( Cat.class )
|
||||
.setProjection( Projections.projectionList()
|
||||
.add( Projections.rowCount() )
|
||||
.add( Projections.avg("weight") )
|
||||
.add( Projections.max("weight") )
|
||||
.add( Projections.groupProperty("color") )
|
||||
.add( Projections.avg( "weight" ) )
|
||||
.add( Projections.max( "weight" ) )
|
||||
.add( Projections.groupProperty( "color" ) )
|
||||
)
|
||||
.list();
|
||||
----
|
||||
|
@ -366,17 +363,17 @@ Here are two different ways to do this:
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.setProjection( Projections.alias( Projections.groupProperty("color"), "colr" ) )
|
||||
.addOrder( Order.asc("colr") )
|
||||
List results = session.createCriteria( Cat.class )
|
||||
.setProjection( Projections.alias( Projections.groupProperty( "color" ), "colr" ) )
|
||||
.addOrder( Order.asc( "colr" ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.setProjection( Projections.groupProperty("color").as("colr") )
|
||||
.addOrder( Order.asc("colr") )
|
||||
List results = session.createCriteria( Cat.class )
|
||||
.setProjection( Projections.groupProperty( "color" ).as( "colr" ) )
|
||||
.addOrder( Order.asc( "colr" ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -385,28 +382,28 @@ As a shortcut, you can assign an alias when you add the projection to a projecti
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
List results = session.createCriteria( Cat.class )
|
||||
.setProjection( Projections.projectionList()
|
||||
.add( Projections.rowCount(), "catCountByColor" )
|
||||
.add( Projections.avg("weight"), "avgWeight" )
|
||||
.add( Projections.max("weight"), "maxWeight" )
|
||||
.add( Projections.groupProperty("color"), "color" )
|
||||
.add( Projections.avg( "weight" ), "avgWeight" )
|
||||
.add( Projections.max( "weight" ), "maxWeight" )
|
||||
.add( Projections.groupProperty( "color" ), "color" )
|
||||
)
|
||||
.addOrder( Order.desc("catCountByColor") )
|
||||
.addOrder( Order.desc("avgWeight") )
|
||||
.addOrder( Order.desc( "catCountByColor" ) )
|
||||
.addOrder( Order.desc( "avgWeight" ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Domestic.class, "cat")
|
||||
.createAlias("kittens", "kit")
|
||||
List results = session.createCriteria( Domestic.class, "cat" )
|
||||
.createAlias( "kittens", "kit" )
|
||||
.setProjection( Projections.projectionList()
|
||||
.add( Projections.property("cat.name"), "catName" )
|
||||
.add( Projections.property("kit.name"), "kitName" )
|
||||
.add( Projections.property( "cat.name" ), "catName" )
|
||||
.add( Projections.property( "kit.name" ), "kitName" )
|
||||
)
|
||||
.addOrder( Order.asc("catName") )
|
||||
.addOrder( Order.asc("kitName") )
|
||||
.addOrder( Order.asc( "catName" ) )
|
||||
.addOrder( Order.asc( "kitName" ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -414,23 +411,23 @@ You can also use `Property.forName()` to express projections:
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.setProjection( Property.forName("name") )
|
||||
.add( Property.forName("color").eq(Color.BLACK) )
|
||||
List results = session.createCriteria( Cat.class )
|
||||
.setProjection( Property.forName( "name" ) )
|
||||
.add( Property.forName( "color" ).eq( Color.BLACK ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.setProjection( Projections.projectionList()
|
||||
.add( Projections.rowCount().as("catCountByColor") )
|
||||
.add( Property.forName("weight").avg().as("avgWeight") )
|
||||
.add( Property.forName("weight").max().as("maxWeight") )
|
||||
.add( Property.forName("color").group().as("color" )
|
||||
List results = session.createCriteria( Cat.class )
|
||||
.setProjection(Projections.projectionList()
|
||||
.add( Projections.rowCount().as( "catCountByColor" ) )
|
||||
.add( Property.forName( "weight" ).avg().as( "avgWeight" ) )
|
||||
.add( Property.forName( "weight" ).max().as( "maxWeight" ) )
|
||||
.add( Property.forName( "color" ).group().as( "color" ) )
|
||||
)
|
||||
.addOrder( Order.desc("catCountByColor") )
|
||||
.addOrder( Order.desc("avgWeight") )
|
||||
.addOrder( Order.desc( "catCountByColor" ) )
|
||||
.addOrder( Order.desc( "avgWeight" ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -441,12 +438,12 @@ The `DetachedCriteria` class allows you to create a query outside the scope of a
|
|||
|
||||
[source,java]
|
||||
----
|
||||
DetachedCriteria query = DetachedCriteria.forClass(Cat.class)
|
||||
.add( Property.forName("sex").eq('F') );
|
||||
DetachedCriteria query = DetachedCriteria.forClass( Cat.class )
|
||||
.add( Property.forName( "sex" ).eq( 'F' ) );
|
||||
|
||||
Session session = ....;
|
||||
Transaction txn = session.beginTransaction();
|
||||
List results = query.getExecutableCriteria(session).setMaxResults(100).list();
|
||||
List results = query.getExecutableCriteria( session ).setMaxResults( 100 ).list();
|
||||
txn.commit();
|
||||
session.close();
|
||||
----
|
||||
|
@ -456,19 +453,19 @@ A `DetachedCriteria` can also be used to express a subquery.
|
|||
|
||||
[source,java]
|
||||
----
|
||||
DetachedCriteria avgWeight = DetachedCriteria.forClass(Cat.class)
|
||||
.setProjection( Property.forName("weight").avg() );
|
||||
session.createCriteria(Cat.class)
|
||||
.add( Property.forName("weight").gt(avgWeight) )
|
||||
DetachedCriteria avgWeight = DetachedCriteria.forClass( Cat.class )
|
||||
.setProjection( Property.forName( "weight" ).avg() );
|
||||
session.createCriteria( Cat.class )
|
||||
.add( Property.forName( "weight" ).gt( avgWeight ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
DetachedCriteria weights = DetachedCriteria.forClass(Cat.class)
|
||||
.setProjection( Property.forName("weight") );
|
||||
session.createCriteria(Cat.class)
|
||||
.add( Subqueries.geAll("weight", weights) )
|
||||
DetachedCriteria weights = DetachedCriteria.forClass( Cat.class )
|
||||
.setProjection( Property.forName( "weight" ) );
|
||||
session.createCriteria( Cat.class )
|
||||
.add( Subqueries.geAll( "weight", weights ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -476,11 +473,11 @@ Correlated subqueries are also possible:
|
|||
|
||||
[source,java]
|
||||
----
|
||||
DetachedCriteria avgWeightForSex = DetachedCriteria.forClass(Cat.class, "cat2")
|
||||
.setProjection( Property.forName("weight").avg() )
|
||||
.add( Property.forName("cat2.sex").eqProperty("cat.sex") );
|
||||
session.createCriteria(Cat.class, "cat")
|
||||
.add( Property.forName("weight").gt(avgWeightForSex) )
|
||||
DetachedCriteria avgWeightForSex = DetachedCriteria.forClass( Cat.class, "cat2" )
|
||||
.setProjection( Property.forName( "weight" ).avg() )
|
||||
.add( Property.forName( "cat2.sex" ).eqProperty( "cat.sex" ) );
|
||||
session.createCriteria( Cat.class, "cat" )
|
||||
.add( Property.forName( "weight" ).gt( avgWeightForSex ) )
|
||||
.list();
|
||||
----
|
||||
Example of multi-column restriction based on a subquery:
|
||||
|
@ -488,9 +485,12 @@ Example of multi-column restriction based on a subquery:
|
|||
[source,java]
|
||||
----
|
||||
DetachedCriteria sizeQuery = DetachedCriteria.forClass( Man.class )
|
||||
.setProjection( Projections.projectionList().add( Projections.property( "weight" ) )
|
||||
.add( Projections.property( "height" ) ) )
|
||||
.setProjection( Projections.projectionList()
|
||||
.add( Projections.property( "weight" ) )
|
||||
.add( Projections.property( "height" ) )
|
||||
)
|
||||
.add( Restrictions.eq( "name", "John" ) );
|
||||
|
||||
session.createCriteria( Woman.class )
|
||||
.add( Subqueries.propertiesEq( new String[] { "weight", "height" }, sizeQuery ) )
|
||||
.list();
|
||||
|
@ -527,10 +527,11 @@ Once you have enabled the Hibernate query cache, the `Restrictions.naturalId()`
|
|||
|
||||
[source,java]
|
||||
----
|
||||
session.createCriteria(User.class)
|
||||
session.createCriteria( User.class )
|
||||
.add( Restrictions.naturalId()
|
||||
.set("name", "gavin")
|
||||
.set("org", "hb")
|
||||
).setCacheable(true)
|
||||
.set( "name", "gavin" )
|
||||
.set( "org", "hb" )
|
||||
)
|
||||
.setCacheable( true )
|
||||
.uniqueResult();
|
||||
----
|
||||
|
|
|
@ -37,7 +37,7 @@ include::{sourcedir}/timestamp_version.xml[]
|
|||
|column |The name of the column which holds the timestamp. Optional, defaults to the property name
|
||||
|name |The name of a JavaBeans style property of Java type `Date` or `Timestamp` of the persistent class.
|
||||
|access |The strategy Hibernate uses to access the property value. Optional, defaults to `property`.
|
||||
|unsaved-value |A version property which indicates than instance is newly instantiated, and unsaved.
|
||||
|unsaved-value |A version property which indicates that the instance is newly instantiated and unsaved.
|
||||
This distinguishes it from detached instances that were saved or loaded in a previous session.
|
||||
The default value of `undefined` indicates that Hibernate uses the identifier property value.
|
||||
|source |Whether Hibernate retrieves the timestamp from the database or the current JVM.
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
== Legacy Hibernate Native Queries
|
||||
|
||||
[[legacy-sql-named-queries]]
|
||||
=== Legacy Named SQL queries
|
||||
=== Legacy named SQL queries
|
||||
|
||||
Named SQL queries can also be defined during mapping and called in exactly the same way as a named HQL query.
|
||||
In this case, you do _not_ need to call `addEntity()` anymore.
|
||||
|
@ -77,7 +77,7 @@ You must declare the column alias and Hibernate type using the `<return-scalar>`
|
|||
|
||||
You can externalize the resultset mapping information in a `<resultset>` element which will allow you to either reuse them across several named queries or through the `setResultSetMapping()` API.
|
||||
|
||||
.<resultset> mapping used to externalize mappinginformation
|
||||
.<resultset> mapping used to externalize mapping information
|
||||
====
|
||||
[source,xml]
|
||||
----
|
||||
|
@ -102,7 +102,7 @@ You can externalize the resultset mapping information in a `<resultset>` element
|
|||
----
|
||||
====
|
||||
|
||||
You can, alternatively, use the resultset mapping information in your hbm files directly in java code.
|
||||
You can, alternatively, use the resultset mapping information in your hbm files directly in Java code.
|
||||
|
||||
.Programmatically specifying the result mapping information
|
||||
====
|
||||
|
@ -110,7 +110,7 @@ You can, alternatively, use the resultset mapping information in your hbm files
|
|||
----
|
||||
List cats = session
|
||||
.createSQLQuery( "select {cat.*}, {kitten.*} from cats cat, cats kitten where kitten.mother = cat.id" )
|
||||
.setResultSetMapping("catAndKitten")
|
||||
.setResultSetMapping( "catAndKitten" )
|
||||
.list();
|
||||
----
|
||||
====
|
||||
|
@ -228,7 +228,7 @@ Native call syntax is not supported.
|
|||
For Oracle the following rules apply:
|
||||
|
||||
* A function must return a result set.
|
||||
The first parameter of a procedure must be an `OUT` that returns a result set.
|
||||
* The first parameter of a procedure must be an `OUT` that returns a result set.
|
||||
This is done by using a `SYS_REFCURSOR` type in Oracle 9 or 10.
|
||||
In Oracle you need to define a `REF CURSOR` type.
|
||||
See Oracle literature for further information.
|
||||
|
@ -267,12 +267,12 @@ The following example shows how to define custom SQL operations using annotation
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
If you expect to call a store procedure, be sure to set the `callable` attribute to `true`, in annotations as well as in xml.
|
||||
If you expect to call a stored procedure, be sure to set the `callable` attribute to `true` in both annotation and XML-based mappings.
|
||||
====
|
||||
|
||||
To check that the execution happens correctly, Hibernate allows you to define one of those three strategies:
|
||||
|
||||
* none: no check is performed: the store procedure is expected to fail upon issues
|
||||
* none: no check is performed; the store procedure is expected to fail upon issues
|
||||
* count: use of rowcount to check that the update is successful
|
||||
* param: like COUNT but using an output parameter rather that the standard mechanism
|
||||
|
||||
|
@ -312,7 +312,7 @@ Here is an example of a statement level override:
|
|||
[source,xml]
|
||||
----
|
||||
<sql-query name = "person">
|
||||
<return alias = "pers" class = "Person" lock-mod e= "upgrade"/>
|
||||
<return alias = "pers" class = "Person" lock-mode= "upgrade"/>
|
||||
SELECT NAME AS {pers.name}, ID AS {pers.id}
|
||||
FROM PERSON
|
||||
WHERE ID=?
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
image:images/architecture/data_access_layers.svg[Data Access Layers]
|
||||
|
||||
Hibernate, as an ORM solution, effectively "sits between" the Java application data access layer and the Relational Database, as can be seen in the diagram above.
|
||||
The Java application makes use of the Hibernate APIs to load, store, query, etc its domain data.
|
||||
The Java application makes use of the Hibernate APIs to load, store, query, etc. its domain data.
|
||||
Here we will introduce the essential Hibernate APIs.
|
||||
This will be a brief introduction; we will discuss these contracts in detail later.
|
||||
|
||||
|
@ -16,12 +16,12 @@ As a JPA provider, Hibernate implements the Java Persistence API specifications
|
|||
image:images/architecture/JPA_Hibernate.svg[image]
|
||||
|
||||
SessionFactory (`org.hibernate.SessionFactory`):: A thread-safe (and immutable) representation of the mapping of the application domain model to a database.
|
||||
Acts as a factory for `org.hibernate.Session` instances. The `EntityManagerFactory` is the JPA equivalent of a `SessionFactory` and basically those two converge into the same `SessionFactory` implementation.
|
||||
Acts as a factory for `org.hibernate.Session` instances. The `EntityManagerFactory` is the JPA equivalent of a `SessionFactory` and basically, those two converge into the same `SessionFactory` implementation.
|
||||
+
|
||||
A `SessionFactory` is very expensive to create, so, for any given database, the application should have only one associated `SessionFactory`.
|
||||
The `SessionFactory` maintains services that Hibernate uses across all `Session(s)` such as second level caches, connection pools, transaction system integrations, etc.
|
||||
|
||||
Session (`org.hibernate.Session`):: A single-threaded, short-lived object conceptually modeling a "Unit of Work" <<Bibliography.adoc#PoEAA,PoEAA>>.
|
||||
Session (`org.hibernate.Session`):: A single-threaded, short-lived object conceptually modeling a "Unit of Work" (<<Bibliography.adoc#PoEAA,PoEAA>>).
|
||||
In JPA nomenclature, the `Session` is represented by an `EntityManager`.
|
||||
+
|
||||
Behind the scenes, the Hibernate `Session` wraps a JDBC `java.sql.Connection` and acts as a factory for `org.hibernate.Transaction` instances.
|
||||
|
|
|
@ -38,7 +38,7 @@ The following settings control this behavior.
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
Since version 5.2, Hibernate allows overriding the global JDBC batch size given by the `hibernate.jdbc.batch_size` configuration property for a given `Session`.
|
||||
Since version 5.2, Hibernate allows overriding the global JDBC batch size given by the `hibernate.jdbc.batch_size` configuration property on a per `Session` basis.
|
||||
====
|
||||
|
||||
[[batch-session-jdbc-batch-size-example]]
|
||||
|
@ -66,14 +66,14 @@ include::{sourcedir}/BatchTest.java[tags=batch-session-batch-example]
|
|||
|
||||
There are several problems associated with this example:
|
||||
|
||||
. Hibernate caches all the newly inserted `Customer` instances in the session-level c1ache, so, when the transaction ends, 100 000 entities are managed by the persistence context.
|
||||
If the maximum memory allocated to the JVM is rather low, this example could fails with an `OutOfMemoryException`.
|
||||
. Hibernate caches all the newly inserted `Customer` instances in the session-level cache, so, when the transaction ends, 100 000 entities are managed by the persistence context.
|
||||
If the maximum memory allocated to the JVM is rather low, this example could fail with an `OutOfMemoryException`.
|
||||
The Java 1.8 JVM allocated either 1/4 of available RAM or 1Gb, which can easily accommodate 100 000 objects on the heap.
|
||||
. long-running transactions can deplete a connection pool so other transactions don't get a chance to proceed.
|
||||
. JDBC batching is not enabled by default, so every insert statement requires a database roundtrip.
|
||||
To enable JDBC batching, set the `hibernate.jdbc.batch_size` property to an integer between 10 and 50.
|
||||
|
||||
[NOTE]
|
||||
[IMPORTANT]
|
||||
====
|
||||
Hibernate disables insert batching at the JDBC level transparently if you use an identity identifier generator.
|
||||
====
|
||||
|
@ -118,7 +118,7 @@ However, it is good practice to close the `ScrollableResults` explicitly.
|
|||
|
||||
`StatelessSession` is a command-oriented API provided by Hibernate.
|
||||
Use it to stream data to and from the database in the form of detached objects.
|
||||
A `StatelessSession` has no persistence context associated with it and does not provide many of the higher-level life cycle semantics.
|
||||
A `StatelessSession` has no persistence context associated with it and does not provide many of the higher-level lifecycle semantics.
|
||||
|
||||
Some of the things not provided by a `StatelessSession` include:
|
||||
|
||||
|
@ -163,7 +163,7 @@ Hibernate provides methods for bulk SQL-style DML statement execution, in the fo
|
|||
Both the Hibernate native Query Language and JPQL (Java Persistence Query Language) provide support for bulk UPDATE and DELETE.
|
||||
|
||||
[[batch-bulk-hql-update-delete-example]]
|
||||
.Psuedo-syntax for UPDATE and DELETE statements using HQL
|
||||
.Pseudo-syntax for UPDATE and DELETE statements using HQL
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -175,7 +175,7 @@ DELETE FROM EntityName e WHERE e.name = ?
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
The `FROM` and `WHERE` clauses are each optional, but it's good practice to use them.
|
||||
Although the `FROM` and `WHERE` clauses are optional, it is good practice to declare them explicitly.
|
||||
====
|
||||
|
||||
The `FROM` clause can only refer to a single entity, which can be aliased.
|
||||
|
@ -220,7 +220,7 @@ include::{sourcedir}/BatchTest.java[tags=batch-bulk-hql-update-version-example]
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
If you use the `VERSIONED` statement, you cannot use custom version types, which use class `org.hibernate.usertype.UserVersionType`.
|
||||
If you use the `VERSIONED` statement, you cannot use custom version types that implement the `org.hibernate.usertype.UserVersionType`.
|
||||
|
||||
This feature is only available in HQL since it's not standardized by JPA.
|
||||
====
|
||||
|
@ -243,8 +243,8 @@ include::{sourcedir}/BatchTest.java[tags=batch-bulk-hql-delete-example]
|
|||
----
|
||||
====
|
||||
|
||||
Method `Query.executeUpdate()` returns an `int` value, which indicates the number of entities effected by the operation.
|
||||
This may or may not correlate to the number of rows effected in the database.
|
||||
Method `Query.executeUpdate()` returns an `int` value, which indicates the number of entities affected by the operation.
|
||||
This may or may not correlate to the number of rows affected in the database.
|
||||
A JPQL/HQL bulk operation might result in multiple SQL statements being executed, such as for joined-subclass.
|
||||
In the example of joined-subclass, a `DELETE` against one of the subclasses may actually result in deletes in the tables underlying the join, or further down the inheritance hierarchy.
|
||||
|
||||
|
@ -282,7 +282,7 @@ Otherwise, Hibernate throws an exception during parsing.
|
|||
Available in-database generators are `org.hibernate.id.SequenceGenerator` and its subclasses, and objects which implement `org.hibernate.id.PostInsertIdentifierGenerator`.
|
||||
|
||||
For properties mapped as either version or timestamp, the insert statement gives you two options.
|
||||
You can either specify the property in the properties_list, in which case its value is taken from the corresponding select expressions, or omit it from the properties_list,
|
||||
You can either specify the property in the properties_list, in which case its value is taken from the corresponding select expressions or omit it from the properties_list,
|
||||
in which case the seed value defined by the org.hibernate.type.VersionType is used.
|
||||
|
||||
[[batch-bulk-hql-insert-example]]
|
||||
|
@ -394,7 +394,7 @@ So, the entity identifiers are selected first and used for each particular updat
|
|||
[TIP]
|
||||
====
|
||||
The IN clause row value expression has long been supported by Oracle, PostgreSQL, and nowadays by MySQL 5.7.
|
||||
However, SQL Server 2014 does not support this syntax, so you'll have to use a different strategy.
|
||||
However, SQL Server 2014 does not support it, so you'll have to use a different strategy.
|
||||
====
|
||||
|
||||
[[batch-bulk-hql-strategies-InlineIdsSubSelectValueListBulkIdStrategy]]
|
||||
|
@ -422,8 +422,8 @@ include::{extrasdir}/batch-bulk-hql-InlineIdsSubSelectValueListBulkIdStrategy-de
|
|||
|
||||
[TIP]
|
||||
====
|
||||
The underlying database must support the VALUES list clause, like PostgreSQL or SQL Server 2008.
|
||||
However, this strategy requires the IN-clause row value expression for composite identifiers so you can use this strategy only with PostgreSQL.
|
||||
The underlying database must support the `VALUES` list clause, like PostgreSQL or SQL Server 2008.
|
||||
However, this strategy requires the IN-clause row value expression for composite identifiers, and for this reason, you can only use the `InlineIdsSubSelectValueListBulkIdStrategy` strategy with PostgreSQL.
|
||||
====
|
||||
|
||||
[[batch-bulk-hql-strategies-InlineIdsOrClauseBulkIdStrategy]]
|
||||
|
@ -451,7 +451,7 @@ include::{extrasdir}/batch-bulk-hql-InlineIdsOrClauseBulkIdStrategy-delete-query
|
|||
|
||||
[TIP]
|
||||
====
|
||||
This strategy has the advantage of being supported by all the major relational database systems (e.g. Oracle, SQL Server, MySQL, and PostgreSQL).
|
||||
The `InlineIdsOrClauseBulkIdStrategy` strategy has the advantage of being supported by all the major relational database systems (e.g. Oracle, SQL Server, MySQL, and PostgreSQL).
|
||||
====
|
||||
|
||||
[[batch-bulk-hql-strategies-CteValuesListBulkIdStrategy]]
|
||||
|
@ -479,16 +479,17 @@ include::{extrasdir}/batch-bulk-hql-CteValuesListBulkIdStrategy-delete-query-exa
|
|||
|
||||
[TIP]
|
||||
====
|
||||
The underlying database must support the CTE (Common Table Expressions) that can be referenced from non-query statements as well, like PostgreSQL since 9.1 or SQL Server since 2005.
|
||||
The underlying database must support CTE (Common Table Expressions) that can be referenced from non-query statements as well. For instance, PostgreSQL supports this feature since version 9.1 and SQL Server offers support for it since version 2005.
|
||||
|
||||
The underlying database must also support the VALUES list clause, like PostgreSQL or SQL Server 2008.
|
||||
|
||||
However, this strategy requires the IN-clause row value expression for composite identifiers, so you can only use this strategy only with PostgreSQL.
|
||||
However, this strategy requires the IN-clause row value expression for composite identifiers, so you can only use this strategy with PostgreSQL.
|
||||
====
|
||||
|
||||
If you can use temporary tables, that's probably the best choice.
|
||||
However, if you are not allowed to create temporary tables, you must pick one of these four strategies that works with your underlying database.
|
||||
Before making your mind, you should benchmark which one works best for your current workload.
|
||||
For instance, http://blog.2ndquadrant.com/postgresql-ctes-are-optimization-fences/[CTE are optimization fences in PostgreSQL], so make sure you measure before taking a decision.
|
||||
Before making up your mind, you should benchmark which one works best for your current workload.
|
||||
For instance, http://blog.2ndquadrant.com/postgresql-ctes-are-optimization-fences/[CTE are optimization fences in PostgreSQL], so make sure you measure before making a decision.
|
||||
|
||||
If you're using Oracle or MySQL 5.7, you can choose either `InlineIdsOrClauseBulkIdStrategy` or `InlineIdsInClauseBulkIdStrategy`.
|
||||
For older versions of MySQL, you can only use `InlineIdsOrClauseBulkIdStrategy`.
|
||||
|
|
|
@ -4,19 +4,10 @@
|
|||
:boot-spi-sourcedir: ../../../../../../../hibernate-core/src/test/java/org/hibernate/boot/spi
|
||||
:extrasdir: extras
|
||||
|
||||
org.hibernate.boot.spi.metadatabuildercontributor;
|
||||
|
||||
The term bootstrapping refers to initializing and starting a software component.
|
||||
In Hibernate, we are specifically talking about the process of building a fully functional `SessionFactory` instance or `EntityManagerFactory` instance, for JPA.
|
||||
The process is very different for each.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
This chapter will not focus on all the possibilities of bootstrapping.
|
||||
Those will be covered in the specific, more relevant chapters later on.
|
||||
Instead, we focus here on the API calls needed to perform the bootstrapping.
|
||||
====
|
||||
|
||||
[TIP]
|
||||
====
|
||||
During the bootstrap process, you might want to customize Hibernate behavior so make sure you check the <<appendices/Configurations.adoc#configurations,Configurations>> section as well.
|
||||
|
@ -26,8 +17,8 @@ During the bootstrap process, you might want to customize Hibernate behavior so
|
|||
=== Native Bootstrapping
|
||||
|
||||
This section discusses the process of bootstrapping a Hibernate `SessionFactory`.
|
||||
Specifically it discusses the bootstrapping APIs as redesigned in 5.0.
|
||||
For a discussion of the legacy bootstrapping API, see <<appendices/Legacy_Bootstrap.adoc#appendix-legacy-bootstrap,Legacy Bootstrapping>>
|
||||
Specifically, it addresses the bootstrapping APIs as redesigned in 5.0.
|
||||
For a discussion of the legacy bootstrapping API, see <<appendices/Legacy_Bootstrap.adoc#appendix-legacy-bootstrap,Legacy Bootstrapping>>.
|
||||
|
||||
[[bootstrap-native-registry]]
|
||||
==== Building the ServiceRegistry
|
||||
|
@ -39,18 +30,18 @@ First is the `org.hibernate.boot.registry.BootstrapServiceRegistry`.
|
|||
The `BootstrapServiceRegistry` is intended to hold services that Hibernate needs at both bootstrap and run time.
|
||||
This boils down to 3 services:
|
||||
|
||||
`org.hibernate.boot.registry.classloading.spi.ClassLoaderService`:: which controls how Hibernate interacts with `ClassLoader`s
|
||||
`org.hibernate.boot.registry.classloading.spi.ClassLoaderService`:: which controls how Hibernate interacts with ``ClassLoader``s.
|
||||
`org.hibernate.integrator.spi.IntegratorService`:: which controls the management and discovery of `org.hibernate.integrator.spi.Integrator` instances.
|
||||
`org.hibernate.boot.registry.selector.spi.StrategySelector`:: which control how Hibernate resolves implementations of various strategy contracts.
|
||||
`org.hibernate.boot.registry.selector.spi.StrategySelector`:: which controls how Hibernate resolves implementations of various strategy contracts.
|
||||
This is a very powerful service, but a full discussion of it is beyond the scope of this guide.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
If you are ok with the default behavior of Hibernate in regards to these `BootstrapServiceRegistry` services
|
||||
(which is quite often the case, especially in stand-alone environments), then building the `BootstrapServiceRegistry` can be skipped.
|
||||
(which is quite often the case, especially in stand-alone environments), then you don't need to explicitly build the `BootstrapServiceRegistry`.
|
||||
====
|
||||
|
||||
If you wish to alter how the `BootstrapServiceRegistry` is built, that is controlled through the `org.hibernate.boot.registry.BootstrapServiceRegistryBuilder:`
|
||||
If you wish to alter how the `BootstrapServiceRegistry` is built, that is controlled through the `org.hibernate.boot.registry.BootstrapServiceRegistryBuilder`:
|
||||
|
||||
[[bootstrap-bootstrap-native-registry-BootstrapServiceRegistry-example]]
|
||||
.Controlling `BootstrapServiceRegistry` building
|
||||
|
@ -110,11 +101,11 @@ include::{sourcedir}/BootstrapTest.java[tags=bootstrap-event-listener-registrati
|
|||
[[bootstrap-native-metadata]]
|
||||
==== Building the Metadata
|
||||
|
||||
The second step in native bootstrapping is the building of a `org.hibernate.boot.Metadata` object containing the parsed representations of an application domain model and its mapping to a database.
|
||||
The second step in native bootstrapping is the building of an `org.hibernate.boot.Metadata` object containing the parsed representations of an application domain model and its mapping to a database.
|
||||
The first thing we obviously need to build a parsed representation is the source information to be parsed (annotated classes, `hbm.xml` files, `orm.xml` files).
|
||||
This is the purpose of `org.hibernate.boot.MetadataSources`:
|
||||
This is the purpose of `org.hibernate.boot.MetadataSources`.
|
||||
|
||||
`MetadataSources` has many other methods as well, explore its API and https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/MetadataSources.html[Javadocs] for more information.
|
||||
`MetadataSources` has many other methods as well. Explore its API and https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/MetadataSources.html[Javadocs] for more information.
|
||||
Also, all methods on `MetadataSources` offer fluent-style call chaining::
|
||||
|
||||
[[bootstrap-native-metadata-source-example]]
|
||||
|
@ -133,7 +124,7 @@ If you are ok with the default behavior in building the Metadata then you can si
|
|||
====
|
||||
Notice that a `ServiceRegistry` can be passed at a number of points in this bootstrapping process.
|
||||
The suggested approach is to build a `StandardServiceRegistry` yourself and pass that along to the `MetadataSources` constructor.
|
||||
From there, `MetadataBuilder`, `Metadata`, `SessionFactoryBuilder` and `SessionFactory` will all pick up that same `StandardServiceRegistry`.
|
||||
From there, `MetadataBuilder`, `Metadata`, `SessionFactoryBuilder`, and `SessionFactory` will all pick up that same `StandardServiceRegistry`.
|
||||
====
|
||||
|
||||
However, if you wish to adjust the process of building `Metadata` from `MetadataSources`,
|
||||
|
@ -156,7 +147,7 @@ include::{sourcedir}/BootstrapTest.java[tags=bootstrap-native-metadata-builder-e
|
|||
The final step in native bootstrapping is to build the `SessionFactory` itself.
|
||||
Much like discussed above, if you are ok with the default behavior of building a `SessionFactory` from a `Metadata` reference, you can simply call the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/Metadata.html#buildSessionFactory--[`buildSessionFactory`] method on the `Metadata` object.
|
||||
|
||||
However, if you would like to adjust that building process you will need to use `SessionFactoryBuilder` as obtained via [`Metadata#getSessionFactoryBuilder`. Again, see its https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/Metadata.html#getSessionFactoryBuilder--[Javadocs] for more details.
|
||||
However, if you would like to adjust that building process, you will need to use `SessionFactoryBuilder` as obtained via `Metadata#getSessionFactoryBuilder`. Again, see its https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/Metadata.html#getSessionFactoryBuilder--[Javadocs] for more details.
|
||||
|
||||
[[bootstrap-native-SessionFactory-example]]
|
||||
.Native Bootstrapping - Putting it all together
|
||||
|
@ -248,11 +239,11 @@ include::{sourcedir}/BootstrapTest.java[tags=bootstrap-jpa-compliant-EntityManag
|
|||
[NOTE]
|
||||
====
|
||||
If you don't want to provide a `persistence.xml` configuration file, JPA allows you to provide all the configuration options in a
|
||||
http://docs.oracle.com/javaee/7/api/javax/persistence/spi/PersistenceUnitInfo.html[`PersistenceUnitInfo`] implementation and call
|
||||
{jpaJavadocUrlPrefix}spi/PersistenceUnitInfo.html[`PersistenceUnitInfo`] implementation and call
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/jpa/HibernatePersistenceProvider.html#createContainerEntityManagerFactory-javax.persistence.spi.PersistenceUnitInfo-java.util.Map-[`HibernatePersistenceProvider.html#createContainerEntityManagerFactory`].
|
||||
====
|
||||
|
||||
To inject the default Persistence Context, you can use the http://docs.oracle.com/javaee/7/api/javax/persistence/PersistenceContext.html[`@PersistenceContext`] annotation.
|
||||
To inject the default Persistence Context, you can use the {jpaJavadocUrlPrefix}PersistenceContext.html[`@PersistenceContext`] annotation.
|
||||
|
||||
[[bootstrap-jpa-compliant-PersistenceContext-example]]
|
||||
.Inject the default `EntityManager`
|
||||
|
@ -264,9 +255,9 @@ include::{sourcedir}/BootstrapTest.java[tags=bootstrap-jpa-compliant-Persistence
|
|||
====
|
||||
|
||||
To inject a specific Persistence Context,
|
||||
you can use the http://docs.oracle.com/javaee/7/api/javax/persistence/PersistenceContext.html[`@PersistenceContext`] annotation,
|
||||
you can use the {jpaJavadocUrlPrefix}PersistenceContext.html[`@PersistenceContext`] annotation,
|
||||
and you can even pass `EntityManager`-specific properties using the
|
||||
http://docs.oracle.com/javaee/7/api/javax/persistence/PersistenceProperty.html[`@PersistenceProperty`] annotation.
|
||||
{jpaJavadocUrlPrefix}PersistenceProperty.html[`@PersistenceProperty`] annotation.
|
||||
|
||||
|
||||
[[bootstrap-jpa-compliant-PersistenceContext-configurable-example]]
|
||||
|
@ -291,22 +282,22 @@ JPA offers two mapping options:
|
|||
- annotations
|
||||
- XML mappings
|
||||
|
||||
Although annotations are much more common, there are projects were XML mappings are preferred.
|
||||
Although annotations are much more common, there are projects where XML mappings are preferred.
|
||||
You can even mix annotations and XML mappings so that you can override annotation mappings with XML configurations that can be easily changed without recompiling the project source code.
|
||||
This is possible because if there are two conflicting mappings, the XML mappings takes precedence over its annotation counterpart.
|
||||
This is possible because if there are two conflicting mappings, the XML mappings take precedence over its annotation counterpart.
|
||||
|
||||
The JPA specifications requires the XML mappings to be located on the class path:
|
||||
The JPA specification requires the XML mappings to be located on the classpath:
|
||||
|
||||
[quote, Section 8.2.1.6.2 of the JPA 2.1 Specification]
|
||||
____
|
||||
An object/relational mapping XML file named `orm.xml` may be specified in the `META-INF` directory in the root of the persistence unit or in the `META-INF` directory of any jar file referenced by the `persistence.xml`.
|
||||
|
||||
Alternatively, or in addition, one or more mapping files may be referenced by the mapping-file elements of the persistence-unit element. These mapping files may be present anywhere on the class path.
|
||||
Alternatively, or in addition, one or more mapping files may be referenced by the mapping-file elements of the persistence-unit element. These mapping files may be present anywhere on the classpath.
|
||||
____
|
||||
|
||||
Therefore, the mapping files can reside in the application jar artifacts, or they can be stored in an external folder location with the cogitation that that location be included in the class path.
|
||||
Therefore, the mapping files can reside in the application jar artifacts, or they can be stored in an external folder location, provided that location is included in the classpath.
|
||||
|
||||
Hibernate is more lenient in this regard so you can use any external location even outside of the application configured class path.
|
||||
Hibernate is more lenient in this regard so you can use any external location even outside of the application configured classpath.
|
||||
|
||||
[[bootstrap-jpa-compliant-persistence-xml-external-mappings-example]]
|
||||
.META-INF/persistence.xml configuration file for external XML mappings
|
||||
|
@ -343,4 +334,4 @@ The above `MetadataBuilderContributor` is used to register a `SqlFuction` which
|
|||
By having access to the
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/MetadataBuilder.html[`MetadataBuilder`] class that's used by the underlying `SessionFactory`, the JPA bootstrap becomes just as flexible as the Hibernate native bootstrap mechanism.
|
||||
|
||||
You can then pass the custom `MetadataBuilderContributor` via the `hibernate.metadata_builder_contributor` configuration property as explained in the <<appendices/Configurations.adoc#configurations-bootstrap, configuration chapter>>
|
||||
You can then pass the custom `MetadataBuilderContributor` via the `hibernate.metadata_builder_contributor` configuration property as explained in the <<appendices/Configurations.adoc#configurations-bootstrap, Configuration chapter>>.
|
||||
|
|
|
@ -10,8 +10,9 @@ It is possible to configure a JVM-level (`SessionFactory`-level) or even a clust
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
Be aware that caches are not aware of changes made to the persistent store by other applications.
|
||||
They can, however, be configured to regularly expire cached data.
|
||||
Be aware that Hibernate caches are not aware of changes made to the persistent store by other applications.
|
||||
|
||||
To address this limitation, you can configure a TTL (Time To Live) retention policy at the second-level cache region level so that the underlying cache entries expire regularly.
|
||||
====
|
||||
|
||||
[[caching-config]]
|
||||
|
@ -32,23 +33,24 @@ Detailed information is provided later in this chapter.
|
|||
[[caching-config-properties]]
|
||||
==== Caching configuration properties
|
||||
|
||||
Besides specific provider configuration, there are a number of configurations options on the Hibernate side of the integration that control various caching behaviors:
|
||||
Besides provider specific configuration, there are a number of configurations options on the Hibernate side of the integration that control various caching behaviors:
|
||||
|
||||
`hibernate.cache.use_second_level_cache`::
|
||||
Enable or disable second level caching overall. Default is true, although the default region factory is `NoCachingRegionFactory`.
|
||||
Enable or disable second level caching overall. By default, if the currently configured
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/cache/spi/RegionFactory.html[`RegionFactory`] is not the `NoCachingRegionFactory`, then the second-level cache is going to be enabled. Otherwise, the second-level cache is disabled.
|
||||
`hibernate.cache.use_query_cache`::
|
||||
Enable or disable second level caching of query results. Default is false.
|
||||
Enable or disable second level caching of query results. The default is false.
|
||||
`hibernate.cache.query_cache_factory`::
|
||||
Query result caching is handled by a special contract that deals with staleness-based invalidation of the results.
|
||||
The default implementation does not allow stale results at all. Use this for applications that would like to relax that.
|
||||
Names an implementation of `org.hibernate.cache.spi.QueryCacheFactory`
|
||||
Names an implementation of `org.hibernate.cache.spi.TimestampsCacheFactory`.
|
||||
`hibernate.cache.use_minimal_puts`::
|
||||
Optimizes second-level cache operations to minimize writes, at the cost of more frequent reads. Providers typically set this appropriately.
|
||||
`hibernate.cache.region_prefix`::
|
||||
Defines a name to be used as a prefix to all second-level cache region names.
|
||||
`hibernate.cache.default_cache_concurrency_strategy`::
|
||||
In Hibernate second-level caching, all regions can be configured differently including the concurrency strategy to use when accessing that particular region.
|
||||
This setting allows to define a default strategy to be used.
|
||||
This setting allows defining a default strategy to be used.
|
||||
This setting is very rarely required as the pluggable providers do specify the default strategy to use.
|
||||
Valid values include:
|
||||
* read-only,
|
||||
|
@ -61,12 +63,12 @@ Besides specific provider configuration, there are a number of configurations op
|
|||
`hibernate.cache.auto_evict_collection_cache`::
|
||||
Enables or disables the automatic eviction of a bidirectional association's collection cache entry when the association is changed just from the owning side.
|
||||
This is disabled by default, as it has a performance impact to track this state.
|
||||
However if your application does not manage both sides of bidirectional association where the collection side is cached,
|
||||
However, if your application does not manage both sides of a bidirectional association where the collection side is cached,
|
||||
the alternative is to have stale data in that collection cache.
|
||||
`hibernate.cache.use_reference_entries`::
|
||||
Enable direct storage of entity references into the second level cache for read-only or immutable entities.
|
||||
`hibernate.cache.keys_factory`::
|
||||
When storing entries into second-level cache as key-value pair, the identifiers can be wrapped into tuples
|
||||
When storing entries into the second-level cache as a key-value pair, the identifiers can be wrapped into tuples
|
||||
<entity type, tenant, identifier> to guarantee uniqueness in case that second-level cache stores all entities
|
||||
in single space. These tuples are then used as keys in the cache. When the second-level cache implementation
|
||||
(incl. its configuration) guarantees that different entity types are stored separately and multi-tenancy is not
|
||||
|
@ -87,7 +89,7 @@ or by using the `javax.persistence.sharedCache.mode` property in your configurat
|
|||
The following values are possible:
|
||||
|
||||
`ENABLE_SELECTIVE` (Default and recommended value)::
|
||||
Entities are not cached unless explicitly marked as cacheable (with the https://javaee.github.io/javaee-spec/javadocs/javax/persistence/Cacheable.html[`@Cacheable`] annotation).
|
||||
Entities are not cached unless explicitly marked as cacheable (with the {jpaJavadocUrlPrefix}Cacheable.html[`@Cacheable`] annotation).
|
||||
`DISABLE_SELECTIVE`::
|
||||
Entities are cached unless explicitly marked as non-cacheable.
|
||||
`ALL`::
|
||||
|
@ -114,8 +116,9 @@ transactional::
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
Rather than using a global cache concurrency strategy, it is recommended to define this setting on a per entity basis.
|
||||
Use the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Cache.html[`@org.hibernate.annotations.Cache`] annotation for that.
|
||||
Rather than using a global setting, it is recommended to define the cache concurrency strategy on a per entity basis.
|
||||
|
||||
Use the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Cache.html[`@org.hibernate.annotations.Cache`] annotation for this purpose.
|
||||
====
|
||||
|
||||
The `@Cache` annotation define three attributes:
|
||||
|
@ -146,10 +149,9 @@ ____
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
As of Hibernate ORM 5.3, it's now possible to possible to override a base class `@Cacheable` or `@Cache` definition in subclasses.
|
||||
As of Hibernate ORM 5.3, you can now override a base class `@Cacheable` or `@Cache` definition at subclass level.
|
||||
|
||||
However, the Hibernate cache concurrency strategy (e.g. read-only, nonstrict-read-write, read-write, transactional) is still defined at the root entity level
|
||||
and cannot be overridden.
|
||||
However, the Hibernate cache concurrency strategy (e.g. read-only, nonstrict-read-write, read-write, transactional) is still defined at the root entity level and cannot be overridden.
|
||||
====
|
||||
|
||||
Nevertheless, the reasons why we advise you to have all entities belonging to an inheritance tree share the same caching definition can be summed as follows:
|
||||
|
@ -305,10 +307,13 @@ include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-query-native-example
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
The query cache does not cache the state of the actual entities in the cache;
|
||||
it caches only identifier values and results of value type.
|
||||
|
||||
|
||||
For entity queries, the query cache does not cache the state of the actual entities. Instead, it stores the entity identifiers, and when the query result is fetched from the cache, the entity state is going to be loaded from the second-level cache entity regions.
|
||||
|
||||
Just as with collection caching, the query cache should always be used in conjunction with the second-level cache for those entities expected to be cached as part of a query result cache.
|
||||
|
||||
For projection queries, the query cache stores the dehydrated entity state (e.g. `Object[]`) associated with the underlying JDBC `ResultSet`.
|
||||
====
|
||||
|
||||
[[caching-query-region]]
|
||||
|
@ -317,7 +322,7 @@ Just as with collection caching, the query cache should always be used in conjun
|
|||
This setting creates two new cache regions:
|
||||
|
||||
`default-query-results-region`::
|
||||
Holding the cached query results
|
||||
Holding the cached query results.
|
||||
`default-update-timestamps-region`::
|
||||
Holding timestamps of the most recent updates to queryable tables.
|
||||
These are used to validate the results as they are served from the query cache.
|
||||
|
@ -326,7 +331,7 @@ This setting creates two new cache regions:
|
|||
====
|
||||
If you configure your underlying cache implementation to use expiration, it's very important
|
||||
that the timeout of the underlying cache region for the `default-update-timestamps-region`
|
||||
is set to a higher value than the timeouts of any of the query caches.
|
||||
is set to a higher value than the timeout setting of any of the query caches.
|
||||
|
||||
In fact, we recommend that the `default-update-timestamps-region` region is not configured for expiration (time-based) or eviction (size/memory-based) at all.
|
||||
Note that an LRU (Least Recently Used) cache eviction policy is never appropriate for this particular cache region.
|
||||
|
@ -376,11 +381,11 @@ include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-query-region-store-m
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
When using http://docs.oracle.com/javaee/7/api/javax/persistence/CacheStoreMode.html#REFRESH[`CacheStoreMode.REFRESH`] or https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/CacheMode.html#REFRESH[`CacheMode.REFRESH`] in conjunction with the region you have defined for the given query,
|
||||
When using {jpaJavadocUrlPrefix}CacheStoreMode.html#REFRESH[`CacheStoreMode.REFRESH`] or https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/CacheMode.html#REFRESH[`CacheMode.REFRESH`] in conjunction with the region you have defined for the given query,
|
||||
Hibernate will selectively force the results cached in that particular region to be refreshed.
|
||||
|
||||
This is particularly useful in cases where underlying data may have been updated via a separate process
|
||||
and is a far more efficient alternative to bulk eviction of the region via `SessionFactory` eviction which looks as follows:
|
||||
This behavior is particularly useful in cases when the underlying data may have been updated via a separate process
|
||||
and is a far more efficient alternative to the bulk eviction of the region via `SessionFactory` eviction which looks as follows:
|
||||
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -393,8 +398,8 @@ include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-query-region-native-
|
|||
|
||||
Traditionally, Hibernate defined the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/CacheMode.html[`CacheMode`] enumeration to describe
|
||||
the ways of interactions with the cached data.
|
||||
JPA split cache modes by storage (http://docs.oracle.com/javaee/7/api/javax/persistence/CacheStoreMode.html[`CacheStoreMode`])
|
||||
and retrieval (http://docs.oracle.com/javaee/7/api/javax/persistence/CacheRetrieveMode.html[`CacheRetrieveMode`]).
|
||||
JPA split cache modes by storage ({jpaJavadocUrlPrefix}CacheStoreMode.html[`CacheStoreMode`])
|
||||
and retrieval ({jpaJavadocUrlPrefix}CacheRetrieveMode.html[`CacheRetrieveMode`]).
|
||||
|
||||
The relationship between Hibernate and JPA cache modes can be seen in the following table:
|
||||
|
||||
|
@ -402,11 +407,11 @@ The relationship between Hibernate and JPA cache modes can be seen in the follow
|
|||
[cols=",,",options="header",]
|
||||
|======================================
|
||||
|Hibernate | JPA | Description
|
||||
|`CacheMode.NORMAL` |`CacheStoreMode.USE` and `CacheRetrieveMode.USE` | Default. Reads/writes data from/into cache
|
||||
|`CacheMode.NORMAL` |`CacheStoreMode.USE` and `CacheRetrieveMode.USE` | Default. Reads/writes data from/into the cache
|
||||
|`CacheMode.REFRESH` |`CacheStoreMode.REFRESH` and `CacheRetrieveMode.BYPASS` | Doesn't read from cache, but writes to the cache upon loading from the database
|
||||
|`CacheMode.PUT` |`CacheStoreMode.USE` and `CacheRetrieveMode.BYPASS` | Doesn't read from cache, but writes to the cache as it reads from the database
|
||||
|`CacheMode.GET` |`CacheStoreMode.BYPASS` and `CacheRetrieveMode.USE` | Read from the cache, but doesn't write to cache
|
||||
|`CacheMode.IGNORE` |`CacheStoreMode.BYPASS` and `CacheRetrieveMode.BYPASS` | Doesn't read/write data from/into cache
|
||||
|`CacheMode.IGNORE` |`CacheStoreMode.BYPASS` and `CacheRetrieveMode.BYPASS` | Doesn't read/write data from/into the cache
|
||||
|======================================
|
||||
|
||||
Setting the cache mode can be done either when loading entities directly or when executing a query.
|
||||
|
@ -455,7 +460,7 @@ include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-management-cache-mod
|
|||
Because the second level cache is bound to the `EntityManagerFactory` or the `SessionFactory`,
|
||||
cache eviction must be done through these two interfaces.
|
||||
|
||||
JPA only supports entity eviction through the https://javaee.github.io/javaee-spec/javadocs/javax/persistence/Cache.html[`javax.persistence.Cache`] interface:
|
||||
JPA only supports entity eviction through the {jpaJavadocUrlPrefix}Cache.html[`javax.persistence.Cache`] interface:
|
||||
|
||||
[[caching-management-evict-jpa-example]]
|
||||
.Evicting entities with JPA
|
||||
|
@ -507,8 +512,9 @@ include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-statistics-example]
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
Use of the build-in integration for https://jcp.org/en/jsr/detail?id=107[JCache] requires that the `hibernate-jcache` module jar (and all of its dependencies) are on the classpath.
|
||||
In addition a JCache implementation needs to be added as well.
|
||||
To use the built-in integration for https://jcp.org/en/jsr/detail?id=107[JCache], you need the `hibernate-jcache` module jar (and all of its dependencies) to be on the classpath.
|
||||
|
||||
In addition, a JCache implementation needs to be added as well.
|
||||
A list of compatible implementations can be found https://jcp.org/aboutJava/communityprocess/implementations/jsr107/index.html[on the JCP website].
|
||||
An alternative source of compatible implementations can be found through https://github.com/cruftex/jsr107-test-zoo[the JSR-107 test zoo].
|
||||
====
|
||||
|
@ -559,6 +565,28 @@ In order to control which provider to use and specify configuration for the `Cac
|
|||
|
||||
Only by specifying the second property `hibernate.javax.cache.uri` will you be able to have a `CacheManager` per `SessionFactory`.
|
||||
|
||||
===== Using a non-default JCache `CacheManager`
|
||||
|
||||
If you don't want to use the default `CacheManager`, you need to set the `hibernate.javax.cache.cache_manager` configuration property
|
||||
to one of the following values:
|
||||
|
||||
Object reference:: If the value is an `Object` instance implementing the `CacheManager` interface,
|
||||
the provided `CacheManager` instance will be used.
|
||||
`Class`:: If the value is a Java `Class` object that implements the `CacheManager` interface,
|
||||
Hibernate will create a new instance for that `Class` and use it instead of the default one.
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
When passing a Java `Class` that implements the `CacheManager` interface, you must make sure that the `CacheManager` implementation class provides a default no-arg constructor because that's going to be used to instantiate a `CacheManager` implementation `Object`.
|
||||
====
|
||||
`String`:: If the value is a Java `String`, Hibernate expects it to be the fully-qualified `Class` name
|
||||
of the `CacheManager` implementation which will be used to instantiate the non-default `CacheManager`.
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
When passing the fully-qualified class name, you must make sure that the associated `Class` type provides a default no-arg constructor because that's going to be used to instantiate a `CacheManager` implementation `Object`.
|
||||
====
|
||||
|
||||
[[caching-provider-jcache-missing-cache-strategy]]
|
||||
==== JCache missing cache strategy
|
||||
|
||||
|
@ -582,11 +610,9 @@ and also log a warning about the missing cache.
|
|||
|
||||
[WARNING]
|
||||
====
|
||||
Note that caches created this way may be very badly configured (unlimited size and no eviction in particular)
|
||||
unless the cache provider was explicitly configured to use a specific configuration for default caches.
|
||||
Note that caches created this way may not be suitable for production usage (unlimited size and no eviction in particular) unless the cache provider explicitly provides a specific configuration for default caches.
|
||||
|
||||
Ehcache in particular allows to set such default configuration using cache templates,
|
||||
see http://www.ehcache.org/documentation/3.0/107.html#supplement-jsr-107-configurations
|
||||
Ehcache, in particular, allows setting such default configuration using cache templates. See the http://www.ehcache.org/documentation/3.0/107.html#supplement-jsr-107-configurations[Ehcache documentation] for more details.
|
||||
====
|
||||
|
||||
[[caching-provider-ehcache]]
|
||||
|
@ -596,7 +622,7 @@ This integration covers Ehcache 2.x, in order to use Ehcache 3.x as second level
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
Use of the build-in integration for http://www.ehcache.org/[Ehcache] requires that the `hibernate-ehcache` module jar (and all of its dependencies) are on the classpath.
|
||||
Use of the built-in integration for http://www.ehcache.org/[Ehcache] requires that the `hibernate-ehcache` module jar (and all of its dependencies) are on the classpath.
|
||||
====
|
||||
|
||||
[[caching-provider-ehcache-region-factory]]
|
||||
|
@ -644,7 +670,7 @@ shared among multiple `SessionFactory` instances in the same JVM.
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
http://www.ehcache.org/documentation/2.8/integrations/hibernate#optional[Ehcache documentation] recommends using multiple non-singleton `CacheManager(s)` when there are multiple Hibernate `SessionFactory` instances running in the same JVM.
|
||||
The http://www.ehcache.org/documentation/2.8/integrations/hibernate#optional[Ehcache documentation] recommends using multiple non-singleton ``CacheManager``s when there are multiple Hibernate `SessionFactory` instances running in the same JVM.
|
||||
====
|
||||
|
||||
[[caching-provider-ehcache-missing-cache-strategy]]
|
||||
|
@ -670,16 +696,15 @@ and also log a warning about the missing cache.
|
|||
|
||||
[WARNING]
|
||||
====
|
||||
Note that caches created this way may be very badly configured (large size in particular)
|
||||
unless an appropriate `<defaultCache>` entry is added to the Ehcache configuration.
|
||||
Note that caches created this way may be very badly configured (large size in particular) unless an appropriate `<defaultCache>` entry is added to the Ehcache configuration.
|
||||
====
|
||||
|
||||
[[caching-provider-infinispan]]
|
||||
=== Infinispan
|
||||
|
||||
Infinispan is a distributed in-memory key/value data store, available as a cache or data grid, which can be used as a Hibernate 2nd-level cache provider as well.
|
||||
Infinispan is a distributed in-memory key/value data store, available as a cache or data grid, which can be used as a Hibernate second-level cache provider as well.
|
||||
|
||||
It supports advanced functionality such as transactions, events, querying, distributed processing, off-heap and geographical failover.
|
||||
|
||||
For more details, check out the
|
||||
http://infinispan.org/docs/stable/user_guide/user_guide.html#jpa_hibernate_2l_cache[Infinispan User Guide].
|
||||
https://infinispan.org/docs/stable/titles/integrating/integrating.html#integrating_jpa_hibernate[Infinispan User Guide].
|
||||
|
|
|
@ -10,8 +10,9 @@ When placed on the identifier getter, Hibernate will use property-based access.
|
|||
|
||||
[IMPORTANT]
|
||||
====
|
||||
You should pay attention to https://docs.oracle.com/javase/8/docs/api/java/beans/Introspector.html#decapitalize(java.lang.String)[Java Beans specification] in regard to naming properties to avoid
|
||||
issues such as https://hibernate.atlassian.net/browse/HCANN-63[Property name beginning with at least two uppercase characters has odd functionality in HQL]!
|
||||
To avoid issues such as
|
||||
https://hibernate.atlassian.net/browse/HCANN-63[HCANN-63 - Property name beginning with at least two uppercase characters has odd functionality in HQL], you should pay attention to
|
||||
https://docs.oracle.com/javase/8/docs/api/java/beans/Introspector.html#decapitalize(java.lang.String)[Java Bean specification] in regard to naming properties.
|
||||
====
|
||||
|
||||
Embeddable types inherit the access strategy from their parent entities.
|
||||
|
@ -34,7 +35,9 @@ To exclude a field from being part of the entity persistent state, the field mus
|
|||
[NOTE]
|
||||
====
|
||||
Another advantage of using field-based access is that some entity attributes can be hidden from outside the entity.
|
||||
|
||||
An example of such attribute is the entity `@Version` field, which, usually, does not need to be manipulated by the data access layer.
|
||||
|
||||
With field-based access, we can simply omit the getter and the setter for this version field, and Hibernate can still leverage the optimistic concurrency control mechanism.
|
||||
====
|
||||
|
||||
|
|
|
@ -69,7 +69,7 @@ include::{extrasdir}/associations-one-to-many-unidirectional-example.sql[]
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
The `@OneToMany` association is by definition a parent association, even if it's a unidirectional or a bidirectional one.
|
||||
The `@OneToMany` association is by definition a parent association, regardless of whether it's a unidirectional or a bidirectional one.
|
||||
Only the parent side of an association makes sense to cascade its entity state transitions to children.
|
||||
====
|
||||
|
||||
|
@ -93,7 +93,7 @@ Upon removing a `Phone` from the phones collection, the association row is delet
|
|||
[NOTE]
|
||||
====
|
||||
The unidirectional associations are not very efficient when it comes to removing child entities.
|
||||
In this particular example, upon flushing the persistence context, Hibernate deletes all database child entries and reinserts the ones that are still found in the in-memory persistence context.
|
||||
In the example above, upon flushing the persistence context, Hibernate deletes all database rows from the link table (e.g. `Person_Phone`) that are associated with the parent `Person` entity and reinserts the ones that are still found in the `@OneToMany` collection.
|
||||
|
||||
On the other hand, a bidirectional `@OneToMany` association is much more efficient because the child entity controls the association.
|
||||
====
|
||||
|
@ -123,7 +123,8 @@ include::{extrasdir}/associations-one-to-many-bidirectional-example.sql[]
|
|||
[IMPORTANT]
|
||||
====
|
||||
Whenever a bidirectional association is formed, the application developer must make sure both sides are in-sync at all times.
|
||||
The `addPhone()` and `removePhone()` are utilities methods that synchronize both ends whenever a child element is added or removed.
|
||||
|
||||
The `addPhone()` and `removePhone()` are utility methods that synchronize both ends whenever a child element is added or removed.
|
||||
====
|
||||
|
||||
Because the `Phone` class has a `@NaturalId` column (the phone number being unique),
|
||||
|
@ -146,7 +147,7 @@ include::{extrasdir}/associations-one-to-many-bidirectional-lifecycle-example.sq
|
|||
Unlike the unidirectional `@OneToMany`, the bidirectional association is much more efficient when managing the collection persistence state.
|
||||
Every element removal only requires a single update (in which the foreign key column is set to `NULL`), and,
|
||||
if the child entity lifecycle is bound to its owning parent so that the child cannot exist without its parent,
|
||||
then we can annotate the association with the `orphan-removal` attribute and disassociating the child will trigger a delete statement on the actual child table row as well.
|
||||
then we can annotate the association with the `orphanRemoval` attribute and dissociating the child will trigger a delete statement on the actual child table row as well.
|
||||
|
||||
[[associations-one-to-one]]
|
||||
==== `@OneToOne`
|
||||
|
@ -176,7 +177,7 @@ From a relational database point of view, the underlying schema is identical to
|
|||
as the client-side controls the relationship based on the foreign key column.
|
||||
|
||||
But then, it's unusual to consider the `Phone` as a client-side and the `PhoneDetails` as the parent-side because the details cannot exist without an actual phone.
|
||||
A much more natural mapping would be if the `Phone` were the parent-side, therefore pushing the foreign key into the `PhoneDetails` table.
|
||||
A much more natural mapping would be if the `Phone` were the parent-side, therefore pushing the foreign key into the `PhoneDetails` table.
|
||||
This mapping requires a bidirectional `@OneToOne` association as you can see in the following example:
|
||||
|
||||
[[associations-one-to-one-bidirectional]]
|
||||
|
@ -248,13 +249,13 @@ include::{sourcedir}/OneToOneBidirectionalLazyTest.java[tags=associations-one-to
|
|||
====
|
||||
|
||||
For more about how to enable Bytecode enhancement,
|
||||
see the <<chapters/pc/BytecodeEnhancement.adoc#BytecodeEnhancement, BytecodeEnhancement chapter>>.
|
||||
see the <<chapters/pc/BytecodeEnhancement.adoc#BytecodeEnhancement, Bytecode Enhancement chapter>>.
|
||||
|
||||
[[associations-many-to-many]]
|
||||
==== `@ManyToMany`
|
||||
|
||||
The `@ManyToMany` association requires a link table that joins two entities.
|
||||
Like the `@OneToMany` association, `@ManyToMany` can be a either unidirectional or bidirectional.
|
||||
Like the `@OneToMany` association, `@ManyToMany` can be either unidirectional or bidirectional.
|
||||
|
||||
[[associations-many-to-many-unidirectional]]
|
||||
===== Unidirectional `@ManyToMany`
|
||||
|
@ -392,8 +393,9 @@ Because this mapping is formed out of two bidirectional associations, the helper
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
The aforementioned example uses a Hibernate specific mapping for the link entity since JPA doesn't allow building a composite identifier out of multiple `@ManyToOne` associations.
|
||||
For more details, see the <<chapters/domain/identifiers.adoc#identifiers-composite-associations,Composite identifiers - associations>> section.
|
||||
The aforementioned example uses a Hibernate-specific mapping for the link entity since JPA doesn't allow building a composite identifier out of multiple `@ManyToOne` associations.
|
||||
|
||||
For more details, see the <<chapters/domain/identifiers.adoc#identifiers-composite-associations,composite identifiers with associations>> section.
|
||||
====
|
||||
|
||||
The entity state transitions are better managed than in the previous bidirectional `@ManyToMany` case.
|
||||
|
@ -425,6 +427,11 @@ However, you can configure this behavior so that Hibernate can ignore such an Ex
|
|||
|
||||
To ignore non-existing parent entity references, even though not really recommended, it's possible to use the `org.hibernate.annotations.NotFound` annotation with a value of `org.hibernate.annotations.NotFoundAction.IGNORE`.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The `@ManyToOne` and `@OneToOne` associations that are annotated with `@NotFound(action = NotFoundAction.IGNORE)` are always fetched eagerly even if the `fetch` strategy is set to `FetchType.LAZY`.
|
||||
====
|
||||
|
||||
Considering the following `City` and `Person` entity mappings:
|
||||
|
||||
[[associations-not-found-domain-model-example]]
|
||||
|
@ -439,7 +446,7 @@ include::{sourcedir}/NotFoundTest.java[tags=associations-not-found-domain-model-
|
|||
If we have the following entities in our database:
|
||||
|
||||
[[associations-not-found-persist-example]]
|
||||
.`@NotFound` mapping example
|
||||
.`@NotFound` persist example
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
|
@ -458,7 +465,7 @@ include::{sourcedir}/NotFoundTest.java[tags=associations-not-found-find-example,
|
|||
----
|
||||
====
|
||||
|
||||
However, if we change the `cityName` attribute to a non-existing city:
|
||||
However, if we change the `cityName` attribute to a non-existing city's name:
|
||||
|
||||
[[associations-not-found-non-existing-persist-example]]
|
||||
.`@NotFound` change to non-existing City example
|
||||
|
@ -479,3 +486,273 @@ Hibernate is not going to throw any exception, and it will assign a value of `nu
|
|||
include::{sourcedir}/NotFoundTest.java[tags=associations-not-found-non-existing-find-example,indent=0]
|
||||
----
|
||||
====
|
||||
|
||||
[[associations-any]]
|
||||
==== `@Any` mapping
|
||||
|
||||
The `@Any` mapping is useful to emulate a unidirectional `@ManyToOne` association when there can be multiple target entities.
|
||||
|
||||
Because the `@Any` mapping defines a polymorphic association to classes from multiple tables,
|
||||
this association type requires the FK column which provides the associated parent identifier and
|
||||
the metadata information for the associated entity type.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
This is not the usual way of mapping polymorphic associations and you should use this only in special cases (e.g. audit logs, user session data, etc).
|
||||
====
|
||||
|
||||
The `@Any` annotation describes the column holding the metadata information.
|
||||
To link the value of the metadata information and an actual entity type, the `@AnyDef` and `@AnyDefs` annotations are used.
|
||||
The `metaType` attribute allows the application to specify a custom type that maps database column values to persistent classes that have identifier properties of the type specified by `idType`.
|
||||
You must specify the mapping from values of the `metaType` to class names.
|
||||
|
||||
For the next examples, consider the following `Property` class hierarchy:
|
||||
|
||||
[[associations-any-property-example]]
|
||||
.`Property` class hierarchy
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/any/Property.java[tags=associations-any-property-example]
|
||||
|
||||
include::{sourcedir}/any/IntegerProperty.java[tags=associations-any-property-example]
|
||||
|
||||
include::{sourcedir}/any/StringProperty.java[tags=associations-any-property-example]
|
||||
----
|
||||
====
|
||||
|
||||
A `PropertyHolder` can reference any such property, and, because each `Property` belongs to a separate table, the `@Any` annotation is, therefore, required.
|
||||
|
||||
[[associations-any-example]]
|
||||
.`@Any` mapping usage
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/any/PropertyHolder.java[tags=associations-any-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/associations-any-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
As you can see, there are two columns used to reference a `Property` instance: `property_id` and `property_type`.
|
||||
The `property_id` is used to match the `id` column of either the `string_property` or `integer_property` tables,
|
||||
while the `property_type` is used to match the `string_property` or the `integer_property` table.
|
||||
|
||||
The table resolving mapping is defined by the `metaDef` attribute which references an `@AnyMetaDef` mapping.
|
||||
|
||||
The `package-info.java` contains the `@AnyMetaDef` mapping:
|
||||
|
||||
[[associations-any-meta-def-example]]
|
||||
.`@AnyMetaDef` mapping usage
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/any/package-info.java[tags=associations-any-meta-def-example]
|
||||
----
|
||||
====
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
Although the `@AnyMetaDef` mapping could be set right next to the `@Any` annotation, it is good practice to configure it at the class or package level, especially if you need to reuse it for multiple `@Any` mappings.
|
||||
====
|
||||
|
||||
To see the `@Any` annotation in action, consider the next examples.
|
||||
|
||||
If we persist an `IntegerProperty` as well as a `StringProperty` entity, and associate
|
||||
the `StringProperty` entity with a `PropertyHolder`,
|
||||
Hibernate will generate the following SQL queries:
|
||||
|
||||
[[associations-any-persist-example]]
|
||||
.`@Any` mapping persist example
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/any/AnyTest.java[tags=associations-any-persist-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/associations-any-persist-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
When fetching the `PropertyHolder` entity and navigating its `property` association,
|
||||
Hibernate will fetch the associated `StringProperty` entity like this:
|
||||
|
||||
[[associations-any-query-example]]
|
||||
.`@Any` mapping query example
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/any/AnyTest.java[tags=associations-any-query-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/associations-any-query-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
[[associations-many-to-any]]
|
||||
===== `@ManyToAny` mapping
|
||||
|
||||
While the `@Any` mapping is useful to emulate a `@ManyToOne` association when there can be multiple target entities,
|
||||
to emulate a `@OneToMany` association, the `@ManyToAny` annotation must be used.
|
||||
|
||||
In the following example, the `PropertyRepository` entity has a collection of `Property` entities.
|
||||
|
||||
The `repository_properties` link table holds the associations between `PropertyRepository` and `Property` entities.
|
||||
|
||||
[[associations-many-to-any-example]]
|
||||
.`@ManyToAny` mapping usage
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/any/PropertyRepository.java[tags=associations-many-to-any-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/associations-many-to-any-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
To see the `@ManyToAny` annotation in action, consider the next examples.
|
||||
|
||||
If we persist an `IntegerProperty` as well as a `StringProperty` entity,
|
||||
and associate both of them with a `PropertyRepository` parent entity,
|
||||
Hibernate will generate the following SQL queries:
|
||||
|
||||
[[associations-many-to-any-persist-example]]
|
||||
.`@ManyToAny` mapping persist example
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/any/ManyToAnyTest.java[tags=associations-many-to-any-persist-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/associations-many-to-any-persist-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
When fetching the `PropertyRepository` entity and navigating its `properties` association,
|
||||
Hibernate will fetch the associated `IntegerProperty` and `StringProperty` entities like this:
|
||||
|
||||
[[associations-many-to-any-query-example]]
|
||||
.`@ManyToAny` mapping query example
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/any/ManyToAnyTest.java[tags=associations-many-to-any-query-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/associations-many-to-any-query-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
[[associations-JoinFormula]]
|
||||
==== `@JoinFormula` mapping
|
||||
|
||||
The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/JoinFormula.html[`@JoinFormula`] annotation is used to customize the join between a child Foreign Key and a parent row Primary Key.
|
||||
|
||||
[[associations-JoinFormula-example]]
|
||||
.`@JoinFormula` mapping usage
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/JoinFormulaTest.java[tags=associations-JoinFormula-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/associations-JoinFormula-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
The `country` association in the `User` entity is mapped by the country identifier provided by the `phoneNumber` property.
|
||||
|
||||
Considering we have the following entities:
|
||||
|
||||
[[associations-JoinFormula-persistence-example]]
|
||||
.`@JoinFormula` mapping usage
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/JoinFormulaTest.java[tags=associations-JoinFormula-persistence-example]
|
||||
----
|
||||
====
|
||||
|
||||
When fetching the `User` entities, the `country` property is mapped by the `@JoinFormula` expression:
|
||||
|
||||
[[associations-JoinFormula-fetching-example]]
|
||||
.`@JoinFormula` mapping usage
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/JoinFormulaTest.java[tags=associations-JoinFormula-fetching-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/associations-JoinFormula-fetching-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
Therefore, the `@JoinFormula` annotation is used to define a custom join association between the parent-child association.
|
||||
|
||||
[[associations-JoinColumnOrFormula]]
|
||||
==== `@JoinColumnOrFormula` mapping
|
||||
|
||||
The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/JoinColumnOrFormula.html[`@JoinColumnOrFormula`] annotation is used to customize the join between a child Foreign Key and a parent row Primary Key when we need to take into consideration a column value as well as a `@JoinFormula`.
|
||||
|
||||
[[associations-JoinColumnOrFormula-example]]
|
||||
.`@JoinColumnOrFormula` mapping usage
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/JoinColumnOrFormulaTest.java[tags=associations-JoinColumnOrFormula-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/associations-JoinColumnOrFormula-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
The `country` association in the `User` entity is mapped by the `language` property value and the associated `Country` `is_default` column value.
|
||||
|
||||
Considering we have the following entities:
|
||||
|
||||
[[associations-JoinColumnOrFormula-persistence-example]]
|
||||
.`@JoinColumnOrFormula` persist example
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/JoinColumnOrFormulaTest.java[tags=associations-JoinColumnOrFormula-persistence-example]
|
||||
----
|
||||
====
|
||||
|
||||
When fetching the `User` entities, the `country` property is mapped by the `@JoinColumnOrFormula` expression:
|
||||
|
||||
[[associations-JoinColumnOrFormula-fetching-example]]
|
||||
.`@JoinColumnOrFormula` fetching example
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/JoinColumnOrFormulaTest.java[tags=associations-JoinColumnOrFormula-fetching-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/associations-JoinColumnOrFormula-fetching-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
Therefore, the `@JoinColumnOrFormula` annotation is used to define a custom join association between the parent-child association.
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -3,11 +3,11 @@
|
|||
:sourcedir: ../../../../../test/java/org/hibernate/userguide/collections
|
||||
:extrasdir: extras/collections
|
||||
|
||||
Naturally Hibernate also allows to persist collections.
|
||||
These persistent collections can contain almost any other Hibernate type, including: basic types, custom types, embeddables and references to other entities.
|
||||
Naturally Hibernate also allows persisting collections.
|
||||
These persistent collections can contain almost any other Hibernate type, including basic types, custom types, embeddables, and references to other entities.
|
||||
In this context, the distinction between value and reference semantics is very important.
|
||||
An object in a collection might be handled with _value_ semantics (its life cycle being fully depends on the collection owner),
|
||||
or it might be a reference to another entity with its own life cycle.
|
||||
An object in a collection might be handled with _value_ semantics (its lifecycle being fully dependent on the collection owner),
|
||||
or it might be a reference to another entity with its own lifecycle.
|
||||
In the latter case, only the _link_ between the two objects is considered to be a state held by the collection.
|
||||
|
||||
The owner of the collection is always an entity, even if the collection is defined by an embeddable type.
|
||||
|
@ -37,6 +37,7 @@ include::{sourcedir}/BasicTypeElementCollectionTest.java[tags=collections-collec
|
|||
[NOTE]
|
||||
====
|
||||
It is important that collections be defined using the appropriate Java Collections Framework interface rather than a specific implementation.
|
||||
|
||||
From a theoretical perspective, this just follows good design principles.
|
||||
From a practical perspective, Hibernate (like other persistence providers) will use their own collection implementations which conform to the Java Collections Framework interfaces.
|
||||
====
|
||||
|
@ -46,7 +47,7 @@ The persistent collections injected by Hibernate behave like `ArrayList`, `HashS
|
|||
[[collections-synopsis]]
|
||||
==== Collections as a value type
|
||||
|
||||
Value and embeddable type collections have a similar behavior as simple value types because they are automatically persisted when referenced by a persistent object and automatically deleted when unreferenced.
|
||||
Value and embeddable type collections have a similar behavior to basic types since they are automatically persisted when referenced by a persistent object and automatically deleted when unreferenced.
|
||||
If a collection is passed from one persistent object to another, its elements might be moved from one table to another.
|
||||
|
||||
[IMPORTANT]
|
||||
|
@ -101,7 +102,7 @@ Depending on the number of elements, this behavior might not be efficient, if ma
|
|||
A workaround is to use an `@OrderColumn`, which, although not as efficient as when using the actual link table primary key, might improve the efficiency of the remove operations.
|
||||
|
||||
[[collections-value-type-collection-order-column-remove-example]]
|
||||
.Removing collection elements using the order column
|
||||
.Removing collection elements using @OrderColumn
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
|
@ -170,7 +171,7 @@ In the following sections, we will go through all these collection types and dis
|
|||
[[collections-bag]]
|
||||
==== Bags
|
||||
|
||||
Bags are unordered lists and we can have unidirectional bags or bidirectional ones.
|
||||
Bags are unordered lists, and we can have unidirectional bags or bidirectional ones.
|
||||
|
||||
[[collections-unidirectional-bag]]
|
||||
===== Unidirectional bags
|
||||
|
@ -195,7 +196,8 @@ include::{extrasdir}/collections-unidirectional-bag-example.sql[]
|
|||
[NOTE]
|
||||
====
|
||||
Because both the parent and the child sides are entities, the persistence context manages each entity separately.
|
||||
Cascades can propagate an entity state transition from a parent entity to its children.
|
||||
|
||||
The cascading mechanism allows you to propagate an entity state transition from a parent entity to its children.
|
||||
====
|
||||
|
||||
By marking the parent side with the `CascadeType.ALL` attribute, the unidirectional association lifecycle becomes very similar to that of a value type collection.
|
||||
|
@ -219,7 +221,8 @@ In the example above, once the parent entity is persisted, the child entities ar
|
|||
[NOTE]
|
||||
====
|
||||
Just like value type collections, unidirectional bags are not as efficient when it comes to modifying the collection structure (removing or reshuffling elements).
|
||||
Because the parent-side cannot uniquely identify each individual child, Hibernate might delete all child table rows associated with the parent entity and re-add them according to the current collection state.
|
||||
|
||||
Because the parent-side cannot uniquely identify each individual child, Hibernate deletes all link table rows associated with the parent entity and re-adds the remaining ones that are found in the current collection state.
|
||||
====
|
||||
|
||||
[[collections-bidirectional-bag]]
|
||||
|
@ -270,7 +273,7 @@ include::{extrasdir}/collections-bidirectional-bag-orphan-removal-example.sql[]
|
|||
----
|
||||
====
|
||||
|
||||
When rerunning the previous example, the child will get removed because the parent-side propagates the removal upon disassociating the child entity reference.
|
||||
When rerunning the previous example, the child will get removed because the parent-side propagates the removal upon dissociating the child entity reference.
|
||||
|
||||
[[collections-list]]
|
||||
==== Ordered Lists
|
||||
|
@ -414,11 +417,11 @@ include::{extrasdir}/collections-customizing-ordered-list-ordinal-persist-exampl
|
|||
===== Customizing ORDER BY SQL clause
|
||||
|
||||
While the JPA
|
||||
http://docs.oracle.com/javaee/7/api/javax/persistence/OrderBy.html[`@OrderBy`] annotation allows you to specify the entity attributes used for sorting
|
||||
{jpaJavadocUrlPrefix}OrderBy.html[`@OrderBy`] annotation allows you to specify the entity attributes used for sorting
|
||||
when fetching the current annotated collection, the Hibernate specific
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/OrderBy.html[`@OrderBy`] annotation is used to specify a *SQL* clause instead.
|
||||
|
||||
In the following example, the `@OrderBy` annotations uses the `CHAR_LENGTH` SQL function to order the `Article` entities
|
||||
In the following example, the `@OrderBy` annotation uses the `CHAR_LENGTH` SQL function to order the `Article` entities
|
||||
by the number of characters of the `name` attribute.
|
||||
|
||||
[[collections-customizing-ordered-by-sql-clause-mapping-example]]
|
||||
|
@ -470,7 +473,8 @@ The only difference is that `Set` doesn't allow duplicates, but this constraint
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
When using sets, it's very important to supply proper equals/hashCode implementations for child entities.
|
||||
When using Sets, it's very important to supply proper equals/hashCode implementations for child entities.
|
||||
|
||||
In the absence of a custom equals/hashCode implementation logic, Hibernate will use the default Java reference-based object equality which might render unexpected results when mixing detached and managed object instances.
|
||||
====
|
||||
|
||||
|
@ -541,7 +545,7 @@ include::{sourcedir}/UnidirectionalComparatorSortedSetTest.java[lines=75..77,ind
|
|||
[[collections-map]]
|
||||
==== Maps
|
||||
|
||||
A `java.util.Map` is a ternary association because it requires a parent entity, a map key and a value.
|
||||
A `java.util.Map` is a ternary association because it requires a parent entity, a map key, and a value.
|
||||
An entity can either be a map key or a map value, depending on the mapping.
|
||||
Hibernate allows using the following map keys:
|
||||
|
||||
|
@ -601,12 +605,11 @@ include::{extrasdir}/collections-map-custom-key-type-sql-example.sql[]
|
|||
----
|
||||
|
||||
The `call_register` records the call history for every `person`.
|
||||
The `call_timestamp_epoch` column stores the phone call timestamp as a Unix timestamp since epoch.
|
||||
The `call_timestamp_epoch` column stores the phone call timestamp as a Unix timestamp since the Unix epoch.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The `@MapKeyColumn` annotation is used to define the table column holding the key
|
||||
while the `@Column` mapping gives the value of the `java.util.Map` in question.
|
||||
The `@MapKeyColumn` annotation is used to define the table column holding the key while the `@Column` mapping gives the value of the `java.util.Map` in question.
|
||||
====
|
||||
|
||||
Since we want to map all the calls by their associated `java.util.Date`, not by their timestamp since epoch which is a number, the entity mapping looks as follows:
|
||||
|
@ -644,7 +647,7 @@ include::{sourcedir}/MapKeyClassTest.java[tags=collections-map-key-class-type-ma
|
|||
====
|
||||
|
||||
If you want to use the `PhoneNumber` interface as a `java.util.Map` key, then you need to supply the
|
||||
http://docs.oracle.com/javaee/7/api/javax/persistence/MapKeyClass.html[`@MapKeyClass`] annotation as well.
|
||||
{jpaJavadocUrlPrefix}MapKeyClass.html[`@MapKeyClass`] annotation as well.
|
||||
|
||||
[[collections-map-key-class-mapping-example]]
|
||||
.`@MapKeyClass` mapping example
|
||||
|
@ -700,7 +703,7 @@ include::{extrasdir}/collections-map-key-class-fetch-example.sql[]
|
|||
A unidirectional map exposes a parent-child association from the parent-side only.
|
||||
|
||||
The following example shows a unidirectional map which also uses a `@MapKeyTemporal` annotation.
|
||||
The map key is a timestamp and it's taken from the child entity table.
|
||||
The map key is a timestamp, and it's taken from the child entity table.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
|
@ -851,7 +854,7 @@ The reason why the `Queue` interface is not used for the entity attribute is bec
|
|||
- `java.util.SortedSet`
|
||||
- `java.util.SortedMap`
|
||||
|
||||
However, the custom collection type can still be customized as long as the base type is one of the aformentioned persistent types.
|
||||
However, the custom collection type can still be customized as long as the base type is one of the aforementioned persistent types.
|
||||
====
|
||||
|
||||
This way, the `Phone` collection can be used as a `java.util.Queue`:
|
||||
|
|
|
@ -7,19 +7,19 @@
|
|||
[IMPORTANT]
|
||||
====
|
||||
JPA only acknowledges the entity model mapping so, if you are concerned about JPA provider portability, it's best to stick to the strict POJO model.
|
||||
On the other hand, Hibernate can work with both POJO entities as well as with dynamic entity models.
|
||||
On the other hand, Hibernate can work with both POJO entities and dynamic entity models.
|
||||
====
|
||||
|
||||
[[mapping-model-dynamic]]
|
||||
==== Dynamic mapping models
|
||||
|
||||
Persistent entities do not necessarily have to be represented as POJO/JavaBean classes.
|
||||
Hibernate also supports dynamic models (using `Map` of `Maps` at runtime).
|
||||
Hibernate also supports dynamic models (using `Map` of ``Map``s at runtime).
|
||||
With this approach, you do not write persistent classes, only mapping files.
|
||||
|
||||
A given entity has just one entity mode within a given SessionFactory.
|
||||
This is a change from previous versions which allowed to define multiple entity modes for an entity and to select which to load.
|
||||
Entity modes can now be mixed within a domain model; a dynamic entity might reference a POJO entity, and vice versa.
|
||||
Entity modes can now be mixed within a domain model; a dynamic entity might reference a POJO entity and vice versa.
|
||||
|
||||
[[mapping-model-dynamic-example]]
|
||||
.Dynamic domain model Hibernate mapping
|
||||
|
@ -60,8 +60,8 @@ include::{extrasdir}/dynamic/mapping-model-dynamic-example.sql[indent=0]
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
The main advantage of dynamic models is quick turnaround time for prototyping without the need for entity class implementation.
|
||||
The main down-fall is that you lose compile-time type checking and will likely deal with many exceptions at runtime.
|
||||
The main advantage of dynamic models is the quick turnaround time for prototyping without the need for entity class implementation.
|
||||
The main downfall is that you lose compile-time type checking and will likely deal with many exceptions at runtime.
|
||||
However, as a result of the Hibernate mapping, the database schema can easily be normalized and sound, allowing to add a proper domain model implementation on top later on.
|
||||
|
||||
It is also interesting to note that dynamic models are great for certain integration use cases as well.
|
||||
|
|
|
@ -5,17 +5,17 @@
|
|||
|
||||
Historically Hibernate called these components.
|
||||
JPA calls them embeddables.
|
||||
Either way the concept is the same: a composition of values.
|
||||
Either way, the concept is the same: a composition of values.
|
||||
|
||||
For example we might have a `Publisher` class that is a composition of `name` and `country`,
|
||||
For example, we might have a `Publisher` class that is a composition of `name` and `country`,
|
||||
or a `Location` class that is a composition of `country` and `city`.
|
||||
|
||||
.Usage of the word _embeddable_
|
||||
[NOTE]
|
||||
====
|
||||
To avoid any confusion with the annotation that marks a given embeddable type, the annotation will be further referred as `@Embeddable`.
|
||||
To avoid any confusion with the annotation that marks a given embeddable type, the annotation will be further referred to as `@Embeddable`.
|
||||
|
||||
Throughout this chapter and thereafter, for brevity sake, embeddable types may also be referred as _embeddable_.
|
||||
Throughout this chapter and thereafter, for brevity's sake, embeddable types may also be referred to as _embeddable_.
|
||||
====
|
||||
|
||||
[[embeddable-type-mapping-example]]
|
||||
|
@ -27,7 +27,7 @@ include::{sourcedir}/NestedEmbeddableTest.java[tag=embeddable-type-mapping-examp
|
|||
----
|
||||
====
|
||||
|
||||
An embeddable type is another form of value type, and its lifecycle is bound to a parent entity type, therefore inheriting the attribute access from its parent (for details on attribute access, see <<chapters/domain/entity.adoc#access-embeddable-types,Access strategies>>).
|
||||
An embeddable type is another form of a value type, and its lifecycle is bound to a parent entity type, therefore inheriting the attribute access from its parent (for details on attribute access, see <<chapters/domain/entity.adoc#access-embeddable-types,Access strategies>>).
|
||||
|
||||
Embeddable types can be made up of basic values as well as associations, with the caveat that, when used as collection elements, they cannot define collections themselves.
|
||||
|
||||
|
@ -36,7 +36,7 @@ Embeddable types can be made up of basic values as well as associations, with th
|
|||
Most often, embeddable types are used to group multiple basic type mappings and reuse them across several entities.
|
||||
|
||||
[[simple-embeddable-type-mapping-example]]
|
||||
.Simple Embeddedable
|
||||
.Simple Embeddable
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
|
@ -55,14 +55,14 @@ JPA defines two terms for working with an embeddable type: `@Embeddable` and `@E
|
|||
|
||||
`@Embeddable` is used to describe the mapping type itself (e.g. `Publisher`).
|
||||
|
||||
`@Embedded` is for referencing a given embeddable type (e.g. `book#publisher`).
|
||||
`@Embedded` is for referencing a given embeddable type (e.g. `book.publisher`).
|
||||
====
|
||||
|
||||
So, the embeddable type is represented by the `Publisher` class and
|
||||
the parent entity makes use of it through the `book#publisher` object composition.
|
||||
|
||||
The composed values are mapped to the same table as the parent table.
|
||||
Composition is part of good Object-oriented data modeling (idiomatic Java).
|
||||
Composition is part of good object-oriented data modeling (idiomatic Java).
|
||||
In fact, that table could also be mapped by the following entity type instead.
|
||||
|
||||
[[alternative-to-embeddable-type-mapping-example]]
|
||||
|
@ -74,13 +74,13 @@ include::{sourcedir}/SimpleEmbeddableEquivalentTest.java[tag=embeddable-type-map
|
|||
----
|
||||
====
|
||||
|
||||
The composition form is certainly more Object-oriented, and that becomes more evident as we work with multiple embeddable types.
|
||||
The composition form is certainly more object-oriented, and that becomes more evident as we work with multiple embeddable types.
|
||||
|
||||
[[embeddable-multiple]]
|
||||
==== Multiple embeddable types
|
||||
|
||||
Although from an object-oriented perspective, it's much more convenient to work with embeddable types, this example doesn't work as-is.
|
||||
When the same embeddable type is included multiple times in the same parent entity type, the JPA specification demands setting the associated column names explicitly.
|
||||
When the same embeddable type is included multiple times in the same parent entity type, the JPA specification demands to set the associated column names explicitly.
|
||||
|
||||
This requirement is due to how object properties are mapped to database columns.
|
||||
By default, JPA expects a database column having the same name with its associated object property.
|
||||
|
@ -94,10 +94,10 @@ We have a few options to handle this issue.
|
|||
JPA defines the `@AttributeOverride` annotation to handle this scenario.
|
||||
This way, the mapping conflict is resolved by setting up explicit name-based property-column type mappings.
|
||||
|
||||
If an Embeddabe type is used multiple times in some entity, you need to use the
|
||||
http://docs.oracle.com/javaee/7/api/javax/persistence/AttributeOverride.html[`@AttributeOverride`] and
|
||||
http://docs.oracle.com/javaee/7/api/javax/persistence/AssociationOverride.html[`@AssociationOverride`] annotations
|
||||
to override the default column names definied by the Embeddable.
|
||||
If an Embeddable type is used multiple times in some entity, you need to use the
|
||||
{jpaJavadocUrlPrefix}AttributeOverride.html[`@AttributeOverride`] and
|
||||
{jpaJavadocUrlPrefix}AssociationOverride.html[`@AssociationOverride`] annotations
|
||||
to override the default column names defined by the Embeddable.
|
||||
|
||||
Considering you have the following `Publisher` embeddable type
|
||||
which defines a `@ManyToOne` association with the `Country` entity:
|
||||
|
@ -116,7 +116,7 @@ include::{extrasdir}/embeddable/embeddable-type-association-mapping-example.sql[
|
|||
----
|
||||
====
|
||||
|
||||
Now, if you have a `Book` entity which declares two `Publisher` embeddable types for the ebook and paperback version,
|
||||
Now, if you have a `Book` entity which declares two `Publisher` embeddable types for the ebook and paperback versions,
|
||||
you cannot use the default `Publisher` embeddable mapping since there will be a conflict between the two embeddable column mappings.
|
||||
|
||||
Therefore, the `Book` entity needs to override the embeddable type mappings for each `Publisher` attribute:
|
||||
|
@ -140,7 +140,7 @@ include::{extrasdir}/embeddable/embeddable-type-override-mapping-example.sql[]
|
|||
|
||||
[IMPORTANT]
|
||||
====
|
||||
This is a Hibernate specific feature.
|
||||
The `ImplicitNamingStrategyComponentPathImpl` is a Hibernate-specific feature.
|
||||
Users concerned with JPA provider portability should instead prefer explicit column naming with `@AttributeOverride`.
|
||||
====
|
||||
|
||||
|
@ -179,17 +179,17 @@ You could even develop your own naming strategy to do other types of implicit na
|
|||
[[embeddable-collections]]
|
||||
==== Collections of embeddable types
|
||||
|
||||
Collections of embeddable types are specifically value collections (as embeddable types are a value type).
|
||||
Collections of embeddable types are specifically valued collections (as embeddable types are value types).
|
||||
Value collections are covered in detail in <<chapters/domain/collections.adoc#collections-value,Collections of value types>>.
|
||||
|
||||
[[embeddable-mapkey]]
|
||||
==== Embeddable types as Map key
|
||||
==== Embeddable type as a Map key
|
||||
|
||||
Embeddable types can also be used as `Map` keys.
|
||||
This topic is covered in detail in <<chapters/domain/collections.adoc#collections-map,Map - key>>.
|
||||
|
||||
[[embeddable-identifier]]
|
||||
==== Embeddable types as identifiers
|
||||
==== Embeddable type as identifier
|
||||
|
||||
Embeddable types can also be used as entity type identifiers.
|
||||
This usage is covered in detail in <<chapters/domain/identifiers.adoc#identifiers-composite,Composite identifiers>>.
|
||||
|
@ -197,4 +197,99 @@ This usage is covered in detail in <<chapters/domain/identifiers.adoc#identifier
|
|||
[IMPORTANT]
|
||||
====
|
||||
Embeddable types that are used as collection entries, map keys or entity type identifiers cannot include their own collection mappings.
|
||||
====
|
||||
====
|
||||
|
||||
[[embeddable-Target]]
|
||||
==== `@Target` mapping
|
||||
|
||||
The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Target.html[`@Target`] annotation is used to specify the implementation class of a given association that is mapped via an interface.
|
||||
The
|
||||
{jpaJavadocUrlPrefix}ManyToOne.html[`@ManyToOne`],
|
||||
{jpaJavadocUrlPrefix}OneToOne.html[`@OneToOne`],
|
||||
{jpaJavadocUrlPrefix}OneToMany.html[`@OneToMany`], and
|
||||
{jpaJavadocUrlPrefix}ManyToMany.html[`@ManyToMany`]
|
||||
feature a {jpaJavadocUrlPrefix}ManyToOne.html#targetEntity--[`targetEntity`] attribute to specify the actual class of the entity association when an interface is used for the mapping.
|
||||
|
||||
The {jpaJavadocUrlPrefix}ElementCollection.html[`@ElementCollection`] association has a {jpaJavadocUrlPrefix}ElementCollection.html#targetClass--[`targetClass`] attribute for the same purpose.
|
||||
|
||||
However, for simple embeddable types, there is no such construct and so you need to use the Hibernate-specific `@Target` annotation instead.
|
||||
|
||||
[[embeddable-Target-example]]
|
||||
.`@Target` mapping usage
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/TargetTest.java[tags=embeddable-Target-example]
|
||||
----
|
||||
====
|
||||
|
||||
The `coordinates` embeddable type is mapped as the `Coordinates` interface.
|
||||
However, Hibernate needs to know the actual implementation type, which is `GPS` in this case,
|
||||
hence the `@Target` annotation is used to provide this information.
|
||||
|
||||
Assuming we have persisted the following `City` entity:
|
||||
|
||||
[[embeddable-Target-persist-example]]
|
||||
.`@Target` persist example
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/TargetTest.java[tags=embeddable-Target-persist-example]
|
||||
----
|
||||
====
|
||||
|
||||
When fetching the `City` entity, the `coordinates` property is mapped by the `@Target` expression:
|
||||
|
||||
[[embeddable-Target-fetching-example]]
|
||||
.`@Target` fetching example
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/TargetTest.java[tags=embeddable-Target-fetching-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/embeddable/embeddable-Target-fetching-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
Therefore, the `@Target` annotation is used to define a custom join association between the parent-child association.
|
||||
|
||||
[[embeddable-Parent]]
|
||||
==== `@Parent` mapping
|
||||
|
||||
The Hibernate-specific `@Parent` annotation allows you to reference the owner entity from within an embeddable.
|
||||
|
||||
[[embeddable-Parent-example]]
|
||||
.`@Parent` mapping usage
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/ParentTest.java[tags=embeddable-Parent-example]
|
||||
----
|
||||
====
|
||||
|
||||
Assuming we have persisted the following `City` entity:
|
||||
|
||||
[[embeddable-Parent-persist-example]]
|
||||
.`@Parent` persist example
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/ParentTest.java[tags=embeddable-Parent-persist-example]
|
||||
----
|
||||
====
|
||||
|
||||
When fetching the `City` entity, the `city` property of the embeddable type acts as a back reference to the owning parent entity:
|
||||
|
||||
[[embeddable-Parent-fetching-example]]
|
||||
.`@Parent` fetching example
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/ParentTest.java[tags=embeddable-Parent-fetching-example]
|
||||
----
|
||||
====
|
||||
|
||||
Therefore, the `@Parent` annotation is used to define the association between an embeddable type and the owning entity.
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
[[entity]]
|
||||
=== Entity types
|
||||
:sourcedir-locking: ../../../../../test/java/org/hibernate/userguide/locking
|
||||
:sourcedir-mapping: ../../../../../test/java/org/hibernate/userguide/mapping/
|
||||
:sourcedir-mapping: ../../../../../test/java/org/hibernate/userguide/mapping
|
||||
:sourcedir-proxy: ../../../../../test/java/org/hibernate/userguide/proxy
|
||||
:sourcedir-persister: ../../../../../test/java/org/hibernate/userguide/persister
|
||||
:extrasdir: extras
|
||||
|
@ -10,18 +10,18 @@
|
|||
[NOTE]
|
||||
====
|
||||
The entity type describes the mapping between the actual persistable domain model object and a database table row.
|
||||
To avoid any confusion with the annotation that marks a given entity type, the annotation will be further referred as `@Entity`.
|
||||
To avoid any confusion with the annotation that marks a given entity type, the annotation will be further referred to as `@Entity`.
|
||||
|
||||
Throughout this chapter and thereafter, entity types will be simply referred as _entity_.
|
||||
Throughout this chapter and thereafter, entity types will be simply referred to as _entity_.
|
||||
====
|
||||
|
||||
[[entity-pojo]]
|
||||
==== POJO Models
|
||||
|
||||
Section _2.1 The Entity Class_ of the _JPA 2.1 specification_ defines its requirements for an entity class.
|
||||
Applications that wish to remain portable across JPA providers should adhere to these requirements.
|
||||
Applications that wish to remain portable across JPA providers should adhere to these requirements:
|
||||
|
||||
* The entity class must be annotated with the `javax.persistence.Entity` annotation (or be denoted as such in XML mapping)
|
||||
* The entity class must be annotated with the `javax.persistence.Entity` annotation (or be denoted as such in XML mapping).
|
||||
* The entity class must have a public or protected no-argument constructor. It may define additional constructors as well.
|
||||
* The entity class must be a top-level class.
|
||||
* An enum or interface may not be designated as an entity.
|
||||
|
@ -38,7 +38,7 @@ Hibernate, however, is not as strict in its requirements. The differences from t
|
|||
* The entity class _need not_ be a top-level class.
|
||||
* Technically Hibernate can persist final classes or classes with final persistent state accessor (getter/setter) methods.
|
||||
However, it is generally not a good idea as doing so will stop Hibernate from being able to generate proxies for lazy-loading the entity.
|
||||
* Hibernate does not restrict the application developer from exposing instance variables and reference them from outside the entity class itself.
|
||||
* Hibernate does not restrict the application developer from exposing instance variables and referencing them from outside the entity class itself.
|
||||
The validity of such a paradigm, however, is debatable at best.
|
||||
|
||||
Let's look at each requirement in detail.
|
||||
|
@ -54,9 +54,9 @@ For the very same reason, you should also avoid declaring persistent attribute g
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
Starting in 5.0 Hibernate offers a more robust version of bytecode enhancement as another means for handling lazy loading.
|
||||
Starting with 5.0, Hibernate offers a more robust version of bytecode enhancement as another means for handling lazy loading.
|
||||
Hibernate had some bytecode re-writing capabilities prior to 5.0 but they were very rudimentary.
|
||||
See the <<chapters/pc/BytecodeEnhancement.adoc#BytecodeEnhancement,BytecodeEnhancement>> for additional information on fetching and on bytecode enhancement.
|
||||
See the <<chapters/pc/BytecodeEnhancement.adoc#BytecodeEnhancement,Bytecode Enhancement>> for additional information on fetching and on bytecode enhancement.
|
||||
====
|
||||
|
||||
[[entity-pojo-constructor]]
|
||||
|
@ -71,22 +71,23 @@ That said, the constructor should be defined with at least package visibility if
|
|||
[[entity-pojo-accessors]]
|
||||
==== Declare getters and setters for persistent attributes
|
||||
|
||||
The JPA specification requires this, otherwise the model would prevent accessing the entity persistent state fields directly from outside the entity itself.
|
||||
The JPA specification requires this, otherwise, the model would prevent accessing the entity persistent state fields directly from outside the entity itself.
|
||||
|
||||
Although Hibernate does not require it, it is recommended to follow the JavaBean conventions and define getters and setters for entity persistent attributes.
|
||||
Nevertheless, you can still tell Hibernate to directly access the entity fields.
|
||||
|
||||
Attributes (whether fields or getters/setters) need not be declared public.
|
||||
Hibernate can deal with attributes declared with public, protected, package or private visibility.
|
||||
Hibernate can deal with attributes declared with the public, protected, package or private visibility.
|
||||
Again, if wanting to use runtime proxy generation for lazy loading, the getter/setter should grant access to at least package visibility.
|
||||
|
||||
[[entity-pojo-identifier]]
|
||||
==== Provide identifier attribute(s)
|
||||
==== Providing identifier attribute(s)
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
Historically this was considered optional.
|
||||
However, not defining identifier attribute(s) on the entity should be considered a deprecated feature that will be removed in an upcoming release.
|
||||
Historically, providing identifier attributes was considered optional.
|
||||
|
||||
However, not defining identifier attributes on the entity should be considered a deprecated feature that will be removed in an upcoming release.
|
||||
====
|
||||
|
||||
The identifier attribute does not necessarily need to be mapped to the column(s) that physically define the primary key.
|
||||
|
@ -94,7 +95,7 @@ However, it should map to column(s) that can uniquely identify each row.
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
We recommend that you declare consistently-named identifier attributes on persistent classes and that you use a nullable (i.e., non-primitive) type.
|
||||
We recommend that you declare consistently-named identifier attributes on persistent classes and that you use a wrapper (i.e., non-primitive) type (e.g. `Long` or `Integer`).
|
||||
====
|
||||
|
||||
The placement of the `@Id` annotation marks the <<chapters/domain/access.adoc#access,persistence state access strategy>>.
|
||||
|
@ -113,12 +114,34 @@ Hibernate offers multiple identifier generation strategies, see the <<chapters/d
|
|||
[[entity-pojo-mapping]]
|
||||
==== Mapping the entity
|
||||
|
||||
The main piece in mapping the entity is the `javax.persistence.Entity` annotation.
|
||||
The `@Entity` annotation defines just one attribute `name` which is used to give a specific entity name for use in JPQL queries.
|
||||
By default, the entity name represents the unqualified name of the entity class itself.
|
||||
The main piece in mapping the entity is the {jpaJavadocUrlPrefix}Entity.html[`javax.persistence.Entity`] annotation.
|
||||
|
||||
The `@Entity` annotation defines just the {jpaJavadocUrlPrefix}Entity.html#name--[`name`] attribute which is used to give a specific entity name for use in JPQL queries.
|
||||
|
||||
By default, if the name attribute of the `@Entity` annotation is missing, the unqualified name of the entity class itself will be used as the entity name.
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
Because the entity name is given by the unqualified name of the class, Hibernate does not allow registering multiple entities with the same name even if the entity classes reside in different packages.
|
||||
|
||||
Without imposing this restriction, Hibernate would not know which entity class is referenced in a JPQL query if the unqualified entity name is associated with more than one entity class.
|
||||
====
|
||||
|
||||
In the following example, the entity name (e.g. `Book`) is given by the unqualified name of the entity class name.
|
||||
|
||||
[[entity-pojo-mapping-implicit-name-example]]
|
||||
.`@Entity` mapping with an implicit name
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir-mapping}/identifier/Book.java[tag=entity-pojo-mapping-implicit-name-example, indent=0]
|
||||
----
|
||||
====
|
||||
|
||||
However, the entity name can also be set explicitly as illustrated by the following example.
|
||||
|
||||
[[entity-pojo-mapping-example]]
|
||||
.Simple `@Entity` mapping
|
||||
.`@Entity` mapping with an explicit name
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
|
@ -140,6 +163,74 @@ include::{sourcedir-mapping}/identifier/SimpleEntityTableTest.java[tag=entity-po
|
|||
----
|
||||
====
|
||||
|
||||
[[mapping-entity-table-catalog]]
|
||||
===== Mapping the catalog of the associated table
|
||||
|
||||
Without specifying the catalog of the associated database table a given entity is mapped to, Hibernate will use the default catalog associated with the current database connection.
|
||||
|
||||
However, if your database hosts multiple catalogs, you can specify the catalog where a given table is located using the `catalog` attribute of the JPA {jpaJavadocUrlPrefix}Table.html[`@Table`] annotation.
|
||||
|
||||
Let's assume we are using MySQL and want to map a `Book` entity to the `book` table located in the `public` catalog
|
||||
which looks as follows.
|
||||
|
||||
[[mapping-post-table-catalog-mysql-example]]
|
||||
.The `book` table located in the `public` catalog
|
||||
====
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/entity/mapping-post-table-catalog-mysql-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
Now, to map the `Book` entity to the `book` table in the `public` catalog we can use the `catalog` attribute of the `@Table` JPA annotation.
|
||||
|
||||
[[mapping-entity-table-catalog-mysql-example]]
|
||||
.Specifying the database catalog using the `@Table` annotation
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir-mapping}/identifier/EntityTableCatalogTest.java[tag=mapping-entity-table-catalog-mysql-example, indent=0]
|
||||
----
|
||||
====
|
||||
|
||||
[[mapping-entity-table-schema]]
|
||||
===== Mapping the schema of the associated table
|
||||
|
||||
Without specifying the schema of the associated database table a given entity is mapped to, Hibernate will use the default schema associated with the current database connection.
|
||||
|
||||
However, if your database supports schemas, you can specify the schema where a given table is located using the `schema` attribute of the JPA {jpaJavadocUrlPrefix}Table.html[`@Table`] annotation.
|
||||
|
||||
Let's assume we are using PostgreSQL and want to map a `Book` entity to the `book` table located in the `library` schema
|
||||
which looks as follows.
|
||||
|
||||
[[mapping-post-table-schema-postgresql-example]]
|
||||
.The `book` table located in the `library` schema
|
||||
====
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/entity/mapping-post-table-schema-postgresql-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
Now, to map the `Book` entity to the `book` table in the `library` schema we can use the `schema` attribute of the `@Table` JPA annotation.
|
||||
|
||||
[[mapping-entity-table-catalog-mysql-example]]
|
||||
.Specifying the database schema using the `@Table` annotation
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir-mapping}/identifier/EntityTableSchemaTest.java[tag=mapping-entity-table-schema-postgresql-example, indent=0]
|
||||
----
|
||||
====
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
The `schema` attribute of the `@Table` annotation works only if the underlying database supports schemas (e.g. PostgreSQL).
|
||||
|
||||
Therefore, if you're using MySQL or MariaDB, which do not support schemas natively (schemas being just an alias for catalog), you need to use the
|
||||
`catalog` attribute, and not the `schema` one.
|
||||
====
|
||||
|
||||
[[mapping-model-pojo-equalshashcode]]
|
||||
==== Implementing `equals()` and `hashCode()`
|
||||
|
||||
|
@ -157,7 +248,7 @@ Beyond this one very specific use case and few others we will discuss below, you
|
|||
|
||||
So what's all the fuss? Normally, most Java objects provide a built-in `equals()` and `hashCode()` based on the object's identity, so each new object will be different from all others.
|
||||
This is generally what you want in ordinary Java programming.
|
||||
Conceptually however this starts to break down when you start to think about the possibility of multiple instances of a class representing the same data.
|
||||
Conceptually, however, this starts to break down when you start to think about the possibility of multiple instances of a class representing the same data.
|
||||
|
||||
This is, in fact, exactly the case when dealing with data coming from a database.
|
||||
Every time we load a specific `Person` from the database we would naturally get a unique instance.
|
||||
|
@ -177,7 +268,7 @@ include::{sourcedir-mapping}/identifier/SimpleEntityTest.java[tag=entity-pojo-id
|
|||
Consider we have a `Library` parent entity which contains a `java.util.Set` of `Book` entities:
|
||||
|
||||
[[entity-pojo-set-mapping-example]]
|
||||
Library entity mapping
|
||||
.Library entity mapping
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
|
@ -210,11 +301,11 @@ include::{sourcedir-mapping}/identifier/SimpleEntityTest.java[tag=entity-pojo-mu
|
|||
----
|
||||
====
|
||||
|
||||
Specifically the outcome in this last example will depend on whether the `Book` class
|
||||
Specifically, the outcome in this last example will depend on whether the `Book` class
|
||||
implemented equals/hashCode, and, if so, how.
|
||||
|
||||
If the `Book` class did not override the default equals/hashCode,
|
||||
then the two `Book` object reference are not going to be equal since their references are different.
|
||||
then the two `Book` object references are not going to be equal since their references are different.
|
||||
|
||||
Consider yet another case:
|
||||
|
||||
|
@ -253,7 +344,7 @@ include::{sourcedir-mapping}/identifier/NaiveEqualsHashCodeEntityTest.java[tag=e
|
|||
----
|
||||
====
|
||||
|
||||
The issue here is a conflict between the use of generated identifier, the contract of `Set` and the equals/hashCode implementations.
|
||||
The issue here is a conflict between the use of the generated identifier, the contract of `Set`, and the equals/hashCode implementations.
|
||||
`Set` says that the equals/hashCode value for an object should not change while the object is part of the `Set`.
|
||||
But that is exactly what happened here because the equals/hashCode are based on the (generated) id, which was not set until the JPA transaction was committed.
|
||||
|
||||
|
@ -328,7 +419,7 @@ To find the `Account` balance, we need to query the `AccountSummary` which share
|
|||
|
||||
However, the `AccountSummary` is not mapped to a physical table, but to an SQL query.
|
||||
|
||||
So, if we have the following `AccountTransaction` record, the `AccountSummary` balance will mach the proper amount of money in this `Account`.
|
||||
So, if we have the following `AccountTransaction` record, the `AccountSummary` balance will match the proper amount of money in this `Account`.
|
||||
|
||||
[[mapping-Subselect-entity-find-example]]
|
||||
.Finding a `@Subselect` entity
|
||||
|
@ -356,18 +447,18 @@ The goal of the `@Synchronize` annotation in the `AccountSummary` entity mapping
|
|||
underlying `@Subselect` SQL query. This is because, unlike JPQL and HQL queries, Hibernate cannot parse the underlying native SQL query.
|
||||
|
||||
With the `@Synchronize` annotation in place,
|
||||
when executing a HQL or JPQL which selects from the `AccountSummary` entity,
|
||||
when executing an HQL or JPQL which selects from the `AccountSummary` entity,
|
||||
Hibernate will trigger a Persistence Context flush if there are pending `Account`, `Client` or `AccountTransaction` entity state transitions.
|
||||
====
|
||||
|
||||
[[entity-proxy]]
|
||||
==== Define a custom entity proxy
|
||||
|
||||
By default, when it needs to use a proxy instead of the actual Pojo, Hibernate is going to use a Bytecode manipulation library like
|
||||
By default, when it needs to use a proxy instead of the actual POJO, Hibernate is going to use a Bytecode manipulation library like
|
||||
http://jboss-javassist.github.io/javassist/[Javassist] or
|
||||
http://bytebuddy.net/[Byte Buddy].
|
||||
|
||||
However, if the entity class is final, Javassist will not create a proxy and you will get a Pojo even when you only need a proxy reference.
|
||||
However, if the entity class is final, Javassist will not create a proxy and you will get a POJO even when you only need a proxy reference.
|
||||
In this case, you could proxy an interface that this particular entity implements, as illustrated by the following example.
|
||||
|
||||
[[entity-proxy-interface-mapping]]
|
||||
|
@ -399,7 +490,7 @@ include::{extrasdir}/entity/entity-proxy-persist-mapping.sql[]
|
|||
====
|
||||
|
||||
As you can see in the associated SQL snippet, Hibernate issues no SQL SELECT query since the proxy can be
|
||||
constructed without needing to fetch the actual entity Pojo.
|
||||
constructed without needing to fetch the actual entity POJO.
|
||||
|
||||
[[entity-tuplizer]]
|
||||
==== Dynamic entity proxies using the @Tuplizer annotation
|
||||
|
@ -407,7 +498,7 @@ constructed without needing to fetch the actual entity Pojo.
|
|||
It is possible to map your entities as dynamic proxies using
|
||||
the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Tuplizer.html[`@Tuplizer`] annotation.
|
||||
|
||||
In the following entity mapping, both the embeddable and the entity are mapped as interfaces, not Pojos.
|
||||
In the following entity mapping, both the embeddable and the entity are mapped as interfaces, not POJOs.
|
||||
|
||||
[[entity-tuplizer-entity-mapping]]
|
||||
.Dynamic entity proxy mapping
|
||||
|
@ -458,7 +549,7 @@ include::{sourcedir-proxy}/tuplizer/DataProxyHandler.java[tag=entity-tuplizer-in
|
|||
----
|
||||
====
|
||||
|
||||
With the `DynamicInstantiator` in place, we can work with the dynamic proxy entities just like with Pojo entities.
|
||||
With the `DynamicInstantiator` in place, we can work with the dynamic proxy entities just like with POJO entities.
|
||||
|
||||
[[entity-tuplizer-dynamic-proxy-example]]
|
||||
.Persisting entities and embeddables as dynamic proxies
|
||||
|
@ -493,4 +584,4 @@ include::{sourcedir-persister}/Book.java[tag=entity-persister-mapping,indent=0]
|
|||
====
|
||||
|
||||
By providing your own `EntityPersister` and `CollectionPersister` implementations,
|
||||
you can control how entities and collections are persisted in to the database.
|
||||
you can control how entities and collections are persisted into the database.
|
|
@ -1,2 +1,2 @@
|
|||
INSERT INTO Phone (phone_number, phone_type, id)
|
||||
VALUES ('123-456-78990', 2, 1)
|
||||
VALUES ('123-456-78990', 1, 1)
|
||||
|
|
|
@ -0,0 +1,6 @@
|
|||
create table public.book (
|
||||
id bigint not null,
|
||||
author varchar(255),
|
||||
title varchar(255),
|
||||
primary key (id)
|
||||
) engine=InnoDB
|
|
@ -0,0 +1,6 @@
|
|||
create table library.book (
|
||||
id int8 not null,
|
||||
author varchar(255),
|
||||
title varchar(255),
|
||||
primary key (id)
|
||||
)
|
|
@ -21,7 +21,7 @@ See <<chapters/domain/natural_id.adoc#naturalid,Natural Ids>>.
|
|||
====
|
||||
Technically the identifier does not have to map to the column(s) physically defined as the table primary key.
|
||||
They just need to map to column(s) that uniquely identify each row.
|
||||
However this documentation will continue to use the terms identifier and primary key interchangeably.
|
||||
However, this documentation will continue to use the terms identifier and primary key interchangeably.
|
||||
====
|
||||
|
||||
Every entity must define an identifier. For entity inheritance hierarchies, the identifier must be defined just on the entity that is the root of the hierarchy.
|
||||
|
@ -84,7 +84,7 @@ Identifier value generations strategies are discussed in detail in the <<identif
|
|||
==== Composite identifiers
|
||||
|
||||
Composite identifiers correspond to one or more persistent attributes.
|
||||
Here are the rules governing composite identifiers, as defined by the JPA specification.
|
||||
Here are the rules governing composite identifiers, as defined by the JPA specification:
|
||||
|
||||
* The composite identifier must be represented by a "primary key class".
|
||||
The primary key class may be defined using the `javax.persistence.EmbeddedId` annotation (see <<identifiers-composite-aggregated>>),
|
||||
|
@ -95,12 +95,13 @@ or defined using the `javax.persistence.IdClass` annotation (see <<identifiers-c
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
The restriction that a composite identifier has to be represented by a "primary key class" is only JPA specific.
|
||||
Hibernate does allow composite identifiers to be defined without a "primary key class", although that modeling technique is deprecated and therefore omitted from this discussion.
|
||||
The restriction that a composite identifier has to be represented by a "primary key class" (e.g. `@EmbeddedId` or `@IdClass`) is only JPA-specific.
|
||||
|
||||
Hibernate does allow composite identifiers to be defined without a "primary key class" via multiple `@Id` attributes.
|
||||
====
|
||||
|
||||
The attributes making up the composition can be either basic, composite, ManyToOne.
|
||||
Note especially that collections and one-to-ones are never appropriate.
|
||||
The attributes making up the composition can be either basic, composite, `@ManyToOne`.
|
||||
Note especially that collection and one-to-one are never appropriate.
|
||||
|
||||
[[identifiers-composite-aggregated]]
|
||||
==== Composite identifiers with `@EmbeddedId`
|
||||
|
@ -130,10 +131,10 @@ include::{sourcedir}/EmbeddedIdManyToOneTest.java[tag=identifiers-basic-embedded
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
Hibernate supports directly modeling the ManyToOne in the PK class, whether `@EmbeddedId` or `@IdClass`.
|
||||
Hibernate supports directly modeling `@ManyToOne` associations in the Primary Key class, whether `@EmbeddedId` or `@IdClass`.
|
||||
|
||||
However, that is not portably supported by the JPA specification.
|
||||
In JPA terms one would use "derived identifiers"; for details, see <<identifiers-derived>>.
|
||||
In JPA terms, one would use "derived identifiers". For more details, see <<identifiers-derived>>.
|
||||
====
|
||||
|
||||
[[identifiers-composite-nonaggregated]]
|
||||
|
@ -175,7 +176,7 @@ include::{sourcedir}/IdClassGeneratedValueTest.java[tag=identifiers-basic-idclas
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
This feature exists because of a highly questionable interpretation of the JPA specification made by the SpecJ committee.
|
||||
This feature which allows auto-generated values in composite identifiers exists because of a highly questionable interpretation of the JPA specification made by the SpecJ committee.
|
||||
|
||||
Hibernate does not feel that JPA defines support for this, but added the feature simply to be usable in SpecJ benchmarks.
|
||||
Use of this feature may or may not be portable from a JPA perspective.
|
||||
|
@ -208,27 +209,110 @@ include::{sourcedir}/IdManyToOneTest.java[tag=identifiers-composite-id-fetching-
|
|||
----
|
||||
====
|
||||
|
||||
[[identifiers-composite-generated]]
|
||||
==== Composite identifiers with generated properties
|
||||
|
||||
When using composite identifiers, the underlying identifier properties must be manually assigned by the user.
|
||||
|
||||
Automatic property generation cannot be used to generate the value of an underlying property that makes up the composite identifier.
|
||||
|
||||
Therefore, you cannot use any of the automatic property generators described in the <<chapters/domain/basic_types.adoc#mapping-generated, generated properties section>>, like `@Generated`, `@CreationTimestamp` or `@ValueGenerationType`, or database-generated values.
|
||||
|
||||
Nevertheless, you can still generate the identifier properties prior to constructing the composite identifier, as illustrated by the following examples.
|
||||
|
||||
Assume we have the following `EventId` composite identifier and an `Event` entity which uses the aforementioned composite identifier.
|
||||
|
||||
[[identifiers-composite-generated-mapping-example]]
|
||||
.The Event entity and EventId composite identifier
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/composite/Event.java[tag=identifiers-composite-generated-mapping-example, indent=0]
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/composite/EventId.java[tag=identifiers-composite-generated-mapping-example, indent=0]
|
||||
----
|
||||
====
|
||||
|
||||
[[identifiers-composite-generated-in-memory]]
|
||||
===== In-memory generated composite identifier properties
|
||||
|
||||
If you want to generate the composite identifier properties in-memory,
|
||||
you need to do that as follows:
|
||||
|
||||
[[identifiers-composite-generated-in-memory-example]]
|
||||
.In-memory generated composite identifier properties example
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/composite/EmbeddedIdInMemoryGeneratedValueTest.java[tag=identifiers-composite-generated-in-memory-example, indent=0]
|
||||
----
|
||||
====
|
||||
|
||||
Notice that the `createdOn` property of the `EventId` composite identifier was generated by the data access code and assigned to the
|
||||
identifier prior to persisting the `Event` entity.
|
||||
|
||||
[[identifiers-composite-generated-database]]
|
||||
===== Database generated composite identifier properties
|
||||
|
||||
If you want to generate the composite identifier properties using a database function or stored procedure,
|
||||
you can do it as illustrated by the following example.
|
||||
|
||||
[[identifiers-composite-generated-database-example]]
|
||||
.Database generated composite identifier properties example
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/composite/EmbeddedIdDatabaseGeneratedValueTest.java[tag=identifiers-composite-generated-database-example, indent=0]
|
||||
----
|
||||
====
|
||||
|
||||
Notice that the `createdOn` property of the `EventId` composite identifier was generated by calling the `CURRENT_TIMESTAMP` database function,
|
||||
and we assigned it to the composite identifier prior to persisting the `Event` entity.
|
||||
|
||||
[[identifiers-generators]]
|
||||
==== Generated identifier values
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
For discussion of generated values for non-identifier attributes, see <<chapters/domain/basic_types.adoc#mapping-generated,Generated properties>>.
|
||||
You can also auto-generate values for non-identifier attributes. For more details, see the <<chapters/domain/basic_types.adoc#mapping-generated,Generated properties>> section.
|
||||
====
|
||||
|
||||
Hibernate supports identifier value generation across a number of different types.
|
||||
Remember that JPA portably defines identifier value generation just for integer types.
|
||||
|
||||
Identifier value generation is indicates using the `javax.persistence.GeneratedValue` annotation.
|
||||
Identifier value generation is indicated using the `javax.persistence.GeneratedValue` annotation.
|
||||
The most important piece of information here is the specified `javax.persistence.GenerationType` which indicates how values will be generated.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The discussions below assume that the application is using Hibernate's "new generator mappings" as indicated by the `hibernate.id.new_generator_mappings` setting or
|
||||
`MetadataBuilder.enableNewIdentifierGeneratorSupport` method during bootstrap.
|
||||
Starting with Hibernate 5, this is set to true by default.
|
||||
If applications set this to false the resolutions discussed here will be very different.
|
||||
The rest of the discussion here assumes this setting is enabled (true).
|
||||
|
||||
Starting with Hibernate 5, this is set to `true` by default.
|
||||
In applications where the `hibernate.id.new_generator_mappings` configuration is set to `false` the resolutions discussed here will be very different.
|
||||
The rest of the discussion here assumes this setting is enabled (`true`).
|
||||
====
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
In Hibernate 5.3, Hibernate attempts to delay the insert of entities if the flush-mode does not equal `AUTO`.
|
||||
This was slightly problematic for entities that used `IDENTITY` or `SEQUENCE` generated identifiers that were
|
||||
also involved in some form of association with another entity in the same transaction.
|
||||
|
||||
In Hibernate 5.4, Hibernate attempts to remedy the problem using an algorithm to decide if the insert should
|
||||
be delayed or if it requires immediate insertion. We wanted to restore the behavior prior to 5.3 only for
|
||||
very specific use cases where it made sense.
|
||||
|
||||
Entity mappings can sometimes be complex and it is possible a corner case was overlooked. Hibernate offers a
|
||||
way to completely disable the 5.3 behavior in the event problems occur with `DelayedPostInsertIdentifier`. To
|
||||
enable the legacy behavior, set `hibernate.id.disable_delayed_identity_inserts=true`.
|
||||
|
||||
This configuration option is meant to act as a _temporary_ fix and bridge the gap between the changes in this
|
||||
behavior across Hibernate 5.x releases. If this configuration setting is necessary for a mapping, please open
|
||||
a JIRA and report the mapping so that the algorithm can be reviewed.
|
||||
====
|
||||
|
||||
`AUTO` (the default):: Indicates that the persistence provider (Hibernate) should choose an appropriate generation strategy. See <<identifiers-generators-auto>>.
|
||||
|
@ -241,7 +325,7 @@ The rest of the discussion here assumes this setting is enabled (true).
|
|||
|
||||
How a persistence provider interprets the AUTO generation type is left up to the provider.
|
||||
|
||||
The default behavior is to look at the java type of the identifier attribute.
|
||||
The default behavior is to look at the Java type of the identifier attribute.
|
||||
|
||||
If the identifier type is UUID, Hibernate is going to use a <<identifiers-generators-uuid, UUID identifier>>.
|
||||
|
||||
|
@ -249,7 +333,7 @@ If the identifier type is numerical (e.g. `Long`, `Integer`), then Hibernate is
|
|||
The `IdGeneratorStrategyInterpreter` has two implementations:
|
||||
|
||||
`FallbackInterpreter`::
|
||||
This is the default strategy since Hibernate 5.0. For older versions, this strategy is enabled through the <<appendices/Configurations.adoc#configurations-mapping,`hibernate.id.new_generator_mappings`>> configuration property .
|
||||
This is the default strategy since Hibernate 5.0. For older versions, this strategy is enabled through the <<appendices/Configurations.adoc#configurations-mapping,`hibernate.id.new_generator_mappings`>> configuration property.
|
||||
When using this strategy, `AUTO` always resolves to `SequenceStyleGenerator`.
|
||||
If the underlying database supports sequences, then a SEQUENCE generator is used. Otherwise, a TABLE generator is going to be used instead.
|
||||
`LegacyFallbackInterpreter`::
|
||||
|
@ -288,7 +372,7 @@ include::{sourcedir}/SequenceGeneratorNamedTest.java[tag=identifiers-generators-
|
|||
----
|
||||
====
|
||||
|
||||
The `javax.persistence.SequenceGenerator` annotataion allows you to specify additional configurations as well.
|
||||
The `javax.persistence.SequenceGenerator` annotation allows you to specify additional configurations as well.
|
||||
|
||||
[[identifiers-generators-sequence-configured]]
|
||||
.Configured sequence
|
||||
|
@ -303,7 +387,7 @@ include::{sourcedir}/SequenceGeneratorConfiguredTest.java[tag=identifiers-genera
|
|||
==== Using IDENTITY columns
|
||||
|
||||
For implementing identifier value generation based on IDENTITY columns,
|
||||
Hibernate makes use of its `org.hibernate.id.IdentityGenerator` id generator which expects the identifier to generated by INSERT into the table.
|
||||
Hibernate makes use of its `org.hibernate.id.IdentityGenerator` id generator which expects the identifier to be generated by INSERT into the table.
|
||||
IdentityGenerator understands 3 different ways that the INSERT-generated value might be retrieved:
|
||||
|
||||
* If Hibernate believes the JDBC environment supports `java.sql.Statement#getGeneratedKeys`, then that approach will be used for extracting the IDENTITY generated keys.
|
||||
|
@ -312,20 +396,22 @@ IdentityGenerator understands 3 different ways that the INSERT-generated value m
|
|||
|
||||
[IMPORTANT]
|
||||
====
|
||||
It is important to realize that this imposes a runtime behavior where the entity row *must* be physically inserted prior to the identifier value being known.
|
||||
This can mess up extended persistence contexts (conversations).
|
||||
Because of the runtime imposition/inconsistency Hibernate suggest other forms of identifier value generation be used.
|
||||
It is important to realize that using IDENTITY columns imposes a runtime behavior where the entity row *must* be physically inserted prior to the identifier value being known.
|
||||
|
||||
This can mess up extended persistence contexts (long conversations).
|
||||
Because of the runtime imposition/inconsistency, Hibernate suggests other forms of identifier value generation be used (e.g. SEQUENCE).
|
||||
====
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
There is yet another important runtime impact of choosing IDENTITY generation: Hibernate will not be able to JDBC batching for inserts of the entities that use IDENTITY generation.
|
||||
The importance of this depends on the application specific use cases.
|
||||
If the application is not usually creating many new instances of a given type of entity that uses IDENTITY generation, then this is not an important impact since batching would not have been helpful anyway.
|
||||
There is yet another important runtime impact of choosing IDENTITY generation: Hibernate will not be able to batch INSERT statements for the entities using the IDENTITY generation.
|
||||
|
||||
The importance of this depends on the application-specific use cases.
|
||||
If the application is not usually creating many new instances of a given entity type using the IDENTITY generator, then this limitation will be less important since batching would not have been very helpful anyway.
|
||||
====
|
||||
|
||||
[[identifiers-generators-table]]
|
||||
==== Using table identifier generator
|
||||
==== Using the table identifier generator
|
||||
|
||||
Hibernate achieves table-based identifier generation based on its `org.hibernate.id.enhanced.TableGenerator` which defines a table capable of holding multiple named value segments for any number of entities.
|
||||
|
||||
|
@ -351,7 +437,7 @@ If no table name is given Hibernate assumes an implicit name of `hibernate_seque
|
|||
Additionally, because no `javax.persistence.TableGenerator#pkColumnValue` is specified,
|
||||
Hibernate will use the default segment (`sequence_name='default'`) from the hibernate_sequences table.
|
||||
|
||||
However, you can configure the table identifier generator using the http://docs.oracle.com/javaee/7/api/javax/persistence/TableGenerator.html[`@TableGenerator`] annotation.
|
||||
However, you can configure the table identifier generator using the {jpaJavadocUrlPrefix}TableGenerator.html[`@TableGenerator`] annotation.
|
||||
|
||||
[[identifiers-generators-table-configured-mapping-example]]
|
||||
.Configured table generator
|
||||
|
@ -392,7 +478,7 @@ This is supported through its `org.hibernate.id.UUIDGenerator` id generator.
|
|||
`UUIDGenerator` supports pluggable strategies for exactly how the UUID is generated.
|
||||
These strategies are defined by the `org.hibernate.id.UUIDGenerationStrategy` contract.
|
||||
The default strategy is a version 4 (random) strategy according to IETF RFC 4122.
|
||||
Hibernate does ship with an alternative strategy which is a RFC 4122 version 1 (time-based) strategy (using ip address rather than mac address).
|
||||
Hibernate does ship with an alternative strategy which is an RFC 4122 version 1 (time-based) strategy (using IP address rather than MAC address).
|
||||
|
||||
[[identifiers-generators-uuid-mapping-example]]
|
||||
.Implicitly using the random UUID strategy
|
||||
|
@ -421,13 +507,13 @@ include::{sourcedir}/UuidCustomGeneratedValueTest.java[tag=identifiers-generator
|
|||
Most of the Hibernate generators that separately obtain identifier values from database structures support the use of pluggable optimizers.
|
||||
Optimizers help manage the number of times Hibernate has to talk to the database in order to generate identifier values.
|
||||
For example, with no optimizer applied to a sequence-generator, every time the application asked Hibernate to generate an identifier it would need to grab the next sequence value from the database.
|
||||
But if we can minimize the number of times we need to communicate with the database here, the application will be able to perform better.
|
||||
Which is, in fact, the role of these optimizers.
|
||||
But if we can minimize the number of times we need to communicate with the database here, the application will be able to perform better,
|
||||
which is, in fact, the role of these optimizers.
|
||||
|
||||
none:: No optimization is performed. We communicate with the database each and every time an identifier value is needed from the generator.
|
||||
|
||||
pooled-lo:: The pooled-lo optimizer works on the principle that the increment-value is encoded into the database table/sequence structure.
|
||||
In sequence-terms this means that the sequence is defined with a greater-that-1 increment size.
|
||||
In sequence-terms, this means that the sequence is defined with a greater-than-1 increment size.
|
||||
+
|
||||
For example, consider a brand new sequence defined as `create sequence m_sequence start with 1 increment by 20`.
|
||||
This sequence essentially defines a "pool" of 20 usable id values each and every time we ask it for its next-value.
|
||||
|
@ -483,7 +569,7 @@ include::{extrasdir}/id/identifiers-generators-pooled-lo-optimizer-persist-examp
|
|||
----
|
||||
====
|
||||
|
||||
As you can see from the list of generated SQL statements, you can insert 3 entities for one database sequence call.
|
||||
As you can see from the list of generated SQL statements, you can insert 3 entities with just one database sequence call.
|
||||
This way, the pooled and the pooled-lo optimizers allow you to reduce the number of database roundtrips, therefore reducing the overall transaction response time.
|
||||
|
||||
[[identifiers-derived]]
|
||||
|
@ -527,8 +613,7 @@ include::{sourcedir-associations}/OneToOnePrimaryKeyJoinColumnTest.java[tag=iden
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
Unlike `@MapsId`, the application developer is responsible for ensuring that the identifier and the many-to-one (or one-to-one) association are in sync
|
||||
as you can see in the `PersonDetails#setPerson` method.
|
||||
Unlike `@MapsId`, the application developer is responsible for ensuring that the entity identifier and the many-to-one (or one-to-one) association are in sync, as you can see in the `PersonDetails#setPerson` method.
|
||||
====
|
||||
|
||||
[[identifiers-rowid]]
|
||||
|
@ -538,7 +623,7 @@ If you annotate a given entity with the `@RowId` annotation and the underlying d
|
|||
then Hibernate can use the `ROWID` pseudo-column for CRUD operations.
|
||||
|
||||
[[identifiers-rowid-mapping]]
|
||||
.`@RowId` entity maapping
|
||||
.`@RowId` entity mapping
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -560,4 +645,4 @@ include::{sourcedir}/RowIdTest.java[tag=identifiers-rowid-example]
|
|||
----
|
||||
include::{extrasdir}/id/identifiers-rowid-example.sql[]
|
||||
----
|
||||
====
|
||||
====
|
||||
|
|
|
@ -107,5 +107,5 @@ include::{extrasdir}/collection-immutability-update-example.log.txt[]
|
|||
|
||||
[TIP]
|
||||
====
|
||||
While immutable entity changes are simply discarded, modifying an immutable collection end up in a `HibernateException` being thrown.
|
||||
While immutable entity changes are simply discarded, modifying an immutable collection will end up in a `HibernateException` being thrown.
|
||||
====
|
|
@ -5,7 +5,7 @@
|
|||
|
||||
Although relational database systems don't provide support for inheritance, Hibernate provides several strategies to leverage this object-oriented trait onto domain model entities:
|
||||
|
||||
MappedSuperclass:: Inheritance is implemented in domain model only without reflecting it in the database schema. See <<entity-inheritance-mapped-superclass>>.
|
||||
MappedSuperclass:: Inheritance is implemented in the domain model only without reflecting it in the database schema. See <<entity-inheritance-mapped-superclass>>.
|
||||
Single table:: The domain model class hierarchy is materialized into a single table which contains entities belonging to different class types. See <<entity-inheritance-single-table>>.
|
||||
Joined table:: The base class and all the subclasses have their own database tables and fetching a subclass entity requires a join with the parent table as well. See <<entity-inheritance-joined-table>>.
|
||||
Table per class:: Each subclass has its own table containing both the subclass and the base class properties. See <<entity-inheritance-table-per-class>>.
|
||||
|
@ -13,11 +13,11 @@ Table per class:: Each subclass has its own table containing both the subclass a
|
|||
[[entity-inheritance-mapped-superclass]]
|
||||
==== MappedSuperclass
|
||||
|
||||
In the following domain model class hierarchy, a 'DebitAccount' and a 'CreditAccount' share the same 'Account' base class.
|
||||
In the following domain model class hierarchy, a `DebitAccount` and a `CreditAccount` share the same `Account` base class.
|
||||
|
||||
image:images/domain/inheritance/inheritance_class_diagram.svg[Inheritance class diagram]
|
||||
|
||||
When using `MappedSuperclass`, the inheritance is visible in the domain model only and each database table contains both the base class and the subclass properties.
|
||||
When using `MappedSuperclass`, the inheritance is visible in the domain model only, and each database table contains both the base class and the subclass properties.
|
||||
|
||||
[[entity-inheritance-mapped-superclass-example]]
|
||||
.`@MappedSuperclass` inheritance
|
||||
|
@ -35,8 +35,8 @@ include::{extrasdir}/entity-inheritance-mapped-superclass-example.sql[]
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
Because the `@MappedSuperclass` inheritance model is not mirrored at database level,
|
||||
it's not possible to use polymorphic queries (fetching subclasses by their base class).
|
||||
Because the `@MappedSuperclass` inheritance model is not mirrored at the database level,
|
||||
it's not possible to use polymorphic queries referencing the `@MappedSuperclass` when fetching persistent objects by their base class.
|
||||
====
|
||||
|
||||
[[entity-inheritance-single-table]]
|
||||
|
@ -119,20 +119,20 @@ You can also use `@DiscriminatorFormula` to express in SQL a virtual discriminat
|
|||
This is particularly useful when the discriminator value can be extracted from one or more columns of the table.
|
||||
Both `@DiscriminatorColumn` and `@DiscriminatorFormula` are to be set on the root entity (once per persisted hierarchy).
|
||||
|
||||
`@org.hibernate.annotations.DiscriminatorOptions` allows to optionally specify Hibernate specific discriminator options which are not standardized in JPA.
|
||||
`@org.hibernate.annotations.DiscriminatorOptions` allows you to optionally specify Hibernate-specific discriminator options which are not standardized in JPA.
|
||||
The available options are `force` and `insert`.
|
||||
|
||||
The `force` attribute is useful if the table contains rows with _extra_ discriminator values that are not mapped to a persistent class.
|
||||
This could for example occur when working with a legacy database.
|
||||
If `force` is set to true Hibernate will specify the allowed discriminator values in the SELECT query, even when retrieving all instances of the root class.
|
||||
This could, for example, occur when working with a legacy database.
|
||||
If `force` is set to `true`, Hibernate will specify the allowed discriminator values in the SELECT query even when retrieving all instances of the root class.
|
||||
|
||||
The second option, `insert`, tells Hibernate whether or not to include the discriminator column in SQL INSERTs.
|
||||
Usually, the column should be part of the INSERT statement, but if your discriminator column is also part of a mapped composite identifier you have to set this option to false.
|
||||
Usually, the column should be part of the INSERT statement, but if your discriminator column is also part of a mapped composite identifier you have to set this option to `false`.
|
||||
====
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
There used to be `@org.hibernate.annotations.ForceDiscriminator` annotation which was deprecated in version 3.6 and later removed. Use `@DiscriminatorOptions` instead.
|
||||
There used to be a `@org.hibernate.annotations.ForceDiscriminator` annotation which was deprecated in version 3.6 and later removed. Use `@DiscriminatorOptions` instead.
|
||||
====
|
||||
|
||||
[[entity-inheritance-discriminator-formula]]
|
||||
|
@ -226,7 +226,7 @@ include::{extrasdir}/entity-inheritance-joined-table-example.sql[]
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
The primary key of this table is also a foreign key to the superclass table and described by the `@PrimaryKeyJoinColumns`.
|
||||
The primary keys of the `CreditAccount` and `DebitAccount` tables are also foreign keys to the superclass table primary key and described by the `@PrimaryKeyJoinColumns`.
|
||||
|
||||
The table name still defaults to the non-qualified class name.
|
||||
Also, if `@PrimaryKeyJoinColumn` is not set, the primary key / foreign key columns are assumed to have the same names as the primary key columns of the primary table of the superclass.
|
||||
|
@ -276,7 +276,7 @@ Each table defines all persistent states of the class, including the inherited s
|
|||
|
||||
In Hibernate, it is not necessary to explicitly map such inheritance hierarchies.
|
||||
You can map each class as a separate entity root.
|
||||
However, if you wish use polymorphic associations (e.g. an association to the superclass of your hierarchy), you need to use the union subclass mapping.
|
||||
However, if you wish to use polymorphic associations (e.g. an association to the superclass of your hierarchy), you need to use the union subclass mapping.
|
||||
|
||||
[[entity-inheritance-table-per-class-example]]
|
||||
.Table per class
|
||||
|
@ -310,6 +310,23 @@ include::{extrasdir}/entity-inheritance-table-per-class-query-example.sql[]
|
|||
[IMPORTANT]
|
||||
====
|
||||
Polymorphic queries require multiple UNION queries, so be aware of the performance implications of a large class hierarchy.
|
||||
|
||||
Unfortunately, not all database systems support UNION ALL, in which case, UNION is going to be used instead of UNION ALL.
|
||||
|
||||
The following Hibernate dialects support UNION ALL:
|
||||
|
||||
- `AbstractHANADialect`
|
||||
- `AbstractTransactSQLDialect`
|
||||
- `CUBRIDDialect`
|
||||
- `DB2Dialect`
|
||||
- `H2Dialect`
|
||||
- `HSQLDialect`
|
||||
- `Ingres9Dialect`
|
||||
- `MySQL5Dialect`
|
||||
- `Oracle8iDialect`
|
||||
- `Oracle9Dialect`
|
||||
- `PostgreSQL81Dialect`
|
||||
- `RDMSOS2200Dialect`
|
||||
====
|
||||
|
||||
[[entity-inheritance-polymorphism]]
|
||||
|
@ -324,7 +341,7 @@ However, you can even query
|
|||
For instance, considering the following `DomainModelEntity` interface:
|
||||
|
||||
[[entity-inheritance-polymorphism-interface-example]]
|
||||
.Domain Model Entity interface
|
||||
.DomainModelEntity interface
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
|
@ -333,7 +350,7 @@ include::{sourcedir}/polymorphism/DomainModelEntity.java[tags=entity-inheritance
|
|||
====
|
||||
|
||||
If we have two entity mappings, a `Book` and a `Blog`,
|
||||
and the `Book` entity is mapped with the
|
||||
and the `Blog` entity is mapped with the
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Polymorphism.html[`@Polymorphism`] annotation
|
||||
and taking the `PolymorphismType.EXPLICIT` setting:
|
||||
|
||||
|
|
|
@ -4,11 +4,11 @@
|
|||
|
||||
Part of the mapping of an object model to the relational database is
|
||||
mapping names from the object model to the corresponding database names.
|
||||
Hibernate looks at this as 2 stage process:
|
||||
Hibernate looks at this as a 2-stage process:
|
||||
|
||||
* The first stage is determining a proper logical name from the domain model mapping. A
|
||||
logical name can be either explicitly specified by the user (using `@Column` or
|
||||
`@Table` e.g.) or it can be implicitly determined by Hibernate through an
|
||||
logical name can be either explicitly specified by the user (e.g., using `@Column` or
|
||||
`@Table`) or it can be implicitly determined by Hibernate through an
|
||||
<<ImplicitNamingStrategy>> contract.
|
||||
* Second is the resolving of this logical name to a physical name which is defined
|
||||
by the <<PhysicalNamingStrategy>> contract.
|
||||
|
@ -25,7 +25,7 @@ Also, the NamingStrategy contract was often not flexible enough to properly appl
|
|||
"rule", either because the API lacked the information to decide or because the API was honestly
|
||||
not well defined as it grew.
|
||||
|
||||
Due to these limitation, `org.hibernate.cfg.NamingStrategy` has been deprecated and then removed
|
||||
Due to these limitations, `org.hibernate.cfg.NamingStrategy` has been deprecated
|
||||
in favor of ImplicitNamingStrategy and PhysicalNamingStrategy.
|
||||
====
|
||||
|
||||
|
@ -38,7 +38,7 @@ repetitive information a developer must provide for mapping a domain model.
|
|||
====
|
||||
JPA defines inherent rules about implicit logical name determination. If JPA provider
|
||||
portability is a major concern, or if you really just like the JPA-defined implicit
|
||||
naming rules, be sure to stick with ImplicitNamingStrategyJpaCompliantImpl (the default)
|
||||
naming rules, be sure to stick with ImplicitNamingStrategyJpaCompliantImpl (the default).
|
||||
|
||||
Also, JPA defines no separation between logical and physical name. Following the JPA
|
||||
specification, the logical name *is* the physical name. If JPA provider portability
|
||||
|
@ -58,7 +58,7 @@ determine a logical name when the mapping did not provide an explicit name.
|
|||
image:images/domain/naming/implicit_naming_strategy_diagram.svg[Implicit Naming Strategy Diagram]
|
||||
|
||||
Hibernate defines multiple ImplicitNamingStrategy implementations out-of-the-box. Applications
|
||||
are also free to plug-in custom implementations.
|
||||
are also free to plug in custom implementations.
|
||||
|
||||
There are multiple ways to specify the ImplicitNamingStrategy to use. First, applications can specify
|
||||
the implementation using the `hibernate.implicit_naming_strategy` configuration setting which accepts:
|
||||
|
@ -83,7 +83,7 @@ to specify the ImplicitNamingStrategy to use. See
|
|||
[[PhysicalNamingStrategy]]
|
||||
==== PhysicalNamingStrategy
|
||||
|
||||
Many organizations define rules around the naming of database objects (tables, columns, foreign-keys, etc).
|
||||
Many organizations define rules around the naming of database objects (tables, columns, foreign keys, etc).
|
||||
The idea of a PhysicalNamingStrategy is to help implement such naming rules without having to hard-code them
|
||||
into the mapping via explicit names.
|
||||
|
||||
|
@ -93,10 +93,11 @@ would be, for example, to say that the physical column name should instead be ab
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
It is true that the resolution to `acct_num` could have been handled in an ImplicitNamingStrategy in this case.
|
||||
But the point is separation of concerns. The PhysicalNamingStrategy will be applied regardless of whether
|
||||
the attribute explicitly specified the column name or whether we determined that implicitly. The
|
||||
ImplicitNamingStrategy would only be applied if an explicit name was not given. So it depends on needs
|
||||
It is true that the resolution to `acct_num` could have been handled using an `ImplicitNamingStrategy` in this case.
|
||||
|
||||
But the point here is the separation of concerns. The `PhysicalNamingStrategy` will be applied regardless of whether
|
||||
the attribute explicitly specified the column name or whether we determined that implicitly. The
|
||||
`ImplicitNamingStrategy` would only be applied if an explicit name was not given. So, it all depends on needs
|
||||
and intent.
|
||||
====
|
||||
|
||||
|
@ -105,7 +106,7 @@ applications and integrations can define custom implementations of this Physical
|
|||
contract. Here is an example PhysicalNamingStrategy for a fictitious company named Acme Corp
|
||||
whose naming standards are to:
|
||||
|
||||
* prefer underscore-delimited words rather than camel-casing
|
||||
* prefer underscore-delimited words rather than camel casing
|
||||
* replace certain words with standard abbreviations
|
||||
|
||||
.Example PhysicalNamingStrategy implementation
|
||||
|
|
|
@ -10,8 +10,7 @@ As we will see later, Hibernate provides a dedicated, efficient API for loading
|
|||
[[naturalid-mapping]]
|
||||
==== Natural Id Mapping
|
||||
|
||||
Natural ids are defined in terms of on
|
||||
e or more persistent attributes.
|
||||
Natural ids are defined in terms of one or more persistent attributes.
|
||||
|
||||
[[naturalid-simple-basic-attribute-mapping-example]]
|
||||
.Natural id using single basic attribute
|
||||
|
@ -43,7 +42,7 @@ include::{sourcedir}/MultipleNaturalIdTest.java[tags=naturalid-multiple-attribut
|
|||
[[naturalid-api]]
|
||||
==== Natural Id API
|
||||
|
||||
As stated before, Hibernate provides an API for loading entities by their associate natural id.
|
||||
As stated before, Hibernate provides an API for loading entities by their associated natural id.
|
||||
This is represented by the `org.hibernate.NaturalIdLoadAccess` contract obtained via Session#byNaturalId.
|
||||
|
||||
[NOTE]
|
||||
|
@ -72,17 +71,17 @@ include::{sourcedir}/MultipleNaturalIdTest.java[tags=naturalid-load-access-examp
|
|||
|
||||
NaturalIdLoadAccess offers 2 distinct methods for obtaining the entity:
|
||||
|
||||
`load()`:: obtains a reference to the entity, making sure that the entity state is initialized
|
||||
`load()`:: obtains a reference to the entity, making sure that the entity state is initialized.
|
||||
`getReference()`:: obtains a reference to the entity. The state may or may not be initialized.
|
||||
If the entity is already associated with the current running Session, that reference (loaded or not) is returned.
|
||||
If the entity is not loaded in the current Session and the entity supports proxy generation, an uninitialized proxy is generated and returned, otherwise the entity is loaded from the database and returned.
|
||||
|
||||
`NaturalIdLoadAccess` allows loading an entity by natural id and at the same time apply a pessimistic lock.
|
||||
`NaturalIdLoadAccess` allows loading an entity by natural id and at the same time applies a pessimistic lock.
|
||||
For additional details on locking, see the <<chapters/locking/Locking.adoc#locking,Locking>> chapter.
|
||||
|
||||
We will discuss the last method available on NaturalIdLoadAccess ( `setSynchronizationEnabled()` ) in <<naturalid-mutability-caching>>.
|
||||
|
||||
Because the `Company` and `PostalCarrier` entities define "simple" natural ids, we can load them as follows:
|
||||
Because the `Book` entities in the first two examples define "simple" natural ids, we can load them as follows:
|
||||
|
||||
[[naturalid-simple-load-access-example]]
|
||||
.Loading by simple natural id
|
||||
|
@ -99,7 +98,7 @@ include::{sourcedir}/CompositeNaturalIdTest.java[tags=naturalid-simple-load-acce
|
|||
====
|
||||
|
||||
Here we see the use of the `org.hibernate.SimpleNaturalIdLoadAccess` contract,
|
||||
obtained via `Session#bySimpleNaturalId().
|
||||
obtained via `Session#bySimpleNaturalId()`.
|
||||
|
||||
`SimpleNaturalIdLoadAccess` is similar to `NaturalIdLoadAccess` except that it does not define the using method.
|
||||
Instead, because these _simple_ natural ids are defined based on just one attribute we can directly pass
|
||||
|
@ -116,7 +115,7 @@ If the entity does not define a natural id, or if the natural id is not of a "si
|
|||
A natural id may be mutable or immutable. By default the `@NaturalId` annotation marks an immutable natural id attribute.
|
||||
An immutable natural id is expected to never change its value.
|
||||
|
||||
If the value(s) of the natural id attribute(s) change, `@NaturalId(mutable=true)` should be used instead.
|
||||
If the value(s) of the natural id attribute(s) change, `@NaturalId(mutable = true)` should be used instead.
|
||||
|
||||
[[naturalid-mutable-mapping-example]]
|
||||
.Mutable natural id mapping
|
||||
|
@ -136,7 +135,7 @@ To be clear: this is only pertinent for mutable natural ids.
|
|||
[IMPORTANT]
|
||||
====
|
||||
This _discovery and adjustment_ have a performance impact.
|
||||
If an application is certain that none of its mutable natural ids already associated with the Session have changed, it can disable that checking by calling `setSynchronizationEnabled(false)` (the default is true).
|
||||
If you are certain that none of the mutable natural ids already associated with the current `Session` have changed, you can disable this checking by calling `setSynchronizationEnabled(false)` (the default is `true`).
|
||||
This will force Hibernate to circumvent the checking of mutable natural ids.
|
||||
====
|
||||
|
||||
|
|
|
@ -6,21 +6,21 @@
|
|||
Hibernate understands both the Java and JDBC representations of application data.
|
||||
The ability to read/write this data from/to the database is the function of a Hibernate _type_.
|
||||
A type, in this usage, is an implementation of the `org.hibernate.type.Type` interface.
|
||||
This Hibernate type also describes various aspects of behavior of the Java type such as how to check for equality, how to clone values, etc.
|
||||
This Hibernate type also describes various behavioral aspects of the Java type such as how to check for equality, how to clone values, etc.
|
||||
|
||||
.Usage of the word _type_
|
||||
[NOTE]
|
||||
====
|
||||
The Hibernate type is neither a Java type nor a SQL data type.
|
||||
It provides information about both of these as well as understanding marshalling between.
|
||||
It provides information about mapping a Java type to an SQL type as well as how to persist and fetch a given Java type to and from a relational database.
|
||||
|
||||
When you encounter the term type in discussions of Hibernate, it may refer to the Java type, the JDBC type, or the Hibernate type, depending on context.
|
||||
When you encounter the term type in discussions of Hibernate, it may refer to the Java type, the JDBC type, or the Hibernate type, depending on the context.
|
||||
====
|
||||
|
||||
To help understand the type categorizations, let's look at a simple table and domain model that we wish to map.
|
||||
|
||||
[[mapping-types-basic-example]]
|
||||
.Simple table and domain model
|
||||
.A simple table and domain model
|
||||
====
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
|
@ -50,9 +50,9 @@ The persistent attributes of the `Contact` class are value types.
|
|||
|
||||
Value types are further classified into three sub-categories:
|
||||
|
||||
Basic types:: in mapping the `Contact` table, all attributes except for name would be basic types. Basic types are discussed in detail in <<chapters/domain/basic_types.adoc#basic,_Basic Types_>>
|
||||
Embeddable types:: the name attribute is an example of an embeddable type, which is discussed in details in <<chapters/domain/embeddables.adoc#embeddables,_Embeddable Types_>>
|
||||
Collection types:: although not featured in the aforementioned example, collection types are also a distinct category among value types. Collection types are further discussed in <<chapters/domain/collections.adoc#collections,_Collections_>>
|
||||
Basic types:: in mapping the `Contact` table, all attributes except for name would be basic types. Basic types are discussed in detail in <<chapters/domain/basic_types.adoc#basic,_Basic types_>>
|
||||
Embeddable types:: the `name` attribute is an example of an embeddable type, which is discussed in detail in <<chapters/domain/embeddables.adoc#embeddables,_Embeddable types_>>
|
||||
Collection types:: although not featured in the aforementioned example, collection types are also a distinct category among value types. Collection types are further discussed in <<chapters/domain/collections.adoc#collections,_Collections_>>
|
||||
|
||||
[[categorization-entity]]
|
||||
==== Entity types
|
||||
|
@ -62,4 +62,4 @@ Entities are domain model classes which correlate to rows in a database table, u
|
|||
Because of the requirement for a unique identifier, entities exist independently and define their own lifecycle.
|
||||
The `Contact` class itself would be an example of an entity.
|
||||
|
||||
Mapping entities is discussed in detail in <<chapters/domain/entity.adoc#entity,_Entity_>>.
|
||||
Mapping entities is discussed in detail in <<chapters/domain/entity.adoc#entity,_Entity types_>>.
|
||||
|
|
|
@ -23,8 +23,8 @@ You can create, modify and delete the entities as always.
|
|||
[IMPORTANT]
|
||||
====
|
||||
The use of JPA's `CriteriaUpdate` and `CriteriaDelete` bulk operations is not currently supported by Envers
|
||||
due to how an entity's lifecycle events are dispatched. Such operations should be avoided as they're not
|
||||
captured by Envers and leads to incomplete audit history.
|
||||
due to how an entity's lifecycle events are dispatched. Such operations should be avoided as they're not
|
||||
captured by Envers and lead to incomplete audit history.
|
||||
====
|
||||
|
||||
If you look at the generated schema for your entities, or at the data persisted by Hibernate, you will notice that there are no changes.
|
||||
|
@ -113,7 +113,7 @@ The `REVTYPE` column value is taken from the https://docs.jboss.org/hibernate/or
|
|||
|2 | `DEL` |A database table row was deleted.
|
||||
|=================================
|
||||
|
||||
The audit (history) of an entity can be accessed using the `AuditReader` interface, which can be obtained having an open `EntityManager` or `Session` via the `AuditReaderFactory`.
|
||||
The audit (history) of an entity can be accessed using the `AuditReader` interface, which can be obtained by having an open `EntityManager` or `Session` via the `AuditReaderFactory`.
|
||||
|
||||
[[envers-audited-revisions-example]]
|
||||
.Getting a list of revisions for the `Customer` entity
|
||||
|
@ -148,11 +148,11 @@ include::{extrasdir}/envers-audited-rev1-example.sql[]
|
|||
When executing the aforementioned SQL query, there are two parameters:
|
||||
|
||||
revision_number::
|
||||
The first parameter marks the revision number we are interested in or the latest one that exist up to this particular revision.
|
||||
The first parameter marks the revision number we are interested in or the latest one that exists up to this particular revision.
|
||||
revision_type::
|
||||
The second parameter specifies that we are not interested in `DEL` `RevisionType` so that deleted entries are filtered out.
|
||||
|
||||
The same goes for the second revision associated to the `UPDATE` statement.
|
||||
The same goes for the second revision associated with the `UPDATE` statement.
|
||||
|
||||
[[envers-audited-rev2-example]]
|
||||
.Getting the second revision for the `Customer` entity
|
||||
|
@ -210,7 +210,7 @@ Name of a field in the audit entity that will hold the revision number.
|
|||
Name of a field in the audit entity that will hold the type of the revision (currently, this can be: `add`, `mod`, `del`).
|
||||
|
||||
`*org.hibernate.envers.revision_on_collection_change*` (default: `true` )::
|
||||
Should a revision be generated when a not-owned relation field changes (this can be either a collection in a one-to-many relation, or the field using `mappedBy` attribute in a one-to-one relation).
|
||||
Should a revision be generated when a not-owned relation field changes (this can be either a collection in a one-to-many relation or the field using `mappedBy` attribute in a one-to-one relation).
|
||||
|
||||
`*org.hibernate.envers.do_not_audit_optimistic_locking_field*` (default: `true` )::
|
||||
When true, properties to be used for optimistic locking, annotated with `@Version`, will not be automatically audited (their history won't be stored; it normally doesn't make sense to store it).
|
||||
|
@ -221,17 +221,17 @@ Should the entity data be stored in the revision when the entity is deleted (ins
|
|||
This is not normally needed, as the data is present in the last-but-one revision.
|
||||
Sometimes, however, it is easier and more efficient to access it in the last revision (then the data that the entity contained before deletion is stored twice).
|
||||
|
||||
`*org.hibernate.envers.default_schema*` (default: `null` - same schema as table being audited)::
|
||||
`*org.hibernate.envers.default_schema*` (default: `null` - same schema as the table being audited)::
|
||||
The default schema name that should be used for audit tables.
|
||||
+
|
||||
Can be overridden using the `@AuditTable( schema="..." )` annotation.
|
||||
Can be overridden using the `@AuditTable( schema = "..." )` annotation.
|
||||
+
|
||||
If not present, the schema will be the same as the schema of the table being audited.
|
||||
|
||||
`*org.hibernate.envers.default_catalog*` (default: `null` - same catalog as table being audited)::
|
||||
`*org.hibernate.envers.default_catalog*` (default: `null` - same catalog as the table being audited)::
|
||||
The default catalog name that should be used for audit tables.
|
||||
+
|
||||
Can be overridden using the `@AuditTable( catalog="..." )` annotation.
|
||||
Can be overridden using the `@AuditTable( catalog = "..." )` annotation.
|
||||
+
|
||||
If not present, the catalog will be the same as the catalog of the normal tables.
|
||||
|
||||
|
@ -255,13 +255,13 @@ This property is only evaluated if the `ValidityAuditStrategy` is used.
|
|||
|
||||
`*org.hibernate.envers.audit_strategy_validity_revend_timestamp_field_name*` (default: `REVEND_TSTMP` )::
|
||||
Column name of the timestamp of the end revision until which the data was valid.
|
||||
Only used if the `ValidityAuditStrategy` is used, and `org.hibernate.envers.audit_strategy_validity_store_revend_timestamp` evaluates to true
|
||||
Only used if the `ValidityAuditStrategy` is used, and `org.hibernate.envers.audit_strategy_validity_store_revend_timestamp` evaluates to true.
|
||||
|
||||
`*org.hibernate.envers.use_revision_entity_with_native_id*` (default: `true` )::
|
||||
Boolean flag that determines the strategy of revision number generation.
|
||||
Default implementation of revision entity uses native identifier generator.
|
||||
+
|
||||
If current database engine does not support identity columns, users are advised to set this property to false.
|
||||
If the current database engine does not support identity columns, users are advised to set this property to false.
|
||||
+
|
||||
In this case revision numbers are created by preconfigured `org.hibernate.id.enhanced.SequenceStyleGenerator`.
|
||||
See: `org.hibernate.envers.DefaultRevisionEntity` and `org.hibernate.envers.enhanced.SequenceIdRevisionEntity`.
|
||||
|
@ -272,7 +272,7 @@ The default implementation creates `REVCHANGES` table that stores entity names o
|
|||
Single record encapsulates the revision identifier (foreign key to `REVINFO` table) and a string value.
|
||||
For more information, refer to <<envers-tracking-modified-entities-revchanges>> and <<envers-tracking-modified-entities-queries>>.
|
||||
|
||||
`*org.hibernate.envers.global_with_modified_flag*` (default: `false`, can be individually overridden with `@Audited( withModifiedFlag=true )` )::
|
||||
`*org.hibernate.envers.global_with_modified_flag*` (default: `false`, can be individually overridden with `@Audited( withModifiedFlag = true )` )::
|
||||
Should property modification flags be stored for all audited entities and all properties.
|
||||
+
|
||||
When set to true, an additional boolean column will be created in the audit tables for each property, filled with information on whether the given property changed in the given revision.
|
||||
|
@ -284,7 +284,10 @@ For more information, refer to <<envers-tracking-properties-changes>> and <<enve
|
|||
`*org.hibernate.envers.modified_flag_suffix*` (default: `_MOD` )::
|
||||
The suffix for columns storing "Modified Flags".
|
||||
+
|
||||
For example: a property called "age", will by default get modified flag with column name "age_MOD".
|
||||
For example, a property called "age" will by default get a modified flag with the column name "age_MOD".
|
||||
|
||||
`*org.hibernate.envers.modified_column_naming_strategy*` (default: `org.hibernate.envers.boot.internal.LegacyModifiedColumnNamingStrategy` )::
|
||||
The naming strategy to be used for modified flag columns in the audit metadata.
|
||||
|
||||
`*org.hibernate.envers.embeddable_set_ordinal_field_name*` (default: `SETORDINAL` )::
|
||||
Name of column used for storing ordinal of the change in sets of embeddable elements.
|
||||
|
@ -298,15 +301,25 @@ Guarantees proper validity audit strategy behavior when application reuses ident
|
|||
`*org.hibernate.envers.original_id_prop_name*` (default: `originalId` )::
|
||||
Specifies the composite-id key property name used by the audit table mappings.
|
||||
|
||||
`*org.hibernate.envers.find_by_revision_exact_match*` (default: `false` )::
|
||||
Specifies whether or not `AuditReader#find` methods which accept a revision-number argument are to find results based on fuzzy-match or exact-match behavior.
|
||||
+
|
||||
The old (legacy) behavior has always been to perform a fuzzy-match where these methods would return a match if any revision existed for the primary-key with a revision-number less-than or equal-to the revision method argument.
|
||||
This behavior is great when you want to find the snapshot of a non-related entity based on another entity's revision number.
|
||||
+
|
||||
The new (optional) behavior when this option is enabled forces the query to perform an exact-match instead.
|
||||
In order for these methods to return a non-`null` value, a revision entry must exist for the entity with the specified primary key and revision number; otherwise the result will be `null`.
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
The following configuration options have been added recently and should
|
||||
be regarded as experimental:
|
||||
The following configuration options have been added recently and should be regarded as experimental:
|
||||
|
||||
. `org.hibernate.envers.track_entities_changed_in_revision`
|
||||
. `org.hibernate.envers.using_modified_flag`
|
||||
. `org.hibernate.envers.modified_flag_suffix`
|
||||
. `org.hibernate.envers.modified_column_naming_strategy`
|
||||
. `org.hibernate.envers.original_id_prop_name`
|
||||
. `org.hibernate.envers.find_by_revision_exact_match`
|
||||
====
|
||||
|
||||
[[envers-additional-mappings]]
|
||||
|
@ -336,7 +349,7 @@ you can set the `@AuditOverride( forClass = SomeEntity.class, isAudited = true/f
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
The `@Audited` annotation also features an `auditParents` attribute but it's now deprecated in favor of `@AuditOverride`,
|
||||
The `@Audited` annotation also features an `auditParents` attribute but it's now deprecated in favor of `@AuditOverride`.
|
||||
====
|
||||
|
||||
[[envers-audit-strategy]]
|
||||
|
@ -356,7 +369,7 @@ IMPORTANT: These subqueries are notoriously slow and difficult to index.
|
|||
. The alternative is a validity audit strategy.
|
||||
This strategy stores the start-revision and the end-revision of audit information.
|
||||
For each row inserted, updated or deleted in an audited table, one or more rows are inserted in the audit tables, together with the start revision of its validity.
|
||||
But at the same time the end-revision field of the previous audit rows (if available) are set to this revision.
|
||||
But at the same time, the end-revision field of the previous audit rows (if available) is set to this revision.
|
||||
Queries on the audit information can then use 'between start and end revision' instead of subqueries as used by the default audit strategy.
|
||||
+
|
||||
The consequence of this strategy is that persisting audit information will be a bit slower because of the extra updates involved,
|
||||
|
@ -367,7 +380,7 @@ IMPORTANT: This can be improved even further by adding extra indexes.
|
|||
[[envers-audit-ValidityAuditStrategy]]
|
||||
==== Configuring the `ValidityAuditStrategy`
|
||||
|
||||
To better visualize how the `ValidityAuditStrategy`, consider the following exercise where
|
||||
To better visualize how the `ValidityAuditStrategy` works, consider the following exercise where
|
||||
we replay the previous audit logging example for the `Customer` entity.
|
||||
|
||||
First, you need to configure the `ValidityAuditStrategy`:
|
||||
|
@ -382,7 +395,7 @@ include::{sourcedir}/ValidityStrategyAuditTest.java[tags=envers-audited-validity
|
|||
====
|
||||
|
||||
If you're using the `persistence.xml` configuration file,
|
||||
then the mapping will looks as follows:
|
||||
then the mapping will look as follows:
|
||||
|
||||
[source, XML, indent=0]
|
||||
----
|
||||
|
@ -403,9 +416,9 @@ include::{extrasdir}/envers-audited-validity-mapping-example.sql[]
|
|||
----
|
||||
====
|
||||
|
||||
As you can see, the `REVEND` column is added as well as its Foreign key to the `REVINFO` table.
|
||||
As you can see, the `REVEND` column is added as well as its foreign key to the `REVINFO` table.
|
||||
|
||||
When rerunning thee previous `Customer` audit log queries against the `ValidityAuditStrategy`,
|
||||
When rerunning the previous `Customer` audit log queries against the `ValidityAuditStrategy`,
|
||||
we get the following results:
|
||||
|
||||
[[envers-audited-validity-rev1-example]]
|
||||
|
@ -419,7 +432,7 @@ include::{extrasdir}/envers-audited-validity-rev1-example.sql[]
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
Compared to the default strategy, the `ValidityAuditStrategy` generates simpler queries that can render better execution plans.
|
||||
Compared to the default strategy, the `ValidityAuditStrategy` generates simpler queries that can render better SQL execution plans.
|
||||
====
|
||||
|
||||
[[envers-revisionlog]]
|
||||
|
@ -430,15 +443,27 @@ When Envers starts a new revision, it creates a new revision entity which stores
|
|||
By default, that includes just:
|
||||
|
||||
revision number::
|
||||
An integral value (`int/Integer` or `long/Long`). Essentially the primary key of the revision
|
||||
An integral value (`int/Integer` or `long/Long`). Essentially, the primary key of the revision.
|
||||
+
|
||||
[WARNING]
|
||||
====
|
||||
A revision number value should **always** be increasing and never overflow.
|
||||
|
||||
The default implementations provided by Envers use an `int` data type which has an upper bound of `Integer.MAX_VALUE`.
|
||||
It is critical that users consider whether this upper bound is feasible for their application. If a large range is needed, consider using a custom revision entity mapping with a `long` data type.
|
||||
|
||||
In the event that the revision number reaches its upper bound, wrapping around and becoming negative, an `AuditException` will be thrown, causing the current transaction to be rolled back.
|
||||
This guarantees that the audit history remains in a valid state that can be queried by the Envers Query API.
|
||||
====
|
||||
|
||||
revision timestamp::
|
||||
Either a `long/Long` or `java.util.Date` value representing the instant at which the revision was made.
|
||||
When using a `java.util.Date`, instead of a `long/Long` for the revision timestamp, take care not to store it to a column data type which will loose precision.
|
||||
When using a `java.util.Date`, instead of a `long/Long` for the revision timestamp, take care not to store it to a column data type which will lose precision.
|
||||
|
||||
Envers handles this information as an entity.
|
||||
By default it uses its own internal class to act as the entity, mapped to the `REVINFO` table.
|
||||
You can, however, supply your own approach to collecting this information which might be useful to capture additional details such as who made a change
|
||||
or the ip address from which the request came.
|
||||
or the IP address from which the request came.
|
||||
There are two things you need to make this work:
|
||||
|
||||
. First, you will need to tell Envers about the entity you wish to use.
|
||||
|
@ -448,7 +473,7 @@ You can extend from `org.hibernate.envers.DefaultRevisionEntity`, if you wish, t
|
|||
+
|
||||
Simply add the custom revision entity as you do your normal entities and Envers will *find it*.
|
||||
+
|
||||
NOTE: It is an error for there to be multiple entities marked as `@org.hibernate.envers.RevisionEntity`
|
||||
NOTE: It is an error for there to be multiple entities marked as `@org.hibernate.envers.RevisionEntity`.
|
||||
|
||||
. Second, you need to tell Envers how to create instances of your revision entity which is handled by the
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/envers/RevisionListener.html#newRevision-java.lang.Object-[`newRevision( Object revisionEntity )`]
|
||||
|
@ -457,9 +482,9 @@ method of the `org.hibernate.envers.RevisionListener` interface.
|
|||
You tell Envers your custom `org.hibernate.envers.RevisionListener` implementation to use by specifying it on the `@org.hibernate.envers.RevisionEntity` annotation, using the value attribute.
|
||||
If your `RevisionListener` class is inaccessible from `@RevisionEntity` (e.g. it exists in a different module),
|
||||
set `org.hibernate.envers.revision_listener` property to its fully qualified class name.
|
||||
Class name defined by the configuration parameter overrides revision entity's value attribute.
|
||||
Class name defined by the configuration parameter overrides the revision entity's value attribute.
|
||||
|
||||
Considering we have a `CurrentUser` utility which stores the current logged user:
|
||||
Considering we have a `CurrentUser` utility which stores the currently logged user:
|
||||
|
||||
[[envers-revisionlog-CurrentUser-example]]
|
||||
.`CurrentUser` utility
|
||||
|
@ -527,7 +552,7 @@ As demonstrated by the example above, the username is properly set and propagate
|
|||
|
||||
[WARNING]
|
||||
====
|
||||
**This strategy is deprecated since version 5.2. The alternative is to use dependency injection offered as of version 5.3.**
|
||||
**This strategy is deprecated since version 5.2. The alternative is to use dependency injection offered as of version 5.3.**
|
||||
|
||||
An alternative method to using the `org.hibernate.envers.RevisionListener` is to instead call the
|
||||
[line-through]#https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/envers/AuditReader.html#getCurrentRevision-java.lang.Class-boolean-[`getCurrentRevision( Class<T> revisionEntityClass, boolean persist )`]#
|
||||
|
@ -545,7 +570,7 @@ The method accepts a `persist` parameter indicating whether the revision entity
|
|||
As of Hibernate Envers 5.3, dependency injection is now supported for a `RevisionListener`.
|
||||
|
||||
This feature is up to the various dependency frameworks, such as CDI and Spring, to supply the
|
||||
necessary implementation during Hibernate ORM bootstrap to support injection. If no qualifying
|
||||
necessary implementation during Hibernate ORM bootstrap to support injection. If no qualifying
|
||||
implementation is supplied, the `RevisionListener` will be constructed without injection.
|
||||
====
|
||||
|
||||
|
@ -553,7 +578,7 @@ implementation is supplied, the `RevisionListener` will be constructed without i
|
|||
=== Tracking entity names modified during revisions
|
||||
|
||||
By default, entity types that have been changed in each revision are not being tracked.
|
||||
This implies the necessity to query all tables storing audited data in order to retrieve changes made during specified revision.
|
||||
This implies the necessity to query all tables storing audited data in order to retrieve changes made during the specified revision.
|
||||
Envers provides a simple mechanism that creates `REVCHANGES` table which stores entity names of modified persistent objects.
|
||||
Single record encapsulates the revision identifier (foreign key to `REVINFO` table) and a string value.
|
||||
|
||||
|
@ -607,7 +632,7 @@ include::{extrasdir}/envers-tracking-modified-entities-revchanges-after-rename-e
|
|||
Users, that have chosen one of the approaches listed above,
|
||||
can retrieve all entities modified in a specified revision by utilizing API described in <<envers-tracking-modified-entities-queries>>.
|
||||
|
||||
Users are also allowed to implement custom mechanism of tracking modified entity types.
|
||||
Users are also allowed to implement custom mechanisms of tracking modified entity types.
|
||||
In this case, they shall pass their own implementation of `org.hibernate.envers.EntityTrackingRevisionListener`
|
||||
interface as the value of `@org.hibernate.envers.RevisionEntity` annotation.
|
||||
|
||||
|
@ -637,7 +662,7 @@ include::{sourcedir}/EntityTypeChangeAuditTrackingRevisionListenerTest.java[tags
|
|||
The `CustomTrackingRevisionEntity` contains a `@OneToMany` list of `ModifiedTypeRevisionEntity`
|
||||
|
||||
[[envers-tracking-modified-entities-revchanges-EntityType-example]]
|
||||
.The `EntityType` encapsulatets the entity type name before a class name modification
|
||||
.The `EntityType` encapsulates the entity type name before a class name modification
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -645,7 +670,7 @@ include::{sourcedir}/EntityTypeChangeAuditTrackingRevisionListenerTest.java[tags
|
|||
----
|
||||
====
|
||||
|
||||
Now, when fetching the `CustomTrackingRevisionEntity`, you cna get access to the previous entity class name.
|
||||
Now, when fetching the `CustomTrackingRevisionEntity`, you can get access to the previous entity class name.
|
||||
|
||||
[[envers-tracking-modified-entities-revchanges-query-example]]
|
||||
.Getting the `EntityType` through the `CustomTrackingRevisionEntity`
|
||||
|
@ -657,10 +682,10 @@ include::{sourcedir}/EntityTypeChangeAuditTrackingRevisionListenerTest.java[tags
|
|||
====
|
||||
|
||||
[[envers-tracking-properties-changes]]
|
||||
=== Tracking entity changes at property level
|
||||
=== Tracking entity changes at the property level
|
||||
|
||||
By default, the only information stored by Envers are revisions of modified entities.
|
||||
This approach lets user create audit queries based on historical values of entity properties.
|
||||
This approach lets users create audit queries based on historical values of entity properties.
|
||||
Sometimes it is useful to store additional metadata for each revision, when you are interested also in the type of changes, not only about the resulting values.
|
||||
|
||||
The feature described in <<envers-tracking-modified-entities-revchanges>> makes it possible to tell which entities were modified in a given revision.
|
||||
|
@ -668,20 +693,20 @@ The feature described in <<envers-tracking-modified-entities-revchanges>> makes
|
|||
The feature described here takes it one step further.
|
||||
_Modification Flags_ enable Envers to track which properties of audited entities were modified in a given revision.
|
||||
|
||||
Tracking entity changes at property level can be enabled by:
|
||||
Tracking entity changes at the property level can be enabled by:
|
||||
|
||||
. setting `org.hibernate.envers.global_with_modified_flag` configuration property to `true`.
|
||||
This global switch will cause adding modification flags to be stored for all audited properties of all audited entities.
|
||||
|
||||
. using `@Audited( withModifiedFlag=true )` on a property or on an entity.
|
||||
. using `@Audited( withModifiedFlag = true )` on a property or on an entity.
|
||||
|
||||
The trade-off coming with this functionality is an increased size of audit tables and a very little, almost negligible, performance drop during audit writes.
|
||||
This is due to the fact that every tracked property has to have an accompanying boolean column in the schema that stores information about the property modifications.
|
||||
Of course it is Envers job to fill these columns accordingly - no additional work by the developer is required.
|
||||
Of course, it is Envers' job to fill these columns accordingly - no additional work by the developer is required.
|
||||
Because of costs mentioned, it is recommended to enable the feature selectively, when needed with use of the granular configuration means described above.
|
||||
|
||||
[[envers-tracking-properties-changes-mapping-example]]
|
||||
.Mapping for tracking entity changes at property level
|
||||
.Mapping for tracking entity changes at the property level
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -697,7 +722,7 @@ include::{extrasdir}/envers-tracking-properties-changes-mapping-example.sql[]
|
|||
As you can see, every property features a `_MOD` column (e.g. `createdOn_MOD`) in the audit log.
|
||||
|
||||
[[envers-tracking-properties-changes-example]]
|
||||
.Tracking entity changes at property level example
|
||||
.Tracking entity changes at the property level example
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -712,6 +737,77 @@ include::{extrasdir}/envers-tracking-properties-changes-example.sql[]
|
|||
|
||||
To see how "Modified Flags" can be utilized, check out the very simple query API that uses them: <<envers-tracking-properties-changes-queries>>.
|
||||
|
||||
[[envers-tracking-properties-changes-strategy]]
|
||||
=== Selecting strategy for tracking property level changes
|
||||
|
||||
By default, Envers uses the `legacy` modified column naming strategy.
|
||||
This strategy is designed to add columns based on the following rule-set:
|
||||
|
||||
. If property is annotated with `@Audited` and the _modifiedColumnName_ attribute is specified, the column will directly be based on the supplied name.
|
||||
. If property is not annotated with `@Audited` or if no _modifiedColumnName_ attribute is given, the column will be named after the java class property, appended with the configured suffix, the default being `_MOD`.
|
||||
|
||||
While this strategy has no performance drawbacks, it does present concerns for users who prefer consistency without verbosity.
|
||||
Lets take the following entity mapping as an example.
|
||||
|
||||
```
|
||||
@Audited(withModifiedFlags = true)
|
||||
@Entity
|
||||
public class Customer {
|
||||
@Id
|
||||
private Integer id;
|
||||
@Column(name = "customer_name")
|
||||
private String name;
|
||||
}
|
||||
```
|
||||
|
||||
This mapping will actually lead to some inconsistent naming between columns, see below for how the model's name will be stored in `customer_name` but the modified column that tracks whether this column changes between revisions is named `name_MOD`.
|
||||
|
||||
```
|
||||
CREATE TABLE Customer_AUD (
|
||||
id bigint not null,
|
||||
REV integer not null,
|
||||
REVTYPE tinyint not null,
|
||||
customer_name varchar(255),
|
||||
name_MOD boolean,
|
||||
primary key(id, REV)
|
||||
)
|
||||
```
|
||||
|
||||
An additional strategy called `improved`, aims to address these inconsistent column naming concerns.
|
||||
This strategy uses the following rule-set:
|
||||
|
||||
. Property is a Basic type (Single Column valued property)
|
||||
.. Use the _modifiedColumnName_ directly if one is supplied on the property mapping
|
||||
.. Otherwise use the resolved ORM column name appended with the modified flag suffix configured value
|
||||
. Property is an Association (to-one mapping) with a Foreign Key using a single column
|
||||
.. Use the _modifiedColumnName_ directly if one is supplied on the property mapping
|
||||
.. Otherwise use the resolved ORM column name appended with the modified flag suffix configured value
|
||||
. Property is an Association (to-one mapping) with a Foreign Key using multiple columns
|
||||
.. Use the _modifiedColumnName_ directly if one is supplied on the property mapping
|
||||
.. Otherwise use the property name appended with the modified flag suffix configured value
|
||||
. Property is an Embeddable
|
||||
.. Use the _modifiedColumnName_ directly if one is supplied on the property mapping
|
||||
.. Otherwise use the property name appended with the modified flag suffix configured value
|
||||
|
||||
While using this strategy, the same `Customer` mapping will generate the following table schema:
|
||||
|
||||
```
|
||||
CREATE TABLE Customer_AUD (
|
||||
id bigint not null,
|
||||
REV integer not null,
|
||||
REVTYPE tinyint not null,
|
||||
customer_name varchar(255),
|
||||
customer_name_MOD boolean,
|
||||
primary key(id, REV)
|
||||
)
|
||||
```
|
||||
|
||||
When already using Envers in conjunction with the modified columns flag feature, it is advised not to enable the new strategy immediately as schema changes would be required.
|
||||
You will need to either migrate your existing schema manually to adhere to the rules above or use the explicit _modifiedColumnName_ attribute on the `@Audited` annotation for existing columns that use the feature.
|
||||
|
||||
To configure a custom strategy implementation or use the improved strategy, the configuration option `org.hibernate.envers.modified_column_naming_strategy` will need to be set.
|
||||
This option can be the fully qualified class name of a `ModifiedColumnNameStrategy` implementation or `legacy` or `improved` for either of the two provided implementations.
|
||||
|
||||
[[envers-queries]]
|
||||
=== Queries
|
||||
|
||||
|
@ -720,19 +816,19 @@ You can think of historic data as having two dimensions:
|
|||
horizontal:: The state of the database at a given revision. Thus, you can query for entities as they were at revision N.
|
||||
vertical:: The revisions, at which entities changed. Hence, you can query for revisions, in which a given entity changed.
|
||||
|
||||
The queries in Envers are similar to Hibernate Criteria queries, so if you are common with them, using Envers queries will be much easier.
|
||||
The queries in Envers are similar to Hibernate Criteria queries, so if you are familiar with them, using Envers queries will be much easier.
|
||||
|
||||
The main limitation of the current queries implementation is that you cannot traverse relations.
|
||||
You can only specify constraints on the ids of the related entities, and only on the "owning" side of the relation.
|
||||
This however will be changed in future releases.
|
||||
This, however, will be changed in future releases.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The queries on the audited data will be in many cases much slower than corresponding queries on "live" data,
|
||||
as, especially for the default audit strategy, they involve correlated subselects.
|
||||
|
||||
Queries are improved both in terms of speed and possibilities, when using the validity audit strategy,
|
||||
which stores both start and end revisions for entities. See <<envers-audit-ValidityAuditStrategy>>.
|
||||
Queries are improved both in terms of speed and possibilities when using the validity audit strategy,
|
||||
which stores both start and end revisions for entities. See <<envers-audit-ValidityAuditStrategy>> for a more detailed discussion.
|
||||
====
|
||||
|
||||
[[entities-at-revision]]
|
||||
|
@ -794,7 +890,7 @@ include::{sourcedir}/QueryAuditTest.java[tags=entities-filtering-by-entity-ident
|
|||
----
|
||||
====
|
||||
|
||||
Apart for strict equality matching, you can also use an `IN` clause to provide multiple entity identifiers:
|
||||
Apart from strict equality matching, you can also use an `IN` clause to provide multiple entity identifiers:
|
||||
|
||||
[[entities-in-clause-filtering-by-entity-identifier-example]]
|
||||
.Getting the `Customer` entities whose `address` identifier matches one of the given entity identifiers
|
||||
|
@ -844,10 +940,10 @@ You can add constraints to this query in the same way as to the previous one.
|
|||
|
||||
There are some additional possibilities:
|
||||
|
||||
. using `AuditEntity.revisionNumber()` you can specify constraints, projections and order on the revision number, in which the audited entity was modified
|
||||
. using `AuditEntity.revisionNumber()` you can specify constraints, projections and order on the revision number, in which the audited entity was modified.
|
||||
|
||||
. similarly, using `AuditEntity.revisionProperty( propertyName )` you can specify constraints, projections and order on a property of the revision entity,
|
||||
corresponding to the revision in which the audited entity was modified
|
||||
corresponding to the revision in which the audited entity was modified.
|
||||
|
||||
. `AuditEntity.revisionType()` gives you access as above to the type of the revision (`ADD`, `MOD`, `DEL`).
|
||||
|
||||
|
@ -862,7 +958,7 @@ include::{sourcedir}/QueryAuditTest.java[tags=revisions-of-entity-query-by-revis
|
|||
|
||||
The second additional feature you can use in queries for revisions is the ability to _maximize_/_minimize_ a property.
|
||||
|
||||
For example, if you want to select the smallest possibler revision at which the value of the `createdOn`
|
||||
For example, if you want to select the smallest possible revision at which the value of the `createdOn`
|
||||
attribute was larger than a given value,
|
||||
you can run the following query:
|
||||
|
||||
|
@ -872,7 +968,7 @@ you can run the following query:
|
|||
include::{sourcedir}/QueryAuditTest.java[tags=revisions-of-entity-query-minimize-example]
|
||||
----
|
||||
|
||||
The `minimize()` and `maximize()` methods return a criteria, to which you can add constraints,
|
||||
The `minimize()` and `maximize()` methods return a criterion, to which you can add constraints,
|
||||
which must be met by the entities with the _maximized_/_minimized_ properties.
|
||||
|
||||
You probably also noticed that there are two boolean parameters, passed when creating the query.
|
||||
|
@ -907,10 +1003,10 @@ In other words, the result set would contain a list of `Customer` instances, one
|
|||
hold the audited property data at the _maximum_ revision number for each `Customer` primary key.
|
||||
|
||||
[[envers-tracking-properties-changes-queries]]
|
||||
=== Querying for revisions of entity that modified a given property
|
||||
=== Querying for entity revisions that modified a given property
|
||||
|
||||
For the two types of queries described above it's possible to use special `Audit` criteria called `hasChanged()` and `hasNotChanged()`
|
||||
that makes use of the functionality described in <<envers-tracking-properties-changes>>.
|
||||
that make use of the functionality described in <<envers-tracking-properties-changes>>.
|
||||
|
||||
Let's have a look at various queries that can benefit from these two criteria.
|
||||
|
||||
|
@ -946,7 +1042,7 @@ Using this query we won't get all other revisions in which `lastName` wasn't tou
|
|||
From the SQL query you can see that the `lastName_MOD` column is being used in the WHERE clause,
|
||||
hence the aforementioned requirement for tracking modification flags.
|
||||
|
||||
Of course, nothing prevents user from combining `hasChanged` condition with some additional criteria.
|
||||
Of course, nothing prevents users from combining `hasChanged` condition with some additional criteria.
|
||||
|
||||
[[envers-tracking-properties-changes-queries-hasChanged-and-hasNotChanged-example]]
|
||||
.Getting all `Customer` revisions for which the `lastName` attribute has changed and the `firstName` attribute has not changed
|
||||
|
@ -998,17 +1094,17 @@ You can now obtain this information easily by using the following query:
|
|||
[source, JAVA, indent=0]
|
||||
----
|
||||
List results = AuditReaderFactory.get( entityManager )
|
||||
.createQuery()
|
||||
.forRevisionsOfEntityWithChanges( Customer.class, false )
|
||||
.add( AuditEntity.id().eq( 1L ) )
|
||||
.getResultList();
|
||||
.createQuery()
|
||||
.forRevisionsOfEntityWithChanges( Customer.class, false )
|
||||
.add( AuditEntity.id().eq( 1L ) )
|
||||
.getResultList();
|
||||
|
||||
for ( Object entry : results ) {
|
||||
final Object[] array = (Object[]) entry;
|
||||
final Set<String> propertiesChanged = (Set<String>) array[3];
|
||||
for ( String propertyName : propertiesChanged ) {
|
||||
final Object[] array = (Object[]) entry;
|
||||
final Set<String> propertiesChanged = (Set<String>) array[3];
|
||||
for ( String propertyName : propertiesChanged ) {
|
||||
/* Do something useful with the modified property `propertyName` */
|
||||
}
|
||||
}
|
||||
}
|
||||
----
|
||||
====
|
||||
|
@ -1028,23 +1124,28 @@ This basic query allows retrieving entity names and corresponding Java classes c
|
|||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/EntityTypeChangeAuditTest.java[tags=envers-tracking-modified-entities-queries-example]
|
||||
include::{sourcedir}/EntityTypeChangeAuditTest.java[tags=envers-tracking-modified-entities-queries-example1]
|
||||
----
|
||||
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/EntityTypeChangeAuditTest.java[tags=envers-tracking-modified-entities-queries-example2]
|
||||
----
|
||||
====
|
||||
|
||||
Other queries (also accessible from `org.hibernate.envers.CrossTypeRevisionChangesReader`):
|
||||
|
||||
`List<Object> findEntities( Number )`::
|
||||
`List<Object> findEntities(Number)`::
|
||||
Returns snapshots of all audited entities changed (added, updated and removed) in a given revision.
|
||||
Executes `N+1` SQL queries, where `N` is a number of different entity classes modified within specified revision.
|
||||
Executes `N + 1` SQL queries, where `N` is a number of different entity classes modified within specified revision.
|
||||
|
||||
`List<Object> findEntities( Number, RevisionType )`::
|
||||
`List<Object> findEntities(Number, RevisionType)`::
|
||||
Returns snapshots of all audited entities changed (added, updated or removed) in a given revision filtered by modification type.
|
||||
Executes `N+1` SQL queries, where `N` is a number of different entity classes modified within specified revision.
|
||||
Executes `N + 1` SQL queries, where `N` is a number of different entity classes modified within specified revision.
|
||||
|
||||
`Map<RevisionType, List<Object>> findEntitiesGroupByRevisionType( Number )`::
|
||||
`Map<RevisionType, List<Object>> findEntitiesGroupByRevisionType(Number)`::
|
||||
Returns a map containing lists of entity snapshots grouped by modification operation (e.g. addition, update and removal).
|
||||
Executes `3N+1` SQL queries, where `N` is a number of different entity classes modified within specified revision.
|
||||
Executes `3N + 1` SQL queries, where `N` is a number of different entity classes modified within specified revision.
|
||||
|
||||
[[envers-querying-entity-relation-joins]]
|
||||
=== Querying for entities using entity relation joins
|
||||
|
@ -1059,7 +1160,7 @@ to traverse entity relations through an audit query, you must use the relation t
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
Relation joins can be applied to `many-to-one` and `many-to-one` mappings only when using `JoinType.LEFT` or `JoinType.INNER`.
|
||||
Relation joins can be applied to `many-to-one` and `one-to-one` mappings only when using `JoinType.LEFT` or `JoinType.INNER`.
|
||||
====
|
||||
|
||||
The basis for creating an entity relation join query is as follows:
|
||||
|
@ -1084,7 +1185,7 @@ include::{sourcedir}/QueryAuditTest.java[tags=envers-querying-entity-relation-le
|
|||
|
||||
Like any other query, constraints may be added to restrict the results.
|
||||
|
||||
For example, to find a `Customers` entities at a given revision whose addresses are in `România`,
|
||||
For example, to find all `Customer` entities at a given revision whose addresses are in `România`,
|
||||
you can use the following query:
|
||||
|
||||
[[envers-querying-entity-relation-join-restriction]]
|
||||
|
@ -1165,7 +1266,7 @@ include::{extrasdir}/envers-querying-entity-relation-nested-join-multiple-restri
|
|||
|
||||
Lastly, this example illustrates how related entity properties can be compared in a single constraint.
|
||||
|
||||
Assuming, the `Customer` and the `Address` were previously changed as follows:
|
||||
Assuming the `Customer` and the `Address` were previously changed as follows:
|
||||
|
||||
[[envers-querying-entity-relation-nested-join-multiple-restrictions-combined-entities]]
|
||||
.Changing the `Address` to match the `Country` name
|
||||
|
@ -1196,7 +1297,7 @@ include::{extrasdir}/envers-querying-entity-relation-nested-join-multiple-restri
|
|||
[[envers-querying-revision-entities]]
|
||||
=== Querying for revision information without loading entities
|
||||
|
||||
It may sometimes be useful to load information about revisions to find out who performed specific revisions or
|
||||
Sometimes, it may be useful to load information about revisions to find out who performed specific revisions or
|
||||
to know what entity names were modified but the change log about the related audited entities isn't needed.
|
||||
This API allows an efficient way to get the revision information entity log without instantiating the actual
|
||||
entities themselves.
|
||||
|
@ -1213,7 +1314,7 @@ AuditQuery query = getAuditReader().createQuery()
|
|||
This query will return all revision information entities for revisions between 1 and 25 including those which are
|
||||
related to deletions. If deletions are not of interest, you would pass `false` as the second argument.
|
||||
|
||||
Note this this query uses the `DefaultRevisionEntity` class type. The class provided will vary depending on the
|
||||
Note that this query uses the `DefaultRevisionEntity` class type. The class provided will vary depending on the
|
||||
configuration properties used to configure Envers or if you supply your own revision entity. Typically users who
|
||||
will use this API will likely be providing a custom revision entity implementation to obtain custom information
|
||||
being maintained per revision.
|
||||
|
@ -1243,7 +1344,7 @@ To use customized Envers event listeners, the following steps are needed:
|
|||
[NOTE]
|
||||
====
|
||||
The use of `hibernate.listeners.envers.autoRegister` has been deprecated.
|
||||
A new configuration setting `hibernate.envers.autoRegisterListeners` should be used instead.
|
||||
The new `hibernate.envers.autoRegisterListeners` configuration setting should be used instead.
|
||||
====
|
||||
|
||||
[[envers-schema]]
|
||||
|
@ -1255,35 +1356,35 @@ but this can be overridden by specifying a different suffix/prefix in the config
|
|||
|
||||
The audit table contains the following columns:
|
||||
|
||||
id:: `id` of the original entity (this can be more then one column in the case of composite primary keys)
|
||||
id:: `id` of the original entity (this can be more than one column in the case of composite primary keys).
|
||||
revision number:: an integer, which matches to the revision number in the revision entity table.
|
||||
revision type:: The `org.hibernate.envers.RevisionType` enumeration ordinal stating if the change represent an INSERT, UPDATE or DELETE.
|
||||
audited fields:: properties from the original entity being audited
|
||||
revision type:: The `org.hibernate.envers.RevisionType` enumeration ordinal stating if the change represents an INSERT, UPDATE or DELETE.
|
||||
audited fields:: properties from the original entity being audited.
|
||||
|
||||
The primary key of the audit table is the combination of the original id of the entity and the revision number,
|
||||
so there can be at most one historic entry for a given entity instance at a given revision.
|
||||
|
||||
The current entity data is stored in the original table and in the audit table.
|
||||
This is a duplication of data, however as this solution makes the query system much more powerful, and as memory is cheap, hopefully this won't be a major drawback for the users.
|
||||
This is a duplication of data, however, as this solution makes the query system much more powerful, and as memory is cheap, hopefully, this won't be a major drawback for the users.
|
||||
|
||||
A row in the audit table with entity id `ID`, revision `N` and data `D` means: entity with id `ID` has data `D` from revision `N` upwards.
|
||||
A row in the audit table with entity id `ID`, revision `N`, and data `D` means: entity with id `ID` has data `D` from revision `N` upwards.
|
||||
Hence, if we want to find an entity at revision `M`, we have to search for a row in the audit table, which has the revision number smaller or equal to `M`, but as large as possible.
|
||||
If no such row is found, or a row with a "deleted" marker is found, it means that the entity didn't exist at that revision.
|
||||
|
||||
The "revision type" field can currently have three values: `0`, `1` and `2`, which means `ADD`, `MOD` and `DEL`, respectively.
|
||||
The "revision type" field can currently have three values: `0`, `1` and `2`, which means `ADD`, `MOD`, and `DEL`, respectively.
|
||||
A row with a revision of type `DEL` will only contain the id of the entity and no data (all fields `NULL`), as it only serves as a marker saying "this entity was deleted at that revision".
|
||||
|
||||
Additionally, there is a revision entity table which contains the information about the global revision.
|
||||
By default the generated table is named `REVINFO` and contains just two columns: `ID` and `TIMESTAMP`.
|
||||
By default, the generated table is named `REVINFO` and contains just two columns: `ID` and `TIMESTAMP`.
|
||||
A row is inserted into this table on each new revision, that is, on each commit of a transaction, which changes audited data.
|
||||
The name of this table can be configured, the name of its columns as well as adding additional columns can be achieved as discussed in <<envers-revisionlog>>.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
While global revisions are a good way to provide correct auditing of relations, some people have pointed out that this may be a bottleneck in systems, where data is very often modified.
|
||||
While global revisions are a good way to provide correct auditing of relations, some people have pointed out that this may be a bottleneck in systems where data is very often modified.
|
||||
|
||||
One viable solution is to introduce an option to have an entity "locally revisioned", that is revisions would be created for it independently.
|
||||
This woulld not enable correct versioning of relations, but it would work without the `REVINFO` table.
|
||||
This would not enable correct versioning of relations, but it would work without the `REVINFO` table.
|
||||
|
||||
Another possibility is to introduce a notion of "revisioning groups", which would group entities sharing the same revision numbering.
|
||||
Each such group would have to consist of one or more strongly connected components belonging to the entity graph induced by relations between entities.
|
||||
|
@ -1297,7 +1398,7 @@ Your opinions on the subject are very welcome on the forum.
|
|||
If you would like to generate the database schema file with Hibernate,
|
||||
you simply need to use the hbm2ddl tool.
|
||||
|
||||
This task will generate the definitions of all entities, both of which are audited by Envers and those which are not.
|
||||
This task will generate the definitions of all entities, both of those which are audited by Envers and those which are not.
|
||||
|
||||
See the <<chapters/schema/Schema.adoc#schema-generation, Schema generation>> chapter for more info.
|
||||
|
||||
|
@ -1323,19 +1424,19 @@ include::{extrasdir}/envers-generateschema-example.sql[]
|
|||
==== What isn't and will not be supported
|
||||
|
||||
Bags are not supported because they can contain non-unique elements.
|
||||
Persisting, a bag of `String`s violates the relational database principle that each table is a set of tuples.
|
||||
Persisting a bag of `String`s violates the relational database principle that each table is a set of tuples.
|
||||
|
||||
In case of bags, however (which require a join table), if there is a duplicate element, the two tuples corresponding to the elements will be the same.
|
||||
Hibernate allows this, however Envers (or more precisely: the database connector) will throw an exception when trying to persist two identical elements because of a unique constraint violation.
|
||||
Although Hibernate allows this, Envers (or more precisely the database connector) will throw an exception when trying to persist two identical elements because of a unique constraint violation.
|
||||
|
||||
There are at least two ways out if you need bag semantics:
|
||||
|
||||
. use an indexed collection, with the `@javax.persistence.OrderColumn` annotation
|
||||
. use an indexed collection, with the `@javax.persistence.OrderColumn` annotation.
|
||||
. provide a unique id for your elements with the `@CollectionId` annotation.
|
||||
|
||||
==== What isn't and _will_ be supported
|
||||
|
||||
. Bag style collections with a `@CollectionId` identifier column (see https://hibernate.atlassian.net/browse/HHH-3950[HHH-3950]).
|
||||
* Bag style collections with a `@CollectionId` identifier column (see https://hibernate.atlassian.net/browse/HHH-3950[HHH-3950]).
|
||||
|
||||
=== `@OneToMany` with `@JoinColumn`
|
||||
|
||||
|
@ -1344,12 +1445,12 @@ Envers, however, has to do this so that when you read the revisions in which the
|
|||
|
||||
To be able to name the additional join table, there is a special annotation: `@AuditJoinTable`, which has similar semantics to JPA `@JoinTable`.
|
||||
|
||||
One special case are relations mapped with `@OneToMany` with `@JoinColumn` on the one side, and `@ManyToOne` and `@JoinColumn( insertable=false, updatable=false`) on the many side.
|
||||
One special case is to have relations mapped with `@OneToMany` with `@JoinColumn` on the one side, and `@ManyToOne` and `@JoinColumn( insertable = false, updatable = false`) on the many side.
|
||||
Such relations are, in fact, bidirectional, but the owning side is the collection.
|
||||
|
||||
To properly audit such relations with Envers, you can use the `@AuditMappedBy` annotation.
|
||||
It enables you to specify the reverse property (using the `mappedBy` element).
|
||||
In case of indexed collections, the index column must also be mapped in the referenced entity (using `@Column( insertable=false, updatable=false )`, and specified using `positionMappedBy`.
|
||||
In case of indexed collections, the index column must also be mapped in the referenced entity (using `@Column( insertable = false, updatable = false )`, and specified using `positionMappedBy`.
|
||||
This annotation will affect only the way Envers works.
|
||||
Please note that the annotation is experimental and may change in the future.
|
||||
|
||||
|
@ -1363,14 +1464,14 @@ Because audit tables tend to grow indefinitely, they can quickly become really l
|
|||
When the audit tables have grown to a certain limit (varying per RDBMS and/or operating system) it makes sense to start using table partitioning.
|
||||
SQL table partitioning offers a lot of advantages including, but certainly not limited to:
|
||||
|
||||
. Improved query performance by selectively moving rows to various partitions (or even purging old rows)
|
||||
. Improved query performance by selectively moving rows to various partitions (or even purging old rows).
|
||||
. Faster data loads, index creation, etc.
|
||||
|
||||
[[envers-partitioning-columns]]
|
||||
=== Suitable columns for audit table partitioning
|
||||
|
||||
Generally, SQL tables must be partitioned on a column that exists within the table.
|
||||
As a rule it makes sense to use either the _end revision_ or the _end revision timestamp_ column for partitioning of audit tables.
|
||||
As a rule, it makes sense to use either the _end revision_ or the _end revision timestamp_ column for partitioning of audit tables.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
|
@ -1418,11 +1519,11 @@ Currently, the salary table contains the following rows for a certain person X:
|
|||
|==================
|
||||
|
||||
The salary for the current fiscal year (2010) is unknown.
|
||||
The agency requires that all changes in registered salaries for a fiscal year are recorded (i.e. an audit trail).
|
||||
The agency requires that all changes in registered salaries for a fiscal year are recorded (i.e., an audit trail).
|
||||
The rationale behind this is that decisions made at a certain date are based on the registered salary at that time.
|
||||
And at any time it must be possible reproduce the reason why a certain decision was made at a certain date.
|
||||
And at any time it must be possible to reproduce the reason why a certain decision was made at a certain date.
|
||||
|
||||
The following audit information is available, sorted on in order of occurrence:
|
||||
The following audit information is available, sorted in order of occurrence:
|
||||
|
||||
.Salaries - audit table
|
||||
[width="100%",cols="20%,20%,20%,20%,20%",options="header",]
|
||||
|
@ -1442,17 +1543,17 @@ The following audit information is available, sorted on in order of occurrence:
|
|||
|
||||
To partition this data, the _level of relevancy_ must be defined. Consider the following:
|
||||
|
||||
. For fiscal year 2006 there is only one revision.
|
||||
. For the fiscal year 2006, there is only one revision.
|
||||
It has the oldest _revision timestamp_ of all audit rows,
|
||||
but should still be regarded as relevant because it's the latest modification for this fiscal year in the salary table (its _end revision timestamp_ is null).
|
||||
+
|
||||
Also, note that it would be very unfortunate if in 2011 there would be an update of the salary for fiscal year 2006 (which is possible in until at least 10 years after the fiscal year),
|
||||
Also, note that it would be very unfortunate if in 2011 there would be an update of the salary for the fiscal year 2006 (which is possible until at least 10 years after the fiscal year),
|
||||
and the audit information would have been moved to a slow disk (based on the age of the __revision timestamp__).
|
||||
Remember that, in this case, Envers will have to update the _end revision timestamp_ of the most recent audit row.
|
||||
. There are two revisions in the salary of fiscal year 2007 which both have nearly the same _revision timestamp_ and a different __end revision timestamp__.
|
||||
. There are two revisions in the salary of the fiscal year 2007 which both have nearly the same _revision timestamp_ and a different __end revision timestamp__.
|
||||
|
||||
On first sight, it is evident that the first revision was a mistake and probably not relevant.
|
||||
The only relevant revision for 2007 is the one with _end revision timestamp_ null.
|
||||
The only relevant revision for 2007 is the one with _end revision timestamp_ value of null.
|
||||
|
||||
Based on the above, it is evident that only the _end revision timestamp_ is suitable for audit table partitioning.
|
||||
The _revision timestamp_ is not suitable.
|
||||
|
@ -1473,15 +1574,15 @@ the audit row will remain in the same partition (the 'extension bucket').
|
|||
|
||||
And sometime in 2011, the last partition (or 'extension bucket') is split into two new partitions:
|
||||
|
||||
. _end revision timestamp_ year = 2010:: This partition contains audit data that is potentially relevant (in 2011).
|
||||
. _end revision timestamp_ year >= 2011 or null:: This partition contains the most interesting audit data and is the new 'extension bucket'.
|
||||
. _end revision timestamp_ year = 2010: This partition contains audit data that is potentially relevant (in 2011).
|
||||
. _end revision timestamp_ year >= 2011 or null: This partition contains the most interesting audit data and is the new 'extension bucket'.
|
||||
|
||||
[[envers-links]]
|
||||
=== Envers links
|
||||
|
||||
. http://hibernate.org[Hibernate main page]
|
||||
. http://community.jboss.org/en/envers?view=discussions[Forum]
|
||||
. http://hibernate.org/community/[Forum]
|
||||
. https://hibernate.atlassian.net/[JIRA issue tracker] (when adding issues concerning Envers, be sure to select the "envers" component!)
|
||||
. https://hibernate.hipchat.com/chat/room/1238636[HipChat channel]
|
||||
. https://hibernate.zulipchat.com/#narrow/stream/132096-hibernate-user[Zulip channel]
|
||||
. https://community.jboss.org/wiki/EnversFAQ[FAQ]
|
||||
|
||||
|
|
|
@ -25,7 +25,7 @@ include::{sourcedir}/InterceptorTest.java[tags=events-interceptors-example]
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
You can either implement `Interceptor` directly or extend `org.hibernate.EmptyInterceptor`.
|
||||
You can either implement `Interceptor` directly or extend the `org.hibernate.EmptyInterceptor` base class.
|
||||
====
|
||||
|
||||
An Interceptor can be either `Session`-scoped or `SessionFactory`-scoped.
|
||||
|
@ -42,7 +42,7 @@ include::{sourcedir}/InterceptorTest.java[tags=events-interceptors-session-scope
|
|||
|
||||
A `SessionFactory`-scoped interceptor is registered with the `Configuration` object prior to building the `SessionFactory`.
|
||||
Unless a session is opened explicitly specifying the interceptor to use, the `SessionFactory`-scoped interceptor will be applied to all sessions opened from that `SessionFactory`.
|
||||
`SessionFactory`-scoped interceptors must be thread safe.
|
||||
`SessionFactory`-scoped interceptors must be thread-safe.
|
||||
Ensure that you do not store session-specific states since multiple sessions will use this interceptor potentially concurrently.
|
||||
|
||||
[[events-interceptors-session-factory-scope-example]]
|
||||
|
@ -63,12 +63,12 @@ Many methods of the `Session` interface correlate to an event type.
|
|||
The full range of defined event types is declared as enum values on `org.hibernate.event.spi.EventType`.
|
||||
When a request is made of one of these methods, the Session generates an appropriate event and passes it to the configured event listener(s) for that type.
|
||||
|
||||
Applications are free to implement a customization of one of the listener interfaces (i.e., the `LoadEvent` is processed by the registered implementation of the `LoadEventListener` interface), in which case their implementation would
|
||||
be responsible for processing any `load()` requests made of the `Session`.
|
||||
Applications can customize the listener interfaces (i.e., the `LoadEvent` is processed by the registered implementation of the `LoadEventListener` interface), in which case their implementations would
|
||||
be responsible for processing the `load()` requests made of the `Session`.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The listeners should be considered stateless; they are shared between requests, and should not save any state as instance variables.
|
||||
The listeners should be considered stateless. They are shared between requests, and should not save any state as instance variables.
|
||||
====
|
||||
|
||||
A custom listener implements the appropriate interface for the event it wants to process and/or extend one of the convenience base classes
|
||||
|
@ -88,13 +88,13 @@ include::{sourcedir}/ListenerTest.java[tags=events-interceptors-load-listener-ex
|
|||
[[events-mixing-events-and-interceptors]]
|
||||
=== Mixing Events and Interceptors
|
||||
|
||||
When you want to customize the entity state transition behavior, you have to options:
|
||||
When you want to customize the entity state transition behavior, you have two options:
|
||||
|
||||
. you provide a custom `Interceptor`, which is taken into consideration by the default Hibernate event listeners.
|
||||
For example, the `Interceptor#onSave()` method is invoked by Hibernate `AbstractSaveEventListener`.
|
||||
Or, the `Interceptor#onLoad()` is called by the `DefaultPreLoadEventListener`.
|
||||
. you can replace any given default event listener with your own implementation.
|
||||
When doing this, you should probably extend the default listeners because otherwise you'd have to take care of all the low-level entity state transition logic.
|
||||
When doing this, you should probably extend the default listeners because otherwise, you'd have to take care of all the low-level entity state transition logic.
|
||||
For example, if you replace the `DefaultPreLoadEventListener` with your own implementation, then, only if you call the `Interceptor#onLoad()` method explicitly, you can mix the custom load event listener with a custom Hibernate interceptor.
|
||||
|
||||
[[events-declarative-security]]
|
||||
|
@ -140,7 +140,7 @@ JPA also defines a more limited set of callbacks through annotations.
|
|||
|
||||
There are two available approaches defined for specifying callback handling:
|
||||
|
||||
* The first approach is to annotate methods on the entity itself to receive notification of particular entity life cycle event(s).
|
||||
* The first approach is to annotate methods on the entity itself to receive notifications of a particular entity lifecycle event(s).
|
||||
* The second is to use a separate entity listener class.
|
||||
An entity listener is a stateless class with a no-arg constructor.
|
||||
The callback annotations are placed on a method of this class instead of the entity class.
|
||||
|
@ -173,7 +173,7 @@ When that is the case, the defined order of execution is well defined by the JPA
|
|||
* Any default listeners associated with the entity are invoked first, in the order they were specified in the XML. See the `javax.persistence.ExcludeDefaultListeners` annotation.
|
||||
* Next, entity listener class callbacks associated with the entity hierarchy are invoked, in the order they are defined in the `EntityListeners`.
|
||||
If multiple classes in the entity hierarchy define entity listeners, the listeners defined for a superclass are invoked before the listeners defined for its subclasses.
|
||||
See the `javax.persistence.ExcludeSuperclassListener`s annotation.
|
||||
See the ``javax.persistence.ExcludeSuperclassListener``'s annotation.
|
||||
* Lastly, callback methods defined on the entity hierarchy are invoked.
|
||||
If a callback type is annotated on both an entity and one or more of its superclasses without method overriding, both would be called, the most general superclass first.
|
||||
An entity class is also allowed to override a callback method defined in a superclass in which case the super callback would not get invoked; the overriding method would get invoked provided it is annotated.
|
||||
|
@ -185,7 +185,7 @@ The JPA specification allows you to define a default entity listener which is go
|
|||
Default entity listeners can only be defined in XML mapping files.
|
||||
|
||||
[[events-default-listener-mapping-example]]
|
||||
.Default event listner mapping
|
||||
.Default event listener mapping
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -213,7 +213,7 @@ include::{sourcedir}/DefaultEntityListenerTest.java[tags=events-default-listener
|
|||
When persisting a `Person` or `Book` entity, the `createdOn` is going to be set by the `onPersist` method of the `DefaultEntityListener`.
|
||||
|
||||
[[events-default-listener-persist-example]]
|
||||
.Default event listner persist event
|
||||
.Default event listener persist event
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -229,7 +229,7 @@ include::{extrasdir}/events-default-listener-persist-example.sql[]
|
|||
When updating a `Person` or `Book` entity, the `updatedOn` is going to be set by the `onUpdate` method of the `DefaultEntityListener`.
|
||||
|
||||
[[events-default-listener-update-example]]
|
||||
.Default event listner update event
|
||||
.Default event listener update event
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -247,14 +247,14 @@ include::{extrasdir}/events-default-listener-update-example.sql[]
|
|||
|
||||
If you already registered a default entity listener, but you don't want to apply it to a particular entity,
|
||||
you can use the
|
||||
http://docs.oracle.com/javaee/7/api/javax/persistence/ExcludeDefaultListeners.html[`@ExcludeDefaultListeners`] and
|
||||
http://docs.oracle.com/javaee/7/api/javax/persistence/ExcludeSuperclassListeners.html[`@ExcludeSuperclassListeners`] JPA annotations.
|
||||
{jpaJavadocUrlPrefix}ExcludeDefaultListeners.html[`@ExcludeDefaultListeners`] and
|
||||
{jpaJavadocUrlPrefix}ExcludeSuperclassListeners.html[`@ExcludeSuperclassListeners`] JPA annotations.
|
||||
|
||||
`@ExcludeDefaultListeners` instructs the current class to ignore the default entity listeners for the current entity
|
||||
while `@ExcludeSuperclassListeners` is used to ignore the default entity listeners propagated to the `BaseEntity` super-class.
|
||||
|
||||
[[events-exclude-default-listener-mapping-example]]
|
||||
.Exclude default event listner mapping
|
||||
.Exclude default event listener mapping
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -267,7 +267,7 @@ the `createdOn` is not going to be set by the `onPersist` method of the `Default
|
|||
because the `Publisher` entity was marked with the `@ExcludeDefaultListeners` and `@ExcludeSuperclassListeners` annotations.
|
||||
|
||||
[[events-exclude-default-listener-persist-example]]
|
||||
.Excluding default event listner events
|
||||
.Excluding default event listener events
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
|
|
@ -8,7 +8,7 @@ Tuning how an application does fetching is one of the biggest factors in determi
|
|||
Fetching too much data, in terms of width (values/columns) and/or depth (results/rows),
|
||||
adds unnecessary overhead in terms of both JDBC communication and ResultSet processing.
|
||||
Fetching too little data might cause additional fetching to be needed.
|
||||
Tuning how an application fetches data presents a great opportunity to influence the application overall performance.
|
||||
Tuning how an application fetches data presents a great opportunity to influence the overall application performance.
|
||||
|
||||
[[fetching-basics]]
|
||||
=== The basics
|
||||
|
@ -20,14 +20,14 @@ The concept of fetching breaks down into two different questions.
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
"now" is generally termed eager or immediate. "later" is generally termed lazy or delayed.
|
||||
"Now" is generally termed eager or immediate while "later" is generally termed lazy or delayed.
|
||||
====
|
||||
|
||||
There are a number of scopes for defining fetching:
|
||||
|
||||
_static_::
|
||||
Static definition of fetching strategies is done in the mappings.
|
||||
The statically-defined fetch strategies is used in the absence of any dynamically defined strategies
|
||||
The statically-defined fetch strategies are used in the absence of any dynamically defined strategies.
|
||||
SELECT:::
|
||||
Performs a separate SQL select to load the data. This can either be EAGER (the second select is issued immediately) or LAZY (the second select is delayed until the data is needed).
|
||||
This is the strategy generally termed N+1.
|
||||
|
@ -40,13 +40,13 @@ _static_::
|
|||
Performs a separate SQL select to load associated data based on the SQL restriction used to load the owner.
|
||||
Again, this can either be EAGER (the second select is issued immediately) or LAZY (the second select is delayed until the data is needed).
|
||||
_dynamic_ (sometimes referred to as runtime)::
|
||||
Dynamic definition is really use-case centric. There are multiple ways to define dynamic fetching:
|
||||
_fetch profiles_::: defined in mappings, but can be enabled/disabled on the `Session`.
|
||||
HQL/JPQL::: and both Hibernate and JPA Criteria queries have the ability to specify fetching, specific to said query.
|
||||
entity graphs::: Starting in Hibernate 4.2 (JPA 2.1) this is also an option.
|
||||
The dynamic definition is really use-case centric. There are multiple ways to define dynamic fetching:
|
||||
fetch profiles::: defined in mappings, but can be enabled/disabled on the `Session`.
|
||||
HQL / JPQL::: both Hibernate and JPA Criteria queries have the ability to specify fetching, specific to said query.
|
||||
entity graphs::: starting in Hibernate 4.2 (JPA 2.1), this is also an option.
|
||||
|
||||
[[fetching-direct-vs-query]]
|
||||
=== Direct fetching vs entity queries
|
||||
=== Direct fetching vs. entity queries
|
||||
|
||||
To see the difference between direct fetching and entity queries in regard to eagerly fetched associations, consider the following entities:
|
||||
|
||||
|
@ -101,7 +101,7 @@ so Hibernate requires a secondary select to ensure that the EAGER association is
|
|||
[IMPORTANT]
|
||||
====
|
||||
If you forget to JOIN FETCH all EAGER associations, Hibernate is going to issue a secondary select for each and every one of those
|
||||
which, in turn, can lean to N+1 query issues.
|
||||
which, in turn, can lead to N+1 query issues.
|
||||
|
||||
For this reason, you should prefer LAZY associations.
|
||||
====
|
||||
|
@ -122,8 +122,10 @@ include::{sourcedir}/FetchingTest.java[tags=fetching-strategies-domain-model-exa
|
|||
|
||||
[IMPORTANT]
|
||||
====
|
||||
The Hibernate recommendation is to statically mark all associations lazy and to use dynamic fetching strategies for eagerness.
|
||||
This is unfortunately at odds with the JPA specification which defines that all one-to-one and many-to-one associations should be eagerly fetched by default.
|
||||
The Hibernate recommendation is to statically mark all associations lazy and to use dynamic fetching strategies for eagerness.
|
||||
|
||||
This is unfortunately at odds with the JPA specification which defines that all one-to-one and many-to-one associations should be eagerly fetched by default.
|
||||
|
||||
Hibernate, as a JPA provider, honors that default.
|
||||
====
|
||||
|
||||
|
@ -186,8 +188,15 @@ In both cases, this resolves to exactly one database query to get all that infor
|
|||
[[fetching-strategies-dynamic-fetching-entity-graph]]
|
||||
=== Dynamic fetching via JPA entity graph
|
||||
|
||||
JPA 2.1 introduced entity graphs so the application developer has more control over fetch plans.
|
||||
JPA 2.1 introduced ``entity graph`` so the application developer has more control over fetch plans. It has two modes to choose from:
|
||||
|
||||
fetch graph:::
|
||||
In this case, all attributes specified in the entity graph will be treated as FetchType.EAGER, and all attributes not specified will *ALWAYS* be treated as FetchType.LAZY.
|
||||
|
||||
load graph:::
|
||||
In this case, all attributes specified in the entity graph will be treated as FetchType.EAGER, but attributes not specified use their static mapping specification.
|
||||
|
||||
Below is a `fetch graph` dynamic fetching example:
|
||||
[[fetching-strategies-dynamic-fetching-entity-graph-example]]
|
||||
.Fetch graph example
|
||||
====
|
||||
|
@ -204,21 +213,20 @@ include::{sourcedir}/GraphFetchingTest.java[tags=fetching-strategies-dynamic-fet
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
Although the JPA standard specifies that you can override an EAGER fetching association at runtime using the `javax.persistence.fetchgraph` hint,
|
||||
currently, Hibernate does not implement this feature, so EAGER associations cannot be fetched lazily.
|
||||
For more info, check out the https://hibernate.atlassian.net/browse/HHH-8776[HHH-8776] Jira issue.
|
||||
|
||||
When executing a JPQL query, if an EAGER association is omitted, Hibernate will issue a secondary select for every association needed to be fetched eagerly,
|
||||
which can lead dto N+1 query issues.
|
||||
which can lead to N+1 query issues.
|
||||
|
||||
For this reason, it's better to use LAZY associations, and only fetch them eagerly on a per-query basis.
|
||||
====
|
||||
|
||||
[[fetching-strategies-dynamic-fetching-entity-subgraph]]
|
||||
==== JPA entity subgraphs
|
||||
An EntityGraph is the root of a "load plan" and must correspond to an EntityType.
|
||||
|
||||
An entity graph specifies which attributes to be fetched, but it limited to a single entity only.
|
||||
To fetch associations from a child entity, you need to use the http://docs.oracle.com/javaee/7/api/javax/persistence/NamedSubgraph.html[`@NamedSubgraph`] annotation.
|
||||
|
||||
[[fetching-strategies-dynamic-fetching-entity-subgraph]]
|
||||
==== JPA (key) subgraphs
|
||||
|
||||
A sub-graph is used to control the fetching of sub-attributes of the AttributeNode it is applied to.
|
||||
It is generally defined via the {jpaJavadocUrlPrefix}NamedSubgraph.html[`@NamedSubgraph`] annotation.
|
||||
|
||||
If we have a `Project` parent entity which has an `employees` child associations,
|
||||
and we'd like to fetch the `department` for the `Employee` child association.
|
||||
|
@ -248,6 +256,125 @@ include::{extrasdir}/fetching-strategies-dynamic-fetching-entity-subgraph-exampl
|
|||
----
|
||||
====
|
||||
|
||||
Specifying a sub-graph is only valid for an attribute (or its "key") whose type is a ManagedType. So
|
||||
while an EntityGraph must correspond to an EntityType, a Subgraph is legal for any ManagedType. An
|
||||
attribute's key is defined as either:
|
||||
|
||||
* For a singular attribute, the attribute's type must be an IdentifiableType and that IdentifiableType must
|
||||
have a composite identifier. The "key sub-graph" is applied to the identifier type. The
|
||||
non-key sub-graph applies to the attribute's value, which must be a ManagedType.
|
||||
* For a plural attribute, the attribute must be a Map and the Map's key value must be a ManagedType.
|
||||
The "key sub-graph" is applied to the Map's key type. In this case, the non-key sub-graph applies
|
||||
to the plural attribute's value/element.
|
||||
|
||||
|
||||
[[fetching-strategies-dynamic-fetching-entity-subgraph-subtype]]
|
||||
==== JPA SubGraph sub-typing
|
||||
|
||||
SubGraphs can also be sub-type specific. Given an attribute whose value is an inheritance hierarchy,
|
||||
we can refer to attributes of a specific sub-type using the forms of sub-graph definition that accept
|
||||
the sub-type Class.
|
||||
|
||||
|
||||
[[fetching-strategies-dynamic-fetching-entity-graph-parsing]]
|
||||
==== Creating and applying JPA graphs from text representations
|
||||
|
||||
Hibernate allows the creation of JPA fetch/load graphs by parsing a textual representation
|
||||
of the graph. Generally speaking, the textual representation of a graph is a comma-separated
|
||||
list of attribute names, optionally including any sub-graph specifications.
|
||||
`org.hibernate.graph.EntityGraphParser` is the starting point for such parsing operations.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
Parsing a textual representation of a graph is not (yet) a part of the JPA specification. So the
|
||||
syntax described here is specific to Hibernate. We do hope to eventually make this syntax part of
|
||||
the JPA specification proper.
|
||||
====
|
||||
|
||||
|
||||
.Parsing a simple graph
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/GraphParsingTest.java[tags=fetching-strategies-dynamic-fetching-entity-graph-parsing-example-1]
|
||||
----
|
||||
====
|
||||
|
||||
This example actually functions exactly as <<fetching-strategies-dynamic-fetching-entity-subgraph-example>>,
|
||||
just using a parsed graph rather than a named graph.
|
||||
|
||||
|
||||
The syntax also supports defining "key sub-graphs". To specify a key sub-graph, `.key` is added
|
||||
to the end of the attribute name.
|
||||
|
||||
.Parsing an entity key graph
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/GraphParsingTest.java[tags=fetching-strategies-dynamic-fetching-entity-graph-parsing-key-example-1]
|
||||
----
|
||||
====
|
||||
|
||||
.Parsing a map key graph
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/GraphParsingTest.java[tags=fetching-strategies-dynamic-fetching-entity-graph-parsing-key-example-2]
|
||||
----
|
||||
====
|
||||
|
||||
Parsing can also handle sub-type specific sub-graphs. For example, given an entity hierarchy of
|
||||
`LegalEntity` <- (`Corporation` | `Person` | `NonProfit`) and an attribute named `responsibleParty` whose
|
||||
type is the `LegalEntity` base type, we might have:
|
||||
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
responsibleParty(Corporation: ceo)
|
||||
----
|
||||
====
|
||||
|
||||
We can even duplicate the attribute names to apply different sub-type sub-graphs:
|
||||
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
responsibleParty(taxIdNumber), responsibleParty(Corporation: ceo), responsibleParty(NonProfit: sector)
|
||||
----
|
||||
====
|
||||
|
||||
The duplicated attribute names are handled according to the JPA specification which says that duplicate
|
||||
specification of the attribute node results in the originally registered AttributeNode to be re-used
|
||||
effectively merging the 2 AttributeNode specifications together. In other words, the above specification
|
||||
creates a single AttributeNode with 3 distinct SubGraphs. It is functionally the same as calling:
|
||||
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
Class<Invoice> invoiceClass = ...;
|
||||
javax.persistence.EntityGraph<Invoice> invoiceGraph = entityManager.createEntityGraph( invoiceClass );
|
||||
invoiceGraph.addAttributeNode( "responsibleParty" );
|
||||
invoiceGraph.addSubgraph( "responsibleParty" ).addAttributeNode( "taxIdNumber" );
|
||||
invoiceGraph.addSubgraph( "responsibleParty", Corporation.class ).addAttributeNode( "ceo" );
|
||||
invoiceGraph.addSubgraph( "responsibleParty", NonProfit.class ).addAttributeNode( "sector" );
|
||||
----
|
||||
====
|
||||
|
||||
[[fetching-strategies-dynamic-fetching-entity-graph-merging]]
|
||||
==== Combining multiple JPA entity graphs into one
|
||||
|
||||
Multiple entity graphs can be combined into a single "super graph" that acts as a union. Graph from
|
||||
the previous example can also be built by combining separate aspect graphs into one, such as:
|
||||
|
||||
.Combining multiple graphs into one
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/GraphParsingTest.java[tags=fetching-strategies-dynamic-fetching-entity-graph-merging-example]
|
||||
----
|
||||
====
|
||||
|
||||
|
||||
[[fetching-strategies-dynamic-fetching-profile]]
|
||||
=== Dynamic fetching via Hibernate profiles
|
||||
|
||||
|
@ -308,7 +435,7 @@ include::{extrasdir}/fetching-batch-fetching-example.sql[]
|
|||
----
|
||||
====
|
||||
|
||||
As you can see in the example above, there are only two SQL statements used to fetch the `Employee` entities associated to multiple `Department` entities.
|
||||
As you can see in the example above, there are only two SQL statements used to fetch the `Employee` entities associated with multiple `Department` entities.
|
||||
|
||||
[TIP]
|
||||
====
|
||||
|
@ -324,7 +451,7 @@ it allows you to fetch all the required data with a single query.
|
|||
=== The `@Fetch` annotation mapping
|
||||
|
||||
Besides the `FetchType.LAZY` or `FetchType.EAGER` JPA annotations,
|
||||
you can also use the Hibernate-specific `@Fetch` annotation that accepts one of the following `FetchMode(s)`:
|
||||
you can also use the Hibernate-specific `@Fetch` annotation that accepts one of the following ``FetchMode``s:
|
||||
|
||||
SELECT::
|
||||
The association is going to be fetched lazily using a secondary select for each individual entity,
|
||||
|
@ -388,7 +515,7 @@ include::{sourcedir}/FetchModeSubselectTest.java[tags=fetching-strategies-fetch-
|
|||
----
|
||||
====
|
||||
|
||||
Now, we are going to fetch all `Department` entities that match a given filtering criteria
|
||||
Now, we are going to fetch all `Department` entities that match a given filtering predicate
|
||||
and then navigate their `employees` collections.
|
||||
|
||||
Hibernate is going to avoid the N+1 query issue by generating a single SQL statement to initialize all `employees` collections
|
||||
|
@ -467,7 +594,7 @@ The possible values are given by the `https://docs.jboss.org/hibernate/orm/{majo
|
|||
`FALSE`:: Eagerly load it.
|
||||
`EXTRA`:: Prefer extra queries over full collection loading.
|
||||
|
||||
The `TRUE` and `FALSE` values are deprecated since you should be using the JPA http://docs.oracle.com/javaee/7/api/javax/persistence/FetchType.html[`FetchType`] attribute of the <<annotations-jpa-elementcollection>>, <<annotations-jpa-onetomany>>, or <<annotations-jpa-manytomany>> collection.
|
||||
The `TRUE` and `FALSE` values are deprecated since you should be using the JPA {jpaJavadocUrlPrefix}FetchType.html[`FetchType`] attribute of the <<annotations-jpa-elementcollection>>, <<annotations-jpa-onetomany>>, or <<annotations-jpa-manytomany>> collection.
|
||||
|
||||
The `EXTRA` value has no equivalent in the JPA specification, and it's used to avoid loading the entire collection even when the collection is accessed for the first time.
|
||||
Each element is fetched individually using a secondary query.
|
||||
|
@ -487,7 +614,7 @@ include::{sourcedir}/LazyCollectionTest.java[tags=fetching-LazyCollection-domain
|
|||
either List(s) that are annotated with @OrderColumn or Map(s).
|
||||
|
||||
For bags (e.g. regular List(s) of entities that do not preserve any certain ordering),
|
||||
the @LazyCollection(LazyCollectionOption.EXTRA)` behaves like any other `FetchType.LAZY` collection
|
||||
the `@LazyCollection(LazyCollectionOption.EXTRA)` behaves like any other `FetchType.LAZY` collection
|
||||
(the collection is fetched entirely upon being accessed for the first time).
|
||||
====
|
||||
|
||||
|
@ -523,5 +650,5 @@ include::{extrasdir}/fetching-LazyCollection-select-example.sql[]
|
|||
====
|
||||
Therefore, the child entities were fetched one after the other without triggering a full collection initialization.
|
||||
|
||||
For this reason, caution is advised because `LazyCollectionOption.EXTRA` lazy collections are prone to N+1 query issues.
|
||||
====
|
||||
For this reason, caution is advised since accessing all elements using `LazyCollectionOption.EXTRA` can lead to N+1 query issues.
|
||||
====
|
||||
|
|
|
@ -7,7 +7,7 @@ Flushing is the process of synchronizing the state of the persistence context wi
|
|||
The `EntityManager` and the Hibernate `Session` expose a set of methods, through which the application developer can change the persistent state of an entity.
|
||||
|
||||
The persistence context acts as a transactional write-behind cache, queuing any entity state change.
|
||||
Like any write-behind cache, changes are first applied in-memory and synchronized with the database during flush time.
|
||||
Like any write-behind cache, changes are first applied in-memory and synchronized with the database during the flush time.
|
||||
The flush operation takes every entity state change and translates it to an `INSERT`, `UPDATE` or `DELETE` statement.
|
||||
|
||||
[NOTE]
|
||||
|
@ -17,11 +17,11 @@ See the <<chapters/batch/Batching.adoc#batch,Batching chapter>> for more informa
|
|||
====
|
||||
|
||||
The flushing strategy is given by the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/Session.html#getFlushMode--[`flushMode`] of the current running Hibernate `Session`.
|
||||
Although JPA defines only two flushing strategies (https://javaee.github.io/javaee-spec/javadocs/javax/persistence/FlushModeType.html#AUTO[`AUTO`] and https://javaee.github.io/javaee-spec/javadocs/javax/persistence/FlushModeType.html#COMMIT[`COMMIT`]),
|
||||
Although JPA defines only two flushing strategies ({jpaJavadocUrlPrefix}FlushModeType.html#AUTO[`AUTO`] and {jpaJavadocUrlPrefix}FlushModeType.html#COMMIT[`COMMIT`]),
|
||||
Hibernate has a much broader spectrum of flush types:
|
||||
|
||||
ALWAYS:: Flushes the `Session` before every query.
|
||||
AUTO:: This is the default mode and it flushes the `Session` only if necessary.
|
||||
AUTO:: This is the default mode, and it flushes the `Session` only if necessary.
|
||||
COMMIT:: The `Session` tries to delay the flush until the current `Transaction` is committed, although it might flush prematurely too.
|
||||
MANUAL:: The `Session` flushing is delegated to the application, which must call `Session.flush()` explicitly in order to apply the persistence context changes.
|
||||
|
||||
|
@ -36,7 +36,7 @@ By default, Hibernate uses the `AUTO` flush mode which triggers a flush in the f
|
|||
|
||||
==== `AUTO` flush on commit
|
||||
|
||||
In the following example, an entity is persisted and then the transaction is committed.
|
||||
In the following example, an entity is persisted, and then the transaction is committed.
|
||||
|
||||
[[flushing-auto-flush-commit-example]]
|
||||
.Automatic flushing on commit
|
||||
|
@ -58,7 +58,7 @@ Hibernate logs the message prior to inserting the entity because the flush only
|
|||
====
|
||||
This is valid for the `SEQUENCE` and `TABLE` identifier generators.
|
||||
The `IDENTITY` generator must execute the insert right after calling `persist()`.
|
||||
For details, see the discussion of generators in <<chapters/domain/identifiers.adoc#identifiers,_Identifier generators_>>.
|
||||
For more details, see the discussion of generators in <<chapters/domain/identifiers.adoc#identifiers,_Identifier generators_>>.
|
||||
====
|
||||
|
||||
==== `AUTO` flush on JPQL/HQL query
|
||||
|
@ -79,7 +79,7 @@ include::{extrasdir}/flushing-auto-flush-jpql-example.sql[]
|
|||
----
|
||||
====
|
||||
|
||||
The reason why the `Advertisement` entity query didn't trigger a flush is because there's no overlapping between the `Advertisement` and the `Person` tables:
|
||||
The reason why the `Advertisement` entity query didn't trigger a flush is that there's no overlapping between the `Advertisement` and the `Person` tables:
|
||||
|
||||
[[flushing-auto-flush-jpql-entity-example]]
|
||||
.Automatic flushing on JPQL/HQL entities
|
||||
|
@ -106,7 +106,7 @@ include::{extrasdir}/flushing-auto-flush-jpql-overlap-example.sql[]
|
|||
----
|
||||
====
|
||||
|
||||
This time, the flush was triggered by a JPQL query because the pending entity persist action overlaps with the query being executed.
|
||||
This time, the flush was triggered by a JPQL query because the pending entity persisting action overlaps with the query being executed.
|
||||
|
||||
==== `AUTO` flush on native SQL query
|
||||
|
||||
|
@ -173,7 +173,7 @@ include::{extrasdir}/flushing-commit-flush-jpql-example.sql[]
|
|||
Because the JPA doesn't impose a strict rule on delaying flushing, when executing a native SQL query, the persistence context is going to be flushed.
|
||||
|
||||
[[flushing-commit-flush-sql-example]]
|
||||
.`COMMIT` flushing on SQL
|
||||
.`COMMIT` flushing on native SQL
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -197,7 +197,7 @@ The `ALWAYS` is only available with the native `Session` API.
|
|||
The `ALWAYS` flush mode triggers a persistence context flush even when executing a native SQL query against the `Session` API.
|
||||
|
||||
[[flushing-always-flush-sql-example]]
|
||||
.`COMMIT` flushing on SQL
|
||||
.`COMMIT` flushing on native SQL
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -214,7 +214,7 @@ include::{extrasdir}/flushing-always-flush-sql-example.sql[]
|
|||
=== `MANUAL` flush
|
||||
|
||||
Both the `EntityManager` and the Hibernate `Session` define a `flush()` method that, when called, triggers a manual flush.
|
||||
Hibernate also defines a `MANUAL` flush mode so the persistence context can only be flushed manually.
|
||||
Hibernate also provides a `MANUAL` flush mode so the persistence context can only be flushed manually.
|
||||
|
||||
[[flushing-manual-flush-example]]
|
||||
.`MANUAL` flushing
|
||||
|
@ -230,18 +230,18 @@ include::{extrasdir}/flushing-manual-flush-example.sql[]
|
|||
----
|
||||
====
|
||||
|
||||
The `INSERT` statement was not executed because the persistence context because there was no manual `flush()` call.
|
||||
The `INSERT` statement was not executed because there was no manual `flush()` call.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
This mode is useful when using multi-request logical transactions and only the last request should flush the persistence context.
|
||||
The `MANUAL` flush mode is useful when using multi-request logical transactions, and only the last request should flush the persistence context.
|
||||
====
|
||||
|
||||
[[flushing-order]]
|
||||
=== Flush operation order
|
||||
|
||||
From a database perspective, a row state can be altered using either an `INSERT`, an `UPDATE` or a `DELETE` statement.
|
||||
Because entity state changes are automatically converted to SQL statements, it's important to know which entity actions are associated to a given SQL statement.
|
||||
Because entity state changes are automatically converted to SQL statements, it's important to know which entity actions are associated with a given SQL statement.
|
||||
|
||||
`INSERT`:: The `INSERT` statement is generated either by the `EntityInsertAction` or `EntityIdentityInsertAction`. These actions are scheduled by the `persist` operation, either explicitly or through cascading the `PersistEvent` from a parent to a child entity.
|
||||
`DELETE`:: The `DELETE` statement is generated by the `EntityDeleteAction` or `OrphanRemovalAction`.
|
||||
|
@ -277,6 +277,7 @@ The `ActionQueue` executes all operations in the following order:
|
|||
. `OrphanRemovalAction`
|
||||
. `EntityInsertAction` or `EntityIdentityInsertAction`
|
||||
. `EntityUpdateAction`
|
||||
. `QueuedOperationCollectionAction`
|
||||
. `CollectionRemoveAction`
|
||||
. `CollectionUpdateAction`
|
||||
. `CollectionRecreateAction`
|
||||
|
|
|
@ -28,7 +28,7 @@ Hibernate will internally determine which `ConnectionProvider` to use based on t
|
|||
|
||||
Hibernate can integrate with a `javax.sql.DataSource` for obtaining JDBC Connections.
|
||||
Applications would tell Hibernate about the `DataSource` via the (required) `hibernate.connection.datasource` setting which can either specify a JNDI name or would reference the actual `DataSource` instance.
|
||||
For cases where a JNDI name is given, be sure to read <<chapters/jndi/JNDI.adoc#jndi,JNDI>>
|
||||
For cases where a JNDI name is given, be sure to read <<chapters/jndi/JNDI.adoc#jndi,JNDI>>.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
|
@ -39,27 +39,32 @@ The `DataSource` `ConnectionProvider` also (optionally) accepts the `hibernate.c
|
|||
If specified, the https://docs.oracle.com/javase/8/docs/api/javax/sql/DataSource.html#getConnection-java.lang.String-java.lang.String-[`DataSource#getConnection(String username, String password)`] will be used.
|
||||
Otherwise, the no-arg form is used.
|
||||
|
||||
[[database-connectionprovider-driver]]
|
||||
=== Driver Configuration
|
||||
`hibernate.connection.driver_class`:: The name of the JDBC Driver class to use
|
||||
`hibernate.connection.url`:: The JDBC connection url
|
||||
`hibernate.connection.*`:: All such setting names (except the <<appendices/Configurations.adoc#configurations-database-connection,predefined ones>>) will have the `hibernate.connection.` prefix stripped. The remaining name and the original value will be passed to the driver as a JDBC connection property
|
||||
|
||||
NOTE: Not all properties apply to all situations. For example, if you are providing a data source, `hibernate.connection.driver_class` setting will not be used.
|
||||
|
||||
[[database-connectionprovider-c3p0]]
|
||||
=== Using c3p0
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
To use this integration, the application must include the hibernate-c3p0 module jar (as well as its dependencies) on the classpath.
|
||||
To use the c3p0 integration, the application must include the `hibernate-c3p0` module jar (as well as its dependencies) on the classpath.
|
||||
====
|
||||
|
||||
Hibernate also provides support for applications to use http://www.mchange.com/projects/c3p0/[c3p0] connection pooling.
|
||||
When using this c3p0 support, a number of additional configuration settings are recognized.
|
||||
When c3p0 support is enabled, a number of c3p0-specific configuration settings are recognized in addition to the general ones described in <<database-connectionprovider-driver>>.
|
||||
|
||||
Transaction isolation of the Connections is managed by the `ConnectionProvider` itself. See <<database-connectionprovider-isolation>>.
|
||||
|
||||
`hibernate.connection.driver_class`:: The name of the JDBC Driver class to use
|
||||
`hibernate.connection.url`:: The JDBC connection url.
|
||||
Any settings prefixed with `hibernate.connection.` (other than the "special ones"):: These all have the `hibernate.connection.` prefix stripped and the rest will be passed as JDBC connection properties
|
||||
`hibernate.c3p0.min_size` or `c3p0.minPoolSize`:: The minimum size of the c3p0 pool. See http://www.mchange.com/projects/c3p0/#minPoolSize[c3p0 minPoolSize]
|
||||
`hibernate.c3p0.max_size` or `c3p0.maxPoolSize`:: The maximum size of the c3p0 pool. See http://www.mchange.com/projects/c3p0/#maxPoolSize[c3p0 maxPoolSize]
|
||||
`hibernate.c3p0.timeout` or `c3p0.maxIdleTime`:: The Connection idle time. See http://www.mchange.com/projects/c3p0/#maxIdleTime[c3p0 maxIdleTime]
|
||||
`hibernate.c3p0.max_statements` or `c3p0.maxStatements`:: Controls the c3p0 PreparedStatement cache size (if using). See http://www.mchange.com/projects/c3p0/#maxStatements[c3p0 maxStatements]
|
||||
`hibernate.c3p0.acquire_increment` or `c3p0.acquireIncrement`:: Number of connections c3p0 should acquire at a time when pool is exhausted. See http://www.mchange.com/projects/c3p0/#acquireIncrement[c3p0 acquireIncrement]
|
||||
`hibernate.c3p0.acquire_increment` or `c3p0.acquireIncrement`:: Number of connections c3p0 should acquire at a time when the pool is exhausted. See http://www.mchange.com/projects/c3p0/#acquireIncrement[c3p0 acquireIncrement]
|
||||
`hibernate.c3p0.idle_test_period` or `c3p0.idleConnectionTestPeriod`:: Idle time before a c3p0 pooled connection is validated. See http://www.mchange.com/projects/c3p0/#idleConnectionTestPeriod[c3p0 idleConnectionTestPeriod]
|
||||
`hibernate.c3p0.initialPoolSize`:: The initial c3p0 pool size. If not specified, default is to use the min pool size. See http://www.mchange.com/projects/c3p0/#initialPoolSize[c3p0 initialPoolSize]
|
||||
Any other settings prefixed with `hibernate.c3p0.`:: Will have the `hibernate.` portion stripped and be passed to c3p0.
|
||||
|
@ -70,7 +75,7 @@ Any other settings prefixed with `c3p0.`:: Get passed to c3p0 as is. See http://
|
|||
|
||||
[IMPORTANT]
|
||||
====
|
||||
To use this integration, the application must include the hibernate-proxool module jar (as well as its dependencies) on the classpath.
|
||||
To use the Proxool integration, the application must include the `hibernate-proxool` module jar (as well as its dependencies) on the classpath.
|
||||
====
|
||||
|
||||
Hibernate also provides support for applications to use http://proxool.sourceforge.net/[Proxool] connection pooling.
|
||||
|
@ -102,10 +107,10 @@ See http://proxool.sourceforge.net/configure.html[proxool configuration].
|
|||
|
||||
[IMPORTANT]
|
||||
====
|
||||
To use this integration, the application must include the hibernate-hikari module jar (as well as its dependencies) on the classpath.
|
||||
To use the HikariCP integration, the application must include the `hibernate-hikari` module jar (as well as its dependencies) on the classpath.
|
||||
====
|
||||
|
||||
Hibernate also provides support for applications to use http://brettwooldridge.github.io/HikariCP/[Hikari] connection pool.
|
||||
Hibernate also provides support for applications to use https://github.com/brettwooldridge/HikariCP/[HikariCP] connection pool.
|
||||
|
||||
Set all of your Hikari settings in Hibernate prefixed by `hibernate.hikari.` and this `ConnectionProvider` will pick them up and pass them along to Hikari.
|
||||
Additionally, this `ConnectionProvider` will pick up the following Hibernate-specific properties and map them to the corresponding Hikari ones (any `hibernate.hikari.` prefixed ones have precedence):
|
||||
|
@ -123,7 +128,7 @@ Note that Hikari only supports JDBC standard isolation levels (apparently).
|
|||
|
||||
[IMPORTANT]
|
||||
====
|
||||
To use this integration, the application must include the hibernate-vibur module jar (as well as its dependencies) on the classpath.
|
||||
To use the Vibur DBCP integration, the application must include the `hibernate-vibur` module jar (as well as its dependencies) on the classpath.
|
||||
====
|
||||
|
||||
Hibernate also provides support for applications to use http://www.vibur.org/[Vibur DBCP] connection pool.
|
||||
|
@ -143,7 +148,7 @@ Additionally, this `ConnectionProvider` will pick up the following Hibernate-spe
|
|||
|
||||
[IMPORTANT]
|
||||
====
|
||||
To use this integration, the application must include the hibernate-agroal module jar (as well as its dependencies) on the classpath.
|
||||
To use the Agroal integration, the application must include the `hibernate-agroal` module jar (as well as its dependencies) on the classpath.
|
||||
====
|
||||
|
||||
Hibernate also provides support for applications to use http://agroal.github.io/[Agroal] connection pool.
|
||||
|
@ -180,13 +185,69 @@ This usage is discouraged and not discussed here.
|
|||
All of the provided ConnectionProvider implementations, other than `DataSourceConnectionProvider`, support consistent setting of transaction isolation for all `Connections` obtained from the underlying pool.
|
||||
The value for `hibernate.connection.isolation` can be specified in one of 3 formats:
|
||||
|
||||
* the integer value accepted at the JDBC level
|
||||
* the integer value accepted at the JDBC level.
|
||||
* the name of the `java.sql.Connection` constant field representing the isolation you would like to use.
|
||||
For example, `TRANSACTION_REPEATABLE_READ` for https://docs.oracle.com/javase/8/docs/api/java/sql/Connection.html#TRANSACTION_REPEATABLE_READ[`java.sql.Connection#TRANSACTION_REPEATABLE_READ`].
|
||||
Note that this is only supported for JDBC standard isolation levels, not for isolation levels specific to a particular JDBC driver.
|
||||
* a short-name version of the java.sql.Connection constant field without the `TRANSACTION_` prefix. For example, `REPEATABLE_READ` for https://docs.oracle.com/javase/8/docs/api/java/sql/Connection.html#TRANSACTION_REPEATABLE_READ[`java.sql.Connection#TRANSACTION_REPEATABLE_READ`].
|
||||
Again, this is only supported for JDBC standard isolation levels, not for isolation levels specific to a particular JDBC driver.
|
||||
|
||||
[[database-connection-handling]]
|
||||
=== Connection handling
|
||||
|
||||
The connection handling mode is defined by the
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/resource/jdbc/spi/PhysicalConnectionHandlingMode.html[`PhysicalConnectionHandlingMode`] enumeration which provides the following strategies:
|
||||
|
||||
`IMMEDIATE_ACQUISITION_AND_HOLD`::
|
||||
The `Connection` will be acquired as soon as the `Session` is opened and held until the `Session` is closed.
|
||||
`DELAYED_ACQUISITION_AND_HOLD`::
|
||||
The `Connection` will be acquired as soon as it is needed and then held until the `Session` is closed.
|
||||
`DELAYED_ACQUISITION_AND_RELEASE_AFTER_STATEMENT`::
|
||||
The `Connection` will be acquired as soon as it is needed and will be released after each statement is executed.
|
||||
`DELAYED_ACQUISITION_AND_RELEASE_AFTER_TRANSACTION`::
|
||||
The `Connection` will be acquired as soon as it is needed and will be released after each transaction is completed.
|
||||
|
||||
If you don't want to use the default connection handling mode, you can specify a connection handling mode via the `hibernate.connection.handling_mode` configuration property. For more details, check out the
|
||||
<<appendices/Configurations.adoc#configurations-database-connection,Database connection properties>> section.
|
||||
|
||||
==== Transaction type and connection handling
|
||||
|
||||
By default, the connection handling mode is given by the underlying transaction coordinator. There are two types of transactions: `RESOURCE_LOCAL` (which involves a single database `Connection` and the transaction is controlled via the `commit` and `rollback` `Connection` methods) and `JTA` (which may involve multiple resources including database connections, JMS queues, etc).
|
||||
|
||||
===== RESOURCE_LOCAL transaction connection handling
|
||||
|
||||
For `RESOURCE_LOCAL` transactions, the connection handling mode is `DELAYED_ACQUISITION_AND_RELEASE_AFTER_TRANSACTION` meaning that the database connection is acquired when needed and released after the current running transaction is either committed or rolled back.
|
||||
|
||||
However, because Hibernate needs to make sure that the default autocommit mode is disabled on the JDBC `Connection`
|
||||
when starting a new transaction, the `Connection` is acquired and the autocommit mode is set to `false`.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
If you are using a connection pool `DataSource` that already disabled the autocommit mode for every pooled `Connection`, you should set the `hibernate.connection.provider_disables_autocommit` to `true` and the database connection acquisition will be, indeed, delayed until Hibernate needs to execute the first SQL statement.
|
||||
====
|
||||
|
||||
===== JTA transaction connection handling
|
||||
|
||||
For `JTA` transactions, the connection handling mode is `DELAYED_ACQUISITION_AND_RELEASE_AFTER_STATEMENT` meaning that the database connection is acquired when needed and released after each statement execution.
|
||||
|
||||
The reason for releasing the database connection after statement execution is that some Java EE application servers
|
||||
report a connection leak when a method call goes from one EJB to another. However, even if the JDBC `Connection` is released to the pool, the `Connection` is still allocated to the current executing `Thread`, hence when executing a subsequent statement in the current running transaction, the same `Connection` object reference will be obtained from the pool.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
If the Java EE application server or JTA transaction manager supports switching from one EJB to another while the transaction gets propagated from the outer EJB to the inner one,
|
||||
and no connection leak false positive is being reported, then you should consider switching to `DELAYED_ACQUISITION_AND_RELEASE_AFTER_TRANSACTION` via the `hibernate.connection.handling_mode` configuration property.
|
||||
====
|
||||
|
||||
==== User-provided connections
|
||||
|
||||
If the current `Session` was created using the
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/SessionBuilder.html[`SessionBuilder`] and a JDBC `Connection` was provided via the
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/SessionBuilder.html#connection-java.sql.Connection-[`SessionBuilder#connection`] method, then the user-provided `Connection` is going to be used, and
|
||||
the connection handling mode will be `IMMEDIATE_ACQUISITION_AND_HOLD`.
|
||||
|
||||
Therefore, for user-provided connections, the connection is acquired right away and held until the current `Session` is closed, without being influenced by the JPA or Hibernate transaction context.
|
||||
|
||||
[[database-dialect]]
|
||||
=== Database Dialect
|
||||
|
||||
|
@ -194,7 +255,7 @@ Although SQL is relatively standardized, each database vendor uses a subset and
|
|||
This is referred to as the database's dialect.
|
||||
Hibernate handles variations across these dialects through its `org.hibernate.dialect.Dialect` class and the various subclasses for each database vendor.
|
||||
|
||||
In most cases Hibernate will be able to determine the proper Dialect to use by asking some questions of the JDBC Connection during bootstrap.
|
||||
In most cases, Hibernate will be able to determine the proper Dialect to use by asking some questions of the JDBC Connection during bootstrap.
|
||||
For information on Hibernate's ability to determine the proper Dialect to use (and your ability to influence that resolution), see <<chapters/portability/Portability.adoc#portability-dialectresolver,Dialect resolution>>.
|
||||
|
||||
If for some reason it is not able to determine the proper one or you want to use a custom Dialect, you will need to set the `hibernate.dialect` setting.
|
||||
|
@ -203,20 +264,24 @@ If for some reason it is not able to determine the proper one or you want to use
|
|||
[width="100%",cols="28%,72%",options="header",]
|
||||
|=======================================================================
|
||||
|Dialect (short name) |Remarks
|
||||
|Cache71 |Support for the Caché database, version 2007.1
|
||||
|Cache71 |Support for the Caché database, version 2007.1.
|
||||
|CockroachDB192 |Support for the CockroachDB database version 19.2.
|
||||
|CockroachDB201 |Support for the CockroachDB database version 20.1.
|
||||
|CUBRID |Support for the CUBRID database, version 8.3. May work with later versions.
|
||||
|DB2 |Support for the DB2 database, version 8.2.
|
||||
|DB297 |Support for the DB2 database, version 9.7.
|
||||
|DB2390 |Support for DB2 Universal Database for OS/390, also known as DB2/390.
|
||||
|DB2400 |Support for DB2 Universal Database for iSeries, also known as DB2/400.
|
||||
|DB2400V7R3 |Support for DB2 Universal Database for i, also known as DB2/400, version 7.3
|
||||
|DerbyTenFive |Support for the Derby database, version 10.5
|
||||
|DerbyTenSix |Support for the Derby database, version 10.6
|
||||
|DerbyTenSeven |Support for the Derby database, version 10.7
|
||||
|Firebird |Support for the Firebird database
|
||||
|FrontBase |Support for the Frontbase database
|
||||
|H2 |Support for the H2 database
|
||||
|HANAColumnStore |Support for the SAP HANA database column store. This is the recommended dialect for the SAP HANA database.
|
||||
|HANARowStore |Support for the SAP HANA database row store
|
||||
|HANACloudColumnStore |Support for the SAP HANA Cloud database column store.
|
||||
|HANAColumnStore |Support for the SAP HANA database column store, version 2.x. This is the recommended dialect for the SAP HANA database. May work with SAP HANA, version 1.x
|
||||
|HANARowStore |Support for the SAP HANA database row store, version 2.x. May work with SAP HANA, version 1.x
|
||||
|HSQL |Support for the HSQL (HyperSQL) database
|
||||
|Informix |Support for the Informix database
|
||||
|Ingres |Support for the Ingres database, version 9.2
|
||||
|
@ -229,8 +294,8 @@ If for some reason it is not able to determine the proper one or you want to use
|
|||
|MySQL5 |Support for the MySQL database, version 5.x
|
||||
|MySQL5InnoDB |Support for the MySQL database, version 5.x preferring the InnoDB storage engine when exporting tables.
|
||||
|MySQL57InnoDB |Support for the MySQL database, version 5.7 preferring the InnoDB storage engine when exporting tables. May work with newer versions
|
||||
|MariaDB |Support for the MariadB database. May work with newer versions
|
||||
|MariaDB53 |Support for the MariadB database, version 5.3 and newer.
|
||||
|MariaDB |Support for the MariaDB database. May work with newer versions
|
||||
|MariaDB53 |Support for the MariaDB database, version 5.3 and newer.
|
||||
|Oracle8i |Support for the Oracle database, version 8i
|
||||
|Oracle9i |Support for the Oracle database, version 9i
|
||||
|Oracle10g |Support for the Oracle database, version 10g
|
||||
|
|
|
@ -10,9 +10,9 @@ Generally, it does this when the application:
|
|||
* is using JTA transactions and the `JtaPlatform` needs to do JNDI lookups for `TransactionManager`, `UserTransaction`, etc
|
||||
|
||||
All of these JNDI calls route through a single service whose role is `org.hibernate.engine.jndi.spi.JndiService`.
|
||||
The standard `JndiService` accepts a number of configuration settings
|
||||
The standard `JndiService` accepts a number of configuration settings:
|
||||
|
||||
`hibernate.jndi.class`:: names the javax.naming.InitialContext implementation class to use. See https://docs.oracle.com/javase/8/docs/api/javax/naming/Context.html#INITIAL_CONTEXT_FACTORY[`javax.naming.Context#INITIAL_CONTEXT_FACTORY`]
|
||||
`hibernate.jndi.class`:: names the `javax.naming.InitialContext` implementation class to use. See https://docs.oracle.com/javase/8/docs/api/javax/naming/Context.html#INITIAL_CONTEXT_FACTORY[`javax.naming.Context#INITIAL_CONTEXT_FACTORY`]
|
||||
`hibernate.jndi.url`:: names the JNDI InitialContext connection url. See https://docs.oracle.com/javase/8/docs/api/javax/naming/Context.html#PROVIDER_URL[`javax.naming.Context.PROVIDER_URL`]
|
||||
|
||||
Any other settings prefixed with `hibernate.jndi.` will be collected and passed along to the JNDI provider.
|
||||
|
|
|
@ -100,7 +100,7 @@ If the version number is generated by the database, such as a trigger, use the a
|
|||
[[locking-optimistic-timestamp]]
|
||||
===== Timestamp
|
||||
|
||||
Timestamps are a less reliable way of optimistic locking than version numbers, but can be used by applications for other purposes as well.
|
||||
Timestamps are a less reliable way of optimistic locking than version numbers but can be used by applications for other purposes as well.
|
||||
Timestamping is automatically used if you use the `@Version` annotation on a `Date` or `Calendar` property type.
|
||||
|
||||
[[locking-optimistic-version-timestamp-example]]
|
||||
|
@ -114,7 +114,7 @@ include::{sourcedir}/OptimisticLockingTest.java[tags=locking-optimistic-version-
|
|||
|
||||
Hibernate can retrieve the timestamp value from the database or the JVM, by reading the value you specify for the `@org.hibernate.annotations.Source` annotation.
|
||||
The value can be either `org.hibernate.annotations.SourceType.DB` or `org.hibernate.annotations.SourceType.VM`.
|
||||
The default behavior is to use the database, and is also used if you don't specify the annotation at all.
|
||||
The default behavior is to use the database, and database is also used if you don't specify the annotation at all.
|
||||
|
||||
The timestamp can also be generated by the database instead of Hibernate
|
||||
if you use the `@org.hibernate.annotations.Generated(GenerationTime.ALWAYS)` or the `@Source` annotation.
|
||||
|
@ -161,7 +161,7 @@ include::{sourcedir}/OptimisticLockTest.java[tags=locking-optimistic-exclude-att
|
|||
----
|
||||
====
|
||||
|
||||
This way, if one tread modifies the `Phone` number while a second thread increments the `callCount` attribute,
|
||||
This way, if one thread modifies the `Phone` number while a second thread increments the `callCount` attribute,
|
||||
the two concurrent transactions are not going to conflict as illustrated by the following example.
|
||||
|
||||
[[locking-optimistic-exclude-attribute-example]]
|
||||
|
@ -198,7 +198,7 @@ sometimes, you need rely on the actual database row column values to prevent *lo
|
|||
Hibernate supports a form of optimistic locking that does not require a dedicated "version attribute".
|
||||
This is also useful for use with modeling legacy schemas.
|
||||
|
||||
The idea is that you can get Hibernate to perform "version checks" using either all of the entity's attributes, or just the attributes that have changed.
|
||||
The idea is that you can get Hibernate to perform "version checks" using either all of the entity's attributes or just the attributes that have changed.
|
||||
This is achieved through the use of the
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/OptimisticLocking.html[`@OptimisticLocking`]
|
||||
annotation which defines a single attribute of type
|
||||
|
@ -312,9 +312,9 @@ Hibernate always uses the locking mechanism of the database, and never lock obje
|
|||
=== `LockMode` and `LockModeType`
|
||||
|
||||
Long before JPA 1.0, Hibernate already defined various explicit locking strategies through its `LockMode` enumeration.
|
||||
JPA comes with its own http://docs.oracle.com/javaee/7/api/javax/persistence/LockModeType.html[`LockModeType`] enumeration which defines similar strategies as the Hibernate-native `LockMode`.
|
||||
JPA comes with its own {jpaJavadocUrlPrefix}LockModeType.html[`LockModeType`] enumeration which defines similar strategies as the Hibernate-native `LockMode`.
|
||||
|
||||
[cols=",",, options="header"]
|
||||
[cols=",,",, options="header"]
|
||||
|=======================================================================
|
||||
|`LockModeType`|`LockMode`|Description
|
||||
|
||||
|
@ -322,7 +322,7 @@ JPA comes with its own http://docs.oracle.com/javaee/7/api/javax/persistence/Loc
|
|||
|`READ` and `OPTIMISTIC`|`READ` | The entity version is checked towards the end of the currently running transaction.
|
||||
|`WRITE` and `OPTIMISTIC_FORCE_INCREMENT`|`WRITE` | The entity version is incremented automatically even if the entity has not changed.
|
||||
|`PESSIMISTIC_FORCE_INCREMENT`|`PESSIMISTIC_FORCE_INCREMENT` | The entity is locked pessimistically and its version is incremented automatically even if the entity has not changed.
|
||||
|`PESSIMISTIC_READ`|`PESSIMISTIC_READ` | The entity is locked pessimistically using a shared lock, if the database supports such a feature. Otherwise, an explicit lock is used.
|
||||
|`PESSIMISTIC_READ`|`PESSIMISTIC_READ` | The entity is locked pessimistically using a shared lock if the database supports such a feature. Otherwise, an explicit lock is used.
|
||||
|`PESSIMISTIC_WRITE`|`PESSIMISTIC_WRITE`, `UPGRADE` | The entity is locked using an explicit lock.
|
||||
|`PESSIMISTIC_WRITE` with a `javax.persistence.lock.timeout` setting of 0 |`UPGRADE_NOWAIT` | The lock acquisition request fails fast if the row is already locked.
|
||||
|`PESSIMISTIC_WRITE` with a `javax.persistence.lock.timeout` setting of -2 |`UPGRADE_SKIPLOCKED` | The lock acquisition request skips the already locked rows. It uses a `SELECT ... FOR UPDATE SKIP LOCKED` in Oracle and PostgreSQL 9.5, or `SELECT ... with (rowlock, updlock, readpast)` in SQL Server.
|
||||
|
@ -351,7 +351,7 @@ This ensures that applications are portable.
|
|||
JPA 2.0 introduced two query hints:
|
||||
|
||||
javax.persistence.lock.timeout:: it gives the number of milliseconds a lock acquisition request will wait before throwing an exception
|
||||
javax.persistence.lock.scope:: defines the http://docs.oracle.com/javaee/7/api/javax/persistence/PessimisticLockScope.html[_scope_] of the lock acquisition request.
|
||||
javax.persistence.lock.scope:: defines the {jpaJavadocUrlPrefix}PessimisticLockScope.html[_scope_] of the lock acquisition request.
|
||||
The scope can either be `NORMAL` (default value) or `EXTENDED`. The `EXTENDED` scope will cause a lock acquisition request to be passed to other owned table structures (e.g. `@Inheritance(strategy=InheritanceType.JOINED)`, `@ElementCollection`)
|
||||
|
||||
[[locking-jpa-query-hints-timeout-example]]
|
||||
|
@ -385,7 +385,7 @@ The `javax.persistence.lock.scope` is https://hibernate.atlassian.net/browse/HHH
|
|||
Traditionally, Hibernate offered the `Session#lock()` method for acquiring an optimistic or a pessimistic lock on a given entity.
|
||||
Because varying the locking options was difficult when using a single `LockMode` parameter, Hibernate has added the `Session#buildLockRequest()` method API.
|
||||
|
||||
The following example shows how to obtain shared database lock without waiting for the lock acquisition request.
|
||||
The following example shows how to obtain a shared database lock without waiting for the lock acquisition request.
|
||||
|
||||
[[locking-buildLockRequest-example]]
|
||||
.`buildLockRequest` example
|
||||
|
@ -448,8 +448,8 @@ include::{extrasdir}/locking-follow-on-secondary-query-example.sql[]
|
|||
|
||||
The lock request was moved from the original query to a secondary one which takes the previously fetched entities to lock their associated database records.
|
||||
|
||||
Prior to Hibernate 5.2.1, the the follow-on-locking mechanism was applied uniformly to any locking query executing on Oracle.
|
||||
Since 5.2.1, the Oracle Dialect tries to figure out if the current query demand the follow-on-locking mechanism.
|
||||
Prior to Hibernate 5.2.1, the follow-on-locking mechanism was applied uniformly to any locking query executing on Oracle.
|
||||
Since 5.2.1, the Oracle Dialect tries to figure out if the current query demands the follow-on-locking mechanism.
|
||||
|
||||
Even more important is that you can overrule the default follow-on-locking detection logic and explicitly enable or disable it on a per query basis.
|
||||
|
||||
|
@ -469,6 +469,6 @@ include::{extrasdir}/locking-follow-on-explicit-example.sql[]
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
The follow-on-locking mechanism should be explicitly enabled only if the current executing query fails because the `FOR UPDATE` clause cannot be applied, meaning that the Dialect resolving mechanism needs to be further improved.
|
||||
The follow-on-locking mechanism should be explicitly enabled only if the currently executing query fails because the `FOR UPDATE` clause cannot be applied, meaning that the Dialect resolving mechanism needs to be further improved.
|
||||
====
|
||||
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
[[multitenacy]]
|
||||
== Multitenancy
|
||||
:sourcedir: ../../../../../test/java/org/hibernate/userguide/multitenancy
|
||||
:extrasdir: extras
|
||||
|
||||
[[multitenacy-intro]]
|
||||
=== What is multitenancy?
|
||||
|
@ -9,7 +10,7 @@ The term multitenancy, in general, is applied to software development to indicat
|
|||
This is highly common in SaaS solutions.
|
||||
Isolating information (data, customizations, etc.) pertaining to the various tenants is a particular challenge in these systems.
|
||||
This includes the data owned by each tenant stored in the database.
|
||||
It is this last piece, sometimes called multitenant data, on which we will focus.
|
||||
It is this last piece, sometimes called multitenant data, that we will focus on.
|
||||
|
||||
[[multitenacy-approaches]]
|
||||
=== Multitenant data approaches
|
||||
|
@ -18,9 +19,8 @@ There are three main approaches to isolating information in these multitenant sy
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
Each approach has pros and cons as well as specific techniques and considerations.
|
||||
Each multitenancy strategy has pros and cons as well as specific techniques and considerations.
|
||||
Such topics are beyond the scope of this documentation.
|
||||
Many resources exist which delve into these other topics, like http://msdn.microsoft.com/en-us/library/aa479086.aspx[this one] which does a great job of covering these topics.
|
||||
====
|
||||
|
||||
[[multitenacy-separate-database]]
|
||||
|
@ -75,7 +75,7 @@ include::{sourcedir}/AbstractMultiTenancyTest.java[tags=multitenacy-hibernate-se
|
|||
|
||||
Additionally, when specifying the configuration, an `org.hibernate.MultiTenancyStrategy` should be named using the `hibernate.multiTenancy` setting.
|
||||
Hibernate will perform validations based on the type of strategy you specify.
|
||||
The strategy here correlates to the isolation approach discussed above.
|
||||
The strategy here correlates with the isolation approach discussed above.
|
||||
|
||||
NONE::
|
||||
(the default) No multitenancy is expected.
|
||||
|
@ -115,7 +115,7 @@ It could name a `MultiTenantConnectionProvider` instance, a `MultiTenantConnecti
|
|||
Hibernate will assume it should use the specific `DataSourceBasedMultiTenantConnectionProviderImpl` implementation which works on a number of pretty reasonable assumptions when running inside of an app server and using one `javax.sql.DataSource` per tenant.
|
||||
See its https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/connections/spi/DataSourceBasedMultiTenantConnectionProviderImpl.html[Javadocs] for more details.
|
||||
|
||||
The following example portrays a `MultiTenantConnectionProvider` implementation that handles multiple `ConnectionProviders`.
|
||||
The following example portrays a `MultiTenantConnectionProvider` implementation that handles multiple ``ConnectionProvider``s.
|
||||
|
||||
[[multitenacy-hibernate-ConfigurableMultiTenantConnectionProvider-example]]
|
||||
.A `MultiTenantConnectionProvider` implementation
|
||||
|
@ -129,7 +129,7 @@ include::{sourcedir}/ConfigurableMultiTenantConnectionProvider.java[tags=multite
|
|||
The `ConfigurableMultiTenantConnectionProvider` can be set up as follows:
|
||||
|
||||
[[multitenacy-hibernate-MultiTenantConnectionProvider-example]]
|
||||
.A `MultiTenantConnectionProvider` implementation
|
||||
.A `MultiTenantConnectionProvider` usage example
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -140,7 +140,7 @@ include::{sourcedir}/AbstractMultiTenancyTest.java[tags=multitenacy-hibernate-Mu
|
|||
When using multitenancy, it's possible to save an entity with the same identifier across different tenants:
|
||||
|
||||
[[multitenacy-hibernate-same-entity-example]]
|
||||
.A `MultiTenantConnectionProvider` implementation
|
||||
.An example of saving entities with the same identifier across different tenants
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -149,20 +149,20 @@ include::{sourcedir}/AbstractMultiTenancyTest.java[tags=multitenacy-multitenacy-
|
|||
====
|
||||
|
||||
[[multitenacy-hibernate-CurrentTenantIdentifierResolver]]
|
||||
==== CurrentTenantIdentifierResolver
|
||||
==== `CurrentTenantIdentifierResolver`
|
||||
|
||||
`org.hibernate.context.spi.CurrentTenantIdentifierResolver` is a contract for Hibernate to be able to resolve what the application considers the current tenant identifier.
|
||||
The implementation to use is either passed directly to `Configuration` via its `setCurrentTenantIdentifierResolver` method.
|
||||
It can also be specified via the `hibernate.tenant_identifier_resolver` setting.
|
||||
The implementation to use can be either passed directly to `Configuration` via its `setCurrentTenantIdentifierResolver` method,
|
||||
or be specified via the `hibernate.tenant_identifier_resolver` setting.
|
||||
|
||||
There are two situations where CurrentTenantIdentifierResolver is used:
|
||||
There are two situations where `CurrentTenantIdentifierResolver` is used:
|
||||
|
||||
* The first situation is when the application is using the `org.hibernate.context.spi.CurrentSessionContext` feature in conjunction with multitenancy.
|
||||
In the case of the current-session feature, Hibernate will need to open a session if it cannot find an existing one in scope.
|
||||
However, when a session is opened in a multitenant environment, the tenant identifier has to be specified.
|
||||
This is where the `CurrentTenantIdentifierResolver` comes into play; Hibernate will consult the implementation you provide to determine the tenant identifier to use when opening the session.
|
||||
In this case, it is required that a `CurrentTenantIdentifierResolver` is supplied.
|
||||
* The other situation is when you do not want to have to explicitly specify the tenant identifier all the time.
|
||||
* The other situation is when you do not want to explicitly specify the tenant identifier all the time.
|
||||
If a `CurrentTenantIdentifierResolver` has been specified, Hibernate will use it to determine the default tenant identifier to use when opening the session.
|
||||
|
||||
Additionally, if the `CurrentTenantIdentifierResolver` implementation returns `true` for its `validateExistingCurrentSessions` method, Hibernate will make sure any existing sessions that are found in scope have a matching tenant identifier.
|
||||
|
@ -176,7 +176,86 @@ The key used to cache data encodes the tenant identifier.
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
Currently, schema export will not really work with multitenancy. That may not change.
|
||||
Currently, schema export will not really work with multitenancy.
|
||||
|
||||
The JPA expert group is in the process of defining multitenancy support for an upcoming version of the specification.
|
||||
====
|
||||
|
||||
[[multitenacy-hibernate-session-configuration]]
|
||||
==== Multitenancy Hibernate Session configuration
|
||||
|
||||
When using multitenancy, you might want to configure each tenant-specific `Session` differently.
|
||||
For instance, each tenant could specify a different time zone configuration.
|
||||
|
||||
[[multitenacy-hibernate-timezone-configuration-registerConnectionProvider-call-example]]
|
||||
.Registering the tenant-specific time zone information
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/DatabaseTimeZoneMultiTenancyTest.java[tags=multitenacy-hibernate-timezone-configuration-registerConnectionProvider-call-example]
|
||||
----
|
||||
====
|
||||
|
||||
The `registerConnectionProvider` method is used to define the tenant-specific context.
|
||||
|
||||
[[multitenacy-hibernate-timezone-configuration-registerConnectionProvider-example]]
|
||||
.The `registerConnectionProvider` method used for defining the tenant-specific context
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/DatabaseTimeZoneMultiTenancyTest.java[tags=multitenacy-hibernate-timezone-configuration-registerConnectionProvider-example]
|
||||
----
|
||||
====
|
||||
|
||||
For our example, the tenant-specific context is held in the `connectionProviderMap` and `timeZoneTenantMap`.
|
||||
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/DatabaseTimeZoneMultiTenancyTest.java[tags=multitenacy-hibernate-timezone-configuration-context-example]
|
||||
----
|
||||
|
||||
Now, when building the Hibernate `Session`, aside from passing the tenant identifier,
|
||||
we could also configure the `Session` to use the tenant-specific time zone.
|
||||
|
||||
[[multitenacy-hibernate-timezone-configuration-session-example]]
|
||||
.The Hibernate `Session` can be configured using the tenant-specific context
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/DatabaseTimeZoneMultiTenancyTest.java[tags=multitenacy-hibernate-timezone-configuration-session-example]
|
||||
----
|
||||
====
|
||||
|
||||
So, if we set the `useTenantTimeZone` parameter to `true`, Hibernate will persist the `Timestamp` properties using the
|
||||
tenant-specific time zone. As you can see in the following example, the `Timestamp` is successfully retrieved
|
||||
even if the currently running JVM uses a different time zone.
|
||||
|
||||
[[multitenacy-hibernate-applying-timezone-configuration-example]]
|
||||
.The `useTenantTimeZone` allows you to persist a `Timestamp` in the provided time zone
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/DatabaseTimeZoneMultiTenancyTest.java[tags=multitenacy-hibernate-applying-timezone-configuration-example]
|
||||
----
|
||||
====
|
||||
|
||||
However, behind the scenes, we can see that Hibernate has saved the `created_on` property in the tenant-specific time zone.
|
||||
The following example shows you that the `Timestamp` was saved in the UTC time zone, hence the offset displayed in the
|
||||
test output.
|
||||
|
||||
[[multitenacy-hibernate-not-applying-timezone-configuration-example]]
|
||||
.With the `useTenantTimeZone` property set to `false`, the `Timestamp` is fetched in the tenant-specific time zone
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/DatabaseTimeZoneMultiTenancyTest.java[tags=multitenacy-hibernate-not-applying-timezone-configuration-example]
|
||||
----
|
||||
|
||||
[source, SQL,indent=0]
|
||||
----
|
||||
include::{extrasdir}/multitenacy-hibernate-not-applying-timezone-configuration-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
Notice that for the `Eastern European Time` time zone, the time zone offset was 2 hours when the test was executed.
|
||||
|
||||
|
|
|
@ -0,0 +1,12 @@
|
|||
SELECT
|
||||
p.created_on
|
||||
FROM
|
||||
Person p
|
||||
WHERE
|
||||
p.id = ?
|
||||
|
||||
-- binding parameter [1] as [BIGINT] - [1]
|
||||
-- extracted value ([CREATED_ON] : [TIMESTAMP]) - [2018-11-23 10:00:00.0]
|
||||
|
||||
-- The created_on timestamp value is: [2018-11-23 10:00:00.0]
|
||||
-- For the current time zone: [Eastern European Time], the UTC time zone offset is: [7200000]
|
|
@ -16,9 +16,9 @@ Hibernate supports three types of configurations within OSGi.
|
|||
|
||||
=== hibernate-osgi
|
||||
|
||||
Rather than embed OSGi capabilities into hibernate-core, and sub-modules, hibernate-osgi was created.
|
||||
Rather than embedding OSGi capabilities into hibernate-core, and sub-modules, hibernate-osgi was created.
|
||||
It's purposefully separated, isolating all OSGi dependencies.
|
||||
It provides an OSGi-specific `ClassLoader` (aggregates the container's `ClassLoader` with core and `EntityManager` `ClassLoader`s),
|
||||
It provides an OSGi-specific `ClassLoader` (aggregates the container's `ClassLoader` with core and `EntityManager` ``ClassLoader``s),
|
||||
JPA persistence provider, `SessionFactory`/`EntityManagerFactory` bootstrapping, entities/mappings scanner, and service management.
|
||||
|
||||
=== features.xml
|
||||
|
@ -26,14 +26,14 @@ JPA persistence provider, `SessionFactory`/`EntityManagerFactory` bootstrapping,
|
|||
Apache Karaf environments tend to make heavy use of its "features" concept, where a feature is a set of order-specific bundles focused on a concise capability.
|
||||
These features are typically defined in a `features.xml` file.
|
||||
Hibernate produces and releases its own `features.xml` that defines a core `hibernate-orm`, as well as additional features for optional functionality (caching, Envers, etc.).
|
||||
This is included in the binary distribution, as well as deployed to the JBoss Nexus repository (using the `org.hibernate` groupId and `hibernate-osgi` with the `karaf.xml` classifier).
|
||||
This is included in the binary distribution, as well as deployed to the JBoss Nexus repository (using the `org.hibernate` groupId and `hibernate-osgi` artifactId with the `karaf.xml` classifier).
|
||||
|
||||
Note that our features are versioned using the same ORM artifact versions they wrap.
|
||||
Also, note that the features are heavily tested against Karaf 3.0.3 as a part of our PaxExam-based integration tests.
|
||||
However, they'll likely work on other versions as well.
|
||||
|
||||
hibernate-osgi, theoretically, supports a variety of OSGi containers, such as Equinox.
|
||||
In that case, please use `features.xm`l as a reference for necessary bundles to activate and their correct ordering.
|
||||
In that case, please use `features.xml` as a reference for necessary bundles to activate and their correct ordering.
|
||||
However, note that Karaf starts a number of bundles automatically, several of which would need to be installed manually on alternatives.
|
||||
|
||||
=== QuickStarts/Demos
|
||||
|
@ -53,10 +53,10 @@ In order to utilize container-managed JPA, an Enterprise OSGi JPA container must
|
|||
In Karaf, this means Aries JPA, which is included out-of-the-box (simply activate the `jpa` and `transaction` features).
|
||||
Originally, we intended to include those dependencies within our own `features.xml`.
|
||||
However, after guidance from the Karaf and Aries teams, it was pulled out.
|
||||
This allows Hibernate OSGi to be portable and not be directly tied to Aries versions, instead having the user choose which to use.
|
||||
This allows Hibernate OSGi to be portable and not be directly tied to Aries versions, instead of having the user choose which to use.
|
||||
|
||||
That being said, the QuickStart/Demo projects include a sample https://github.com/hibernate/hibernate-demos/tree/master/hibernate-orm/osgi/managed-jpa/features.xml[features.xml]
|
||||
showing which features need activated in Karaf in order to support this environment.
|
||||
showing which features need to be activated in Karaf in order to support this environment.
|
||||
As mentioned, use this purely as a reference!
|
||||
|
||||
=== persistence.xml
|
||||
|
@ -90,7 +90,7 @@ That `DataSource` is then used by your `persistence.xml` persistence-unit. The f
|
|||
|
||||
=== Bundle Package Imports
|
||||
|
||||
Your bundle's manifest will need to import, at a minimum,
|
||||
Your bundle's manifest will need to import, at a minimum:
|
||||
|
||||
* `javax.persistence`
|
||||
* `org.hibernate.proxy` and `javassist.util.proxy`, due to Hibernate's ability to return proxies for lazy initialization (Javassist enhancement occurs on the entity's `ClassLoader` during runtime).
|
||||
|
@ -120,9 +120,9 @@ Similar to any other JPA setup, your bundle must include a `persistence.xml` fil
|
|||
|
||||
=== Bundle Package Imports
|
||||
|
||||
Your bundle's manifest will need to import, at a minimum,
|
||||
Your bundle's manifest will need to import, at a minimum:
|
||||
|
||||
* javax.persistence
|
||||
* `javax.persistence`
|
||||
* `org.hibernate.proxy` and `javassist.util.proxy`, due to Hibernate's ability to return proxies for lazy initialization (Javassist enhancement occurs on the entity's `ClassLoader` during runtime)
|
||||
* JDBC driver package (example: `org.h2`)
|
||||
* `org.osgi.framework`, necessary to discover the `EntityManagerFactory` (described below)
|
||||
|
@ -150,13 +150,13 @@ include::{sourcedir}/jpa/HibernateUtil.java[tag=osgi-discover-EntityManagerFacto
|
|||
[[osgi-unmanaged-native]]
|
||||
=== Unmanaged Native
|
||||
|
||||
Native Hibernate use is also supported. The client bundle is responsible for managing the `SessionFactory` and `Session`s.
|
||||
Native Hibernate use is also supported. The client bundle is responsible for managing the ``SessionFactory`` and ``Session``s.
|
||||
|
||||
=== Bundle Package Imports
|
||||
|
||||
Your bundle's manifest will need to import, at a minimum,
|
||||
Your bundle's manifest will need to import, at a minimum:
|
||||
|
||||
* javax.persistence
|
||||
* `javax.persistence`
|
||||
* `org.hibernate.proxy` and `javassist.util.proxy`, due to Hibernate's ability to return proxies for lazy initialization (Javassist enhancement occurs on the entity's `ClassLoader` during runtime)
|
||||
* JDBC driver package (example: `org.h2`)
|
||||
* `org.osgi.framework`, necessary to discover the `SessionFactory` (described below)
|
||||
|
@ -186,7 +186,7 @@ include::{sourcedir}/_native/HibernateUtil.java[tag=osgi-discover-SessionFactory
|
|||
The https://github.com/hibernate/hibernate-demos/tree/master/hibernate-orm/osgi/unmanaged-native[unmanaged-native] demo project displays the use of optional Hibernate modules.
|
||||
Each module adds additional dependency bundles that must first be activated, either manually or through an additional feature.
|
||||
As of ORM 4.2, Envers is fully supported.
|
||||
Support for C3P0, Proxool, EhCache, and Infinispan were added in 4.3, however none of their 3rd party libraries currently work in OSGi (lots of `ClassLoader` problems, etc.).
|
||||
Support for C3P0, Proxool, EhCache, and Infinispan were added in 4.3. However, none of their 3rd party libraries currently work in OSGi (lots of `ClassLoader` problems, etc.).
|
||||
We're tracking the issues in JIRA.
|
||||
|
||||
=== Extension Points
|
||||
|
@ -201,7 +201,7 @@ The specified interface should be used during service registration.
|
|||
`org.hibernate.integrator.spi.Integrator`:: (as of 4.2)
|
||||
`org.hibernate.boot.registry.selector.StrategyRegistrationProvider`:: (as of 4.3)
|
||||
`org.hibernate.boot.model.TypeContributor`:: (as of 4.3)
|
||||
JTA's:: `javax.transaction.TransactionManager` and `javax.transaction.UserTransaction` (as of 4.2), however these are typically provided by the OSGi container.
|
||||
JTA's:: `javax.transaction.TransactionManager` and `javax.transaction.UserTransaction` (as of 4.2). However, these are typically provided by the OSGi container.
|
||||
|
||||
The easiest way to register extension point implementations is through a `blueprint.xml` file.
|
||||
Add `OSGI-INF/blueprint/blueprint.xml` to your classpath. Envers' blueprint is a great example:
|
||||
|
@ -225,10 +225,10 @@ Extension points can also be registered programmatically with `BundleContext#reg
|
|||
* Scanning is supported to find non-explicitly listed entities and mappings.
|
||||
However, they MUST be in the same bundle as your persistence unit (fairly typical anyway).
|
||||
Our OSGi `ClassLoader` only considers the "requesting bundle" (hence the requirement on using services to create `EntityManagerFactory`/`SessionFactory`), rather than attempting to scan all available bundles.
|
||||
This is primarily for versioning considerations, collision protections, etc.
|
||||
This is primarily for versioning considerations, collision protection, etc.
|
||||
* Some containers (ex: Aries) always return true for `PersistenceUnitInfo#excludeUnlistedClasses`, even if your `persistence.xml` explicitly has `exclude-unlisted-classes` set to `false`.
|
||||
They claim it's to protect JPA providers from having to implement scanning ("we handle it for you"), even though we still want to support it in many cases.
|
||||
The work around is to set `hibernate.archive.autodetection` to, for example, `hbm,class`.
|
||||
The workaround is to set `hibernate.archive.autodetection` to, for example, `hbm,class`.
|
||||
This tells hibernate to ignore the `excludeUnlistedClasses` value and scan for `*.hbm.xml` and entities regardless.
|
||||
* Scanning does not currently support annotated packages on `package-info.java`.
|
||||
* Currently, Hibernate OSGi is primarily tested using Apache Karaf and Apache Aries JPA. Additional testing is needed with Equinox, Gemini, and other container providers.
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
:sourcedir: ../../../../../test/java/org/hibernate/userguide/pc
|
||||
|
||||
Hibernate "grew up" not supporting bytecode enhancement at all.
|
||||
At that time, Hibernate only supported proxy-based for lazy loading and always used diff-based dirty calculation.
|
||||
At that time, Hibernate only supported proxy-based alternative for lazy loading and always used diff-based dirty calculation.
|
||||
Hibernate 3.x saw the first attempts at bytecode enhancement support in Hibernate.
|
||||
We consider those initial attempts (up until 5.0) completely as an incubation.
|
||||
The support for bytecode enhancement in 5.0 onward is what we are discussing here.
|
||||
|
@ -17,11 +17,11 @@ Hibernate supports the enhancement of an application Java domain model for the p
|
|||
===== Lazy attribute loading
|
||||
|
||||
Think of this as partial loading support.
|
||||
Essentially you can tell Hibernate that only part(s) of an entity should be loaded upon fetching from the database and when the other part(s) should be loaded as well.
|
||||
Note that this is very much different from proxy-based idea of lazy loading which is entity-centric where the entity's state is loaded at once as needed.
|
||||
Essentially, you can tell Hibernate that only part(s) of an entity should be loaded upon fetching from the database and when the other part(s) should be loaded as well.
|
||||
Note that this is very much different from the proxy-based idea of lazy loading which is entity-centric where the entity's state is loaded at once as needed.
|
||||
With bytecode enhancement, individual attributes or groups of attributes are loaded as needed.
|
||||
|
||||
Lazy attributes can be designated to be loaded together and this is called a "lazy group".
|
||||
Lazy attributes can be designated to be loaded together, and this is called a "lazy group".
|
||||
By default, all singular attributes are part of a single group, meaning that when one lazy singular attribute is accessed all lazy singular attributes are loaded.
|
||||
Lazy plural attributes, by default, are each a lazy group by themselves.
|
||||
This behavior is explicitly controllable through the `@org.hibernate.annotations.LazyGroup` annotation.
|
||||
|
@ -35,9 +35,9 @@ include::{sourcedir}/BytecodeEnhancementTest.java[tags=BytecodeEnhancement-lazy-
|
|||
----
|
||||
====
|
||||
|
||||
In the above example we have 2 lazy attributes: `accountsPayableXrefId` and `image`.
|
||||
In the above example, we have 2 lazy attributes: `accountsPayableXrefId` and `image`.
|
||||
Each is part of a different fetch group (accountsPayableXrefId is part of the default fetch group),
|
||||
which means that accessing `accountsPayableXrefId` will not force the loading of image, and vice-versa.
|
||||
which means that accessing `accountsPayableXrefId` will not force the loading of the `image` attribute, and vice-versa.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
|
@ -52,11 +52,11 @@ Historically Hibernate only supported diff-based dirty calculation for determini
|
|||
This essentially means that Hibernate would keep track of the last known state of an entity in regards to the database (typically the last read or write).
|
||||
Then, as part of flushing the persistence context, Hibernate would walk every entity associated with the persistence context and check its current state against that "last known database state".
|
||||
This is by far the most thorough approach to dirty checking because it accounts for data-types that can change their internal state (`java.util.Date` is the prime example of this).
|
||||
However, in a persistence context with a large number of associated entities it can also be a performance-inhibiting approach.
|
||||
However, in a persistence context with a large number of associated entities, it can also be a performance-inhibiting approach.
|
||||
|
||||
If your application does not need to care about "internal state changing data-type" use cases, bytecode-enhanced dirty tracking might be a worthwhile alternative to consider, especially in terms of performance.
|
||||
In this approach Hibernate will manipulate the bytecode of your classes to add "dirty tracking" directly to the entity, allowing the entity itself to keep track of which of its attributes have changed.
|
||||
During flush time, Hibernate simply asks your entity what has changed rather that having to perform the state-diff calculations.
|
||||
During the flush time, Hibernate asks your entity what has changed rather than having to perform the state-diff calculations.
|
||||
|
||||
[[BytecodeEnhancement-dirty-tracking-bidirectional]]
|
||||
===== Bidirectional association management
|
||||
|
@ -105,11 +105,11 @@ These are hard to discuss without diving into a discussion of Hibernate internal
|
|||
==== Performing enhancement
|
||||
|
||||
[[BytecodeEnhancement-enhancement-runtime]]
|
||||
===== Run-time enhancement
|
||||
===== Runtime enhancement
|
||||
|
||||
Currently, run-time enhancement of the domain model is only supported in managed JPA environments following the JPA-defined SPI for performing class transformations.
|
||||
Currently, runtime enhancement of the domain model is only supported in managed JPA environments following the JPA-defined SPI for performing class transformations.
|
||||
|
||||
Even then, this support is disabled by default. To enable run-time enhancement, specify one of the following configuration properties:
|
||||
Even then, this support is disabled by default. To enable runtime enhancement, specify one of the following configuration properties:
|
||||
|
||||
`*hibernate.enhancer.enableDirtyTracking*` (e.g. `true` or `false` (default value))::
|
||||
Enable dirty tracking feature in runtime bytecode enhancement.
|
||||
|
@ -122,14 +122,14 @@ Enable association management feature in runtime bytecode enhancement which auto
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
Also, at the moment, only annotated classes are supported for run-time enhancement.
|
||||
Also, at the moment, only annotated classes support runtime enhancement.
|
||||
====
|
||||
|
||||
[[BytecodeEnhancement-enhancement-gradle]]
|
||||
===== Gradle plugin
|
||||
|
||||
Hibernate provides a Gradle plugin that is capable of providing build-time enhancement of the domain model as it is compiled as part of a Gradle build.
|
||||
To use the plugin a project would first need to apply it:
|
||||
To use the plugin, a project would first need to apply it:
|
||||
|
||||
.Apply the Gradle plugin
|
||||
====
|
||||
|
@ -145,7 +145,7 @@ enableLazyInitialization:: Whether enhancement for lazy attribute loading should
|
|||
enableDirtyTracking:: Whether enhancement for self-dirty tracking should be done.
|
||||
enableAssociationManagement:: Whether enhancement for bi-directional association management should be done.
|
||||
|
||||
The default value for all 3 configuration settings is `false`
|
||||
The default value for all 3 configuration settings is `false`.
|
||||
|
||||
The `enhance { }` block is required in order for enhancement to occur.
|
||||
Enhancement is disabled by default in preparation for additional capabilities (hbm2ddl, etc.) in the plugin.
|
||||
|
@ -157,7 +157,7 @@ Hibernate provides a Maven plugin capable of providing build-time enhancement of
|
|||
See the section on the <<BytecodeEnhancement-enhancement-gradle>> for details on the configuration settings. Again, the default for those 3 is `false`.
|
||||
|
||||
The Maven plugin supports one additional configuration setting: failOnError, which controls what happens in case of error.
|
||||
Default behavior is to fail the build, but it can be set so that only a warning is issued.
|
||||
The default behavior is to fail the build, but it can be set so that only a warning is issued.
|
||||
|
||||
.Apply the Maven plugin
|
||||
====
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
[[pc]]
|
||||
== Persistence Contexts
|
||||
== Persistence Context
|
||||
:sourcedir: ../../../../../test/java/org/hibernate/userguide/pc
|
||||
:sourcedir-caching: ../../../../../test/java/org/hibernate/userguide/caching
|
||||
:extrasdir: extras
|
||||
|
@ -10,12 +10,12 @@ Persistent data has a state in relation to both a persistence context and the un
|
|||
|
||||
`transient`:: the entity has just been instantiated and is not associated with a persistence context.
|
||||
It has no persistent representation in the database and typically no identifier value has been assigned (unless the _assigned_ generator was used).
|
||||
`managed`, or `persistent`:: the entity has an associated identifier and is associated with a persistence context.
|
||||
`managed` or `persistent`:: the entity has an associated identifier and is associated with a persistence context.
|
||||
It may or may not physically exist in the database yet.
|
||||
`detached`:: the entity has an associated identifier, but is no longer associated with a persistence context (usually because the persistence context was closed or the instance was evicted from the context)
|
||||
`removed`:: the entity has an associated identifier and is associated with a persistence context, however it is scheduled for removal from the database.
|
||||
`detached`:: the entity has an associated identifier but is no longer associated with a persistence context (usually because the persistence context was closed or the instance was evicted from the context)
|
||||
`removed`:: the entity has an associated identifier and is associated with a persistence context, however, it is scheduled for removal from the database.
|
||||
|
||||
Much of the `org.hibernate.Session` and `javax.persistence.EntityManager` methods deal with moving entities between these states.
|
||||
Much of the `org.hibernate.Session` and `javax.persistence.EntityManager` methods deal with moving entities among these states.
|
||||
|
||||
[[pc-unwrap]]
|
||||
=== Accessing Hibernate APIs from JPA
|
||||
|
@ -37,7 +37,7 @@ include::BytecodeEnhancement.adoc[]
|
|||
=== Making entities persistent
|
||||
|
||||
Once you've created a new entity instance (using the standard `new` operator) it is in `new` state.
|
||||
You can make it persistent by associating it to either a `org.hibernate.Session` or `javax.persistence.EntityManager`.
|
||||
You can make it persistent by associating it to either an `org.hibernate.Session` or a `javax.persistence.EntityManager`.
|
||||
|
||||
[[pc-persist-jpa-example]]
|
||||
.Making an entity persistent with JPA
|
||||
|
@ -57,7 +57,7 @@ include::{sourcedir}/PersistenceContextTest.java[tags=pc-persist-native-example]
|
|||
----
|
||||
====
|
||||
|
||||
`org.hibernate.Session` also has a method named persist which follows the exact semantic defined in the JPA specification for the persist method.
|
||||
`org.hibernate.Session` also has a method named persist which follows the exact semantics defined in the JPA specification for the persist method.
|
||||
It is this `org.hibernate.Session` method to which the Hibernate `javax.persistence.EntityManager` implementation delegates.
|
||||
|
||||
If the `DomesticCat` entity type has a generated identifier, the value is associated with the instance when the save or persist is called.
|
||||
|
@ -78,7 +78,7 @@ include::{sourcedir}/PersistenceContextTest.java[tags=pc-remove-jpa-example]
|
|||
====
|
||||
|
||||
[[pc-remove-native-example]]
|
||||
.Deleting an entity with Hibernate API
|
||||
.Deleting an entity with the Hibernate API
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -88,10 +88,11 @@ include::{sourcedir}/PersistenceContextTest.java[tags=pc-remove-native-example]
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
Hibernate itself can handle deleting detached state.
|
||||
JPA, however, disallows it.
|
||||
Hibernate itself can handle deleting entities in detached state.
|
||||
JPA, however, disallows this behavior.
|
||||
|
||||
The implication here is that the entity instance passed to the `org.hibernate.Session` delete method can be either in managed or detached state,
|
||||
while the entity instance passed to remove on `javax.persistence.EntityManager` must be in managed state.
|
||||
while the entity instance passed to remove on `javax.persistence.EntityManager` must be in the managed state.
|
||||
====
|
||||
|
||||
[[pc-get-reference]]
|
||||
|
@ -174,10 +175,114 @@ include::{sourcedir}/PersistenceContextTest.java[tags=pc-find-optional-by-id-nat
|
|||
----
|
||||
====
|
||||
|
||||
[[pc-by-multiple-ids]]
|
||||
=== Obtain multiple entities by their identifiers
|
||||
|
||||
If you want to load multiple entities by providing their identifiers, calling the `EntityManager#find` method multiple times is not only inconvenient,
|
||||
but also inefficient.
|
||||
|
||||
While the JPA standard does not support retrieving multiple entities at once, other than running a JPQL or Criteria API query,
|
||||
Hibernate offers this functionality via the
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/Session.html#byMultipleIds-java.lang.Class-[`byMultipleIds` method] of the Hibernate `Session`.
|
||||
|
||||
The `byMultipleIds` method returns a
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/MultiIdentifierLoadAccess.html[`MultiIdentifierLoadAccess`]
|
||||
which you can use to customize the multi-load request.
|
||||
|
||||
The `MultiIdentifierLoadAccess` interface provides several methods which you can use to
|
||||
change the behavior of the multi-load call:
|
||||
|
||||
`enableOrderedReturn(boolean enabled)`::
|
||||
This setting controls whether the returned `List` is ordered and positional in relation to the
|
||||
incoming ids. If enabled (the default), the returned `List` is ordered and
|
||||
positional relative to the incoming ids. In other words, a request to
|
||||
`multiLoad([2,1,3])` will return `[Entity#2, Entity#1, Entity#3]`.
|
||||
+
|
||||
An important distinction is made here in regards to the handling of
|
||||
unknown entities depending on this "ordered return" setting.
|
||||
If enabled, a null is inserted into the `List` at the proper position(s).
|
||||
If disabled, the nulls are not put into the returned `List`.
|
||||
+
|
||||
In other words, consumers of the returned ordered List would need to be able to handle null elements.
|
||||
`enableSessionCheck(boolean enabled)`::
|
||||
This setting, which is disabled by default, tells Hibernate to check the first-level cache (a.k.a. `Session` or Persistence Context) first and, if the entity is found and already managed by the Hibernate `Session`, the cached entity will be added to the returned `List`, therefore skipping it from being fetched via the multi-load query.
|
||||
`enableReturnOfDeletedEntities(boolean enabled)`::
|
||||
This setting instructs Hibernate whether the multi-load operation is allowed to return entities that were deleted by the current Persistence Context. A deleted entity is one which has been passed to the
|
||||
`Session.delete` or `Session.remove` method, but the `Session` was not flushed yet, meaning that the
|
||||
associated row was not deleted in the database table.
|
||||
+
|
||||
The default behavior is to handle them as null in the return (see `enableOrderedReturn`).
|
||||
When enabled, the result set will contain deleted entities.
|
||||
When disabled (which is the default behavior), deleted entities are not included in the returning `List`.
|
||||
`with(LockOptions lockOptions)`::
|
||||
This setting allows you to pass a given
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/LockOptions.html[`LockOptions`] mode to the multi-load query.
|
||||
`with(CacheMode cacheMode)`::
|
||||
This setting allows you to pass a given
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/CacheMode.html[`CacheMode`]
|
||||
strategy so that we can load entities from the second-level cache, therefore skipping the cached entities from being fetched via the multi-load query.
|
||||
`withBatchSize(int batchSize)`::
|
||||
This setting allows you to specify a batch size for loading the entities (e.g. how many at a time).
|
||||
+
|
||||
The default is to use a batch sizing strategy defined by the `Dialect.getDefaultBatchLoadSizingStrategy()` method.
|
||||
+
|
||||
Any greater-than-one value here will override that default behavior.
|
||||
`with(RootGraph<T> graph)`::
|
||||
The `RootGraph` is a Hibernate extension to the JPA `EntityGraph` contract,
|
||||
and this method allows you to pass a specific `RootGraph` to the multi-load query
|
||||
so that it can fetch additional relationships of the current loading entity.
|
||||
|
||||
Now, assuming we have 3 `Person` entities in the database, we can load all of them with a single call
|
||||
as illustrated by the following example:
|
||||
|
||||
[[pc-by-multiple-ids-example]]
|
||||
.Loading multiple entities using the `byMultipleIds()` Hibernate API
|
||||
====
|
||||
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/MultiLoadIdTest.java[tags=pc-by-multiple-ids-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/pc-by-multiple-ids-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
Notice that only one SQL SELECT statement was executed since the second call uses the
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/MultiIdentifierLoadAccess.html#enableSessionCheck-boolean-[`enableSessionCheck`] method of the `MultiIdentifierLoadAccess`
|
||||
to instruct Hibernate to skip entities that are already loaded in the current Persistence Context.
|
||||
|
||||
If the entities are not available in the current Persistence Context but they could be loaded from the second-level cache, you can use the
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/MultiIdentifierLoadAccess.html#with-org.hibernate.CacheMode-[`with(CacheMode)`] method of the `MultiIdentifierLoadAccess` object.
|
||||
|
||||
[[pc-by-multiple-ids-second-level-cache-example]]
|
||||
.Loading multiple entities from the second-level cache
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/MultiLoadIdTest.java[tags=pc-by-multiple-ids-second-level-cache-example]
|
||||
----
|
||||
====
|
||||
|
||||
In the example above, we first make sure that we clear the second-level cache to demonstrate that
|
||||
the multi-load query will put the returning entities into the second-level cache.
|
||||
|
||||
After executing the first `byMultipleIds` call, Hibernate is going to fetch the requested entities,
|
||||
and as illustrated by the `getSecondLevelCachePutCount` method call, 3 entities were indeed added to the
|
||||
shared cache.
|
||||
|
||||
Afterward, when executing the second `byMultipleIds` call for the same entities in a new Hibernate `Session`,
|
||||
we set the
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/CacheMode.html#NORMAL[`CacheMode.NORMAL`] second-level cache mode so that entities are going to be returned from the second-level cache.
|
||||
|
||||
The `getSecondLevelCacheHitCount` statistics method returns 3 this time, since the 3 entities were loaded from the second-level cache, and, as illustrated by `sqlStatementInterceptor.getSqlQueries()`, no multi-load SELECT statement was executed this time.
|
||||
|
||||
[[pc-find-natural-id]]
|
||||
=== Obtain an entity by natural-id
|
||||
|
||||
In addition to allowing to load by identifier, Hibernate allows applications to load by declared natural identifier.
|
||||
In addition to allowing applications to load the entity by its identifier, Hibernate allows applications to load entities by the declared natural identifier.
|
||||
|
||||
[[pc-find-by-natural-id-entity-example]]
|
||||
.Natural-id mapping
|
||||
|
@ -219,23 +324,397 @@ include::{sourcedir}/PersistenceContextTest.java[tags=pc-find-optional-by-simple
|
|||
----
|
||||
====
|
||||
|
||||
Hibernate offer a consistent API for accessing persistent data by identifier or by the natural-id. Each of these defines the same two data access methods:
|
||||
Hibernate offers a consistent API for accessing persistent data by identifier or by the natural-id. Each of these defines the same two data access methods:
|
||||
|
||||
getReference::
|
||||
Should be used in cases where the identifier is assumed to exist, where non-existence would be an actual error.
|
||||
Should never be used to test existence.
|
||||
That is because this method will prefer to create and return a proxy if the data is not already associated with the Session rather than hit the database.
|
||||
The quintessential use-case for using this method is to create foreign-key based associations.
|
||||
The quintessential use-case for using this method is to create foreign key based associations.
|
||||
load::
|
||||
Will return the persistent data associated with the given identifier value or null if that identifier does not exist.
|
||||
|
||||
Each of these two methods define an overloading variant accepting a `org.hibernate.LockOptions` argument.
|
||||
Each of these two methods defines an overloading variant accepting a `org.hibernate.LockOptions` argument.
|
||||
Locking is discussed in a separate <<chapters/locking/Locking.adoc#locking,chapter>>.
|
||||
|
||||
[[pc-filtering]]
|
||||
=== Filtering entities and associations
|
||||
|
||||
Hibernate offers two options if you want to filter entities or entity associations:
|
||||
|
||||
static (e.g. `@Where` and `@WhereJoinTable`):: which are defined at mapping time and
|
||||
cannot change at runtime.
|
||||
dynamic (e.g. `@Filter` and `@FilterJoinTable`):: which are applied and configured at runtime.
|
||||
|
||||
[[pc-where]]
|
||||
==== `@Where`
|
||||
|
||||
Sometimes, you want to filter out entities or collections using custom SQL criteria.
|
||||
This can be achieved using the `@Where` annotation, which can be applied to entities and collections.
|
||||
|
||||
[[pc-where-example]]
|
||||
.`@Where` mapping usage
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/WhereTest.java[tags=pc-where-example]
|
||||
----
|
||||
====
|
||||
|
||||
If the database contains the following entities:
|
||||
|
||||
[[pc-where-persistence-example]]
|
||||
.Persisting and fetching entities with a `@Where` mapping
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/WhereTest.java[tags=pc-where-persistence-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/pc-where-persistence-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
When executing an `Account` entity query, Hibernate is going to filter out all records that are not active.
|
||||
|
||||
[[pc-where-entity-query-example]]
|
||||
.Query entities mapped with `@Where`
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/WhereTest.java[tags=pc-where-entity-query-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/pc-where-entity-query-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
When fetching the `debitAccounts` or the `creditAccounts` collections, Hibernate is going to apply the `@Where` clause filtering criteria to the associated child entities.
|
||||
|
||||
[[pc-where-collection-query-example]]
|
||||
.Traversing collections mapped with `@Where`
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/WhereTest.java[tags=pc-where-collection-query-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/pc-where-collection-query-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
[[pc-where-join-table]]
|
||||
==== `@WhereJoinTable`
|
||||
|
||||
Just like the `@Where` annotation, `@WhereJoinTable` is used to filter out collections using a joined table (e.g. `@ManyToMany` association).
|
||||
|
||||
[[pc-where-join-table-example]]
|
||||
.`@WhereJoinTable` mapping example
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/WhereJoinTableTest.java[tags=pc-where-join-table-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/pc-where-join-table-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
In the example above, the current week `Reader` entities are included in the `currentWeekReaders` collection
|
||||
which uses the `@WhereJoinTable` annotation to filter the joined table rows according to the provided SQL clause.
|
||||
|
||||
Considering that the following two `Book_Reader` entries are added into our system:
|
||||
|
||||
[[pc-where-join-table-persist-example]]
|
||||
.`@WhereJoinTable` test data
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/WhereJoinTableTest.java[tags=pc-where-join-table-persist-example]
|
||||
----
|
||||
====
|
||||
|
||||
When fetching the `currentWeekReaders` collection, Hibernate is going to find only one entry:
|
||||
|
||||
[[pc-where-join-table-fetch-example]]
|
||||
.`@WhereJoinTable` fetch example
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/WhereJoinTableTest.java[tags=pc-where-join-table-fetch-example]
|
||||
----
|
||||
====
|
||||
|
||||
[[pc-filter]]
|
||||
==== `@Filter`
|
||||
|
||||
The `@Filter` annotation is another way to filter out entities or collections using custom SQL criteria.
|
||||
Unlike the `@Where` annotation, `@Filter` allows you to parameterize the filter clause at runtime.
|
||||
|
||||
Now, considering we have the following `Account` entity:
|
||||
|
||||
[[pc-filter-account-example]]
|
||||
.`@Filter` mapping entity-level usage
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/FilterTest.java[tags=pc-filter-Account-example]
|
||||
----
|
||||
====
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
Notice that the `active` property is mapped to the `active_status` column.
|
||||
|
||||
This mapping was done to show you that the `@Filter` condition uses a SQL condition and not a JPQL filtering predicate.
|
||||
====
|
||||
|
||||
As already explained, we can also apply the `@Filter` annotation for collections as illustrated by the `Client` entity:
|
||||
|
||||
[[pc-filter-client-example]]
|
||||
.`@Filter` mapping collection-level usage
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/FilterTest.java[tags=pc-filter-Client-example]
|
||||
----
|
||||
====
|
||||
|
||||
If we persist a `Client` with three associated `Account` entities,
|
||||
Hibernate will execute the following SQL statements:
|
||||
|
||||
[[pc-filter-persistence-example]]
|
||||
.Persisting and fetching entities with a `@Filter` mapping
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/FilterTest.java[tags=pc-filter-persistence-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/pc-filter-persistence-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
By default, without explicitly enabling the filter, Hibernate is going to fetch all `Account` entities.
|
||||
|
||||
[[pc-no-filter-entity-query-example]]
|
||||
.Query entities mapped without activating the `@Filter`
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/FilterTest.java[tags=pc-no-filter-entity-query-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/pc-no-filter-entity-query-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
If the filter is enabled and the filter parameter value is provided,
|
||||
then Hibernate is going to apply the filtering criteria to the associated `Account` entities.
|
||||
|
||||
[[pc-filter-entity-query-example]]
|
||||
.Query entities mapped with `@Filter`
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/FilterTest.java[tags=pc-filter-entity-query-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/pc-filter-entity-query-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
Filters apply to entity queries, but not to direct fetching.
|
||||
|
||||
Therefore, in the following example, the filter is not taken into consideration when fetching an entity from the Persistence Context.
|
||||
|
||||
[[pc-filter-entity-example]]
|
||||
.Fetching entities mapped with `@Filter`
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/FilterTest.java[tags=pc-filter-entity-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/pc-filter-entity-example.sql[]
|
||||
----
|
||||
|
||||
As you can see from the example above, contrary to an entity query, the filter does not prevent the entity from being loaded.
|
||||
====
|
||||
|
||||
Just like with entity queries, collections can be filtered as well, but only if the filter is explicitly enabled on the currently running Hibernate `Session`.
|
||||
|
||||
[[pc-no-filter-collection-query-example]]
|
||||
.Traversing collections without activating the `@Filter`
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/FilterTest.java[tags=pc-no-filter-collection-query-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/pc-no-filter-collection-query-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
When activating the `@Filter` and fetching the `accounts` collections, Hibernate is going to apply the filter condition to the associated collection entries.
|
||||
|
||||
[[pc-filter-collection-query-example]]
|
||||
.Traversing collections mapped with `@Filter`
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/FilterTest.java[tags=pc-filter-collection-query-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/pc-filter-collection-query-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The main advantage of `@Filter` over the `@Where` clause is that the filtering criteria can be customized at runtime.
|
||||
====
|
||||
|
||||
[WARNING]
|
||||
====
|
||||
It's not possible to combine the `@Filter` and `@Cache` collection annotations.
|
||||
This limitation is due to ensuring consistency and because the filtering information is not stored in the second-level cache.
|
||||
|
||||
If caching were allowed for a currently filtered collection, then the second-level cache would store only a subset of the whole collection.
|
||||
Afterward, every other Session will get the filtered collection from the cache, even if the Session-level filters have not been explicitly activated.
|
||||
|
||||
For this reason, the second-level collection cache is limited to storing whole collections, and not subsets.
|
||||
====
|
||||
|
||||
[[pc-filter-sql-fragment-alias]]
|
||||
==== `@Filter` with `@SqlFragmentAlias`
|
||||
|
||||
When using the `@Filter` annotation and working with entities that are mapped onto multiple database tables,
|
||||
you will need to use the
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/SqlFragmentAlias.html[`@SqlFragmentAlias`] annotation
|
||||
if the `@Filter` defines a condition that uses predicates across multiple tables.
|
||||
|
||||
[[pc-filter-sql-fragment-alias-example]]
|
||||
.`@SqlFragmentAlias` mapping usage
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/FilterSqlFragementAliasTest.java[tags=pc-filter-sql-fragment-alias-example]
|
||||
----
|
||||
====
|
||||
|
||||
Now, when fetching the `Account` entities and activating the filter,
|
||||
Hibernate is going to apply the right table aliases to the filter predicates:
|
||||
|
||||
[[pc-filter-sql-fragment-alias-query-example]]
|
||||
.Fetching a collection filtered with `@SqlFragmentAlias`
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/FilterSqlFragementAliasTest.java[tags=pc-filter-sql-fragment-alias-query-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/pc-filter-sql-fragment-alias-query-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
[[pc-filter-join-table]]
|
||||
==== `@FilterJoinTable`
|
||||
|
||||
When using the `@Filter` annotation with collections, the filtering is done against the child entries (entities or embeddables).
|
||||
However, if you have a link table between the parent entity and the child table, then you need to use the `@FilterJoinTable` to filter child entries according to some column contained in the join table.
|
||||
|
||||
The `@FilterJoinTable` annotation can be, therefore, applied to a unidirectional `@OneToMany` collection as illustrated in the following mapping:
|
||||
|
||||
[[pc-filter-join-table-example]]
|
||||
.`@FilterJoinTable` mapping usage
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/FilterJoinTableTest.java[tags=pc-filter-join-table-example]
|
||||
----
|
||||
====
|
||||
|
||||
The `firstAccounts` filter will allow us to get only the `Account` entities that have the `order_id`
|
||||
(which tells the position of every entry inside the `accounts` collection)
|
||||
less than a given number (e.g. `maxOrderId`).
|
||||
|
||||
Let's assume our database contains the following entities:
|
||||
|
||||
[[pc-filter-join-table-persistence-example]]
|
||||
.Persisting and fetching entities with a `@FilterJoinTable` mapping
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/FilterJoinTableTest.java[tags=pc-filter-join-table-persistence-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/pc-filter-join-table-persistence-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
The collections can be filtered only if the associated filter is enabled on the currently running Hibernate `Session`.
|
||||
|
||||
[[pc-no-filter-join-table-collection-query-example]]
|
||||
.Traversing collections mapped with `@FilterJoinTable` without enabling the filter
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/FilterJoinTableTest.java[tags=pc-no-filter-join-table-collection-query-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/pc-no-filter-join-table-collection-query-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
If we enable the filter and set the `maxOrderId` to `1` when fetching the `accounts` collections, Hibernate is going to apply the `@FilterJoinTable` clause filtering criteria, and we will get just
|
||||
`2` `Account` entities, with the `order_id` values of `0` and `1`.
|
||||
|
||||
[[pc-filter-join-table-collection-query-example]]
|
||||
.Traversing collections mapped with `@FilterJoinTable`
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/FilterJoinTableTest.java[tags=pc-filter-join-table-collection-query-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/pc-filter-join-table-collection-query-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
[[pc-managed-state]]
|
||||
=== Modifying managed/persistent state
|
||||
|
||||
Entities in managed/persistent state may be manipulated by the application and any changes will be automatically detected and persisted when the persistence context is flushed.
|
||||
Entities in managed/persistent state may be manipulated by the application, and any changes will be automatically detected and persisted when the persistence context is flushed.
|
||||
There is no need to call a particular method to make your modifications persistent.
|
||||
|
||||
[[pc-managed-state-jpa-example]]
|
||||
|
@ -320,7 +799,7 @@ include::{sourcedir}/DynamicUpdateTest.java[tags=pc-managed-state-dynamic-update
|
|||
----
|
||||
====
|
||||
|
||||
This time, when reruning the previous test case, Hibernate generates the following SQL UPDATE statement:
|
||||
This time, when rerunning the previous test case, Hibernate generates the following SQL UPDATE statement:
|
||||
|
||||
[[pc-managed-state-dynamic-update-example]]
|
||||
.Modifying the `Product` entity with a dynamic update
|
||||
|
@ -370,11 +849,11 @@ See the discussion of non-identifier <<chapters/domain/basic_types.adoc#mapping-
|
|||
|
||||
[IMPORTANT]
|
||||
====
|
||||
Traditionally, Hibernate has been allowing detached entities to be refreshed.
|
||||
Traditionally, Hibernate allowed detached entities to be refreshed.
|
||||
Unfortunately, JPA prohibits this practice and specifies that an `IllegalArgumentException` should be thrown instead.
|
||||
|
||||
For this reason, when bootstrapping the Hibernate `SessionFactory` using the native API, the legacy detached entity refresh behavior is going to be preserved.
|
||||
On the other hand, when bootstrapping Hibernate through JPA `EntityManagerFactory` building process, detached entities are not allowed to be refreshed by default.
|
||||
On the other hand, when bootstrapping Hibernate through the JPA `EntityManagerFactory` building process, detached entities are not allowed to be refreshed by default.
|
||||
|
||||
However, this default behavior can be overwritten through the `hibernate.allow_refresh_detached_entity` configuration property.
|
||||
If this property is explicitly set to `true`, then you can refresh detached entities even when using the JPA bootstraps mechanism, therefore bypassing the JPA specification restriction.
|
||||
|
@ -416,16 +895,16 @@ Clearing the persistence context has the same effect.
|
|||
Evicting a particular entity from the persistence context makes it detached.
|
||||
And finally, serialization will make the deserialized form be detached (the original instance is still managed).
|
||||
|
||||
Detached data can still be manipulated, however the persistence context will no longer automatically know about these modification and the application will need to intervene to make the changes persistent again.
|
||||
Detached data can still be manipulated, however, the persistence context will no longer automatically know about these modifications, and the application will need to intervene to make the changes persistent again.
|
||||
|
||||
[[pc-detach-reattach]]
|
||||
==== Reattaching detached data
|
||||
|
||||
Reattachment is the process of taking an incoming entity instance that is in detached state and re-associating it with the current persistence context.
|
||||
Reattachment is the process of taking an incoming entity instance that is in the detached state and re-associating it with the current persistence context.
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
JPA does not provide for this model. This is only available through Hibernate `org.hibernate.Session`.
|
||||
JPA does not support reattaching detached data. This is only available through Hibernate `org.hibernate.Session`.
|
||||
====
|
||||
|
||||
[[pc-detach-reattach-lock-example]]
|
||||
|
@ -459,7 +938,7 @@ Provided the entity is detached, `update` and `saveOrUpdate` operate exactly the
|
|||
[[pc-merge]]
|
||||
==== Merging detached data
|
||||
|
||||
Merging is the process of taking an incoming entity instance that is in detached state and copying its data over onto a new managed instance.
|
||||
Merging is the process of taking an incoming entity instance that is in the detached state and copying its data over onto a new managed instance.
|
||||
|
||||
Although not exactly per se, the following example is a good visualization of the `merge` operation internals.
|
||||
|
||||
|
@ -502,7 +981,7 @@ The possible values are:
|
|||
disallow (the default):: throws `IllegalStateException` if an entity copy is detected
|
||||
allow:: performs the merge operation on each entity copy that is detected
|
||||
log:: (provided for testing only) performs the merge operation on each entity copy that is detected and logs information about the entity copies.
|
||||
This setting requires DEBUG logging be enabled for `org.hibernate.event.internal.EntityCopyAllowedLoggedObserver`.
|
||||
This setting requires DEBUG logging be enabled for `org.hibernate.event.internal.EntityCopyAllowedLoggedObserver`
|
||||
|
||||
In addition, the application may customize the behavior by providing an implementation of `org.hibernate.event.spi.EntityCopyObserver` and setting `hibernate.event.merge.entity_copy_observer` to the class name.
|
||||
When this property is set to `allow` or `log`, Hibernate will merge each entity copy detected while cascading the merge operation.
|
||||
|
@ -653,7 +1132,7 @@ include::{sourcedir-caching}/FirstLevelCacheTest.java[tags=caching-management-co
|
|||
JPA allows you to propagate the state transition from a parent entity to a child.
|
||||
For this purpose, the JPA `javax.persistence.CascadeType` defines various cascade types:
|
||||
|
||||
`ALL`:: cascades all entity state transitions
|
||||
`ALL`:: cascades all entity state transitions.
|
||||
`PERSIST`:: cascades the entity persist operation.
|
||||
`MERGE`:: cascades the entity merge operation.
|
||||
`REMOVE`:: cascades the entity remove operation.
|
||||
|
@ -825,11 +1304,16 @@ the automatic schema generator will apply the ON DELETE CASCADE SQL directive to
|
|||
as illustrated by the following example.
|
||||
|
||||
[[pc-cascade-on-delete-mapping-example]]
|
||||
.`@OnDelete` mapping
|
||||
.`@OnDelete` `@ManyToOne` mapping
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/CascadeOnDeleteTest.java[tags=pc-cascade-on-delete-mapping-example]
|
||||
include::{sourcedir}/CascadeOnDeleteTest.java[tags=pc-cascade-on-delete-mapping-Person-example]
|
||||
----
|
||||
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/CascadeOnDeleteTest.java[tags=pc-cascade-on-delete-mapping-Phone-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
|
@ -838,10 +1322,10 @@ include::{extrasdir}/pc-cascade-on-delete-mapping-example.sql[]
|
|||
----
|
||||
====
|
||||
|
||||
Now, you can just remove the `Person` entity, and the associated `Phone` is going to be removed automatically.
|
||||
Now, you can just remove the `Person` entity, and the associated `Phone` entities are going to be deleted automatically via the Foreign Key cascade.
|
||||
|
||||
[[pc-cascade-on-delete-example]]
|
||||
.`@OnDelete` example
|
||||
.`@OnDelete` `@ManyToOne` delete example
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -852,4 +1336,106 @@ include::{sourcedir}/CascadeOnDeleteTest.java[tags=pc-cascade-on-delete-example]
|
|||
----
|
||||
include::{extrasdir}/pc-cascade-on-delete-example.sql[]
|
||||
----
|
||||
====
|
||||
====
|
||||
|
||||
The `@OnDelete` annotation can also be placed on a collection, as
|
||||
illustrated in the following example.
|
||||
|
||||
[[pc-cascade-on-delete-collection-mapping-example]]
|
||||
.`@OnDelete` `@OneToMany` mapping
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/CascadeOnDeleteCollectionTest.java[tags=pc-cascade-on-delete-collection-mapping-Person-example]
|
||||
----
|
||||
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/CascadeOnDeleteCollectionTest.java[tags=pc-cascade-on-delete-collection-mapping-Phone-example]
|
||||
----
|
||||
====
|
||||
|
||||
Now, when removing the `Person` entity, all the associated `Phone` child entities are deleted via the Foreign Key cascade even if the `@OneToMany` collection was using the `CascadeType.ALL` attribute.
|
||||
|
||||
[[pc-cascade-on-delete-collection-example]]
|
||||
.`@OnDelete` `@ManyToOne` delete example
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/CascadeOnDeleteCollectionTest.java[tags=pc-cascade-on-delete-collection-example]
|
||||
----
|
||||
|
||||
[source, SQL, indent=0]
|
||||
----
|
||||
include::{extrasdir}/pc-cascade-on-delete-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
Without the `@OnDelete` annotation, the `@OneToMany` association relies on the `cascade` attribute to propagate the `remove` entity state transition from the parent entity to its children.
|
||||
However, when the `@OnDelete` annotation is in place, Hibernate prevents the child entity `DELETE` statement from being executed while flushing the Persistence Context.
|
||||
|
||||
This way, only the parent entity gets deleted, and all the associated child records are removed by the database engine, instead of being deleted explicitly via `DELETE` statements.
|
||||
====
|
||||
|
||||
[[pc-exception-handling]]
|
||||
=== Exception handling
|
||||
|
||||
If the JPA `EntityManager` or the Hibernate-specific `Session` throws an exception, including any JDBC https://docs.oracle.com/javase/8/docs/api/java/sql/SQLException.html[`SQLException`], you have to immediately rollback the database transaction and close the current `EntityManager` or `Session`.
|
||||
|
||||
Certain methods of the JPA `EntityManager` or the Hibernate `Session` will not leave the Persistence Context in a consistent state. As a rule of thumb, no exception thrown by Hibernate can be treated as recoverable. Ensure that the Session will be closed by calling the `close()` method in a finally block.
|
||||
|
||||
Rolling back the database transaction does not put your business objects back into the state they were at the start of the transaction. This means that the database state and the business objects will be out of sync. Usually, this is not a problem because exceptions are not recoverable and you will have to start over after rollback anyway.
|
||||
|
||||
The JPA {jpaJavadocUrlPrefix}PersistenceException.html[`PersistenceException`] or the
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/HibernateException.html[`HibernateException`] wraps most of the errors that can occur in a Hibernate persistence layer.
|
||||
|
||||
Both the `PersistenceException` and the `HibernateException` are runtime exceptions because, in our opinion, we should not force the application developer to catch an unrecoverable exception at a low layer. In most systems, unchecked and fatal exceptions are handled in one of the first frames of the method call stack (i.e., in higher layers) and either an error message is presented to the application user or some other appropriate action is taken. Note that Hibernate might also throw other unchecked exceptions that are not a `HibernateException`. These are not recoverable either, and appropriate action should be taken.
|
||||
|
||||
Hibernate wraps the JDBC `SQLException`, thrown while interacting with the database, in a
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/JDBCException.html[`JDBCException`].
|
||||
In fact, Hibernate will attempt to convert the exception into a more meaningful subclass of `JDBCException`. The underlying `SQLException` is always available via https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/JDBCException.html#getSQLException--[`JDBCException.getSQLException()`]. Hibernate converts the `SQLException` into an appropriate JDBCException subclass using the
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/exception/spi/SQLExceptionConverter.html[`SQLExceptionConverter`]
|
||||
attached to the current `SessionFactory`.
|
||||
|
||||
By default, the `SQLExceptionConverter` is defined by the configured Hibernate `Dialect` via the
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/dialect/Dialect.html#buildSQLExceptionConversionDelegate--[`buildSQLExceptionConversionDelegate`] method
|
||||
which is overridden by several database-specific ``Dialect``s.
|
||||
|
||||
However, it is also possible to plug in a custom implementation. See the
|
||||
<<appendices/Configurations.adoc#configurations-exception-handling,`hibernate.jdbc.sql_exception_converter`>> configuration property for more details.
|
||||
|
||||
The standard `JDBCException` subtypes are:
|
||||
|
||||
ConstraintViolationException::
|
||||
indicates some form of integrity constraint violation.
|
||||
DataException::
|
||||
indicates that evaluation of the valid SQL statement against the given data
|
||||
resulted in some illegal operation, mismatched types, truncation or incorrect cardinality.
|
||||
GenericJDBCException::
|
||||
a generic exception which did not fall into any of the other categories.
|
||||
JDBCConnectionException::
|
||||
indicates an error with the underlying JDBC communication.
|
||||
LockAcquisitionException::
|
||||
indicates an error acquiring a lock level necessary to perform the requested operation.
|
||||
LockTimeoutException::
|
||||
indicates that the lock acquisition request has timed out.
|
||||
PessimisticLockException::
|
||||
indicates that a lock acquisition request has failed.
|
||||
QueryTimeoutException::
|
||||
indicates that the current executing query has timed out.
|
||||
SQLGrammarException::
|
||||
indicates a grammar or syntax problem with the issued SQL.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
Starting with Hibernate 5.2, the Hibernate `Session` extends the JPA `EntityManager`. For this reason, when a `SessionFactory` is built via Hibernate's native bootstrapping,
|
||||
the `HibernateException` or `SQLException` can be wrapped in a JPA {jpaJavadocUrlPrefix}PersistenceException.html[`PersistenceException`] when thrown
|
||||
by `Session` methods that implement `EntityManager` methods (e.g., https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/Session.html#merge-java.lang.Object-[Session.merge(Object object)],
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/Session.html#flush--[Session.flush()]).
|
||||
|
||||
If your `SessionFactory` is built via Hibernate's native bootstrapping, and you don't want the Hibernate exceptions to be wrapped in the JPA `PersistenceException`, you need to set the
|
||||
`hibernate.native_exception_handling_51_compliance` configuration property to `true`. See the
|
||||
<<appendices/Configurations.adoc#configurations-exception-handling,`hibernate.native_exception_handling_51_compliance`>> configuration property for more details.
|
||||
====
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
apply plugin: 'org.hibernate.orm'
|
||||
|
||||
ext {
|
||||
hibernateVersion = 'hibernate-version-you-want'
|
||||
}
|
||||
|
@ -10,6 +12,8 @@ buildscript {
|
|||
|
||||
hibernate {
|
||||
enhance {
|
||||
// any configuration goes here
|
||||
enableLazyInitialization = true
|
||||
enableDirtyTracking = true
|
||||
enableAssociationManagement = true
|
||||
}
|
||||
}
|
|
@ -0,0 +1,4 @@
|
|||
SELECT p.id AS id1_0_0_,
|
||||
p.name AS name2_0_0_
|
||||
FROM Person p
|
||||
WHERE p.id IN ( 1, 2, 3 )
|
|
@ -0,0 +1,20 @@
|
|||
SELECT
|
||||
c.id as id1_1_0_,
|
||||
c.name as name2_1_0_
|
||||
FROM
|
||||
Client c
|
||||
WHERE
|
||||
c.id = 1
|
||||
|
||||
SELECT
|
||||
a.id as id1_0_,
|
||||
a.active_status as active2_0_,
|
||||
a.amount as amount3_0_,
|
||||
a.client_id as client_i6_0_,
|
||||
a.rate as rate4_0_,
|
||||
a.account_type as account_5_0_
|
||||
FROM
|
||||
Account a
|
||||
WHERE
|
||||
accounts0_.active_status = true
|
||||
and a.client_id = 1
|
|
@ -0,0 +1,13 @@
|
|||
SELECT
|
||||
a.id as id1_0_0_,
|
||||
a.active_status as active2_0_0_,
|
||||
a.amount as amount3_0_0_,
|
||||
a.client_id as client_i6_0_0_,
|
||||
a.rate as rate4_0_0_,
|
||||
a.account_type as account_5_0_0_,
|
||||
c.id as id1_1_1_,
|
||||
c.name as name2_1_1_
|
||||
FROM
|
||||
Account a
|
||||
WHERE
|
||||
a.id = 2
|
|
@ -0,0 +1,11 @@
|
|||
SELECT
|
||||
a.id as id1_0_,
|
||||
a.active_status as active2_0_,
|
||||
a.amount as amount3_0_,
|
||||
a.client_id as client_i6_0_,
|
||||
a.rate as rate4_0_,
|
||||
a.account_type as account_5_0_
|
||||
FROM
|
||||
Account a
|
||||
WHERE
|
||||
a.active_status = true
|
|
@ -0,0 +1,19 @@
|
|||
SELECT
|
||||
ca.Client_id as Client_i1_2_0_,
|
||||
ca.accounts_id as accounts2_2_0_,
|
||||
ca.order_id as order_id3_0_,
|
||||
a.id as id1_0_1_,
|
||||
a.amount as amount3_0_1_,
|
||||
a.rate as rate4_0_1_,
|
||||
a.account_type as account_5_0_1_
|
||||
FROM
|
||||
Client_Account ca
|
||||
INNER JOIN
|
||||
Account a
|
||||
ON ca.accounts_id=a.id
|
||||
WHERE
|
||||
ca.order_id <= ?
|
||||
AND ca.Client_id = ?
|
||||
|
||||
-- binding parameter [1] as [INTEGER] - [1]
|
||||
-- binding parameter [2] as [BIGINT] - [1]
|
|
@ -0,0 +1,23 @@
|
|||
INSERT INTO Client (name, id)
|
||||
VALUES ('John Doe', 1)
|
||||
|
||||
INSERT INTO Account (amount, client_id, rate, account_type, id)
|
||||
VALUES (5000.0, 1, 0.0125, 'CREDIT', 1)
|
||||
|
||||
INSERT INTO Account (amount, client_id, rate, account_type, id)
|
||||
VALUES (0.0, 1, 0.0105, 'DEBIT', 2)
|
||||
|
||||
INSERT INTO Account (amount, client_id, rate, account_type, id)
|
||||
VALUES (250.0, 1, 0.0105, 'DEBIT', 3)
|
||||
|
||||
INSERT INTO Client_Account (Client_id, order_id, accounts_id)
|
||||
VALUES (1, 0, 1)
|
||||
|
||||
INSERT INTO Client_Account (Client_id, order_id, accounts_id)
|
||||
VALUES (1, 0, 1)
|
||||
|
||||
INSERT INTO Client_Account (Client_id, order_id, accounts_id)
|
||||
VALUES (1, 1, 2)
|
||||
|
||||
INSERT INTO Client_Account (Client_id, order_id, accounts_id)
|
||||
VALUES (1, 2, 3)
|
|
@ -0,0 +1,11 @@
|
|||
INSERT INTO Client (name, id)
|
||||
VALUES ('John Doe', 1)
|
||||
|
||||
INSERT INTO Account (active_status, amount, client_id, rate, account_type, id)
|
||||
VALUES (true, 5000.0, 1, 0.0125, 'CREDIT', 1)
|
||||
|
||||
INSERT INTO Account (active_status, amount, client_id, rate, account_type, id)
|
||||
VALUES (false, 0.0, 1, 0.0105, 'DEBIT', 2)
|
||||
|
||||
INSERT INTO Account (active_status, amount, client_id, rate, account_type, id)
|
||||
VALUES (true, 250.0, 1, 0.0105, 'DEBIT', 3)
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue