Migrate 5.1 docs in 5.0 branch

parent 419ae32756
commit 034abc9755

@@ -1,6 +1,7 @@
apply plugin: 'eclipse'
apply plugin: 'idea'
apply from: "./libraries.gradle"
apply from: "./databases.gradle"


buildscript {
@@ -0,0 +1,48 @@
/*
 * Hibernate, Relational Persistence for Idiomatic Java
 *
 * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
 * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
 */

// build a map of the database settings to use.
ext {
    db = 'h2'
    dbBundle = [
            h2 : [
                    'db.dialect' : 'org.hibernate.dialect.H2Dialect',
                    'jdbc.driver': 'org.h2.Driver',
                    'jdbc.user'  : 'sa',
                    'jdbc.pass'  : '',
                    'jdbc.url'   : 'jdbc:h2:mem:db1;DB_CLOSE_DELAY=-1;LOCK_TIMEOUT=10000',
            ],
            hsqldb : [
                    'db.dialect' : 'org.hibernate.dialect.HSQLDialect',
                    'jdbc.driver': 'org.hsqldb.jdbc.JDBCDriver',
                    'jdbc.user'  : 'sa',
                    'jdbc.pass'  : '',
                    'jdbc.url'   : 'jdbc:hsqldb:mem:test'
            ],
            pgsql : [
                    'db.dialect' : 'org.hibernate.dialect.PostgreSQL94Dialect',
                    'jdbc.driver': 'org.postgresql.Driver',
                    'jdbc.user'  : 'hibernate_orm_test',
                    'jdbc.pass'  : 'hibernate_orm_test',
                    'jdbc.url'   : 'jdbc:postgresql:hibernate_orm_test'
            ],
            mysql : [
                    'db.dialect' : 'org.hibernate.dialect.MySQL57InnoDBDialect',
                    'jdbc.driver': 'com.mysql.jdbc.Driver',
                    'jdbc.user'  : 'hibernateormtest',
                    'jdbc.pass'  : 'hibernateormtest',
                    'jdbc.url'   : 'jdbc:mysql://localhost/hibernate_orm_test'
            ],
            mariadb : [
                    'db.dialect' : 'org.hibernate.dialect.MySQL57InnoDBDialect',
                    'jdbc.driver': 'org.mariadb.jdbc.Driver',
                    'jdbc.user'  : 'hibernate_orm_test',
                    'jdbc.pass'  : 'hibernate_orm_test',
                    'jdbc.url'   : 'jdbc:mariadb://localhost/hibernate_orm_test'
            ]
    ]
}
@@ -1,3 +1,4 @@
import org.apache.tools.ant.filters.ReplaceTokens
import org.asciidoctor.gradle.AsciidoctorTask

/*
@@ -27,6 +28,8 @@ apply plugin: "java"
apply plugin: "jdocbook"
apply plugin: 'org.asciidoctor.convert'

apply plugin: 'hibernate-matrix-testing'

apply from: "${rootProject.projectDir}/utilities.gradle"

defaultTasks 'buildDocs'
@@ -45,25 +48,66 @@ if ( JavaVersion.current().isJava8Compatible() ) {
}

dependencies {
    ext.pressgangVersion = '3.0.0'

    // asciidoctor 'org.asciidoctor:asciidoctorj:1.5.2'
    asciidoclet 'org.asciidoctor:asciidoclet:0.+'

    jdocbookXsl "org.jboss.pressgang:pressgang-xslt-ns:${pressgangVersion}"
    jdocbookXsl "org.jboss.pressgang:pressgang-fonts:${pressgangVersion}"
    jdocbookStyles "org.jboss.pressgang:pressgang-jdocbook-style:${pressgangVersion}"

    compile( libraries.jpa )
    compile( project( ':hibernate-jpamodelgen' ) )

    testCompile( 'org.apache.commons:commons-lang3:3.4' )

    testCompile( project(':hibernate-core') )
    testCompile( project(':hibernate-entitymanager') )
    testCompile( project(':hibernate-ehcache') )
    testCompile( project(':hibernate-spatial') )

    testCompile( project(':hibernate-testing') )
    testCompile( project(path: ':hibernate-entitymanager', configuration: 'tests') )

    testRuntime( libraries.h2 )
    testRuntime( libraries.hsqldb )
}

processTestResources.doLast( {
    copy {
        from( sourceSets.test.java.srcDirs ) {
            include '**/*.properties'
            include '**/*.xml'
        }
        into sourceSets.test.output.classesDir
    }
    copy {
        ext.targetDir = file( "${buildDir}/resources/test" )
        from file('src/test/resources')
        into targetDir
        filter( ReplaceTokens, tokens: dbBundle[db] );
    }
} )

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// grouping tasks - declaration, see below for task dependency definitions
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

task buildDocs {
    group 'Documentation'
    description 'Grouping task for performing all documentation building tasks'
}

task buildDocsForPublishing {
    group 'Documentation'
    description 'Grouping task for building all documentation for publishing (release)'
}


// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// aggregated JavaDoc
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

final File javadocDir = mkdir( new File( (File) project.buildDir, 'javadocs' ) );


@@ -148,8 +192,6 @@ task aggregateJavadocs(type: Javadoc) {
    }
}

buildDocs.dependsOn aggregateJavadocs


// jDocBook ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~


@@ -215,8 +257,6 @@ task renderTopicalGuides(type: AsciidoctorTask, group: 'Documentation') {
    attributes icons: 'font', experimental: true, 'source-highlighter': 'prettify'
}

buildDocs.dependsOn renderTopicalGuides



// Getting Started Guides (quick starts) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -231,7 +271,6 @@ task renderGettingStartedGuides(type: AsciidoctorTask, group: 'Documentation') {
    attributes icons: 'font', experimental: true, 'source-highlighter': 'prettify'
}

buildDocs.dependsOn renderGettingStartedGuides

task buildTutorialZip(type: Zip) {
    from 'src/main/asciidoc/quickstart/tutorials'

@@ -247,3 +286,69 @@ task buildTutorialZip(type: Zip) {

renderGettingStartedGuides.dependsOn buildTutorialZip



// Mapping Guides ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

task renderMappingGuide(type: AsciidoctorTask, group: 'Documentation') {
    description = 'Renders the Mapping Guides in HTML format using Asciidoctor.'
    sourceDir = file( 'src/main/asciidoc/mapping' )
    outputDir = new File("$buildDir/asciidoc/mapping/html")
    backends "html5"
    separateOutputDirs false
    options logDocuments: true
    //attributes icons: 'font', experimental: true, 'source-highlighter': 'prettify', linkcss: true, stylesheet: "css/hibernate.css"
    attributes icons: 'font', experimental: true, 'source-highlighter': 'prettify', linkcss: true
    resources {
        from('src/main/asciidoc/') {
            include 'images/**'
            include 'css/**'
        }
    }
}

// User Guides ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

final String[] versionComponents = version.split( '\\.' );
final String majorMinorVersion = versionComponents[0] + '.' + versionComponents[1];

task renderUserGuide(type: AsciidoctorTask, group: 'Documentation') {
    description = 'Renders the User Guides in HTML format using Asciidoctor.'
    sourceDir = file( 'src/main/asciidoc/userguide' )
    outputDir = new File("$buildDir/asciidoc/userguide/html_single")
    backends "html5"
    separateOutputDirs false
    options logDocuments: true
    attributes icons: 'font', experimental: true, 'source-highlighter': 'prettify', linkcss: true, stylesheet: "css/hibernate.css", majorMinorVersion: majorMinorVersion
    resources {
        from('src/main/asciidoc/userguide/') {
            include 'images/**'
        }
        from('src/main/style/asciidoctor') {
            include 'images/**'
        }
        from('src/main/style/asciidoctor') {
            include 'css/**'
        }
    }
}


// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// grouping tasks
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

buildDocs.dependsOn aggregateJavadocs
buildDocs.dependsOn renderTopicalGuides
buildDocs.dependsOn renderGettingStartedGuides
buildDocs.dependsOn renderUserGuide
// the jDocBook plugin already adds its main task as a dependency of the buildDocs task


buildDocsForPublishing.dependsOn aggregateJavadocs
buildDocsForPublishing.dependsOn renderTopicalGuides
buildDocsForPublishing.dependsOn renderGettingStartedGuides
buildDocsForPublishing.dependsOn renderUserGuide
// only html-single to match what Asciidoctor currently offers
//buildDocsForPublishing.dependsOn 'renderDocBook_integrationsGuide_en-US_html_single '
buildDocsForPublishing.dependsOn renderDocBook_integrationsGuide

@@ -0,0 +1,6 @@
== References

[bibliography]
- [[[PoEAA]]] Martin Fowler. Patterns of Enterprise Application Architecture. Addison-Wesley Publishing Company. 2003.
- [[[JPwH]]] Christian Bauer & Gavin King. http://www.manning.com/bauer2[Java Persistence with Hibernate]. Manning Publications Co. 2007.

@@ -0,0 +1,39 @@
= Hibernate ORM {majorMinorVersion} User Guide
Steve Ebersole, Vlad Mihalcea, Andrea Boriero, Brett Meyer, Radim Vansa
:toc:
:toclevels: 3

include::Preface.adoc[]

:numbered:

include::chapters/architecture/Architecture.adoc[]
include::chapters/domain/DomainModel.adoc[]
include::chapters/bootstrap/Bootstrap.adoc[]
include::chapters/pc/PersistenceContext.adoc[]
include::chapters/flushing/Flushing.adoc[]
include::chapters/jdbc/Database_Access.adoc[]
include::chapters/transactions/Transactions.adoc[]
include::chapters/jndi/JNDI.adoc[]
include::chapters/locking/Locking.adoc[]
include::chapters/fetching/Fetching.adoc[]
include::chapters/batch/Batching.adoc[]
include::chapters/caching/Caching.adoc[]
include::chapters/events/Events.adoc[]
include::chapters/query/hql/HQL.adoc[]
include::chapters/query/criteria/Criteria.adoc[]
include::chapters/query/native/Native.adoc[]
include::chapters/query/spatial/Spatial.adoc[]
include::chapters/multitenancy/MultiTenancy.adoc[]
include::chapters/osgi/OSGi.adoc[]
include::chapters/envers/Envers.adoc[]
include::chapters/portability/Portability.adoc[]

include::appendices/Configurations.adoc[]
include::appendices/Legacy_Bootstrap.adoc[]
include::appendices/Legacy_DomainModel.adoc[]
include::appendices/Legacy_Criteria.adoc[]
include::appendices/Legacy_Native_Queries.adoc[]

include::Bibliography.adoc[]

@@ -0,0 +1,60 @@
[[preface]]
== Preface

Developing object-oriented software that deals with data from relational databases can be cumbersome and resource consuming. Development costs are significantly higher due to a paradigm mismatch between how data is represented in objects versus relational databases. Hibernate is an Object/Relational Mapping (ORM) solution for Java environments. ORM refers to the technique of mapping data between an object model representation and a relational data model representation. See http://en.wikipedia.org/wiki/Object-relational_mapping[Wikipedia] for a good high-level discussion. Also, Martin Fowler's http://martinfowler.com/bliki/OrmHate.html[OrmHate] article takes a look at many of the mentioned mismatch problems.

Although having a strong background in SQL is not required to use Hibernate, having a basic understanding of the concepts can help you understand Hibernate more quickly and fully. An understanding of data modeling principles is especially important. Both http://www.agiledata.org/essays/dataModeling101.html and http://en.wikipedia.org/wiki/Data_modeling are good starting points for understanding these data modeling principles.

Understanding the basics of transactions and design patterns such as "Unit of Work" <<Bibliography.adoc#PoEAA,PoEAA>> or "Application Transaction" is important as well. These topics will be discussed in the documentation, but a prior understanding will certainly help.

Hibernate not only takes care of the mapping from Java classes to database tables (and from Java data types to SQL data types), but also provides data query and retrieval facilities. It can significantly reduce development time otherwise spent with manual data handling in SQL and JDBC. Hibernate's design goal is to relieve the developer from 95% of common data persistence-related programming tasks by eliminating the need for manual, hand-crafted data processing using SQL and JDBC. However, unlike many other persistence solutions, Hibernate does not hide the power of SQL from you and guarantees that your investment in relational technology and knowledge is as valid as always.

Hibernate may not be the best solution for data-centric applications that only use stored procedures to implement the business logic in the database; it is most useful with object-oriented domain models and business logic in the Java-based middle-tier. However, Hibernate can certainly help you to remove or encapsulate vendor-specific SQL code, and it will help with the common task of result set translation from a tabular representation to a graph of objects.

See http://hibernate.org/orm/contribute/ for information on getting involved.

[TIP]
====
If you are just getting started with Hibernate, you may want to start with the Hibernate Getting Started Guide available from the http://hibernate.org/orm/documentation[documentation page]. It contains quick-start style tutorials as well as lots of introductory information. There is also a series of topical guides providing deep dives into various topics.
====

@@ -0,0 +1,722 @@
[[configurations]]
== Configurations

[[configurations-strategy]]
=== Strategy configurations

Many configuration settings define pluggable strategies that Hibernate uses for various purposes.
Many of these strategy-type settings accept their definition in several forms; the documentation of such settings refers back to this section.
The forms available include:

short name (if defined)::
Certain built-in strategy implementations have a corresponding short name.
strategy instance::
An instance of the strategy implementation to use can be specified.
strategy Class reference::
A `java.lang.Class` reference of the strategy implementation to use.
strategy Class name::
The class name (`java.lang.String`) of the strategy implementation to use.

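For illustration only, here is a minimal sketch (not part of the original documentation; the setting and naming-strategy class used are simply ones documented later in this appendix) of supplying the same strategy setting in the different forms when building a `StandardServiceRegistry`:

[source,java]
----
import org.hibernate.boot.registry.StandardServiceRegistry;
import org.hibernate.boot.registry.StandardServiceRegistryBuilder;

public class StrategySettingFormsExample {

	public static StandardServiceRegistry build() {
		return new StandardServiceRegistryBuilder()
				// short name form
				.applySetting( "hibernate.implicit_naming_strategy", "jpa" )
				// Class name form (equivalent):
				// .applySetting( "hibernate.implicit_naming_strategy",
				//		"org.hibernate.boot.model.naming.ImplicitNamingStrategyJpaCompliantImpl" )
				// Class reference form:
				// .applySetting( "hibernate.implicit_naming_strategy", ImplicitNamingStrategyJpaCompliantImpl.class )
				// strategy instance form:
				// .applySetting( "hibernate.implicit_naming_strategy", new ImplicitNamingStrategyJpaCompliantImpl() )
				.build();
	}
}
----
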
[[configurations-general]]
=== General Configuration

[width="100%",cols="20%,20%,60%",]
|===
|Property |Example |Purpose
|`hibernate.dialect` | `org.hibernate.dialect.PostgreSQL94Dialect` |
The classname of a Hibernate `org.hibernate.dialect.Dialect` from which Hibernate can generate SQL optimized for a particular relational database.

In most cases Hibernate can choose the correct `org.hibernate.dialect.Dialect` implementation based on the JDBC metadata returned by the JDBC driver.

|`hibernate.current_session_context_class` |`jta`, `thread`, `managed`, or a custom class implementing `org.hibernate.context.spi.CurrentSessionContext` |

Supply a custom strategy for the scoping of the _current_ `Session`.

The definition of what exactly _current_ means is controlled by the `org.hibernate.context.spi.CurrentSessionContext` implementation in use.

Note that for backwards compatibility, if a `org.hibernate.context.spi.CurrentSessionContext` is not configured but JTA is configured this will default to the `org.hibernate.context.internal.JTASessionContext`.

|===

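As a hedged illustration (a minimal bootstrap sketch; the dialect value, class name, and usage are placeholders rather than recommendations), these two settings might be applied programmatically:

[source,java]
----
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.hibernate.boot.MetadataSources;
import org.hibernate.boot.registry.StandardServiceRegistry;
import org.hibernate.boot.registry.StandardServiceRegistryBuilder;

public class GeneralSettingsExample {

	public static SessionFactory buildSessionFactory() {
		StandardServiceRegistry registry = new StandardServiceRegistryBuilder()
				.applySetting( "hibernate.dialect", "org.hibernate.dialect.H2Dialect" )
				// scope the "current" Session to the calling thread
				.applySetting( "hibernate.current_session_context_class", "thread" )
				.build();
		return new MetadataSources( registry ).buildMetadata().buildSessionFactory();
	}

	public static void useCurrentSession(SessionFactory sessionFactory) {
		// with "thread" scoping, the returned Session is bound to the calling thread
		Session session = sessionFactory.getCurrentSession();
		session.beginTransaction();
		// ... work with the session ...
		session.getTransaction().commit();
	}
}
----
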
[[configurations-database-connection]]
=== Database connection properties

[width="100%",cols="20%,20%,60%",]
|===
|Property |Example |Purpose
|`hibernate.connection.driver_class` or `javax.persistence.jdbc.driver` | `org.postgresql.Driver` | Names the JDBC `Driver` class name.
|`hibernate.connection.url` or `javax.persistence.jdbc.url` | `jdbc:postgresql:hibernate_orm_test` | Names the JDBC connection URL.
|`hibernate.connection.username` or `javax.persistence.jdbc.user` | | Names the JDBC connection user name.
|`hibernate.connection.password` or `javax.persistence.jdbc.password` | | Names the JDBC connection password.
|`hibernate.connection.isolation` | `REPEATABLE_READ` or `Connection.TRANSACTION_REPEATABLE_READ` | Names the JDBC connection transaction isolation level.
|`hibernate.connection.autocommit` | `true` or `false` (default value) | Names the JDBC connection autocommit mode.
|`hibernate.connection.pool_size` | 20 | Maximum number of connections for the built-in Hibernate connection pool.
|`hibernate.connection.datasource` | |

Either a `javax.sql.DataSource` instance or a JNDI name under which to locate the `DataSource`.

For JNDI names, see also `hibernate.jndi.class`, `hibernate.jndi.url`, `hibernate.jndi`.

|`hibernate.connection` | | Names a prefix used to define arbitrary JDBC connection properties. These properties are passed along to the JDBC provider when creating a connection.
|`hibernate.connection.provider_class` | `org.hibernate.hikaricp.internal.HikariCPConnectionProvider` a|

Names the `org.hibernate.engine.jdbc.connections.spi.ConnectionProvider` to use for obtaining JDBC connections.

Can reference:

* an instance of `ConnectionProvider`
* a `Class<? extends ConnectionProvider>` object reference
* a fully qualified name of a class implementing `ConnectionProvider`

The term `class` appears in the setting name due to legacy reasons; however, it can accept instances.

|`hibernate.jndi.class` | | Names the JNDI `javax.naming.InitialContext` class.
|`hibernate.jndi.url` | java:global/jdbc/default | Names the JNDI provider/connection url.
|`hibernate.jndi` | |

Names a prefix used to define arbitrary JNDI `javax.naming.InitialContext` properties.

These properties are passed along to `javax.naming.InitialContext#InitialContext(java.util.Hashtable)`.

|`hibernate.connection.acquisition_mode` | `immediate` |

Specifies how Hibernate should acquire JDBC connections. The possible values are given by `org.hibernate.ConnectionAcquisitionMode`.

You should generally only configure this or `hibernate.connection.release_mode`, not both.

|`hibernate.connection.release_mode` | `auto` (default value) |

Specifies how Hibernate should release JDBC connections. The possible values are given by the current transaction mode (`after_transaction` for JDBC transactions and `after_statement` for JTA transactions).

You should generally only configure this or `hibernate.connection.acquisition_mode`, not both.

|===

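For illustration, a hedged sketch of how these connection settings could be supplied programmatically when creating an `EntityManagerFactory` (the property keys come from the table above; the persistence unit name is a placeholder, and the URL and credentials simply mirror the PostgreSQL test values used elsewhere in this commit):

[source,java]
----
import java.util.HashMap;
import java.util.Map;

import javax.persistence.EntityManagerFactory;
import javax.persistence.Persistence;

public class ConnectionSettingsExample {

	public static EntityManagerFactory create() {
		Map<String, Object> settings = new HashMap<>();
		settings.put( "javax.persistence.jdbc.driver", "org.postgresql.Driver" );
		settings.put( "javax.persistence.jdbc.url", "jdbc:postgresql:hibernate_orm_test" );
		settings.put( "javax.persistence.jdbc.user", "hibernate_orm_test" );
		settings.put( "javax.persistence.jdbc.password", "hibernate_orm_test" );
		settings.put( "hibernate.dialect", "org.hibernate.dialect.PostgreSQL94Dialect" );
		// "my-persistence-unit" is a placeholder persistence unit name
		return Persistence.createEntityManagerFactory( "my-persistence-unit", settings );
	}
}
----
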
[[configurations-c3p0]]
=== c3p0 properties

[width="100%",cols="20%,20%,60%",]
|===
|Property |Example |Purpose
|`hibernate.c3p0.min_size` | 1 | Minimum size of C3P0 connection pool. Refers to http://www.mchange.com/projects/c3p0/#minPoolSize[c3p0 `minPoolSize` setting].
|`hibernate.c3p0.max_size` | 5 | Maximum size of C3P0 connection pool. Refers to http://www.mchange.com/projects/c3p0/#maxPoolSize[c3p0 `maxPoolSize` setting].
|`hibernate.c3p0.timeout` | 30 | Maximum idle time for C3P0 connection pool. Refers to http://www.mchange.com/projects/c3p0/#maxIdleTime[c3p0 `maxIdleTime` setting].
|`hibernate.c3p0.max_statements` | 5 | Maximum size of C3P0 statement cache. Refers to http://www.mchange.com/projects/c3p0/#maxStatements[c3p0 `maxStatements` setting].
|`hibernate.c3p0.acquire_increment` | 2 | Number of connections acquired at a time when there's no connection available in the pool. Refers to http://www.mchange.com/projects/c3p0/#acquireIncrement[c3p0 `acquireIncrement` setting].
|`hibernate.c3p0.idle_test_period` | 5 | Idle time before a C3P0 pooled connection is validated. Refers to http://www.mchange.com/projects/c3p0/#idleConnectionTestPeriod[c3p0 `idleConnectionTestPeriod` setting].
|`hibernate.c3p0` | | A setting prefix used to indicate additional c3p0 properties that need to be passed to the underlying c3p0 connection pool.
|===

[[configurations-mapping]]
=== Mapping Properties

[width="100%",cols="20%,20%,60%",]
|===
|Property |Example |Purpose
3+|Table qualifying options
|`hibernate.default_schema` |A schema name |Qualifies unqualified table names with the given schema or tablespace in generated SQL.
|`hibernate.default_catalog` |A catalog name |Qualifies unqualified table names with the given catalog in generated SQL.

3+|Identifier options
|`hibernate.id.new_generator_mappings` |`true` (default value) or `false` |

Setting which indicates whether or not the new `org.hibernate.id.IdentifierGenerator` implementations are used for `AUTO`, `TABLE` and `SEQUENCE`.

Existing applications may want to disable this (set it `false`) for upgrade compatibility from 3.x and 4.x to 5.x.

|`hibernate.use_identifier_rollback` |`true` or `false` (default value) |If true, generated identifier properties are reset to default values when objects are deleted.
|`hibernate.id.optimizer.pooled.preferred` |`none`, `hilo`, `legacy-hilo`, `pooled` (default value), `pooled-lo`, `pooled-lotl` or a fully-qualified name of the `org.hibernate.id.enhanced.Optimizer` implementation |

When a generator specified an increment-size and an optimizer was not explicitly specified, which of the _pooled_ optimizers should be preferred?

3+|Quoting options
|`hibernate.globally_quoted_identifiers` |`true` or `false` (default value) |Should all database identifiers be quoted.
|`hibernate.globally_quoted_identifiers_skip_column_definitions` |`true` or `false` (default value) |

Assuming `hibernate.globally_quoted_identifiers` is `true`, this allows the global quoting to skip column-definitions as defined by `javax.persistence.Column`, `javax.persistence.JoinColumn`, etc. While it avoids column-definitions being quoted due to global quoting, they can still be explicitly quoted in the annotation/xml mappings.

|`hibernate.auto_quote_keyword` |`true` or `false` (default value) |Specifies whether to automatically quote any names that are deemed keywords.

3+|Discriminator options
|`hibernate.discriminator.implicit_for_joined` |`true` or `false` (default value) |

The legacy behavior of Hibernate is to not use discriminators for joined inheritance (Hibernate does not need the discriminator).
However, some JPA providers do need the discriminator for handling joined inheritance so, in the interest of portability, this capability has been added to Hibernate too.

However, we want to make sure that legacy applications continue to work as well, which puts us in a bind in terms of how to handle _implicit_ discriminator mappings.
The solution is to assume that the absence of discriminator metadata means to follow the legacy behavior _unless_ this setting is enabled.

With this setting enabled, Hibernate will interpret the absence of discriminator metadata as an indication to use the JPA-defined defaults for these absent annotations.

See Hibernate Jira issue https://hibernate.atlassian.net/browse/HHH-6911[HHH-6911] for additional background info.

|`hibernate.discriminator.ignore_explicit_for_joined` |`true` or `false` (default value) |

The legacy behavior of Hibernate is to not use discriminators for joined inheritance (Hibernate does not need the discriminator).
However, some JPA providers do need the discriminator for handling joined inheritance so, in the interest of portability, this capability has been added to Hibernate too.

Existing applications rely (implicitly or explicitly) on Hibernate ignoring any `DiscriminatorColumn` declarations on joined inheritance hierarchies.
This setting allows these applications to maintain the legacy behavior of `DiscriminatorColumn` annotations being ignored when paired with joined inheritance.

See Hibernate Jira issue https://hibernate.atlassian.net/browse/HHH-6911[HHH-6911] for additional background info.

3+|Naming strategies
|`hibernate.implicit_naming_strategy` |`default` (default value), `jpa`, `legacy-jpa`, `legacy-hbm`, `component-path` a|

Used to specify the `org.hibernate.boot.model.naming.ImplicitNamingStrategy` class to use.
The following short names are defined for this setting:

`default`:: Uses the `org.hibernate.boot.model.naming.ImplicitNamingStrategyJpaCompliantImpl`
`jpa`:: Uses the `org.hibernate.boot.model.naming.ImplicitNamingStrategyJpaCompliantImpl`
`legacy-jpa`:: Uses the `org.hibernate.boot.model.naming.ImplicitNamingStrategyLegacyJpaImpl`
`legacy-hbm`:: Uses the `org.hibernate.boot.model.naming.ImplicitNamingStrategyLegacyHbmImpl`
`component-path`:: Uses the `org.hibernate.boot.model.naming.ImplicitNamingStrategyComponentPathImpl`

If this property happens to be empty, the fallback is to use the `default` strategy.

|`hibernate.physical_naming_strategy` | `org.hibernate.boot.model.naming.PhysicalNamingStrategyStandardImpl` (default value) | Used to specify the `org.hibernate.boot.model.naming.PhysicalNamingStrategy` class to use.
3+|Metadata scanning options
|`hibernate.archive.scanner` | a|

Pass an implementation of `org.hibernate.boot.archive.scan.spi.Scanner`.
By default, `org.hibernate.boot.archive.scan.internal.StandardScanner` is used.

Accepts either:

* an actual `Scanner` instance
* a reference to a Class that implements `Scanner`
* a fully qualified name of a Class that implements `Scanner`

|`hibernate.archive.interpreter` | a|

Pass an `org.hibernate.boot.archive.spi.ArchiveDescriptorFactory` to use in the scanning process.

Accepts either:

* an actual `ArchiveDescriptorFactory` instance
* a reference to a Class that implements `ArchiveDescriptorFactory`
* a fully qualified name of a Class that implements `ArchiveDescriptorFactory`

See information on `org.hibernate.boot.archive.scan.spi.Scanner` about expected constructor forms.

|`hibernate.archive.autodetection` | `true` or `false` (default value) a|

Identifies a comma-separated list of values indicating the mapping types we should auto-detect during scanning.

Allowable values include:

`class`:: scan classes (e.g. `.class`) to extract entity mapping metadata
`hbm`:: scan `hbm` mapping files (e.g. `hbm.xml`) to extract entity mapping metadata

|`hibernate.mapping.precedence` | `true` or `false` (default value) |

Used to specify the order in which metadata sources should be processed.
Value is a delimited-list whose elements are defined by `org.hibernate.cfg.MetadataSourceType`.

Default is `hbm,class`, which indicates to process `hbm.xml` files followed by annotations (combined with `orm.xml` mappings).

3+|JDBC-related options
|`hibernate.use_nationalized_character_data` |`true` or `false` (default value) |Enable nationalized character support on all string / clob based attributes (string, char, clob, text etc).
|`hibernate.jdbc.lob.non_contextual_creation` |`true` or `false` (default value) |Should we not use contextual LOB creation (aka based on `java.sql.Connection#createBlob()` et al)? The default value for HANA, H2, and PostgreSQL is `true`.

3+|Bean Validation options
|`javax.persistence.validation.factory` |`javax.validation.ValidationFactory` implementation | Specify the `javax.validation.ValidationFactory` implementation to use for Bean Validation.
|`hibernate.check_nullability` |`true` or `false` |

Enable nullability checking. Raises an exception if a property marked as not-null is null.

Defaults to `false` if Bean Validation is present in the classpath and Hibernate Annotations is used, `true` otherwise.

3+|Misc options
|`hibernate.create_empty_composites.enabled` |`true` or `false` (default value) | Enable instantiation of composite/embeddable objects when all of its attribute values are `null`. The default (and historical) behavior is that a `null` reference will be used to represent the composite when all of its attributes are `null`.
|`hibernate.entity_dirtiness_strategy` | fully-qualified class name or an actual `CustomEntityDirtinessStrategy` instance | Setting to identify a `org.hibernate.CustomEntityDirtinessStrategy` to use.
|`hibernate.default_entity_mode` |`pojo` (default value) or `dynamic-map` |Default `EntityMode` for entity representation for all sessions opened from this `SessionFactory`, defaults to `pojo`.
|===

[[configurations-bytecode-enhancement]]
=== Bytecode Enhancement Properties

[width="100%",cols="20%,20%,60%",]
|===
|Property |Example |Purpose
|`hibernate.enhancer.enableDirtyTracking`| `true` or `false` (default value) | Enable dirty tracking feature in runtime bytecode enhancement.
|`hibernate.enhancer.enableLazyInitialization`| `true` or `false` (default value) | Enable lazy loading feature in runtime bytecode enhancement. This way, even basic types (e.g. `@Basic(fetch = FetchType.LAZY)`) can be fetched lazily.
|`hibernate.enhancer.enableAssociationManagement`| `true` or `false` (default value) | Enable association management feature in runtime bytecode enhancement which automatically synchronizes a bidirectional association when only one side is changed.
|`hibernate.bytecode.provider` |`javassist` (default value) | The `org.hibernate.bytecode.spi.BytecodeProvider` built-in implementation flavor. Currently, only `javassist` is supported.
|`hibernate.bytecode.use_reflection_optimizer`| `true` or `false` (default value) | Should we use reflection optimization? The reflection optimizer implements the `org.hibernate.bytecode.spi.ReflectionOptimizer` interface and improves entity instantiation and property getter/setter calls.
|===

[[configurations-query]]
=== Query settings

[width="100%",cols="20%,20%,60%",]
|===
|Property |Example |Purpose
|`hibernate.query.plan_cache_max_size` | `2048` (default value) a|

The maximum number of entries including:

* `org.hibernate.engine.query.spi.HQLQueryPlan`
* `org.hibernate.engine.query.spi.FilterQueryPlan`
* `org.hibernate.engine.query.spi.NativeSQLQueryPlan`

maintained by `org.hibernate.engine.query.spi.QueryPlanCache`.

|`hibernate.query.plan_parameter_metadata_max_size` | `128` (default value) | The maximum number of strong references associated with `ParameterMetadata` maintained by `org.hibernate.engine.query.spi.QueryPlanCache`.
|`hibernate.order_by.default_null_ordering` |`none`, `first` or `last` |Defines precedence of null values in `ORDER BY` clause. Defaults to `none`, which varies between RDBMS implementations.
|`hibernate.discriminator.force_in_select` |`true` or `false` (default value) | For entities which do not explicitly say, should we force discriminators into SQL selects?
|`hibernate.query.substitutions` | `true=1,false=0` |A comma-separated list of token substitutions to use when translating a Hibernate query to SQL. Maps tokens in Hibernate queries to SQL tokens, such as function or literal names.
|`hibernate.query.factory_class` |`org.hibernate.hql.internal.ast.ASTQueryTranslatorFactory` (default value) or `org.hibernate.hql.internal.classic.ClassicQueryTranslatorFactory` |Chooses the HQL parser implementation.

|`hibernate.query.jpaql_strict_compliance` |`true` or `false` (default value) |

Should we strictly adhere to JPA Query Language (JPQL) syntax, or more broadly support all of Hibernate's superset (HQL)?

Setting this to `true` may cause valid HQL to throw an exception because it violates the JPQL subset.

|`hibernate.query.startup_check` | `true` (default value) or `false` |Should named queries be checked during startup?
|`hibernate.hql.bulk_id_strategy` | A fully-qualified class name, an instance, or a `Class` object reference |Provide a custom `org.hibernate.hql.spi.id.MultiTableBulkIdStrategy` implementation for handling multi-table bulk HQL operations.
|`hibernate.proc.param_null_passing` | `true` or `false` (default value) |

Global setting for whether `null` parameter bindings should be passed to database procedure/function calls as part of `org.hibernate.procedure.ProcedureCall` handling.
Implicitly Hibernate will not pass the `null`, the intention being to allow any default argument values to be applied.

This defines a global setting, which can then be controlled per parameter via `org.hibernate.procedure.ParameterRegistration#enablePassingNulls(boolean)`.

Values are `true` (pass the NULLs) or `false` (do not pass the NULLs).

|`hibernate.jdbc.log.warnings` | `true` or `false` |Enable fetching JDBC statement warnings for logging. Default value is given by `org.hibernate.dialect.Dialect#isJdbcLogWarningsEnabledByDefault()`.
|`hibernate.session_factory.statement_inspector` | A fully-qualified class name, an instance, or a `Class` object reference a|

Names a `org.hibernate.resource.jdbc.spi.StatementInspector` implementation to be applied to every `Session` created by the current `SessionFactory`.

Can reference:

* a `StatementInspector` instance
* a `StatementInspector` implementation `Class` reference
* a `StatementInspector` implementation class name (fully-qualified class name)

|===

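As a hedged sketch of the last setting above (the class name and logging choice are placeholders, not part of the original documentation), a `StatementInspector` simply receives each SQL string before it is prepared and may return it unchanged or rewritten:

[source,java]
----
import org.hibernate.boot.registry.StandardServiceRegistryBuilder;
import org.hibernate.resource.jdbc.spi.StatementInspector;

public class SqlLoggingStatementInspector implements StatementInspector {

	@Override
	public String inspect(String sql) {
		// Log (or rewrite) the SQL before it is prepared; returning the string
		// unchanged leaves the statement exactly as Hibernate generated it.
		System.out.println( "Hibernate SQL: " + sql );
		return sql;
	}

	// Registration sketch: the implementation can be given as an instance,
	// a Class reference, or (as here) a fully-qualified class name.
	public static StandardServiceRegistryBuilder register(StandardServiceRegistryBuilder builder) {
		return builder.applySetting(
				"hibernate.session_factory.statement_inspector",
				SqlLoggingStatementInspector.class.getName()
		);
	}
}
----
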
[[configurations-batch]]
=== Batching properties

[width="100%",cols="20%,20%,60%",]
|===
|Property |Example |Purpose
|`hibernate.jdbc.batch_size` |5 |Maximum JDBC batch size. A nonzero value enables batch updates.
|`hibernate.order_inserts` |`true` or `false` (default value) |Forces Hibernate to order SQL inserts by the primary key value of the items being inserted. This preserves batching when using cascading.
|`hibernate.order_updates` |`true` or `false` (default value) |Forces Hibernate to order SQL updates by the primary key value of the items being updated. This preserves batching when using cascading and reduces the likelihood of transaction deadlocks in highly-concurrent systems.
|`hibernate.jdbc.batch_versioned_data` |`true` (default value) or `false` |
Should versioned entities be included in batching?

Set this property to `true` if your JDBC driver returns correct row counts from `executeBatch()`. This option is usually safe, but is disabled by default. If enabled, Hibernate uses batched DML for automatically versioned data.

|`hibernate.batch_fetch_style` |`LEGACY` (default value) |

Names the `org.hibernate.loader.BatchFetchStyle` to use.

Can specify either the `org.hibernate.loader.BatchFetchStyle` name (case-insensitively), or a `org.hibernate.loader.BatchFetchStyle` instance. `LEGACY` is the default value.

|`hibernate.jdbc.batch.builder` | The fully qualified name of an `org.hibernate.engine.jdbc.batch.spi.BatchBuilder` implementation class type or an actual object instance | Names the `org.hibernate.engine.jdbc.batch.spi.BatchBuilder` implementation to use.
|===

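For illustration only (a minimal sketch under the assumption that `hibernate.jdbc.batch_size=20`, and optionally `hibernate.order_inserts=true`, were applied when the `SessionFactory` was built; the method and parameter names are placeholders), JDBC batching is usually paired with periodic flushing and clearing of the persistence context:

[source,java]
----
import java.util.List;

import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.hibernate.Transaction;

public class BatchInsertExample {

	// entities holds already-mapped entity instances of any type
	public static void insertInBatches(SessionFactory sessionFactory, List<?> entities) {
		Session session = sessionFactory.openSession();
		Transaction tx = session.beginTransaction();
		try {
			int i = 0;
			for ( Object entity : entities ) {
				session.persist( entity );
				if ( ++i % 20 == 0 ) {
					session.flush();   // execute the pending batch of inserts
					session.clear();   // detach flushed entities to keep memory bounded
				}
			}
			tx.commit();
		}
		catch ( RuntimeException e ) {
			tx.rollback();
			throw e;
		}
		finally {
			session.close();
		}
	}
}
----
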
[[configurations-database-fetch]]
==== Fetching properties

[width="100%",cols="20%,20%,60%",]
|===
|Property |Example |Purpose
|`hibernate.max_fetch_depth`|A value between `0` and `3` |Sets a maximum depth for the outer join fetch tree for single-ended associations. A single-ended association is a one-to-one or many-to-one association. A value of `0` disables default outer join fetching.
|`hibernate.default_batch_fetch_size` |`4`, `8`, or `16` |Default size for Hibernate batch fetching of associations (lazily fetched associations can be fetched in batches to prevent N+1 query problems).
|`hibernate.jdbc.fetch_size` |`0` or an integer |A non-zero value determines the JDBC fetch size, by calling `Statement.setFetchSize()`.
|`hibernate.jdbc.use_scrollable_resultset` |`true` or `false` |Enables Hibernate to use JDBC2 scrollable resultsets. This property is only relevant for user-supplied JDBC connections. Otherwise, Hibernate uses connection metadata.
|`hibernate.jdbc.use_streams_for_binary` |`true` or `false` (default value) |Use streams when writing or reading `binary` or `serializable` types to or from JDBC. This is a system-level property.
|`hibernate.jdbc.use_get_generated_keys` |`true` or `false` |Allows Hibernate to use JDBC3 `PreparedStatement.getGeneratedKeys()` to retrieve natively-generated keys after insert. You need the JDBC3+ driver and JRE1.4+. Disable this property if your driver has problems with the Hibernate identifier generators. By default, it tries to detect the driver capabilities from connection metadata.
|`hibernate.jdbc.wrap_result_sets` |`true` or `false` (default value) |Enable wrapping of JDBC result sets in order to speed up column name lookups for broken JDBC drivers.
|`hibernate.enable_lazy_load_no_trans` |`true` or `false` (default value) |

Initialize lazy proxies or collections outside a given transactional persistence context.

Although enabling this configuration can make `LazyInitializationException` go away, it's better to use a fetch plan that guarantees that all properties are properly initialised before the Session is closed.

In reality, you probably shouldn't enable this setting anyway.
|===

[[configurations-logging]]
=== Statement logging and statistics

[width="100%",cols="20%,20%,60%",]
|===
|Property |Example |Purpose
3+|SQL statement logging
|`hibernate.show_sql` |`true` or `false` (default value) |Write all SQL statements to the console. This is an alternative to setting the log category `org.hibernate.SQL` to debug.
|`hibernate.format_sql` |`true` or `false` (default value) |Pretty-print the SQL in the log and console.
|`hibernate.use_sql_comments` |`true` or `false` (default value) |If true, Hibernate generates comments inside the SQL, for easier debugging.
3+|Statistics settings
|`hibernate.generate_statistics` |`true` or `false` |Causes Hibernate to collect statistics for performance tuning.
|`hibernate.session.events.log` |`true` or `false` |

A setting to control whether the `org.hibernate.engine.internal.StatisticalLoggingSessionEventListener` is enabled on all `Sessions` (unless explicitly disabled for a given `Session`).
The default value of this setting is determined by the value for `hibernate.generate_statistics`, meaning that if statistics are enabled, then logging of Session metrics is enabled by default too.

|===

[[configurations-cache]]
=== Cache Properties

[width="100%",cols="20%,20%,60%",]
|===
|Property |Example |Purpose
|`hibernate.cache.region.factory_class` | `org.hibernate.cache.infinispan.InfinispanRegionFactory` |The fully-qualified name of the `RegionFactory` implementation class.
|`hibernate.cache.default_cache_concurrency_strategy` | |

Setting used to give the name of the default `org.hibernate.annotations.CacheConcurrencyStrategy` to use when either `@javax.persistence.Cacheable` or `@org.hibernate.annotations.Cache` is used. `@org.hibernate.annotations.Cache` can also be used to override the global setting.

|`hibernate.cache.use_minimal_puts` |`true` (default value) or `false` |Optimizes second-level cache operation to minimize writes, at the cost of more frequent reads. This is most useful for clustered caches and is enabled by default for clustered cache implementations.
|`hibernate.cache.use_query_cache` |`true` or `false` (default value) |Enables the query cache. You still need to set individual queries to be cacheable.
|`hibernate.cache.use_second_level_cache` |`true` (default value) or `false` |Enable/disable the second level cache, which is enabled by default, although the default `RegionFactory` is `NoCachingRegionFactory` (meaning there is no actual caching implementation).
|`hibernate.cache.query_cache_factory` |Fully-qualified classname |A custom `org.hibernate.cache.spi.QueryCacheFactory` interface. The default is the built-in `StandardQueryCacheFactory`.
|`hibernate.cache.region_prefix` |A string |A prefix for second-level cache region names.
|`hibernate.cache.use_structured_entries` |`true` or `false` (default value) |Forces Hibernate to store data in the second-level cache in a more human-readable format.
|`hibernate.cache.auto_evict_collection_cache` |`true` or `false` (default: false) |Enables the automatic eviction of a bi-directional association's collection cache when an element in the `ManyToOne` collection is added/updated/removed without properly managing the change on the `OneToMany` side.
|`hibernate.cache.use_reference_entries` |`true` or `false` |Optimizes second-level cache operation to store immutable entities (aka "reference") which do not have associations into the cache directly; in this case, lots of disassemble and deep copy operations can be avoided. The default value of this property is `false`.
|`hibernate.ejb.classcache`| `hibernate.ejb.classcache.org.hibernate.ejb.test.Item` = `read-write` | Sets the associated entity class cache concurrency strategy for the designated region. Caching configuration should follow the pattern `hibernate.ejb.classcache.<fully.qualified.Classname>` usage[, region] where usage is the cache strategy used and region the cache region name.
|`hibernate.ejb.collectioncache`| `hibernate.ejb.collectioncache.org.hibernate.ejb.test.Item.distributors` = `read-write, RegionName` | Sets the associated collection cache concurrency strategy for the designated region. Caching configuration should follow the pattern `hibernate.ejb.collectioncache.<fully.qualified.Classname>.<role>` usage[, region] where usage is the cache strategy used and region the cache region name.
|===

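As an illustration only (a minimal sketch; the entity, field, and region names are placeholders), the cache settings above are typically paired with an entity that is explicitly marked cacheable:

[source,java]
----
import javax.persistence.Cacheable;
import javax.persistence.Entity;
import javax.persistence.Id;

import org.hibernate.annotations.Cache;
import org.hibernate.annotations.CacheConcurrencyStrategy;

@Entity
@Cacheable
// READ_WRITE is one of the CacheConcurrencyStrategy values that
// hibernate.cache.default_cache_concurrency_strategy can also supply globally.
// This assumes hibernate.cache.use_second_level_cache=true and a real
// hibernate.cache.region.factory_class have been configured.
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region = "item")
public class Item {

	@Id
	private Long id;

	private String name;

	// getters and setters omitted for brevity
}
----
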
[[configurations-transactions]]
=== Transactions properties

[width="100%",cols="20%,20%,60%",]
|===
|Property |Example |Purpose
|`hibernate.transaction.jta.platform` |`JBossAS`, `BitronixJtaPlatform` |

Names the `org.hibernate.engine.transaction.jta.platform.spi.JtaPlatform` implementation to use for integrating with JTA systems.
Can reference either a `org.hibernate.engine.transaction.jta.platform.spi.JtaPlatform` instance or the name of the `org.hibernate.engine.transaction.jta.platform.spi.JtaPlatform` implementation class.

|`hibernate.jta.prefer_user_transaction` |`true` or `false` (default value) |

Should we prefer using the `org.hibernate.engine.transaction.jta.platform.spi.JtaPlatform#retrieveUserTransaction` over using `org.hibernate.engine.transaction.jta.platform.spi.JtaPlatform#retrieveTransactionManager`?

|`hibernate.transaction.jta.platform_resolver` | | Names the `org.hibernate.engine.transaction.jta.platform.spi.JtaPlatformResolver` implementation to use.
|`hibernate.jta.cacheTransactionManager` | `true` (default value) or `false` | A configuration value key used to indicate that it is safe to cache the JTA `TransactionManager` reference.
|`hibernate.jta.cacheUserTransaction` | `true` or `false` (default value) | A configuration value key used to indicate that it is safe to cache the JTA `UserTransaction` reference.
|`hibernate.transaction.flush_before_completion` |`true` or `false` (default value) | Causes the session to be flushed during the before-completion phase of the transaction. If possible, use built-in and automatic session context management instead.
|`hibernate.transaction.auto_close_session` |`true` or `false` (default value) |Causes the session to be closed during the after-completion phase of the transaction. If possible, use built-in and automatic session context management instead.
|`hibernate.transaction.coordinator_class` | a|

Names the implementation of `org.hibernate.resource.transaction.TransactionCoordinatorBuilder` to use for creating `org.hibernate.resource.transaction.TransactionCoordinator` instances.

Can be:

* a `TransactionCoordinatorBuilder` instance
* a `TransactionCoordinatorBuilder` implementation `Class` reference
* a `TransactionCoordinatorBuilder` implementation class name (fully-qualified name) or short-name

|`hibernate.jta.track_by_thread` | `true` (default value) or `false` |

A transaction can be rolled back by another thread ("tracking by thread") and not the original application.
Examples of this include a JTA transaction timeout handled by a background reaper thread.

The ability to handle this situation requires checking the Thread ID every time the Session is called, so enabling this can certainly have a performance impact.
|===

[[configurations-multi-tenancy]]
=== Multi-tenancy settings

[width="100%",cols="20%,20%,60%",]
|===
|Property |Example |Purpose
|`hibernate.multiTenancy` | `NONE` (default value), `SCHEMA`, `DATABASE`, and `DISCRIMINATOR` (not implemented yet) | The multi-tenancy strategy in use.
|`hibernate.multi_tenant_connection_provider` | `true` or `false` (default value) | Names a `org.hibernate.engine.jdbc.connections.spi.MultiTenantConnectionProvider` implementation to use. As `MultiTenantConnectionProvider` is also a service, it can be configured directly through the `org.hibernate.boot.registry.StandardServiceRegistryBuilder`.
|`hibernate.tenant_identifier_resolver` | a|

Names a `org.hibernate.context.spi.CurrentTenantIdentifierResolver` implementation to resolve the current tenant identifier, so that calling `SessionFactory#openSession()` would get a `Session` that's connected to the right tenant.

Can be:

* `CurrentTenantIdentifierResolver` instance
* `CurrentTenantIdentifierResolver` implementation `Class` object reference
* `CurrentTenantIdentifierResolver` implementation class name

|===

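As a hedged sketch of that last setting (how the current tenant is tracked is application-specific; the `ThreadLocal` mechanism, class name, and fallback tenant used here are placeholders), a `CurrentTenantIdentifierResolver` might look like this:

[source,java]
----
import org.hibernate.context.spi.CurrentTenantIdentifierResolver;

public class ThreadLocalTenantIdentifierResolver implements CurrentTenantIdentifierResolver {

	// One possible way to track the tenant; "public" is a placeholder fallback tenant.
	private static final ThreadLocal<String> CURRENT_TENANT = new ThreadLocal<>();

	public static void setCurrentTenant(String tenantId) {
		CURRENT_TENANT.set( tenantId );
	}

	@Override
	public String resolveCurrentTenantIdentifier() {
		String tenantId = CURRENT_TENANT.get();
		return tenantId != null ? tenantId : "public";
	}

	@Override
	public boolean validateExistingCurrentSessions() {
		// When true, Hibernate verifies that an existing "current" Session
		// belongs to the tenant identifier resolved above.
		return true;
	}
}
----
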
[[configurations-hbmddl]]
|
||||
=== Automatic schema generation
|
||||
|
||||
[width="100%",cols="20%,20%,60%",]
|
||||
|===================================================================================================================================================================================================================================
|
||||
|Property |Example |Purpose
|
||||
|`hibernate.hbm2ddl.auto` |`none` (default value), `create-only`, `drop`, `create`, `create-drop`, `validate`, and `update` a|
|
||||
|
||||
Setting to perform `SchemaManagementTool` actions automatically as part of the `SessionFactory` lifecycle.
|
||||
Valid options are defined by the `externalHbm2ddlName` value of the `org.hibernate.tool.schema.Action` enum:
|
||||
|
||||
`none`:: No action will be performed.
|
||||
`create-only`:: Database creation will be generated.
|
||||
`drop`:: Database dropping will be generated.
|
||||
`create`:: Database dropping will be generated followed by database creation.
|
||||
`create-drop`:: Drop the schema and recreate it on SessionFactory startup. Additionally, drop the schema on SessionFactory shutdown.
|
||||
`validate`:: Validate the database schema
|
||||
`update`:: Update the database schema
|
||||
|
||||
|`javax.persistence.schema-generation.database.action` |`none` (default value), `create`, `drop`, and `drop-and-create` a|
|
||||
|
||||
Setting to perform `SchemaManagementTool` actions automatically as part of the `SessionFactory` lifecycle.
|
||||
Valid options are defined by the `externalJpaName` value of the `org.hibernate.tool.schema.Action` enum:
|
||||
|
||||
`none`:: No action will be performed.
|
||||
`create`:: Database creation will be generated.
|
||||
`drop`:: Database dropping will be generated.
|
||||
`drop-and-create`:: Database dropping will be generated followed by database creation.
|
||||
|
||||
|`javax.persistence.schema-generation.scripts.action` |`none` (default value), `create`, `drop`, and `drop-and-create` a|
|
||||
|
||||
Setting to perform `SchemaManagementTool` actions writing the commands into a DDL script file.
|
||||
Valid options are defined by the `externalJpaName` value of the `org.hibernate.tool.schema.Action` enum:
|
||||
|
||||
`none`:: No action will be performed.
|
||||
`create`:: Database creation will be generated.
|
||||
`drop`:: Database dropping will be generated.
|
||||
`drop-and-create`:: Database dropping will be generated followed by database creation.
|
||||
|
||||
|`javax.persistence.schema-generation-connection` | |Allows passing a specific `java.sql.Connection` instance to be used by `SchemaManagementTool`
|
||||
|`javax.persistence.database-product-name` | |
|
||||
|
||||
Specifies the name of the database provider in cases where a `Connection` to the underlying database is not available (e.g., when generating scripts).
|
||||
In such cases, a value for this setting _must_ be specified.
|
||||
|
||||
The value of this setting is expected to match the value returned by `java.sql.DatabaseMetaData#getDatabaseProductName()` for the target database.
|
||||
|
||||
Additionally, specifying `javax.persistence.database-major-version` and/or `javax.persistence.database-minor-version` may be required to understand exactly how to generate the required schema commands.
|
||||
|
||||
|`javax.persistence.database-major-version` | |
|
||||
|
||||
Specifies the major version of the underlying database, as would be returned by `java.sql.DatabaseMetaData#getDatabaseMajorVersion` for the target database.
|
||||
|
||||
This value is used to help more precisely determine how to perform schema generation tasks for the underlying database in cases where `javax.persistence.database-product-name` does not provide enough distinction.
|
||||
|
||||
|`javax.persistence.database-minor-version` | |
|
||||
|
||||
Specifies the minor version of the underlying database, as would be returned by `java.sql.DatabaseMetaData#getDatabaseMinorVersion` for the target database.
|
||||
|
||||
This value is used to help more precisely determine how to perform schema generation tasks for the underlying database in cases where `javax.persistence.database-product-name` and `javax.persistence.database-major-version` do not provide enough distinction.
|
||||
|
||||
|`javax.persistence.schema-generation.create-source` | a|
|
||||
|
||||
Specifies whether schema generation commands for schema creation are to be determined based on object/relational mapping metadata, DDL scripts, or a combination of the two.
|
||||
See `org.hibernate.tool.schema.SourceType` for valid set of values.
|
||||
|
||||
If no value is specified, a default is assumed as follows:
|
||||
|
||||
* if source scripts are specified (per `javax.persistence.schema-generation.create-script-source`), then `scripts` is assumed
|
||||
* otherwise, `metadata` is assumed
|
||||
|
||||
|`javax.persistence.schema-generation.drop-source` | a|
|
||||
|
||||
Specifies whether schema generation commands for schema dropping are to be determined based on object/relational mapping metadata, DDL scripts, or a combination of the two.
|
||||
See `org.hibernate.tool.schema.SourceType` for valid set of values.
|
||||
|
||||
If no value is specified, a default is assumed as follows:
|
||||
|
||||
* if source scripts are specified (per `javax.persistence.schema-generation.drop-script-source`), then `scripts` is assumed
|
||||
* otherwise, `metadata` is assumed
|
||||
|
||||
|`javax.persistence.schema-generation.create-script-source` | |
|
||||
|
||||
Specifies the `create` script file as either a `java.io.Reader` configured for reading of the DDL script file or a string designating a file `java.net.URL` for the DDL script.
|
||||
|
||||
Hibernate historically also accepted `hibernate.hbm2ddl.import_files` for a similar purpose, but `javax.persistence.schema-generation.create-script-source` should be preferred over `hibernate.hbm2ddl.import_files`.
|
||||
|
||||
|`javax.persistence.schema-generation.drop-script-source` | | Specifies the `drop` script file as either a `java.io.Reader` configured for reading of the DDL script file or a string designating a file `java.net.URL` for the DDL script.
|
||||
|`javax.persistence.schema-generation.scripts.create-target` | |For cases where the `javax.persistence.schema-generation.scripts.action` value indicates that schema creation commands should be written to DDL script file, `javax.persistence.schema-generation.scripts.create-target` specifies either a `java.io.Writer` configured for output of the DDL script or a string specifying the file URL for the DDL script.
|
||||
|`javax.persistence.schema-generation.scripts.drop-target` | |For cases where the `javax.persistence.schema-generation.scripts.action` value indicates that schema dropping commands should be written to DDL script file, `javax.persistence.schema-generation.scripts.drop-target` specifies either a `java.io.Writer` configured for output of the DDL script or a string specifying the file URL for the DDL script.
|
||||
|`hibernate.hbm2ddl.import_files` | `import.sql` (default value) a|
|
||||
|
||||
Comma-separated names of the optional files containing SQL DML statements executed during the `SessionFactory` creation.
|
||||
File order matters: the statements of a given file are executed before the statements of the following one.
|
||||
|
||||
These statements are only executed if the schema is created, meaning that `hibernate.hbm2ddl.auto` is set to `create` or `create-drop`.
|
||||
`javax.persistence.schema-generation.create-script-source` / `javax.persistence.schema-generation.drop-script-source` should be preferred.
|
||||
|
||||
|`javax.persistence.sql-load-script-source` | |
|
||||
|
||||
JPA variant of `hibernate.hbm2ddl.import_files`. Specifies a `java.io.Reader` configured for reading of the SQL load script or a string designating the file `java.net.URL` for the SQL load script.
|
||||
A "SQL load script" is a script that performs some database initialization (INSERT, etc).
|
||||
|
||||
|`hibernate.hbm2ddl.import_files_sql_extractor` | |
|
||||
|
||||
Reference to the `org.hibernate.tool.hbm2ddl.ImportSqlCommandExtractor` implementation class to use for parsing source/import files as defined by `javax.persistence.schema-generation.create-script-source`,
|
||||
`javax.persistence.schema-generation.drop-script-source` or `hibernate.hbm2ddl.import_files`.
|
||||
|
||||
The reference may be an instance, a `Class` implementing `ImportSqlCommandExtractor`, or the fully-qualified name of the `ImportSqlCommandExtractor` implementation.
|
||||
If the fully-qualified name is given, the implementation must provide a no-arg constructor.
|
||||
|
||||
The default value is `org.hibernate.tool.hbm2ddl.SingleLineSqlCommandExtractor`.
|
||||
|
||||
|`hibernate.hbm2dll.create_namespaces` | `true` or `false` (default value) |Specifies whether to also automatically create the database schema/catalog.
|
||||
|`javax.persistence.create-database-schemas` | `true` or `false` (default value) |
|
||||
|
||||
The JPA variant of `hibernate.hbm2dll.create_namespaces`. Specifies whether the persistence provider is to create the database schema(s) in addition to creating database objects (tables, sequences, constraints, etc).
|
||||
The value of this boolean property should be set to `true` if the persistence provider is to create schemas in the database or to generate DDL that contains "CREATE SCHEMA" commands.
|
||||
If this property is not supplied (or is explicitly `false`), the provider should not attempt to create database schemas.
|
||||
|
||||
|`hibernate.hbm2ddl.schema_filter_provider` | |
|
||||
|
||||
Used to specify the `org.hibernate.tool.schema.spi.SchemaFilterProvider` to be used by `create`, `drop`, `migrate`, and `validate` operations on the database schema.
|
||||
`SchemaFilterProvider` provides filters that can be used to limit the scope of these operations to specific namespaces, tables and sequences. All objects are included by default.
|
||||
|
||||
|`hibernate.hbm2ddl.delimiter` | `;` |Identifies the delimiter to use to separate schema management statements in script outputs.
|
||||
|
||||
|`hibernate.schema_management_tool` |Fully-qualified name of a `SchemaManagementTool` implementation |Used to specify the `org.hibernate.tool.schema.spi.SchemaManagementTool` to use for performing schema management. The default is to use `org.hibernate.tool.schema.internal.HibernateSchemaManagementTool`
|
||||
|`hibernate.synonyms` |`true` or `false` (default value) |If enabled, allows schema update and validation to support synonyms. Due to the possibility that this would return duplicate tables (especially in Oracle), this is disabled by default.
|
||||
|`hibernate.hbm2dll.extra_physical_table_types` |`BASE TABLE` |Identifies a comma-separated list of values to specify extra table types, other than the default `TABLE` value, to recognize as defining a physical table by schema update, creation and validation.
|
||||
|`hibernate.schema_update.unique_constraint_strategy` |`DROP_RECREATE_QUIETLY`, `RECREATE_QUIETLY`, `SKIP` a|
|
||||
|
||||
Unique columns and unique keys both use unique constraints in most dialects.
|
||||
`SchemaUpdate` needs to create these constraints, but database support for finding existing constraints is extremely inconsistent.
|
||||
Further, non-explicitly-named unique constraints use randomly generated names.
|
||||
|
||||
Therefore, the `org.hibernate.tool.hbm2ddl.UniqueConstraintSchemaUpdateStrategy` offers the following options:
|
||||
|
||||
`DROP_RECREATE_QUIETLY`:: Default option.
|
||||
Attempt to drop, then (re-)create each unique constraint. Ignore any exceptions being thrown.
|
||||
`RECREATE_QUIETLY`::
|
||||
Attempts to (re-)create unique constraints, ignoring exceptions thrown if the constraint already existed
|
||||
`SKIP`::
|
||||
Does not attempt to create unique constraints on a schema update.
|
||||
|
||||
|===================================================================================================================================================================================================================================
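
As a rough sketch of how the JPA script-generation settings above can be combined, `javax.persistence.Persistence#generateSchema()` accepts them as a settings map; the persistence unit name and target file names here are hypothetical:

[source,java]
----
Map<String, Object> settings = new HashMap<>();
settings.put( "javax.persistence.schema-generation.scripts.action", "drop-and-create" );
settings.put( "javax.persistence.schema-generation.scripts.create-target", "create.sql" );
settings.put( "javax.persistence.schema-generation.scripts.drop-target", "drop.sql" );

// "my-persistence-unit" is a hypothetical persistence unit name
Persistence.generateSchema( "my-persistence-unit", settings );
----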
|
||||
|
||||
[[configurations-exception-handling]]
|
||||
=== Exception handling
|
||||
|
||||
[width="100%",cols="20%,20%,60%",]
|
||||
|===================================================================================================================================================================================================================================
|
||||
|Property |Example |Purpose
|
||||
|`hibernate.jdbc.sql_exception_converter` | Fully-qualified name of class implementing `SQLExceptionConverter` |The `org.hibernate.exception.spi.SQLExceptionConverter` to use for converting `SQLExceptions` to Hibernate's `JDBCException` hierarchy. The default is to use the configured `org.hibernate.dialect.Dialect`'s preferred `SQLExceptionConverter`.
|
||||
|===================================================================================================================================================================================================================================
|
||||
|
||||
[[configurations-session-events]]
|
||||
=== Session events
|
||||
|
||||
[width="100%",cols="20%,20%,60%",]
|
||||
|===========================================================================================================================
|
||||
|Property |Example |Purpose
|
||||
|`hibernate.session.events.auto` | | Fully qualified class name implementing the `SessionEventListener` interface.
|
||||
|`hibernate.session_factory.interceptor` or `hibernate.ejb.interceptor` | `org.hibernate.EmptyInterceptor` (default value) a|
|
||||
|
||||
Names a `org.hibernate.Interceptor` implementation to be applied to every `Session` created by the current `org.hibernate.SessionFactory`
|
||||
|
||||
Can reference:
|
||||
|
||||
* `Interceptor` instance
|
||||
* `Interceptor` implementation `Class` object reference
|
||||
* `Interceptor` implementation class name
|
||||
|
||||
|`hibernate.ejb.interceptor.session_scoped` | fully-qualified class name or class reference | An optional Hibernate interceptor.
|
||||
|
||||
The interceptor instance is specific to a given `Session` instance (and hence is not thread-safe), has to implement `org.hibernate.Interceptor`, and must have a no-arg constructor.
|
||||
|
||||
This property cannot be combined with `hibernate.ejb.interceptor`.
|
||||
|`hibernate.ejb.session_factory_observer` | fully-qualified class name or class reference | Specifies a `SessionFactoryObserver` to be applied to the SessionFactory. The class must have a no-arg constructor.
|
||||
|`hibernate.ejb.event` | `hibernate.ejb.event.pre-load` = `com.acme.SecurityListener,com.acme.AuditListener` | Event listener list for a given event type. The list of event listeners is a comma separated fully qualified class name list.
|
||||
|===========================================================================================================================
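
As an illustration of the interceptor settings, here is a minimal sketch of an application-defined interceptor (the `AuditInterceptor` class name is hypothetical); it could then be registered by setting `hibernate.session_factory.interceptor` to its fully-qualified name:

[source,java]
----
import java.io.Serializable;

import org.hibernate.EmptyInterceptor;
import org.hibernate.type.Type;

public class AuditInterceptor extends EmptyInterceptor {

    @Override
    public boolean onSave(Object entity, Serializable id, Object[] state,
            String[] propertyNames, Type[] types) {
        // react to an entity being saved; returning false signals that
        // the state array was not modified by this interceptor
        System.out.println( "Saving instance of " + entity.getClass().getName() );
        return false;
    }
}
----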
|
||||
|
||||
[[configurations-jmx]]
|
||||
=== JMX settings
|
||||
|
||||
[width="100%",cols="20%,20%,60%",]
|
||||
|===================================================================================================================================================================================================================================
|
||||
|Property |Example |Purpose
|
||||
|`hibernate.jmx.enabled` | `true` or `false` (default value) | Enable JMX.
|
||||
|`hibernate.jmx.usePlatformServer` | `true` or `false` (default value) | Uses the platform MBeanServer as returned by `ManagementFactory#getPlatformMBeanServer()`.
|
||||
|`hibernate.jmx.agentId` | | The agent identifier of the associated `MBeanServer`.
|
||||
|`hibernate.jmx.defaultDomain` | | The domain name of the associated `MBeanServer`.
|
||||
|`hibernate.jmx.sessionFactoryName` | | The `SessionFactory` name appended to the object name the Manageable Bean is registered with. If null, the `hibernate.session_factory_name` configuration value is used.
|
||||
|`org.hibernate.core` | | The default object domain appended to the object name the Manageable Bean is registered with.
|
||||
|===================================================================================================================================================================================================================================
|
||||
|
||||
[[configurations-jacc]]
|
||||
=== JACC settings
|
||||
|
||||
[width="100%",cols="20%,20%,60%",]
|
||||
|===================================================================================================================================================================================================================================
|
||||
|Property |Example |Purpose
|
||||
|`hibernate.jacc.enabled` | `true` or `false` (default value) | Is JACC enabled?
|
||||
|`hibernate.jacc` | `hibernate.jacc.allowed.org.jboss.ejb3.test.jacc.AllEntity` | The property name defines the role (e.g. `allowed`) and the entity class name (e.g. `org.jboss.ejb3.test.jacc.AllEntity`), while the property value defines the authorized actions (e.g. `insert,update,read`).
|
||||
|`hibernate.jacc_context_id` | | A String identifying the policy context whose PolicyConfiguration interface is to be returned. The value passed to this parameter must not be null.
|
||||
|===================================================================================================================================================================================================================================
|
||||
|
||||
[[configurations-classloaders]]
|
||||
=== ClassLoaders properties
|
||||
|
||||
[width="100%",cols="20%,20%,60%",]
|
||||
|=====================================================================================================================================================================================================================================================
|
||||
|Property |Example |Purpose
|
||||
|`hibernate.classLoaders` | |Used to define a `java.util.Collection<ClassLoader>` or the `ClassLoader` instance Hibernate should use for class-loading and resource-lookups.
|
||||
|`hibernate.classLoader.application` | |Names the `ClassLoader` used to load user application classes.
|
||||
|`hibernate.classLoader.resources` | |Names the `ClassLoader` Hibernate should use to perform resource loading.
|
||||
|`hibernate.classLoader.hibernate` | |Names the `ClassLoader` responsible for loading Hibernate classes. By default this is the `ClassLoader` that loaded this class.
|
||||
|`hibernate.classLoader.environment` | |Names the `ClassLoader` used when Hibernate is unable to locate classes through the `hibernate.classLoader.application` or `hibernate.classLoader.hibernate` class loaders.
|
||||
|=====================================================================================================================================================================================================================================================
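
These class-loading settings can also be applied programmatically during bootstrapping; a hedged sketch using `org.hibernate.boot.registry.BootstrapServiceRegistryBuilder`, where `MyApplication` is a hypothetical application class:

[source,java]
----
BootstrapServiceRegistry bootstrapRegistry = new BootstrapServiceRegistryBuilder()
    // use the application's ClassLoader for class-loading and resource-lookups
    .applyClassLoader( MyApplication.class.getClassLoader() )
    .build();
----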
|
||||
|
||||
[[configurations-misc]]
|
||||
=== Miscellaneous properties
|
||||
|
||||
[width="100%",cols="20%,20%,60%",]
|
||||
|=====================================================================================================================================================================================================================================================
|
||||
|Property |Example |Purpose
|
||||
|`hibernate.dialect_resolvers` | | Names any additional `org.hibernate.engine.jdbc.dialect.spi.DialectResolver` implementations to register with the standard `org.hibernate.engine.jdbc.dialect.spi.DialectFactory`
|
||||
|`hibernate.session_factory_name` |A JNDI name |
|
||||
|
||||
Setting used to name the Hibernate `SessionFactory`.
|
||||
Naming the `SessionFactory` allows for it to be properly serialized across JVMs as long as the same name is used on each JVM.
|
||||
|
||||
If `hibernate.session_factory_name_is_jndi` is set to `true`, this is also the name under which the `SessionFactory` is bound into JNDI on startup and from which it can be obtained from JNDI.
|
||||
|
||||
|`hibernate.session_factory_name_is_jndi` |`true` (default value) or `false` |
|
||||
|
||||
Does the value defined by `hibernate.session_factory_name` represent a JNDI namespace into which the `org.hibernate.SessionFactory` should be bound and made accessible?
|
||||
|
||||
Defaults to `true` for backwards compatibility. Set this to `false` if naming a SessionFactory is needed for serialization purposes, but no writable JNDI context exists in the runtime environment or if the user simply does not want JNDI to be used.
|
||||
|
||||
|`hibernate.ejb.entitymanager_factory_name`| By default, the persistence unit name is used, otherwise a randomly generated UUID | Internally, Hibernate keeps track of all `EntityManagerFactory` instances using the `EntityManagerFactoryRegistry`. The name is used as a key to identify a given `EntityManagerFactory` reference.
|
||||
|`hibernate.ejb.cfgfile`| `hibernate.cfg.xml` (default value) | XML configuration file to use to configure Hibernate.
|
||||
|`hibernate.ejb.discard_pc_on_close`| `true` or `false` (default value) |
|
||||
|
||||
If true, the persistence context will be discarded (think `clear()`) when `EntityManager#close()` is called.
|
||||
Otherwise, the persistence context will stay alive until transaction completion: all objects will remain managed, and any change will be synchronized with the database (defaults to `false`, i.e. wait for transaction completion).
|
||||
|
||||
|`hibernate.ejb.metamodel.population`| `enabled` or `disabled`, or `ignoreUnsupported` (default value) a|
|
||||
|
||||
Setting that controls whether we seek out JPA _static metamodel_ classes and populate them.
|
||||
|
||||
Accepts three values:
|
||||
|
||||
enabled:: Do the population
|
||||
disabled:: Do not do the population
|
||||
ignoreUnsupported:: Do the population, but ignore any non-JPA features that would otherwise result in the population failing (e.g. `@Any` annotation).
|
||||
|
||||
|`hibernate.delay_cdi_access`| `true` or `false` (default value) | Defines delayed access to CDI `BeanManager`. Starting in 5.1 the preferred means for CDI bootstrapping is through `org.hibernate.jpa.event.spi.jpa.ExtendedBeanManager`.
|
||||
|=====================================================================================================================================================================================================================================================
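
For example, naming the `SessionFactory` and binding it to JNDI could look like the following sketch; the JNDI name is hypothetical:

[source,java]
----
Configuration cfg = new Configuration()
    .setProperty( "hibernate.session_factory_name", "java:hibernate/MySessionFactory" )
    .setProperty( "hibernate.session_factory_name_is_jndi", "true" );
----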
|
||||
|
|
@ -0,0 +1,78 @@
|
|||
[[appendix-legacy-bootstrap]]
|
||||
== Legacy Bootstrapping
|
||||
|
||||
The legacy way to bootstrap a SessionFactory is via the `org.hibernate.cfg.Configuration` object.
|
||||
`Configuration` represents, essentially, a single point for specifying all aspects of building the `SessionFactory`: everything from settings, to mappings, to strategies, etc.
|
||||
I like to think of `Configuration` as a big pot to which we add a bunch of stuff (mappings, settings, etc) and from which we eventually get a `SessionFactory.`
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
There are some significant drawbacks to this approach, which led to its deprecation and the development of the new approach discussed in <<chapters/bootstrap/Bootstrap.adoc#bootstrap-native,Native Bootstrapping>>.
|
||||
`Configuration` is semi-deprecated but still available for use, in a limited form that eliminates these drawbacks.
|
||||
"Under the covers", `Configuration` uses the new bootstrapping code, so the things available there as also available here in terms of auto-discovery.
|
||||
====
|
||||
|
||||
You can obtain the `Configuration` by instantiating it directly.
|
||||
You then specify mapping metadata (XML mapping documents, annotated classes) that describe your application's object model and its mapping to a SQL database.
|
||||
|
||||
[source,java]
|
||||
----
|
||||
Configuration cfg = new Configuration()
|
||||
// addResource does a classpath resource lookup
|
||||
.addResource("Item.hbm.xml")
|
||||
.addResource("Bid.hbm.xml")
|
||||
|
||||
// calls addResource using "/org/hibernate/auction/User.hbm.xml"
|
||||
.addClass( org.hibernate.auction.User.class )
|
||||
|
||||
// parses Address class for mapping annotations
|
||||
.addAnnotatedClass( Address.class )
|
||||
|
||||
// reads package-level (package-info.class) annotations in the named package
|
||||
.addPackage( "org.hibernate.auction" )
|
||||
|
||||
.setProperty("hibernate.dialect", "org.hibernate.dialect.H2Dialect")
|
||||
.setProperty("hibernate.connection.datasource", "java:comp/env/jdbc/test")
|
||||
.setProperty("hibernate.order_updates", "true");
|
||||
----
|
||||
|
||||
There are other ways to specify `Configuration` information, including the following (a short sketch follows the list):
|
||||
|
||||
* Place a file named `hibernate.properties` in a root directory of the classpath
|
||||
* Pass an instance of `java.util.Properties` to `Configuration#setProperties`
|
||||
* Via a `hibernate.cfg.xml` file
|
||||
* System properties using java `-Dproperty=value`
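
For instance, a minimal sketch combining a `hibernate.cfg.xml` resource with programmatic properties; it assumes such a file exists at the root of the classpath:

[source,java]
----
Properties extra = new Properties();
extra.setProperty( "hibernate.show_sql", "true" );

Configuration cfg = new Configuration()
    // reads hibernate.cfg.xml from the root of the classpath
    .configure()
    // programmatic settings applied on top of the XML configuration
    .addProperties( extra );
----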
|
||||
|
||||
== Migration
|
||||
|
||||
The following table maps `Configuration` methods to the corresponding methods in the new APIs.
|
||||
|
||||
|===
|
||||
|`Configuration#addFile`|`Configuration#addFile`
|
||||
|`Configuration#add(XmlDocument)`|`Configuration#add(XmlDocument)`
|
||||
|`Configuration#addXML`|`Configuration#addXML`
|
||||
|`Configuration#addCacheableFile`|`Configuration#addCacheableFile`
|
||||
|`Configuration#addURL`|`Configuration#addURL`
|
||||
|`Configuration#addInputStream`|`Configuration#addInputStream`
|
||||
|`Configuration#addResource`|`Configuration#addResource`
|
||||
|`Configuration#addClass`|`Configuration#addClass`
|
||||
|`Configuration#addAnnotatedClass`|`Configuration#addAnnotatedClass`
|
||||
|`Configuration#addPackage`|`Configuration#addPackage`
|
||||
|`Configuration#addJar`|`Configuration#addJar`
|
||||
|`Configuration#addDirectory`|`Configuration#addDirectory`
|
||||
|`Configuration#registerTypeContributor`|`Configuration#registerTypeContributor`
|
||||
|`Configuration#registerTypeOverride`|`Configuration#registerTypeOverride`
|
||||
|`Configuration#setProperty`|`Configuration#setProperty`
|
||||
|`Configuration#setProperties`|`Configuration#setProperties`
|
||||
|`Configuration#addProperties`|`Configuration#addProperties`
|
||||
|`Configuration#setNamingStrategy`|`Configuration#setNamingStrategy`
|
||||
|`Configuration#setImplicitNamingStrategy`|`Configuration#setImplicitNamingStrategy`
|
||||
|`Configuration#setPhysicalNamingStrategy`|`Configuration#setPhysicalNamingStrategy`
|
||||
|`Configuration#configure`|`Configuration#configure`
|
||||
|`Configuration#setInterceptor`|`Configuration#setInterceptor`
|
||||
|`Configuration#setEntityNotFoundDelegate`|`Configuration#setEntityNotFoundDelegate`
|
||||
|`Configuration#setSessionFactoryObserver`|`Configuration#setSessionFactoryObserver`
|
||||
|`Configuration#setCurrentTenantIdentifierResolver`|`Configuration#setCurrentTenantIdentifierResolver`
|
||||
|===
|
||||
|
||||
|
|
@ -0,0 +1,487 @@
|
|||
[[appendix-legacy-criteria]]
|
||||
== Legacy Hibernate Criteria Queries
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
This appendix covers the legacy Hibernate `org.hibernate.Criteria` API, which should be considered deprecated.
|
||||
|
||||
New development should focus on the JPA `javax.persistence.criteria.CriteriaQuery` API.
|
||||
Eventually, Hibernate-specific criteria features will be ported as extensions to the JPA `javax.persistence.criteria.CriteriaQuery`.
|
||||
For details on the JPA APIs, see <<chapters/query/criteria/Criteria.adoc#criteria, Criteria>>.
|
||||
====
|
||||
|
||||
Hibernate features an intuitive, extensible criteria query API.
|
||||
|
||||
[[criteria-creating]]
|
||||
=== Creating a `Criteria` instance
|
||||
|
||||
The interface `org.hibernate.Criteria` represents a query against a particular persistent class.
|
||||
The `Session` is a factory for `Criteria` instances.
|
||||
|
||||
[source,java]
|
||||
----
|
||||
Criteria crit = sess.createCriteria(Cat.class);
|
||||
crit.setMaxResults(50);
|
||||
List cats = crit.list();
|
||||
----
|
||||
|
||||
[[criteria-narrowing]]
|
||||
=== Narrowing the result set
|
||||
|
||||
An individual query criterion is an instance of the interface `org.hibernate.criterion.Criterion`.
|
||||
The class `org.hibernate.criterion.Restrictions` defines factory methods for obtaining certain built-in `Criterion` types.
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.add( Restrictions.like("name", "Fritz%") )
|
||||
.add( Restrictions.between("weight", minWeight, maxWeight) )
|
||||
.list();
|
||||
----
|
||||
|
||||
Restrictions can be grouped logically.
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.add( Restrictions.like("name", "Fritz%") )
|
||||
.add( Restrictions.or(
|
||||
Restrictions.eq( "age", new Integer(0) ),
|
||||
Restrictions.isNull("age")
|
||||
) )
|
||||
.list();
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.add( Restrictions.in( "name", new String[] { "Fritz", "Izi", "Pk" } ) )
|
||||
.add( Restrictions.disjunction()
|
||||
.add( Restrictions.isNull("age") )
|
||||
.add( Restrictions.eq("age", new Integer(0) ) )
|
||||
.add( Restrictions.eq("age", new Integer(1) ) )
|
||||
.add( Restrictions.eq("age", new Integer(2) ) )
|
||||
    )
|
||||
.list();
|
||||
----
|
||||
|
||||
There are a range of built-in criterion types (`Restrictions` subclasses).
|
||||
One of the most useful `Restrictions` allows you to specify SQL directly.
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.add( Restrictions.sqlRestriction("lower({alias}.name) like lower(?)", "Fritz%", Hibernate.STRING) )
|
||||
.list();
|
||||
----
|
||||
|
||||
The `{alias}` placeholder will be replaced by the row alias of the queried entity.
|
||||
|
||||
You can also obtain a criterion from a `Property` instance.
|
||||
You can create a `Property` by calling `Property.forName()`:
|
||||
|
||||
[source,java]
|
||||
----
|
||||
|
||||
Property age = Property.forName("age");
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.add( Restrictions.disjunction()
|
||||
.add( age.isNull() )
|
||||
.add( age.eq( new Integer(0) ) )
|
||||
.add( age.eq( new Integer(1) ) )
|
||||
.add( age.eq( new Integer(2) ) )
|
||||
    )
|
||||
.add( Property.forName("name").in( new String[] { "Fritz", "Izi", "Pk" } ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
[[criteria-ordering]]
|
||||
=== Ordering the results
|
||||
|
||||
You can order the results using `org.hibernate.criterion.Order`.
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.add( Restrictions.like("name", "F%")
|
||||
.addOrder( Order.asc("name").nulls(NullPrecedence.LAST) )
|
||||
.addOrder( Order.desc("age") )
|
||||
.setMaxResults(50)
|
||||
.list();
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.add( Property.forName("name").like("F%") )
|
||||
.addOrder( Property.forName("name").asc() )
|
||||
.addOrder( Property.forName("age").desc() )
|
||||
.setMaxResults(50)
|
||||
.list();
|
||||
----
|
||||
|
||||
[[criteria-associations]]
|
||||
=== Associations
|
||||
|
||||
By navigating associations using `createCriteria()` you can specify constraints upon related entities:
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.add( Restrictions.like("name", "F%") )
|
||||
.createCriteria("kittens")
|
||||
.add( Restrictions.like("name", "F%") )
|
||||
.list();
|
||||
----
|
||||
|
||||
The second `createCriteria()` returns a new instance of `Criteria` that refers to the elements of the `kittens` collection.
|
||||
|
||||
There is also an alternate form that is useful in certain circumstances:
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.createAlias("kittens", "kt")
|
||||
.createAlias("mate", "mt")
|
||||
.add( Restrictions.eqProperty("kt.name", "mt.name") )
|
||||
.list();
|
||||
----
|
||||
|
||||
(`createAlias()` does not create a new instance of `Criteria`.)
|
||||
|
||||
The kittens collections held by the `Cat` instances returned by the previous two queries are _not_ pre-filtered by the criteria.
|
||||
If you want to retrieve just the kittens that match the criteria, you must use a `ResultTransformer`.
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.createCriteria("kittens", "kt")
|
||||
.add( Restrictions.eq("name", "F%") )
|
||||
.setResultTransformer(Criteria.ALIAS_TO_ENTITY_MAP)
|
||||
.list();
|
||||
Iterator iter = cats.iterator();
|
||||
while ( iter.hasNext() ) {
|
||||
Map map = (Map) iter.next();
|
||||
Cat cat = (Cat) map.get(Criteria.ROOT_ALIAS);
|
||||
Cat kitten = (Cat) map.get("kt");
|
||||
}
|
||||
----
|
||||
|
||||
Additionally, you may manipulate the result set using a left outer join:
|
||||
|
||||
[source]
|
||||
----
|
||||
List cats = session.createCriteria( Cat.class )
|
||||
.createAlias("mate", "mt", Criteria.LEFT_JOIN, Restrictions.like("mt.name", "good%") )
|
||||
.addOrder(Order.asc("mt.age"))
|
||||
.list();
|
||||
----
|
||||
|
||||
This will return all of the `Cat`s with a mate whose name starts with "good" ordered by their mate's age, and all cats who do not have a mate.
|
||||
This is useful when there is a need to order or limit in the database prior to returning complex/large result sets,
|
||||
and removes many instances where multiple queries would have to be performed and the results unioned by Java in memory.
|
||||
|
||||
Without this feature, first all of the cats without a mate would need to be loaded in one query.
|
||||
|
||||
A second query would need to retrieve the cats with mates whose name started with "good", sorted by the mate's age.
|
||||
|
||||
Third, the lists would need to be joined manually in memory.
|
||||
|
||||
[[criteria-dynamicfetching]]
|
||||
=== Dynamic association fetching
|
||||
|
||||
You can specify association fetching semantics at runtime using `setFetchMode()`.
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.add( Restrictions.like("name", "Fritz%") )
|
||||
.setFetchMode("mate", FetchMode.EAGER)
|
||||
.setFetchMode("kittens", FetchMode.EAGER)
|
||||
.list();
|
||||
----
|
||||
|
||||
This query will fetch both `mate` and `kittens` by outer join.
|
||||
|
||||
[[criteria-components]]
|
||||
=== Components
|
||||
|
||||
To add a restriction against a property of an embedded component, the component property name should be prepended to the property name when creating the `Restriction`.
|
||||
The criteria object should be created on the owning entity, and cannot be created on the component itself.
|
||||
For example, suppose the `Cat` has a component property `fullName` with sub-properties `firstName` and `lastName`:
|
||||
|
||||
[source]
|
||||
----
|
||||
List cats = session.createCriteria(Cat.class)
|
||||
.add(Restrictions.eq("fullName.lastName", "Cattington"))
|
||||
.list();
|
||||
----
|
||||
|
||||
Note: this does not apply when querying collections of components; for that, see <<criteria-collections>> below.
|
||||
|
||||
[[criteria-collections]]
|
||||
=== Collections
|
||||
|
||||
When using criteria against collections, there are two distinct cases.
|
||||
One is if the collection contains entities (e.g. `<one-to-many/>` or `<many-to-many/>`) or components (`<composite-element/>`),
|
||||
and the second is if the collection contains scalar values (`<element/>`).
|
||||
In the first case, the syntax is as given above in the section <<criteria-associations>> where we restrict the `kittens` collection.
|
||||
Essentially we create a `Criteria` object against the collection property and restrict the entity or component properties using that instance.
|
||||
|
||||
For querying a collection of basic values, we still create the `Criteria` object against the collection,
|
||||
but to reference the value, we use the special property "elements".
|
||||
For an indexed collection, we can also reference the index property using the special property "indices".
|
||||
|
||||
[source]
|
||||
----
|
||||
List cats = session.createCriteria(Cat.class)
|
||||
.createCriteria("nickNames")
|
||||
.add(Restrictions.eq("elements", "BadBoy"))
|
||||
.list();
|
||||
----
|
||||
|
||||
[[criteria-examples]]
|
||||
=== Example queries
|
||||
|
||||
The class `org.hibernate.criterion.Example` allows you to construct a query criterion from a given instance.
|
||||
|
||||
[source,java]
|
||||
----
|
||||
Cat cat = new Cat();
|
||||
cat.setSex('F');
|
||||
cat.setColor(Color.BLACK);
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.add( Example.create(cat) )
|
||||
.list();
|
||||
----
|
||||
|
||||
Version properties, identifiers and associations are ignored.
|
||||
By default, null valued properties are excluded.
|
||||
|
||||
You can adjust how the `Example` is applied.
|
||||
|
||||
[source,java]
|
||||
----
|
||||
Example example = Example.create(cat)
|
||||
.excludeZeroes() //exclude zero valued properties
|
||||
.excludeProperty("color") //exclude the property named "color"
|
||||
.ignoreCase() //perform case insensitive string comparisons
|
||||
.enableLike(); //use like for string comparisons
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.add(example)
|
||||
.list();
|
||||
----
|
||||
|
||||
You can even use examples to place criteria upon associated objects.
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.add( Example.create(cat) )
|
||||
.createCriteria("mate")
|
||||
.add( Example.create( cat.getMate() ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
[[criteria-projection]]
|
||||
=== Projections, aggregation and grouping
|
||||
|
||||
The class `org.hibernate.criterion.Projections` is a factory for `Projection` instances.
|
||||
You can apply a projection to a query by calling `setProjection()`.
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.setProjection( Projections.rowCount() )
|
||||
.add( Restrictions.eq("color", Color.BLACK) )
|
||||
.list();
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.setProjection( Projections.projectionList()
|
||||
.add( Projections.rowCount() )
|
||||
.add( Projections.avg("weight") )
|
||||
.add( Projections.max("weight") )
|
||||
.add( Projections.groupProperty("color") )
|
||||
)
|
||||
.list();
|
||||
----
|
||||
|
||||
There is no explicit "group by" necessary in a criteria query.
|
||||
Certain projection types are defined to be __grouping projections__, which also appear in the SQL `group by` clause.
|
||||
|
||||
An alias can be assigned to a projection so that the projected value can be referred to in restrictions or orderings.
|
||||
Here are two different ways to do this:
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.setProjection( Projections.alias( Projections.groupProperty("color"), "colr" ) )
|
||||
.addOrder( Order.asc("colr") )
|
||||
.list();
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.setProjection( Projections.groupProperty("color").as("colr") )
|
||||
.addOrder( Order.asc("colr") )
|
||||
.list();
|
||||
----
|
||||
|
||||
The `alias()` and `as()` methods simply wrap a projection instance in another, aliased, instance of `Projection`.
|
||||
As a shortcut, you can assign an alias when you add the projection to a projection list:
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.setProjection( Projections.projectionList()
|
||||
.add( Projections.rowCount(), "catCountByColor" )
|
||||
.add( Projections.avg("weight"), "avgWeight" )
|
||||
.add( Projections.max("weight"), "maxWeight" )
|
||||
.add( Projections.groupProperty("color"), "color" )
|
||||
)
|
||||
.addOrder( Order.desc("catCountByColor") )
|
||||
.addOrder( Order.desc("avgWeight") )
|
||||
.list();
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Domestic.class, "cat")
|
||||
.createAlias("kittens", "kit")
|
||||
.setProjection( Projections.projectionList()
|
||||
.add( Projections.property("cat.name"), "catName" )
|
||||
.add( Projections.property("kit.name"), "kitName" )
|
||||
)
|
||||
.addOrder( Order.asc("catName") )
|
||||
.addOrder( Order.asc("kitName") )
|
||||
.list();
|
||||
----
|
||||
|
||||
You can also use `Property.forName()` to express projections:
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.setProjection( Property.forName("name") )
|
||||
.add( Property.forName("color").eq(Color.BLACK) )
|
||||
.list();
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.setProjection( Projections.projectionList()
|
||||
.add( Projections.rowCount().as("catCountByColor") )
|
||||
.add( Property.forName("weight").avg().as("avgWeight") )
|
||||
.add( Property.forName("weight").max().as("maxWeight") )
|
||||
.add( Property.forName("color").group().as("color" )
|
||||
)
|
||||
.addOrder( Order.desc("catCountByColor") )
|
||||
.addOrder( Order.desc("avgWeight") )
|
||||
.list();
|
||||
----
|
||||
|
||||
[[criteria-detachedqueries]]
|
||||
=== Detached queries and subqueries
|
||||
|
||||
The `DetachedCriteria` class allows you to create a query outside the scope of a session and then execute it using an arbitrary `Session`.
|
||||
|
||||
[source,java]
|
||||
----
|
||||
DetachedCriteria query = DetachedCriteria.forClass(Cat.class)
|
||||
.add( Property.forName("sex").eq('F') );
|
||||
|
||||
Session session = ....;
|
||||
Transaction txn = session.beginTransaction();
|
||||
List results = query.getExecutableCriteria(session).setMaxResults(100).list();
|
||||
txn.commit();
|
||||
session.close();
|
||||
----
|
||||
|
||||
A `DetachedCriteria` can also be used to express a subquery.
|
||||
`Criterion` instances involving subqueries can be obtained via `Subqueries` or `Property`.
|
||||
|
||||
[source,java]
|
||||
----
|
||||
DetachedCriteria avgWeight = DetachedCriteria.forClass(Cat.class)
|
||||
.setProjection( Property.forName("weight").avg() );
|
||||
session.createCriteria(Cat.class)
|
||||
.add( Property.forName("weight").gt(avgWeight) )
|
||||
.list();
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
DetachedCriteria weights = DetachedCriteria.forClass(Cat.class)
|
||||
.setProjection( Property.forName("weight") );
|
||||
session.createCriteria(Cat.class)
|
||||
.add( Subqueries.geAll("weight", weights) )
|
||||
.list();
|
||||
----
|
||||
|
||||
Correlated subqueries are also possible:
|
||||
|
||||
[source,java]
|
||||
----
|
||||
DetachedCriteria avgWeightForSex = DetachedCriteria.forClass(Cat.class, "cat2")
|
||||
.setProjection( Property.forName("weight").avg() )
|
||||
.add( Property.forName("cat2.sex").eqProperty("cat.sex") );
|
||||
session.createCriteria(Cat.class, "cat")
|
||||
.add( Property.forName("weight").gt(avgWeightForSex) )
|
||||
.list();
|
||||
----
|
||||
Example of multi-column restriction based on a subquery:
|
||||
|
||||
[source,java]
|
||||
----
|
||||
DetachedCriteria sizeQuery = DetachedCriteria.forClass( Man.class )
|
||||
.setProjection( Projections.projectionList().add( Projections.property( "weight" ) )
|
||||
.add( Projections.property( "height" ) ) )
|
||||
.add( Restrictions.eq( "name", "John" ) );
|
||||
session.createCriteria( Woman.class )
|
||||
.add( Subqueries.propertiesEq( new String[] { "weight", "height" }, sizeQuery ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
[[query-criteria-naturalid]]
|
||||
=== Queries by natural identifier
|
||||
|
||||
For most queries, including criteria queries, the query cache is not efficient because query cache invalidation occurs too frequently.
|
||||
However, there is a special kind of query where you can optimize the cache invalidation algorithm: lookups by a constant natural key.
|
||||
In some applications, this kind of query occurs frequently.
|
||||
The Criteria API provides special provision for this use case.
|
||||
|
||||
First, map the natural key of your entity using `<natural-id>` and enable use of the second-level cache.
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<class name="User">
|
||||
<cache usage="read-write"/>
|
||||
<id name="id">
|
||||
<generator class="increment"/>
|
||||
</id>
|
||||
<natural-id>
|
||||
<property name="name"/>
|
||||
<property name="org"/>
|
||||
</natural-id>
|
||||
<property name="password"/>
|
||||
</class>
|
||||
----
|
||||
|
||||
This functionality is not intended for use with entities with _mutable_ natural keys.
|
||||
|
||||
Once you have enabled the Hibernate query cache, the `Restrictions.naturalId()` allows you to make use of the more efficient cache algorithm.
|
||||
|
||||
[source,java]
|
||||
----
|
||||
session.createCriteria(User.class)
|
||||
.add( Restrictions.naturalId()
|
||||
.set("name", "gavin")
|
||||
.set("org", "hb")
|
||||
).setCacheable(true)
|
||||
.uniqueResult();
|
||||
----
|
|
@ -0,0 +1,49 @@
|
|||
[[appendix-legacy-domain-model]]
|
||||
== Legacy Domain Model
|
||||
:sourcedir: extras
|
||||
|
||||
.Declaring a version property in `hbm.xml`
|
||||
====
|
||||
[source,xml]
|
||||
----
|
||||
include::{sourcedir}/version_property.xml[]
|
||||
----
|
||||
====
|
||||
|
||||
[cols=",",]
|
||||
|=======================================================================
|
||||
|column |The name of the column holding the version number. Optional, defaults to the property name.
|
||||
|name |The name of a property of the persistent class.
|
||||
|type |The type of the version number. Optional, defaults to `integer`.
|
||||
|access |Hibernate's strategy for accessing the property value. Optional, defaults to `property`.
|
||||
|unsaved-value |Indicates that an instance is newly instantiated and thus unsaved.
|
||||
This distinguishes it from detached instances that were saved or loaded in a previous session.
|
||||
The default value, `undefined`, indicates that the identifier property value should be used. Optional.
|
||||
|generated |Indicates that the version property value is generated by the database. Optional, defaults to `never`.
|
||||
|insert |Whether or not to include the `version` column in SQL `insert` statements.
|
||||
Defaults to `true`, but you can set it to `false` if the database column is defined with a default value of `0`.
|
||||
|=======================================================================
|
||||
|
||||
.The timestamp element in `hbm.xml`
|
||||
====
|
||||
[source,xml]
|
||||
----
|
||||
include::{sourcedir}/timestamp_version.xml[]
|
||||
----
|
||||
====
|
||||
|
||||
[cols=",",]
|
||||
|=======================================================================
|
||||
|column |The name of the column which holds the timestamp. Optional, defaults to the property name
|
||||
|name |The name of a JavaBeans style property of Java type `Date` or `Timestamp` of the persistent class.
|
||||
|access |The strategy Hibernate uses to access the property value. Optional, defaults to `property`.
|
||||
|unsaved-value |A version property value which indicates that an instance is newly instantiated, and thus unsaved.
|
||||
This distinguishes it from detached instances that were saved or loaded in a previous session.
|
||||
The default value of `undefined` indicates that Hibernate uses the identifier property value.
|
||||
|source |Whether Hibernate retrieves the timestamp from the database or the current JVM.
|
||||
Database-based timestamps incur an overhead because Hibernate needs to query the database each time to determine the incremental next value.
|
||||
However, database-derived timestamps are safer to use in a clustered environment.
|
||||
Not all database dialects are known to support the retrieval of the database's current timestamp.
|
||||
Others may also be unsafe for locking because of lack of precision.
|
||||
|generated |Whether the timestamp property value is generated by the database. Optional, defaults to `never`.
|
||||
|=======================================================================
|
|
@ -0,0 +1,373 @@
|
|||
[[appendix-legacy-native-queries]]
|
||||
== Legacy Hibernate Native Queries
|
||||
|
||||
[[legacy-sql-named-queries]]
|
||||
=== Legacy Named SQL queries
|
||||
|
||||
Named SQL queries can also be defined during mapping and called in exactly the same way as a named HQL query.
|
||||
In this case, you do _not_ need to call `addEntity()` anymore.
|
||||
|
||||
.Named sql query using the `<sql-query>` mapping element
|
||||
====
|
||||
[source,xml]
|
||||
----
|
||||
<sql-query name = "persons">
|
||||
<return alias="person" class="eg.Person"/>
|
||||
SELECT person.NAME AS {person.name},
|
||||
person.AGE AS {person.age},
|
||||
person.SEX AS {person.sex}
|
||||
FROM PERSON person
|
||||
WHERE person.NAME LIKE :namePattern
|
||||
</sql-query>
|
||||
----
|
||||
====
|
||||
|
||||
.Execution of a named query
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
List people = session
|
||||
.getNamedQuery( "persons" )
|
||||
.setString( "namePattern", namePattern )
|
||||
.setMaxResults( 50 )
|
||||
.list();
|
||||
----
|
||||
====
|
||||
|
||||
The `<return-join>` element is used to join associations and the `<load-collection>` element is used to define queries which initialize collections.
|
||||
|
||||
.Named sql query with association
|
||||
====
|
||||
[source,xml]
|
||||
----
|
||||
<sql-query name = "personsWith">
|
||||
<return alias="person" class="eg.Person"/>
|
||||
<return-join alias="address" property="person.mailingAddress"/>
|
||||
SELECT person.NAME AS {person.name},
|
||||
person.AGE AS {person.age},
|
||||
person.SEX AS {person.sex},
|
||||
address.STREET AS {address.street},
|
||||
address.CITY AS {address.city},
|
||||
address.STATE AS {address.state},
|
||||
address.ZIP AS {address.zip}
|
||||
FROM PERSON person
|
||||
JOIN ADDRESS address
|
||||
ON person.ID = address.PERSON_ID AND address.TYPE='MAILING'
|
||||
WHERE person.NAME LIKE :namePattern
|
||||
</sql-query>
|
||||
----
|
||||
====
|
||||
|
||||
A named SQL query may return a scalar value.
|
||||
You must declare the column alias and Hibernate type using the `<return-scalar>` element:
|
||||
|
||||
.Named query returning a scalar
|
||||
====
|
||||
[source,xml]
|
||||
----
|
||||
<sql-query name = "mySqlQuery">
|
||||
<return-scalar column = "name" type="string"/>
|
||||
<return-scalar column = "age" type="long"/>
|
||||
SELECT p.NAME AS name,
|
||||
p.AGE AS age
|
||||
FROM PERSON p WHERE p.NAME LIKE 'Hiber%'
|
||||
</sql-query>
|
||||
----
|
||||
====
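
For comparison, a hedged sketch of declaring the same scalar results programmatically on a native query via `addScalar()`, using `org.hibernate.type.StandardBasicTypes`:

[source,java]
----
List results = session
    .createSQLQuery( "SELECT p.NAME AS name, p.AGE AS age FROM PERSON p WHERE p.NAME LIKE 'Hiber%'" )
    .addScalar( "name", StandardBasicTypes.STRING )
    .addScalar( "age", StandardBasicTypes.LONG )
    .list();
----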
|
||||
|
||||
You can externalize the resultset mapping information in a `<resultset>` element which will allow you to either reuse them across several named queries or through the `setResultSetMapping()` API.
|
||||
|
||||
.<resultset> mapping used to externalize mapping information
|
||||
====
|
||||
[source,xml]
|
||||
----
|
||||
<resultset name = "personAddress">
|
||||
<return alias="person" class="eg.Person"/>
|
||||
<return-join alias="address" property="person.mailingAddress"/>
|
||||
</resultset>
|
||||
|
||||
<sql-query name = "personsWith" resultset-ref="personAddress">
|
||||
SELECT person.NAME AS {person.name},
|
||||
person.AGE AS {person.age},
|
||||
person.SEX AS {person.sex},
|
||||
address.STREET AS {address.street},
|
||||
address.CITY AS {address.city},
|
||||
address.STATE AS {address.state},
|
||||
address.ZIP AS {address.zip}
|
||||
FROM PERSON person
|
||||
JOIN ADDRESS address
|
||||
ON person.ID = address.PERSON_ID AND address.TYPE='MAILING'
|
||||
WHERE person.NAME LIKE :namePattern
|
||||
</sql-query>
|
||||
----
|
||||
====
|
||||
|
||||
Alternatively, you can use the resultset mapping information defined in your hbm files directly in Java code.
|
||||
|
||||
.Programmatically specifying the result mapping information
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
List cats = session
|
||||
.createSQLQuery( "select {cat.*}, {kitten.*} from cats cat, cats kitten where kitten.mother = cat.id" )
|
||||
.setResultSetMapping("catAndKitten")
|
||||
.list();
|
||||
----
|
||||
====
|
||||
|
||||
[[legacy-propertyresults]]
|
||||
=== Legacy return-property to explicitly specify column/alias names
|
||||
|
||||
You can explicitly tell Hibernate what column aliases to use with `<return-property>`, instead of using the `{}` syntax to let Hibernate inject its own aliases.
|
||||
For example:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<sql-query name = "mySqlQuery">
|
||||
<return alias = "person" class = "eg.Person">
|
||||
<return-property name = "name" column = "myName"/>
|
||||
<return-property name = "age" column = "myAge"/>
|
||||
<return-property name = "sex" column = "mySex"/>
|
||||
</return>
|
||||
SELECT person.NAME AS myName,
|
||||
person.AGE AS myAge,
|
||||
person.SEX AS mySex
|
||||
FROM PERSON person WHERE person.NAME LIKE :name
|
||||
</sql-query>
|
||||
----
|
||||
|
||||
`<return-property>` also works with multiple columns.
|
||||
This solves a limitation of the `{}` syntax, which does not allow fine-grained control of multi-column properties.
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<sql-query name = "organizationCurrentEmployments">
|
||||
<return alias = "emp" class = "Employment">
|
||||
<return-property name = "salary">
|
||||
<return-column name = "VALUE"/>
|
||||
<return-column name = "CURRENCY"/>
|
||||
</return-property>
|
||||
<return-property name = "endDate" column = "myEndDate"/>
|
||||
</return>
|
||||
SELECT EMPLOYEE AS {emp.employee}, EMPLOYER AS {emp.employer},
|
||||
STARTDATE AS {emp.startDate}, ENDDATE AS {emp.endDate},
|
||||
REGIONCODE as {emp.regionCode}, EID AS {emp.id}, VALUE, CURRENCY
|
||||
FROM EMPLOYMENT
|
||||
WHERE EMPLOYER = :id AND ENDDATE IS NULL
|
||||
ORDER BY STARTDATE ASC
|
||||
</sql-query>
|
||||
----
|
||||
|
||||
In this example `<return-property>` was used in combination with the `{}` syntax for injection.
|
||||
This allows users to choose how they want to refer to columns and properties.
|
||||
|
||||
If your mapping has a discriminator you must use `<return-discriminator>` to specify the discriminator column.
|
||||
|
||||
[[legacy-sp_query]]
|
||||
=== Legacy stored procedures for querying
|
||||
|
||||
Hibernate provides support for queries via stored procedures and functions.
|
||||
Most of the following documentation is equivalent for both.
|
||||
The stored procedure/function must return a resultset as the first out-parameter to be able to work with Hibernate.
|
||||
An example of such a stored function in Oracle 9 and higher is as follows:
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
CREATE OR REPLACE FUNCTION selectAllEmployments
|
||||
RETURN SYS_REFCURSOR
|
||||
AS
|
||||
st_cursor SYS_REFCURSOR;
|
||||
BEGIN
|
||||
OPEN st_cursor FOR
|
||||
SELECT EMPLOYEE, EMPLOYER,
|
||||
STARTDATE, ENDDATE,
|
||||
REGIONCODE, EID, VALUE, CURRENCY
|
||||
FROM EMPLOYMENT;
|
||||
RETURN st_cursor;
|
||||
END;
|
||||
----
|
||||
|
||||
To use this query in Hibernate you need to map it via a named query.
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<sql-query name = "selectAllEmployees_SP" callable = "true">
|
||||
<return alias="emp" class="Employment">
|
||||
<return-property name = "employee" column = "EMPLOYEE"/>
|
||||
<return-property name = "employer" column = "EMPLOYER"/>
|
||||
<return-property name = "startDate" column = "STARTDATE"/>
|
||||
<return-property name = "endDate" column = "ENDDATE"/>
|
||||
<return-property name = "regionCode" column = "REGIONCODE"/>
|
||||
<return-property name = "id" column = "EID"/>
|
||||
<return-property name = "salary">
|
||||
<return-column name = "VALUE"/>
|
||||
<return-column name = "CURRENCY"/>
|
||||
</return-property>
|
||||
</return>
|
||||
{ ? = call selectAllEmployments() }
|
||||
</sql-query>
|
||||
----
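
Executing the mapped query then looks like any other named query; a short sketch:

[source,java]
----
List employments = session
    .getNamedQuery( "selectAllEmployees_SP" )
    .list();
----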
|
||||
|
||||
Stored procedures currently only return scalars and entities.
|
||||
`<return-join>` and `<load-collection>` are not supported.
|
||||
|
||||
[[legacy-sql-limits-storedprocedures]]
|
||||
=== Legacy rules/limitations for using stored procedures
|
||||
|
||||
You cannot use stored procedures with Hibernate unless you follow some procedure/function rules.
|
||||
If a procedure does not follow those rules, it is not usable with Hibernate.
|
||||
If you still want to use these procedures you have to execute them via `session.doWork()`.
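
A hedged sketch of such a fallback via `Session#doWork()`, reusing the `selectAllEmployments` function shown later in this appendix; `oracle.jdbc.OracleTypes` comes from the Oracle JDBC driver:

[source,java]
----
session.doWork( connection -> {
    try ( CallableStatement statement =
            connection.prepareCall( "{ ? = call selectAllEmployments() }" ) ) {
        // register and read the REF CURSOR out-parameter manually
        statement.registerOutParameter( 1, oracle.jdbc.OracleTypes.CURSOR );
        statement.execute();
        try ( ResultSet resultSet = (ResultSet) statement.getObject( 1 ) ) {
            while ( resultSet.next() ) {
                // process each row as needed
            }
        }
    }
} );
----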
|
||||
|
||||
The rules are different for each database since database vendors have different stored procedure semantics/syntax.
|
||||
|
||||
Stored procedure queries cannot be paged with `setFirstResult()/setMaxResults()`.
|
||||
|
||||
The recommended call form is standard SQL92: `{ ? = call functionName(<parameters>) }` or `{ ? = call procedureName(<parameters>) }`.
|
||||
Native call syntax is not supported.
|
||||
|
||||
For Oracle the following rules apply:
|
||||
|
||||
* A function must return a result set.
|
||||
The first parameter of a procedure must be an `OUT` that returns a result set.
|
||||
This is done by using a `SYS_REFCURSOR` type in Oracle 9 or 10.
|
||||
In Oracle you need to define a `REF CURSOR` type.
|
||||
See Oracle literature for further information.
|
||||
|
||||
For Sybase or MS SQL server the following rules apply:
|
||||
|
||||
* The procedure must return a result set.
|
||||
Note that since these servers can return multiple result sets and update counts, Hibernate will iterate the results and take the first result that is a result set as its return value.
|
||||
Everything else will be discarded.
|
||||
* If you can enable `SET NOCOUNT ON` in your procedure it will probably be more efficient, but this is not a requirement.
|
||||
|
||||
[[legacy-sql-cud]]
|
||||
=== Legacy custom SQL for create, update and delete
|
||||
|
||||
Hibernate can use custom SQL for create, update, and delete operations.
|
||||
The SQL can be overridden at the statement level or individual column level.
|
||||
This section describes statement overrides.
|
||||
For columns, see <<chapters/domain/basic_types.adoc#mapping-column-read-and-write,Column transformers: read and write expressions>>.
|
||||
The following example shows how to define custom SQL operations using XML mapping.
|
||||
|
||||
.Custom CRUD XML
|
||||
====
|
||||
[source,xml]
|
||||
----
|
||||
<class name = "Person">
|
||||
<id name = "id">
|
||||
<generator class = "increment"/>
|
||||
</id>
|
||||
<property name = "name" not-null = "true"/>
|
||||
<sql-insert>INSERT INTO PERSON (NAME, ID) VALUES ( UPPER(?), ? )</sql-insert>
|
||||
<sql-update>UPDATE PERSON SET NAME=UPPER(?) WHERE ID=?</sql-update>
|
||||
<sql-delete>DELETE FROM PERSON WHERE ID=?</sql-delete>
|
||||
</class>
|
||||
----
|
||||
====
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
If you expect to call a stored procedure, be sure to set the `callable` attribute to `true`, both in annotations and in XML.
|
||||
====
|
||||
|
||||
To check that the execution happens correctly, Hibernate allows you to define one of those three strategies:
|
||||
|
||||
* none: no check is performed; the stored procedure is expected to fail upon issues
|
||||
* count: uses the returned row count to check that the update was successful
|
||||
* param: like COUNT but using an output parameter rather than the standard mechanism
|
||||
|
||||
To define the result check style, use the `check` parameter, which is again available both in annotations and in XML.
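
For instance, the annotation-based equivalent of the XML above might look like the following sketch (the `check` style shown is just an example, not a requirement):

[source,java]
----
@Entity
@SQLInsert( sql = "INSERT INTO PERSON (NAME, ID) VALUES ( UPPER(?), ? )", check = ResultCheckStyle.COUNT )
@SQLUpdate( sql = "UPDATE PERSON SET NAME = UPPER(?) WHERE ID = ?" )
@SQLDelete( sql = "DELETE FROM PERSON WHERE ID = ?" )
public class Person {

    @Id
    @GeneratedValue
    private Long id;

    private String name;
}
----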
|
||||
|
||||
Last but not least, stored procedures are in most cases required to return the number of rows inserted, updated and deleted.
|
||||
Hibernate always registers the first statement parameter as a numeric output parameter for the CUD operations:
|
||||
|
||||
.Stored procedures and their return value
|
||||
====
|
||||
[source]
|
||||
----
|
||||
CREATE OR REPLACE FUNCTION updatePerson (uid IN NUMBER, uname IN VARCHAR2)
|
||||
RETURN NUMBER IS
|
||||
BEGIN
|
||||
|
||||
update PERSON
|
||||
set
|
||||
NAME = uname
|
||||
where
|
||||
ID = uid;
|
||||
|
||||
return SQL%ROWCOUNT;
|
||||
|
||||
END updatePerson;
|
||||
----
|
||||
====
|
||||
|
||||
[[legacy-sql-load]]
|
||||
=== Legacy custom SQL for loading
|
||||
|
||||
You can also declare your own SQL (or HQL) queries for entity loading.
|
||||
As with inserts, updates, and deletes, this can be done at the individual column level, as described in
<<chapters/domain/basic_types.adoc#mapping-column-read-and-write,Column transformers: read and write expressions>>, or at the statement level.
|
||||
Here is an example of a statement level override:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<sql-query name = "person">
|
||||
<return alias = "pers" class = "Person" lock-mode = "upgrade"/>
|
||||
SELECT NAME AS {pers.name}, ID AS {pers.id}
|
||||
FROM PERSON
|
||||
WHERE ID=?
|
||||
FOR UPDATE
|
||||
</sql-query>
|
||||
----
|
||||
|
||||
This is just a named query declaration, as discussed earlier. You can reference this named query in a class mapping:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<class name = "Person">
|
||||
<id name = "id">
|
||||
<generator class = "increment"/>
|
||||
</id>
|
||||
<property name = "name" not-null = "true"/>
|
||||
<loader query-ref = "person"/>
|
||||
</class>
|
||||
----
|
||||
|
||||
This even works with stored procedures.
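
For illustration, once the `loader` element is in place, an ordinary load triggers the custom SQL (the identifier value below is arbitrary):

[source,java]
----
// Hibernate issues the named "person" query instead of its generated SELECT
Person person = session.get( Person.class, 1L );
----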
|
||||
|
||||
You can even define a query for collection loading:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<set name = "employments" inverse = "true">
|
||||
<key/>
|
||||
<one-to-many class = "Employment"/>
|
||||
<loader query-ref = "employments"/>
|
||||
</set>
|
||||
----
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<sql-query name = "employments">
|
||||
<load-collection alias = "emp" role = "Person.employments"/>
|
||||
SELECT {emp.*}
|
||||
FROM EMPLOYMENT emp
|
||||
WHERE EMPLOYER = :id
|
||||
ORDER BY STARTDATE ASC, EMPLOYEE ASC
|
||||
</sql-query>
|
||||
----
|
||||
|
||||
You can also define an entity loader that loads a collection by join fetching:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<sql-query name = "person">
|
||||
<return alias = "pers" class = "Person"/>
|
||||
<return-join alias = "emp" property = "pers.employments"/>
|
||||
SELECT NAME AS {pers.*}, {emp.*}
|
||||
FROM PERSON pers
|
||||
LEFT OUTER JOIN EMPLOYMENT emp
|
||||
ON pers.ID = emp.PERSON_ID
|
||||
WHERE ID=?
|
||||
</sql-query>
|
||||
----
|
|
@ -0,0 +1,15 @@
|
|||
<!--
|
||||
~ Hibernate, Relational Persistence for Idiomatic Java
|
||||
~
|
||||
~ License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
~ See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
-->
|
||||
<timestamp
|
||||
column="timestamp_column"
|
||||
name="propertyName"
|
||||
access="field|property|ClassName"
|
||||
unsaved-value="null|undefined"
|
||||
source="vm|db"
|
||||
generated="never|always"
|
||||
node="element-name|@attribute-name|element/@attribute|."
|
||||
/>
|
|
@ -0,0 +1,16 @@
|
|||
<!--
|
||||
~ Hibernate, Relational Persistence for Idiomatic Java
|
||||
~
|
||||
~ License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
~ See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
-->
|
||||
<version
|
||||
column="version_column"
|
||||
name="propertyName"
|
||||
type="typename"
|
||||
access="field|property|ClassName"
|
||||
unsaved-value="null|negative|undefined"
|
||||
generated="never|always"
|
||||
insert="true|false"
|
||||
node="element-name|@attribute-name|element/@attribute|."
|
||||
/>
|
|
@ -0,0 +1,31 @@
|
|||
[[architecture]]
|
||||
== Architecture
|
||||
|
||||
[[architecture-overview]]
|
||||
=== Overview
|
||||
|
||||
image:images/architecture/data_access_layers.svg[Data Access Layers]
|
||||
|
||||
Hibernate, as an ORM solution, effectively "sits between" the Java application data access layer and the Relational Database, as can be seen in the diagram above.
|
||||
The Java application makes use of the Hibernate APIs to load, store, query, etc. its domain data.
|
||||
Here we will introduce the essential Hibernate APIs.
|
||||
This will be a brief introduction; we will discuss these contracts in detail later.
|
||||
|
||||
As a JPA provider, Hibernate implements the Java Persistence API specifications and the association between JPA interfaces and Hibernate specific implementations can be visualized in the following diagram:
|
||||
|
||||
image:images/architecture/JPA_Hibernate.svg[image]
|
||||
|
||||
SessionFactory (`org.hibernate.SessionFactory`):: A thread-safe (and immutable) representation of the mapping of the application domain model to a database.
|
||||
Acts as a factory for `org.hibernate.Session` instances. The `EntityManagerFactory` is the JPA equivalent of a `SessionFactory` and basically those two converge into the same `SessionFactory` implementation.
|
||||
+
|
||||
A `SessionFactory` is very expensive to create, so, for any given database, the application should have only one associated `SessionFactory`.
|
||||
The `SessionFactory` maintains services that Hibernate uses across all `Session(s)` such as second level caches, connection pools, transaction system integrations, etc.
|
||||
|
||||
Session (`org.hibernate.Session`):: A single-threaded, short-lived object conceptually modeling a "Unit of Work" <<Bibliography.adoc#PoEAA,PoEAA>>.
|
||||
In JPA nomenclature, the `Session` is represented by an `EntityManager`.
|
||||
+
|
||||
Behind the scenes, the Hibernate `Session` wraps a JDBC `java.sql.Connection` and acts as a factory for `org.hibernate.Transaction` instances.
|
||||
It maintains a generally "repeatable read" persistence context (first level cache) of the application domain model.
|
||||
|
||||
Transaction (`org.hibernate.Transaction`):: A single-threaded, short-lived object used by the application to demarcate individual physical transaction boundaries.
|
||||
`EntityTransaction` is the JPA equivalent and both act as an abstraction API to isolate the application from the underlying transaction system in use (JDBC or JTA).
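
Putting these three contracts together, a minimal usage sketch (assuming an already-built `SessionFactory` and a hypothetical `Person` entity) might look like this:

[source, JAVA, indent=0]
----
Session session = sessionFactory.openSession();
Transaction txn = session.beginTransaction();
try {
	// the "Unit of Work": persist a new entity within one physical transaction
	session.persist( new Person( "John Doe" ) );
	txn.commit();
}
catch ( RuntimeException e ) {
	txn.rollback();
	throw e;
}
finally {
	session.close();
}
----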
|
|
@ -0,0 +1,280 @@
|
|||
[[batch]]
|
||||
== Batching
|
||||
:sourcedir: ../../../../../test/java/org/hibernate/userguide/batch
|
||||
|
||||
[[batch-jdbcbatch]]
|
||||
=== JDBC batching
|
||||
|
||||
JDBC offers support for batching together SQL statements that can be represented as a single PreparedStatement.
|
||||
Implementation wise this generally means that drivers will send the batched operation to the server in one call,
|
||||
which can save on network calls to the database. Hibernate can leverage JDBC batching.
|
||||
The following settings control this behavior.
|
||||
|
||||
`hibernate.jdbc.batch_size`::
|
||||
Controls the maximum number of statements Hibernate will batch together before asking the driver to execute the batch.
|
||||
Zero or a negative number disables this feature.
|
||||
|
||||
`hibernate.jdbc.batch_versioned_data`::
|
||||
Some JDBC drivers return incorrect row counts when a batch is executed.
|
||||
If your JDBC driver falls into this category this setting should be set to `false`.
|
||||
Otherwise, it is safe to enable this which will allow Hibernate to still batch the DML for versioned entities and still use the returned row counts for optimistic lock checks.
|
||||
Since 5.0, it defaults to true. Previously (versions 3.x and 4.x), it used to be false.
|
||||
|
||||
`hibernate.jdbc.batch.builder`::
|
||||
Names the implementation class used to manage batching capabilities.
|
||||
It is almost never a good idea to switch from Hibernate's default implementation.
|
||||
But if you wish to, this setting would name the `org.hibernate.engine.jdbc.batch.spi.BatchBuilder` implementation to use.
|
||||
|
||||
`hibernate.order_updates`::
|
||||
Forces Hibernate to order SQL updates by the entity type and the primary key value of the items being updated.
|
||||
This allows for more batching to be used. It will also result in fewer transaction deadlocks in highly concurrent systems.
|
||||
Comes with a performance hit, so benchmark before and after to see if this actually helps or hurts your application.
|
||||
|
||||
`hibernate.order_inserts`::
|
||||
Forces Hibernate to order inserts to allow for more batching to be used.
|
||||
Comes with a performance hit, so benchmark before and after to see if this actually helps or hurts your application.
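
As a rough sketch, these settings can be supplied when building the `SessionFactory`; the values below are examples only:

[source, JAVA, indent=0]
----
StandardServiceRegistry serviceRegistry = new StandardServiceRegistryBuilder()
	.applySetting( "hibernate.jdbc.batch_size", "25" )
	.applySetting( "hibernate.order_inserts", "true" )
	.applySetting( "hibernate.order_updates", "true" )
	.build();
----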
|
||||
|
||||
[[batch-session-batch]]
|
||||
=== Session batching
|
||||
|
||||
The following example shows an anti-pattern for batch inserts.
|
||||
|
||||
[[batch-session-batch-example]]
|
||||
.Naive way to insert 100 000 entities with Hibernate
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/BatchTest.java[tags=batch-session-batch-example]
|
||||
----
|
||||
====
|
||||
|
||||
There are several problems associated with this example:
|
||||
|
||||
. Hibernate caches all the newly inserted `Customer` instances in the session-level cache, so, when the transaction ends, 100 000 entities are managed by the persistence context.
If the maximum memory allocated to the JVM is rather low, this example could fail with an `OutOfMemoryError`.
By default, the Java 1.8 JVM allocates either 1/4 of the available RAM or 1 GB, which can easily accommodate 100 000 objects on the heap.
. Long-running transactions can deplete a connection pool, so other transactions don't get a chance to proceed.
|
||||
. JDBC batching is not enabled by default, so every insert statement requires a database roundtrip.
|
||||
To enable JDBC batching, set the `hibernate.jdbc.batch_size` property to an integer between 10 and 50.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
Hibernate disables insert batching at the JDBC level transparently if you use an identity identifier generator.
|
||||
====
|
||||
|
||||
[[batch-session-batch-insert]]
|
||||
==== Batch inserts
|
||||
|
||||
When you make new objects persistent, call the session's `flush()` and `clear()` methods regularly to control the size of the first-level cache.
|
||||
|
||||
[[batch-session-batch-insert-example]]
|
||||
.Flushing and clearing the `Session`
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/BatchTest.java[tags=batch-session-batch-insert-example]
|
||||
----
|
||||
====
|
||||
|
||||
[[batch-session-scroll]]
|
||||
==== Session scroll
|
||||
|
||||
When you retrieve and update data, `flush()` and `clear()` the session regularly.
|
||||
In addition, use method `scroll()` to take advantage of server-side cursors for queries that return many rows of data.
|
||||
|
||||
[[batch-session-scroll-example]]
|
||||
.Using `scroll()`
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/BatchTest.java[tags=batch-session-scroll-example]
|
||||
----
|
||||
====
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
You should always close the `ScrollableResults`, to deallocate the associated `PreparedStatement` and avoid resource leaking.
|
||||
====
|
||||
|
||||
==== StatelessSession
|
||||
|
||||
`StatelessSession` is a command-oriented API provided by Hibernate.
|
||||
Use it to stream data to and from the database in the form of detached objects.
|
||||
A `StatelessSession` has no persistence context associated with it and does not provide many of the higher-level life cycle semantics.
|
||||
|
||||
Some of the things not provided by a `StatelessSession` include:
|
||||
|
||||
* a first-level cache
|
||||
* interaction with any second-level or query cache
|
||||
* transactional write-behind or automatic dirty checking
|
||||
|
||||
Limitations of `StatelessSession`:
|
||||
|
||||
* Operations performed using a stateless session never cascade to associated instances.
|
||||
* Collections are ignored by a stateless session.
|
||||
* Lazy loading of associations is not supported.
|
||||
* Operations performed via a stateless session bypass Hibernate's event model and interceptors.
|
||||
* Due to the lack of a first-level cache, stateless sessions are vulnerable to data aliasing effects.
|
||||
* A stateless session is a lower-level abstraction that is much closer to the underlying JDBC.
|
||||
|
||||
[[batch-stateless-session-example]]
|
||||
.Using a `StatelessSession`
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/BatchTest.java[tags=batch-stateless-session-example]
|
||||
----
|
||||
====
|
||||
|
||||
The `Customer` instances returned by the query are immediately detached.
|
||||
They are never associated with any persistence context.
|
||||
|
||||
The `insert()`, `update()`, and `delete()` operations defined by the `StatelessSession` interface operate directly on database rows.
|
||||
They cause the corresponding SQL operations to be executed immediately.
|
||||
They have different semantics from the `save()`, `saveOrUpdate()`, and `delete()` operations defined by the `Session` interface.
|
||||
|
||||
[[batch-bulk-hql]]
|
||||
=== Hibernate Query Language for DML
|
||||
|
||||
DML, or Data Manipulation Language, refers to SQL statements such as `INSERT`, `UPDATE`, and `DELETE`.
|
||||
Hibernate provides methods for bulk SQL-style DML statement execution, in the form of Hibernate Query Language (HQL).
|
||||
|
||||
[[batch-bulk-hql-update-delete]]
|
||||
==== HQL/JPQL for UPDATE and DELETE
|
||||
|
||||
Both the Hibernate native Query Language and JPQL (Java Persistence Query Language) provide support for bulk UPDATE and DELETE.
|
||||
|
||||
[[batch-bulk-hql-update-delete-example]]
|
||||
.Pseudo-syntax for UPDATE and DELETE statements using HQL
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
UPDATE FROM EntityName e SET e.property = ? WHERE e.name = ?
|
||||
|
||||
DELETE FROM EntityName e WHERE e.name = ?
|
||||
----
|
||||
====
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The `FROM` and `WHERE` clauses are each optional, but it's good practice to use them.
|
||||
====
|
||||
|
||||
The `FROM` clause can only refer to a single entity, which can be aliased.
|
||||
If the entity name is aliased, any property references must be qualified using that alias.
|
||||
If the entity name is not aliased, then it is illegal for any property references to be qualified.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
Joins, either implicit or explicit, are prohibited in a bulk HQL query.
|
||||
You can use sub-queries in the `WHERE` clause, and the sub-queries themselves can contain joins.
|
||||
====
|
||||
|
||||
[[batch-bulk-jpql-update-example]]
|
||||
.Executing a JPQL `UPDATE`, using the `Query.executeUpdate()`
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/BatchTest.java[tags=batch-bulk-jpql-update-example]
|
||||
----
|
||||
====
|
||||
|
||||
[[batch-bulk-hql-update-example]]
|
||||
.Executing an HQL `UPDATE`, using the `Query.executeUpdate()`
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/BatchTest.java[tags=batch-bulk-hql-update-example]
|
||||
----
|
||||
====
|
||||
|
||||
In keeping with the EJB3 specification, HQL `UPDATE` statements, by default, do not affect the version or the timestamp property values for the affected entities.
|
||||
You can use a versioned update to force Hibernate to reset the version or timestamp property values, by adding the `VERSIONED` keyword after the `UPDATE` keyword.
|
||||
|
||||
[[batch-bulk-hql-update-version-example]]
|
||||
.Updating the version or timestamp
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/BatchTest.java[tags=batch-bulk-hql-update-version-example]
|
||||
----
|
||||
====
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
If you use the `VERSIONED` statement, you cannot use custom version types, which use class `org.hibernate.usertype.UserVersionType`.
|
||||
|
||||
This feature is only available in HQL since it's not standardized by JPA.
|
||||
====
|
||||
|
||||
[[batch-bulk-jpql-delete-example]]
|
||||
.A JPQL `DELETE` statement
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/BatchTest.java[tags=batch-bulk-jpql-delete-example]
|
||||
----
|
||||
====
|
||||
|
||||
[[batch-bulk-hql-delete-example]]
|
||||
.An HQL `DELETE` statement
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/BatchTest.java[tags=batch-bulk-hql-delete-example]
|
||||
----
|
||||
====
|
||||
|
||||
Method `Query.executeUpdate()` returns an `int` value, which indicates the number of entities affected by the operation.
This may or may not correlate to the number of rows affected in the database.
A JPQL/HQL bulk operation might result in multiple SQL statements being executed, such as for joined-subclass.
|
||||
In the example of joined-subclass, a `DELETE` against one of the subclasses may actually result in deletes in the tables underlying the join, or further down the inheritance hierarchy.
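
For example (again with a hypothetical `Customer` entity), the returned value counts entities rather than rows:

[source, JAVA, indent=0]
----
// For a joined-subclass hierarchy this one statement may translate into
// several SQL DELETEs, yet the returned count reflects affected entities.
int affectedEntities = session.createQuery(
	"delete Customer c where c.active = false" )
.executeUpdate();
----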
|
||||
|
||||
==== HQL syntax for INSERT
|
||||
|
||||
.Pseudo-syntax for INSERT statements
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
INSERT INTO EntityName
|
||||
properties_list
|
||||
SELECT properties_list
|
||||
FROM ...
|
||||
----
|
||||
====
|
||||
|
||||
Only the `INSERT INTO ... SELECT ...` form is supported.
|
||||
You cannot specify explicit values to insert.
|
||||
|
||||
The `properties_list` is analogous to the column specification in the `SQL` `INSERT` statement.
|
||||
For entities involved in mapped inheritance, you can only use properties directly defined on that given class-level in the `properties_list`.
|
||||
Superclass properties are not allowed and subclass properties are irrelevant.
|
||||
In other words, `INSERT` statements are inherently non-polymorphic.
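
For instance, assuming hypothetical `Customer` and `ArchivedCustomer` entities, an insert-select restricted to directly-declared properties might look like:

[source, JAVA, indent=0]
----
int createdEntities = session.createQuery(
	"insert into ArchivedCustomer ( id, name ) " +
	"select c.id, c.name from Customer c" )
.executeUpdate();
----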
|
||||
|
||||
The SELECT statement can be any valid HQL select query, but the return types must match the types expected by the INSERT.
|
||||
Hibernate verifies the return types during query compilation, instead of expecting the database to check it.
|
||||
Problems might result from Hibernate types which are equivalent, rather than equal.
|
||||
One such example is a mismatch between a property defined as an `org.hibernate.type.DateType` and a property defined as an `org.hibernate.type.TimestampType`,
|
||||
even though the database may not make a distinction, or may be capable of handling the conversion.
|
||||
|
||||
If the id property is not specified in the `properties_list`,
|
||||
Hibernate generates a value automatically.
|
||||
Automatic generation is only available if you use ID generators which operate on the database.
|
||||
Otherwise, Hibernate throws an exception during parsing.
|
||||
Available in-database generators are `org.hibernate.id.SequenceGenerator` and its subclasses, and objects which implement `org.hibernate.id.PostInsertIdentifierGenerator`.
|
||||
The most notable exception is `org.hibernate.id.TableHiLoGenerator`, which does not expose a selectable way to get its values.
|
||||
|
||||
For properties mapped as either version or timestamp, the insert statement gives you two options.
|
||||
You can either specify the property in the `properties_list`, in which case its value is taken from the corresponding select expressions, or omit it from the `properties_list`,
in which case the seed value defined by the `org.hibernate.type.VersionType` is used.
|
||||
|
||||
[[batch-bulk-hql-insert-example]]
|
||||
.HQL INSERT statement
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/BatchTest.java[tags=batch-bulk-hql-insert-example]
|
||||
----
|
||||
====
|
||||
|
||||
This section is only a brief overview of HQL. For more information, see <<chapters/query/hql/HQL.adoc#hql,HQL>>.
|
|
@ -0,0 +1,243 @@
|
|||
[[bootstrap]]
|
||||
== Bootstrap
|
||||
:sourcedir: ../../../../../test/java/org/hibernate/userguide/bootstrap
|
||||
|
||||
The term bootstrapping refers to initializing and starting a software component.
|
||||
In Hibernate, we are specifically talking about the process of building a fully functional `SessionFactory` instance or `EntityManagerFactory` instance, for JPA.
|
||||
The process is very different for each.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
This chapter will not focus on all the possibilities of bootstrapping.
|
||||
Those will be covered in the specific, more relevant chapters later on.
|
||||
Instead, we focus here on the API calls needed to perform the bootstrapping.
|
||||
====
|
||||
|
||||
[TIP]
|
||||
====
|
||||
During the bootstrap process, you might want to customize Hibernate behavior so make sure you check the <<appendices/Configurations.adoc#configurations,Configurations>> section as well.
|
||||
====
|
||||
|
||||
[[bootstrap-native]]
|
||||
=== Native Bootstrapping
|
||||
|
||||
This section discusses the process of bootstrapping a Hibernate `SessionFactory`.
|
||||
Specifically it discusses the bootstrapping APIs as redesigned in 5.0.
|
||||
For a discussion of the legacy bootstrapping API, see <<appendices/Legacy_Bootstrap.adoc#appendix-legacy-bootstrap,Legacy Bootstrapping>>
|
||||
|
||||
[[bootstrap-native-registry]]
|
||||
==== Building the ServiceRegistry
|
||||
|
||||
The first step in native bootstrapping is the building of a `ServiceRegistry` holding the services Hibernate will need during bootstrapping and at run time.
|
||||
|
||||
Actually, we are concerned with building two different `ServiceRegistry` instances.
|
||||
First is the `org.hibernate.boot.registry.BootstrapServiceRegistry`.
|
||||
The `BootstrapServiceRegistry` is intended to hold services that Hibernate needs at both bootstrap and run time.
|
||||
This boils down to 3 services:
|
||||
|
||||
`org.hibernate.boot.registry.classloading.spi.ClassLoaderService`:: which controls how Hibernate interacts with `ClassLoader`s
|
||||
`org.hibernate.integrator.spi.IntegratorService`:: which controls the management and discovery of `org.hibernate.integrator.spi.Integrator` instances.
|
||||
`org.hibernate.boot.registry.selector.spi.StrategySelector`:: which controls how Hibernate resolves implementations of various strategy contracts.
|
||||
This is a very powerful service, but a full discussion of it is beyond the scope of this guide.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
If you are ok with the default behavior of Hibernate in regards to these `BootstrapServiceRegistry` services
|
||||
(which is quite often the case, especially in stand-alone environments), then building the `BootstrapServiceRegistry` can be skipped.
|
||||
====
|
||||
|
||||
If you wish to alter how the `BootstrapServiceRegistry` is built, that is controlled through the `org.hibernate.boot.registry.BootstrapServiceRegistryBuilder`:
|
||||
|
||||
[[bootstrap-bootstrap-native-registry-BootstrapServiceRegistry-example]]
|
||||
.Controlling `BootstrapServiceRegistry` building
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/BootstrapTest.java[tags=bootstrap-bootstrap-native-registry-BootstrapServiceRegistry-example]
|
||||
----
|
||||
====
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The services of the `BootstrapServiceRegistry` cannot be extended (added to) nor overridden (replaced).
|
||||
====
|
||||
|
||||
The second ServiceRegistry is the `org.hibernate.boot.registry.StandardServiceRegistry`.
|
||||
You will almost always need to configure the `StandardServiceRegistry`, which is done through `org.hibernate.boot.registry.StandardServiceRegistryBuilder`:
|
||||
|
||||
[[bootstrap-bootstrap-native-registry-StandardServiceRegistryBuilder-example]]
|
||||
.Building a `StandardServiceRegistry` via `StandardServiceRegistryBuilder`
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/BootstrapTest.java[tags=bootstrap-bootstrap-native-registry-StandardServiceRegistryBuilder-example]
|
||||
----
|
||||
====
|
||||
|
||||
A `StandardServiceRegistry` is also highly configurable via the StandardServiceRegistryBuilder API.
|
||||
See the `StandardServiceRegistryBuilder` https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/registry/StandardServiceRegistryBuilder.html[Javadocs] for more details.
|
||||
|
||||
Some specific methods of interest:
|
||||
|
||||
[[bootstrap-bootstrap-native-registry-MetadataSources-example]]
|
||||
.Configuring a `MetadataSources`
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/BootstrapTest.java[tags=bootstrap-bootstrap-native-registry-MetadataSources-example]
|
||||
----
|
||||
====
|
||||
|
||||
[[bootstrap-event-listener-registration]]
|
||||
==== Event Listener registration
|
||||
|
||||
The main use cases for an `org.hibernate.integrator.spi.Integrator` right now are registering event listeners and providing services (see `org.hibernate.integrator.spi.ServiceContributingIntegrator`).
|
||||
With 5.0 we plan on expanding that to allow altering the metamodel describing the mapping between object and relational models.
|
||||
|
||||
[[bootstrap-event-listener-registration-example]]
|
||||
.Configuring an event listener
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/BootstrapTest.java[tags=bootstrap-event-listener-registration-example]
|
||||
----
|
||||
====
|
||||
|
||||
[[bootstrap-native-metadata]]
|
||||
==== Building the Metadata
|
||||
|
||||
The second step in native bootstrapping is the building of a `org.hibernate.boot.Metadata` object containing the parsed representations of an application domain model and its mapping to a database.
|
||||
The first thing we obviously need to build a parsed representation is the source information to be parsed (annotated classes, `hbm.xml` files, `orm.xml` files).
|
||||
This is the purpose of `org.hibernate.boot.MetadataSources`:
|
||||
|
||||
`MetadataSources` has many other methods as well; explore its API and https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/MetadataSources.html[Javadocs] for more information.
Also, all methods on `MetadataSources` offer fluent-style call chaining:
|
||||
|
||||
[[bootstrap-native-metadata-source-example]]
|
||||
.Configuring a `MetadataSources` with method chaining
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/BootstrapTest.java[tags=bootstrap-native-metadata-source-example]
|
||||
----
|
||||
====
|
||||
|
||||
Once we have the sources of mapping information defined, we need to build the `Metadata` object.
|
||||
If you are ok with the default behavior in building the Metadata then you can simply call the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/MetadataSources.html#buildMetadata--[`buildMetadata`] method of the `MetadataSources`.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
Notice that a `ServiceRegistry` can be passed at a number of points in this bootstrapping process.
|
||||
The suggested approach is to build a `StandardServiceRegistry` yourself and pass that along to the `MetadataSources` constructor.
|
||||
From there, `MetadataBuilder`, `Metadata`, `SessionFactoryBuilder` and `SessionFactory` will all pick up that same `StandardServiceRegistry`.
|
||||
====
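
A minimal sketch of that suggested approach (the configuration file name and the annotated class are placeholders):

[source, JAVA, indent=0]
----
StandardServiceRegistry standardRegistry = new StandardServiceRegistryBuilder()
	.configure( "hibernate.cfg.xml" )
	.build();

Metadata metadata = new MetadataSources( standardRegistry )
	.addAnnotatedClass( Person.class )
	.buildMetadata();
----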
|
||||
|
||||
However, if you wish to adjust the process of building `Metadata` from `MetadataSources`,
|
||||
you will need to use the `MetadataBuilder` as obtained via `MetadataSources#getMetadataBuilder`.
|
||||
`MetadataBuilder` allows a lot of control over the `Metadata` building process.
|
||||
See its https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/MetadataBuilder.html[Javadocs] for full details.
|
||||
|
||||
[[bootstrap-native-metadata-builder-example]]
|
||||
.Building Metadata via `MetadataBuilder`
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/BootstrapTest.java[tags=bootstrap-native-metadata-builder-example]
|
||||
----
|
||||
====
|
||||
|
||||
[[bootstrap-native-SessionFactory]]
|
||||
==== Building the SessionFactory
|
||||
|
||||
The final step in native bootstrapping is to build the `SessionFactory` itself.
|
||||
Much like discussed above, if you are ok with the default behavior of building a `SessionFactory` from a `Metadata` reference, you can simply call the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/Metadata.html#buildSessionFactory--[`buildSessionFactory`] method on the `Metadata` object.
|
||||
|
||||
However, if you would like to adjust that building process, you will need to use the `SessionFactoryBuilder` as obtained via `Metadata#getSessionFactoryBuilder`. Again, see its https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/Metadata.html#getSessionFactoryBuilder--[Javadocs] for more details.
|
||||
|
||||
[[bootstrap-native-SessionFactory-example]]
|
||||
.Native Bootstrapping - Putting it all together
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/BootstrapTest.java[tags=bootstrap-native-SessionFactory-example]
|
||||
----
|
||||
====
|
||||
|
||||
The bootstrapping API is quite flexible, but in most cases it makes the most sense to think of it as a 3-step process:
|
||||
|
||||
1. Build the `StandardServiceRegistry`
|
||||
2. Build the `Metadata`
|
||||
3. Use those 2 to build the `SessionFactory`
|
||||
|
||||
[[bootstrap-native-SessionFactoryBuilder-example]]
|
||||
.Building `SessionFactory` via `SessionFactoryBuilder`
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/BootstrapTest.java[tags=bootstrap-native-SessionFactoryBuilder-example]
|
||||
----
|
||||
====
|
||||
|
||||
[[bootstrap-jpa]]
|
||||
=== JPA Bootstrapping
|
||||
|
||||
Bootstrapping Hibernate as a JPA provider can be done in a JPA-spec compliant manner or using a proprietary bootstrapping approach.
|
||||
The standardized approach has some limitations in certain environments, but aside from those, it is *highly* recommended that you use JPA-standardized bootstrapping.
|
||||
|
||||
[[bootstrap-jpa-compliant]]
|
||||
==== JPA-compliant bootstrapping
|
||||
|
||||
In JPA, we are ultimately interested in bootstrapping a `javax.persistence.EntityManagerFactory` instance.
|
||||
The JPA specification defines 2 primary standardized bootstrap approaches depending on how the application intends to access the `javax.persistence.EntityManager` instances from an `EntityManagerFactory`.
|
||||
It uses the terms _EE_ and _SE_ for these two approaches, but those terms are very misleading in this context.
|
||||
What the JPA spec calls EE bootstrapping implies the existence of a container (EE, OSGi, etc.), which will manage and inject the persistence context on behalf of the application.
|
||||
What it calls SE bootstrapping is everything else. We will use the terms container-bootstrapping and application-bootstrapping in this guide.
|
||||
|
||||
If you would like additional details on accessing and using `EntityManager` instances, sections 7.6 and 7.7 of the JPA 2.1 specification cover container-managed and application-managed `EntityManagers`, respectively.
|
||||
|
||||
For compliant container-bootstrapping, the container will build an `EntityManagerFactory` for each persistence-unit defined in the `META-INF/persistence.xml` configuration file
|
||||
and make that available to the application for injection via the `javax.persistence.PersistenceUnit` annotation or via JNDI lookup.
|
||||
|
||||
[[bootstrap-jpa-compliant-PersistenceUnit-example]]
|
||||
.Injecting an `EntityManagerFactory`
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/BootstrapTest.java[tags=bootstrap-jpa-compliant-PersistenceUnit-example]
|
||||
----
|
||||
====
|
||||
|
||||
For compliant application-bootstrapping, rather than the container building the `EntityManagerFactory` for the application, the application builds the `EntityManagerFactory` itself using the `javax.persistence.Persistence` bootstrap class.
|
||||
The application creates an `EntityManagerFactory` by calling the `createEntityManagerFactory` method:
|
||||
|
||||
[[bootstrap-jpa-compliant-EntityManagerFactory-example]]
|
||||
.Application bootstrapped EntityManagerFactory
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/BootstrapTest.java[tags=bootstrap-jpa-compliant-EntityManagerFactory-example]
|
||||
----
|
||||
====
|
||||
|
||||
[[bootstrap-jpa-hibernate]]
|
||||
==== Proprietary JPA bootstrapping
|
||||
|
||||
Hibernate defines a proprietary https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/jpa/boot/internal/EntityManagerFactoryBuilderImpl.html[`EntityManagerFactoryBuilderImpl`]
|
||||
utility, which allows bootstrapping the JPA environment even in the absence of a `persistence.xml` configuration file.
|
||||
As a substitute for the `persistence.xml` file, Hibernate offers the `PersistenceUnitInfoDescriptor` utility, which can carry the configuration that would otherwise be supplied in the standard XML configuration file.
|
||||
|
||||
[[bootstrap-native-EntityManagerFactory-example]]
|
||||
.Proprietary bootstrapped `EntityManagerFactory`
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/BootstrapTest.java[tags=bootstrap-native-PersistenceUnitInfoImpl-example]
|
||||
----
|
||||
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/BootstrapTest.java[tags=bootstrap-native-EntityManagerFactory-example]
|
||||
----
|
||||
====
|
||||
|
||||
The `integrationSettings` allows the application developer to customize the bootstrapping process by specifying different `hibernate.integrator_provider` or `hibernate.strategy_registration_provider` integration providers.
|
|
@ -0,0 +1,713 @@
|
|||
[[caching]]
|
||||
== Caching
|
||||
:sourcedir: ../../../../../test/java/org/hibernate/userguide/caching
|
||||
|
||||
At runtime, Hibernate handles moving data into and out of the second-level cache in response to the operations performed by the `Session`, which acts as a transaction-level cache of persistent data.
|
||||
Once an entity becomes managed, that object is added to the internal cache of the current persistence context (`EntityManager` or `Session`).
|
||||
The persistence context is also called the first-level cache, and it's enabled by default.
|
||||
|
||||
It is possible to configure a JVM-level (`SessionFactory`-level) or even a cluster cache on a class-by-class and collection-by-collection basis.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
Caches are not aware of changes made to the persistent store by other applications.
|
||||
They can, however, be configured to regularly expire cached data.
|
||||
====
|
||||
|
||||
[[caching-config]]
|
||||
=== Configuring second-level caching
|
||||
|
||||
Hibernate can integrate with various caching providers for the purpose of caching data outside the context of a particular `Session`.
|
||||
This section defines the settings which control this behavior.
|
||||
|
||||
[[caching-config-provider]]
|
||||
==== RegionFactory
|
||||
|
||||
`org.hibernate.cache.spi.RegionFactory` defines the integration between Hibernate and a pluggable caching provider.
|
||||
`hibernate.cache.region.factory_class` is used to declare the provider to use.
|
||||
Hibernate comes with built-in support for two popular caching libraries: <<caching-provider-ehcache,Ehcache>> and <<caching-provider-infinispan,Infinispan>>.
|
||||
Detailed information is provided later in this chapter.
|
||||
|
||||
[[caching-config-properties]]
|
||||
==== Caching configuration properties
|
||||
|
||||
Besides specific provider configuration, there are a number of configurations options on the Hibernate side of the integration that control various caching behaviors:
|
||||
|
||||
`hibernate.cache.use_second_level_cache`::
|
||||
Enable or disable second level caching overall. Default is true, although the default region factory is `NoCachingRegionFactory`.
|
||||
`hibernate.cache.use_query_cache`::
|
||||
Enable or disable second level caching of query results. Default is false.
|
||||
`hibernate.cache.query_cache_factory`::
|
||||
Query result caching is handled by a special contract that deals with staleness-based invalidation of the results.
|
||||
The default implementation does not allow stale results at all. Use this for applications that would like to relax that.
|
||||
Names an implementation of `org.hibernate.cache.spi.QueryCacheFactory`
|
||||
`hibernate.cache.use_minimal_puts`::
|
||||
Optimizes second-level cache operations to minimize writes, at the cost of more frequent reads. Providers typically set this appropriately.
|
||||
`hibernate.cache.region_prefix`::
|
||||
Defines a name to be used as a prefix to all second-level cache region names.
|
||||
`hibernate.cache.default_cache_concurrency_strategy`::
|
||||
In Hibernate second-level caching, all regions can be configured differently including the concurrency strategy to use when accessing that particular region.
|
||||
This setting allows you to define a default strategy to be used.
|
||||
This setting is very rarely required as the pluggable providers do specify the default strategy to use.
|
||||
Valid values include:
|
||||
* read-only,
|
||||
* read-write,
|
||||
* nonstrict-read-write,
|
||||
* transactional
|
||||
`hibernate.cache.use_structured_entries`::
|
||||
If `true`, forces Hibernate to store data in the second-level cache in a more human-friendly format.
|
||||
Can be useful if you'd like to be able to "browse" the data directly in your cache, but does have a performance impact.
|
||||
`hibernate.cache.auto_evict_collection_cache`::
|
||||
Enables or disables the automatic eviction of a bidirectional association's collection cache entry when the association is changed just from the owning side.
|
||||
This is disabled by default, as it has a performance impact to track this state.
|
||||
However, if your application does not manage both sides of a bidirectional association where the collection side is cached,
the alternative is stale data in that collection cache.
|
||||
`hibernate.cache.use_reference_entries`::
|
||||
Enable direct storage of entity references into the second level cache for read-only or immutable entities.
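
As a rough sketch, a few of these properties could be passed to the JPA bootstrap (the persistence unit name is a placeholder; the region factory shown is the Ehcache one discussed later in this chapter):

[source, JAVA, indent=0]
----
Map<String, Object> settings = new HashMap<>();
settings.put( "hibernate.cache.use_second_level_cache", "true" );
settings.put( "hibernate.cache.use_query_cache", "true" );
settings.put( "hibernate.cache.region.factory_class",
	"org.hibernate.cache.ehcache.EhCacheRegionFactory" );

EntityManagerFactory entityManagerFactory =
	Persistence.createEntityManagerFactory( "my-persistence-unit", settings );
----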
|
||||
|
||||
[[caching-mappings]]
|
||||
=== Configuring second-level cache mappings
|
||||
|
||||
The cache mappings can be configured via JPA annotations or XML descriptors or using the Hibernate-specific mapping files.
|
||||
|
||||
By default, entities are not part of the second level cache and we recommend you to stick to this setting.
|
||||
However, you can override this by setting the `shared-cache-mode` element in your `persistence.xml` file
|
||||
or by using the `javax.persistence.sharedCache.mode` property in your configuration file.
|
||||
The following values are possible:
|
||||
|
||||
`ENABLE_SELECTIVE` (Default and recommended value)::
|
||||
Entities are not cached unless explicitly marked as cacheable (with the https://docs.oracle.com/javaee/7/api/javax/persistence/Cacheable.html[`@Cacheable`] annotation).
|
||||
`DISABLE_SELECTIVE`::
|
||||
Entities are cached unless explicitly marked as non-cacheable.
|
||||
`ALL`::
|
||||
Entities are always cached even if marked as non-cacheable.
|
||||
`NONE`::
|
||||
No entity is cached even if marked as cacheable.
|
||||
This option makes sense if you want to disable the second-level cache altogether.
|
||||
|
||||
The cache concurrency strategy used by default can be set globally via the `hibernate.cache.default_cache_concurrency_strategy` configuration property.
|
||||
The values for this property are:
|
||||
|
||||
read-only::
|
||||
If your application needs to read, but not modify, instances of a persistent class, a read-only cache is the best choice.
|
||||
The application can still delete entities, and these changes should be reflected in the second-level cache so that the cache
does not provide stale entities.
|
||||
Implementations may use performance optimizations based on the immutability of entities.
|
||||
read-write::
|
||||
If the application needs to update data, a read-write cache might be appropriate.
|
||||
This strategy provides consistent access to a single entity, but not a serializable transaction isolation level; e.g. when TX1 looks up an entity and does not find it, TX2 inserts the entity into the cache, and TX1 looks it up again, the new entity can be read in TX1.
|
||||
nonstrict-read-write::
|
||||
Similar to read-write strategy but there might be occasional stale reads upon concurrent access to an entity. The choice of this strategy might be appropriate if the application rarely updates the same data simultaneously and strict transaction isolation is not required. Implementations may use performance optimizations that make use of the relaxed consistency guarantee.
|
||||
transactional::
|
||||
Provides serializable transaction isolation level.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
Rather than using a global cache concurrency strategy, it is recommended to define this setting on a per entity basis.
|
||||
Use the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Cache.html[`@org.hibernate.annotations.Cache`] annotation for that.
|
||||
====
|
||||
|
||||
The `@Cache` annotation defines three attributes:
|
||||
|
||||
usage::
|
||||
Defines the `CacheConcurrencyStrategy`
|
||||
region::
|
||||
Defines a cache region where entries will be stored
|
||||
include::
|
||||
Specifies whether lazy properties should be included in the second-level cache.
The default value is `all`, so lazy properties are cacheable.
The other possible value is `non-lazy`, so lazy properties are not cacheable.
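
For example, a per-entity cache configuration (assuming a hypothetical `Phone` entity and an example region name) might look like:

[source, JAVA, indent=0]
----
@Entity
@Cacheable
@org.hibernate.annotations.Cache(
	usage = CacheConcurrencyStrategy.READ_WRITE,
	region = "phone_cache"
)
public class Phone {

	@Id
	@GeneratedValue
	private Long id;

	private String number;
}
----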
|
||||
|
||||
[[caching-entity]]
|
||||
=== Entity cache
|
||||
|
||||
[[caching-entity-mapping-example]]
|
||||
.Entity cache mapping
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/NonStrictReadWriteCacheTest.java[tags=caching-entity-mapping-example]
|
||||
----
|
||||
====
|
||||
|
||||
Hibernate stores cached entities in a dehydrated form, which is similar to the database representation.
|
||||
Aside from the foreign key column values of the `@ManyToOne` or `@OneToOne` child-side associations,
|
||||
entity relationships are not stored in the cache.
|
||||
|
||||
Once an entity is stored in the second-level cache, you can avoid a database hit and load the entity from the cache alone:
|
||||
|
||||
[[caching-entity-jpa-example]]
|
||||
.Loading entity using JPA
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-entity-jpa-example]
|
||||
----
|
||||
====
|
||||
|
||||
[[caching-entity-native-example]]
|
||||
.Loading entity using Hibernate native API
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-entity-native-example]
|
||||
----
|
||||
====
|
||||
|
||||
The Hibernate second-level cache can also load entities by their <<chapters/domain/natural_id.adoc#naturalid,natural id>>:
|
||||
|
||||
[[caching-entity-natural-id-mapping-example]]
|
||||
.Hibernate natural id entity mapping
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-entity-natural-id-mapping-example]
|
||||
----
|
||||
====
|
||||
|
||||
[[caching-entity-natural-id-example]]
|
||||
.Loading entity using Hibernate native natural id API
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-entity-natural-id-example]
|
||||
----
|
||||
====
|
||||
|
||||
[[caching-collection]]
|
||||
=== Collection cache
|
||||
|
||||
Hibernate can also cache collections, and the `@Cache` annotation must be added to the collection property.
|
||||
|
||||
If the collection is made of value types (basic or embeddables mapped with `@ElementCollection`),
|
||||
the collection is stored as such.
|
||||
If the collection contains other entities (`@OneToMany` or `@ManyToMany`),
|
||||
the collection cache entry will store the entity identifiers only.
|
||||
|
||||
[[caching-collection-mapping-example]]
|
||||
.Collection cache mapping
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/NonStrictReadWriteCacheTest.java[tags=caching-collection-mapping-example]
|
||||
----
|
||||
====
|
||||
|
||||
Collections are read-through, meaning they are cached upon being accessed for the first time:
|
||||
|
||||
[[caching-collection-example]]
|
||||
.Collection cache usage
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/NonStrictReadWriteCacheTest.java[tags=caching-collection-example]
|
||||
----
|
||||
====
|
||||
|
||||
Subsequent collection retrievals will use the cache instead of going to the database.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The collection cache is not write-through so any modification will trigger a collection cache entry invalidation.
|
||||
On a subsequent access, the collection will be loaded from the database and re-cached.
|
||||
====
|
||||
|
||||
[[caching-query]]
|
||||
=== Query cache
|
||||
|
||||
Aside from caching entities and collections, Hibernate offers a query cache too.
|
||||
This is useful for frequently executed queries with fixed parameter values.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
Caching of query results introduces some overhead in terms of your application's normal transactional processing.
|
||||
For example, if you cache results of a query against `Person`,
|
||||
Hibernate will need to keep track of when those results should be invalidated because changes have been committed against any `Person` entity.
|
||||
|
||||
That, coupled with the fact that most applications simply gain no benefit from caching query results,
|
||||
leads Hibernate to disable caching of query results by default.
|
||||
====
|
||||
|
||||
To use query caching, you will first need to enable it with the following configuration property:
|
||||
|
||||
[[caching-query-configuration]]
|
||||
.Enabling query cache
|
||||
====
|
||||
[source, XML, indent=0]
|
||||
----
|
||||
<property
|
||||
name="hibernate.cache.use_query_cache"
|
||||
value="true" />
|
||||
----
|
||||
====
|
||||
|
||||
As mentioned above, most queries do not benefit from caching of their results.
|
||||
So by default, individual queries are not cached even after enabling query caching.
|
||||
Each particular query that needs to be cached must be manually set as cacheable.
|
||||
This way, the query looks for existing cache results or adds the query results to the cache when being executed.
|
||||
|
||||
[[caching-query-jpa-example]]
|
||||
.Caching query using JPA
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-query-jpa-example]
|
||||
----
|
||||
====
|
||||
|
||||
[[caching-query-native-example]]
|
||||
.Caching query using Hibernate native API
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-query-native-example]
|
||||
----
|
||||
====
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The query cache does not cache the state of the actual entities in the cache;
|
||||
it caches only identifier values and results of value type.
|
||||
|
||||
Just as with collection caching, the query cache should always be used in conjunction with the second-level cache for those entities expected to be cached as part of a query result cache.
|
||||
====
|
||||
|
||||
[[caching-query-region]]
|
||||
==== Query cache regions
|
||||
|
||||
This setting creates two new cache regions:
|
||||
|
||||
`org.hibernate.cache.internal.StandardQueryCache`::
|
||||
Holding the cached query results
|
||||
`org.hibernate.cache.spi.UpdateTimestampsCache`::
|
||||
Holding timestamps of the most recent updates to queryable tables.
|
||||
These are used to validate the results as they are served from the query cache.
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
If you configure your underlying cache implementation to use expiration, it's very important that the timeout of the underlying cache region for the `UpdateTimestampsCache` is set to a higher value than the timeouts of any of the query caches.
|
||||
|
||||
In fact, we recommend that the `UpdateTimestampsCache` region is not configured for expiration (time-based) or eviction (size/memory-based) at all.
|
||||
Note that an LRU (Least Recently Used) cache eviction policy is never appropriate for this particular cache region.
|
||||
====
|
||||
|
||||
If you require fine-grained control over query cache expiration policies,
|
||||
you can specify a named cache region for a particular query.
|
||||
|
||||
[[caching-query-region-jpa-example]]
|
||||
.Caching query in custom region using JPA
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-query-region-jpa-example]
|
||||
----
|
||||
====
|
||||
|
||||
[[caching-query-region-native-example]]
|
||||
.Caching query in custom region using Hibernate native API
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-query-region-native-example]
|
||||
----
|
||||
====
|
||||
|
||||
If you want to force the query cache to refresh one of its regions (disregarding any cached results it finds there),
|
||||
you can use custom cache modes.
|
||||
|
||||
[[caching-query-region-store-mode-jpa-example]]
|
||||
.Using custom query cache mode with JPA
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-query-region-store-mode-jpa-example]
|
||||
----
|
||||
====
|
||||
|
||||
[[caching-query-region-store-mode-native-example]]
|
||||
.Using custom query cache mode with Hibernate native API
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-query-region-store-mode-native-example]
|
||||
----
|
||||
====
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
When using http://docs.oracle.com/javaee/7/api/javax/persistence/CacheStoreMode.html#REFRESH[`CacheStoreMode.REFRESH`] or https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/CacheMode.html#REFRESH[`CacheMode.REFRESH`] in conjunction with the region you have defined for the given query,
|
||||
Hibernate will selectively force the results cached in that particular region to be refreshed.
|
||||
|
||||
This is particularly useful in cases where underlying data may have been updated via a separate process
|
||||
and is a far more efficient alternative to bulk eviction of the region via `SessionFactory` eviction which looks as follows:
|
||||
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-query-region-native-evict-example]
|
||||
----
|
||||
|
||||
====
|
||||
|
||||
[[caching-management]]
|
||||
=== Managing the cached data
|
||||
|
||||
Traditionally, Hibernate defined the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/CacheMode.html[`CacheMode`] enumeration to describe
|
||||
the ways of interactions with the cached data.
|
||||
JPA split cache modes by storage (http://docs.oracle.com/javaee/7/api/javax/persistence/CacheStoreMode.html[`CacheStoreMode`])
|
||||
and retrieval (http://docs.oracle.com/javaee/7/api/javax/persistence/CacheRetrieveMode.html[`CacheRetrieveMode`]).
|
||||
|
||||
The relationship between Hibernate and JPA cache modes can be seen in the following table:
|
||||
|
||||
.Cache modes relationships
|
||||
[cols=",,,",options="header",]
|
||||
|======================================
|
||||
|Hibernate | JPA | Description
|
||||
|`CacheMode.NORMAL` |`CacheStoreMode.USE` and `CacheRetrieveMode.USE` | Default. Reads/writes data from/into cache
|
||||
|`CacheMode.REFRESH` |`CacheStoreMode.REFRESH` and `CacheRetrieveMode.BYPASS` | Doesn't read from cache, but writes to the cache upon loading from the database
|
||||
|`CacheMode.PUT` |`CacheStoreMode.USE` and `CacheRetrieveMode.BYPASS` | Doesn't read from cache, but writes to the cache as it reads from the database
|
||||
|`CacheMode.GET` |`CacheStoreMode.BYPASS` and `CacheRetrieveMode.USE` | Read from the cache, but doesn't write to cache
|
||||
|`CacheMode.IGNORE` |`CacheStoreMode.BYPASS` and `CacheRetrieveMode.BYPASS` | Doesn't read/write data from/into cache
|
||||
|======================================
|
||||
|
||||
Setting the cache mode can be done either when loading entities directly or when executing a query.
|
||||
|
||||
[[caching-management-cache-mode-entity-jpa-example]]
|
||||
.Using custom cache modes with JPA
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-management-cache-mode-entity-jpa-example]
|
||||
----
|
||||
====
|
||||
|
||||
[[caching-management-cache-mode-entity-native-example]]
|
||||
.Using custom cache modes with Hibernate native API
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-management-cache-mode-entity-native-example]
|
||||
----
|
||||
====
|
||||
|
||||
The custom cache modes can be set for queries as well:
|
||||
|
||||
[[caching-management-cache-mode-query-jpa-example]]
|
||||
.Using custom cache modes for queries with JPA
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-management-cache-mode-query-jpa-example]
|
||||
----
|
||||
====
|
||||
|
||||
[[caching-management-cache-mode-query-native-example]]
|
||||
.Using custom cache modes for queries with Hibernate native API
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-management-cache-mode-query-native-example]
|
||||
----
|
||||
====
|
||||
|
||||
[[caching-management-evict]]
|
||||
==== Evicting cache entries
|
||||
|
||||
Because the second level cache is bound to the `EntityManagerFactory` or the `SessionFactory`,
|
||||
cache eviction must be done through these two interfaces.
|
||||
|
||||
JPA only supports entity eviction through the https://docs.oracle.com/javaee/7/api/javax/persistence/Cache.html[`javax.persistence.Cache`] interface:
|
||||
|
||||
[[caching-management-evict-jpa-example]]
|
||||
.Evicting entities with JPA
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-management-evict-jpa-example]
|
||||
----
|
||||
====
|
||||
|
||||
Hibernate is much more flexible in this regard as it offers fine-grained control over what needs to be evicted.
|
||||
The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/Cache.html[`org.hibernate.Cache`] interface defines various evicting strategies:
|
||||
|
||||
- entities (by their class or region)
|
||||
- entities stored using the natural-id (by their class or region)
|
||||
- collections (by the region, and it might take the collection owner identifier as well)
|
||||
- queries (by region)
|
||||
|
||||
[[caching-management-evict-native-example]]
|
||||
.Evicting entities with Hibernate native API
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-management-evict-native-example]
|
||||
----
|
||||
====
|
||||
|
||||
[[caching-statistics]]
|
||||
=== Caching statistics
|
||||
|
||||
If you enable the `hibernate.generate_statistics` configuration property,
|
||||
Hibernate will expose a number of metrics via `SessionFactory.getStatistics()`.
|
||||
Hibernate can even be configured to expose these statistics via JMX.
|
||||
|
||||
This way, you can get access to the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/stat/Statistics.html[`Statistics`] class which comprises all sort of
|
||||
second-level cache metrics.
|
||||
|
||||
[[caching-statistics-example]]
|
||||
.Caching statistics
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-statistics-example]
|
||||
----
|
||||
====
|
||||
|
||||
|
||||
[[caching-provider-ehcache]]
|
||||
=== Ehcache
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
Use of the built-in integration for http://www.ehcache.org/[Ehcache] requires that the `hibernate-ehcache` module jar (and all of its dependencies) are on the classpath.
|
||||
====
|
||||
|
||||
[[caching-provider-ehcache-region-factory]]
|
||||
==== RegionFactory
|
||||
|
||||
The hibernate-ehcache module defines two specific region factories: `EhCacheRegionFactory` and `SingletonEhCacheRegionFactory`.
|
||||
|
||||
[[caching-provider-ehcache-region-factory-shared]]
|
||||
===== `EhCacheRegionFactory`
|
||||
|
||||
To use the `EhCacheRegionFactory`, you need to specify the following configuration property:
|
||||
|
||||
[[caching-provider-ehcache-region-factory-shared-example]]
|
||||
.`EhCacheRegionFactory` configuration
|
||||
====
|
||||
[source, XML, indent=0]
|
||||
----
|
||||
<property
|
||||
name="hibernate.cache.region.factory_class"
|
||||
value="org.hibernate.cache.ehcache.EhCacheRegionFactory"/>
|
||||
----
|
||||
====
|
||||
|
||||
The `EhCacheRegionFactory` configures a `net.sf.ehcache.CacheManager` for each `SessionFactory`,
|
||||
so the `CacheManager` is not shared among multiple `SessionFactory` instances in the same JVM.
|
||||
|
||||
[[caching-provider-ehcache-region-factory-singleton]]
|
||||
===== `SingletonEhCacheRegionFactory`
|
||||
|
||||
To use the `SingletonEhCacheRegionFactory`, you need to specify the following configuration property:
|
||||
|
||||
[[caching-provider-ehcache-region-factory-singleton-example]]
|
||||
.`SingletonEhCacheRegionFactory` configuration
|
||||
====
|
||||
[source, XML, indent=0]
|
||||
----
|
||||
<property
|
||||
name="hibernate.cache.region.factory_class"
|
||||
value="org.hibernate.cache.ehcache.SingletonEhCacheRegionFactory"/>
|
||||
----
|
||||
====
|
||||
|
||||
The `SingletonEhCacheRegionFactory` configures a singleton `net.sf.ehcache.CacheManager` (see http://www.ehcache.org/apidocs/2.8.4/net/sf/ehcache/CacheManager.html#create%28%29[CacheManager#create()]),
|
||||
shared among multiple `SessionFactory` instances in the same JVM.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The http://www.ehcache.org/documentation/2.8/integrations/hibernate#optional[Ehcache documentation] recommends using multiple non-singleton `CacheManager` instances when there are multiple Hibernate `SessionFactory` instances running in the same JVM.
|
||||
====
|
||||
|
||||
[[caching-provider-infinispan]]
|
||||
=== Infinispan
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
Use of the built-in integration for http://infinispan.org/[Infinispan] requires that the `hibernate-infinispan` module jar (and all of its dependencies) are on the classpath.
|
||||
====
|
||||
|
||||
Infinispan currently supports all cache concurrency modes, although not all combinations of configurations are compatible.
|
||||
|
||||
Traditionally, the `transactional` and `read-only` strategies were supported on _transactional invalidation_ caches. In version 5.0, further modes have been added:
|
||||
|
||||
* _non-transactional invalidation_ caches are supported as well with the `read-write` strategy. The actual setting of the cache concurrency mode (`read-write` vs. `transactional`) is not honored; the appropriate strategy is selected based on the cache configuration (_non-transactional_ vs. _transactional_).
|
||||
* `read-write` mode is supported on _non-transactional distributed/replicated_ caches; however, eviction should not be used in this configuration, as it can lead to consistency issues. Expiration (with reasonably long max-idle times) can be used instead.
|
||||
* `nonstrict-read-write` mode is supported on _non-transactional distributed/replicated_ caches, but eviction should be turned off as well. In addition, the entities must use versioning. This mode mildly relaxes consistency: between the DB commit and the end of the transaction commit, a stale read (see <<caching-provider-infinispan-stale-read-example,example>>) may occur in another transaction. However, this strategy uses fewer RPCs and can be more performant than the other ones.
|
||||
* `read-only` mode is supported on both _transactional_ and _non-transactional_ _invalidation_ caches and _non-transactional distributed/replicated_ caches, but use of this mode currently does not bring any performance gains.
|
||||
|
||||
The available combinations are summarized in the table below:
|
||||
[[cache-provider-infinispan-compatibility-table]]
|
||||
.Cache concurrency strategy/cache mode compatibility table
|
||||
[options="header"]
|
||||
|===
|
||||
|Concurrency strategy|Cache transactions|Cache mode |Eviction
|
||||
|transactional |transactional |invalidation |yes
|
||||
|read-write |non-transactional |invalidation |yes
|
||||
|read-write |non-transactional |distributed/replicated |no
|
||||
|nonstrict-read-write|non-transactional |distributed/replicated |no
|
||||
|===
|
||||
|
||||
If your second level cache is not clustered, it is possible to use a local cache instead of the clustered caches in all the modes described above.
|
||||
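For reference against the compatibility table above, the concurrency strategy is selected per entity or collection mapping. The following is a minimal sketch (using a hypothetical `Event` entity and region name, not part of the reference examples) of selecting the `read-write` strategy via Hibernate's `@Cache` annotation:

[source, java]
----
import javax.persistence.Cacheable;
import javax.persistence.Entity;
import javax.persistence.Id;

import org.hibernate.annotations.Cache;
import org.hibernate.annotations.CacheConcurrencyStrategy;

// Hypothetical entity; the chosen strategy must be compatible
// with the Infinispan cache mode (see the table above).
@Entity
@Cacheable
@Cache(usage = CacheConcurrencyStrategy.READ_WRITE, region = "event")
public class Event {

    @Id
    private Long id;

    private String name;

    // getters and setters omitted for brevity
}
----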
|
||||
[[caching-provider-infinispan-stale-read-example]]
|
||||
.Stale read with `nonstrict-read-write` strategy
|
||||
====
|
||||
[source, indent=0]
|
||||
----
|
||||
A=0 (non-cached), B=0 (cached in 2LC)
|
||||
TX1: write A = 1, write B = 1
|
||||
TX1: start commit
|
||||
TX1: commit A, B in DB
|
||||
TX2: read A = 1 (from DB), read B = 0 (from 2LC) // breaks transactional atomicity
|
||||
TX1: update A, B in 2LC
|
||||
TX1: end commit
|
||||
Tx3: read A = 1, B = 1 // reads after TX1 commit completes are consistent again
|
||||
----
|
||||
====
|
||||
|
||||
[[caching-provider-infinispan-region-factory]]
|
||||
==== RegionFactory
|
||||
|
||||
The hibernate-infinispan module defines two specific providers: `infinispan` and `infinispan-jndi`.
|
||||
|
||||
[[caching-provider-infinispan-region-factory-basic]]
|
||||
===== `InfinispanRegionFactory`
|
||||
|
||||
If Hibernate and Infinispan are running in a standalone environment, the `InfinispanRegionFactory` should be configured as follows:
|
||||
|
||||
[[caching-provider-infinispan-region-factory-basic-example]]
|
||||
.`InfinispanRegionFactory` configuration
|
||||
====
|
||||
[source, XML, indent=0]
|
||||
----
|
||||
<property
|
||||
name="hibernate.cache.region.factory_class"
|
||||
value="org.hibernate.cache.infinispan.InfinispanRegionFactory" />
|
||||
----
|
||||
====
|
||||
|
||||
[[caching-provider-infinispan-region-factory-jndi]]
|
||||
===== `JndiInfinispanRegionFactory`
|
||||
|
||||
If the Infinispan `CacheManager` is bound to JNDI, then the `JndiInfinispanRegionFactory` should be used as a region factory:
|
||||
|
||||
[[caching-provider-infinispan-region-factory-jndi-example]]
|
||||
.`JndiInfinispanRegionFactory` configuration
|
||||
====
|
||||
[source, XML, indent=0]
|
||||
----
|
||||
<property
|
||||
name="hibernate.cache.region.factory_class"
|
||||
value="org.hibernate.cache.infinispan.JndiInfinispanRegionFactory" />
|
||||
|
||||
<property
|
||||
name="hibernate.cache.infinispan.cachemanager"
|
||||
value="java:CacheManager" />
|
||||
----
|
||||
====
|
||||
|
||||
===== Infinispan in JBoss AS/WildFly
|
||||
|
||||
When using JPA in WildFly, the region factory is automatically set upon configuring `hibernate.cache.use_second_level_cache=true` (by default, the second-level cache is not used).
|
||||
For more information, please consult https://docs.jboss.org/author/display/WFLY9/JPA+Reference+Guide#JPAReferenceGuide-UsingtheInfinispansecondlevelcache[WildFly documentation].
|
||||
|
||||
[[caching-provider-infinispan-config]]
|
||||
==== Configuration properties
|
||||
|
||||
The hibernate-infinispan module comes with a default configuration in `infinispan-config.xml` that is suited for clustered use. If there is only a single instance accessing the DB, you can use the more performant `infinispan-config-local.xml` by setting the `hibernate.cache.infinispan.cfg` property. If you require further tuning of the cache, you can provide your own configuration. Caches that are not specified in the provided configuration will default to `infinispan-config.xml` (if the provided configuration uses clustering) or `infinispan-config-local.xml`. It is not possible to specify the configuration this way in WildFly.
|
||||
|
||||
[[caching-provider-infinispan-config-example]]
|
||||
.Use custom Infinispan configuration
|
||||
====
|
||||
[source, XML, indent=0]
|
||||
----
|
||||
<property
|
||||
name="hibernate.cache.infinispan.cfg"
|
||||
value="my-infinispan-configuration.xml" />
|
||||
----
====
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
If the cache is configured as transactional, `InfinispanRegionFactory` automatically sets the transaction manager so that the TM used by Infinispan is the same as the TM used by Hibernate.
|
||||
====
|
||||
|
||||
|
||||
The cache configuration can differ for each type of data stored in the cache. In order to override the cache configuration template, use the property `hibernate.cache.infinispan._data-type_.cfg`, where `_data-type_` can be one of:
|
||||
|
||||
`entity`:: Entities indexed by `@Id` or `@EmbeddedId` attribute.
|
||||
`immutable-entity`:: Entities tagged with `@Immutable` annotation or set as `mutable=false` in mapping file.
|
||||
`naturalid`:: Entities indexed by their `@NaturalId` attribute.
|
||||
`collection`:: All collections.
|
||||
`timestamps`:: Mapping _entity type_ -> _last modification timestamp_. Used for query caching.
|
||||
`query`:: Mapping _query_ -> _query result_.
|
||||
`pending-puts`:: Auxiliary caches for regions using invalidation mode caches.
|
||||
|
||||
To specify the cache template for a specific region, use the region name instead of the `_data-type_`:
|
||||
|
||||
[[caching-provider-infinispan-config-cache-example]]
|
||||
.Use custom cache template
|
||||
====
|
||||
[source, XML, indent=0]
|
||||
----
|
||||
<property
|
||||
name="hibernate.cache.infinispan.entities.cfg"
|
||||
value="custom-entities" />
|
||||
<property
|
||||
name="hibernate.cache.infinispan.query.cfg"
|
||||
value="custom-query-cache" />
|
||||
<property
|
||||
name="hibernate.cache.infinispan.com.example.MyEntity.cfg"
|
||||
value="my-entities" />
|
||||
<property
|
||||
name="hibernate.cache.infinispan.com.example.MyEntity.someCollection.cfg"
|
||||
value="my-entities-some-collection" />
|
||||
----
====
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
Cache configurations are used only as a template for the cache created for a given region (usually each entity hierarchy or collection has its own region). It is not possible to use the same cache for different regions.
|
||||
====
|
||||
|
||||
Some options in the cache configuration can also be overridden directly through properties, as shown in the sketch after this list. These are:
|
||||
|
||||
`hibernate.cache.infinispan._something_.eviction.strategy`:: Available options are `NONE`, `LRU` and `LIRS`.
|
||||
`hibernate.cache.infinispan._something_.eviction.max_entries`:: Maximum number of entries in the cache.
|
||||
`hibernate.cache.infinispan._something_.expiration.lifespan`:: Lifespan of entry from insert into cache (in milliseconds)
|
||||
`hibernate.cache.infinispan._something_.expiration.max_idle`:: Lifespan of entry from last read/modification (in milliseconds)
|
||||
`hibernate.cache.infinispan._something_.expiration.wake_up_interval`:: Period of thread checking expired entries.
|
||||
`hibernate.cache.infinispan.statistics`:: Globally enables/disables Infinispan statistics collection, and their exposure via JMX.
|
||||
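The following is a minimal sketch of overriding a few of these options programmatically when bootstrapping JPA; the persistence unit name and the concrete values are assumptions used purely for illustration.

[source, java]
----
import java.util.HashMap;
import java.util.Map;

import javax.persistence.EntityManagerFactory;
import javax.persistence.Persistence;

public class InfinispanSettingsExample {

    public static EntityManagerFactory buildEntityManagerFactory() {
        Map<String, Object> settings = new HashMap<>();

        // Cap the entity cache size and evict the least recently used entries.
        settings.put( "hibernate.cache.infinispan.entity.eviction.strategy", "LRU" );
        settings.put( "hibernate.cache.infinispan.entity.eviction.max_entries", "10000" );

        // Expire entity entries one hour after insertion
        // or 30 minutes after the last read/modification.
        settings.put( "hibernate.cache.infinispan.entity.expiration.lifespan", "3600000" );
        settings.put( "hibernate.cache.infinispan.entity.expiration.max_idle", "1800000" );

        // Enable Infinispan statistics collection and its JMX exposure.
        settings.put( "hibernate.cache.infinispan.statistics", "true" );

        // "my-persistence-unit" is a hypothetical persistence unit name.
        return Persistence.createEntityManagerFactory( "my-persistence-unit", settings );
    }
}
----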
|
||||
[NOTE]
|
||||
====
|
||||
In versions prior to 5.1, `hibernate.cache.infinispan._something_.expiration.wake_up_interval` was called `hibernate.cache.infinispan._something_.eviction.wake_up_interval`.
|
||||
Eviction settings are checked upon each cache insert; it is expiration that needs to be triggered periodically.
|
||||
The old property still works, but its use is deprecated.
|
||||
====
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The property `hibernate.cache.infinispan.use_synchronization`, which allowed registering Infinispan as an XA resource in the transaction, has been deprecated in 5.0 and is not honored anymore. Infinispan 2LC must register as synchronizations on transactional caches. Also, non-transactional cache modes hook into the current JTA/JDBC transaction as synchronizations.
|
||||
====
|
||||
|
||||
[[caching-provider-infinispan-config-query-timestamps]]
|
||||
===== Configuring Query and Timestamps caches
|
||||
|
||||
Since version 5.0 it is possible to configure query caches as _non-transactional_. Consistency guarantees are not changed and writes to the query cache should be faster.
|
||||
|
||||
The query cache is configured so that queries are only cached locally. Alternatively, you can configure query caching to use replication by selecting "replicated-query" as the query cache name. However, replication for the query cache makes sense if, and only if, all of these conditions are true:
|
||||
|
||||
* Performing the query is quite expensive.
|
||||
* The same query is very likely to be repeatedly executed on different cluster nodes.
|
||||
* The query is unlikely to be invalidated out of the cache.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
Hibernate must aggressively invalidate query results from the cache any time any instance of one of the entity types is modified. All cached query results referencing given entity type are invalidated, even if the change made to the specific entity instance would not have affected the query result.
|
||||
The timestamps cache plays an important role here: it contains the last modification timestamp for each entity type. After a cached query result is loaded, its timestamp is compared to the timestamps of all the entity types referenced in the query, and if any of these is higher, the cached query result is discarded and the query is executed against the DB.
|
||||
====
|
||||
|
||||
In the default configuration, the timestamps cache is asynchronously replicated. This means that a cached query on a remote node can provide stale results for a brief time window before the remote timestamps cache is updated. However, requiring a synchronous RPC would result in severe performance degradation.
|
||||
|
||||
Further, though possibly outdated, information can be found in the http://infinispan.org/docs/8.0.x/user_guide/user_guide.html#_using_infinispan_as_jpa_hibernate_second_level_cache_provider[Infinispan documentation].
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
[[domain-model]]
|
||||
== Domain Model
|
||||
:sourcedir: extras
|
||||
|
||||
The term https://en.wikipedia.org/wiki/Domain_model[domain model] comes from the realm of data modeling.
|
||||
It is the model that ultimately describes the https://en.wikipedia.org/wiki/Problem_domain[problem domain] you are working in.
|
||||
Sometimes you will also hear the term _persistent classes_.
|
||||
|
||||
Ultimately the application domain model is the central character in an ORM.
|
||||
It is made up of the classes you wish to map. Hibernate works best if these classes follow the Plain Old Java Object (POJO) / JavaBean programming model.
|
||||
However, none of these rules are hard requirements.
|
||||
Indeed, Hibernate assumes very little about the nature of your persistent objects. You can express a domain model in other ways (using trees of `java.util.Map` instances, for example).
|
||||
|
||||
Historically applications using Hibernate would have used its proprietary XML mapping file format for this purpose.
|
||||
With the coming of JPA, most of this information is now defined in a way that is portable across ORM/JPA providers using annotations (and/or standardized XML format).
|
||||
This chapter will focus on JPA mapping where possible.
|
||||
For Hibernate mapping features not supported by JPA we will prefer Hibernate extension annotations.
|
||||
|
||||
include::types.adoc[]
|
||||
include::naming.adoc[]
|
||||
include::basic_types.adoc[]
|
||||
include::embeddables.adoc[]
|
||||
include::entity.adoc[]
|
||||
include::access.adoc[]
|
||||
include::identifiers.adoc[]
|
||||
include::associations.adoc[]
|
||||
include::collections.adoc[]
|
||||
include::natural_id.adoc[]
|
||||
include::dynamic_model.adoc[]
|
||||
include::inheritance.adoc[]
|
|
@ -0,0 +1,101 @@
|
|||
[[access]]
|
||||
==== Access strategies
|
||||
:sourcedir: extras
|
||||
|
||||
As a JPA provider, Hibernate can introspect both the entity attributes (instance fields) and the accessors (instance properties).
|
||||
The placement of the `@Id` annotation determines the default access strategy.
|
||||
When placed on a field, Hibernate will assume field-based access.
|
||||
When placed on the identifier getter, Hibernate will use property-based access.
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
You should pay attention to the https://docs.oracle.com/javase/7/docs/api/java/beans/Introspector.html#decapitalize(java.lang.String)[JavaBeans specification] with regard to naming properties to avoid
|
||||
issues such as https://hibernate.atlassian.net/browse/HCANN-63[Property name beginning with at least two uppercase characters has odd functionality in HQL]!
|
||||
====
|
||||
|
||||
Embeddable types inherit the access strategy from their parent entities.
|
||||
|
||||
[[field-based-access]]
|
||||
===== Field-based access
|
||||
|
||||
.Field-based access
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/access/SimpleEntityFieldAccess.java[]
|
||||
----
|
||||
====
|
||||
|
||||
When using field-based access, adding other entity-level methods is much more flexible because Hibernate won't consider those part of the persistence state.
|
||||
To exclude a field from being part of the entity persistent state, the field must be marked with the `@Transient` annotation.
|
||||
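For instance, a minimal sketch (using a hypothetical `Account` entity) of a non-persistent field in a field-access entity could look as follows:

[source,java]
----
import java.math.BigDecimal;

import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.Transient;

// Hypothetical entity; the @Id on the field selects field-based access.
@Entity
public class Account {

    @Id
    private Long id;

    private BigDecimal balance;

    // Derived at runtime, never written to the database.
    @Transient
    private BigDecimal projectedInterest;

    // getters and setters omitted for brevity
}
----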
|
||||
[NOTE]
|
||||
====
|
||||
Another advantage of using field-based access is that some entity attributes can be hidden from outside the entity.
|
||||
An example of such an attribute is the entity `@Version` field, which must not be manipulated by the data access layer.
|
||||
With field-based access, we can simply omit the getter and the setter for this version field, and Hibernate can still leverage the optimistic concurrency control mechanism.
|
||||
====
|
||||
|
||||
[[property-based-access]]
|
||||
===== Property-based access
|
||||
|
||||
.Property-based access
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/access/SimpleEntityPropertyAccess.java[]
|
||||
----
|
||||
====
|
||||
|
||||
When using property-based access, Hibernate uses the accessors for both reading and writing the entity state.
|
||||
Every other method that will be added to the entity (e.g. helper methods for synchronizing both ends of a bidirectional one-to-many association) will have to be marked with the `@Transient` annotation.
|
||||
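As an illustration, the following minimal sketch (using a hypothetical `Customer` entity) shows a helper method that must be marked with `@Transient` so Hibernate does not treat it as a persistent property:

[source,java]
----
import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.Transient;

// Hypothetical entity using property-based access (the @Id is on the getter).
@Entity
public class Customer {

    private Long id;

    private String firstName;

    private String lastName;

    @Id
    public Long getId() { return id; }
    public void setId(Long id) { this.id = id; }

    public String getFirstName() { return firstName; }
    public void setFirstName(String firstName) { this.firstName = firstName; }

    public String getLastName() { return lastName; }
    public void setLastName(String lastName) { this.lastName = lastName; }

    // Helper method, not a persistent property: it must be marked @Transient,
    // otherwise Hibernate would try to map a "fullName" property.
    @Transient
    public String getFullName() {
        return firstName + " " + lastName;
    }
}
----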
|
||||
===== Overriding the default access strategy
|
||||
|
||||
The default access strategy mechanism can be overridden with the JPA `@Access` annotation.
|
||||
In the following example, the `@Version` attribute is accessed by its field and not by its getter, like the rest of entity attributes.
|
||||
|
||||
.Overriding access strategy
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/access/SimpleEntityPropertyAccessOverride.java[]
|
||||
----
|
||||
====
|
||||
|
||||
[[access-embeddable-types]]
|
||||
===== Embeddable types and access strategy
|
||||
|
||||
Because embeddables are managed by their owning entities, their access strategy is inherited from the entity too.
|
||||
This applies to simple embeddable types as well as to collections of embeddables.
|
||||
|
||||
The embeddable types can overrule the default implicit access strategy (inherited from the owning entity).
|
||||
In the following example, the embeddable uses property-based access, no matter what access strategy the owning entity has chosen:
|
||||
|
||||
.Embeddable with exclusive access strategy
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/access/EmbeddableAccessType.java[]
|
||||
----
|
||||
====
|
||||
|
||||
The owning entity can use field-based access, while the embeddable uses property-based access, as it has explicitly chosen:
|
||||
|
||||
.Entity including a single embeddable type
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/access/EmbeddedAccessType.java[]
|
||||
----
|
||||
====
|
||||
|
||||
This also works for collections of embeddable types:
|
||||
|
||||
.Entity including a collection of embeddable types
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/access/ElementCollectionAccessType.java[]
|
||||
----
|
||||
====
|
|
@ -0,0 +1,390 @@
|
|||
[[associations]]
|
||||
=== Associations
|
||||
:sourcedir: ../../../../../test/java/org/hibernate/userguide/associations
|
||||
:extrasdir: extras/associations
|
||||
|
||||
Associations describe how two or more entities form a relationship based on a database joining semantics.
|
||||
|
||||
[[associations-many-to-one]]
|
||||
==== `@ManyToOne`
|
||||
|
||||
`@ManyToOne` is the most common association, having a direct equivalent in the relational database as well (e.g. foreign key),
|
||||
and so it establishes a relationship between a child entity and a parent.
|
||||
|
||||
[[associations-many-to-one-example]]
|
||||
.`@ManyToOne` association
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/ManyToOneTest.java[tags=associations-many-to-one-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/associations-many-to-one-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
Each entity has a lifecycle of its own. Once the `@ManyToOne` association is set, Hibernate will set the associated database foreign key column.
|
||||
|
||||
[[associations-many-to-one-lifecycle-example]]
|
||||
.`@ManyToOne` association lifecycle
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/ManyToOneTest.java[tags=associations-many-to-one-lifecycle-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/associations-many-to-one-lifecycle-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
[[associations-one-to-many]]
|
||||
==== `@OneToMany`
|
||||
|
||||
The `@OneToMany` association links a parent entity with one or more child entities.
|
||||
If the `@OneToMany` doesn't have a mirroring `@ManyToOne` association on the child side, the `@OneToMany` association is unidirectional.
|
||||
If there is a `@ManyToOne` association on the child side, the `@OneToMany` association is bidirectional and the application developer can navigate this relationship from both ends.
|
||||
|
||||
[[associations-one-to-many-unidirectional]]
|
||||
===== Unidirectional `@OneToMany`
|
||||
|
||||
When using a unidirectional `@OneToMany` association, Hibernate resorts to using a link table between the two joining entities.
|
||||
|
||||
[[associations-one-to-many-unidirectional-example]]
|
||||
.Unidirectional `@OneToMany` association
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/OneToManyUnidirectionalTest.java[tags=associations-one-to-many-unidirectional-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/associations-one-to-many-unidirectional-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The `@OneToMany` association is by definition a parent association, even if it's a unidirectional or a bidirectional one.
|
||||
It only makes sense for the parent side of an association to cascade its entity state transitions to its children.
|
||||
====
|
||||
|
||||
[[associations-one-to-many-unidirectional-lifecycle-example]]
|
||||
.Cascading `@OneToMany` association
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/OneToManyUnidirectionalTest.java[tags=associations-one-to-many-unidirectional-lifecycle-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/associations-one-to-many-unidirectional-lifecycle-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
When persisting the `Person` entity, the cascade will propagate the persist operation to the underlying `Phone` children as well.
|
||||
Upon removing a `Phone` from the phones collection, the association row is deleted from the link table, and the `orphanRemoval` attribute will trigger a `Phone` removal as well.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The unidirectional associations are not very efficient when it comes to removing child entities.
|
||||
In this particular example, upon flushing the persistence context, Hibernate deletes all database child entries and reinserts the ones that are still found in the in-memory persistence context.
|
||||
|
||||
On the other hand, a bidirectional `@OneToMany` association is much more efficient because the child entity controls the association.
|
||||
====
|
||||
|
||||
[[associations-one-to-many-bidirectional]]
|
||||
===== Bidirectional `@OneToMany`
|
||||
|
||||
The bidirectional `@OneToMany` association also requires a `@ManyToOne` association on the child side.
|
||||
Although the Domain Model exposes two sides to navigate this association, behind the scenes, the relational database has only one foreign key for this relationship.
|
||||
|
||||
Every bidirectional association must have one owning side only (the child side), the other one being referred to as the _inverse_ (or the `mappedBy`) side.
|
||||
|
||||
[[associations-one-to-many-bidirectional-example]]
|
||||
.`@OneToMany` association mappedBy the `@ManyToOne` side
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/OneToManyBidirectionalTest.java[tags=associations-one-to-many-bidirectional-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/associations-one-to-many-bidirectional-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
Whenever a bidirectional association is formed, the application developer must make sure both sides are in-sync at all times.
|
||||
`addPhone()` and `removePhone()` are utility methods that synchronize both ends whenever a child element is added or removed.
|
||||
====
|
||||
|
||||
Because the `Phone` class has a `@NaturalId` column (the phone number being unique),
|
||||
the `equals()` and the `hashCode()` can make use of this property, and so the `removePhone()` logic is reduced to the `remove()` Java `Collection` method.
|
||||
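A minimal sketch of such helper methods, assuming a `Person` parent with a `phones` collection and a `Phone` child that exposes a `setPerson()` method (as in the example above), could look like this:

[source,java]
----
import java.util.ArrayList;
import java.util.List;

// Simplified, mapping annotations omitted; only the synchronization logic is shown.
public class Person {

    private List<Phone> phones = new ArrayList<>();

    public void addPhone(Phone phone) {
        phones.add( phone );
        phone.setPerson( this );
    }

    public void removePhone(Phone phone) {
        phones.remove( phone );
        phone.setPerson( null );
    }
}
----

Calling `addPhone()` instead of mutating the collection directly keeps the owning `@ManyToOne` side, which Hibernate actually persists, consistent with the in-memory parent collection.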
|
||||
[[associations-one-to-many-bidirectional-lifecycle-example]]
|
||||
.Bidirectional `@OneToMany` with an owner `@ManyToOne` side lifecycle
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/OneToManyBidirectionalTest.java[tags=associations-one-to-many-bidirectional-lifecycle-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/associations-one-to-many-bidirectional-lifecycle-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
Unlike the unidirectional `@OneToMany`, the bidirectional association is much more efficient when managing the collection persistence state.
|
||||
Every element removal only requires a single update (in which the foreign key column is set to `NULL`).
|
||||
Moreover, if the child entity lifecycle is bound to its owning parent so that the child cannot exist without its parent,
|
||||
we can annotate the association with the `orphanRemoval` attribute, and disassociating the child will trigger a delete statement on the actual child table row as well.
|
||||
|
||||
[[associations-one-to-one]]
|
||||
==== `@OneToOne`
|
||||
|
||||
The `@OneToOne` association can either be unidirectional or bidirectional.
|
||||
A unidirectional association follows the relational database foreign key semantics, the client-side owning the relationship.
|
||||
A bidirectional association features a `mappedBy` `@OneToOne` parent side too.
|
||||
|
||||
[[associations-one-to-one-unidirectional]]
|
||||
===== Unidirectional `@OneToOne`
|
||||
|
||||
[[associations-one-to-one-unidirectional-example]]
|
||||
.Unidirectional `@OneToOne`
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/OneToOneUnidirectionalTest.java[tags=associations-one-to-one-unidirectional-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/associations-one-to-one-unidirectional-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
From a relational database point of view, the underlying schema is identical to the unidirectional `@ManyToOne` association,
|
||||
as the client-side controls the relationship based on the foreign key column.
|
||||
|
||||
However, it's unusual to consider the `Phone` as the client-side and the `PhoneDetails` as the parent-side, because the details cannot exist without an actual phone.
|
||||
A much more natural mapping would be if the `Phone` were the parent-side, therefore pushing the foreign key into the `PhoneDetails` table.
|
||||
This mapping requires a bidirectional `@OneToOne` association as you can see in the following example:
|
||||
|
||||
[[associations-one-to-one-bidirectional]]
|
||||
===== Bidirectional `@OneToOne`
|
||||
|
||||
[[associations-one-to-one-bidirectional-example]]
|
||||
.Bidirectional `@OneToOne`
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/OneToOneBidirectionalTest.java[tags=associations-one-to-one-bidirectional-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/associations-one-to-one-bidirectional-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
This time, the `PhoneDetails` owns the association, and, like any bidirectional association, the parent-side can propagate its lifecycle to the child-side through cascading.
|
||||
|
||||
[[associations-one-to-one-bidirectional-lifecycle-example]]
|
||||
.Bidirectional `@OneToOne` lifecycle
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/OneToOneBidirectionalTest.java[tags=associations-one-to-one-bidirectional-lifecycle-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/associations-one-to-one-bidirectional-lifecycle-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
When using a bidirectional `@OneToOne` association, Hibernate enforces the unique constraint upon fetching the child-side.
|
||||
If there is more than one child associated with the same parent, Hibernate will throw a constraint violation exception.
|
||||
Continuing the previous example, when adding another `PhoneDetails`, Hibernate validates the uniqueness constraint when reloading the `Phone` object.
|
||||
|
||||
[[associations-one-to-one-bidirectional-constraint-example]]
|
||||
.Bidirectional `@OneToOne` unique constraint
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/OneToOneBidirectionalTest.java[tags=associations-one-to-one-bidirectional-constraint-example,indent=0]
|
||||
----
|
||||
====
|
||||
|
||||
[[associations-many-to-many]]
|
||||
==== `@ManyToMany`
|
||||
|
||||
The `@ManyToMany` association requires a link table that joins two entities.
|
||||
Like the `@OneToMany` association, `@ManyToMany` can be either unidirectional or bidirectional.
|
||||
|
||||
[[associations-many-to-many-unidirectional]]
|
||||
===== Unidirectional `@ManyToMany`
|
||||
|
||||
[[associations-many-to-many-unidirectional-example]]
|
||||
.Unidirectional `@ManyToMany`
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/ManyToManyUnidirectionalTest.java[tags=associations-many-to-many-unidirectional-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/associations-many-to-many-unidirectional-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
Just like with unidirectional `@OneToMany` associations, the link table is controlled by the owning side.
|
||||
|
||||
When an entity is removed from the `@ManyToMany` collection, Hibernate simply deletes the joining record in the link table.
|
||||
Unfortunately, this operation requires removing all entries associated with a given parent and recreating the ones that are listed in the currently running persistence context.
|
||||
|
||||
[[associations-many-to-many-unidirectional-lifecycle-example]]
|
||||
.Unidirectional `@ManyToMany` lifecycle
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/ManyToManyUnidirectionalTest.java[tags=associations-many-to-many-unidirectional-lifecycle-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/associations-many-to-many-unidirectional-lifecycle-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
For `@ManyToMany` associations, it doesn't make sense to cascade the `REMOVE` entity state transition because it will propagate beyond the link table.
|
||||
Since the other side might be referenced by other entities on the parent-side, the automatic removal might result in a `ConstraintViolationException`.
|
||||
|
||||
For example, if `@ManyToMany(cascade = CascadeType.ALL)` were defined and the first person were deleted,
|
||||
Hibernate would throw an exception because another person is still associated with the address that's being deleted.
|
||||
|
||||
[source,java]
|
||||
----
|
||||
Person person1 = entityManager.find(Person.class, personId);
|
||||
entityManager.remove(person1);
|
||||
|
||||
Caused by: javax.persistence.PersistenceException: org.hibernate.exception.ConstraintViolationException: could not execute statement
|
||||
Caused by: org.hibernate.exception.ConstraintViolationException: could not execute statement
|
||||
Caused by: java.sql.SQLIntegrityConstraintViolationException: integrity constraint violation: foreign key no action; FKM7J0BNABH2YR0PE99IL1D066U table: PERSON_ADDRESS
|
||||
----
|
||||
====
|
||||
|
||||
By simply removing the parent-side, Hibernate can safely remove the associated link records as you can see in the following example:
|
||||
|
||||
[[associations-many-to-many-unidirectional-remove-example]]
|
||||
.Unidirectional `@ManyToMany` entity removal
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/ManyToManyUnidirectionalTest.java[tags=associations-many-to-many-unidirectional-remove-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/associations-many-to-many-unidirectional-remove-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
[[associations-many-to-many-bidirectional]]
|
||||
===== Bidirectional `@ManyToMany`
|
||||
|
||||
A bidirectional `@ManyToMany` association has an owning and a `mappedBy` side.
|
||||
To preserve synchronicity between both sides, it's good practice to provide helper methods for adding or removing child entities.
|
||||
|
||||
[[associations-many-to-many-bidirectional-example]]
|
||||
.Bidirectional `@ManyToMany`
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/ManyToManyBidirectionalTest.java[tags=associations-many-to-many-bidirectional-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/associations-many-to-many-bidirectional-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
With the helper methods in place, the synchronicity management can be simplified, as you can see in the following example:
|
||||
|
||||
[[associations-many-to-many-bidirectional-lifecycle-example]]
|
||||
.Bidirectional `@ManyToMany` lifecycle
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/ManyToManyBidirectionalTest.java[tags=associations-many-to-many-bidirectional-lifecycle-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/associations-many-to-many-bidirectional-lifecycle-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
If a bidirectional `@OneToMany` association performs better when removing or changing the order of child elements,
|
||||
the `@ManyToMany` relationship cannot benefit from such an optimization because the foreign key side is not in control.
|
||||
To overcome this limitation, the link table must be directly exposed and the `@ManyToMany` association split into two bidirectional `@OneToMany` relationships.
|
||||
|
||||
[[associations-many-to-many-bidirectional-with-link-entity]]
|
||||
===== Bidirectional many-to-many with a link entity
|
||||
|
||||
The most natural `@ManyToMany` association follows the same logic employed by the database schema,
|
||||
and the link table has an associated entity which controls the relationship for both sides that need to be joined.
|
||||
|
||||
[[associations-many-to-many-bidirectional-with-link-entity-example]]
|
||||
.Bidirectional many-to-many with link entity
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/ManyToManyBidirectionalWithLinkEntityTest.java[tags=associations-many-to-many-bidirectional-with-link-entity-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/associations-many-to-many-bidirectional-with-link-entity-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
Both the `Person` and the `Address` have a `mappedBy` `@OneToMany` side, while the `PersonAddress` owns the `person` and the `address` `@ManyToOne` associations.
|
||||
Because this mapping is formed out of two bidirectional associations, the helper methods are even more relevant.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The aforementioned example uses a Hibernate-specific mapping for the link entity since JPA doesn't allow building a composite identifier out of multiple `@ManyToOne` associations.
|
||||
For more details, see the <<chapters/domain/identifiers.adoc#identifiers-composite-associations,Composite identifiers - associations>> section.
|
||||
====
|
||||
|
||||
The entity state transitions are better managed than in the previous bidirectional `@ManyToMany` case.
|
||||
|
||||
[[associations-many-to-many-bidirectional-with-link-entity-lifecycle-example]]
|
||||
.Bidirectional many-to-many with link entity lifecycle
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/ManyToManyBidirectionalWithLinkEntityTest.java[tags=associations-many-to-many-bidirectional-with-link-entity-lifecycle-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/associations-many-to-many-bidirectional-with-link-entity-lifecycle-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
There is only one delete statement executed because, this time, the association is controlled by the `@ManyToOne` side which only has to monitor the state of the underlying foreign key relationship to trigger the right DML statement.
|
|
@ -0,0 +1,628 @@
|
|||
[[collections]]
|
||||
=== Collections
|
||||
:sourcedir: ../../../../../test/java/org/hibernate/userguide/collections
|
||||
:extrasdir: extras/collections
|
||||
|
||||
Naturally, Hibernate also allows persisting collections.
|
||||
These persistent collections can contain almost any other Hibernate type, including: basic types, custom types, components and references to other entities.
|
||||
In this context, the distinction between value and reference semantics is very important.
|
||||
An object in a collection might be handled with _value_ semantics (its life cycle fully depending on the collection owner),
|
||||
or it might be a reference to another entity with its own life cycle.
|
||||
In the latter case, only the _link_ between the two objects is considered to be a state held by the collection.
|
||||
|
||||
The owner of the collection is always an entity, even if the collection is defined by an embeddable type.
|
||||
Collections form one/many-to-many associations between types so there can be:
|
||||
|
||||
- value type collections
|
||||
- embeddable type collections
|
||||
- entity collections
|
||||
|
||||
Hibernate uses its own collection implementations which are enriched with lazy-loading, caching or state change detection semantics.
|
||||
For this reason, persistent collections must be declared as an interface type.
|
||||
The actual interface might be `java.util.Collection`, `java.util.List`, `java.util.Set`, `java.util.Map`, `java.util.SortedSet`, `java.util.SortedMap` or even other object types (meaning you will have to write an implementation of `org.hibernate.usertype.UserCollectionType`).
|
||||
|
||||
As the following example demonstrates, it's important to use the interface type and not the collection implementation, as declared in the entity mapping.
|
||||
|
||||
[[collections-collection-proxy-example]]
|
||||
.Hibernate uses its own collection implementations
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/BasicTypeElementCollectionTest.java[tags=collections-collection-proxy-entity-example,indent=0]
|
||||
|
||||
include::{sourcedir}/BasicTypeElementCollectionTest.java[tags=collections-collection-proxy-usage-example,indent=0]
|
||||
----
|
||||
====
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
It is important that collections be defined using the appropriate Java Collections Framework interface rather than a specific implementation.
|
||||
From a theoretical perspective, this just follows good design principles.
|
||||
From a practical perspective, Hibernate (like other persistence providers) will use their own collection implementations which conform to the Java Collections Framework interfaces.
|
||||
====
|
||||
|
||||
The persistent collections injected by Hibernate behave like `ArrayList`, `HashSet`, `TreeSet`, `HashMap` or `TreeMap`, depending on the interface type.
|
||||
|
||||
[[collections-synopsis]]
|
||||
==== Collections as a value type
|
||||
|
||||
Value and embeddable type collections behave similarly to simple value types because they are automatically persisted when referenced by a persistent object and automatically deleted when unreferenced.
|
||||
If a collection is passed from one persistent object to another, its elements might be moved from one table to another.
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
Two entities cannot share a reference to the same collection instance.
|
||||
Collection-valued properties do not support null value semantics because Hibernate does not distinguish between a null collection reference and an empty collection.
|
||||
====
|
||||
|
||||
[[collections-value]]
|
||||
==== Collections of value types
|
||||
|
||||
Collections of value type include basic and embeddable types.
|
||||
Collections cannot be nested, and, when used in collections, embeddable types are not allowed to define other collections.
|
||||
|
||||
For collections of value types, JPA 2.0 defines the `@ElementCollection` annotation.
|
||||
The lifecycle of the value-type collection is entirely controlled by its owning entity.
|
||||
|
||||
Considering the previous example mapping, when clearing the phone collection, Hibernate deletes all the associated phones.
|
||||
When adding a new element to the value type collection, Hibernate issues a new insert statement.
|
||||
|
||||
[[collections-value-type-collection-lifecycle-example]]
|
||||
.Value type collection lifecycle
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/BasicTypeElementCollectionTest.java[tags=collections-value-type-collection-lifecycle-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/collections-value-type-collection-lifecycle-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
While removing all elements or adding new ones is rather straightforward, removing a certain entry actually requires reconstructing the whole collection from scratch.
|
||||
|
||||
[[collections-value-type-collection-remove-example]]
|
||||
.Removing collection elements
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/BasicTypeElementCollectionTest.java[tags=collections-value-type-collection-remove-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/collections-value-type-collection-remove-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
Depending on the number of elements, this behavior might not be efficient, if many elements need to be deleted and reinserted back into the database table.
|
||||
A workaround is to use an `@OrderColumn`, which, although not as efficient as when using the actual link table primary key, might improve the efficiency of the remove operations.
|
||||
|
||||
[[collections-value-type-collection-order-column-remove-example]]
|
||||
.Removing collection elements using the order column
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/BasicTypeOrderColumnElementCollectionTest.java[tags=collections-value-type-collection-order-column-remove-entity-example,indent=0]
|
||||
|
||||
include::{sourcedir}/BasicTypeOrderColumnElementCollectionTest.java[tags=collections-value-type-collection-order-column-remove-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/collections-value-type-collection-order-column-remove-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The `@OrderColumn` column works best when removing from the tail of the collection, as it only requires a single delete statement.
|
||||
Removing from the head or the middle of the collection requires deleting the extra elements and updating the remaining ones to preserve element order.
|
||||
====
|
||||
|
||||
Embeddable type collections behave the same way as value type collections.
|
||||
Adding embeddables to the collection triggers the associated insert statements and removing elements from the collection will generate delete statements.
|
||||
|
||||
[[collections-embeddable-type-collection-lifecycle-example]]
|
||||
.Embeddable type collections
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/EmbeddableTypeElementCollectionTest.java[tags=collections-embeddable-type-collection-lifecycle-entity-example,indent=0]
|
||||
|
||||
include::{sourcedir}/EmbeddableTypeElementCollectionTest.java[tags=collections-embeddable-type-collection-lifecycle-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/collections-embeddable-type-collection-lifecycle-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
[[collections-entity]]
|
||||
==== Collections of entities
|
||||
|
||||
If value type collections can only form a one-to-many association between an owner entity and multiple basic or embeddable types,
|
||||
entity collections can represent both <<chapters/domain/associations.adoc#associations-one-to-many,@OneToMany>> and <<chapters/domain/associations.adoc#associations-many-to-many,@ManyToMany>> associations.
|
||||
|
||||
From a relational database perspective, associations are defined by the foreign key side (the child-side).
|
||||
With value type collections, only the entity can control the association (the parent-side), but for a collection of entities, both sides of the association are managed by the persistence context.
|
||||
|
||||
For this reason, entity collections can be divided into two main categories: unidirectional and bidirectional associations.
|
||||
Unidirectional associations are very similar to value type collections since only the parent side controls this relationship.
|
||||
Bidirectional associations are trickier since, even if both sides need to be in sync at all times, only one side is responsible for managing the association.
|
||||
A bidirectional association has an _owning_ side and an _inverse (mappedBy)_ side.
|
||||
|
||||
Another way of categorizing entity collections is by the underlying collection type, and so we can have:
|
||||
|
||||
* bags
|
||||
* indexed lists
|
||||
* sets
|
||||
* sorted sets
|
||||
* maps
|
||||
* sorted maps
|
||||
* arrays
|
||||
|
||||
In the following sections, we will go through all these collection types and discuss both unidirectional and bidirectional associations.
|
||||
|
||||
[[collections-bag]]
|
||||
==== Bags
|
||||
|
||||
Bags are unordered lists and we can have unidirectional bags or bidirectional ones.
|
||||
|
||||
[[collections-unidirectional-bag]]
|
||||
===== Unidirectional bags
|
||||
|
||||
The unidirectional bag is mapped using a single `@OneToMany` annotation on the parent side of the association.
|
||||
Behind the scenes, Hibernate requires an association table to manage the parent-child relationship, as we can see in the following example:
|
||||
|
||||
[[collections-unidirectional-bag-example]]
|
||||
.Unidirectional bag
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/UnidirectionalBagTest.java[tags=collections-unidirectional-bag-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/collections-unidirectional-bag-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
Because both the parent and the child sides are entities, the persistence context manages each entity separately.
|
||||
Cascades can propagate an entity state transition from a parent entity to its children.
|
||||
====
|
||||
|
||||
By marking the parent side with the `CascadeType.ALL` attribute, the unidirectional association lifecycle becomes very similar to that of a value type collection.
|
||||
|
||||
[[collections-unidirectional-bag-lifecycle-example]]
|
||||
.Unidirectional bag lifecycle
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/UnidirectionalBagTest.java[tags=collections-unidirectional-bag-lifecycle-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/collections-unidirectional-bag-lifecycle-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
In the example above, once the parent entity is persisted, the child entities are going to be persisted as well.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
Just like value type collections, unidirectional bags are not as efficient when it comes to modifying the collection structure (removing or reshuffling elements).
|
||||
Because the parent-side cannot uniquely identify each individual child, Hibernate might delete all child table rows associated with the parent entity and re-add them according to the current collection state.
|
||||
====
|
||||
|
||||
[[collections-bidirectional-bag]]
|
||||
===== Bidirectional bags
|
||||
|
||||
The bidirectional bag is the most common type of entity collection.
|
||||
The `@ManyToOne` side is the owning side of the bidirectional bag association, while the `@OneToMany` is the _inverse_ side, being marked with the `mappedBy` attribute.
|
||||
|
||||
[[collections-bidirectional-bag-example]]
|
||||
.Bidirectional bag
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/BidirectionalBagTest.java[tags=collections-bidirectional-bag-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/collections-bidirectional-bag-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
[[collections-bidirectional-bag-lifecycle-example]]
|
||||
.Bidirectional bag lifecycle
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/BidirectionalBagTest.java[tags=collections-bidirectional-bag-lifecycle-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/collections-bidirectional-bag-lifecycle-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
[[collections-bidirectional-bag-orphan-removal-example]]
|
||||
.Bidirectional bag with orphan removal
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/BidirectionalBagOrphanRemovalTest.java[tags=collections-bidirectional-bag-orphan-removal-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/collections-bidirectional-bag-orphan-removal-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
When rerunning the previous example, the child will get removed because the parent-side propagates the removal upon disassociating the child entity reference.
|
||||
|
||||
[[collections-list]]
|
||||
==== Ordered Lists
|
||||
|
||||
Although they use the `List` interface on the Java side, bags don't retain element order.
|
||||
To preserve the collection element order, there are two possibilities:
|
||||
|
||||
`@OrderBy`:: the collection is ordered upon retrieval using a child entity property
|
||||
`@OrderColumn`:: the collection uses a dedicated order column in the collection link table
|
||||
|
||||
[[collections-unidirectional-ordered-list]]
|
||||
===== Unidirectional ordered lists
|
||||
|
||||
When using the `@OrderBy` annotation, the mapping looks as follows:
|
||||
|
||||
[[collections-unidirectional-ordered-list-order-by-example]]
|
||||
.Unidirectional `@OrderBy` list
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/UnidirectionalOrderedByListTest.java[tags=collections-unidirectional-ordered-list-order-by-example,indent=0]
|
||||
----
|
||||
====
|
||||
|
||||
The database mapping is the same as with the <<collections-unidirectional-bag>> example, so it won't be repeated.
|
||||
Upon fetching the collection, Hibernate generates the following select statement:
|
||||
|
||||
[[collections-unidirectional-ordered-list-order-by-select-example]]
|
||||
.Unidirectional `@OrderBy` list select statement
|
||||
====
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/collections-unidirectional-ordered-list-order-by-select-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
The child table column is used to order the list elements.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The `@OrderBy` annotation can take multiple entity properties, and each property can take an ordering direction too (e.g. `@OrderBy("name ASC, type DESC")`).
|
||||
|
||||
If no property is specified (e.g. `@OrderBy`), the primary key of the child entity table is used for ordering.
|
||||
====
|
||||
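As an illustration, a minimal sketch of a multi-property `@OrderBy` (using a hypothetical `Project` parent and a `Task` child with persistent `priority` and `title` attributes) could look like this:

[source,java]
----
import java.util.ArrayList;
import java.util.List;

import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.OneToMany;
import javax.persistence.OrderBy;

// Hypothetical parent entity; Task is assumed to be an entity
// with persistent "priority" and "title" attributes.
@Entity
public class Project {

    @Id
    private Long id;

    // Highest priority first, then alphabetically by title.
    @OneToMany
    @OrderBy("priority DESC, title ASC")
    private List<Task> tasks = new ArrayList<>();

    // getters and setters omitted for brevity
}
----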
|
||||
Another ordering option is to use the `@OrderColumn` annotation:
|
||||
|
||||
[[collections-unidirectional-ordered-list-order-column-example]]
|
||||
.Unidirectional `@OrderColumn` list
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/UnidirectionalOrderColumnListTest.java[tags=collections-unidirectional-ordered-list-order-column-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/collections-unidirectional-ordered-list-order-column-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
This time, the link table takes the `order_id` column and uses it to materialize the collection element order.
|
||||
When fetching the list, the following select query is executed:
|
||||
|
||||
[[collections-unidirectional-ordered-list-order-column-select-example]]
|
||||
.Unidirectional `@OrderColumn` list select statement
|
||||
====
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/collections-unidirectional-ordered-list-order-column-select-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
With the `order_id` column in place, Hibernate can order the list in memory after it has been fetched from the database.
|
||||
|
||||
[[collections-bidirectional-ordered-list]]
|
||||
===== Bidirectional ordered lists
|
||||
|
||||
The mapping is similar to the <<collections-bidirectional-bag>> example, except that the parent side is annotated with either `@OrderBy` or `@OrderColumn`.
|
||||
|
||||
[[collections-bidirectional-ordered-list-order-by-example]]
|
||||
.Bidirectional `@OrderBy` list
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/BidirectionalOrderByListTest.java[tags=collections-bidirectional-ordered-list-order-by-example,indent=0]
|
||||
----
|
||||
====
|
||||
|
||||
Just like with the unidirectional `@OrderBy` list, the `number` column is used to order the elements at the SQL level.
|
||||
|
||||
When using the `@OrderColumn` annotation, the `order_id` column is going to be embedded in the child table:
|
||||
|
||||
[[collections-bidirectional-ordered-list-order-column-example]]
|
||||
.Bidirectional `@OrderColumn` list
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/BidirectionalOrderColumnListTest.java[tags=collections-bidirectional-ordered-list-order-column-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/collections-bidirectional-ordered-list-order-column-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
When fetching the collection, Hibernate will use the fetched order column values to sort the elements according to the `@OrderColumn` mapping.
|
||||
|
||||
[[collections-set]]
|
||||
==== Sets
|
||||
|
||||
Sets are collections that don't allow duplicate entries and Hibernate supports both the unordered `Set` and the natural-ordering `SortedSet`.
|
||||
|
||||
[[collections-unidirectional-set]]
|
||||
===== Unidirectional sets
|
||||
|
||||
The unidirectional set uses a link table to hold the parent-child associations and the entity mapping looks as follows:
|
||||
|
||||
[[collections-unidirectional-set-example]]
|
||||
.Unidirectional set
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/UnidirectionalSetTest.java[tags=collections-unidirectional-set-example,indent=0]
|
||||
----
|
||||
====
|
||||
|
||||
The unidirectional set lifecycle is similar to that of the <<collections-unidirectional-bag>>, so it can be omitted.
|
||||
The only difference is that `Set` doesn't allow duplicates, but this constraint is enforced by the Java object contract rather than the database mapping.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
When using sets, it's very important to supply proper equals/hashCode implementations for child entities.
|
||||
In the absence of a custom equals/hashCode implementation logic, Hibernate will use the default Java reference-based object equality which might render unexpected results when mixing detached and managed object instances.
|
||||
====
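
A common approach, sketched below under the assumption that the child entity exposes a unique business key (here, the phone `number`), is to base equality on that key rather than on the generated identifier:

[source,java]
----
// a minimal sketch: equals/hashCode based on a business key (the "number" attribute is an assumption)
@Entity
public class Phone {

    @Id
    @GeneratedValue
    private Long id;

    @NaturalId
    private String number;

    @Override
    public boolean equals(Object o) {
        if ( this == o ) {
            return true;
        }
        if ( !( o instanceof Phone ) ) {
            return false;
        }
        return Objects.equals( number, ( (Phone) o ).number );
    }

    @Override
    public int hashCode() {
        return Objects.hash( number );
    }
}
----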
|
||||
|
||||
[[collections-bidirectional-set]]
|
||||
===== Bidirectional sets
|
||||
|
||||
Just like bidirectional bags, the bidirectional set doesn't use a link table, and the child table has a foreign key referencing the parent table primary key.
|
||||
The lifecycle is just like with bidirectional bags, except that duplicates are filtered out.
|
||||
|
||||
[[collections-bidirectional-set-example]]
|
||||
.Bidirectional set
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/BidirectionalSetTest.java[tags=collections-bidirectional-set-example,indent=0]
|
||||
----
|
||||
====
|
||||
|
||||
[[collections-sorted-set]]
|
||||
==== Sorted sets
|
||||
|
||||
For sorted sets, the entity mapping must use the `SortedSet` interface instead.
|
||||
According to the `SortedSet` contract, all elements must implement the `Comparable` interface and therefore provide the sorting logic.
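
As a sketch (the `Phone` entity and its `number` attribute are assumptions), the natural ordering could be supplied by the child entity itself:

[source,java]
----
// a minimal sketch: the child entity provides its natural ordering (names are illustrative)
@Entity
public class Phone implements Comparable<Phone> {

    @Id
    private Long id;

    private String number;

    @Override
    public int compareTo(Phone other) {
        return number.compareTo( other.number );
    }
}
----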
|
||||
|
||||
[[collections-unidirectional-sorted-set]]
|
||||
===== Unidirectional sorted sets
|
||||
|
||||
A `SortedSet` that relies on the natural sorting order given by the child element `Comparable` implementation logic must be annotated with the `@SortNatural` Hibernate annotation.
|
||||
|
||||
[[collections-unidirectional-sorted-set-natural-comparator-example]]
|
||||
.Unidirectional natural sorted set
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/UnidirectionalSortedSetTest.java[tags=collections-unidirectional-sorted-set-natural-comparator-example,indent=0]
|
||||
----
|
||||
====
|
||||
|
||||
The lifecycle and the database mapping are identical to the <<collections-unidirectional-bag>>, so they are intentionally omitted.
|
||||
|
||||
To provide a custom sorting logic, Hibernate also provides a `@SortComparator` annotation:
|
||||
|
||||
[[collections-unidirectional-sorted-set-custom-comparator-example]]
|
||||
.Unidirectional custom comparator sorted set
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/UnidirectionalComparatorSortedSetTest.java[tags=collections-unidirectional-sorted-set-custom-comparator-example,indent=0]
|
||||
----
|
||||
====
|
||||
|
||||
[[collections-bidirectional-sorted-set]]
|
||||
===== Bidirectional sorted sets
|
||||
|
||||
The `@SortNatural` and `@SortComparator` annotations work the same way for bidirectional sorted sets, too:
|
||||
|
||||
[[collections-bidirectional-sorted-set-example]]
|
||||
.Bidirectional natural sorted set
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/BidirectionalSortedSetTest.java[tags=collections-bidirectional-sorted-set-example,indent=0]
|
||||
|
||||
include::{sourcedir}/UnidirectionalComparatorSortedSetTest.java[lines=75..77,indent=0]
|
||||
----
|
||||
====
|
||||
|
||||
[[collections-map]]
|
||||
==== Maps
|
||||
|
||||
A `java.util.Map` is a ternary association because it requires a parent entity, a map key, and a value.
|
||||
An entity can either be a map key or a map value, depending on the mapping.
|
||||
Hibernate allows using the following map keys:
|
||||
|
||||
`MapKeyColumn`:: for value type maps, the map key is a column in the link table that defines the grouping logic
|
||||
`MapKey`:: the map key is either the primary key or another property of the entity stored as a map entry value
|
||||
`MapKeyEnumerated`:: the map key is an `Enum` of the target child entity
|
||||
`MapKeyTemporal`:: the map key is a `Date` or a `Calendar` of the target child entity
|
||||
`MapKeyJoinColumn`:: the map key is an entity mapped as an association in the child entity that's stored as a map entry key
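
For instance, a plain value-type map keyed by a dedicated column could be sketched as follows (the entity, table, and column names are illustrative assumptions):

[source,java]
----
// a minimal sketch: a value-type map whose key lives in a dedicated column of the collection table
@Entity
public class Person {

    @Id
    private Long id;

    @ElementCollection
    @CollectionTable(name = "phone_book", joinColumns = @JoinColumn(name = "person_id"))
    @MapKeyColumn(name = "phone_type")
    @Column(name = "phone_number")
    private Map<String, String> phoneBook = new HashMap<>();
}
----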
|
||||
|
||||
[[collections-map-value-type]]
|
||||
===== Value type maps
|
||||
|
||||
A map of value type must use the `@ElementCollection` annotation, just like value type lists, bags or sets.
|
||||
|
||||
[[collections-map-value-type-entity-key-example]]
|
||||
.Value type map with an entity as a map key
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/ElementCollectionMapTest.java[tags=collections-map-value-type-entity-key-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/collections-map-value-type-entity-key-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
Adding entries to the map generates the following SQL statements:
|
||||
|
||||
[[collections-map-value-type-entity-key-add-example]]
|
||||
.Adding value type map entries
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/ElementCollectionMapTest.java[tags=collections-map-value-type-entity-key-add-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/collections-map-value-type-entity-key-add-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
[[collections-map-unidirectional]]
|
||||
===== Unidirectional maps
|
||||
|
||||
A unidirectional map exposes a parent-child association from the parent-side only.
|
||||
The following example shows a unidirectional map which also uses a `@MapKeyTemporal` annotation.
|
||||
The map key is a timestamp and it's taken from the child entity table.
|
||||
|
||||
[[collections-map-unidirectional-example]]
|
||||
.Unidirectional Map
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/UnidirectionalMapTest.java[tags=collections-map-unidirectional-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/collections-map-unidirectional-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
[[collections-map-bidirectional]]
|
||||
===== Bidirectional maps
|
||||
|
||||
Like most bidirectional associations, this relationship is owned by the child-side, while the parent is the inverse side and can propagate its own state transitions to the child entities.
|
||||
In the following example, you can see that `@MapKeyEnumerated` was used so that the `Phone` enumeration becomes the map key.
|
||||
|
||||
[[collections-map-bidirectional-example]]
|
||||
.Bidirectional Map
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/BidirectionalMapTest.java[tags=collections-map-bidirectional-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/collections-map-bidirectional-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
[[collections-array]]
|
||||
==== Arrays
|
||||
|
||||
When it comes to arrays, there is quite a difference between Java arrays and relational database array types (e.g. VARRAY, ARRAY).
|
||||
First, not all database systems implement the SQL-99 ARRAY type, and, for this reason, Hibernate doesn't support native database array types.
|
||||
Second, Java arrays are relevant for basic types only since storing multiple embeddables or entities should always be done using the Java Collection API.
|
||||
|
||||
[[collections-array-binary]]
|
||||
==== Arrays as binary
|
||||
|
||||
By default, Hibernate will choose a BINARY type, as supported by the current `Dialect`.
|
||||
|
||||
[[collections-array-binary-example]]
|
||||
.Binary arrays
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/ArrayTest.java[tags=collections-array-binary-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/collections-array-binary-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
[[collections-as-basic]]
|
||||
==== Collections as basic value type
|
||||
|
||||
Notice how all the previous examples explicitly mark the collection attribute as either `@ElementCollection`, `@OneToMany` or `@ManyToMany`.
|
||||
Collections not marked as such require a custom Hibernate `Type` and the collection elements must be stored in a single database column.
|
||||
|
||||
This is sometimes beneficial. Consider a use-case such as a `VARCHAR` column that represents a delimited list/set of Strings.
|
||||
|
||||
[[collections-comma-delimited-collection-example]]
|
||||
.Comma delimited collection
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/BasicTypeCollectionTest.java[tags=collections-comma-delimited-collection-example,indent=0]
|
||||
|
||||
include::{sourcedir}/type/CommaDelimitedStringsJavaTypeDescriptor.java[tags=collections-comma-delimited-collection-example,indent=0]
|
||||
|
||||
include::{sourcedir}/type/CommaDelimitedStringsType.java[tags=collections-comma-delimited-collection-example,indent=0]
|
||||
----
|
||||
====
|
||||
|
||||
The developer can use the comma-delimited collection like any other collection we've discussed so far, and Hibernate will take care of the type transformation.
|
||||
The collection itself behaves like any other basic value type, as its lifecycle is bound to its owner entity.
|
||||
|
||||
[[collections-comma-delimited-collection-lifecycle-example]]
|
||||
.Comma delimited collection lifecycle
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/BasicTypeCollectionTest.java[tags=collections-comma-delimited-collection-lifecycle-example,indent=0]
|
||||
----
|
||||
|
||||
[source,sql]
|
||||
----
|
||||
include::{extrasdir}/collections-comma-delimited-collection-lifecycle-example.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
See the Hibernate Integrations Guide for more details on developing custom value type mappings.
|
|
@ -0,0 +1,44 @@
|
|||
[[dynamic-model]]
|
||||
=== Dynamic Model
|
||||
:sourcedir: extras
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
JPA only acknowledges the entity model mapping so, if you are concerned about JPA provider portability, it's best to stick to the strict POJO model.
|
||||
On the other hand, Hibernate can work with both POJO entities as well as with dynamic entity models.
|
||||
====
|
||||
|
||||
[[mapping-model-dynamic]]
|
||||
==== Dynamic mapping models
|
||||
|
||||
Persistent entities do not necessarily have to be represented as POJO/JavaBean classes.
|
||||
Hibernate also supports dynamic models (using `Map`s of `Map`s at runtime).
|
||||
With this approach, you do not write persistent classes, only mapping files.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The mapping of dynamic models is beyond the scope of this chapter.
|
||||
We will discuss using such models with Hibernate, in the <<mapping, next chapter>>.
|
||||
====
|
||||
|
||||
A given entity has just one entity mode within a given SessionFactory.
|
||||
This is a change from previous versions, which allowed defining multiple entity modes for an entity and selecting which one to load.
|
||||
Entity modes can now be mixed within a domain model; a dynamic entity might reference a POJO entity, and vice versa.
|
||||
|
||||
.Working with Dynamic Domain Models
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/dynamic/listing10.java[]
|
||||
----
|
||||
====
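
As a rough sketch (assuming a dynamic `Book` entity declared in an `hbm.xml` mapping with `title` and `isbn` properties), working with a dynamic model boils down to passing `Map` instances and the entity name to the `Session`:

[source,java]
----
// a minimal sketch, assuming a dynamic "Book" entity mapped in hbm.xml with "title" and "isbn" properties
Session session = sessionFactory.openSession();
session.beginTransaction();

Map<String, Object> book = new HashMap<>();
book.put( "title", "A Dynamic Book" );
book.put( "isbn", "123-456-7890" );

// the entity name selects which dynamic mapping to use
session.save( "Book", book );

session.getTransaction().commit();
session.close();
----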
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The main advantage of dynamic models is quick turnaround time for prototyping without the need for entity class implementation.
|
||||
The main downside is that you lose compile-time type checking and will likely deal with many exceptions at runtime.
|
||||
However, thanks to the Hibernate mapping, the database schema can easily be normalized and sound, allowing you to add a proper domain model implementation on top later on.
|
||||
|
||||
Dynamic models are also great for certain integration use cases.
|
||||
Envers, for example, makes extensive use of dynamic models to represent the historical data.
|
||||
====
|
|
@ -0,0 +1,164 @@
|
|||
[[embeddables]]
|
||||
=== Embeddable types
|
||||
:sourcedir: extras
|
||||
|
||||
Historically Hibernate called these components.
|
||||
JPA calls them embeddables.
|
||||
Either way the concept is the same: a composition of values.
|
||||
For example, we might have a `Name` class that is a composition of first-name and last-name, or an `Address` class that is a composition of street, city, postal code, etc.
|
||||
|
||||
.Usage of the word _embeddable_
|
||||
[NOTE]
|
||||
====
|
||||
To avoid any confusion with the annotation that marks a given embeddable type, the annotation will be further referred to as `@Embeddable`.
|
||||
|
||||
Throughout this chapter and thereafter, for brevity's sake, embeddable types may also be referred to as _embeddable_.
|
||||
====
|
||||
|
||||
.Simple embeddable type example
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/embeddable/Name.java[]
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/embeddable/Address.java[]
|
||||
----
|
||||
====
|
||||
|
||||
An embeddable type is another form of value type, and its lifecycle is bound to a parent entity type, therefore inheriting the attribute access from its parent (for details on attribute access, see <<chapters/domain/entity.adoc#access-embeddable-types,Access strategies>>).
|
||||
|
||||
Embeddable types can be made up of basic values as well as associations, with the caveat that, when used as collection elements, they cannot define collections themselves.
|
||||
|
||||
==== Component / Embedded
|
||||
|
||||
Most often, embeddable types are used to group multiple basic type mappings and reuse them across several entities.
|
||||
|
||||
.Simple Embeddable
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/embeddable/Person.java[]
|
||||
----
|
||||
====
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
JPA defines two terms for working with an embeddable type: `@Embeddable` and `@Embedded`.
|
||||
`@Embeddable` is used to describe the mapping type itself (e.g. `Name`).
|
||||
`@Embedded` is for referencing a given embeddable type (e.g. `person.name`).
|
||||
====
|
||||
|
||||
So, the embeddable type is represented by the `Name` class and the parent makes use of it through the `person.name` object composition.
|
||||
|
||||
.Person table
|
||||
====
|
||||
[source,sql]
|
||||
----
|
||||
include::{sourcedir}/embeddable/Person1.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
The composed values are mapped to the same table as the parent table.
|
||||
Composition is part of good OO data modeling (idiomatic Java).
|
||||
In fact, that table could also be mapped by the following entity type instead.
|
||||
|
||||
.Alternative to embeddable type composition
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/embeddable/Person_alt.java[]
|
||||
----
|
||||
====
|
||||
|
||||
The composition form is certainly more object-oriented, and that becomes more evident as we work with multiple embeddable types.
|
||||
|
||||
[[embeddable-multiple]]
|
||||
==== Multiple embeddable types
|
||||
|
||||
.Multiple embeddable types
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/embeddable/Contact.java[]
|
||||
----
|
||||
====
|
||||
|
||||
Although from an object-oriented perspective, it's much more convenient to work with embeddable types, this example doesn't work as-is.
|
||||
When the same embeddable type is included multiple times in the same parent entity type, the JPA specification demands setting the associated column names explicitly.
|
||||
|
||||
This requirement is due to how object properties are mapped to database columns.
|
||||
By default, JPA expects a database column to have the same name as its associated object property.
|
||||
When including multiple embeddables, the implicit name-based mapping rule doesn't work anymore because multiple object properties could end up being mapped to the same database column.
|
||||
|
||||
We have a few options to handle this issue.
|
||||
|
||||
[[embeddable-multiple-jpa]]
|
||||
==== JPA's AttributeOverride
|
||||
|
||||
JPA defines the `@AttributeOverride` annotation to handle this scenario.
|
||||
|
||||
.JPA's AttributeOverride
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/embeddable/Contact-AttributeOverride.java[]
|
||||
----
|
||||
====
|
||||
|
||||
This way, the mapping conflict is resolved by setting up explicit name-based property-to-column mappings.
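
As a sketch (the `Contact` entity, the `Address` embeddable, and its attributes are assumptions, not necessarily those of the included example), the overrides might look like:

[source,java]
----
// a minimal sketch: disambiguating two embedded Address attributes via explicit column names
@Entity
public class Contact {

    @Id
    private Long id;

    @Embedded
    @AttributeOverrides({
        @AttributeOverride(name = "street", column = @Column(name = "home_street")),
        @AttributeOverride(name = "city", column = @Column(name = "home_city"))
    })
    private Address homeAddress;

    @Embedded
    @AttributeOverrides({
        @AttributeOverride(name = "street", column = @Column(name = "work_street")),
        @AttributeOverride(name = "city", column = @Column(name = "work_city"))
    })
    private Address workAddress;
}
----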
|
||||
|
||||
[[embeddable-multiple-namingstrategy]]
|
||||
==== ImplicitNamingStrategy
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
This is a Hibernate specific feature.
|
||||
Users concerned with JPA provider portability should instead prefer explicit column naming with <<embeddable-multiple-jpa,`@AttributeOverride`>>.
|
||||
====
|
||||
|
||||
Hibernate naming strategies are covered in detail in <<naming.adoc#naming,Naming>>.
|
||||
However, for the purposes of this discussion, Hibernate has the capability to interpret implicit column names in a way that is safe for use with multiple embeddable types.
|
||||
|
||||
.Enabling embeddable type safe implicit naming
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/embeddable/component-safe-implicit-naming.java[]
|
||||
----
|
||||
====
|
||||
|
||||
====
|
||||
[source,sql]
|
||||
----
|
||||
include::{sourcedir}/embeddable/Contact-ImplicitNamingStrategy.sql[]
|
||||
----
|
||||
====
|
||||
|
||||
Now the "path" to attributes are used in the implicit column naming.
|
||||
You could even develop your own `ImplicitNamingStrategy` to apply special implicit naming rules.
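
A minimal bootstrap sketch, assuming the native `MetadataBuilder` API is used, could apply the component-path strategy like this:

[source,java]
----
// a minimal sketch: selecting the component-path implicit naming strategy at bootstrap time
StandardServiceRegistry standardRegistry = new StandardServiceRegistryBuilder().build();

Metadata metadata = new MetadataSources( standardRegistry )
    .addAnnotatedClass( Contact.class )
    .getMetadataBuilder()
    .applyImplicitNamingStrategy( ImplicitNamingStrategyComponentPathImpl.INSTANCE )
    .build();

SessionFactory sessionFactory = metadata.buildSessionFactory();
----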
|
||||
|
||||
[[embeddable-collections]]
|
||||
==== Collections of embeddable types
|
||||
|
||||
Collections of embeddable types are specifically value collections (as embeddable types are a value type).
|
||||
Value collections are covered in detail in <<collection.adoc#collections-value,Collections of value types>>.
|
||||
|
||||
[[embeddable-mapkey]]
|
||||
==== Embeddable types as Map key
|
||||
|
||||
Embeddable types can also be used as `Map` keys.
|
||||
This topic is covered in detail in <<collection.adoc#collections-map,Map - key>>.
|
||||
|
||||
[[embeddable-identifier]]
|
||||
==== Embeddable types as identifiers
|
||||
|
||||
Embeddable types can also be used as entity type identifiers.
|
||||
This usage is covered in detail in <<chapters/domain/identifiers.adoc#identifiers-composite,Composite identifiers>>.
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
Embeddable types that are used as collection entries, map keys or entity type identifiers cannot include their own collection mappings.
|
||||
====
|
|
@ -0,0 +1,303 @@
|
|||
[[entity]]
|
||||
=== Entity types
|
||||
:sourcedir: extras
|
||||
|
||||
.Usage of the word _entity_
|
||||
[NOTE]
|
||||
====
|
||||
The entity type describes the mapping between the actual persistable domain model object and a database table row.
|
||||
To avoid any confusion with the annotation that marks a given entity type, the annotation will be further referred to as `@Entity`.
|
||||
|
||||
Throughout this chapter and thereafter, entity types will simply be referred to as _entity_.
|
||||
====
|
||||
|
||||
[[entity-pojo]]
|
||||
==== POJO Models
|
||||
|
||||
Section _2.1 The Entity Class_ of the _JPA 2.1 specification_ defines its requirements for an entity class.
|
||||
Applications that wish to remain portable across JPA providers should adhere to these requirements.
|
||||
|
||||
* The entity class must be annotated with the `javax.persistence.Entity` annotation (or be denoted as such in XML mapping)
|
||||
* The entity class must have a public or protected no-argument constructor. It may define additional constructors as well.
|
||||
* The entity class must be a top-level class.
|
||||
* An enum or interface may not be designated as an entity.
|
||||
* The entity class must not be final. No methods or persistent instance variables of the entity class may be final.
|
||||
* If an entity instance is to be used remotely as a detached object, the entity class must implement the `Serializable` interface.
|
||||
* Both abstract and concrete classes can be entities. Entities may extend non-entity classes as well as entity classes, and non-entity classes may extend entity classes.
|
||||
* The persistent state of an entity is represented by instance variables, which may correspond to JavaBean-style properties.
|
||||
An instance variable must be directly accessed only from within the methods of the entity by the entity instance itself.
|
||||
The state of the entity is available to clients only through the entity’s accessor methods (getter/setter methods) or other business methods.
|
||||
|
||||
Hibernate, however, is not as strict in its requirements. The differences from the list above include:
|
||||
|
||||
* The entity class must have a no-argument constructor, which may be public, protected or package visibility. It may define additional constructors as well.
|
||||
* The entity class _need not_ be a top-level class.
|
||||
* Technically Hibernate can persist final classes or classes with final persistent state accessor (getter/setter) methods.
|
||||
However, it is generally not a good idea as doing so will stop Hibernate from being able to generate proxies for lazy-loading the entity.
|
||||
* Hibernate does not restrict the application developer from exposing instance variables and referencing them from outside the entity class itself.
|
||||
The validity of such a paradigm, however, is debatable at best.
|
||||
|
||||
Let's look at each requirement in detail.
|
||||
|
||||
[[entity-pojo-final]]
|
||||
==== Prefer non-final classes
|
||||
|
||||
A central feature of Hibernate is the ability to lazily load certain entity instance variables (attributes) via runtime proxies.
|
||||
This feature depends upon the entity class being non-final or else implementing an interface that declares all the attribute getters/setters.
|
||||
You can still persist final classes that do not implement such an interface with Hibernate,
|
||||
but you will not be able to use proxies for fetching lazy associations, therefore limiting your options for performance tuning.
|
||||
For the very same reason, you should also avoid declaring persistent attribute getters and setters as final.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
Starting in 5.0 Hibernate offers a more robust version of bytecode enhancement as another means for handling lazy loading.
|
||||
Hibernate had some bytecode re-writing capabilities prior to 5.0 but they were very rudimentary.
|
||||
See the <<chapters/pc/BytecodeEnhancement.adoc#BytecodeEnhancement,BytecodeEnhancement>> section for additional information on fetching and on bytecode enhancement.
|
||||
====
|
||||
|
||||
[[entity-pojo-constructor]]
|
||||
==== Implement a no-argument constructor
|
||||
|
||||
The entity class should have a no-argument constructor. Both Hibernate and JPA require this.
|
||||
|
||||
JPA requires that this constructor be defined as public or protected.
|
||||
Hibernate, for the most part, does not care about the constructor visibility, as long as the system SecurityManager allows overriding the visibility setting.
|
||||
That said, the constructor should be defined with at least package visibility if you wish to leverage runtime proxy generation.
|
||||
|
||||
[[entity-pojo-accessors]]
|
||||
==== Declare getters and setters for persistent attributes
|
||||
|
||||
The JPA specification requires this, otherwise the model would prevent accessing the entity persistent state fields directly from outside the entity itself.
|
||||
|
||||
Although Hibernate does not require it, it is recommended to follow the JavaBean conventions and define getters and setters for entity persistent attributes.
|
||||
Nevertheless, you can still tell Hibernate to directly access the entity fields.
|
||||
|
||||
Attributes (whether fields or getters/setters) need not be declared public.
|
||||
Hibernate can deal with attributes declared with public, protected, package or private visibility.
|
||||
Again, if you want to use runtime proxy generation for lazy loading, the getters/setters should have at least package visibility.
|
||||
|
||||
[[entity-pojo-identifier]]
|
||||
==== Provide identifier attribute(s)
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
Historically this was considered optional.
|
||||
However, not defining identifier attribute(s) on the entity should be considered a deprecated feature that will be removed in an upcoming release.
|
||||
====
|
||||
|
||||
The identifier attribute does not necessarily need to be mapped to the column(s) that physically define the primary key.
|
||||
However, it should map to column(s) that can uniquely identify each row.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
We recommend that you declare consistently-named identifier attributes on persistent classes and that you use a nullable (i.e., non-primitive) type.
|
||||
====
|
||||
|
||||
The placement of the `@Id` annotation marks the <<chapters/domain/access.adoc#access,persistence state access strategy>>.
|
||||
|
||||
.Identifier
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/entity/Identifier.java[]
|
||||
----
|
||||
====
|
||||
|
||||
Hibernate offers multiple identifier generation strategies, see the <<chapters/domain/identifiers.adoc#identifiers,Identifier Generators>> chapter for more about this topic.
|
||||
|
||||
[[entity-pojo-mapping]]
|
||||
==== Mapping the entity
|
||||
|
||||
The main piece in mapping the entity is the `javax.persistence.Entity` annotation.
|
||||
The `@Entity` annotation defines just one attribute, `name`, which is used to give a specific entity name for use in JPQL queries.
|
||||
By default, the entity name represents the unqualified name of the entity class itself.
|
||||
|
||||
.Simple `@Entity`
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/entity/SimpleEntity.java[]
|
||||
----
|
||||
====
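
For illustration (the class and query below are assumptions, not part of the included example), an explicit entity name is what JPQL queries refer to:

[source,java]
----
// a minimal sketch: the explicit entity name, not the class name, is used in JPQL (names are illustrative)
@Entity(name = "Book")
public class BookRecord {

    @Id
    private Long id;

    private String title;
}

// elsewhere, querying by the entity name
List<BookRecord> books = entityManager
    .createQuery( "select b from Book b", BookRecord.class )
    .getResultList();
----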
|
||||
|
||||
An entity models a database table.
|
||||
The identifier uniquely identifies each row in that table.
|
||||
By default, the name of the table is assumed to be the same as the name of the entity.
|
||||
To explicitly give the name of the table or to specify other information about the table, we would use the `javax.persistence.Table` annotation.
|
||||
|
||||
.Simple `@Entity` with `@Table`
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/entity/SimpleEntityWithTable.java[]
|
||||
----
|
||||
====
|
||||
|
||||
[[mapping-model-pojo-equalshashcode]]
|
||||
==== Implementing `equals()` and `hashCode()`
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
Much of the discussion in this section deals with the relation of an entity to a Hibernate Session, whether the entity is managed, transient or detached.
|
||||
If you are unfamiliar with these topics, they are explained in the <<chapters/pc/PersistenceContext.adoc#pc,Persistence Context>> chapter.
|
||||
====
|
||||
|
||||
Whether to implement `equals()` and `hashCode()` methods in your domain model, let alone how to implement them, is a surprisingly tricky discussion when it comes to ORM.
|
||||
|
||||
There is really just one absolute case: a class that acts as an identifier must implement equals/hashCode based on the id value(s).
|
||||
Generally, this is pertinent for user-defined classes used as composite identifiers.
|
||||
Beyond this one very specific use case and a few others we will discuss below, you may want to consider not implementing equals/hashCode altogether.
|
||||
|
||||
So what's all the fuss? Normally, most Java objects provide a built-in `equals()` and `hashCode()` based on the object's identity, so each new object will be different from all others.
|
||||
This is generally what you want in ordinary Java programming.
|
||||
Conceptually, however, this starts to break down when you consider the possibility of multiple instances of a class representing the same data.
|
||||
|
||||
This is, in fact, exactly the case when dealing with data coming from a database.
|
||||
Every time we load a specific `Person` from the database we would naturally get a unique instance.
|
||||
Hibernate, however, works hard to make sure that does not happen within a given `Session`.
|
||||
In fact, Hibernate guarantees equivalence of persistent identity (database row) and Java identity inside a particular session scope.
|
||||
So if we ask a Hibernate `Session` to load that specific Person multiple times we will actually get back the same __instance__:
|
||||
|
||||
.Scope of identity
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/entity/listing1.java[]
|
||||
----
|
||||
====
|
||||
|
||||
Consider another example using a persistent `java.util.Set`:
|
||||
|
||||
.Set usage with Session-scoped identity
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/entity/listing3.java[]
|
||||
----
|
||||
====
|
||||
|
||||
However, the semantics change when we mix instances loaded from different Sessions:
|
||||
|
||||
.Mixed Sessions
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/entity/listing2.java[]
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/entity/listing4.java[]
|
||||
----
|
||||
====
|
||||
|
||||
Specifically the outcome in this last example will depend on whether the `Person` class implemented equals/hashCode, and, if so, how.
|
||||
|
||||
Consider yet another case:
|
||||
|
||||
.Sets with transient entities
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/entity/listing5.java[]
|
||||
----
|
||||
====
|
||||
|
||||
In cases where you will be dealing with entities outside of a Session (whether they be transient or detached), especially if you will be using them in Java collections,
|
||||
you should consider implementing equals/hashCode.
|
||||
|
||||
A common initial approach is to use the entity's identifier attribute as the basis for equals/hashCode calculations:
|
||||
|
||||
.Naive equals/hashCode implementation
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/entity/listing6.java[]
|
||||
----
|
||||
====
|
||||
|
||||
It turns out that this still breaks when adding a transient instance of `Person` to a set, as we saw in the last example:
|
||||
|
||||
.Still trouble
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/entity/listing7.java[]
|
||||
----
|
||||
====
|
||||
|
||||
The issue here is a conflict between _the use of a generated identifier_, _the contract of `Set`_ and _the equals/hashCode implementations_.
|
||||
`Set` says that the equals/hashCode value for an object should not change while the object is part of the Set.
|
||||
But that is exactly what happened here because the equals/hashCode are based on the (generated) id, which was not set until the `session.getTransaction().commit()` call.
|
||||
|
||||
Note that this is just a concern when using generated identifiers.
|
||||
If you are using assigned identifiers this will not be a problem, assuming the identifier value is assigned prior to adding to the `Set`.
|
||||
|
||||
Another option is to force the identifier to be generated and set prior to adding to the `Set`:
|
||||
|
||||
.Forcing identifier generation
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/entity/listing8.java[]
|
||||
----
|
||||
====
|
||||
|
||||
But this is often not feasible.
|
||||
|
||||
The final approach is to use a "better" equals/hashCode implementation, making use of a natural-id or business-key.
|
||||
|
||||
.Better equals/hashCode with natural-id
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/entity/listing9.java[]
|
||||
----
|
||||
====
|
||||
|
||||
As you can see the question of equals/hashCode is not trivial, nor is there a one-size-fits-all solution.
|
||||
|
||||
For details on mapping the identifier, see the <<chapters/domain/identifiers.adoc#identifiers,Identifiers>> chapter.
|
||||
|
||||
[[entity-pojo-optlock]]
|
||||
==== Mapping optimistic locking
|
||||
|
||||
JPA defines support for optimistic locking based on either a version (sequential numeric) or timestamp strategy.
|
||||
To enable this style of optimistic locking, simply add the `javax.persistence.Version` annotation to the persistent attribute that defines the optimistic locking value.
|
||||
According to JPA, the valid types for these attributes are limited to:
|
||||
|
||||
* `int` or `Integer`
|
||||
* `short` or `Short`
|
||||
* `long` or `Long`
|
||||
* `java.sql.Timestamp`
|
||||
|
||||
.Version
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/entity/Version.java[]
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/entity/Timestamp.java[]
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
include::{sourcedir}/entity/Instant.java[]
|
||||
----
|
||||
====
|
||||
|
||||
Hibernate supports a form of optimistic locking that does not require a dedicated "version attribute".
|
||||
This is intended mainly for use with modeling legacy schemas.
|
||||
The idea is that you can get Hibernate to perform "version checks" using either all of the entity's attributes, or just the attributes that have changed.
|
||||
This is achieved through the use of the `org.hibernate.annotations.OptimisticLocking` annotation which defines a single attribute of type `org.hibernate.annotations.OptimisticLockType`.
|
||||
There are 4 available OptimisticLockTypes:
|
||||
|
||||
`NONE`:: optimistic locking is disabled even if there is a `@Version` annotation present
|
||||
`VERSION` (the default):: performs optimistic locking based on a `@Version` as described above
|
||||
`ALL`:: performs optimistic locking based on _all_ fields as part of an expanded WHERE clause restriction for the UPDATE/DELETE SQL statements
|
||||
`DIRTY`:: performs optimistic locking based on _dirty_ fields as part of an expanded WHERE clause restriction for the UPDATE/DELETE SQL statements.
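
Tying the options above together, a versionless, dirty-attribute based configuration could be sketched as follows (the entity is illustrative; note that `DIRTY` locking also requires `@DynamicUpdate`):

[source,java]
----
// a minimal sketch: versionless optimistic locking based on the dirty attributes (entity is illustrative)
@Entity
@OptimisticLocking(type = OptimisticLockType.DIRTY)
@DynamicUpdate
public class Document {

    @Id
    private Long id;

    private String title;

    private String body;
}
----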
|
||||
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
@Entity
|
||||
public class Patch {
|
||||
|
||||
@Id
|
||||
private Long id;
|
||||
|
||||
@ElementCollection
|
||||
@CollectionTable(
|
||||
name="patch_change",
|
||||
joinColumns=@JoinColumn(name="patch_id")
|
||||
)
|
||||
@OrderColumn(name = "index_id")
|
||||
private List<Change> changes = new ArrayList<>();
|
||||
|
||||
public List<Change> getChanges() {
|
||||
return changes;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,28 @@
|
|||
@Embeddable
|
||||
@Access(AccessType.PROPERTY)
|
||||
public static class Change {
|
||||
|
||||
private String path;
|
||||
|
||||
private String diff;
|
||||
|
||||
public Change() {}
|
||||
|
||||
@Column(name = "path", nullable = false)
|
||||
public String getPath() {
|
||||
return path;
|
||||
}
|
||||
|
||||
public void setPath(String path) {
|
||||
this.path = path;
|
||||
}
|
||||
|
||||
@Column(name = "diff", nullable = false)
|
||||
public String getDiff() {
|
||||
return diff;
|
||||
}
|
||||
|
||||
public void setDiff(String diff) {
|
||||
this.diff = diff;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,9 @@
|
|||
@Entity
|
||||
public class Patch {
|
||||
|
||||
@Id
|
||||
private Long id;
|
||||
|
||||
@Embedded
|
||||
private Change change;
|
||||
}
|
|
@ -0,0 +1,14 @@
|
|||
@Entity
|
||||
public class Simple {
|
||||
|
||||
@Id
|
||||
private Integer id;
|
||||
|
||||
public Integer getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
public void setId( Integer id ) {
|
||||
this.id = id;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,14 @@
|
|||
@Entity
|
||||
public class Simple {
|
||||
|
||||
private Integer id;
|
||||
|
||||
@Id
|
||||
public Integer getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
public void setId( Integer id ) {
|
||||
this.id = id;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,18 @@
|
|||
@Entity
|
||||
public class Simple {
|
||||
|
||||
private Integer id;
|
||||
|
||||
@Version
|
||||
@Access( AccessType.FIELD )
|
||||
private Integer version;
|
||||
|
||||
@Id
|
||||
public Integer getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
public void setId( Integer id ) {
|
||||
this.id = id;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,30 @@
|
|||
CREATE TABLE Address (
|
||||
id BIGINT NOT NULL ,
|
||||
number VARCHAR(255) ,
|
||||
postalCode VARCHAR(255) ,
|
||||
street VARCHAR(255) ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
||||
|
||||
CREATE TABLE Person (
|
||||
id BIGINT NOT NULL ,
|
||||
registrationNumber VARCHAR(255) ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
||||
|
||||
CREATE TABLE Person_Address (
|
||||
owners_id BIGINT NOT NULL ,
|
||||
addresses_id BIGINT NOT NULL
|
||||
)
|
||||
|
||||
ALTER TABLE Person
|
||||
ADD CONSTRAINT UK_23enodonj49jm8uwec4i7y37f
|
||||
UNIQUE (registrationNumber)
|
||||
|
||||
ALTER TABLE Person_Address
|
||||
ADD CONSTRAINT FKm7j0bnabh2yr0pe99il1d066u
|
||||
FOREIGN KEY (addresses_id) REFERENCES Address
|
||||
|
||||
ALTER TABLE Person_Address
|
||||
ADD CONSTRAINT FKbn86l24gmxdv2vmekayqcsgup
|
||||
FOREIGN KEY (owners_id) REFERENCES Person
|
|
@ -0,0 +1,26 @@
|
|||
INSERT INTO Person ( registrationNumber, id )
|
||||
VALUES ( 'ABC-123', 1 )
|
||||
|
||||
INSERT INTO Address ( number, postalCode, street, id )
|
||||
VALUES ( '12A', '4005A', '12th Avenue', 2 )
|
||||
|
||||
INSERT INTO Address ( number, postalCode, street, id )
|
||||
VALUES ( '18B', '4007B', '18th Avenue', 3 )
|
||||
|
||||
INSERT INTO Person ( registrationNumber, id )
|
||||
VALUES ( 'DEF-456', 4 )
|
||||
|
||||
INSERT INTO Person_Address ( owners_id, addresses_id )
|
||||
VALUES ( 1, 2 )
|
||||
|
||||
INSERT INTO Person_Address ( owners_id, addresses_id )
|
||||
VALUES ( 1, 3 )
|
||||
|
||||
INSERT INTO Person_Address ( owners_id, addresses_id )
|
||||
VALUES ( 4, 2 )
|
||||
|
||||
DELETE FROM Person_Address
|
||||
WHERE owners_id = 1
|
||||
|
||||
INSERT INTO Person_Address ( owners_id, addresses_id )
|
||||
VALUES ( 1, 3 )
|
|
@ -0,0 +1,31 @@
|
|||
CREATE TABLE Address (
|
||||
id BIGINT NOT NULL ,
|
||||
number VARCHAR(255) ,
|
||||
postalCode VARCHAR(255) ,
|
||||
street VARCHAR(255) ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
||||
|
||||
CREATE TABLE Person (
|
||||
id BIGINT NOT NULL ,
|
||||
registrationNumber VARCHAR(255) ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
||||
|
||||
CREATE TABLE PersonAddress (
|
||||
person_id BIGINT NOT NULL ,
|
||||
address_id BIGINT NOT NULL ,
|
||||
PRIMARY KEY ( person_id, address_id )
|
||||
)
|
||||
|
||||
ALTER TABLE Person
|
||||
ADD CONSTRAINT UK_23enodonj49jm8uwec4i7y37f
|
||||
UNIQUE (registrationNumber)
|
||||
|
||||
ALTER TABLE PersonAddress
|
||||
ADD CONSTRAINT FK8b3lru5fyej1aarjflamwghqq
|
||||
FOREIGN KEY (person_id) REFERENCES Person
|
||||
|
||||
ALTER TABLE PersonAddress
|
||||
ADD CONSTRAINT FK7p69mgialumhegyl4byrh65jk
|
||||
FOREIGN KEY (address_id) REFERENCES Address
|
|
@ -0,0 +1,23 @@
|
|||
INSERT INTO Person ( registrationNumber, id )
|
||||
VALUES ( 'ABC-123', 1 )
|
||||
|
||||
INSERT INTO Person ( registrationNumber, id )
|
||||
VALUES ( 'DEF-456', 2 )
|
||||
|
||||
INSERT INTO Address ( number, postalCode, street, id )
|
||||
VALUES ( '12A', '4005A', '12th Avenue', 3 )
|
||||
|
||||
INSERT INTO Address ( number, postalCode, street, id )
|
||||
VALUES ( '18B', '4007B', '18th Avenue', 4 )
|
||||
|
||||
INSERT INTO PersonAddress ( person_id, address_id )
|
||||
VALUES ( 1, 3 )
|
||||
|
||||
INSERT INTO PersonAddress ( person_id, address_id )
|
||||
VALUES ( 1, 4 )
|
||||
|
||||
INSERT INTO PersonAddress ( person_id, address_id )
|
||||
VALUES ( 2, 3 )
|
||||
|
||||
DELETE FROM PersonAddress
|
||||
WHERE person_id = 1 AND address_id = 3
|
|
@ -0,0 +1,24 @@
|
|||
CREATE TABLE Address (
|
||||
id BIGINT NOT NULL ,
|
||||
number VARCHAR(255) ,
|
||||
street VARCHAR(255) ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
||||
|
||||
CREATE TABLE Person (
|
||||
id BIGINT NOT NULL ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
||||
|
||||
CREATE TABLE Person_Address (
|
||||
Person_id BIGINT NOT NULL ,
|
||||
addresses_id BIGINT NOT NULL
|
||||
)
|
||||
|
||||
ALTER TABLE Person_Address
|
||||
ADD CONSTRAINT FKm7j0bnabh2yr0pe99il1d066u
|
||||
FOREIGN KEY (addresses_id) REFERENCES Address
|
||||
|
||||
ALTER TABLE Person_Address
|
||||
ADD CONSTRAINT FKba7rc9qe2vh44u93u0p2auwti
|
||||
FOREIGN KEY (Person_id) REFERENCES Person
|
|
@ -0,0 +1,24 @@
|
|||
INSERT INTO Person ( id )
|
||||
VALUES ( 1 )
|
||||
|
||||
INSERT INTO Address ( number, street, id )
|
||||
VALUES ( '12A', '12th Avenue', 2 )
|
||||
|
||||
INSERT INTO Address ( number, street, id )
|
||||
VALUES ( '18B', '18th Avenue', 3 )
|
||||
|
||||
INSERT INTO Person ( id )
|
||||
VALUES ( 4 )
|
||||
|
||||
INSERT INTO Person_Address ( Person_id, addresses_id )
|
||||
VALUES ( 1, 2 )
|
||||
INSERT INTO Person_Address ( Person_id, addresses_id )
|
||||
VALUES ( 1, 3 )
|
||||
INSERT INTO Person_Address ( Person_id, addresses_id )
|
||||
VALUES ( 4, 2 )
|
||||
|
||||
DELETE FROM Person_Address
|
||||
WHERE Person_id = 1
|
||||
|
||||
INSERT INTO Person_Address ( Person_id, addresses_id )
|
||||
VALUES ( 1, 3 )
|
|
@ -0,0 +1,5 @@
|
|||
DELETE FROM Person_Address
|
||||
WHERE Person_id = 1
|
||||
|
||||
DELETE FROM Person
|
||||
WHERE id = 1
|
|
@ -0,0 +1,15 @@
|
|||
CREATE TABLE Person (
|
||||
id BIGINT NOT NULL ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
||||
|
||||
CREATE TABLE Phone (
|
||||
id BIGINT NOT NULL ,
|
||||
number VARCHAR(255) ,
|
||||
person_id BIGINT ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
||||
|
||||
ALTER TABLE Phone
|
||||
ADD CONSTRAINT PERSON_ID_FK
|
||||
FOREIGN KEY (person_id) REFERENCES Person
|
|
@ -0,0 +1,10 @@
|
|||
INSERT INTO Person ( id )
|
||||
VALUES ( 1 )
|
||||
|
||||
INSERT INTO Phone ( number, person_id, id )
|
||||
VALUES ( '123-456-7890', 1, 2 )
|
||||
|
||||
UPDATE Phone
|
||||
SET number = '123-456-7890',
|
||||
person_id = NULL
|
||||
WHERE id = 2
|
|
@ -0,0 +1,19 @@
|
|||
CREATE TABLE Person (
|
||||
id BIGINT NOT NULL ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
||||
|
||||
CREATE TABLE Phone (
|
||||
id BIGINT NOT NULL ,
|
||||
number VARCHAR(255) ,
|
||||
person_id BIGINT ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
||||
|
||||
ALTER TABLE Phone
|
||||
ADD CONSTRAINT UK_l329ab0g4c1t78onljnxmbnp6
|
||||
UNIQUE (number)
|
||||
|
||||
ALTER TABLE Phone
|
||||
ADD CONSTRAINT FKmw13yfsjypiiq0i1osdkaeqpg
|
||||
FOREIGN KEY (person_id) REFERENCES Person
|
|
@ -0,0 +1,10 @@
|
|||
INSERT INTO Phone
|
||||
( number, person_id, id )
|
||||
VALUES ( '123-456-7890', NULL, 2 )
|
||||
|
||||
INSERT INTO Phone
|
||||
( number, person_id, id )
|
||||
VALUES ( '321-654-0987', NULL, 3 )
|
||||
|
||||
DELETE FROM Phone
|
||||
WHERE id = 2
|
|
@ -0,0 +1,27 @@
|
|||
CREATE TABLE Person (
|
||||
id BIGINT NOT NULL ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
||||
|
||||
CREATE TABLE Person_Phone (
|
||||
Person_id BIGINT NOT NULL ,
|
||||
phones_id BIGINT NOT NULL
|
||||
)
|
||||
|
||||
CREATE TABLE Phone (
|
||||
id BIGINT NOT NULL ,
|
||||
number VARCHAR(255) ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
||||
|
||||
ALTER TABLE Person_Phone
|
||||
ADD CONSTRAINT UK_9uhc5itwc9h5gcng944pcaslf
|
||||
UNIQUE (phones_id)
|
||||
|
||||
ALTER TABLE Person_Phone
|
||||
ADD CONSTRAINT FKr38us2n8g5p9rj0b494sd3391
|
||||
FOREIGN KEY (phones_id) REFERENCES Phone
|
||||
|
||||
ALTER TABLE Person_Phone
|
||||
ADD CONSTRAINT FK2ex4e4p7w1cj310kg2woisjl2
|
||||
FOREIGN KEY (Person_id) REFERENCES Person
|
|
@ -0,0 +1,29 @@
|
|||
INSERT INTO Person
|
||||
( id )
|
||||
VALUES ( 1 )
|
||||
|
||||
INSERT INTO Phone
|
||||
( number, id )
|
||||
VALUES ( '123 - 456 - 7890', 2 )
|
||||
|
||||
INSERT INTO Phone
|
||||
( number, id )
|
||||
VALUES ( '321 - 654 - 0987', 3 )
|
||||
|
||||
INSERT INTO Person_Phone
|
||||
( Person_id, phones_id )
|
||||
VALUES ( 1, 2 )
|
||||
|
||||
INSERT INTO Person_Phone
|
||||
( Person_id, phones_id )
|
||||
VALUES ( 1, 3 )
|
||||
|
||||
DELETE FROM Person_Phone
|
||||
WHERE Person_id = 1
|
||||
|
||||
INSERT INTO Person_Phone
|
||||
( Person_id, phones_id )
|
||||
VALUES ( 1, 3 )
|
||||
|
||||
DELETE FROM Phone
|
||||
WHERE id = 2
|
|
@ -0,0 +1,17 @@
|
|||
CREATE TABLE Phone (
|
||||
id BIGINT NOT NULL ,
|
||||
number VARCHAR(255) ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
||||
|
||||
CREATE TABLE PhoneDetails (
|
||||
id BIGINT NOT NULL ,
|
||||
provider VARCHAR(255) ,
|
||||
technology VARCHAR(255) ,
|
||||
phone_id BIGINT ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
||||
|
||||
ALTER TABLE PhoneDetails
|
||||
ADD CONSTRAINT FKeotuev8ja8v0sdh29dynqj05p
|
||||
FOREIGN KEY (phone_id) REFERENCES Phone
|
|
@ -0,0 +1,5 @@
|
|||
INSERT INTO Phone ( number, id )
|
||||
VALUES ( '123 - 456 - 7890', 1 )
|
||||
|
||||
INSERT INTO PhoneDetails ( phone_id, provider, technology, id )
|
||||
VALUES ( 1, 'T - Mobile', 'GSM', 2 )
|
|
@ -0,0 +1,17 @@
|
|||
CREATE TABLE Phone (
|
||||
id BIGINT NOT NULL ,
|
||||
number VARCHAR(255) ,
|
||||
details_id BIGINT ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
||||
|
||||
CREATE TABLE PhoneDetails (
|
||||
id BIGINT NOT NULL ,
|
||||
provider VARCHAR(255) ,
|
||||
technology VARCHAR(255) ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
||||
|
||||
ALTER TABLE Phone
|
||||
ADD CONSTRAINT FKnoj7cj83ppfqbnvqqa5kolub7
|
||||
FOREIGN KEY (details_id) REFERENCES PhoneDetails
|
|
@ -0,0 +1,6 @@
|
|||
CREATE TABLE Product (
|
||||
id INTEGER NOT NULL ,
|
||||
image blob ,
|
||||
name VARCHAR(255) ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
|
@ -0,0 +1,6 @@
|
|||
CREATE TABLE Product (
|
||||
id INTEGER NOT NULL ,
|
||||
image clob ,
|
||||
name VARCHAR(255) ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
|
@ -0,0 +1,22 @@
|
|||
DEBUG SQL:92 -
|
||||
insert
|
||||
into
|
||||
Product
|
||||
(bitSet, id)
|
||||
values
|
||||
(?, ?)
|
||||
|
||||
TRACE BasicBinder:65 - binding parameter [1] as [VARCHAR] - [{0, 65, 128, 129}]
|
||||
TRACE BasicBinder:65 - binding parameter [2] as [INTEGER] - [1]
|
||||
|
||||
DEBUG SQL:92 -
|
||||
select
|
||||
bitsettype0_.id as id1_0_0_,
|
||||
bitsettype0_.bitSet as bitSet2_0_0_
|
||||
from
|
||||
Product bitsettype0_
|
||||
where
|
||||
bitsettype0_.id=?
|
||||
|
||||
TRACE BasicBinder:65 - binding parameter [1] as [INTEGER] - [1]
|
||||
TRACE BasicExtractor:61 - extracted value ([bitSet2_0_0_] : [VARCHAR]) - [{0, 65, 128, 129}]
|
|
@ -0,0 +1,22 @@
|
|||
DEBUG SQL:92 -
|
||||
insert
|
||||
into
|
||||
Product
|
||||
(bitSet, id)
|
||||
values
|
||||
(?, ?)
|
||||
|
||||
DEBUG BitSetUserType:71 - Binding 1,10,11 to parameter 1
|
||||
TRACE BasicBinder:65 - binding parameter [2] as [INTEGER] - [1]
|
||||
|
||||
DEBUG SQL:92 -
|
||||
select
|
||||
bitsetuser0_.id as id1_0_0_,
|
||||
bitsetuser0_.bitSet as bitSet2_0_0_
|
||||
from
|
||||
Product bitsetuser0_
|
||||
where
|
||||
bitsetuser0_.id=?
|
||||
|
||||
TRACE BasicBinder:65 - binding parameter [1] as [INTEGER] - [1]
|
||||
DEBUG BitSetUserType:56 - Result set column bitSet2_0_0_ value is 1,10,11
|
|
@ -0,0 +1,2 @@
|
|||
INSERT INTO DateEvent ( timestamp, id )
|
||||
VALUES ( '2015-12-29', 1 )
|
|
@ -0,0 +1,2 @@
|
|||
INSERT INTO DateEvent ( timestamp, id )
|
||||
VALUES ( '16:51:58', 1 )
|
|
@ -0,0 +1,2 @@
|
|||
INSERT INTO DateEvent ( timestamp, id )
|
||||
VALUES ( '2015-12-29 16:54:04.544', 1 )
|
|
@ -0,0 +1,2 @@
|
|||
INSERT INTO Phone (phone_number, phone_type, id)
|
||||
VALUES ('123-456-78990', 2, 1)
|
|
@ -0,0 +1,2 @@
|
|||
INSERT INTO Phone (phone_number, phone_type, id)
|
||||
VALUES ('123-456-78990', 'MOBILE', 1)
|
|
@ -0,0 +1,2 @@
|
|||
INSERT INTO Event ( span, id )
|
||||
VALUES ( 'P1Y2M3D', 1 )
|
|
@ -0,0 +1,6 @@
|
|||
CREATE TABLE Product (
|
||||
id INTEGER NOT NULL ,
|
||||
name VARCHAR(255) ,
|
||||
warranty NVARCHAR(255) ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
|
@ -0,0 +1,6 @@
|
|||
CREATE TABLE Product (
|
||||
id INTEGER NOT NULL ,
|
||||
name VARCHAR(255) ,
|
||||
warranty nclob ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
|
@ -0,0 +1,2 @@
|
|||
INSERT INTO Product ("name", "number", id)
|
||||
VALUES ('Mobile phone', '123-456-7890', 1)
|
|
@ -0,0 +1,6 @@
|
|||
CREATE TABLE property_holder (
|
||||
id BIGINT NOT NULL,
|
||||
property_type VARCHAR(255),
|
||||
property_id BIGINT,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
|
@ -0,0 +1,25 @@
|
|||
INSERT INTO integer_property
|
||||
( "name", "value", id )
|
||||
VALUES ( 'age', 23, 1 )
|
||||
|
||||
INSERT INTO string_property
|
||||
( "name", "value", id )
|
||||
VALUES ( 'name', 'John Doe', 1 )
|
||||
|
||||
INSERT INTO property_holder
|
||||
( property_type, property_id, id )
|
||||
VALUES ( 'S', 1, 1 )
|
||||
|
||||
|
||||
SELECT ph.id AS id1_1_0_,
|
||||
ph.property_type AS property2_1_0_,
|
||||
ph.property_id AS property3_1_0_
|
||||
FROM property_holder ph
|
||||
WHERE ph.id = 1
|
||||
|
||||
|
||||
SELECT sp.id AS id1_2_0_,
|
||||
sp."name" AS name2_2_0_,
|
||||
sp."value" AS value3_2_0_
|
||||
FROM string_property sp
|
||||
WHERE sp.id = 1
|
|
@ -0,0 +1,12 @@
|
|||
INSERT INTO Account (credit, rate, id)
|
||||
VALUES (5000.0, 0.0125, 1)
|
||||
|
||||
SELECT
|
||||
a.id as id1_0_0_,
|
||||
a.credit as credit2_0_0_,
|
||||
a.rate as rate3_0_0_,
|
||||
a.credit * a.rate as formula0_0_
|
||||
FROM
|
||||
Account a
|
||||
WHERE
|
||||
a.id = 1
|
|
@ -0,0 +1,10 @@
|
|||
CREATE TABLE property_repository (
|
||||
id BIGINT NOT NULL,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
||||
|
||||
CREATE TABLE repository_properties (
|
||||
repository_id BIGINT NOT NULL,
|
||||
property_type VARCHAR(255),
|
||||
property_id BIGINT NOT NULL
|
||||
)
|
|
@ -0,0 +1,36 @@
|
|||
INSERT INTO integer_property
|
||||
( "name", "value", id )
|
||||
VALUES ( 'age', 23, 1 )
|
||||
|
||||
INSERT INTO string_property
|
||||
( "name", "value", id )
|
||||
VALUES ( 'name', 'John Doe', 1 )
|
||||
|
||||
INSERT INTO property_repository ( id )
|
||||
VALUES ( 1 )
|
||||
|
||||
INSERT INTO repository_properties
|
||||
( repository_id , property_type , property_id )
|
||||
VALUES
|
||||
( 1 , 'I' , 1 )
|
||||
|
||||
INSERT INTO repository_properties
|
||||
( repository_id , property_type , property_id )
|
||||
VALUES
|
||||
( 1 , 'S' , 1 )
|
||||
|
||||
SELECT pr.id AS id1_1_0_
|
||||
FROM property_repository pr
|
||||
WHERE pr.id = 1
|
||||
|
||||
SELECT ip.id AS id1_0_0_ ,
|
||||
integerpro0_."name" AS name2_0_0_ ,
|
||||
integerpro0_."value" AS value3_0_0_
|
||||
FROM integer_property integerpro0_
|
||||
WHERE integerpro0_.id = 1
|
||||
|
||||
SELECT sp.id AS id1_3_0_ ,
|
||||
sp."name" AS name2_3_0_ ,
|
||||
sp."value" AS value3_3_0_
|
||||
FROM string_property sp
|
||||
WHERE sp.id = 1
|
|
@ -0,0 +1,11 @@
|
|||
INSERT INTO Savings (money, currency, id)
|
||||
VALUES (10 * 100, 'USD', 1)
|
||||
|
||||
SELECT
|
||||
s.id as id1_0_0_,
|
||||
s.money / 100 as money2_0_0_,
|
||||
s.currency as currency3_0_0_
|
||||
FROM
|
||||
Savings s
|
||||
WHERE
|
||||
s.id = 1
|
|
@ -0,0 +1,2 @@
|
|||
INSERT INTO Event ("timestamp", id)
|
||||
VALUES (current_timestamp, 1)
|
|
@ -0,0 +1,2 @@
|
|||
INSERT INTO Event ("timestamp", id)
|
||||
VALUES ('Tue Mar 01 10:58:18 EET 2016', 1)
|
|
@ -0,0 +1,5 @@
|
|||
CREATE TABLE Person (
|
||||
id BIGINT NOT NULL ,
|
||||
phones VARBINARY(255) ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
|
@ -0,0 +1,19 @@
|
|||
CREATE TABLE Person (
|
||||
id BIGINT NOT NULL, PRIMARY KEY (id)
|
||||
)
|
||||
|
||||
CREATE TABLE Phone (
|
||||
id BIGINT NOT NULL,
|
||||
number VARCHAR(255),
|
||||
type VARCHAR(255),
|
||||
person_id BIGINT,
|
||||
PRIMARY KEY (id)
|
||||
)
|
||||
|
||||
ALTER TABLE Phone
|
||||
ADD CONSTRAINT UK_l329ab0g4c1t78onljnxmbnp6
|
||||
UNIQUE (number)
|
||||
|
||||
ALTER TABLE Phone
|
||||
ADD CONSTRAINT FKmw13yfsjypiiq0i1osdkaeqpg
|
||||
FOREIGN KEy (person_id) REFERENCES Person
|
|
@ -0,0 +1,8 @@
|
|||
INSERT INTO Phone (number, person_id, type, id)
|
||||
VALUES ( '028-234-9876', 1, 'landline', 1 )
|
||||
|
||||
INSERT INTO Phone (number, person_id, type, id)
|
||||
VALUES ( '072-122-9876', 1, 'mobile', 2 )
|
||||
|
||||
UPDATE Phone
|
||||
SET person_id = NULL, type = 'landline' where id = 1
|
|
@ -0,0 +1 @@
|
|||
DELETE FROM Phone WHERE id = 1
|
|
@ -0,0 +1,8 @@
|
|||
CREATE TABLE Phone (
|
||||
id BIGINT NOT NULL ,
|
||||
number VARCHAR(255) ,
|
||||
type VARCHAR(255) ,
|
||||
person_id BIGINT ,
|
||||
order_id INTEGER ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
|
@ -0,0 +1,6 @@
|
|||
INSERT INTO Person ( phones, id )
|
||||
VALUES ( '027-123-4567,028-234-9876', 1 )
|
||||
|
||||
UPDATE Person
|
||||
SET phones = '028-234-9876'
|
||||
WHERE id = 1
|
|
@ -0,0 +1,5 @@
|
|||
INSERT INTO Person_phones ( Person_id, number, type )
|
||||
VALUES ( 1, '028-234-9876', 'landline' )
|
||||
|
||||
INSERT INTO Person_phones ( Person_id, number, type )
|
||||
VALUES ( 1, '072-122-9876', 'mobile' )
|
|
@ -0,0 +1,17 @@
|
|||
CREATE TABLE Person (
|
||||
id BIGINT NOT NULL ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
||||
|
||||
CREATE TABLE Phone (
|
||||
id BIGINT NOT NULL ,
|
||||
number VARCHAR(255) ,
|
||||
since TIMESTAMP ,
|
||||
type INTEGER ,
|
||||
person_id BIGINT ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
||||
|
||||
ALTER TABLE Phone
|
||||
ADD CONSTRAINT FKmw13yfsjypiiq0i1osdkaeqpg
|
||||
FOREIGN KEY (person_id) REFERENCES Person
|
|
@ -0,0 +1,26 @@
|
|||
CREATE TABLE Person (
|
||||
id BIGINT NOT NULL ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
||||
|
||||
CREATE TABLE Phone (
|
||||
id BIGINT NOT NULL ,
|
||||
number VARCHAR(255) ,
|
||||
since TIMESTAMP ,
|
||||
type INTEGER ,
|
||||
PRIMARY KEY ( id )
|
||||
)
|
||||
|
||||
CREATE TABLE phone_register (
|
||||
phone_id BIGINT NOT NULL ,
|
||||
person_id BIGINT NOT NULL ,
|
||||
PRIMARY KEY ( phone_id, person_id )
|
||||
)
|
||||
|
||||
ALTER TABLE phone_register
|
||||
ADD CONSTRAINT FKc3jajlx41lw6clbygbw8wm65w
|
||||
FOREIGN KEY (person_id) REFERENCES Phone
|
||||
|
||||
ALTER TABLE phone_register
|
||||
ADD CONSTRAINT FK6npoomh1rp660o1b55py9ndw4
|
||||
FOREIGN KEY (phone_id) REFERENCES Person
|
|
@ -0,0 +1,5 @@
|
|||
INSERT INTO phone_register (Person_id, number, type, since)
|
||||
VALUES (1, '072-122-9876', 1, '2015-12-15 17:16:45.311')
|
||||
|
||||
INSERT INTO phone_register (Person_id, number, type, since)
|
||||
VALUES (1, '028-234-9876', 0, '2015-12-15 17:16:45.311')
|
|
@ -0,0 +1,16 @@
CREATE TABLE Person (
    id BIGINT NOT NULL ,
    PRIMARY KEY ( id )
)

CREATE TABLE phone_register (
    Person_id BIGINT NOT NULL ,
    since TIMESTAMP ,
    number VARCHAR(255) NOT NULL ,
    type INTEGER NOT NULL ,
    PRIMARY KEY ( Person_id, number, type )
)

ALTER TABLE phone_register
    ADD CONSTRAINT FKrmcsa34hr68of2rq8qf526mlk
    FOREIGN KEY (Person_id) REFERENCES Person
@ -0,0 +1,28 @@
CREATE TABLE Person (
    id BIGINT NOT NULL ,
    PRIMARY KEY ( id )
)

CREATE TABLE Person_Phone (
    Person_id BIGINT NOT NULL ,
    phones_id BIGINT NOT NULL
)

CREATE TABLE Phone (
    id BIGINT NOT NULL ,
    number VARCHAR(255) ,
    type VARCHAR(255) ,
    PRIMARY KEY ( id )
)

ALTER TABLE Person_Phone
    ADD CONSTRAINT UK_9uhc5itwc9h5gcng944pcaslf
    UNIQUE (phones_id)

ALTER TABLE Person_Phone
    ADD CONSTRAINT FKr38us2n8g5p9rj0b494sd3391
    FOREIGN KEY (phones_id) REFERENCES Phone

ALTER TABLE Person_Phone
    ADD CONSTRAINT FK2ex4e4p7w1cj310kg2woisjl2
    FOREIGN KEY (Person_id) REFERENCES Person
@ -0,0 +1,14 @@
INSERT INTO Person ( id )
VALUES ( 1 )

INSERT INTO Phone ( number, type, id )
VALUES ( '028-234-9876', 'landline', 1 )

INSERT INTO Phone ( number, type, id )
VALUES ( '072-122-9876', 'mobile', 2 )

INSERT INTO Person_Phone ( Person_id, phones_id )
VALUES ( 1, 1 )

INSERT INTO Person_Phone ( Person_id, phones_id )
VALUES ( 1, 2 )
@ -0,0 +1,14 @@
SELECT
    phones0_.Person_id AS Person_i1_1_0_,
    phones0_.phones_id AS phones_i2_1_0_,
    unidirecti1_.id AS id1_2_1_,
    unidirecti1_.number AS number2_2_1_,
    unidirecti1_.type AS type3_2_1_
FROM
    Person_Phone phones0_
INNER JOIN
    Phone unidirecti1_ ON phones0_.phones_id=unidirecti1_.id
WHERE
    phones0_.Person_id = 1
ORDER BY
    unidirecti1_.number
@ -0,0 +1,6 @@
CREATE TABLE Person_Phone (
    Person_id BIGINT NOT NULL ,
    phones_id BIGINT NOT NULL ,
    order_id INTEGER NOT NULL ,
    PRIMARY KEY ( Person_id, order_id )
)
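The extra order_id column and the (Person_id, order_id) primary key are what an order column adds to the join table. A sketch of a list mapping that would generate such a table, assuming @OrderColumn on a unidirectional @OneToMany and the Phone entity from the surrounding examples; names are illustrative:

import java.util.ArrayList;
import java.util.List;

import javax.persistence.CascadeType;
import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.OneToMany;
import javax.persistence.OrderColumn;

@Entity
public class Person {

    @Id
    private Long id;

    // the order column records the List index in the Person_Phone join table,
    // hence the additional order_id column and the composite primary key
    @OneToMany(cascade = CascadeType.ALL)
    @OrderColumn(name = "order_id")
    private List<Phone> phones = new ArrayList<>();
}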
@ -0,0 +1,14 @@
select
    phones0_.Person_id as Person_i1_1_0_,
    phones0_.phones_id as phones_i2_1_0_,
    phones0_.order_id as order_id3_0_,
    unidirecti1_.id as id1_2_1_,
    unidirecti1_.number as number2_2_1_,
    unidirecti1_.type as type3_2_1_
from
    Person_Phone phones0_
inner join
    Phone unidirecti1_
        on phones0_.phones_id=unidirecti1_.id
where
    phones0_.Person_id = 1
@ -0,0 +1,7 @@
DELETE FROM Person_phones WHERE Person_id = 1

INSERT INTO Person_phones ( Person_id, phones )
VALUES ( 1, '123-456-7890' )

INSERT INTO Person_phones ( Person_id, phones )
VALUES ( 1, '456-000-1234' )
@ -0,0 +1,8 @@
DELETE FROM Person_phones
WHERE Person_id = 1
    AND order_id = 1

UPDATE Person_phones
SET phones = '456-000-1234'
WHERE Person_id = 1
    AND order_id = 0
@ -0,0 +1,4 @@
DELETE FROM Person_phones WHERE Person_id = 1

INSERT INTO Person_phones ( Person_id, phones )
VALUES ( 1, '456-000-1234' )
@ -0,0 +1,20 @@
Session s = openSession();
Transaction tx = s.beginTransaction();

// Create a customer entity
Map<String, Object> david = new HashMap<>();
david.put( "name", "David" );

// Create an organization entity
Map<String, Object> foobar = new HashMap<>();
foobar.put( "name", "Foobar Inc." );

// Link both
david.put( "organization", foobar );

// Save both
s.save( "Customer", david );
s.save( "Organization", foobar );

tx.commit();
s.close();
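As a follow-up sketch (not part of the migrated files), the same dynamic-map entity can be loaded back by entity name and read as a plain Map; the davidId variable is hypothetical and stands for the identifier returned by the earlier save:

Session s = openSession();
Transaction tx = s.beginTransaction();

// davidId is assumed to be the Serializable identifier from the earlier save
@SuppressWarnings("unchecked")
Map<String, Object> david = (Map<String, Object>) s.get( "Customer", davidId );

// attribute values come back as plain map entries
String name = (String) david.get( "name" );

tx.commit();
s.close();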
@ -0,0 +1,22 @@
@Embeddable
public class Address {

    private String line1;

    private String line2;

    @Embedded
    private ZipCode zipCode;

    ...

    @Embeddable
    public static class ZipCode {

        private String postalCode;

        private String plus4;

        ...
    }
}
@ -0,0 +1,74 @@
@Entity
public class Contact {

    @Id
    private Integer id;

    @Embedded
    private Name name;

    @Embedded
    @AttributeOverrides({
        @AttributeOverride(
            name = "line1",
            column = @Column( name = "home_address_line1" )
        ),
        @AttributeOverride(
            name = "line2",
            column = @Column( name = "home_address_line2" )
        ),
        @AttributeOverride(
            name = "zipCode.postalCode",
            column = @Column( name = "home_address_postal_cd" )
        ),
        @AttributeOverride(
            name = "zipCode.plus4",
            column = @Column( name = "home_address_postal_plus4" )
        )
    })
    private Address homeAddress;

    @Embedded
    @AttributeOverrides({
        @AttributeOverride(
            name = "line1",
            column = @Column( name = "mailing_address_line1" )
        ),
        @AttributeOverride(
            name = "line2",
            column = @Column( name = "mailing_address_line2" )
        ),
        @AttributeOverride(
            name = "zipCode.postalCode",
            column = @Column( name = "mailing_address_postal_cd" )
        ),
        @AttributeOverride(
            name = "zipCode.plus4",
            column = @Column( name = "mailing_address_postal_plus4" )
        )
    })
    private Address mailingAddress;

    @Embedded
    @AttributeOverrides({
        @AttributeOverride(
            name = "line1",
            column = @Column( name = "work_address_line1" )
        ),
        @AttributeOverride(
            name = "line2",
            column = @Column( name = "work_address_line2" )
        ),
        @AttributeOverride(
            name = "zipCode.postalCode",
            column = @Column( name = "work_address_postal_cd" )
        ),
        @AttributeOverride(
            name = "zipCode.plus4",
            column = @Column( name = "work_address_postal_plus4" )
        )
    })
    private Address workAddress;

    ...
}
@ -0,0 +1,19 @@
create table Contact(
    id integer not null,
    name_firstName VARCHAR,
    name_middleName VARCHAR,
    name_lastName VARCHAR,
    homeAddress_line1 VARCHAR,
    homeAddress_line2 VARCHAR,
    homeAddress_zipCode_postalCode VARCHAR,
    homeAddress_zipCode_plus4 VARCHAR,
    mailingAddress_line1 VARCHAR,
    mailingAddress_line2 VARCHAR,
    mailingAddress_zipCode_postalCode VARCHAR,
    mailingAddress_zipCode_plus4 VARCHAR,
    workAddress_line1 VARCHAR,
    workAddress_line2 VARCHAR,
    workAddress_zipCode_postalCode VARCHAR,
    workAddress_zipCode_plus4 VARCHAR,
    ...
)
@ -0,0 +1,20 @@
@Entity
public class Contact {

    @Id
    private Integer id;

    @Embedded
    private Name name;

    @Embedded
    private Address homeAddress;

    @Embedded
    private Address mailingAddress;

    @Embedded
    private Address workAddress;

    ...
}
@ -0,0 +1,11 @@
@Embeddable
public class Name {

    private String firstName;

    private String middleName;

    private String lastName;

    ...
}
@ -0,0 +1,11 @@
@Entity
public class Person {

    @Id
    private Integer id;

    @Embedded
    private Name name;

    ...
}
@ -0,0 +1,7 @@
create table Person (
    id integer not null,
    firstName VARCHAR,
    middleName VARCHAR,
    lastName VARCHAR,
    ...
)
@ -0,0 +1,14 @@
@Entity
public class Person {

    @Id
    private Integer id;

    private String firstName;

    private String middleName;

    private String lastName;

    ...
}
@ -0,0 +1,8 @@
MetadataSources sources = ...;
sources.addAnnotatedClass( Address.class );
sources.addAnnotatedClass( Name.class );
sources.addAnnotatedClass( Contact.class );

Metadata metadata = sources.getMetadataBuilder().applyImplicitNamingStrategy( ImplicitNamingStrategyComponentPathImpl.INSTANCE )
    ...
    .build();
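For comparison, a sketch of selecting the same component-path implicit naming strategy through configuration rather than the MetadataBuilder API; it assumes the hibernate.implicit_naming_strategy setting and its "component-path" short name:

StandardServiceRegistry standardRegistry = new StandardServiceRegistryBuilder()
    // "component-path" resolves to ImplicitNamingStrategyComponentPathImpl
    .applySetting( "hibernate.implicit_naming_strategy", "component-path" )
    .build();

Metadata metadata = new MetadataSources( standardRegistry )
    .addAnnotatedClass( Contact.class )
    .buildMetadata();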
@ -0,0 +1,2 @@
@Id
private Integer id;
@ -0,0 +1,10 @@
@Entity
public class Thing2 {

    @Id
    private Integer id;

    @Version
    private Instant ts;
    ...
}
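Alongside the Instant-based version above, a sketch of the more common numeric variant (not one of the migrated examples): Hibernate increments the value on every update, which is what drives the optimistic lock check.

import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.Version;

@Entity
public class Thing1 {

    @Id
    private Integer id;

    // incremented automatically on each UPDATE; a stale value causes an
    // optimistic locking failure at flush/commit time
    @Version
    private long version;
}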
@ -0,0 +1,4 @@
@Entity
public class Simple {
    ...
}
Some files were not shown because too many files have changed in this diff.