Merge remote-tracking branch 'upstream/master' into wip/6.0_merge_23
This commit is contained in:
commit
f77fb75639
|
@ -70,7 +70,7 @@ task release {
|
|||
|
||||
task publish {
|
||||
description = "The task performed when we want to just publish maven artifacts. Relies on " +
|
||||
"the fact that subprojects will have a task named pubappropriately define a release task " +
|
||||
"the fact that subprojects will appropriately define a release task " +
|
||||
"themselves if they have any release-related activities to perform"
|
||||
}
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
== References
|
||||
|
||||
[bibliography]
|
||||
- [[[PoEAA]]] Martin Fowler. Patterns of Enterprise Application Architecture.
|
||||
- [[[PoEAA]]] Martin Fowler. https://www.martinfowler.com/books/eaa.html[Patterns of Enterprise Application Architecture].
|
||||
Addison-Wesley Publishing Company. 2003.
|
||||
- [[[JPwH]]] Christian Bauer & Gavin King. http://www.manning.com/bauer2[Java Persistence with Hibernate]. Manning Publications Co. 2007.
|
||||
- [[[JPwH]]] Christian Bauer & Gavin King. https://www.manning.com/books/java-persistence-with-hibernate-second-edition[Java Persistence with Hibernate, Second Edition]. Manning Publications Co. 2015.
|
||||
|
|
|
@ -44,9 +44,9 @@ While having a strong background in SQL is not required to use Hibernate, it cer
|
|||
Probably even more important is an understanding of data modeling principles.
|
||||
You might want to consider these resources as a good starting point:
|
||||
|
||||
* http://en.wikipedia.org/wiki/Data_modeling[Data Modeling Wikipedia definition]
|
||||
* http://en.wikipedia.org/wiki/Data_modeling[Data modeling Wikipedia definition]
|
||||
* http://www.agiledata.org/essays/dataModeling101.html[Data Modeling 101]
|
||||
|
||||
Understanding the basics of transactions and design patterns such as _Unit of Work_ <<Bibliography.adoc#PoEAA,PoEAA>> or _Application Transaction_ are important as well.
|
||||
Understanding the basics of transactions and design patterns such as _Unit of Work_ (<<Bibliography.adoc#PoEAA,PoEAA>>) or _Application Transaction_ are important as well.
|
||||
These topics will be discussed in the documentation, but a prior understanding will certainly help.
|
||||
====
|
||||
|
|
|
@ -93,7 +93,7 @@ See the <<chapters/domain/basic_types.adoc#basic-enums-attribute-converter, `Att
|
|||
[[annotations-jpa-converter]]
|
||||
==== `@Converter`
|
||||
|
||||
The https://javaee.github.io/javaee-spec/javadocs/javax/persistence/Converter.html[`@Converter`] annotation is used to specify that the current annotate https://javaee.github.io/javaee-spec/javadocs/javax/persistence/AttributeConverter.html[`AttributeConverter`] implementation can be used as a JPA basic attribute converter.
|
||||
The https://javaee.github.io/javaee-spec/javadocs/javax/persistence/Converter.html[`@Converter`] annotation is used to specify that the currently annotated https://javaee.github.io/javaee-spec/javadocs/javax/persistence/AttributeConverter.html[`AttributeConverter`] implementation can be used as a JPA basic attribute converter.
|
||||
|
||||
If the https://javaee.github.io/javaee-spec/javadocs/javax/persistence/Converter.html#autoApply--[`autoApply`] attribute is set to `true`, then the JPA provider will automatically convert all basic attributes with the same Java type as defined by the current converter.
|
||||
|
||||
|
@ -109,7 +109,7 @@ See the <<chapters/domain/basic_types.adoc#basic-enums-attribute-converter, `Att
|
|||
[[annotations-jpa-discriminatorcolumn]]
|
||||
==== `@DiscriminatorColumn`
|
||||
|
||||
The https://javaee.github.io/javaee-spec/javadocs/javax/persistence/DiscriminatorColumn.html[`@DiscriminatorColumn`] annotation is used to specify the discriminator column name and the https://javaee.github.io/javaee-spec/javadocs/javax/persistence/DiscriminatorColumn.html#discriminatorType--[discriminator type] for the `SINGLE_TABLE` and `JOINED` Inheritance strategies.
|
||||
The https://javaee.github.io/javaee-spec/javadocs/javax/persistence/DiscriminatorColumn.html[`@DiscriminatorColumn`] annotation is used to specify the discriminator column name and the https://javaee.github.io/javaee-spec/javadocs/javax/persistence/DiscriminatorColumn.html#discriminatorType--[discriminator type] for the `SINGLE_TABLE` and `JOINED` inheritance strategies.
|
||||
|
||||
See the <<chapters/domain/inheritance.adoc#entity-inheritance-discriminator, Discriminator>> section for more info.
|
||||
|
||||
|
@ -151,7 +151,7 @@ See the <<chapters/domain/identifiers.adoc#identifiers-composite-aggregated, Com
|
|||
[[annotations-jpa-entity]]
|
||||
==== `@Entity`
|
||||
|
||||
The https://javaee.github.io/javaee-spec/javadocs/javax/persistence/Entity.html[`@Entity`] annotation is used to specify that the currently annotate class represents an entity type.
|
||||
The https://javaee.github.io/javaee-spec/javadocs/javax/persistence/Entity.html[`@Entity`] annotation is used to specify that the currently annotated class represents an entity type.
|
||||
Unlike basic and embeddable types, entity types have an identity and their state is managed by the underlying Persistence Context.
|
||||
|
||||
See the <<chapters/domain/entity.adoc#entity, Entity>> section for more info.
|
||||
|
@ -254,7 +254,7 @@ See the <<chapters/domain/associations.adoc#associations-many-to-one-example,`@M
|
|||
[[annotations-jpa-joincolumns]]
|
||||
==== `@JoinColumns`
|
||||
|
||||
The https://javaee.github.io/javaee-spec/javadocs/javax/persistence/JoinColumns.html[`@JoinColumns`] annotation is used to group multiple <<annotations-jpa-joincolumn>> annotations, which are used when mapping entity association or an embeddable collection using a composite identifier
|
||||
The https://javaee.github.io/javaee-spec/javadocs/javax/persistence/JoinColumns.html[`@JoinColumns`] annotation is used to group multiple <<annotations-jpa-joincolumn>> annotations, which are used when mapping an entity association or an embeddable collection using a composite identifier.
|
||||
|
||||
[[annotations-jpa-jointable]]
|
||||
==== `@JoinTable`
|
||||
|
@ -631,7 +631,7 @@ You should use either the JPA <<annotations-jpa-access>> or the Hibernate native
|
|||
==== `@Any`
|
||||
|
||||
The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Any.html[`@Any`] annotation is used to define the *any-to-one* association
|
||||
which can point to one one of several entity types.
|
||||
which can point to one of several entity types.
|
||||
|
||||
See the <<chapters/domain/associations.adoc#associations-any,`@Any` mapping>> section for more info.
|
||||
|
||||
|
@ -724,7 +724,7 @@ See the <<chapters/domain/basic_types.adoc#mapping-column-read-and-write-composi
|
|||
[[annotations-hibernate-columntransformer]]
|
||||
==== `@ColumnTransformer`
|
||||
|
||||
The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/ColumnTransformer.html[`@ColumnTransformer`] annotation is used to customize how a given column value is read from or write into the database.
|
||||
The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/ColumnTransformer.html[`@ColumnTransformer`] annotation is used to customize how a given column value is read from or written into the database.
|
||||
|
||||
See the <<chapters/domain/basic_types.adoc#mapping-column-read-and-write-example,`@ColumnTransformer` mapping>> section for more info.
|
||||
|
||||
|
@ -787,7 +787,7 @@ The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibern
|
|||
[[annotations-hibernate-fetch]]
|
||||
==== `@Fetch`
|
||||
|
||||
The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Fetch.html[`@Fetch`] annotation is used to specify the Hibernate specific https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/FetchMode.html[`FetchMode`] (e.g. `JOIN`, `SELECT`, `SUBSELECT`) used for the currently annotated association:
|
||||
The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Fetch.html[`@Fetch`] annotation is used to specify the Hibernate specific https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/FetchMode.html[`FetchMode`] (e.g. `JOIN`, `SELECT`, `SUBSELECT`) used for the currently annotated association.
|
||||
|
||||
See the <<chapters/fetching/Fetching.adoc#fetching-fetch-annotation, `@Fetch` mapping>> section for more info.
|
||||
|
||||
|
@ -974,7 +974,7 @@ See the <<chapters/domain/collections.adoc#collections-customizing-ordered-list-
|
|||
[[annotations-hibernate-loader]]
|
||||
==== `@Loader`
|
||||
|
||||
The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Loader.html[`@Loader`] annotation is used to override the default `SELECT` query used for loading an entity loading.
|
||||
The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Loader.html[`@Loader`] annotation is used to override the default `SELECT` query used for loading an entity.
|
||||
|
||||
See the <<chapters/query/native/Native.adoc#sql-custom-crud-example, Custom CRUD mapping>> section for more info.
|
||||
|
||||
|
@ -1067,7 +1067,7 @@ See the <<chapters/domain/natural_id.adoc#naturalid-caching,`@NaturalIdCache` ma
|
|||
|
||||
The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/NotFound.html[`@NotFound`] annotation is used to specify the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/NotFoundAction.html[`NotFoundAction`] strategy for when an element is not found in a given association.
|
||||
|
||||
The `NotFoundAction` defines with two possibilities:
|
||||
The `NotFoundAction` defines two possibilities:
|
||||
|
||||
`EXCEPTION`:: An exception is thrown when an element is not found (default and recommended).
|
||||
`IGNORE`:: Ignore the element when not found in the database.
|
||||
|
@ -1097,7 +1097,7 @@ See the <<chapters/locking/Locking.adoc#locking-optimistic-exclude-attribute, Ex
|
|||
[[annotations-hibernate-optimisticlocking]]
|
||||
==== `@OptimisticLocking`
|
||||
|
||||
The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/OptimisticLocking.html[`@OptimisticLocking`] annotation is used to specify the currently annotated an entity optimistic locking strategy.
|
||||
The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/OptimisticLocking.html[`@OptimisticLocking`] annotation is used to specify the currently annotated entity's optimistic locking strategy.
|
||||
|
||||
The four possible strategies are defined by the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/OptimisticLockType.html[`OptimisticLockType`] enumeration:
|
||||
|
||||
|
@ -1156,7 +1156,7 @@ The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibern
|
|||
There are two possible `PolymorphismType` options:
|
||||
|
||||
EXPLICIT:: The currently annotated entity is retrieved only if explicitly asked.
|
||||
IMPLICIT:: The currently annotated entity is retrieved if any of its super entity are retrieved. This is the default option.
|
||||
IMPLICIT:: The currently annotated entity is retrieved if any of its super entities are retrieved. This is the default option.
|
||||
|
||||
See the <<chapters/domain/inheritance.adoc#entity-inheritance-polymorphism, `@Polymorphism`>> section for more info.
|
||||
|
||||
|
@ -1170,7 +1170,7 @@ See the <<chapters/domain/entity.adoc#entity-proxy, `@Proxy` mapping>> section f
|
|||
[[annotations-hibernate-rowid]]
|
||||
==== `@RowId`
|
||||
|
||||
The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/RowId.html[`@RowId`] annotation is used to specify the database column used as a `ROWID` pseudocolumn.
|
||||
The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/RowId.html[`@RowId`] annotation is used to specify the database column used as a `ROWID` _pseudocolumn_.
|
||||
For instance, Oracle defines the https://docs.oracle.com/cd/B19306_01/server.102/b14200/pseudocolumns008.htm[`ROWID` pseudocolumn] which provides the address of every table row.
|
||||
|
||||
According to Oracle documentation, `ROWID` is the fastest way to access a single row from a table.
|
||||
|
|
|
@ -13,7 +13,7 @@ this feature is not suitable for a production environment.
|
|||
An automated schema migration tool (e.g. https://flywaydb.org/[Flyway], http://www.liquibase.org/[Liquibase]) allows you to use any database-specific DDL feature (e.g. Rules, Triggers, Partitioned Tables).
|
||||
Every migration should have an associated script, which is stored on the Version Control System, along with the application source code.
|
||||
|
||||
When the application is deployed on a production-like QA environment, and the deploy worked as expected, then pushing the deploy to a production environment should be straightforward since the latest schema migration was already tested.
|
||||
When the application is deployed on a production-like QA environment, and the deployment worked as expected, then pushing the deployment to a production environment should be straightforward since the latest schema migration was already tested.
|
||||
|
||||
[TIP]
|
||||
====
|
||||
|
@ -62,7 +62,7 @@ This saves database roundtrips, and so it https://leanpub.com/high-performance-j
|
|||
|
||||
Not only `INSERT` and `UPDATE` statements, but even `DELETE` statements can be batched as well.
|
||||
For `INSERT` and `UPDATE` statements, make sure that you have all the right configuration properties in place, like ordering inserts and updates and activating batching for versioned data.
|
||||
Check out this article for more details on this topic.
|
||||
Check out https://vladmihalcea.com/how-to-batch-insert-and-update-statements-with-hibernate/[this article] for more details on this topic.
|
||||
|
||||
For `DELETE` statements, there is no option to order parent and child statements, so cascading can interfere with the JDBC batching process.
|
||||
|
||||
|
@ -81,7 +81,7 @@ When it comes to identifiers, you can either choose a natural id or a synthetic
|
|||
|
||||
For natural identifiers, the *assigned* identifier generator is the right choice.
|
||||
|
||||
For synthetic keys, the application developer can either choose a randomly generates fixed-size sequence (e.g. UUID) or a natural identifier.
|
||||
For synthetic keys, the application developer can either choose a randomly generated fixed-size sequence (e.g. UUID) or a natural identifier.
|
||||
Natural identifiers are very practical, being more compact than their UUID counterparts, so there are multiple generators to choose from:
|
||||
|
||||
- `IDENTITY`
|
||||
|
@ -127,22 +127,22 @@ On the other hand, the more exotic the association mapping, the better the chanc
|
|||
Therefore, the `@ManyToOne` and the `@OneToOne` child-side associations are best to represent a `FOREIGN KEY` relationship.
|
||||
|
||||
The parent-side `@OneToOne` association requires bytecode enhancement
|
||||
so that the association can be loaded lazily. Otherwise, the parent-side is always fetched even if the association is marked with `FetchType.LAZY`.
|
||||
so that the association can be loaded lazily. Otherwise, the parent-side association is always fetched even if the association is marked with `FetchType.LAZY`.
|
||||
|
||||
For this reason, it's best to map `@OneToOne` association using `@MapsId` so that the `PRIMARY KEY` is shared between the child and the parent entities.
|
||||
When using `@MapsId`, the parent-side becomes redundant since the child-entity can be easily fetched using the parent entity identifier.
|
||||
When using `@MapsId`, the parent-side association becomes redundant since the child-entity can be easily fetched using the parent entity identifier.
|
||||
|
||||
For collections, the association can be either:
|
||||
|
||||
- unidirectional
|
||||
- bidirectional
|
||||
|
||||
For unidirectional collections, `Sets` are the best choice because they generate the most efficient SQL statements.
|
||||
Unidirectional `Lists` are less efficient than a `@ManyToOne` association.
|
||||
For unidirectional collections, ``Set``s are the best choice because they generate the most efficient SQL statements.
|
||||
Unidirectional ``List``s are less efficient than a `@ManyToOne` association.
|
||||
|
||||
Bidirectional associations are usually a better choice because the `@ManyToOne` side controls the association.
|
||||
|
||||
Embeddable collections (``@ElementCollection`) are unidirectional associations, hence `Sets` are the most efficient, followed by ordered `Lists`, whereas bags (unordered `Lists`) are the least efficient.
|
||||
Embeddable collections (``@ElementCollection``) are unidirectional associations, hence ``Set``s are the most efficient, followed by ordered ``List``s, whereas bags (unordered ``List``s) are the least efficient.
|
||||
|
||||
The `@ManyToMany` annotation is rarely a good choice because it treats both sides as unidirectional associations.
|
||||
|
||||
|
@ -198,7 +198,7 @@ Prior to JPA, Hibernate used to have all associations as `LAZY` by default.
|
|||
However, when the JPA 1.0 specification emerged, it was thought that not all providers would use Proxies. Hence, the `@ManyToOne` and the `@OneToOne` associations are now `EAGER` by default.
|
||||
|
||||
The `EAGER` fetching strategy cannot be overwritten on a per query basis, so the association is always going to be retrieved even if you don't need it.
|
||||
More, if you forget to `JOIN FETCH` an `EAGER` association in a JPQL query, Hibernate will initialize it with a secondary statement, which in turn can lead to N+1 query issues.
|
||||
Moreover, if you forget to `JOIN FETCH` an `EAGER` association in a JPQL query, Hibernate will initialize it with a secondary statement, which in turn can lead to N+1 query issues.
|
||||
====
|
||||
|
||||
So, `EAGER` fetching is to be avoided. For this reason, it's better if all associations are marked as `LAZY` by default.
|
||||
|
@ -208,7 +208,7 @@ There are good and bad ways to treat the `LazyInitializationException`.
|
|||
|
||||
The best way to deal with `LazyInitializationException` is to fetch all the required associations prior to closing the Persistence Context.
|
||||
The `JOIN FETCH` directive is good for `@ManyToOne` and `@OneToOne` associations, and for at most one collection (e.g. `@OneToMany` or `@ManyToMany`).
|
||||
If you need to fetch multiple collections, to avoid a Cartesian Product, you should use secondary queries which are triggered either by navigating the `LAZY` association or by calling `Hibernate#initialize(proxy)` method.
|
||||
If you need to fetch multiple collections, to avoid a Cartesian Product, you should use secondary queries which are triggered either by navigating the `LAZY` association or by calling `Hibernate#initialize(Object proxy)` method.
|
||||
|
||||
[[best-practices-caching]]
|
||||
=== Caching
|
||||
|
@ -216,7 +216,7 @@ If you need to fetch multiple collections, to avoid a Cartesian Product, you sho
|
|||
Hibernate has two caching layers:
|
||||
|
||||
- the first-level cache (Persistence Context) which provides application-level repeatable reads.
|
||||
- the second-level cache which, unlike application-level caches, it doesn't store entity aggregates but normalized dehydrated entity entries.
|
||||
- the second-level cache which, unlike application-level caches, doesn't store entity aggregates but normalized dehydrated entity entries.
|
||||
|
||||
The first-level cache is not a caching solution "per se", being more useful for ensuring `READ COMMITTED` isolation level.
|
||||
|
||||
|
@ -229,7 +229,7 @@ and you should consider these alternatives prior to jumping to a second-level ca
|
|||
|
||||
- tuning the underlying database cache so that the working set fits into memory, therefore reducing Disk I/O traffic.
|
||||
- optimizing database statements through JDBC batching, statement caching, indexing can reduce the average response time, therefore increasing throughput as well.
|
||||
- database replication is also a very valuable option to increase read-only transaction throughput
|
||||
- database replication is also a very valuable option to increase read-only transaction throughput.
|
||||
|
||||
After properly tuning the database, to further reduce the average response time and increase the system throughput, application-level caching becomes inevitable.
|
||||
|
||||
|
|
|
@ -10,7 +10,7 @@ The documentation of such configuration settings refers here.
|
|||
The types of forms available in such cases include:
|
||||
|
||||
short name (if defined)::
|
||||
Certain built-in strategy implementations have a corresponding short name.
|
||||
Certain built-in strategy implementations have a corresponding short name
|
||||
strategy instance::
|
||||
An instance of the strategy implementation to use can be specified
|
||||
strategy Class reference::
|
||||
|
@ -28,7 +28,7 @@ In most cases, Hibernate can choose the correct https://docs.jboss.org/hibernate
|
|||
+
|
||||
`*hibernate.current_session_context_class*` (e.g. `jta`, `thread`, `managed`, or a custom class implementing `org.hibernate.context.spi.CurrentSessionContext`)::
|
||||
+
|
||||
Supply a custom strategy for the scoping of the _current_ `Session`.
|
||||
Supplies a custom strategy for the scoping of the _current_ `Session`.
|
||||
+
|
||||
The definition of what exactly _current_ means is controlled by the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/context/spi/CurrentSessionContext.html[`CurrentSessionContext`] implementation in use.
|
||||
+
|
||||
|
@ -42,7 +42,7 @@ This setting controls if Hibernate `Transaction` should behave as defined by th
|
|||
since it extends the JPA one.
|
||||
|
||||
`*hibernate.jpa.compliance.query*` (e.g. `true` or `false` (default value))::
|
||||
Controls whether Hibernate's handling of `javax.persistence.Query` (JPQL, Criteria and native-query) should strictly follow the JPA spec.
|
||||
Controls whether Hibernate's handling of `javax.persistence.Query` (JPQL, Criteria and native query) should strictly follow the JPA spec.
|
||||
+
|
||||
This covers both the parsing and translation of a query, as well as calls to the `javax.persistence.Query` methods throwing spec
|
||||
defined exceptions whereas Hibernate might not.
|
||||
|
@ -63,7 +63,7 @@ This setting controls whether the JPA spec-defined behavior or the Hibernate beh
|
|||
If enabled, Hibernate will operate in the JPA specified way, throwing exceptions when the spec says it should.
|
||||
|
||||
`*hibernate.jpa.compliance.proxy*` (e.g. `true` or `false` (default value))::
|
||||
The JPA spec says that a `javax.persistence.EntityNotFoundException` should be thrown when accessing an entity Proxy
|
||||
The JPA spec says that a `javax.persistence.EntityNotFoundException` should be thrown when accessing an entity proxy
|
||||
which does not have an associated table row in the database.
|
||||
+
|
||||
Traditionally, Hibernate does not initialize an entity proxy when accessing its identifier since we already know the identifier value,
|
||||
|
@ -77,7 +77,7 @@ The JPA spec says that the scope of TableGenerator and SequenceGenerator names i
|
|||
Traditionally, Hibernate has considered the names locally scoped.
|
||||
+
|
||||
If enabled, the names used by `@TableGenerator` and `@SequenceGenerator` will be considered global so configuring two different generators
|
||||
with the same name will cause a `java.lang.IllegalArgumentException' to be thrown at boot time.
|
||||
with the same name will cause a `java.lang.IllegalArgumentException` to be thrown at boot time.
|
||||
|
||||
[[configurations-database-connection]]
|
||||
=== Database connection properties
|
||||
|
@ -106,16 +106,16 @@ See discussion of `hibernate.connection.provider_disables_autocommit` as well.
|
|||
Indicates a promise by the user that Connections that Hibernate obtains from the configured ConnectionProvider
|
||||
have auto-commit disabled when they are obtained from that provider, whether that provider is backed by
|
||||
a DataSource or some other Connection pooling mechanism. Generally, this occurs when:
|
||||
* Hibernate is configured to get Connections from an underlying DataSource, and that DataSource is already configured to disable auto-commit on its managed Connections
|
||||
* Hibernate is configured to get Connections from an underlying DataSource, and that DataSource is already configured to disable auto-commit on its managed Connections.
|
||||
* Hibernate is configured to get Connections from a non-DataSource connection pool and that connection pool is already configured to disable auto-commit.
|
||||
For the Hibernate provided implementation this will depend on the value of `hibernate.connection.autocommit` setting.
|
||||
+
|
||||
Hibernate uses this assurance as an opportunity to opt-out of certain operations that may have a performance
|
||||
Hibernate uses this assurance as an opportunity to opt out of certain operations that may have a performance
|
||||
impact (although this impact is generally negligible). Specifically, when a transaction is started via the
|
||||
Hibernate or JPA transaction APIs Hibernate will generally immediately acquire a Connection from the
|
||||
provider and:
|
||||
* check whether the Connection is initially in auto-commit mode via a call to `Connection#getAutoCommit` to know how to clean up the Connection when released.
|
||||
* start a JDBC transaction by calling `Connection#setAutocommit(false)`
|
||||
* start a JDBC transaction by calling `Connection#setAutoCommit(false)`.
|
||||
+
|
||||
We can skip both of those steps if we know that the ConnectionProvider will always return Connections with auto-commit disabled.
|
||||
That is the purpose of this setting. By setting it to `true`, the `Connection` acquisition can be delayed until the first
|
||||
|
@ -147,9 +147,9 @@ For more details about the `PhysicalConnectionHandlingMode` and Hibernate connec
|
|||
====
|
||||
This setting is deprecated. You should use the `*hibernate.connection.handling_mode*` instead.
|
||||
====
|
||||
+
|
||||
|
||||
Specifies how Hibernate should acquire JDBC connections. The possible values are given by `org.hibernate.ConnectionAcquisitionMode`.
|
||||
+
|
||||
|
||||
Should generally only configure this or `hibernate.connection.release_mode`, not both.
|
||||
|
||||
[line-through]#`*hibernate.connection.release_mode*`# (e.g. `auto` (default value))::
|
||||
|
@ -157,9 +157,9 @@ Should generally only configure this or `hibernate.connection.release_mode`, not
|
|||
====
|
||||
This setting is deprecated. You should use the `*hibernate.connection.handling_mode*` instead.
|
||||
====
|
||||
+
|
||||
|
||||
Specifies how Hibernate should release JDBC connections. The possible values are given by the current transaction mode (`after_transaction` for JDBC transactions and `after_statement` for JTA transactions).
|
||||
+
|
||||
|
||||
Should generally only configure this or `hibernate.connection.acquisition_mode`, not both.
|
||||
|
||||
`*hibernate.connection.datasource*`::
|
||||
|
@ -175,7 +175,7 @@ Names the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/
|
|||
Can reference:
|
||||
+
|
||||
** an instance of `ConnectionProvider`
|
||||
** a `Class<? extends ConnectionProvider` object reference
|
||||
** a `Class<? extends ConnectionProvider>` object reference
|
||||
** a fully qualified name of a class implementing `ConnectionProvider`
|
||||
+
|
||||
|
||||
|
@ -184,13 +184,13 @@ The term `class` appears in the setting name due to legacy reasons. However, it
|
|||
`*hibernate.jndi.class*`::
|
||||
Names the JNDI `javax.naming.InitialContext` class.
|
||||
|
||||
`*hibernate.jndi.url*` (e.g. java:global/jdbc/default)::
|
||||
`*hibernate.jndi.url*` (e.g. `java:global/jdbc/default`)::
|
||||
Names the JNDI provider/connection url.
|
||||
|
||||
`*hibernate.jndi*`::
|
||||
Names a prefix used to define arbitrary JNDI `javax.naming.InitialContext` properties.
|
||||
+
|
||||
These properties are passed along to `javax.naming.InitialContext#InitialContext(java.util.Hashtable)`
|
||||
These properties are passed along to `javax.naming.InitialContext#InitialContext(java.util.Hashtable)` method.
|
||||
|
||||
==== Hibernate internal connection pool options
|
||||
|
||||
|
@ -201,7 +201,7 @@ Minimum number of connections for the built-in Hibernate connection pool.
|
|||
Maximum number of connections for the built-in Hibernate connection pool.
|
||||
|
||||
`*hibernate.connection.pool_validation_interval*` (e.g. 30 (default value))::
|
||||
The number of seconds between two consecutive pool validations. During validation, the pool size can increase or decreases based on the connection acquisition request count.
|
||||
The number of seconds between two consecutive pool validations. During validation, the pool size can increase or decrease based on the connection acquisition request count.
|
||||
|
||||
[[configurations-c3p0]]
|
||||
=== c3p0 properties
|
||||
|
@ -264,7 +264,7 @@ If true, the value specified by the `generator` attribute of the `@GeneratedValu
|
|||
The default value is `true` meaning that `@GeneratedValue.generator()` will be used as the sequence/table name by default.
|
||||
Users migrating from earlier versions using the legacy `hibernate_sequence` name should disable this setting.
|
||||
|
||||
`*hibernate.ejb.identifier_generator_strategy_provider*` (e.g. fully-qualified class name or an actual the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/jpa/spi/IdentifierGeneratorStrategyProvider.html[`IdentifierGeneratorStrategyProvider`] instance)::
|
||||
`*hibernate.ejb.identifier_generator_strategy_provider*` (e.g. fully-qualified class name or an actual https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/jpa/spi/IdentifierGeneratorStrategyProvider.html[`IdentifierGeneratorStrategyProvider`] instance)::
|
||||
This setting allows you to provide an instance or the class implementing the `org.hibernate.jpa.spi.IdentifierGeneratorStrategyProvider` interface,
|
||||
so you can provide a set of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/id/IdentifierGenerator.html[`IdentifierGenerator`] strategies that allow you to override the Hibernate Core default ones.
|
||||
|
||||
|
@ -282,7 +282,7 @@ Should all database identifiers be quoted.
|
|||
|
||||
`*hibernate.globally_quoted_identifiers_skip_column_definitions*` (e.g. `true` or `false` (default value))::
|
||||
Assuming `hibernate.globally_quoted_identifiers` is `true`, this allows the global quoting to skip column-definitions as defined by `javax.persistence.Column`,
|
||||
`javax.persistence.JoinColumn`, etc, and while it avoids column-definitions being quoted due to global quoting, they can still be explicitly quoted in the annotation/xml mappings.
|
||||
`javax.persistence.JoinColumn`, etc., and while it avoids column-definitions being quoted due to global quoting, they can still be explicitly quoted in the annotation/xml mappings.
|
||||
|
||||
`*hibernate.auto_quote_keyword*` (e.g. `true` or `false` (default value))::
|
||||
Specifies whether to automatically quote any names that are deemed keywords.
|
||||
|
@ -330,7 +330,7 @@ Used to specify the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/jav
|
|||
Pass an implementation of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/archive/scan/spi/Scanner.html[`Scanner`].
|
||||
By default, https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/archive/scan/internal/StandardScanner.html[`StandardScanner`] is used.
|
||||
+
|
||||
Accepts either:
|
||||
Accepts:
|
||||
+
|
||||
** an actual `Scanner` instance
|
||||
** a reference to a Class that implements `Scanner`
|
||||
|
@ -339,7 +339,7 @@ Accepts either:
|
|||
`*hibernate.archive.interpreter*`::
|
||||
Pass https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/archive/spi/ArchiveDescriptorFactory.html[`ArchiveDescriptorFactory`] to use in the scanning process.
|
||||
+
|
||||
Accepts either:
|
||||
Accepts:
|
||||
+
|
||||
** an actual `ArchiveDescriptorFactory` instance
|
||||
** a reference to a Class that implements `ArchiveDescriptorFactory`
|
||||
|
@ -349,7 +349,7 @@ Accepts either:
|
|||
See information on https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/archive/scan/spi/Scanner.html[`Scanner`] about expected constructor forms.
|
||||
|
||||
`*hibernate.archive.autodetection*` (e.g. `hbm,class` (default value))::
|
||||
Identifies a comma-separate list of values indicating the mapping types we should auto-detect during scanning.
|
||||
Identifies a comma-separated list of values indicating the mapping types we should auto-detect during scanning.
|
||||
+
|
||||
Allowable values include:
|
||||
+
|
||||
|
@ -357,7 +357,7 @@ Allowable values include:
|
|||
`hbm`::: scan `hbm` mapping files (e.g. `hbm.xml`) to extract entity mapping metadata
|
||||
+
|
||||
|
||||
By default both HBM, annotations, and JPA XML mappings are scanned.
|
||||
By default HBM, annotations, and JPA XML mappings are scanned.
|
||||
+
|
||||
When using JPA, to disable the automatic scanning of all entity classes, the `exclude-unlisted-classes` `persistence.xml` element must be set to true.
|
||||
Therefore, when setting `exclude-unlisted-classes` to true, only the classes that are explicitly declared in the `persistence.xml` configuration files are going to be taken into consideration.
|
||||
|
@ -366,14 +366,14 @@ Therefore, when setting `exclude-unlisted-classes` to true, only the classes tha
|
|||
Used to specify the order in which metadata sources should be processed.
|
||||
Value is a delimited-list whose elements are defined by https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/cfg/MetadataSourceType.html[`MetadataSourceType`].
|
||||
+
|
||||
The default is `hbm,class"`, therefore `hbm.xml` files are processed first, followed by annotations (combined with `orm.xml` mappings).
|
||||
The default is `hbm,class`, therefore `hbm.xml` files are processed first, followed by annotations (combined with `orm.xml` mappings).
|
||||
+
|
||||
When using JPA, the XML mapping overrides a conflicting annotation mapping that targets the same entity attribute.
|
||||
|
||||
==== JDBC-related options
|
||||
|
||||
`*hibernate.use_nationalized_character_data*` (e.g. `true` or `false` (default value))::
|
||||
Enable nationalized character support on all string / clob based attribute ( string, char, clob, text etc ).
|
||||
Enable nationalized character support on all string / clob based attribute ( string, char, clob, text, etc. ).
|
||||
|
||||
`*hibernate.jdbc.lob.non_contextual_creation*` (e.g. `true` or `false` (default value))::
|
||||
Should we not use contextual LOB creation (aka based on `java.sql.Connection#createBlob()` et al)? The default value for HANA, H2, and PostgreSQL is `true`.
|
||||
|
@ -403,12 +403,12 @@ Such a need is very uncommon and not recommended.
|
|||
==== Misc options
|
||||
|
||||
`*hibernate.create_empty_composites.enabled*` (e.g. `true` or `false` (default value))::
|
||||
Enable instantiation of composite/embeddable objects when all of its attribute values are `null`. The default (and historical) behavior is that a `null` reference will be used to represent the composite when all of its attributes are `null`.
|
||||
Enable instantiation of composite/embeddable objects when all of its attribute values are `null`. The default (and historical) behavior is that a `null` reference will be used to represent the composite when all of its attributes are ``null``s.
|
||||
+
|
||||
This is an experimental feature that has known issues. It should not be used in production until it is stabilized. See Hibernate Jira issue https://hibernate.atlassian.net/browse/HHH-11936[HHH-11936] for details.
|
||||
|
||||
`*hibernate.entity_dirtiness_strategy*` (e.g. fully-qualified class name or an actual `CustomEntityDirtinessStrategy` instance)::
|
||||
Setting to identify a `org.hibernate.CustomEntityDirtinessStrategy` to use.
|
||||
Setting to identify an `org.hibernate.CustomEntityDirtinessStrategy` to use.
|
||||
|
||||
`*hibernate.default_entity_mode*` (e.g. `pojo` (default value) or `dynamic-map`)::
|
||||
Default `EntityMode` for entity representation for all sessions opened from this `SessionFactory`, defaults to `pojo`.
|
||||
|
@ -474,7 +474,7 @@ Should named queries be checked during startup?
|
|||
Global setting for whether `null` parameter bindings should be passed to database procedure/function calls as part of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/procedure/ProcedureCall.html[`ProcedureCall`] handling.
|
||||
Implicitly Hibernate will not pass the `null`, the intention being to allow any default argument values to be applied.
|
||||
+
|
||||
This defines a global setting, which can then be controlled per parameter via `org.hibernate.procedure.ParameterRegistration#enablePassingNulls(boolean)`
|
||||
This defines a global setting, which can then be controlled per parameter via `org.hibernate.procedure.ParameterRegistration#enablePassingNulls(boolean)`.
|
||||
+
|
||||
Values are `true` (pass the NULLs) or `false` (do not pass the NULLs).
|
||||
|
||||
|
@ -491,14 +491,14 @@ Can reference a
|
|||
|
||||
`*hibernate.query.validate_parameters*` (e.g. `true` (default value) or `false`)::
|
||||
This configuration property can be used to disable parameters validation performed by `org.hibernate.query.Query#setParameter` when the Session is bootstrapped via JPA
|
||||
`javax.persistence.EntityManagerFactory`
|
||||
`javax.persistence.EntityManagerFactory`.
|
||||
|
||||
`*hibernate.criteria.literal_handling_mode*` (e.g. `AUTO` (default value), `BIND` or `INLINE`)::
|
||||
By default, Criteria queries uses bind parameters for any literal that is not a numeric value.
|
||||
By default, Criteria queries use bind parameters for any literal that is not a numeric value.
|
||||
However, to increase the likelihood of JDBC statement caching, you might want to use bind parameters for numeric values too.
|
||||
+
|
||||
The `org.hibernate.query.criteria.LiteralHandlingMode#BIND` mode will use bind variables for any literal value.
|
||||
The `org.hibernate.query.criteria.LiteralHandlingMode#INLINE` mode will inline literal values as-is.
|
||||
The `org.hibernate.query.criteria.LiteralHandlingMode#INLINE` mode will inline literal values as is.
|
||||
+
|
||||
To prevent SQL injection, never use `org.hibernate.query.criteria.LiteralHandlingMode#INLINE` with String variables.
|
||||
Always use constants with the `org.hibernate.query.criteria.LiteralHandlingMode#INLINE` mode.
|
||||
|
@ -574,15 +574,15 @@ Set this property to `true` if your JDBC driver returns correct row counts from
|
|||
`*hibernate.batch_fetch_style*` (e.g. `LEGACY`(default value))::
|
||||
Names the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/loader/BatchFetchStyle.html[`BatchFetchStyle`] to use.
|
||||
+
|
||||
Can specify either the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/loader/BatchFetchStyle.html[`BatchFetchStyle`] name (insensitively), or a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/loader/BatchFetchStyle.html[`BatchFetchStyle`] instance. `LEGACY}` is the default value.
|
||||
Can specify either the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/loader/BatchFetchStyle.html[`BatchFetchStyle`] name (case insensitively), or a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/loader/BatchFetchStyle.html[`BatchFetchStyle`] instance. `LEGACY` is the default value.
|
||||
|
||||
`*hibernate.jdbc.batch.builder*` (e.g. The fully qualified name of a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/batch/spi/BatchBuilder.html[`BatchBuilder`] implementation class type or an actual object instance)::
|
||||
`*hibernate.jdbc.batch.builder*` (e.g. the fully qualified name of a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/batch/spi/BatchBuilder.html[`BatchBuilder`] implementation class type or an actual object instance)::
|
||||
Names the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/batch/spi/BatchBuilder.html[`BatchBuilder`] implementation to use.
|
||||
|
||||
[[configurations-database-fetch]]
|
||||
==== Fetching properties
|
||||
|
||||
`*hibernate.max_fetch_depth*` (e.g. A value between `0` and `3`)::
|
||||
`*hibernate.max_fetch_depth*` (e.g. a value between `0` and `3`)::
|
||||
Sets a maximum depth for the outer join fetch tree for single-ended associations. A single-ended association is a one-to-one or many-to-one association. A value of `0` disables default outer join fetching.
|
||||
|
||||
`*hibernate.default_batch_fetch_size*` (e.g. `4`,`8`, or `16`)::
|
||||
|
@ -645,7 +645,7 @@ Either a shortcut name (e.g. `jcache`, `ehcache`) or the fully-qualified name of
|
|||
|
||||
`*hibernate.cache.default_cache_concurrency_strategy*`::
|
||||
Setting used to give the name of the default https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/CacheConcurrencyStrategy.html[`CacheConcurrencyStrategy`] to use
|
||||
when either `@javax.persistence.Cacheable` or `@org.hibernate.annotations.Cache`. `@org.hibernate.annotations.Cache` is used to override the global setting.
|
||||
when either `@javax.persistence.Cacheable` or `@org.hibernate.annotations.Cache` is used. `@org.hibernate.annotations.Cache` can be used to override the global setting.
|
||||
|
||||
`*hibernate.cache.use_minimal_puts*` (e.g. `true` (default value) or `false`)::
|
||||
Optimizes second-level cache operation to minimize writes, at the cost of more frequent reads. This is most useful for clustered caches and is enabled by default for clustered cache implementations.
|
||||
|
@ -654,9 +654,9 @@ Optimizes second-level cache operation to minimize writes, at the cost of more f
|
|||
Enables the query cache. You still need to set individual queries to be cacheable.
|
||||
|
||||
`*hibernate.cache.use_second_level_cache*` (e.g. `true` (default value) or `false`)::
|
||||
Enable/disable the second level cache, which is enabled by default, although the default `RegionFactor` is `NoCachingRegionFactory` (meaning there is no actual caching implementation).
|
||||
Enable/disable the second-level cache, which is enabled by default, although the default `RegionFactory` is `NoCachingRegionFactory` (meaning there is no actual caching implementation).
|
||||
|
||||
`*hibernate.cache.query_cache_factory*` (e.g. Fully-qualified class name)::
|
||||
`*hibernate.cache.query_cache_factory*` (e.g. fully-qualified class name)::
|
||||
A custom https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/cache/spi/QueryCacheFactory.html[`QueryCacheFactory`] interface. The default is the built-in `StandardQueryCacheFactory`.
|
||||
|
||||
`*hibernate.cache.region_prefix*` (e.g. A string)::
|
||||
|
@ -669,29 +669,29 @@ Forces Hibernate to store data in the second-level cache in a more human-readabl
|
|||
Enables the automatic eviction of a bi-directional association's collection cache when an element in the `ManyToOne` collection is added/updated/removed without properly managing the change on the `OneToMany` side.
|
||||
|
||||
`*hibernate.cache.use_reference_entries*` (e.g. `true` or `false`)::
|
||||
Optimizes second-level cache operation to store immutable entities (aka "reference") which do not have associations into cache directly, this case, disassembling and deep copy operations can be avoided. The default value of this property is `false`.
|
||||
Optimizes second-level cache operation to store immutable entities (aka "reference") which do not have associations into cache directly. In this case, disassembling and deep copy operations can be avoided. The default value of this property is `false`.
|
||||
|
||||
`*hibernate.ejb.classcache*` (e.g. `hibernate.ejb.classcache.org.hibernate.ejb.test.Item` = `read-write`)::
|
||||
Sets the associated entity class cache concurrency strategy for the designated region. Caching configuration should follow the following pattern `hibernate.ejb.classcache.<fully.qualified.Classname>` usage[, region] where usage is the cache strategy used and region the cache region name.
|
||||
Sets the associated entity class cache concurrency strategy for the designated region. Caching configuration should follow the following pattern `hibernate.ejb.classcache.<fully.qualified.Classname> = usage[, region]` where usage is the cache strategy used and region the cache region name.
|
||||
|
||||
`*hibernate.ejb.collectioncache*` (e.g. `hibernate.ejb.collectioncache.org.hibernate.ejb.test.Item.distributors` = `read-write, RegionName`)::
|
||||
Sets the associated collection cache concurrency strategy for the designated region. Caching configuration should follow the following pattern `hibernate.ejb.collectioncache.<fully.qualified.Classname>.<role>` usage[, region] where usage is the cache strategy used and region the cache region name
|
||||
Sets the associated collection cache concurrency strategy for the designated region. Caching configuration should follow the following pattern `hibernate.ejb.collectioncache.<fully.qualified.Classname>.<role> = usage[, region]` where usage is the cache strategy used and region the cache region name.
|
||||
|
||||
[[configurations-infinispan]]
|
||||
=== Infinispan properties
|
||||
|
||||
For more details about how to customize the Infinispan second-level cache provider, check out the
|
||||
http://infinispan.org/docs/stable/user_guide/user_guide.html#configuration_properties[Infinispan User Guide]
|
||||
http://infinispan.org/docs/stable/user_guide/user_guide.html#configuration_properties[Infinispan User Guide].
|
||||
|
||||
[[configurations-transactions]]
|
||||
=== Transactions properties
|
||||
|
||||
`*hibernate.transaction.jta.platform*` (e.g. `JBossAS`, `BitronixJtaPlatform`)::
|
||||
Names the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/transaction/jta/platform/spi/JtaPlatform.html[`JtaPlatform`] implementation to use for integrating with JTA systems.
|
||||
Can reference either a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/transaction/jta/platform/spi/JtaPlatform.html[`JtaPlatform`] instance or the name of the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/transaction/jta/platform/spi/JtaPlatform.html[`JtaPlatform`] implementation class
|
||||
Can reference either a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/transaction/jta/platform/spi/JtaPlatform.html[`JtaPlatform`] instance or the name of the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/transaction/jta/platform/spi/JtaPlatform.html[`JtaPlatform`] implementation class.
|
||||
|
||||
`*hibernate.jta.prefer_user_transaction*` (e.g. `true` or `false` (default value))::
|
||||
Should we prefer using the `org.hibernate.engine.transaction.jta.platform.spi.JtaPlatform#retrieveUserTransaction` over using `org.hibernate.engine.transaction.jta.platform.spi.JtaPlatform#retrieveTransactionManager`
|
||||
Should we prefer using the `org.hibernate.engine.transaction.jta.platform.spi.JtaPlatform#retrieveUserTransaction` over using `org.hibernate.engine.transaction.jta.platform.spi.JtaPlatform#retrieveTransactionManager`?
|
||||
|
||||
`*hibernate.transaction.jta.platform_resolver*`::
|
||||
Names the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/transaction/jta/platform/spi/JtaPlatformResolver.html[`JtaPlatformResolver`] implementation to use.
|
||||
|
@ -711,12 +711,12 @@ Causes the session to be closed during the after completion phase of the transac
|
|||
`*hibernate.transaction.coordinator_class*`::
|
||||
Names the implementation of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/resource/transaction/spi/TransactionCoordinatorBuilder.html[`TransactionCoordinatorBuilder`] to use for creating https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/resource/transaction/spi/TransactionCoordinator.html[`TransactionCoordinator`] instances.
|
||||
+
|
||||
Can be a`TransactionCoordinatorBuilder` instance, `TransactionCoordinatorBuilder` implementation `Class` reference, a `TransactionCoordinatorBuilder` implementation class name (fully-qualified name) or a short name.
|
||||
Can be a `TransactionCoordinatorBuilder` instance, `TransactionCoordinatorBuilder` implementation `Class` reference, a `TransactionCoordinatorBuilder` implementation class name (fully-qualified name) or a short name.
|
||||
+
|
||||
The following short names are defined for this setting:
|
||||
+
|
||||
`jdbc`::: Manages transactions via calls to `java.sql.Connection` (default for non-JPA applications)
|
||||
`jta`::: Manages transactions via JTA. See <<chapters/bootstrap/Bootstrap.adoc#bootstrap-jpa-compliant,Java EE bootstrapping>>
|
||||
`jdbc`::: Manages transactions via calls to `java.sql.Connection` (default for non-JPA applications).
|
||||
`jta`::: Manages transactions via JTA. See <<chapters/bootstrap/Bootstrap.adoc#bootstrap-jpa-compliant,Java EE bootstrapping>>.
|
||||
+
|
||||
|
||||
If a JPA application does not provide a setting for `hibernate.transaction.coordinator_class`, Hibernate will
|
||||
|
@ -756,12 +756,12 @@ The multi-tenancy strategy in use.
|
|||
Names a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/connections/spi/MultiTenantConnectionProvider.html[`MultiTenantConnectionProvider`] implementation to use. As `MultiTenantConnectionProvider` is also a service, can be configured directly through the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/registry/StandardServiceRegistryBuilder.html[`StandardServiceRegistryBuilder`].
|
||||
|
||||
`*hibernate.tenant_identifier_resolver*`::
|
||||
Names a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/context/spi/CurrentTenantIdentifierResolver.html[`CurrentTenantIdentifierResolver`] implementation to resolve the resolve the current tenant identifier so that calling `SessionFactory#openSession()` would get a `Session` that's connected to the right tenant.
|
||||
Names a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/context/spi/CurrentTenantIdentifierResolver.html[`CurrentTenantIdentifierResolver`] implementation to resolve the current tenant identifier so that calling `SessionFactory#openSession()` would get a `Session` that's connected to the right tenant.
|
||||
+
|
||||
Can be a `CurrentTenantIdentifierResolver` instance, `CurrentTenantIdentifierResolver` implementation `Class` object reference or a `CurrentTenantIdentifierResolver` implementation class name.
|
||||
|
||||
`*hibernate.multi_tenant.datasource.identifier_for_any*` (e.g. `true` or `false` (default value))::
|
||||
When the `hibernate.connection.datasource` property value is resolved to a `javax.naming.Context` object, this configuration property defines the JNDI name used to locate the `DataSource` used for fetching the initial `Connection` which is used to access to the database metadata of the underlying database(s) (in situations where we do not have a tenant id, like startup processing).
|
||||
When the `hibernate.connection.datasource` property value is resolved to a `javax.naming.Context` object, this configuration property defines the JNDI name used to locate the `DataSource` used for fetching the initial `Connection` which is used to access the database metadata of the underlying database(s) (in situations where we do not have a tenant id, like startup processing).
|
||||
|
||||
[[configurations-hbmddl]]
|
||||
=== Automatic schema generation
|
||||
|
@ -775,8 +775,8 @@ Valid options are defined by the `externalHbm2ddlName` value of the https://docs
|
|||
`drop`::: Database dropping will be generated.
|
||||
`create`::: Database dropping will be generated followed by database creation.
|
||||
`create-drop`::: Drop the schema and recreate it on SessionFactory startup. Additionally, drop the schema on SessionFactory shutdown.
|
||||
`validate`::: Validate the database schema
|
||||
`update`::: Update the database schema
|
||||
`validate`::: Validate the database schema.
|
||||
`update`::: Update the database schema.
|
||||
|
||||
`*javax.persistence.schema-generation.database.action*` (e.g. `none` (default value), `create-only`, `drop`, `create`, `create-drop`, `validate`, and `update`)::
|
||||
Setting to perform `SchemaManagementTool` actions automatically as part of the `SessionFactory` lifecycle.
|
||||
|
@ -797,7 +797,7 @@ Valid options are defined by the `externalJpaName` value of the https://docs.jbo
|
|||
`drop-and-create`::: Database dropping will be generated followed by database creation.
|
||||
|
||||
`*javax.persistence.schema-generation-connection*`::
|
||||
Allows passing a specific `java.sql.Connection` instance to be used by `SchemaManagementTool`
|
||||
Allows passing a specific `java.sql.Connection` instance to be used by `SchemaManagementTool`.
|
||||
|
||||
`*javax.persistence.database-product-name*`::
|
||||
Specifies the name of the database provider in cases where a Connection to the underlying database is not available (aka, mainly in generating scripts).
|
||||
|
@ -864,13 +864,13 @@ A "SQL load script" is a script that performs some database initialization (INSE
|
|||
Reference to the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/hbm2ddl/ImportSqlCommandExtractor.html[`ImportSqlCommandExtractor`] implementation class to use for parsing source/import files as defined by `javax.persistence.schema-generation.create-script-source`,
|
||||
`javax.persistence.schema-generation.drop-script-source` or `hibernate.hbm2ddl.import_files`.
|
||||
+
|
||||
Reference may refer to an instance, a Class implementing `ImportSqlCommandExtractor` of the fully-qualified name of the `ImportSqlCommandExtractor` implementation.
|
||||
Reference may refer to an instance, a Class implementing `ImportSqlCommandExtractor` or the fully-qualified name of the `ImportSqlCommandExtractor` implementation.
|
||||
If the fully-qualified name is given, the implementation must provide a no-arg constructor.
|
||||
+
|
||||
The default value is https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/hbm2ddl/SingleLineSqlCommandExtractor.html[`SingleLineSqlCommandExtractor`].
|
||||
|
||||
`*hibernate.hbm2ddl.create_namespaces*` (e.g. `true` or `false` (default value))::
|
||||
Specifies whether to automatically create also the database schema/catalog.
|
||||
Specifies whether to automatically create the database schema/catalog also.
|
||||
|
||||
`*javax.persistence.create-database-schemas*` (e.g. `true` or `false` (default value))::
|
||||
The JPA variant of `hibernate.hbm2ddl.create_namespaces`. Specifies whether the persistence provider is to create the database schema(s) in addition to creating database objects (tables, sequences, constraints, etc).
|
||||
|
@ -886,7 +886,7 @@ Used to specify the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/jav
|
|||
Setting to choose the strategy used to access the JDBC Metadata.
|
||||
Valid options are defined by the `strategy` value of the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/JdbcMetadaAccessStrategy.html[`JdbcMetadaAccessStrategy`] enum:
|
||||
+
|
||||
`grouped`::: https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/spi/SchemaMigrator.html[`SchemaMigrator`] and https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/spi/SchemaValidator.html[`SchemaValidator`] execute a single `java.sql.DatabaseMetaData#getTables(String, String, String, String[])` call to retrieve all the database table in order to determine if all the `javax.persistence.Entity` have a corresponding mapped database tables.This strategy may require `hibernate.default_schema` and/or `hibernate.default_catalog` to be provided.
|
||||
`grouped`::: https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/spi/SchemaMigrator.html[`SchemaMigrator`] and https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/spi/SchemaValidator.html[`SchemaValidator`] execute a single `java.sql.DatabaseMetaData#getTables(String, String, String, String[])` call to retrieve all the database tables in order to determine if all the ``javax.persistence.Entity``s have corresponding mapped database tables. This strategy may require `hibernate.default_schema` and/or `hibernate.default_catalog` to be provided.
|
||||
`individually`::: https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/spi/SchemaMigrator.html[`SchemaMigrator`] and https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/spi/SchemaValidator.html[`SchemaValidator`] execute one `java.sql.DatabaseMetaData#getTables(String, String, String, String[])` call for each `javax.persistence.Entity` in order to determine if a corresponding database table exists.
|
||||
|
||||
`*hibernate.hbm2ddl.delimiter*` (e.g. `;`)::
|
||||
|
@ -911,7 +911,7 @@ Therefore, the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs
|
|||
`DROP_RECREATE_QUIETLY`::: Default option.
|
||||
Attempt to drop, then (re-)create each unique constraint. Ignore any exceptions being thrown.
|
||||
`RECREATE_QUIETLY`:::
|
||||
Attempts to (re-)create unique constraints, ignoring exceptions thrown if the constraint already existed
|
||||
Attempts to (re-)create unique constraints, ignoring exceptions thrown if the constraint already existed.
|
||||
`SKIP`:::
|
||||
Does not attempt to create unique constraints on a schema update.
|
||||
|
||||
|
@ -924,13 +924,13 @@ Whether the schema migration tool should halt on error, therefore terminating th
|
|||
[[configurations-exception-handling]]
|
||||
=== Exception handling
|
||||
|
||||
`*hibernate.jdbc.sql_exception_converter*` (e.g. Fully-qualified name of class implementing `SQLExceptionConverter`)::
|
||||
`*hibernate.jdbc.sql_exception_converter*` (e.g. fully-qualified name of class implementing `SQLExceptionConverter`)::
|
||||
The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/exception/spi/SQLExceptionConverter.html[`SQLExceptionConverter`] to use for converting `SQLExceptions` to Hibernate's `JDBCException` hierarchy. The default is to use the configured https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/dialect/Dialect.html[`Dialect`]'s preferred `SQLExceptionConverter`.
|
||||
|
||||
`*hibernate.native_exception_handling_51_compliance*` (e.g. `true` or `false` (default value))::
|
||||
Indicates if exception handling for a `SessionFactory` built via Hibernate's native bootstrapping
|
||||
should behave the same as native exception handling in Hibernate ORM 5.1. When set to `true`,
|
||||
`HibernateException` will be not wrapped or converted according to the JPA specification. This
|
||||
`HibernateException` will not be wrapped or converted according to the JPA specification. This
|
||||
setting will be ignored for a `SessionFactory` built via JPA bootstrapping.
|
||||
|
||||
[[configurations-session-events]]
|
||||
|
@ -940,7 +940,7 @@ setting will be ignored for a `SessionFactory` built via JPA bootstrapping.
|
|||
Fully qualified class name implementing the `SessionEventListener` interface.
|
||||
|
||||
`*hibernate.session_factory.interceptor*` (e.g. `org.hibernate.EmptyInterceptor` (default value))::
|
||||
Names a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/Interceptor[`Interceptor`] implementation to be applied to every `Session` created by the current `org.hibernate.SessionFactory`
|
||||
Names an https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/Interceptor[`Interceptor`] implementation to be applied to every `Session` created by the current `org.hibernate.SessionFactory`.
|
||||
+
|
||||
Can reference:
|
||||
+
|
||||
|
@ -953,7 +953,7 @@ Can reference:
|
|||
WARNING: Deprecated setting. Use `hibernate.session_factory.session_scoped_interceptor` instead.
|
||||
|
||||
`*hibernate.session_factory.session_scoped_interceptor*` (e.g. fully-qualified class name or class reference)::
|
||||
Names a `org.hibernate.Interceptor` implementation to be applied to the `org.hibernate.SessionFactory` and propagated to each `Session` created from the `SessionFactory`.
|
||||
Names an `org.hibernate.Interceptor` implementation to be applied to the `org.hibernate.SessionFactory` and propagated to each `Session` created from the `SessionFactory`.
|
||||
+
|
||||
This setting identifies an `Interceptor` implementation that is to be applied to every `Session` opened from the `SessionFactory`,
|
||||
but unlike `hibernate.session_factory.interceptor`, a unique instance of the `Interceptor` is
|
||||
|
@ -963,7 +963,7 @@ Can reference:
|
|||
+
|
||||
* `Interceptor` instance
|
||||
* `Interceptor` implementation `Class` object reference
|
||||
* `java.util.function.Supplier` instance which is used to retrieve the `Interceptor` instance.
|
||||
* `java.util.function.Supplier` instance which is used to retrieve the `Interceptor` instance
|
||||
+
|
||||
NOTE: Specifically, this setting cannot name an `Interceptor` instance.
|
||||
|
||||
|
@ -1062,7 +1062,7 @@ Used to define an instance, the class or the fully qualified class name of a htt
|
|||
=== Miscellaneous properties
|
||||
|
||||
`*hibernate.dialect_resolvers*`::
|
||||
Names any additional https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/dialect/spi/DialectResolver.html[`DialectResolver`] implementations to register with the standard https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/dialect/spi/DialectFactory.html[`DialectFactory`]
|
||||
Names any additional https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/dialect/spi/DialectResolver.html[`DialectResolver`] implementations to register with the standard https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/dialect/spi/DialectFactory.html[`DialectFactory`].
|
||||
|
||||
`*hibernate.session_factory_name*` (e.g. A JNDI name)::
|
||||
Setting used to name the Hibernate `SessionFactory`.
|
||||
|
@ -1082,7 +1082,7 @@ Internally, Hibernate keeps track of all `EntityManagerFactory` instances using
|
|||
XML configuration file to use to configure Hibernate.
|
||||
|
||||
`*hibernate.ejb.discard_pc_on_close*` (e.g. `true` or `false` (default value))::
|
||||
If true, the persistence context will be discarded (think `clear()` when the method is called.
|
||||
If true, the persistence context will be discarded (think `clear()` when the method is called).
|
||||
Otherwise, the persistence context will stay alive till the transaction completion: all objects will remain managed, and any change will be synchronized with the database (defaults to false, i.e. wait for transaction completion).
|
||||
|
||||
`*hibernate.ejb.metamodel.population*` (e.g. `enabled` or `disabled`, or `ignoreUnsupported` (default value))::
|
||||
|
@ -1090,8 +1090,8 @@ Setting that indicates whether to build the JPA types.
|
|||
+
|
||||
Accepts three values:
|
||||
+
|
||||
enabled::: Do the build
|
||||
disabled::: Do not do the build
|
||||
enabled::: Do the build.
|
||||
disabled::: Do not do the build.
|
||||
ignoreUnsupported::: Do the build, but ignore any non-JPA features that would otherwise result in a failure (e.g. `@Any` annotation).
|
||||
|
||||
`*hibernate.jpa.static_metamodel.population*` (e.g. `enabled` or `disabled`, or `skipUnsupported` (default value))::
|
||||
|
@ -1099,8 +1099,8 @@ Setting that controls whether we seek out JPA _static metamodel_ classes and pop
|
|||
+
|
||||
Accepts three values:
|
||||
+
|
||||
enabled::: Do the population
|
||||
disabled::: Do not do the population
|
||||
enabled::: Do the population.
|
||||
disabled::: Do not do the population.
|
||||
skipUnsupported::: Do the population, but ignore any non-JPA features that would otherwise result in the population failing (e.g. `@Any` annotation).
|
||||
|
||||
`*hibernate.delay_cdi_access*` (e.g. `true` or `false` (default value))::
|
||||
|
@ -1123,29 +1123,29 @@ true::: allows to flush an update out of a transaction
|
|||
false::: does not allow flushing an update outside of a transaction
|
||||
|
||||
`*hibernate.collection_join_subquery*` (e.g. `true` (default value) or `false`)::
|
||||
Setting which indicates whether or not the new JOINS over collection tables should be rewritten to subqueries.
|
||||
Setting which indicates whether or not the new JOINs over collection tables should be rewritten to subqueries.
|
||||
|
||||
`*hibernate.allow_refresh_detached_entity*` (e.g. `true` (default value when using Hibernate native bootstrapping) or `false` (default value when using JPA bootstrapping))::
|
||||
Setting that allows to call `javax.persistence.EntityManager#refresh(entity)` or `Session#refresh(entity)` on a detached instance even when the `org.hibernate.Session` is obtained from a JPA `javax.persistence.EntityManager`.
|
||||
|
||||
`*hibernate.use_entity_where_clause_for_collections*` (e.g., `true` (default) or `false`)::
|
||||
Setting controls whether an entity's "where" clause, mapped using `@Where(clause="...")` or `<entity ... where="...">, is taken into account when loading one-to-many or many-to-many collections of that type of entity.
|
||||
Setting controls whether an entity's "where" clause, mapped using `@Where(clause = "...")` or `<entity ... where="...">` is taken into account when loading one-to-many or many-to-many collections of that type of entity.
|
||||
|
||||
`*hibernate.event.merge.entity_copy_observer*` (e.g. `disallow` (default value), `allow`, `log` (testing purpose only) or fully-qualified class name)::
|
||||
Setting that specifies how Hibernate will respond when multiple representations of the same persistent entity ("entity copy") is detected while merging.
|
||||
+
|
||||
The possible values are:
|
||||
+
|
||||
disallow (the default)::: throws `IllegalStateException` if an entity copy is detected
|
||||
disallow::: throws `IllegalStateException` if an entity copy is detected
|
||||
allow::: performs the merge operation on each entity copy that is detected
|
||||
log::: (provided for testing only) performs the merge operation on each entity copy that is detected and logs information about the entity copies.
|
||||
This setting requires DEBUG logging be enabled for https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/event/internal/EntityCopyAllowedLoggedObserver.html[`EntityCopyAllowedLoggedObserver`].
|
||||
+
|
||||
|
||||
In addition, the application may customize the behavior by providing an implementation of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/event/spi/EntityCopyObserver.html[`EntityCopyObserver`] and setting `hibernate.event.merge.entity_copy_observer` to the class name.
|
||||
When this property is set to `allow` or `log`, Hibernate will merge each entity copy detected while cascading the merge operation.
|
||||
In the process of merging each entity copy, Hibernate will cascade the merge operation from each entity copy to its associations with `cascade=CascadeType.MERGE` or `CascadeType.ALL`.
|
||||
In the process of merging each entity copy, Hibernate will cascade the merge operation from each entity copy to its associations with `cascade = CascadeType.MERGE` or `cascade = CascadeType.ALL`.
|
||||
The entity state resulting from merging an entity copy will be overwritten when another entity copy is merged.
|
||||
+
|
||||
|
||||
For more details, check out the <<chapters/pc/PersistenceContext.adoc#pc-merge-gotchas,Merge gotchas>> section.
|
||||
|
||||
[[configurations-envers]]
|
||||
|
@ -1180,8 +1180,8 @@ Enable or disable the SpecJ proprietary mapping syntax which differs from JPA sp
|
|||
`*hibernate.temp.use_jdbc_metadata_defaults*` (e.g. `true` (default value) or `false`)::
|
||||
This setting is used to control whether we should consult the JDBC metadata to determine certain settings' default values when the database may not be available (mainly in tools usage).
|
||||
|
||||
`*hibernate.connection_provider.injection_data*` (e.g. `java.util.Map`)::
|
||||
Connection provider settings to be injected in the currently configured connection provider.
|
||||
`*hibernate.connection_provider.injection_data*`::
|
||||
Connection provider settings to be injected (a `Map` instance) in the currently configured connection provider.
|
||||
|
||||
`*hibernate.jandex_index*` (e.g. `org.jboss.jandex.Index`)::
|
||||
`*hibernate.jandex_index*`::
|
||||
Names a Jandex `org.jboss.jandex.Index` instance to use.
|
||||
|
|
|
@ -10,7 +10,7 @@ I like to think of `Configuration` as a big pot to which we add a bunch of stuff
|
|||
There are some significant drawbacks to the legacy bootstrapping mechanism which led to its deprecation and the development of the new approach, which is discussed in <<chapters/bootstrap/Bootstrap.adoc#bootstrap-native,Native Bootstrapping>>.
|
||||
|
||||
`Configuration` is semi-deprecated but still available for use, in a limited form that eliminates these drawbacks.
|
||||
"Under the covers", `Configuration` uses the new bootstrapping code, so the things available there as also available here in terms of auto-discovery.
|
||||
"Under the covers", `Configuration` uses the new bootstrapping code, so the things available there are also available here in terms of auto-discovery.
|
||||
====
|
||||
|
||||
You can obtain the `Configuration` by instantiating it directly.
|
||||
|
@ -20,11 +20,11 @@ You then specify mapping metadata (XML mapping documents, annotated classes) tha
|
|||
----
|
||||
Configuration cfg = new Configuration()
|
||||
// addResource does a classpath resource lookup
|
||||
.addResource("Item.hbm.xml")
|
||||
.addResource("Bid.hbm.xml")
|
||||
.addResource( "Item.hbm.xml" )
|
||||
.addResource( "Bid.hbm.xml" )
|
||||
|
||||
// calls addResource using "/org/hibernate/auction/User.hbm.xml"
|
||||
.addClass(`org.hibernate.auction.User.class`)
|
||||
.addClass( org.hibernate.auction.User.class )
|
||||
|
||||
// parses Address class for mapping annotations
|
||||
.addAnnotatedClass( Address.class )
|
||||
|
@ -32,9 +32,9 @@ Configuration cfg = new Configuration()
|
|||
// reads package-level (package-info.class) annotations in the named package
|
||||
.addPackage( "org.hibernate.auction" )
|
||||
|
||||
.setProperty("hibernate.dialect", "org.hibernate.dialect.H2Dialect")
|
||||
.setProperty("hibernate.connection.datasource", "java:comp/env/jdbc/test")
|
||||
.setProperty("hibernate.order_updates", "true");
|
||||
.setProperty( "hibernate.dialect", "org.hibernate.dialect.H2Dialect" )
|
||||
.setProperty( "hibernate.connection.datasource", "java:comp/env/jdbc/test" )
|
||||
.setProperty( "hibernate.order_updates", "true" );
|
||||
----
|
||||
|
||||
There are other ways to specify Configuration information, including:
|
||||
|
@ -46,7 +46,7 @@ There are other ways to specify Configuration information, including:
|
|||
|
||||
== Migration
|
||||
|
||||
Mapping Configuration methods to the corresponding methods in the new APIs..
|
||||
Mapping Configuration methods to the corresponding methods in the new APIs.
|
||||
|
||||
|===
|
||||
|`Configuration#addFile`|`Configuration#addFile`
|
||||
|
|
|
@ -20,8 +20,8 @@ The `Session` is a factory for `Criteria` instances.
|
|||
|
||||
[source,java]
|
||||
----
|
||||
Criteria crit = sess.createCriteria(Cat.class);
|
||||
crit.setMaxResults(50);
|
||||
Criteria crit = sess.createCriteria( Cat.class );
|
||||
crit.setMaxResults( 50 );
|
||||
List cats = crit.list();
|
||||
----
|
||||
|
||||
|
@ -49,10 +49,9 @@ If you provide the JPA entity name to a legacy Criteria query:
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List<Event> events =
|
||||
entityManager.unwrap( Session.class )
|
||||
.createCriteria( "ApplicationEvent" )
|
||||
.list();
|
||||
List<Event> events = entityManager.unwrap( Session.class )
|
||||
.createCriteria( "ApplicationEvent" )
|
||||
.list();
|
||||
----
|
||||
|
||||
Hibernate is going to throw the following `MappingException`:
|
||||
|
@ -66,10 +65,9 @@ On the other hand, the Hibernate entity name (the fully qualified class name) wo
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List<Event> events =
|
||||
entityManager.unwrap( Session.class )
|
||||
.createCriteria( Event.class.getName() )
|
||||
.list();
|
||||
List<Event> events = entityManager.unwrap( Session.class )
|
||||
.createCriteria( Event.class.getName() )
|
||||
.list();
|
||||
----
|
||||
|
||||
For more about this topic, check out the https://hibernate.atlassian.net/browse/HHH-2597[HHH-2597] JIRA issue.
|
||||
|
@ -82,9 +80,9 @@ The class `org.hibernate.criterion.Restrictions` defines factory methods for obt
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.add( Restrictions.like("name", "Fritz%") )
|
||||
.add( Restrictions.between("weight", minWeight, maxWeight) )
|
||||
List cats = sess.createCriteria( Cat.class )
|
||||
.add( Restrictions.like( "name", "Fritz%" ) )
|
||||
.add( Restrictions.between( "weight", minWeight, maxWeight ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -92,18 +90,18 @@ Restrictions can be grouped logically.
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.add( Restrictions.like("name", "Fritz%") )
|
||||
List cats = sess.createCriteria( Cat.class )
|
||||
.add( Restrictions.like( "name", "Fritz%" ) )
|
||||
.add( Restrictions.or(
|
||||
Restrictions.eq( "age", new Integer(0) ),
|
||||
Restrictions.isNull("age")
|
||||
) )
|
||||
Restrictions.isNull( "age" ) )
|
||||
)
|
||||
.list();
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
List cats = sess.createCriteria( Cat.class )
|
||||
.add( Restrictions.in( "name", new String[] { "Fritz", "Izi", "Pk" } ) )
|
||||
.add( Restrictions.disjunction()
|
||||
.add( Restrictions.isNull("age") )
|
||||
|
@ -119,8 +117,10 @@ One of the most useful `Restrictions` allows you to specify SQL directly.
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.add( Restrictions.sqlRestriction("lower({alias}.name) like lower(?)", "Fritz%", Hibernate.STRING) )
|
||||
List cats = sess.createCriteria( Cat.class )
|
||||
.add( Restrictions.sqlRestriction(
|
||||
"lower({alias}.name) like lower(?)", "Fritz%", Hibernate.STRING )
|
||||
)
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -132,8 +132,8 @@ You can create a `Property` by calling `Property.forName()`:
|
|||
[source,java]
|
||||
----
|
||||
|
||||
Property age = Property.forName("age");
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
Property age = Property.forName( "age" );
|
||||
List cats = sess.createCriteria( Cat.class )
|
||||
.add( Restrictions.disjunction()
|
||||
.add( age.isNull() )
|
||||
.add( age.eq( new Integer(0) ) )
|
||||
|
@ -151,35 +151,35 @@ You can order the results using `org.hibernate.criterion.Order`.
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.add( Restrictions.like("name", "F%")
|
||||
.addOrder( Order.asc("name").nulls(NullPrecedence.LAST) )
|
||||
.addOrder( Order.desc("age") )
|
||||
.setMaxResults(50)
|
||||
List cats = sess.createCriteria( Cat.class )
|
||||
.add( Restrictions.like( "name", "F%" ) )
|
||||
.addOrder( Order.asc( "name" ).nulls( NullPrecedence.LAST ) )
|
||||
.addOrder( Order.desc( "age" ) )
|
||||
.setMaxResults( 50 )
|
||||
.list();
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.add( Property.forName("name").like("F%") )
|
||||
.addOrder( Property.forName("name").asc() )
|
||||
.addOrder( Property.forName("age").desc() )
|
||||
.setMaxResults(50)
|
||||
List cats = sess.createCriteria( Cat.class )
|
||||
.add( Property.forName( "name" ).like( "F%" ) )
|
||||
.addOrder( Property.forName( "name" ).asc() )
|
||||
.addOrder( Property.forName( "age" ).desc() )
|
||||
.setMaxResults( 50 )
|
||||
.list();
|
||||
----
|
||||
|
||||
[[criteria-associations]]
|
||||
=== Associations
|
||||
|
||||
By navigating associations using `createCriteria()` you can specify constraints upon related entities:
|
||||
By navigating associations using `createCriteria()`, you can specify constraints upon related entities:
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.add( Restrictions.like("name", "F%") )
|
||||
.createCriteria("kittens")
|
||||
.add( Restrictions.like("name", "F%") )
|
||||
List cats = sess.createCriteria( Cat.class )
|
||||
.add( Restrictions.like( "name", "F%" ) )
|
||||
.createCriteria( "kittens" )
|
||||
.add( Restrictions.like( "name", "F%" ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -189,30 +189,30 @@ There is also an alternate form that is useful in certain circumstances:
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.createAlias("kittens", "kt")
|
||||
.createAlias("mate", "mt")
|
||||
.add( Restrictions.eqProperty("kt.name", "mt.name") )
|
||||
List cats = sess.createCriteria( Cat.class )
|
||||
.createAlias( "kittens", "kt" )
|
||||
.createAlias( "mate", "mt" )
|
||||
.add( Restrictions.eqProperty( "kt.name", "mt.name" ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
(`createAlias()` does not create a new instance of `Criteria`.)
|
||||
Note that `createAlias()` does not create a new instance of `Criteria`.
|
||||
|
||||
The kittens collections held by the `Cat` instances returned by the previous two queries are _not_ pre-filtered by the criteria.
|
||||
If you want to retrieve just the kittens that match the criteria, you must use a `ResultTransformer`.
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.createCriteria("kittens", "kt")
|
||||
.add( Restrictions.eq("name", "F%") )
|
||||
.setResultTransformer(Criteria.ALIAS_TO_ENTITY_MAP)
|
||||
List cats = sess.createCriteria( Cat.class )
|
||||
.createCriteria( "kittens", "kt" )
|
||||
.add( Restrictions.eq( "name", "F%" ) )
|
||||
.setResultTransformer( Criteria.ALIAS_TO_ENTITY_MAP )
|
||||
.list();
|
||||
Iterator iter = cats.iterator();
|
||||
while ( iter.hasNext() ) {
|
||||
Map map = (Map) iter.next();
|
||||
Cat cat = (Cat) map.get(Criteria.ROOT_ALIAS);
|
||||
Cat kitten = (Cat) map.get("kt");
|
||||
Cat cat = (Cat) map.get( Criteria.ROOT_ALIAS );
|
||||
Cat kitten = (Cat) map.get( "kt" );
|
||||
}
|
||||
----
|
||||
|
||||
|
@ -221,20 +221,16 @@ Additionally, you may manipulate the result set using a left outer join:
|
|||
[source]
|
||||
----
|
||||
List cats = session.createCriteria( Cat.class )
|
||||
.createAlias("mate", "mt", Criteria.LEFT_JOIN, Restrictions.like("mt.name", "good%") )
|
||||
.addOrder(Order.asc("mt.age"))
|
||||
.createAlias( "mate", "mt", Criteria.LEFT_JOIN, Restrictions.like( "mt.name", "good%" ) )
|
||||
.addOrder( Order.asc( "mt.age" ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
This will return all of the `Cat`s with a mate whose name starts with "good" ordered by their mate's age, and all cats who do not have a mate.
|
||||
This will return all of the ``Cat``s with a mate whose name starts with "good" ordered by their mate's age, and all cats who do not have a mate.
|
||||
This is useful when there is a need to order or limit in the database prior to returning complex/large result sets,
|
||||
and removes many instances where multiple queries would have to be performed and the results unioned by Java in memory.
|
||||
|
||||
Without this feature, first all of the cats without a mate would need to be loaded in one query.
|
||||
|
||||
A second query would need to retrieve the cats with mates who's name started with "good" sorted by the mates age.
|
||||
|
||||
Thirdly, in memory; the lists would need to be joined manually.
|
||||
Without this feature, firstly all of the cats without a mate would need to be loaded in one query. Then a second query would need to retrieve the cats with mates whose name started with "good" sorted by the mates age. Thirdly, in memory, the lists would need to be joined manually.
|
||||
|
||||
[[criteria-dynamicfetching]]
|
||||
=== Dynamic association fetching
|
||||
|
@ -243,10 +239,10 @@ You can specify association fetching semantics at runtime using `setFetchMode()`
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List cats = sess.createCriteria(Cat.class)
|
||||
.add( Restrictions.like("name", "Fritz%") )
|
||||
.setFetchMode("mate", FetchMode.EAGER)
|
||||
.setFetchMode("kittens", FetchMode.EAGER)
|
||||
List cats = sess.createCriteria( Cat.class )
|
||||
.add( Restrictions.like( "name", "Fritz%" ) )
|
||||
.setFetchMode( "mate", FetchMode.EAGER )
|
||||
.setFetchMode( "kittens", FetchMode.EAGER )
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -261,18 +257,18 @@ For example, suppose the `Cat` has a component property `fullName` with sub-prop
|
|||
|
||||
[source]
|
||||
----
|
||||
List cats = session.createCriteria(Cat.class)
|
||||
.add(Restrictions.eq("fullName.lastName", "Cattington"))
|
||||
List cats = session.createCriteria( Cat.class )
|
||||
.add( Restrictions.eq( "fullName.lastName", "Cattington" ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
Note: this does not apply when querying collections of components, for that see below <<criteria-collections>>
|
||||
Note: this does not apply when querying collections of components, for that see <<criteria-collections>> below.
|
||||
|
||||
[[criteria-collections]]
|
||||
=== Collections
|
||||
|
||||
When using criteria against collections, there are two distinct cases.
|
||||
One is if the collection contains entities (eg. `<one-to-many/>` or `<many-to-many/>`) or components (`<composite-element/>` ),
|
||||
One is if the collection contains entities (e.g. `<one-to-many/>` or `<many-to-many/>`) or components (`<composite-element/>` ),
|
||||
and the second is if the collection contains scalar values (`<element/>`).
|
||||
In the first case, the syntax is as given above in the section <<criteria-associations>> where we restrict the `kittens` collection.
|
||||
Essentially, we create a `Criteria` object against the collection property and restrict the entity or component properties using that instance.
|
||||
|
@ -283,9 +279,9 @@ For an indexed collection, we can also reference the index property using the sp
|
|||
|
||||
[source]
|
||||
----
|
||||
List cats = session.createCriteria(Cat.class)
|
||||
.createCriteria("nickNames")
|
||||
.add(Restrictions.eq("elements", "BadBoy"))
|
||||
List cats = session.createCriteria( Cat.class )
|
||||
.createCriteria( "nickNames" )
|
||||
.add( Restrictions.eq( "elements", "BadBoy" ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -297,10 +293,10 @@ The class `org.hibernate.criterion.Example` allows you to construct a query crit
|
|||
[source,java]
|
||||
----
|
||||
Cat cat = new Cat();
|
||||
cat.setSex('F');
|
||||
cat.setColor(Color.BLACK);
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.add( Example.create(cat) )
|
||||
cat.setSex( 'F' );
|
||||
cat.setColor( Color.BLACK );
|
||||
List results = session.createCriteria( Cat.class )
|
||||
.add( Example.create( cat ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -311,13 +307,13 @@ You can adjust how the `Example` is applied.
|
|||
|
||||
[source,java]
|
||||
----
|
||||
Example example = Example.create(cat)
|
||||
.excludeZeroes() //exclude zero valued properties
|
||||
.excludeProperty("color") //exclude the property named "color"
|
||||
.ignoreCase() //perform case insensitive string comparisons
|
||||
.enableLike(); //use like for string comparisons
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.add(example)
|
||||
Example example = Example.create( cat )
|
||||
.excludeZeroes() //exclude zero valued properties
|
||||
.excludeProperty( "color" ) //exclude the property named "color"
|
||||
.ignoreCase() //perform case insensitive string comparisons
|
||||
.enableLike(); //use like for string comparisons
|
||||
List results = session.createCriteria( Cat.class )
|
||||
.add( example )
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -325,10 +321,11 @@ You can even use examples to place criteria upon associated objects.
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.add( Example.create(cat) )
|
||||
.createCriteria("mate")
|
||||
.add( Example.create( cat.getMate() ) )
|
||||
List results = session.createCriteria( Cat.class )
|
||||
.add( Example.create( cat ) )
|
||||
.createCriteria( "mate" )
|
||||
.add( Example.create( cat.getMate() )
|
||||
)
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -340,20 +337,20 @@ You can apply a projection to a query by calling `setProjection()`.
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
List results = session.createCriteria( Cat.class )
|
||||
.setProjection( Projections.rowCount() )
|
||||
.add( Restrictions.eq("color", Color.BLACK) )
|
||||
.add( Restrictions.eq( "color", Color.BLACK ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
List results = session.createCriteria( Cat.class )
|
||||
.setProjection( Projections.projectionList()
|
||||
.add( Projections.rowCount() )
|
||||
.add( Projections.avg("weight") )
|
||||
.add( Projections.max("weight") )
|
||||
.add( Projections.groupProperty("color") )
|
||||
.add( Projections.avg( "weight" ) )
|
||||
.add( Projections.max( "weight" ) )
|
||||
.add( Projections.groupProperty( "color" ) )
|
||||
)
|
||||
.list();
|
||||
----
|
||||
|
@ -366,17 +363,17 @@ Here are two different ways to do this:
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.setProjection( Projections.alias( Projections.groupProperty("color"), "colr" ) )
|
||||
.addOrder( Order.asc("colr") )
|
||||
List results = session.createCriteria( Cat.class )
|
||||
.setProjection( Projections.alias( Projections.groupProperty( "color" ), "colr" ) )
|
||||
.addOrder( Order.asc( "colr" ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.setProjection( Projections.groupProperty("color").as("colr") )
|
||||
.addOrder( Order.asc("colr") )
|
||||
List results = session.createCriteria( Cat.class )
|
||||
.setProjection( Projections.groupProperty( "color" ).as( "colr" ) )
|
||||
.addOrder( Order.asc( "colr" ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -385,28 +382,28 @@ As a shortcut, you can assign an alias when you add the projection to a projecti
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
List results = session.createCriteria( Cat.class )
|
||||
.setProjection( Projections.projectionList()
|
||||
.add( Projections.rowCount(), "catCountByColor" )
|
||||
.add( Projections.avg("weight"), "avgWeight" )
|
||||
.add( Projections.max("weight"), "maxWeight" )
|
||||
.add( Projections.groupProperty("color"), "color" )
|
||||
.add( Projections.avg( "weight" ), "avgWeight" )
|
||||
.add( Projections.max( "weight" ), "maxWeight" )
|
||||
.add( Projections.groupProperty( "color" ), "color" )
|
||||
)
|
||||
.addOrder( Order.desc("catCountByColor") )
|
||||
.addOrder( Order.desc("avgWeight") )
|
||||
.addOrder( Order.desc( "catCountByColor" ) )
|
||||
.addOrder( Order.desc( "avgWeight" ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Domestic.class, "cat")
|
||||
.createAlias("kittens", "kit")
|
||||
List results = session.createCriteria( Domestic.class, "cat" )
|
||||
.createAlias( "kittens", "kit" )
|
||||
.setProjection( Projections.projectionList()
|
||||
.add( Projections.property("cat.name"), "catName" )
|
||||
.add( Projections.property("kit.name"), "kitName" )
|
||||
.add( Projections.property( "cat.name" ), "catName" )
|
||||
.add( Projections.property( "kit.name" ), "kitName" )
|
||||
)
|
||||
.addOrder( Order.asc("catName") )
|
||||
.addOrder( Order.asc("kitName") )
|
||||
.addOrder( Order.asc( "catName" ) )
|
||||
.addOrder( Order.asc( "kitName" ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -414,23 +411,23 @@ You can also use `Property.forName()` to express projections:
|
|||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.setProjection( Property.forName("name") )
|
||||
.add( Property.forName("color").eq(Color.BLACK) )
|
||||
List results = session.createCriteria( Cat.class )
|
||||
.setProjection( Property.forName( "name" ) )
|
||||
.add( Property.forName( "color" ).eq( Color.BLACK ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
List results = session.createCriteria(Cat.class)
|
||||
.setProjection( Projections.projectionList()
|
||||
.add( Projections.rowCount().as("catCountByColor") )
|
||||
.add( Property.forName("weight").avg().as("avgWeight") )
|
||||
.add( Property.forName("weight").max().as("maxWeight") )
|
||||
.add( Property.forName("color").group().as("color" )
|
||||
List results = session.createCriteria( Cat.class )
|
||||
.setProjection(Projections.projectionList()
|
||||
.add( Projections.rowCount().as( "catCountByColor" ) )
|
||||
.add( Property.forName( "weight" ).avg().as( "avgWeight" ) )
|
||||
.add( Property.forName( "weight" ).max().as( "maxWeight" ) )
|
||||
.add( Property.forName( "color" ).group().as( "color" ) )
|
||||
)
|
||||
.addOrder( Order.desc("catCountByColor") )
|
||||
.addOrder( Order.desc("avgWeight") )
|
||||
.addOrder( Order.desc( "catCountByColor" ) )
|
||||
.addOrder( Order.desc( "avgWeight" ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -441,12 +438,12 @@ The `DetachedCriteria` class allows you to create a query outside the scope of a
|
|||
|
||||
[source,java]
|
||||
----
|
||||
DetachedCriteria query = DetachedCriteria.forClass(Cat.class)
|
||||
.add( Property.forName("sex").eq('F') );
|
||||
DetachedCriteria query = DetachedCriteria.forClass( Cat.class )
|
||||
.add( Property.forName( "sex" ).eq( 'F' ) );
|
||||
|
||||
Session session = ....;
|
||||
Transaction txn = session.beginTransaction();
|
||||
List results = query.getExecutableCriteria(session).setMaxResults(100).list();
|
||||
List results = query.getExecutableCriteria( session ).setMaxResults( 100 ).list();
|
||||
txn.commit();
|
||||
session.close();
|
||||
----
|
||||
|
@ -456,19 +453,19 @@ A `DetachedCriteria` can also be used to express a subquery.
|
|||
|
||||
[source,java]
|
||||
----
|
||||
DetachedCriteria avgWeight = DetachedCriteria.forClass(Cat.class)
|
||||
.setProjection( Property.forName("weight").avg() );
|
||||
session.createCriteria(Cat.class)
|
||||
.add( Property.forName("weight").gt(avgWeight) )
|
||||
DetachedCriteria avgWeight = DetachedCriteria.forClass( Cat.class )
|
||||
.setProjection( Property.forName( "weight" ).avg() );
|
||||
session.createCriteria( Cat.class )
|
||||
.add( Property.forName( "weight" ).gt( avgWeight ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
[source,java]
|
||||
----
|
||||
DetachedCriteria weights = DetachedCriteria.forClass(Cat.class)
|
||||
.setProjection( Property.forName("weight") );
|
||||
session.createCriteria(Cat.class)
|
||||
.add( Subqueries.geAll("weight", weights) )
|
||||
DetachedCriteria weights = DetachedCriteria.forClass( Cat.class )
|
||||
.setProjection( Property.forName( "weight" ) );
|
||||
session.createCriteria( Cat.class )
|
||||
.add( Subqueries.geAll( "weight", weights ) )
|
||||
.list();
|
||||
----
|
||||
|
||||
|
@ -476,11 +473,11 @@ Correlated subqueries are also possible:
|
|||
|
||||
[source,java]
|
||||
----
|
||||
DetachedCriteria avgWeightForSex = DetachedCriteria.forClass(Cat.class, "cat2")
|
||||
.setProjection( Property.forName("weight").avg() )
|
||||
.add( Property.forName("cat2.sex").eqProperty("cat.sex") );
|
||||
session.createCriteria(Cat.class, "cat")
|
||||
.add( Property.forName("weight").gt(avgWeightForSex) )
|
||||
DetachedCriteria avgWeightForSex = DetachedCriteria.forClass( Cat.class, "cat2" )
|
||||
.setProjection( Property.forName( "weight" ).avg() )
|
||||
.add( Property.forName( "cat2.sex" ).eqProperty( "cat.sex" ) );
|
||||
session.createCriteria( Cat.class, "cat" )
|
||||
.add( Property.forName( "weight" ).gt( avgWeightForSex ) )
|
||||
.list();
|
||||
----
|
||||
Example of multi-column restriction based on a subquery:
|
||||
|
@ -488,9 +485,12 @@ Example of multi-column restriction based on a subquery:
|
|||
[source,java]
|
||||
----
|
||||
DetachedCriteria sizeQuery = DetachedCriteria.forClass( Man.class )
|
||||
.setProjection( Projections.projectionList().add( Projections.property( "weight" ) )
|
||||
.add( Projections.property( "height" ) ) )
|
||||
.setProjection( Projections.projectionList()
|
||||
.add( Projections.property( "weight" ) )
|
||||
.add( Projections.property( "height" ) )
|
||||
)
|
||||
.add( Restrictions.eq( "name", "John" ) );
|
||||
|
||||
session.createCriteria( Woman.class )
|
||||
.add( Subqueries.propertiesEq( new String[] { "weight", "height" }, sizeQuery ) )
|
||||
.list();
|
||||
|
@ -527,10 +527,11 @@ Once you have enabled the Hibernate query cache, the `Restrictions.naturalId()`
|
|||
|
||||
[source,java]
|
||||
----
|
||||
session.createCriteria(User.class)
|
||||
session.createCriteria( User.class )
|
||||
.add( Restrictions.naturalId()
|
||||
.set("name", "gavin")
|
||||
.set("org", "hb")
|
||||
).setCacheable(true)
|
||||
.set( "name", "gavin" )
|
||||
.set( "org", "hb" )
|
||||
)
|
||||
.setCacheable( true )
|
||||
.uniqueResult();
|
||||
----
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
== Legacy Hibernate Native Queries
|
||||
|
||||
[[legacy-sql-named-queries]]
|
||||
=== Legacy Named SQL queries
|
||||
=== Legacy named SQL queries
|
||||
|
||||
Named SQL queries can also be defined during mapping and called in exactly the same way as a named HQL query.
|
||||
In this case, you do _not_ need to call `addEntity()` anymore.
|
||||
|
@ -77,7 +77,7 @@ You must declare the column alias and Hibernate type using the `<return-scalar>`
|
|||
|
||||
You can externalize the resultset mapping information in a `<resultset>` element which will allow you to either reuse them across several named queries or through the `setResultSetMapping()` API.
|
||||
|
||||
.<resultset> mapping used to externalize mappinginformation
|
||||
.<resultset> mapping used to externalize mapping information
|
||||
====
|
||||
[source,xml]
|
||||
----
|
||||
|
@ -110,7 +110,7 @@ You can, alternatively, use the resultset mapping information in your hbm files
|
|||
----
|
||||
List cats = session
|
||||
.createSQLQuery( "select {cat.*}, {kitten.*} from cats cat, cats kitten where kitten.mother = cat.id" )
|
||||
.setResultSetMapping("catAndKitten")
|
||||
.setResultSetMapping( "catAndKitten" )
|
||||
.list();
|
||||
----
|
||||
====
|
||||
|
@ -228,7 +228,7 @@ Native call syntax is not supported.
|
|||
For Oracle the following rules apply:
|
||||
|
||||
* A function must return a result set.
|
||||
The first parameter of a procedure must be an `OUT` that returns a result set.
|
||||
* The first parameter of a procedure must be an `OUT` that returns a result set.
|
||||
This is done by using a `SYS_REFCURSOR` type in Oracle 9 or 10.
|
||||
In Oracle you need to define a `REF CURSOR` type.
|
||||
See Oracle literature for further information.
|
||||
|
@ -272,7 +272,7 @@ If you expect to call a stored procedure, be sure to set the `callable` attribut
|
|||
|
||||
To check that the execution happens correctly, Hibernate allows you to define one of those three strategies:
|
||||
|
||||
* none: no check is performed: the store procedure is expected to fail upon issues
|
||||
* none: no check is performed; the store procedure is expected to fail upon issues
|
||||
* count: use of rowcount to check that the update is successful
|
||||
* param: like COUNT but using an output parameter rather that the standard mechanism
|
||||
|
||||
|
@ -312,7 +312,7 @@ Here is an example of a statement level override:
|
|||
[source,xml]
|
||||
----
|
||||
<sql-query name = "person">
|
||||
<return alias = "pers" class = "Person" lock-mod e= "upgrade"/>
|
||||
<return alias = "pers" class = "Person" lock-mode= "upgrade"/>
|
||||
SELECT NAME AS {pers.name}, ID AS {pers.id}
|
||||
FROM PERSON
|
||||
WHERE ID=?
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
image:images/architecture/data_access_layers.svg[Data Access Layers]
|
||||
|
||||
Hibernate, as an ORM solution, effectively "sits between" the Java application data access layer and the Relational Database, as can be seen in the diagram above.
|
||||
The Java application makes use of the Hibernate APIs to load, store, query, etc its domain data.
|
||||
The Java application makes use of the Hibernate APIs to load, store, query, etc. its domain data.
|
||||
Here we will introduce the essential Hibernate APIs.
|
||||
This will be a brief introduction; we will discuss these contracts in detail later.
|
||||
|
||||
|
@ -21,7 +21,7 @@ Acts as a factory for `org.hibernate.Session` instances. The `EntityManagerFacto
|
|||
A `SessionFactory` is very expensive to create, so, for any given database, the application should have only one associated `SessionFactory`.
|
||||
The `SessionFactory` maintains services that Hibernate uses across all `Session(s)` such as second level caches, connection pools, transaction system integrations, etc.
|
||||
|
||||
Session (`org.hibernate.Session`):: A single-threaded, short-lived object conceptually modeling a "Unit of Work" <<Bibliography.adoc#PoEAA,PoEAA>>.
|
||||
Session (`org.hibernate.Session`):: A single-threaded, short-lived object conceptually modeling a "Unit of Work" (<<Bibliography.adoc#PoEAA,PoEAA>>).
|
||||
In JPA nomenclature, the `Session` is represented by an `EntityManager`.
|
||||
+
|
||||
Behind the scenes, the Hibernate `Session` wraps a JDBC `java.sql.Connection` and acts as a factory for `org.hibernate.Transaction` instances.
|
||||
|
|
|
@ -66,7 +66,7 @@ include::{sourcedir}/BatchTest.java[tags=batch-session-batch-example]
|
|||
|
||||
There are several problems associated with this example:
|
||||
|
||||
. Hibernate caches all the newly inserted `Customer` instances in the session-level c1ache, so, when the transaction ends, 100 000 entities are managed by the persistence context.
|
||||
. Hibernate caches all the newly inserted `Customer` instances in the session-level cache, so, when the transaction ends, 100 000 entities are managed by the persistence context.
|
||||
If the maximum memory allocated to the JVM is rather low, this example could fail with an `OutOfMemoryException`.
|
||||
The Java 1.8 JVM allocated either 1/4 of available RAM or 1Gb, which can easily accommodate 100 000 objects on the heap.
|
||||
. long-running transactions can deplete a connection pool so other transactions don't get a chance to proceed.
|
||||
|
@ -163,7 +163,7 @@ Hibernate provides methods for bulk SQL-style DML statement execution, in the fo
|
|||
Both the Hibernate native Query Language and JPQL (Java Persistence Query Language) provide support for bulk UPDATE and DELETE.
|
||||
|
||||
[[batch-bulk-hql-update-delete-example]]
|
||||
.Psuedo-syntax for UPDATE and DELETE statements using HQL
|
||||
.Pseudo-syntax for UPDATE and DELETE statements using HQL
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -483,13 +483,13 @@ The underlying database must support CTE (Common Table Expressions) that can be
|
|||
|
||||
The underlying database must also support the VALUES list clause, like PostgreSQL or SQL Server 2008.
|
||||
|
||||
However, this strategy requires the IN-clause row value expression for composite identifiers, so you can only use this strategy only with PostgreSQL.
|
||||
However, this strategy requires the IN-clause row value expression for composite identifiers, so you can only use this strategy with PostgreSQL.
|
||||
====
|
||||
|
||||
If you can use temporary tables, that's probably the best choice.
|
||||
However, if you are not allowed to create temporary tables, you must pick one of these four strategies that works with your underlying database.
|
||||
Before making your mind, you should benchmark which one works best for your current workload.
|
||||
For instance, http://blog.2ndquadrant.com/postgresql-ctes-are-optimization-fences/[CTE are optimization fences in PostgreSQL], so make sure you measure before taking a decision.
|
||||
Before making up your mind, you should benchmark which one works best for your current workload.
|
||||
For instance, http://blog.2ndquadrant.com/postgresql-ctes-are-optimization-fences/[CTE are optimization fences in PostgreSQL], so make sure you measure before making a decision.
|
||||
|
||||
If you're using Oracle or MySQL 5.7, you can choose either `InlineIdsOrClauseBulkIdStrategy` or `InlineIdsInClauseBulkIdStrategy`.
|
||||
For older version of MySQL, then you can only use `InlineIdsOrClauseBulkIdStrategy`.
|
||||
|
|
|
@ -4,8 +4,6 @@
|
|||
:boot-spi-sourcedir: ../../../../../../../hibernate-core/src/test/java/org/hibernate/boot/spi
|
||||
:extrasdir: extras
|
||||
|
||||
org.hibernate.boot.spi.metadatabuildercontributor;
|
||||
|
||||
The term bootstrapping refers to initializing and starting a software component.
|
||||
In Hibernate, we are specifically talking about the process of building a fully functional `SessionFactory` instance or `EntityManagerFactory` instance, for JPA.
|
||||
The process is very different for each.
|
||||
|
@ -20,7 +18,7 @@ During the bootstrap process, you might want to customize Hibernate behavior so
|
|||
|
||||
This section discusses the process of bootstrapping a Hibernate `SessionFactory`.
|
||||
Specifically, it addresses the bootstrapping APIs as redesigned in 5.0.
|
||||
For a discussion of the legacy bootstrapping API, see <<appendices/Legacy_Bootstrap.adoc#appendix-legacy-bootstrap,Legacy Bootstrapping>>
|
||||
For a discussion of the legacy bootstrapping API, see <<appendices/Legacy_Bootstrap.adoc#appendix-legacy-bootstrap,Legacy Bootstrapping>>.
|
||||
|
||||
[[bootstrap-native-registry]]
|
||||
==== Building the ServiceRegistry
|
||||
|
@ -32,9 +30,9 @@ First is the `org.hibernate.boot.registry.BootstrapServiceRegistry`.
|
|||
The `BootstrapServiceRegistry` is intended to hold services that Hibernate needs at both bootstrap and run time.
|
||||
This boils down to 3 services:
|
||||
|
||||
`org.hibernate.boot.registry.classloading.spi.ClassLoaderService`:: which controls how Hibernate interacts with `ClassLoader`s
|
||||
`org.hibernate.boot.registry.classloading.spi.ClassLoaderService`:: which controls how Hibernate interacts with ``ClassLoader``s.
|
||||
`org.hibernate.integrator.spi.IntegratorService`:: which controls the management and discovery of `org.hibernate.integrator.spi.Integrator` instances.
|
||||
`org.hibernate.boot.registry.selector.spi.StrategySelector`:: which control how Hibernate resolves implementations of various strategy contracts.
|
||||
`org.hibernate.boot.registry.selector.spi.StrategySelector`:: which controls how Hibernate resolves implementations of various strategy contracts.
|
||||
This is a very powerful service, but a full discussion of it is beyond the scope of this guide.
|
||||
|
||||
[NOTE]
|
||||
|
@ -43,7 +41,7 @@ If you are ok with the default behavior of Hibernate in regards to these `Bootst
|
|||
(which is quite often the case, especially in stand-alone environments), then you don't need to explicitly build the `BootstrapServiceRegistry`.
|
||||
====
|
||||
|
||||
If you wish to alter how the `BootstrapServiceRegistry` is built, that is controlled through the `org.hibernate.boot.registry.BootstrapServiceRegistryBuilder:`
|
||||
If you wish to alter how the `BootstrapServiceRegistry` is built, that is controlled through the `org.hibernate.boot.registry.BootstrapServiceRegistryBuilder`:
|
||||
|
||||
[[bootstrap-bootstrap-native-registry-BootstrapServiceRegistry-example]]
|
||||
.Controlling `BootstrapServiceRegistry` building
|
||||
|
@ -105,9 +103,9 @@ include::{sourcedir}/BootstrapTest.java[tags=bootstrap-event-listener-registrati
|
|||
|
||||
The second step in native bootstrapping is the building of an `org.hibernate.boot.Metadata` object containing the parsed representations of an application domain model and its mapping to a database.
|
||||
The first thing we obviously need to build a parsed representation is the source information to be parsed (annotated classes, `hbm.xml` files, `orm.xml` files).
|
||||
This is the purpose of `org.hibernate.boot.MetadataSources`:
|
||||
This is the purpose of `org.hibernate.boot.MetadataSources`.
|
||||
|
||||
`MetadataSources` has many other methods as well, explore its API and https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/MetadataSources.html[Javadocs] for more information.
|
||||
`MetadataSources` has many other methods as well. Explore its API and https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/MetadataSources.html[Javadocs] for more information.
|
||||
Also, all methods on `MetadataSources` offer fluent-style call chaining::
|
||||
|
||||
[[bootstrap-native-metadata-source-example]]
|
||||
|
@ -149,7 +147,7 @@ include::{sourcedir}/BootstrapTest.java[tags=bootstrap-native-metadata-builder-e
|
|||
The final step in native bootstrapping is to build the `SessionFactory` itself.
|
||||
Much like discussed above, if you are ok with the default behavior of building a `SessionFactory` from a `Metadata` reference, you can simply call the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/Metadata.html#buildSessionFactory--[`buildSessionFactory`] method on the `Metadata` object.
|
||||
|
||||
However, if you would like to adjust that building process, you will need to use `SessionFactoryBuilder` as obtained via [`Metadata#getSessionFactoryBuilder`. Again, see its https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/Metadata.html#getSessionFactoryBuilder--[Javadocs] for more details.
|
||||
However, if you would like to adjust that building process, you will need to use `SessionFactoryBuilder` as obtained via `Metadata#getSessionFactoryBuilder`. Again, see its https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/Metadata.html#getSessionFactoryBuilder--[Javadocs] for more details.
|
||||
|
||||
[[bootstrap-native-SessionFactory-example]]
|
||||
.Native Bootstrapping - Putting it all together
|
||||
|
@ -336,4 +334,4 @@ The above `MetadataBuilderContributor` is used to register a `SqlFuction` which
|
|||
By having access to the
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/MetadataBuilder.html[`MetadataBuilder`] class that's used by the underlying `SessionFactory`, the JPA bootstrap becomes just as flexible as the Hibernate native bootstrap mechanism.
|
||||
|
||||
You can then pass the custom `MetadataBuilderContributor` via the `hibernate.metadata_builder_contributor` configuration property as explained in the <<appendices/Configurations.adoc#configurations-bootstrap, configuration chapter>>
|
||||
You can then pass the custom `MetadataBuilderContributor` via the `hibernate.metadata_builder_contributor` configuration property as explained in the <<appendices/Configurations.adoc#configurations-bootstrap, Configuration chapter>>.
|
||||
|
|
|
@ -1,23 +0,0 @@
|
|||
[[enhancement]]
|
||||
== Enhancement
|
||||
:sourcedir: ../../../../../test/java/org/hibernate/userguide/bytecode
|
||||
:extrasdir: extras
|
||||
|
||||
Hibernate offers a number of services that can be added into an application's domain model classes
|
||||
through bytecode enhancement...
|
||||
|
||||
|
||||
[[enhancement-laziness]]
|
||||
=== Laziness
|
||||
|
||||
|
||||
[[enhancement-bidir]]
|
||||
=== Bi-directionality
|
||||
|
||||
|
||||
[[enhancement-dirty]]
|
||||
=== In-line dirty checking
|
||||
|
||||
|
||||
[[enhancement-extended]]
|
||||
=== Extended enhancement
|
|
@ -33,7 +33,7 @@ Detailed information is provided later in this chapter.
|
|||
[[caching-config-properties]]
|
||||
==== Caching configuration properties
|
||||
|
||||
Besides specific provider configuration, there are a number of configurations options on the Hibernate side of the integration that control various caching behaviors:
|
||||
Besides provider specific configuration, there are a number of configurations options on the Hibernate side of the integration that control various caching behaviors:
|
||||
|
||||
`hibernate.cache.use_second_level_cache`::
|
||||
Enable or disable second level caching overall. By default, if the currently configured
|
||||
|
@ -43,7 +43,7 @@ Besides specific provider configuration, there are a number of configurations op
|
|||
`hibernate.cache.query_cache_factory`::
|
||||
Query result caching is handled by a special contract that deals with staleness-based invalidation of the results.
|
||||
The default implementation does not allow stale results at all. Use this for applications that would like to relax that.
|
||||
Names an implementation of `org.hibernate.cache.spi.QueryCacheFactory`
|
||||
Names an implementation of `org.hibernate.cache.spi.QueryCacheFactory`.
|
||||
`hibernate.cache.use_minimal_puts`::
|
||||
Optimizes second-level cache operations to minimize writes, at the cost of more frequent reads. Providers typically set this appropriately.
|
||||
`hibernate.cache.region_prefix`::
|
||||
|
@ -322,7 +322,7 @@ For projection queries, the query cache stores the dehydrated entity state (e.g.
|
|||
This setting creates two new cache regions:
|
||||
|
||||
`default-query-results-region`::
|
||||
Holding the cached query results
|
||||
Holding the cached query results.
|
||||
`default-update-timestamps-region`::
|
||||
Holding timestamps of the most recent updates to queryable tables.
|
||||
These are used to validate the results as they are served from the query cache.
|
||||
|
@ -670,7 +670,7 @@ shared among multiple `SessionFactory` instances in the same JVM.
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
The http://www.ehcache.org/documentation/2.8/integrations/hibernate#optional[Ehcache documentation] recommends using multiple non-singleton `CacheManager(s)` when there are multiple Hibernate `SessionFactory` instances running in the same JVM.
|
||||
The http://www.ehcache.org/documentation/2.8/integrations/hibernate#optional[Ehcache documentation] recommends using multiple non-singleton ``CacheManager``s when there are multiple Hibernate `SessionFactory` instances running in the same JVM.
|
||||
====
|
||||
|
||||
[[caching-provider-ehcache-missing-cache-strategy]]
|
||||
|
@ -702,7 +702,7 @@ Note that caches created this way may be very badly configured (large size in pa
|
|||
[[caching-provider-infinispan]]
|
||||
=== Infinispan
|
||||
|
||||
Infinispan is a distributed in-memory key/value data store, available as a cache or data grid, which can be used as a Hibernate 2nd-level cache provider as well.
|
||||
Infinispan is a distributed in-memory key/value data store, available as a cache or data grid, which can be used as a Hibernate second-level cache provider as well.
|
||||
|
||||
It supports advanced functionality such as transactions, events, querying, distributed processing, off-heap and geographical failover.
|
||||
|
||||
|
|
|
@ -69,7 +69,7 @@ include::{extrasdir}/associations-one-to-many-unidirectional-example.sql[]
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
The `@OneToMany` association is by definition a parent association, even if it's a unidirectional or a bidirectional one.
|
||||
The `@OneToMany` association is by definition a parent association, regardless of whether it's a unidirectional or a bidirectional one.
|
||||
Only the parent side of an association makes sense to cascade its entity state transitions to children.
|
||||
====
|
||||
|
||||
|
@ -147,7 +147,7 @@ include::{extrasdir}/associations-one-to-many-bidirectional-lifecycle-example.sq
|
|||
Unlike the unidirectional `@OneToMany`, the bidirectional association is much more efficient when managing the collection persistence state.
|
||||
Every element removal only requires a single update (in which the foreign key column is set to `NULL`), and,
|
||||
if the child entity lifecycle is bound to its owning parent so that the child cannot exist without its parent,
|
||||
then we can annotate the association with the `orphan-removal` attribute and dissociate the child will trigger a delete statement on the actual child table row as well.
|
||||
then we can annotate the association with the `orphanRemoval` attribute and dissociating the child will trigger a delete statement on the actual child table row as well.
|
||||
|
||||
[[associations-one-to-one]]
|
||||
==== `@OneToOne`
|
||||
|
@ -177,7 +177,7 @@ From a relational database point of view, the underlying schema is identical to
|
|||
as the client-side controls the relationship based on the foreign key column.
|
||||
|
||||
But then, it's unusual to consider the `Phone` as a client-side and the `PhoneDetails` as the parent-side because the details cannot exist without an actual phone.
|
||||
A much more natural mapping would be if the `Phone` were the parent-side, therefore pushing the foreign key into the `PhoneDetails` table.
|
||||
A much more natural mapping would be the `Phone` were the parent-side, therefore pushing the foreign key into the `PhoneDetails` table.
|
||||
This mapping requires a bidirectional `@OneToOne` association as you can see in the following example:
|
||||
|
||||
[[associations-one-to-one-bidirectional]]
|
||||
|
@ -249,7 +249,7 @@ include::{sourcedir}/OneToOneBidirectionalLazyTest.java[tags=associations-one-to
|
|||
====
|
||||
|
||||
For more about how to enable Bytecode enhancement,
|
||||
see the <<chapters/pc/BytecodeEnhancement.adoc#BytecodeEnhancement, BytecodeEnhancement chapter>>.
|
||||
see the <<chapters/pc/BytecodeEnhancement.adoc#BytecodeEnhancement, Bytecode Enhancement chapter>>.
|
||||
|
||||
[[associations-many-to-many]]
|
||||
==== `@ManyToMany`
|
||||
|
@ -465,7 +465,7 @@ include::{sourcedir}/NotFoundTest.java[tags=associations-not-found-find-example,
|
|||
----
|
||||
====
|
||||
|
||||
However, if we change the `cityName` attribute to a non-existing city:
|
||||
However, if we change the `cityName` attribute to a non-existing city's name:
|
||||
|
||||
[[associations-not-found-non-existing-persist-example]]
|
||||
.`@NotFound` change to non-existing City example
|
||||
|
@ -546,7 +546,7 @@ The table resolving mapping is defined by the `metaDef` attribute which referenc
|
|||
The `package-info.java` contains the `@AnyMetaDef` mapping:
|
||||
|
||||
[[associations-any-meta-def-example]]
|
||||
.`@Any` mapping usage
|
||||
.`@AnyMetaDef` mapping usage
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
[[basic]]
|
||||
=== Basic Types
|
||||
=== Basic types
|
||||
:modeldir: ../../../../../main/java/org/hibernate/userguide/model
|
||||
:sourcedir: ../../../../../test/java/org/hibernate/userguide/mapping
|
||||
:resourcedir: ../../../../../test/resources/org/hibernate/userguide/
|
||||
|
@ -15,33 +15,34 @@ Internally Hibernate uses a registry of basic types when it needs to resolve a s
|
|||
==== Hibernate-provided BasicTypes
|
||||
|
||||
.Standard BasicTypes
|
||||
[cols=",,,",options="header",]
|
||||
[cols="<.^,<.^,<.^,<.^",options="header",]
|
||||
|=======================================================================================================================================================================================================================================================================================
|
||||
|Hibernate type (org.hibernate.type package) |JDBC type |Java type |BasicTypeRegistry key(s)
|
||||
|StringType |VARCHAR |java.lang.String |string, java.lang.String
|
||||
|MaterializedClob |CLOB |java.lang.String |materialized_clob
|
||||
|TextType |LONGVARCHAR |java.lang.String |text
|
||||
|CharacterType |CHAR |char, java.lang.Character |char, java.lang.Character
|
||||
|BooleanType |BIT |boolean, java.lang.Boolean |boolean, java.lang.Boolean
|
||||
|CharacterType |CHAR |char, java.lang.Character |character, char, java.lang.Character
|
||||
|BooleanType |BOOLEAN |boolean, java.lang.Boolean |boolean, java.lang.Boolean
|
||||
|NumericBooleanType |INTEGER, 0 is false, 1 is true |boolean, java.lang.Boolean |numeric_boolean
|
||||
|YesNoType |CHAR, 'N'/'n' is false, 'Y'/'y' is true. The uppercase value is written to the database. |boolean, java.lang.Boolean |yes_no
|
||||
|TrueFalseType |CHAR, 'F'/'f' is false, 'T'/'t' is true. The uppercase value is written to the database. |boolean, java.lang.Boolean |true_false
|
||||
|ByteType |TINYINT |byte, java.lang.Byte |byte, java.lang.Byte
|
||||
|ShortType |SMALLINT |short, java.lang.Short |short, java.lang.Short
|
||||
|IntegerTypes |INTEGER |int, java.lang.Integer |int, java.lang.Integer
|
||||
|IntegerType |INTEGER |int, java.lang.Integer |integer, int, java.lang.Integer
|
||||
|LongType |BIGINT |long, java.lang.Long |long, java.lang.Long
|
||||
|FloatType |FLOAT |float, java.lang.Float |float, java.lang.Float
|
||||
|DoubleType |DOUBLE |double, java.lang.Double |double, java.lang.Double
|
||||
|BigIntegerType |NUMERIC |java.math.BigInteger |big_integer, java.math.BigInteger
|
||||
|BigDecimalType |NUMERIC |java.math.BigDecimal |big_decimal, java.math.bigDecimal
|
||||
|TimestampType |TIMESTAMP |java.sql.Timestamp |timestamp, java.sql.Timestamp
|
||||
|TimeType |TIME |java.sql.Time |time, java.sql.Time
|
||||
|DateType |DATE |java.sql.Date |date, java.sql.Date
|
||||
|CalendarType |TIMESTAMP |java.util.Calendar |calendar, java.util.Calendar
|
||||
|TimestampType |TIMESTAMP |java.util.Date |timestamp, java.sql.Timestamp, java.util.Date
|
||||
|DbTimestampType |TIMESTAMP |java.util.Date |dbtimestamp
|
||||
|TimeType |TIME |java.util.Date |time, java.sql.Time
|
||||
|DateType |DATE |java.util.Date |date, java.sql.Date
|
||||
|CalendarType |TIMESTAMP |java.util.Calendar |calendar, java.util.Calendar, java.util.GregorianCalendar
|
||||
|CalendarDateType |DATE |java.util.Calendar |calendar_date
|
||||
|CalendarTimeType |TIME |java.util.Calendar |calendar_time
|
||||
|CurrencyType |VARCHAR |java.util.Currency |currency, java.util.Currency
|
||||
|LocaleType |VARCHAR |java.util.Locale |locale, java.utility.locale
|
||||
|LocaleType |VARCHAR |java.util.Locale |locale, java.util.Locale
|
||||
|TimeZoneType |VARCHAR, using the TimeZone ID |java.util.TimeZone |timezone, java.util.TimeZone
|
||||
|UrlType |VARCHAR |java.net.URL |url, java.net.URL
|
||||
|ClassType |VARCHAR (class FQN) |java.lang.Class |class, java.lang.Class
|
||||
|
@ -64,10 +65,12 @@ Internally Hibernate uses a registry of basic types when it needs to resolve a s
|
|||
|PrimitiveCharacterArrayNClobType |NCHAR |char[] |N/A
|
||||
|CharacterNCharType |NCHAR |java.lang.Character |ncharacter
|
||||
|CharacterArrayNClobType |NCLOB |java.lang.Character[] |N/A
|
||||
|RowVersionType |VARBINARY |byte[] |row_version
|
||||
|ObjectType |VARCHAR |implementors of java.lang.Serializable | object, java.lang.Object
|
||||
|=======================================================================================================================================================================================================================================================================================
|
||||
|
||||
.Java 8 BasicTypes
|
||||
[cols=",,,",options="header",]
|
||||
[cols="<.^,<.^,<.^,<.^",options="header",]
|
||||
|=================================================================================================
|
||||
|Hibernate type (org.hibernate.type package) |JDBC type |Java type |BasicTypeRegistry key(s)
|
||||
|DurationType |BIGINT |java.time.Duration |Duration, java.time.Duration
|
||||
|
@ -81,16 +84,16 @@ Internally Hibernate uses a registry of basic types when it needs to resolve a s
|
|||
|=================================================================================================
|
||||
|
||||
.Hibernate Spatial BasicTypes
|
||||
[cols=",,,",options="header",]
|
||||
[cols="<.^,<.^,<.^,<.^",options="header",]
|
||||
|=================================================================================================
|
||||
|Hibernate type (org.hibernate.spatial package) |JDBC type |Java type |BasicTypeRegistry key(s)
|
||||
|JTSGeometryType |depends on the dialect | com.vividsolutions.jts.geom.Geometry |jts_geometry, or the class name of Geometry or any of its subclasses
|
||||
|GeolatteGeometryType |depends on the dialect | org.geolatte.geom.Geometry |geolatte_geometry, or the class name of Geometry or any of its subclasses
|
||||
|JTSGeometryType |depends on the dialect | com.vividsolutions.jts.geom.Geometry |jts_geometry, and the class names of Geometry and its subclasses
|
||||
|GeolatteGeometryType |depends on the dialect | org.geolatte.geom.Geometry |geolatte_geometry, and the class names of Geometry and its subclasses
|
||||
|=================================================================================================
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
To use the Hibernate Spatial types, you must add the `hibernate-spatial` dependency to your classpath _and_ use a `org.hibernate.spatial.SpatialDialect` implementation.
|
||||
To use the Hibernate Spatial types, you must add the `hibernate-spatial` dependency to your classpath _and_ use an `org.hibernate.spatial.SpatialDialect` implementation.
|
||||
|
||||
See the <<chapters/query/spatial/Spatial.adoc#spatial,Spatial>> chapter for more details.
|
||||
====
|
||||
|
@ -150,18 +153,18 @@ Note that JPA 2.1 introduced the `javax.persistence.AttributeConverter` contract
|
|||
The `@Basic` annotation defines 2 attributes.
|
||||
|
||||
`optional` - boolean (defaults to true):: Defines whether this attribute allows nulls.
|
||||
JPA defines this as "a hint", which essentially means that it effect is specifically required.
|
||||
JPA defines this as "a hint", which essentially means that its effect is specifically required.
|
||||
As long as the type is not primitive, Hibernate takes this to mean that the underlying column should be `NULLABLE`.
|
||||
`fetch` - FetchType (defaults to EAGER):: Defines whether this attribute should be fetched eagerly or lazily.
|
||||
JPA says that EAGER is a requirement to the provider (Hibernate) that the value should be fetched when the owner is fetched, while LAZY is merely a hint that the value is fetched when the attribute is accessed.
|
||||
Hibernate ignores this setting for basic types unless you are using bytecode enhancement.
|
||||
See the <<chapters/pc/BytecodeEnhancement.adoc#BytecodeEnhancement,BytecodeEnhancement>> for additional information on fetching and on bytecode enhancement.
|
||||
See the <<chapters/pc/BytecodeEnhancement.adoc#BytecodeEnhancement,Bytecode Enhancement>> for additional information on fetching and on bytecode enhancement.
|
||||
|
||||
[[basic-column-annotation]]
|
||||
==== The `@Column` annotation
|
||||
|
||||
JPA defines rules for implicitly determining the name of tables and columns.
|
||||
For a detailed discussion of implicit naming see <<chapters/domain/naming.adoc#naming,Naming>>.
|
||||
For a detailed discussion of implicit naming see <<chapters/domain/naming.adoc#naming,Naming strategies>>.
|
||||
|
||||
For basic type attributes, the implicit naming rule is that the column name is the same as the attribute name.
|
||||
If that implicit naming rule does not meet your requirements, you can explicitly tell Hibernate (and other providers) the column name to use.
|
||||
|
@ -182,12 +185,12 @@ The `@Column` annotation defines other mapping information as well. See its Java
|
|||
[[basic-registry]]
|
||||
==== BasicTypeRegistry
|
||||
|
||||
We said before that a Hibernate type is not a Java type, nor a SQL type, but that it understands both and performs the marshalling between them.
|
||||
We said before that a Hibernate type is not a Java type, nor an SQL type, but that it understands both and performs the marshalling between them.
|
||||
But looking at the basic type mappings from the previous examples,
|
||||
how did Hibernate know to use its `org.hibernate.type.StringType` for mapping for `java.lang.String` attributes,
|
||||
or its `org.hibernate.type.IntegerType` for mapping `java.lang.Integer` attributes?
|
||||
|
||||
The answer lies in a service inside Hibernate called the `org.hibernate.type.BasicTypeRegistry`, which essentially maintains a map of `org.hibernate.type.BasicType` (a `org.hibernate.type.Type` specialization) instances keyed by a name.
|
||||
The answer lies in a service inside Hibernate called the `org.hibernate.type.BasicTypeRegistry`, which essentially maintains a map of `org.hibernate.type.BasicType` (an `org.hibernate.type.Type` specialization) instances keyed by a name.
|
||||
|
||||
We will see later, in the <<basic-type-annotation>> section, that we can explicitly tell Hibernate which BasicType to use for a particular attribute.
|
||||
But first, let's explore how implicit resolution works and how applications can adjust the implicit resolution.
|
||||
|
@ -399,7 +402,7 @@ include::{sourcedir}/../bootstrap/BootstrapTest.java[tags=basic-custom-type-regi
|
|||
====
|
||||
Like `BasicType`, you can also register the `UserType` using a simple name.
|
||||
|
||||
Without registering a name, the `UserType` mapping requires the fully-classified name:
|
||||
Without registering a name, the `UserType` mapping requires the fully qualified class name:
|
||||
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -576,7 +579,7 @@ include::{converter-sourcedir}/ConverterTest.java[tags=basic-attribute-converter
|
|||
----
|
||||
====
|
||||
|
||||
Traditionally, you could only use the dbData `Caption` representation, which in our case is a `String`, when referencing the `caption` entity property.
|
||||
Traditionally, you could only use the DB data `Caption` representation, which in our case is a `String`, when referencing the `caption` entity property.
|
||||
|
||||
[[basic-attribute-converter-query-parameter-converter-dbdata-example]]
|
||||
.Filtering by the `Caption` property using the DB data representation
|
||||
|
@ -985,7 +988,7 @@ Note that this can cause difficulty as the driver chooses to map many different
|
|||
==== UUID as identifier
|
||||
|
||||
Hibernate supports using UUID values as identifiers, and they can even be generated on the user's behalf.
|
||||
For details, see the discussion of generators in <<chapters/domain/identifiers.adoc#identifiers,_Identifier generators_>>.
|
||||
For details, see the discussion of generators in <<chapters/domain/identifiers.adoc#identifiers,_Identifiers_>>.
|
||||
|
||||
[[basic-datetime]]
|
||||
==== Mapping Date/Time Values
|
||||
|
@ -1037,7 +1040,7 @@ include::{extrasdir}/basic/basic-datetime-temporal-date-persist-example.sql[]
|
|||
----
|
||||
====
|
||||
|
||||
Only the year, month and the day field were saved into the database.
|
||||
Only the year, month and the day fields were saved into the database.
|
||||
|
||||
If we change the `@Temporal` type to `TIME`:
|
||||
|
||||
|
@ -1646,7 +1649,7 @@ include::{sourcedir}/../fetching/FetchingTest.java[tags=mapping-column-read-and-
|
|||
----
|
||||
====
|
||||
|
||||
If a property uses more than one column, you must use the `forColumn` attribute to specify which column, the `@ColumnTransformer` read and write expressions are targeting.
|
||||
If a property uses more than one column, you must use the `forColumn` attribute to specify which column the `@ColumnTransformer` read and write expressions are targeting.
|
||||
|
||||
[[mapping-column-read-and-write-composite-type-example]]
|
||||
.`@ColumnTransformer` `forColumn` attribute usage
|
||||
|
|
|
@ -102,7 +102,7 @@ Depending on the number of elements, this behavior might not be efficient, if ma
|
|||
A workaround is to use an `@OrderColumn`, which, although not as efficient as when using the actual link table primary key, might improve the efficiency of the remove operations.
|
||||
|
||||
[[collections-value-type-collection-order-column-remove-example]]
|
||||
.Removing collection elements using the order column
|
||||
.Removing collection elements using @OrderColumn
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
|
|
|
@ -14,7 +14,7 @@ On the other hand, Hibernate can work with both POJO entities and dynamic entity
|
|||
==== Dynamic mapping models
|
||||
|
||||
Persistent entities do not necessarily have to be represented as POJO/JavaBean classes.
|
||||
Hibernate also supports dynamic models (using `Map` of `Maps` at runtime).
|
||||
Hibernate also supports dynamic models (using `Map` of ``Map``s at runtime).
|
||||
With this approach, you do not write persistent classes, only mapping files.
|
||||
|
||||
A given entity has just one entity mode within a given SessionFactory.
|
||||
|
|
|
@ -116,7 +116,7 @@ include::{extrasdir}/embeddable/embeddable-type-association-mapping-example.sql[
|
|||
----
|
||||
====
|
||||
|
||||
Now, if you have a `Book` entity which declares two `Publisher` embeddable types for the ebook and paperback version,
|
||||
Now, if you have a `Book` entity which declares two `Publisher` embeddable types for the ebook and paperback versions,
|
||||
you cannot use the default `Publisher` embeddable mapping since there will be a conflict between the two embeddable column mappings.
|
||||
|
||||
Therefore, the `Book` entity needs to override the embeddable type mappings for each `Publisher` attribute:
|
||||
|
@ -179,7 +179,7 @@ You could even develop your own naming strategy to do other types of implicit na
|
|||
[[embeddable-collections]]
|
||||
==== Collections of embeddable types
|
||||
|
||||
Collections of embeddable types are specifically valued collections (as embeddable types are a value type).
|
||||
Collections of embeddable types are specifically valued collections (as embeddable types are value types).
|
||||
Value collections are covered in detail in <<chapters/domain/collections.adoc#collections-value,Collections of value types>>.
|
||||
|
||||
[[embeddable-mapkey]]
|
||||
|
|
|
@ -19,9 +19,9 @@ Throughout this chapter and thereafter, entity types will be simply referred to
|
|||
==== POJO Models
|
||||
|
||||
Section _2.1 The Entity Class_ of the _JPA 2.1 specification_ defines its requirements for an entity class.
|
||||
Applications that wish to remain portable across JPA providers should adhere to these requirements.
|
||||
Applications that wish to remain portable across JPA providers should adhere to these requirements:
|
||||
|
||||
* The entity class must be annotated with the `javax.persistence.Entity` annotation (or be denoted as such in XML mapping)
|
||||
* The entity class must be annotated with the `javax.persistence.Entity` annotation (or be denoted as such in XML mapping).
|
||||
* The entity class must have a public or protected no-argument constructor. It may define additional constructors as well.
|
||||
* The entity class must be a top-level class.
|
||||
* An enum or interface may not be designated as an entity.
|
||||
|
@ -38,7 +38,7 @@ Hibernate, however, is not as strict in its requirements. The differences from t
|
|||
* The entity class _need not_ be a top-level class.
|
||||
* Technically Hibernate can persist final classes or classes with final persistent state accessor (getter/setter) methods.
|
||||
However, it is generally not a good idea as doing so will stop Hibernate from being able to generate proxies for lazy-loading the entity.
|
||||
* Hibernate does not restrict the application developer from exposing instance variables and reference them from outside the entity class itself.
|
||||
* Hibernate does not restrict the application developer from exposing instance variables and referencing them from outside the entity class itself.
|
||||
The validity of such a paradigm, however, is debatable at best.
|
||||
|
||||
Let's look at each requirement in detail.
|
||||
|
@ -56,7 +56,7 @@ For the very same reason, you should also avoid declaring persistent attribute g
|
|||
====
|
||||
Starting with 5.0, Hibernate offers a more robust version of bytecode enhancement as another means for handling lazy loading.
|
||||
Hibernate had some bytecode re-writing capabilities prior to 5.0 but they were very rudimentary.
|
||||
See the <<chapters/pc/BytecodeEnhancement.adoc#BytecodeEnhancement,BytecodeEnhancement>> for additional information on fetching and on bytecode enhancement.
|
||||
See the <<chapters/pc/BytecodeEnhancement.adoc#BytecodeEnhancement,Bytecode Enhancement>> for additional information on fetching and on bytecode enhancement.
|
||||
====
|
||||
|
||||
[[entity-pojo-constructor]]
|
||||
|
@ -118,7 +118,7 @@ The main piece in mapping the entity is the https://javaee.github.io/javaee-spec
|
|||
|
||||
The `@Entity` annotation defines just the https://javaee.github.io/javaee-spec/javadocs/javax/persistence/Entity.html#name--[`name`] attribute which is used to give a specific entity name for use in JPQL queries.
|
||||
|
||||
By default, if the name attribute the `@Entity` annotation is missing, the unqualified name of the entity class itself will be used as the entity name
|
||||
By default, if the name attribute of the `@Entity` annotation is missing, the unqualified name of the entity class itself will be used as the entity name.
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
|
@ -174,7 +174,7 @@ Let's assume we are using MySQL and want to map a `Book` entity to the `book` ta
|
|||
which looks as follows.
|
||||
|
||||
[[mapping-post-table-catalog-mysql-example]]
|
||||
.The `post` table located in the `public` catalog
|
||||
.The `book` table located in the `public` catalog
|
||||
====
|
||||
[source,sql]
|
||||
----
|
||||
|
@ -204,7 +204,7 @@ Let's assume we are using PostgreSQL and want to map a `Book` entity to the `boo
|
|||
which looks as follows.
|
||||
|
||||
[[mapping-post-table-schema-postgresql-example]]
|
||||
.The `post` table located in the `library` schema
|
||||
.The `book` table located in the `library` schema
|
||||
====
|
||||
[source,sql]
|
||||
----
|
||||
|
@ -248,7 +248,7 @@ Beyond this one very specific use case and few others we will discuss below, you
|
|||
|
||||
So what's all the fuss? Normally, most Java objects provide a built-in `equals()` and `hashCode()` based on the object's identity, so each new object will be different from all others.
|
||||
This is generally what you want in ordinary Java programming.
|
||||
Conceptually however this starts to break down when you start to think about the possibility of multiple instances of a class representing the same data.
|
||||
Conceptually, however, this starts to break down when you start to think about the possibility of multiple instances of a class representing the same data.
|
||||
|
||||
This is, in fact, exactly the case when dealing with data coming from a database.
|
||||
Every time we load a specific `Person` from the database we would naturally get a unique instance.
|
||||
|
@ -268,7 +268,7 @@ include::{sourcedir-mapping}/identifier/SimpleEntityTest.java[tag=entity-pojo-id
|
|||
Consider we have a `Library` parent entity which contains a `java.util.Set` of `Book` entities:
|
||||
|
||||
[[entity-pojo-set-mapping-example]]
|
||||
Library entity mapping
|
||||
.Library entity mapping
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
|
@ -454,11 +454,11 @@ Hibernate will trigger a Persistence Context flush if there are pending `Account
|
|||
[[entity-proxy]]
|
||||
==== Define a custom entity proxy
|
||||
|
||||
By default, when it needs to use a proxy instead of the actual Pojo, Hibernate is going to use a Bytecode manipulation library like
|
||||
By default, when it needs to use a proxy instead of the actual POJO, Hibernate is going to use a Bytecode manipulation library like
|
||||
http://jboss-javassist.github.io/javassist/[Javassist] or
|
||||
http://bytebuddy.net/[Byte Buddy].
|
||||
|
||||
However, if the entity class is final, Javassist will not create a proxy and you will get a Pojo even when you only need a proxy reference.
|
||||
However, if the entity class is final, Javassist will not create a proxy and you will get a POJO even when you only need a proxy reference.
|
||||
In this case, you could proxy an interface that this particular entity implements, as illustrated by the following example.
|
||||
|
||||
[[entity-proxy-interface-mapping]]
|
||||
|
@ -490,7 +490,7 @@ include::{extrasdir}/entity/entity-proxy-persist-mapping.sql[]
|
|||
====
|
||||
|
||||
As you can see in the associated SQL snippet, Hibernate issues no SQL SELECT query since the proxy can be
|
||||
constructed without needing to fetch the actual entity Pojo.
|
||||
constructed without needing to fetch the actual entity POJO.
|
||||
|
||||
[[entity-tuplizer]]
|
||||
==== Dynamic entity proxies using the @Tuplizer annotation
|
||||
|
@ -498,7 +498,7 @@ constructed without needing to fetch the actual entity Pojo.
|
|||
It is possible to map your entities as dynamic proxies using
|
||||
the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Tuplizer.html[`@Tuplizer`] annotation.
|
||||
|
||||
In the following entity mapping, both the embeddable and the entity are mapped as interfaces, not Pojos.
|
||||
In the following entity mapping, both the embeddable and the entity are mapped as interfaces, not POJOs.
|
||||
|
||||
[[entity-tuplizer-entity-mapping]]
|
||||
.Dynamic entity proxy mapping
|
||||
|
@ -549,7 +549,7 @@ include::{sourcedir-proxy}/tuplizer/DataProxyHandler.java[tag=entity-tuplizer-in
|
|||
----
|
||||
====
|
||||
|
||||
With the `DynamicInstantiator` in place, we can work with the dynamic proxy entities just like with Pojo entities.
|
||||
With the `DynamicInstantiator` in place, we can work with the dynamic proxy entities just like with POJO entities.
|
||||
|
||||
[[entity-tuplizer-dynamic-proxy-example]]
|
||||
.Persisting entities and embeddables as dynamic proxies
|
||||
|
@ -584,4 +584,4 @@ include::{sourcedir-persister}/Book.java[tag=entity-persister-mapping,indent=0]
|
|||
====
|
||||
|
||||
By providing your own `EntityPersister` and `CollectionPersister` implementations,
|
||||
you can control how entities and collections are persisted in to the database.
|
||||
you can control how entities and collections are persisted into the database.
|
|
@ -84,7 +84,7 @@ Identifier value generations strategies are discussed in detail in the <<identif
|
|||
==== Composite identifiers
|
||||
|
||||
Composite identifiers correspond to one or more persistent attributes.
|
||||
Here are the rules governing composite identifiers, as defined by the JPA specification.
|
||||
Here are the rules governing composite identifiers, as defined by the JPA specification:
|
||||
|
||||
* The composite identifier must be represented by a "primary key class".
|
||||
The primary key class may be defined using the `javax.persistence.EmbeddedId` annotation (see <<identifiers-composite-aggregated>>),
|
||||
|
@ -101,7 +101,7 @@ Hibernate does allow composite identifiers to be defined without a "primary key
|
|||
====
|
||||
|
||||
The attributes making up the composition can be either basic, composite, or `@ManyToOne`.
|
||||
Note especially that collections and one-to-ones are never appropriate.
|
||||
Note especially that collection and one-to-one are never appropriate.
|
||||
|
||||
[[identifiers-composite-aggregated]]
|
||||
==== Composite identifiers with `@EmbeddedId`
|
||||
|
@ -301,15 +301,15 @@ The rest of the discussion here assumes this setting is enabled (`true`).
|
|||
In Hibernate 5.3, Hibernate attempts to delay the insert of entities if the flush-mode does not equal `AUTO`.
|
||||
This was slightly problematic for entities that used `IDENTITY` or `SEQUENCE` generated identifiers that were
|
||||
also involved in some form of association with another entity in the same transaction.
|
||||
+
|
||||
|
||||
In Hibernate 5.4, Hibernate attempts to remedy the problem using an algorithm to decide if the insert should
|
||||
be delayed or if it requires immediate insertion. We wanted to restore the behavior prior to 5.3 only for
|
||||
very specific use cases where it made sense.
|
||||
+
|
||||
|
||||
Entity mappings can sometimes be complex and it is possible a corner case was overlooked. Hibernate offers a
|
||||
way to completely disable the 5.3 behavior in the event problems occur with `DelayedPostInsertIdentifier`. To
|
||||
enable the legacy behavior, set `hibernate.id.disable_delayed_identity_inserts=true`.
|
||||
+
|
||||
|
||||
This configuration option is meant to act as a _temporary_ fix and bridge the gap between the changes in this
|
||||
behavior across Hibernate 5.x releases. If this configuration setting is necessary for a mapping, please open
|
||||
a JIRA and report the mapping so that the algorithm can be reviewed.
|
||||
|
@ -507,8 +507,8 @@ include::{sourcedir}/UuidCustomGeneratedValueTest.java[tag=identifiers-generator
|
|||
Most of the Hibernate generators that separately obtain identifier values from database structures support the use of pluggable optimizers.
|
||||
Optimizers help manage the number of times Hibernate has to talk to the database in order to generate identifier values.
|
||||
For example, with no optimizer applied to a sequence-generator, every time the application asked Hibernate to generate an identifier it would need to grab the next sequence value from the database.
|
||||
But if we can minimize the number of times we need to communicate with the database here, the application will be able to perform better.
|
||||
Which is, in fact, the role of these optimizers.
|
||||
But if we can minimize the number of times we need to communicate with the database here, the application will be able to perform better,
|
||||
which is, in fact, the role of these optimizers.
|
||||
|
||||
none:: No optimization is performed. We communicate with the database each and every time an identifier value is needed from the generator.
|
||||
|
||||
|
|
|
@ -276,7 +276,7 @@ Each table defines all persistent states of the class, including the inherited s
|
|||
|
||||
In Hibernate, it is not necessary to explicitly map such inheritance hierarchies.
|
||||
You can map each class as a separate entity root.
|
||||
However, if you wish use polymorphic associations (e.g. an association to the superclass of your hierarchy), you need to use the union subclass mapping.
|
||||
However, if you wish to use polymorphic associations (e.g. an association to the superclass of your hierarchy), you need to use the union subclass mapping.
|
||||
|
||||
[[entity-inheritance-table-per-class-example]]
|
||||
.Table per class
|
||||
|
@ -341,7 +341,7 @@ However, you can even query
|
|||
For instance, considering the following `DomainModelEntity` interface:
|
||||
|
||||
[[entity-inheritance-polymorphism-interface-example]]
|
||||
.Domain Model Entity interface
|
||||
.DomainModelEntity interface
|
||||
====
|
||||
[source,java]
|
||||
----
|
||||
|
|
|
@ -4,11 +4,11 @@
|
|||
|
||||
Part of the mapping of an object model to the relational database is
|
||||
mapping names from the object model to the corresponding database names.
|
||||
Hibernate looks at this as 2 stage process:
|
||||
Hibernate looks at this as a 2-stage process:
|
||||
|
||||
* The first stage is determining a proper logical name from the domain model mapping. A
|
||||
logical name can be either explicitly specified by the user (using `@Column` or
|
||||
`@Table` e.g.) or it can be implicitly determined by Hibernate through an
|
||||
logical name can be either explicitly specified by the user (e.g., using `@Column` or
|
||||
`@Table`) or it can be implicitly determined by Hibernate through an
|
||||
<<ImplicitNamingStrategy>> contract.
|
||||
* Second is the resolving of this logical name to a physical name which is defined
|
||||
by the <<PhysicalNamingStrategy>> contract.
|
||||
|
@ -25,7 +25,7 @@ Also, the NamingStrategy contract was often not flexible enough to properly appl
|
|||
"rule", either because the API lacked the information to decide or because the API was honestly
|
||||
not well defined as it grew.
|
||||
|
||||
Due to these limitation, `org.hibernate.cfg.NamingStrategy` has been deprecated and then removed
|
||||
Due to these limitations, `org.hibernate.cfg.NamingStrategy` has been deprecated
|
||||
in favor of ImplicitNamingStrategy and PhysicalNamingStrategy.
|
||||
====
|
||||
|
||||
|
@ -38,7 +38,7 @@ repetitive information a developer must provide for mapping a domain model.
|
|||
====
|
||||
JPA defines inherent rules about implicit logical name determination. If JPA provider
|
||||
portability is a major concern, or if you really just like the JPA-defined implicit
|
||||
naming rules, be sure to stick with ImplicitNamingStrategyJpaCompliantImpl (the default)
|
||||
naming rules, be sure to stick with ImplicitNamingStrategyJpaCompliantImpl (the default).
|
||||
|
||||
Also, JPA defines no separation between logical and physical name. Following the JPA
|
||||
specification, the logical name *is* the physical name. If JPA provider portability
|
||||
|
@ -58,7 +58,7 @@ determine a logical name when the mapping did not provide an explicit name.
|
|||
image:images/domain/naming/implicit_naming_strategy_diagram.svg[Implicit Naming Strategy Diagram]
|
||||
|
||||
Hibernate defines multiple ImplicitNamingStrategy implementations out-of-the-box. Applications
|
||||
are also free to plug-in custom implementations.
|
||||
are also free to plug in custom implementations.
|
||||
|
||||
There are multiple ways to specify the ImplicitNamingStrategy to use. First, applications can specify
|
||||
the implementation using the `hibernate.implicit_naming_strategy` configuration setting which accepts:
|
||||
|
@ -106,7 +106,7 @@ applications and integrations can define custom implementations of this Physical
|
|||
contract. Here is an example PhysicalNamingStrategy for a fictitious company named Acme Corp
|
||||
whose naming standards are to:
|
||||
|
||||
* prefer underscore-delimited words rather than camel-casing
|
||||
* prefer underscore-delimited words rather than camel casing
|
||||
* replace certain words with standard abbreviations
|
||||
|
||||
.Example PhysicalNamingStrategy implementation
|
||||
|
|
|
@ -42,7 +42,7 @@ include::{sourcedir}/MultipleNaturalIdTest.java[tags=naturalid-multiple-attribut
|
|||
[[naturalid-api]]
|
||||
==== Natural Id API
|
||||
|
||||
As stated before, Hibernate provides an API for loading entities by their associate natural id.
|
||||
As stated before, Hibernate provides an API for loading entities by their associated natural id.
|
||||
This is represented by the `org.hibernate.NaturalIdLoadAccess` contract obtained via Session#byNaturalId.
|
||||
|
||||
[NOTE]
|
||||
|
@ -71,17 +71,17 @@ include::{sourcedir}/MultipleNaturalIdTest.java[tags=naturalid-load-access-examp
|
|||
|
||||
NaturalIdLoadAccess offers 2 distinct methods for obtaining the entity:
|
||||
|
||||
`load()`:: obtains a reference to the entity, making sure that the entity state is initialized
|
||||
`load()`:: obtains a reference to the entity, making sure that the entity state is initialized.
|
||||
`getReference()`:: obtains a reference to the entity. The state may or may not be initialized.
|
||||
If the entity is already associated with the current running Session, that reference (loaded or not) is returned.
|
||||
If the entity is not loaded in the current Session and the entity supports proxy generation, an uninitialized proxy is generated and returned, otherwise the entity is loaded from the database and returned.
|
||||
|
||||
`NaturalIdLoadAccess` allows loading an entity by natural id and at the same time apply a pessimistic lock.
|
||||
`NaturalIdLoadAccess` allows loading an entity by natural id and at the same time applies a pessimistic lock.
|
||||
For additional details on locking, see the <<chapters/locking/Locking.adoc#locking,Locking>> chapter.
|
||||
|
||||
We will discuss the last method available on NaturalIdLoadAccess ( `setSynchronizationEnabled()` ) in <<naturalid-mutability-caching>>.
|
||||
|
||||
Because the `Company` and `PostalCarrier` entities define "simple" natural ids, we can load them as follows:
|
||||
Because the `Book` entities in the first two examples define "simple" natural ids, we can load them as follows:
|
||||
|
||||
[[naturalid-simple-load-access-example]]
|
||||
.Loading by simple natural id
|
||||
|
@ -98,7 +98,7 @@ include::{sourcedir}/CompositeNaturalIdTest.java[tags=naturalid-simple-load-acce
|
|||
====
|
||||
|
||||
Here we see the use of the `org.hibernate.SimpleNaturalIdLoadAccess` contract,
|
||||
obtained via `Session#bySimpleNaturalId().
|
||||
obtained via `Session#bySimpleNaturalId()`.
|
||||
|
||||
`SimpleNaturalIdLoadAccess` is similar to `NaturalIdLoadAccess` except that it does not define the using method.
|
||||
Instead, because these _simple_ natural ids are defined based on just one attribute we can directly pass
|
||||
|
@ -115,7 +115,7 @@ If the entity does not define a natural id, or if the natural id is not of a "si
|
|||
A natural id may be mutable or immutable. By default the `@NaturalId` annotation marks an immutable natural id attribute.
|
||||
An immutable natural id is expected to never change its value.
|
||||
|
||||
If the value(s) of the natural id attribute(s) change, `@NaturalId(mutable=true)` should be used instead.
|
||||
If the value(s) of the natural id attribute(s) change, `@NaturalId(mutable = true)` should be used instead.
|
||||
|
||||
[[naturalid-mutable-mapping-example]]
|
||||
.Mutable natural id mapping
|
||||
|
|
|
@ -50,9 +50,9 @@ The persistent attributes of the `Contact` class are value types.
|
|||
|
||||
Value types are further classified into three sub-categories:
|
||||
|
||||
Basic types:: in mapping the `Contact` table, all attributes except for name would be basic types. Basic types are discussed in detail in <<chapters/domain/basic_types.adoc#basic,_Basic Types_>>
|
||||
Embeddable types:: the name attribute is an example of an embeddable type, which is discussed in details in <<chapters/domain/embeddables.adoc#embeddables,_Embeddable Types_>>
|
||||
Collection types:: although not featured in the aforementioned example, collection types are also a distinct category among value types. Collection types are further discussed in <<chapters/domain/collections.adoc#collections,_Collections_>>
|
||||
Basic types:: in mapping the `Contact` table, all attributes except for name would be basic types. Basic types are discussed in detail in <<chapters/domain/basic_types.adoc#basic,_Basic types_>>
|
||||
Embeddable types:: the `name` attribute is an example of an embeddable type, which is discussed in details in <<chapters/domain/embeddables.adoc#embeddables,_Embeddable types_>>
|
||||
Collection types:: although not featured in the aforementioned example, collection types are also a distinct category among value types. Collection types are further discussed in <<chapters/domain/collections.adoc#collections,_Collections_>>
|
||||
|
||||
[[categorization-entity]]
|
||||
==== Entity types
|
||||
|
@ -62,4 +62,4 @@ Entities are domain model classes which correlate to rows in a database table, u
|
|||
Because of the requirement for a unique identifier, entities exist independently and define their own lifecycle.
|
||||
The `Contact` class itself would be an example of an entity.
|
||||
|
||||
Mapping entities is discussed in detail in <<chapters/domain/entity.adoc#entity,_Entity_>>.
|
||||
Mapping entities is discussed in detail in <<chapters/domain/entity.adoc#entity,_Entity types_>>.
|
||||
|
|
|
@ -224,14 +224,14 @@ Sometimes, however, it is easier and more efficient to access it in the last rev
|
|||
`*org.hibernate.envers.default_schema*` (default: `null` - same schema as the table being audited)::
|
||||
The default schema name that should be used for audit tables.
|
||||
+
|
||||
Can be overridden using the `@AuditTable( schema="..." )` annotation.
|
||||
Can be overridden using the `@AuditTable( schema = "..." )` annotation.
|
||||
+
|
||||
If not present, the schema will be the same as the schema of the table being audited.
|
||||
|
||||
`*org.hibernate.envers.default_catalog*` (default: `null` - same catalog as the table being audited)::
|
||||
The default catalog name that should be used for audit tables.
|
||||
+
|
||||
Can be overridden using the `@AuditTable( catalog="..." )` annotation.
|
||||
Can be overridden using the `@AuditTable( catalog = "..." )` annotation.
|
||||
+
|
||||
If not present, the catalog will be the same as the catalog of the normal tables.
|
||||
|
||||
|
@ -255,7 +255,7 @@ This property is only evaluated if the `ValidityAuditStrategy` is used.
|
|||
|
||||
`*org.hibernate.envers.audit_strategy_validity_revend_timestamp_field_name*`(default: `REVEND_TSTMP` )::
|
||||
Column name of the timestamp of the end revision until which the data was valid.
|
||||
Only used if the `ValidityAuditStrategy` is used, and `org.hibernate.envers.audit_strategy_validity_store_revend_timestamp` evaluates to true
|
||||
Only used if the `ValidityAuditStrategy` is used, and `org.hibernate.envers.audit_strategy_validity_store_revend_timestamp` evaluates to true.
|
||||
|
||||
`*org.hibernate.envers.use_revision_entity_with_native_id*` (default: `true` )::
|
||||
Boolean flag that determines the strategy of revision number generation.
|
||||
|
@ -272,7 +272,7 @@ The default implementation creates `REVCHANGES` table that stores entity names o
|
|||
Single record encapsulates the revision identifier (foreign key to `REVINFO` table) and a string value.
|
||||
For more information, refer to <<envers-tracking-modified-entities-revchanges>> and <<envers-tracking-modified-entities-queries>>.
|
||||
|
||||
`*org.hibernate.envers.global_with_modified_flag*` (default: `false`, can be individually overridden with `@Audited( withModifiedFlag=true )` )::
|
||||
`*org.hibernate.envers.global_with_modified_flag*` (default: `false`, can be individually overridden with `@Audited( withModifiedFlag = true )` )::
|
||||
Should property modification flags be stored for all audited entities and all properties.
|
||||
+
|
||||
When set to true, for all properties an additional boolean column in the audit tables will be created, filled with information if the given property changed in the given revision.
|
||||
|
@ -369,7 +369,7 @@ IMPORTANT: These subqueries are notoriously slow and difficult to index.
|
|||
. The alternative is a validity audit strategy.
|
||||
This strategy stores the start-revision and the end-revision of audit information.
|
||||
For each row inserted, updated or deleted in an audited table, one or more rows are inserted in the audit tables, together with the start revision of its validity.
|
||||
But at the same, time the end-revision field of the previous audit rows (if available) is set to this revision.
|
||||
But at the same time, the end-revision field of the previous audit rows (if available) is set to this revision.
|
||||
Queries on the audit information can then use 'between start and end revision' instead of subqueries as used by the default audit strategy.
|
||||
+
|
||||
The consequence of this strategy is that persisting audit information will be a bit slower because of the extra updates involved,
|
||||
|
@ -380,7 +380,7 @@ IMPORTANT: This can be improved even further by adding extra indexes.
|
|||
[[envers-audit-ValidityAuditStrategy]]
|
||||
==== Configuring the `ValidityAuditStrategy`
|
||||
|
||||
To better visualize how the `ValidityAuditStrategy`, consider the following exercise where
|
||||
To better visualize how the `ValidityAuditStrategy` works, consider the following exercise where
|
||||
we replay the previous audit logging example for the `Customer` entity.
|
||||
|
||||
First, you need to configure the `ValidityAuditStrategy`:
|
||||
|
@ -395,7 +395,7 @@ include::{sourcedir}/ValidityStrategyAuditTest.java[tags=envers-audited-validity
|
|||
====
|
||||
|
||||
If you're using the `persistence.xml` configuration file,
|
||||
then the mapping will looks as follows:
|
||||
then the mapping will look as follows:
|
||||
|
||||
[source, XML, indent=0]
|
||||
----
|
||||
|
@ -416,7 +416,7 @@ include::{extrasdir}/envers-audited-validity-mapping-example.sql[]
|
|||
----
|
||||
====
|
||||
|
||||
As you can see, the `REVEND` column is added as well as its Foreign key to the `REVINFO` table.
|
||||
As you can see, the `REVEND` column is added as well as its foreign key to the `REVINFO` table.
|
||||
|
||||
When rerunning the previous `Customer` audit log queries against the `ValidityAuditStrategy`,
|
||||
we get the following results:
|
||||
|
@ -443,7 +443,19 @@ When Envers starts a new revision, it creates a new revision entity which stores
|
|||
By default, that includes just:
|
||||
|
||||
revision number::
|
||||
An integral value (`int/Integer` or `long/Long`). Essentially, the primary key of the revision
|
||||
An integral value (`int/Integer` or `long/Long`). Essentially, the primary key of the revision.
|
||||
+
|
||||
[WARNING]
|
||||
====
|
||||
A revision number value should **always** be increasing and must never overflow.
|
||||
|
||||
The default implementations provided by Envers use an `int` data type which has an upper bound of `Integer.MAX_VALUE`.
|
||||
It is critical that users consider whether this upper bound is feasible for their application. If a larger range is needed, consider using a custom revision entity mapping with a `long` data type.
|
||||
|
||||
In the event that the revision number reaches its upper bound, wrapping around and becoming negative, an `AuditException` will be thrown, causing the current transaction to be rolled back.
|
||||
This guarantees that the audit history remains in a valid state that can be queried by the Envers Query API.
|
||||
====
|
||||
|
||||
revision timestamp::
|
||||
Either a `long/Long` or `java.util.Date` value representing the instant at which the revision was made.
|
||||
When using a `java.util.Date`, instead of a `long/Long` for the revision timestamp, take care not to store it to a column data type which will lose precision.
|
||||
|
@ -461,7 +473,7 @@ You can extend from `org.hibernate.envers.DefaultRevisionEntity`, if you wish, t
|
|||
+
|
||||
Simply add the custom revision entity as you do your normal entities and Envers will *find it*.
|
||||
+
|
||||
NOTE: It is an error for there to be multiple entities marked as `@org.hibernate.envers.RevisionEntity`
|
||||
NOTE: It is an error for there to be multiple entities marked as `@org.hibernate.envers.RevisionEntity`.
|
||||
|
||||
. Second, you need to tell Envers how to create instances of your revision entity which is handled by the
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/envers/RevisionListener.html#newRevision-java.lang.Object-[`newRevision( Object revisionEntity )`]
|
||||
|
@ -650,7 +662,7 @@ include::{sourcedir}/EntityTypeChangeAuditTrackingRevisionListenerTest.java[tags
|
|||
The `CustomTrackingRevisionEntity` contains a `@OneToMany` list of `ModifiedTypeRevisionEntity`
|
||||
|
||||
[[envers-tracking-modified-entities-revchanges-EntityType-example]]
|
||||
.The `EntityType` encapsulatets the entity type name before a class name modification
|
||||
.The `EntityType` encapsulates the entity type name before a class name modification
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -658,7 +670,7 @@ include::{sourcedir}/EntityTypeChangeAuditTrackingRevisionListenerTest.java[tags
|
|||
----
|
||||
====
|
||||
|
||||
Now, when fetching the `CustomTrackingRevisionEntity`, you cna get access to the previous entity class name.
|
||||
Now, when fetching the `CustomTrackingRevisionEntity`, you can get access to the previous entity class name.
|
||||
|
||||
[[envers-tracking-modified-entities-revchanges-query-example]]
|
||||
.Getting the `EntityType` through the `CustomTrackingRevisionEntity`
|
||||
|
@ -686,7 +698,7 @@ Tracking entity changes at the property level can be enabled by:
|
|||
. setting `org.hibernate.envers.global_with_modified_flag` configuration property to `true`.
|
||||
This global switch will cause adding modification flags to be stored for all audited properties of all audited entities.
|
||||
|
||||
. using `@Audited( withModifiedFlag=true )` on a property or on an entity.
|
||||
. using `@Audited( withModifiedFlag = true )` on a property or on an entity.
|
||||
|
||||
The trade-off coming with this functionality is an increased size of audit tables and a very little, almost negligible, performance drop during audit writes.
|
||||
This is due to the fact that every tracked property has to have an accompanying boolean column in the schema that stores information about the property modifications.
|
||||
|
@ -729,7 +741,7 @@ To see how "Modified Flags" can be utilized, check out the very simple query API
|
|||
=== Selecting strategy for tracking property level changes
|
||||
|
||||
By default, Envers uses the `legacy` modified column naming strategy.
|
||||
This strategy is designed to add columns based the following rule-set:
|
||||
This strategy is designed to add columns based on the following rule-set:
|
||||
|
||||
. If property is annotated with `@Audited` and the _modifiedColumnName_ attribute is specified, the column will directly be based on the supplied name.
|
||||
. If property is not annotated with `@Audited` or if no _modifiedColumnName_ attribute is given, the column will be named after the java class property, appended with the configured suffix, the default being `_MOD`.
|
||||
|
@ -748,7 +760,7 @@ public class Customer {
|
|||
}
|
||||
```
|
||||
|
||||
This mapping will actually lead to some inconsistent naming between columns, see below with how the model's name will be stored in `customer_name` but the modified column that tracks whether this column changes between revisions is named `name_MOD`.
|
||||
This mapping will actually lead to some inconsistent naming between columns, see below for how the model's name will be stored in `customer_name` but the modified column that tracks whether this column changes between revisions is named `name_MOD`.
|
||||
|
||||
```
|
||||
CREATE TABLE Customer_AUD (
|
||||
|
@ -761,21 +773,21 @@ CREATE TABLE Customer_AUD (
|
|||
)
|
||||
```
|
||||
|
||||
An additional strategy called `improved`, aims to address these consistent column naming concerns.
|
||||
An additional strategy, called `improved`, aims to address these inconsistent column naming concerns.
|
||||
This strategy uses the following rule-set:
|
||||
|
||||
. Property is a Basic type (Single Column valued property)
|
||||
.. Use the _modifiedColumnName_ directly if one is supplied on the property mapping
|
||||
.. Otherwise use the resolved ORM column name appended with the modified flag suffix configured value.
|
||||
.. Otherwise use the resolved ORM column name appended with the modified flag suffix configured value
|
||||
. Property is an Association (to-one mapping) with a Foreign Key using a single column
|
||||
.. Use the _modifiedColumnName_ directly if one is supplied on the property mapping
|
||||
.. Otherwise use the resolved ORM column name appended with the modified flag suffix configured value.
|
||||
.. Otherwise use the resolved ORM column name appended with the modified flag suffix configured value
|
||||
. Property is an Association (to-one mapping) with a Foreign Key using multiple columns
|
||||
.. Use the _modifiedColumnName_ directly if one is supplied on the property mapping
|
||||
.. Otherwise use the property name appended with the modified flag suffix configured value.
|
||||
.. Otherwise use the property name appended with the modified flag suffix configured value
|
||||
. Property is an Embeddable
|
||||
.. Use the _modifiedColumnName_ directly if one is supplied on the property mapping
|
||||
.. Otherwise use the property name appended with the modified flag suffix configured value.
|
||||
.. Otherwise use the property name appended with the modified flag suffix configured value
|
||||
|
||||
While using this strategy, the same `Customer` mapping will generate the following table schema:
|
||||
|
||||
|
@ -790,7 +802,7 @@ CREATE TABLE Customer_AUD (
|
|||
)
|
||||
```
|
||||
|
||||
When already using Envers in conjunction with the modified columns flag feature, its advised not to enable the new strategy immediately as schema changes would be required.
|
||||
When already using Envers in conjunction with the modified columns flag feature, it is advised not to enable the new strategy immediately as schema changes would be required.
|
||||
You will need to either migrate your existing schema manually to adhere to the rules above or use the explicit _modifiedColumnName_ attribute on the `@Audited` annotation for existing columns that use the feature.
|
||||
|
||||
To configure a custom strategy implementation or use the improved strategy, the configuration option `org.hibernate.envers.modified_column_naming_strategy` will need to be set.
|
||||
|
@ -804,7 +816,7 @@ You can think of historic data as having two dimensions:
|
|||
horizontal:: The state of the database at a given revision. Thus, you can query for entities as they were at revision N.
|
||||
vertical:: The revisions, at which entities changed. Hence, you can query for revisions, in which a given entity changed.
|
||||
|
||||
The queries in Envers are similar to Hibernate Criteria queries, so if you are common with them, using Envers queries will be much easier.
|
||||
The queries in Envers are similar to Hibernate Criteria queries, so if you are familiar with them, using Envers queries will be much easier.
|
||||
|
||||
The main limitation of the current queries implementation is that you cannot traverse relations.
|
||||
You can only specify constraints on the ids of the related entities, and only on the "owning" side of the relation.
|
||||
|
@ -878,7 +890,7 @@ include::{sourcedir}/QueryAuditTest.java[tags=entities-filtering-by-entity-ident
|
|||
----
|
||||
====
|
||||
|
||||
Apart for strict equality matching, you can also use an `IN` clause to provide multiple entity identifiers:
|
||||
Apart from strict equality matching, you can also use an `IN` clause to provide multiple entity identifiers:
|
||||
|
||||
[[entities-in-clause-filtering-by-entity-identifier-example]]
|
||||
.Getting the `Customer` entities whose `address` identifier matches one of the given entity identifiers
|
||||
|
@ -928,10 +940,10 @@ You can add constraints to this query in the same way as to the previous one.
|
|||
|
||||
There are some additional possibilities:
|
||||
|
||||
. using `AuditEntity.revisionNumber()` you can specify constraints, projections and order on the revision number, in which the audited entity was modified
|
||||
. using `AuditEntity.revisionNumber()` you can specify constraints, projections and order on the revision number, in which the audited entity was modified.
|
||||
|
||||
. similarly, using `AuditEntity.revisionProperty( propertyName )` you can specify constraints, projections and order on a property of the revision entity,
|
||||
corresponding to the revision in which the audited entity was modified
|
||||
corresponding to the revision in which the audited entity was modified.
|
||||
|
||||
. `AuditEntity.revisionType()` gives you access as above to the type of the revision (`ADD`, `MOD`, `DEL`).
|
||||
|
||||
|
@ -946,7 +958,7 @@ include::{sourcedir}/QueryAuditTest.java[tags=revisions-of-entity-query-by-revis
|
|||
|
||||
The second additional feature you can use in queries for revisions is the ability to _maximize_/_minimize_ a property.
|
||||
|
||||
For example, if you want to select the smallest possibler revision at which the value of the `createdOn`
|
||||
For example, if you want to select the smallest possible revision at which the value of the `createdOn`
|
||||
attribute was larger than a given value,
|
||||
you can run the following query:
|
||||
|
||||
|
@ -956,7 +968,7 @@ you can run the following query:
|
|||
include::{sourcedir}/QueryAuditTest.java[tags=revisions-of-entity-query-minimize-example]
|
||||
----
|
||||
|
||||
The `minimize()` and `maximize()` methods return a criteria, to which you can add constraints,
|
||||
The `minimize()` and `maximize()` methods return a criterion, to which you can add constraints,
|
||||
which must be met by the entities with the _maximized_/_minimized_ properties.
|
||||
|
||||
You probably also noticed that there are two boolean parameters, passed when creating the query.
|
||||
|
@ -994,7 +1006,7 @@ hold the audited property data at the _maximum_ revision number for each `Custom
|
|||
=== Querying for entity revisions that modified a given property
|
||||
|
||||
For the two types of queries described above it's possible to use special `Audit` criteria called `hasChanged()` and `hasNotChanged()`
|
||||
that makes use of the functionality described in <<envers-tracking-properties-changes>>.
|
||||
that make use of the functionality described in <<envers-tracking-properties-changes>>.
|
||||
|
||||
Let's have a look at various queries that can benefit from these two criteria.
|
||||
|
||||
|
@ -1082,17 +1094,17 @@ You can now obtain this information easily by using the following query:
|
|||
[source, JAVA, indent=0]
|
||||
----
|
||||
List results = AuditReaderFactory.get( entityManager )
|
||||
.createQuery()
|
||||
.forRevisionsOfEntityWithChanges( Customer.class, false )
|
||||
.add( AuditEntity.id().eq( 1L ) )
|
||||
.getResultList();
|
||||
.createQuery()
|
||||
.forRevisionsOfEntityWithChanges( Customer.class, false )
|
||||
.add( AuditEntity.id().eq( 1L ) )
|
||||
.getResultList();
|
||||
|
||||
for ( Object entry : results ) {
|
||||
final Object[] array = (Object[]) entry;
|
||||
final Set<String> propertiesChanged = (Set<String>) array[3];
|
||||
for ( String propertyName : propertiesChanged ) {
|
||||
final Object[] array = (Object[]) entry;
|
||||
final Set<String> propertiesChanged = (Set<String>) array[3];
|
||||
for ( String propertyName : propertiesChanged ) {
|
||||
/* Do something useful with the modified property `propertyName` */
|
||||
}
|
||||
}
|
||||
}
|
||||
----
|
||||
====
|
||||
|
@ -1123,17 +1135,17 @@ include::{sourcedir}/EntityTypeChangeAuditTest.java[tags=envers-tracking-modifie
|
|||
|
||||
Other queries (also accessible from `org.hibernate.envers.CrossTypeRevisionChangesReader`):
|
||||
|
||||
`List<Object> findEntities( Number )`::
|
||||
`List<Object> findEntities(Number)`::
|
||||
Returns snapshots of all audited entities changed (added, updated and removed) in a given revision.
|
||||
Executes `N+1` SQL queries, where `N` is a number of different entity classes modified within specified revision.
|
||||
Executes `N + 1` SQL queries, where `N` is a number of different entity classes modified within specified revision.
|
||||
|
||||
`List<Object> findEntities( Number, RevisionType )`::
|
||||
`List<Object> findEntities(Number, RevisionType)`::
|
||||
Returns snapshots of all audited entities changed (added, updated or removed) in a given revision filtered by modification type.
|
||||
Executes `N+1` SQL queries, where `N` is a number of different entity classes modified within specified revision.
|
||||
Executes `N + 1` SQL queries, where `N` is a number of different entity classes modified within specified revision.
|
||||
|
||||
`Map<RevisionType, List<Object>> findEntitiesGroupByRevisionType( Number )`::
|
||||
`Map<RevisionType, List<Object>> findEntitiesGroupByRevisionType(Number)`::
|
||||
Returns a map containing lists of entity snapshots grouped by modification operation (e.g. addition, update and removal).
|
||||
Executes `3N+1` SQL queries, where `N` is a number of different entity classes modified within specified revision.
|
||||
Executes `3N + 1` SQL queries, where `N` is a number of different entity classes modified within specified revision.
|
||||
|
||||
[[envers-querying-entity-relation-joins]]
|
||||
=== Querying for entities using entity relation joins
|
||||
|
@ -1148,7 +1160,7 @@ to traverse entity relations through an audit query, you must use the relation t
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
Relation joins can be applied to `many-to-one` and `many-to-one` mappings only when using `JoinType.LEFT` or `JoinType.INNER`.
|
||||
Relation joins can be applied to `many-to-one` and `one-to-one` mappings only when using `JoinType.LEFT` or `JoinType.INNER`.
|
||||
====
|
||||
|
||||
The basis for creating an entity relation join query is as follows:
|
||||
|
@ -1173,7 +1185,7 @@ include::{sourcedir}/QueryAuditTest.java[tags=envers-querying-entity-relation-le
|
|||
|
||||
Like any other query, constraints may be added to restrict the results.
|
||||
|
||||
For example, to find a `Customers` entities at a given revision whose addresses are in `România`,
|
||||
For example, to find all `Customer` entities at a given revision whose addresses are in `România`,
|
||||
you can use the following query:
|
||||
|
||||
[[envers-querying-entity-relation-join-restriction]]
|
||||
|
@ -1254,7 +1266,7 @@ include::{extrasdir}/envers-querying-entity-relation-nested-join-multiple-restri
|
|||
|
||||
Lastly, this example illustrates how related entity properties can be compared in a single constraint.
|
||||
|
||||
Assuming, the `Customer` and the `Address` were previously changed as follows:
|
||||
Assuming the `Customer` and the `Address` were previously changed as follows:
|
||||
|
||||
[[envers-querying-entity-relation-nested-join-multiple-restrictions-combined-entities]]
|
||||
.Changing the `Address` to match the `Country` name
|
||||
|
@ -1344,16 +1356,16 @@ but this can be overridden by specifying a different suffix/prefix in the config
|
|||
|
||||
The audit table contains the following columns:
|
||||
|
||||
id:: `id` of the original entity (this can be more then one column in the case of composite primary keys)
|
||||
id:: `id` of the original entity (this can be more than one column in the case of composite primary keys).
|
||||
revision number:: an integer, which matches to the revision number in the revision entity table.
|
||||
revision type:: The `org.hibernate.envers.RevisionType` enumeration ordinal stating if the change represents an INSERT, UPDATE or DELETE.
|
||||
audited fields:: properties from the original entity being audited
|
||||
audited fields:: properties from the original entity being audited.
|
||||
|
||||
The primary key of the audit table is the combination of the original id of the entity and the revision number,
|
||||
so there can be at most one historic entry for a given entity instance at a given revision.
|
||||
|
||||
The current entity data is stored in the original table and in the audit table.
|
||||
This is a duplication of data, however as this solution makes the query system much more powerful, and as memory is cheap, hopefully, this won't be a major drawback for the users.
|
||||
This is a duplication of data, however, as this solution makes the query system much more powerful, and as memory is cheap, hopefully, this won't be a major drawback for the users.
|
||||
|
||||
A row in the audit table with entity id `ID`, revision `N`, and data `D` means: entity with id `ID` has data `D` from revision `N` upwards.
|
||||
Hence, if we want to find an entity at revision `M`, we have to search for a row in the audit table, which has the revision number smaller or equal to `M`, but as large as possible.
|
||||
|
@ -1386,7 +1398,7 @@ Your opinions on the subject are very welcome on the forum.
|
|||
If you would like to generate the database schema file with Hibernate,
|
||||
you simply need to use the hbm2ddl tool.
|
||||
|
||||
This task will generate the definitions of all entities, both of which are audited by Envers and those which are not.
|
||||
This task will generate the definitions of all entities, both of those which are audited by Envers and those which are not.
|
||||
|
||||
See the <<chapters/schema/Schema.adoc#schema-generation, Schema generation>> chapter for more info.
|
||||
|
||||
|
@ -1412,19 +1424,19 @@ include::{extrasdir}/envers-generateschema-example.sql[]
|
|||
==== What isn't and will not be supported
|
||||
|
||||
Bags are not supported because they can contain non-unique elements.
|
||||
Persisting, a bag of `String`s violates the relational database principle that each table is a set of tuples.
|
||||
Persisting a bag of `String`s violates the relational database principle that each table is a set of tuples.
|
||||
|
||||
In case of bags, however (which require a join table), if there is a duplicate element, the two tuples corresponding to the elements will be the same.
|
||||
Although Hibernate allows this, Envers (or more precisely: the database connector) will throw an exception when trying to persist two identical elements because of a unique constraint violation.
|
||||
Although Hibernate allows this, Envers (or more precisely the database connector) will throw an exception when trying to persist two identical elements because of a unique constraint violation.
|
||||
|
||||
There are at least two ways out if you need bag semantics:
|
||||
|
||||
. use an indexed collection, with the `@javax.persistence.OrderColumn` annotation
|
||||
. use an indexed collection, with the `@javax.persistence.OrderColumn` annotation.
|
||||
. provide a unique id for your elements with the `@CollectionId` annotation.
|
||||
|
||||
==== What isn't and _will_ be supported
|
||||
|
||||
. Bag style collections with a `@CollectionId` identifier column (see https://hibernate.atlassian.net/browse/HHH-3950[HHH-3950]).
|
||||
* Bag style collections with a `@CollectionId` identifier column (see https://hibernate.atlassian.net/browse/HHH-3950[HHH-3950]).
|
||||
|
||||
=== `@OneToMany` with `@JoinColumn`
|
||||
|
||||
|
@ -1433,12 +1445,12 @@ Envers, however, has to do this so that when you read the revisions in which the
|
|||
|
||||
To be able to name the additional join table, there is a special annotation: `@AuditJoinTable`, which has similar semantics to JPA `@JoinTable`.
|
||||
|
||||
One special case is to have relations mapped with `@OneToMany` with `@JoinColumn` on the one side, and `@ManyToOne` and `@JoinColumn( insertable=false, updatable=false`) on the many side.
|
||||
One special case is to have relations mapped with `@OneToMany` with `@JoinColumn` on the one side, and `@ManyToOne` and `@JoinColumn( insertable = false, updatable = false )` on the many side.
|
||||
Such relations are, in fact, bidirectional, but the owning side is the collection.
|
||||
|
||||
To properly audit such relations with Envers, you can use the `@AuditMappedBy` annotation.
|
||||
It enables you to specify the reverse property (using the `mappedBy` element).
|
||||
In case of indexed collections, the index column must also be mapped in the referenced entity (using `@Column( insertable=false, updatable=false )`, and specified using `positionMappedBy`.
|
||||
In case of indexed collections, the index column must also be mapped in the referenced entity (using `@Column( insertable = false, updatable = false )`) and specified using `positionMappedBy`.
|
||||
This annotation will affect only the way Envers works.
|
||||
Please note that the annotation is experimental and may change in the future.
|
||||
|
||||
|
@ -1452,7 +1464,7 @@ Because audit tables tend to grow indefinitely, they can quickly become really l
|
|||
When the audit tables have grown to a certain limit (varying per RDBMS and/or operating system) it makes sense to start using table partitioning.
|
||||
SQL table partitioning offers a lot of advantages including, but certainly not limited to:
|
||||
|
||||
. Improved query performance by selectively moving rows to various partitions (or even purging old rows)
|
||||
. Improved query performance by selectively moving rows to various partitions (or even purging old rows).
|
||||
. Faster data loads, index creation, etc.
|
||||
|
||||
[[envers-partitioning-columns]]
|
||||
|
@ -1509,9 +1521,9 @@ Currently, the salary table contains the following rows for a certain person X:
|
|||
The salary for the current fiscal year (2010) is unknown.
|
||||
The agency requires that all changes in registered salaries for a fiscal year are recorded (i.e., an audit trail).
|
||||
The rationale behind this is that decisions made at a certain date are based on the registered salary at that time.
|
||||
And at any time it must be possible reproduce the reason why a certain decision was made at a certain date.
|
||||
And at any time it must be possible to reproduce the reason why a certain decision was made at a certain date.
|
||||
|
||||
The following audit information is available, sorted on in order of occurrence:
|
||||
The following audit information is available, sorted in order of occurrence:
|
||||
|
||||
.Salaries - audit table
|
||||
[width="100%",cols="20%,20%,20%,20%,20%",options="header",]
|
||||
|
@ -1541,7 +1553,7 @@ Remember that, in this case, Envers will have to update the _end revision timest
|
|||
. There are two revisions in the salary of the fiscal year 2007 which both have nearly the same _revision timestamp_ and a different __end revision timestamp__.
|
||||
|
||||
On first sight, it is evident that the first revision was a mistake and probably not relevant.
|
||||
The only relevant revision for 2007 is the one with _end revision timestamp_ null.
|
||||
The only relevant revision for 2007 is the one with _end revision timestamp_ value of null.
|
||||
|
||||
Based on the above, it is evident that only the _end revision timestamp_ is suitable for audit table partitioning.
|
||||
The _revision timestamp_ is not suitable.
|
||||
|
@ -1562,15 +1574,15 @@ the audit row will remain in the same partition (the 'extension bucket').
|
|||
|
||||
And sometime in 2011, the last partition (or 'extension bucket') is split into two new partitions:
|
||||
|
||||
. _end revision timestamp_ year = 2010:: This partition contains audit data that is potentially relevant (in 2011).
|
||||
. _end revision timestamp_ year >= 2011 or null:: This partition contains the most interesting audit data and is the new 'extension bucket'.
|
||||
. _end revision timestamp_ year = 2010: This partition contains audit data that is potentially relevant (in 2011).
|
||||
. _end revision timestamp_ year >= 2011 or null: This partition contains the most interesting audit data and is the new 'extension bucket'.
|
||||
|
||||
[[envers-links]]
|
||||
=== Envers links
|
||||
|
||||
. http://hibernate.org[Hibernate main page]
|
||||
. http://community.jboss.org/en/envers?view=discussions[Forum]
|
||||
. http://hibernate.org/community/[Forum]
|
||||
. https://hibernate.atlassian.net/[JIRA issue tracker] (when adding issues concerning Envers, be sure to select the "envers" component!)
|
||||
. https://hibernate.hipchat.com/chat/room/1238636[HipChat channel]
|
||||
. https://hibernate.zulipchat.com/#narrow/stream/132096-hibernate-user[Zulip channel]
|
||||
. https://community.jboss.org/wiki/EnversFAQ[FAQ]
|
||||
|
||||
|
|
|
@ -88,7 +88,7 @@ include::{sourcedir}/ListenerTest.java[tags=events-interceptors-load-listener-ex
|
|||
[[events-mixing-events-and-interceptors]]
|
||||
=== Mixing Events and Interceptors
|
||||
|
||||
When you want to customize the entity state transition behavior, you have to options:
|
||||
When you want to customize the entity state transition behavior, you have two options:
|
||||
|
||||
. you provide a custom `Interceptor`, which is taken into consideration by the default Hibernate event listeners.
|
||||
For example, the `Interceptor#onSave()` method is invoked by Hibernate `AbstractSaveEventListener`.
|
||||
|
@ -173,7 +173,7 @@ When that is the case, the defined order of execution is well defined by the JPA
|
|||
* Any default listeners associated with the entity are invoked first, in the order they were specified in the XML. See the `javax.persistence.ExcludeDefaultListeners` annotation.
|
||||
* Next, entity listener class callbacks associated with the entity hierarchy are invoked, in the order they are defined in the `EntityListeners`.
|
||||
If multiple classes in the entity hierarchy define entity listeners, the listeners defined for a superclass are invoked before the listeners defined for its subclasses.
|
||||
See the `javax.persistence.ExcludeSuperclassListener`s annotation.
|
||||
See the `javax.persistence.ExcludeSuperclassListeners` annotation.
|
||||
* Lastly, callback methods defined on the entity hierarchy are invoked.
|
||||
If a callback type is annotated on both an entity and one or more of its superclasses without method overriding, both would be called, the most general superclass first.
|
||||
An entity class is also allowed to override a callback method defined in a superclass in which case the super callback would not get invoked; the overriding method would get invoked provided it is annotated.
|
||||
|
|
|
@ -27,7 +27,7 @@ There are a number of scopes for defining fetching:
|
|||
|
||||
_static_::
|
||||
Static definition of fetching strategies is done in the mappings.
|
||||
The statically-defined fetch strategies are used in the absence of any dynamically defined strategies
|
||||
The statically-defined fetch strategies are used in the absence of any dynamically defined strategies.
|
||||
SELECT:::
|
||||
Performs a separate SQL select to load the data. This can either be EAGER (the second select is issued immediately) or LAZY (the second select is delayed until the data is needed).
|
||||
This is the strategy generally termed N+1.
|
||||
|
@ -41,9 +41,9 @@ _static_::
|
|||
Again, this can either be EAGER (the second select is issued immediately) or LAZY (the second select is delayed until the data is needed).
|
||||
_dynamic_ (sometimes referred to as runtime)::
|
||||
The dynamic definition is really use-case centric. There are multiple ways to define dynamic fetching:
|
||||
_fetch profiles_::: defined in mappings, but can be enabled/disabled on the `Session`.
|
||||
HQL/JPQL::: and both Hibernate and JPA Criteria queries have the ability to specify fetching, specific to said query.
|
||||
entity graphs::: Starting in Hibernate 4.2 (JPA 2.1) this is also an option.
|
||||
fetch profiles::: defined in mappings, but can be enabled/disabled on the `Session`.
|
||||
HQL / JPQL::: both Hibernate and JPA Criteria queries have the ability to specify fetching, specific to said query.
|
||||
entity graphs::: starting in Hibernate 4.2 (JPA 2.1), this is also an option.
|
||||
|
||||
[[fetching-direct-vs-query]]
|
||||
=== Direct fetching vs. entity queries
|
||||
|
@ -101,7 +101,7 @@ so Hibernate requires a secondary select to ensure that the EAGER association is
|
|||
[IMPORTANT]
|
||||
====
|
||||
If you forget to JOIN FETCH all EAGER associations, Hibernate is going to issue a secondary select for each and every one of those
|
||||
which, in turn, can lean to N+1 query issues.
|
||||
which, in turn, can lead to N+1 query issues.
|
||||
|
||||
For this reason, you should prefer LAZY associations.
|
||||
====
|
||||
|
@ -206,10 +206,6 @@ include::{sourcedir}/GraphFetchingTest.java[tags=fetching-strategies-dynamic-fet
|
|||
|
||||
[NOTE]
|
||||
====
|
||||
Although the JPA standard specifies that you can override an EAGER fetching association at runtime using the `javax.persistence.fetchgraph` hint,
|
||||
currently, Hibernate does not implement this feature, so EAGER associations cannot be fetched lazily.
|
||||
For more info, check out the https://hibernate.atlassian.net/browse/HHH-8776[HHH-8776] Jira issue.
|
||||
|
||||
When executing a JPQL query, if an EAGER association is omitted, Hibernate will issue a secondary select for every association needed to be fetched eagerly,
|
||||
which can lead to N+1 query issues.
|
||||
|
||||
|
@ -448,7 +444,7 @@ it allows you to fetch all the required data with a single query.
|
|||
=== The `@Fetch` annotation mapping
|
||||
|
||||
Besides the `FetchType.LAZY` or `FetchType.EAGER` JPA annotations,
|
||||
you can also use the Hibernate-specific `@Fetch` annotation that accepts one of the following `FetchMode(s)`:
|
||||
you can also use the Hibernate-specific `@Fetch` annotation that accepts one of the following ``FetchMode``s:
|
||||
|
||||
SELECT::
|
||||
The association is going to be fetched lazily using a secondary select for each individual entity,
|
||||
|
@ -611,7 +607,7 @@ include::{sourcedir}/LazyCollectionTest.java[tags=fetching-LazyCollection-domain
|
|||
either List(s) that are annotated with @OrderColumn or Map(s).
|
||||
|
||||
For bags (e.g. regular List(s) of entities that do not preserve any certain ordering),
|
||||
the @LazyCollection(LazyCollectionOption.EXTRA)` behaves like any other `FetchType.LAZY` collection
|
||||
the `@LazyCollection(LazyCollectionOption.EXTRA)` behaves like any other `FetchType.LAZY` collection
|
||||
(the collection is fetched entirely upon being accessed for the first time).
|
||||
====
|
||||
|
||||
|
@ -648,4 +644,4 @@ include::{extrasdir}/fetching-LazyCollection-select-example.sql[]
|
|||
Therefore, the child entities were fetched one after the other without triggering a full collection initialization.
|
||||
|
||||
For this reason, caution is advised since accessing all elements using `LazyCollectionOption.EXTRA` can lead to N+1 query issues.
|
||||
====
|
||||
====
|
||||
|
|
|
@ -106,7 +106,7 @@ include::{extrasdir}/flushing-auto-flush-jpql-overlap-example.sql[]
|
|||
----
|
||||
====
|
||||
|
||||
This time, the flush was triggered by a JPQL query because the pending entity persists action overlaps with the query being executed.
|
||||
This time, the flush was triggered by a JPQL query because the pending entity persisting action overlaps with the query being executed.
|
||||
|
||||
==== `AUTO` flush on native SQL query
|
||||
|
||||
|
@ -173,7 +173,7 @@ include::{extrasdir}/flushing-commit-flush-jpql-example.sql[]
|
|||
Because JPA doesn't impose a strict rule on delaying flushing, when executing a native SQL query, the persistence context is going to be flushed.
|
||||
|
||||
[[flushing-commit-flush-sql-example]]
|
||||
.`COMMIT` flushing on SQL
|
||||
.`COMMIT` flushing on native SQL
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -197,7 +197,7 @@ The `ALWAYS` is only available with the native `Session` API.
|
|||
The `ALWAYS` flush mode triggers a persistence context flush even when executing a native SQL query against the `Session` API.
|
||||
|
||||
[[flushing-always-flush-sql-example]]
|
||||
.`COMMIT` flushing on SQL
|
||||
.`COMMIT` flushing on native SQL
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -230,7 +230,7 @@ include::{extrasdir}/flushing-manual-flush-example.sql[]
|
|||
----
|
||||
====
|
||||
|
||||
The `INSERT` statement was not executed because the persistence context because there was no manual `flush()` call.
|
||||
The `INSERT` statement was not executed because there was no manual `flush()` call.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
|
@ -277,6 +277,7 @@ The `ActionQueue` executes all operations in the following order:
|
|||
. `OrphanRemovalAction`
|
||||
. `EntityInsertAction` or `EntityIdentityInsertAction`
|
||||
. `EntityUpdateAction`
|
||||
. `QueuedOperationCollectionAction`
|
||||
. `CollectionRemoveAction`
|
||||
. `CollectionUpdateAction`
|
||||
. `CollectionRecreateAction`
|
||||
|
|
|
@ -28,7 +28,7 @@ Hibernate will internally determine which `ConnectionProvider` to use based on t
|
|||
|
||||
Hibernate can integrate with a `javax.sql.DataSource` for obtaining JDBC Connections.
|
||||
Applications would tell Hibernate about the `DataSource` via the (required) `hibernate.connection.datasource` setting which can either specify a JNDI name or reference the actual `DataSource` instance.
|
||||
For cases where a JNDI name is given, be sure to read <<chapters/jndi/JNDI.adoc#jndi,JNDI>>
|
||||
For cases where a JNDI name is given, be sure to read <<chapters/jndi/JNDI.adoc#jndi,JNDI>>.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
|
@ -185,7 +185,7 @@ This usage is discouraged and not discussed here.
|
|||
All of the provided ConnectionProvider implementations, other than `DataSourceConnectionProvider`, support consistent setting of transaction isolation for all `Connections` obtained from the underlying pool.
|
||||
The value for `hibernate.connection.isolation` can be specified in one of 3 formats:
|
||||
|
||||
* the integer value accepted at the JDBC level
|
||||
* the integer value accepted at the JDBC level.
|
||||
* the name of the `java.sql.Connection` constant field representing the isolation you would like to use.
|
||||
For example, `TRANSACTION_REPEATABLE_READ` for https://docs.oracle.com/javase/8/docs/api/java/sql/Connection.html#TRANSACTION_REPEATABLE_READ[`java.sql.Connection#TRANSACTION_REPEATABLE_READ`].
|
||||
Note that this is only supported for JDBC standard isolation levels, not for isolation levels specific to a particular JDBC driver.
|
||||
|
@ -212,7 +212,7 @@ If you don't want to use the default connection handling mode, you can specify a
|
|||
|
||||
==== Transaction type and connection handling
|
||||
|
||||
By default, the connection handling mode is given by the underlying transaction coordinator. There are two types of transactions: `RESOURCE_LOCAL` (which involves a single database `Connection` and the transaction is controlled via the `commit` and `rollback` `Connection` methods) and `JTA` (which may involve multiple resources either database connections, JMS queues, etc).
|
||||
By default, the connection handling mode is given by the underlying transaction coordinator. There are two types of transactions: `RESOURCE_LOCAL` (which involves a single database `Connection` and the transaction is controlled via the `commit` and `rollback` `Connection` methods) and `JTA` (which may involve multiple resources including database connections, JMS queues, etc).
|
||||
|
||||
===== RESOURCE_LOCAL transaction connection handling
|
||||
|
||||
|
@ -270,6 +270,7 @@ If for some reason it is not able to determine the proper one or you want to use
|
|||
|DB297 |Support for the DB2 database, version 9.7.
|
||||
|DB2390 |Support for DB2 Universal Database for OS/390, also known as DB2/390.
|
||||
|DB2400 |Support for DB2 Universal Database for iSeries, also known as DB2/400.
|
||||
|DB2400V7R3 |Support for DB2 Universal Database for i, also known as DB2/400, version 7.3
|
||||
|DerbyTenFive |Support for the Derby database, version 10.5
|
||||
|DerbyTenSix |Support for the Derby database, version 10.6
|
||||
|DerbyTenSeven |Support for the Derby database, version 10.7
|
||||
|
|
|
@ -10,9 +10,9 @@ Generally, it does this when the application:
|
|||
* is using JTA transactions and the `JtaPlatform` needs to do JNDI lookups for `TransactionManager`, `UserTransaction`, etc
|
||||
|
||||
All of these JNDI calls route through a single service whose role is `org.hibernate.engine.jndi.spi.JndiService`.
|
||||
The standard `JndiService` accepts a number of configuration settings
|
||||
The standard `JndiService` accepts a number of configuration settings:
|
||||
|
||||
`hibernate.jndi.class`:: names the javax.naming.InitialContext implementation class to use. See https://docs.oracle.com/javase/8/docs/api/javax/naming/Context.html#INITIAL_CONTEXT_FACTORY[`javax.naming.Context#INITIAL_CONTEXT_FACTORY`]
|
||||
`hibernate.jndi.class`:: names the `javax.naming.InitialContext` implementation class to use. See https://docs.oracle.com/javase/8/docs/api/javax/naming/Context.html#INITIAL_CONTEXT_FACTORY[`javax.naming.Context#INITIAL_CONTEXT_FACTORY`]
|
||||
`hibernate.jndi.url`:: names the JNDI InitialContext connection url. See https://docs.oracle.com/javase/8/docs/api/javax/naming/Context.html#PROVIDER_URL[`javax.naming.Context.PROVIDER_URL`]
|
||||
|
||||
Any other settings prefixed with `hibernate.jndi.` will be collected and passed along to the JNDI provider.
|
||||
|
|
|
@ -114,7 +114,7 @@ include::{sourcedir}/OptimisticLockingTest.java[tags=locking-optimistic-version-
|
|||
|
||||
Hibernate can retrieve the timestamp value from the database or the JVM, by reading the value you specify for the `@org.hibernate.annotations.Source` annotation.
|
||||
The value can be either `org.hibernate.annotations.SourceType.DB` or `org.hibernate.annotations.SourceType.VM`.
|
||||
The default behavior is to use the database and is also used if you don't specify the annotation at all.
|
||||
The default behavior is to use the database, and database is also used if you don't specify the annotation at all.
|
||||
|
||||
The timestamp can also be generated by the database instead of Hibernate
|
||||
if you use the `@org.hibernate.annotations.Generated(GenerationTime.ALWAYS)` or the `@Source` annotation.
|
||||
|
|
|
@ -10,7 +10,7 @@ The term multitenancy, in general, is applied to software development to indicat
|
|||
This is highly common in SaaS solutions.
|
||||
Isolating information (data, customizations, etc.) pertaining to the various tenants is a particular challenge in these systems.
|
||||
This includes the data owned by each tenant stored in the database.
|
||||
It is this last piece, sometimes called multitenant data, on which we will focus.
|
||||
It is this last piece, sometimes called multitenant data, that we will focus on.
|
||||
|
||||
[[multitenacy-approaches]]
|
||||
=== Multitenant data approaches
|
||||
|
@ -21,7 +21,6 @@ There are three main approaches to isolating information in these multitenant sy
|
|||
====
|
||||
Each multitenancy strategy has pros and cons as well as specific techniques and considerations.
|
||||
Such topics are beyond the scope of this documentation.
|
||||
Many resources exist which delve into these other topics, like http://msdn.microsoft.com/en-us/library/aa479086.aspx[this one] which does a great job of covering this subject.
|
||||
====
|
||||
|
||||
[[multitenacy-separate-database]]
|
||||
|
@ -116,7 +115,7 @@ It could name a `MultiTenantConnectionProvider` instance, a `MultiTenantConnecti
|
|||
Hibernate will assume it should use the specific `DataSourceBasedMultiTenantConnectionProviderImpl` implementation which works on a number of pretty reasonable assumptions when running inside of an app server and using one `javax.sql.DataSource` per tenant.
|
||||
See its https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/connections/spi/DataSourceBasedMultiTenantConnectionProviderImpl.html[Javadocs] for more details.
|
||||
|
||||
The following example portrays a `MultiTenantConnectionProvider` implementation that handles multiple `ConnectionProviders`.
|
||||
The following example portrays a `MultiTenantConnectionProvider` implementation that handles multiple ``ConnectionProvider``s.
|
||||
|
||||
[[multitenacy-hibernate-ConfigurableMultiTenantConnectionProvider-example]]
|
||||
.A `MultiTenantConnectionProvider` implementation
|
||||
|
@ -130,7 +129,7 @@ include::{sourcedir}/ConfigurableMultiTenantConnectionProvider.java[tags=multite
|
|||
The `ConfigurableMultiTenantConnectionProvider` can be set up as follows:
|
||||
|
||||
[[multitenacy-hibernate-MultiTenantConnectionProvider-example]]
|
||||
.A `MultiTenantConnectionProvider` implementation
|
||||
.A `MultiTenantConnectionProvider` usage example
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -141,7 +140,7 @@ include::{sourcedir}/AbstractMultiTenancyTest.java[tags=multitenacy-hibernate-Mu
|
|||
When using multitenancy, it's possible to save an entity with the same identifier across different tenants:
|
||||
|
||||
[[multitenacy-hibernate-same-entity-example]]
|
||||
.A `MultiTenantConnectionProvider` implementation
|
||||
.An example of saving entities with the same identifier across different tenants
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -150,20 +149,20 @@ include::{sourcedir}/AbstractMultiTenancyTest.java[tags=multitenacy-multitenacy-
|
|||
====
|
||||
|
||||
[[multitenacy-hibernate-CurrentTenantIdentifierResolver]]
|
||||
==== CurrentTenantIdentifierResolver
|
||||
==== `CurrentTenantIdentifierResolver`
|
||||
|
||||
`org.hibernate.context.spi.CurrentTenantIdentifierResolver` is a contract for Hibernate to be able to resolve what the application considers the current tenant identifier.
|
||||
The implementation to use is either passed directly to `Configuration` via its `setCurrentTenantIdentifierResolver` method.
|
||||
It can also be specified via the `hibernate.tenant_identifier_resolver` setting.
|
||||
The implementation to use can be either passed directly to `Configuration` via its `setCurrentTenantIdentifierResolver` method,
|
||||
or be specified via the `hibernate.tenant_identifier_resolver` setting.
|
||||
|
||||
There are two situations where CurrentTenantIdentifierResolver is used:
|
||||
There are two situations where `CurrentTenantIdentifierResolver` is used:
|
||||
|
||||
* The first situation is when the application is using the `org.hibernate.context.spi.CurrentSessionContext` feature in conjunction with multitenancy.
|
||||
In the case of the current-session feature, Hibernate will need to open a session if it cannot find an existing one in scope.
|
||||
However, when a session is opened in a multitenant environment, the tenant identifier has to be specified.
|
||||
This is where the `CurrentTenantIdentifierResolver` comes into play; Hibernate will consult the implementation you provide to determine the tenant identifier to use when opening the session.
|
||||
In this case, it is required that a `CurrentTenantIdentifierResolver` is supplied.
|
||||
* The other situation is when you do not want to have to explicitly specify the tenant identifier all the time.
|
||||
* The other situation is when you do not want to explicitly specify the tenant identifier all the time.
|
||||
If a `CurrentTenantIdentifierResolver` has been specified, Hibernate will use it to determine the default tenant identifier to use when opening the session.
|
||||
|
||||
Additionally, if the `CurrentTenantIdentifierResolver` implementation returns `true` for its `validateExistingCurrentSessions` method, Hibernate will make sure any existing sessions that are found in scope have a matching tenant identifier.
|
||||
|
@ -186,7 +185,7 @@ The JPA expert group is in the process of defining multitenancy support for an u
|
|||
==== Multitenancy Hibernate Session configuration
|
||||
|
||||
When using multitenancy, you might want to configure each tenant-specific `Session` differently.
|
||||
For instance, each tenant could take a different time zone configuration.
|
||||
For instance, each tenant could specify a different time zone configuration.
|
||||
|
||||
[[multitenacy-hibernate-timezone-configuration-registerConnectionProvider-call-example]]
|
||||
.Registering the tenant-specific time zone information
|
||||
|
@ -245,7 +244,7 @@ The following example shows you that the `Timestamp` was saved in the UTC time z
|
|||
test output.
|
||||
|
||||
[[multitenacy-hibernate-not-applying-timezone-configuration-example]]
|
||||
.With the `useTenantTimeZone` property set to `false`, the `Timestamp` in fetched in the tenant-specific time zone
|
||||
.With the `useTenantTimeZone` property set to `false`, the `Timestamp` is fetched in the tenant-specific time zone
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
----
|
||||
|
@ -258,5 +257,5 @@ include::{extrasdir}/multitenacy-hibernate-not-applying-timezone-configuration-e
|
|||
----
|
||||
====
|
||||
|
||||
Notice that, for the `Eastern European Time` time zone, the time zone offset was 2 hours when the test was executed.
|
||||
Notice that for the `Eastern European Time` time zone, the time zone offset was 2 hours when the test was executed.
|
||||
|
||||
|
|
|
@ -16,9 +16,9 @@ Hibernate supports three types of configurations within OSGi.
|
|||
|
||||
=== hibernate-osgi
|
||||
|
||||
Rather than embed OSGi capabilities into hibernate-core, and sub-modules, hibernate-osgi was created.
|
||||
Rather than embedding OSGi capabilities into hibernate-core, and sub-modules, hibernate-osgi was created.
|
||||
It's purposefully separated, isolating all OSGi dependencies.
|
||||
It provides an OSGi-specific `ClassLoader` (aggregates the container's `ClassLoader` with core and `EntityManager` `ClassLoader`s),
|
||||
It provides an OSGi-specific `ClassLoader` (aggregates the container's `ClassLoader` with core and `EntityManager` ``ClassLoader``s),
|
||||
JPA persistence provider, `SessionFactory`/`EntityManagerFactory` bootstrapping, entities/mappings scanner, and service management.
|
||||
|
||||
=== features.xml
|
||||
|
@ -26,14 +26,14 @@ JPA persistence provider, `SessionFactory`/`EntityManagerFactory` bootstrapping,
|
|||
Apache Karaf environments tend to make heavy use of its "features" concept, where a feature is a set of order-specific bundles focused on a concise capability.
|
||||
These features are typically defined in a `features.xml` file.
|
||||
Hibernate produces and releases its own `features.xml` that defines a core `hibernate-orm`, as well as additional features for optional functionality (caching, Envers, etc.).
|
||||
This is included in the binary distribution, as well as deployed to the JBoss Nexus repository (using the `org.hibernate` groupId and `hibernate-osgi` with the `karaf.xml` classifier).
|
||||
This is included in the binary distribution, as well as deployed to the JBoss Nexus repository (using the `org.hibernate` groupId and `hibernate-osgi` artifactId with the `karaf.xml` classifier).
|
||||
|
||||
Note that our features are versioned using the same ORM artifact versions they wrap.
|
||||
Also, note that the features are heavily tested against Karaf 3.0.3 as a part of our PaxExam-based integration tests.
|
||||
However, they'll likely work on other versions as well.
|
||||
|
||||
hibernate-osgi, theoretically, supports a variety of OSGi containers, such as Equinox.
|
||||
In that case, please use `features.xm`l as a reference for necessary bundles to activate and their correct ordering.
|
||||
In that case, please use `features.xml` as a reference for necessary bundles to activate and their correct ordering.
|
||||
However, note that Karaf starts a number of bundles automatically, several of which would need to be installed manually on alternatives.
|
||||
|
||||
=== QuickStarts/Demos
|
||||
|
@ -90,7 +90,7 @@ That `DataSource` is then used by your `persistence.xml` persistence-unit. The f
|
|||
|
||||
=== Bundle Package Imports
|
||||
|
||||
Your bundle's manifest will need to import, at a minimum,
|
||||
Your bundle's manifest will need to import, at a minimum:
|
||||
|
||||
* `javax.persistence`
|
||||
* `org.hibernate.proxy` and `javassist.util.proxy`, due to Hibernate's ability to return proxies for lazy initialization (Javassist enhancement occurs on the entity's `ClassLoader` during runtime).
|
||||
|
@ -120,9 +120,9 @@ Similar to any other JPA setup, your bundle must include a `persistence.xml` fil
|
|||
|
||||
=== Bundle Package Imports
|
||||
|
||||
Your bundle's manifest will need to import, at a minimum,
|
||||
Your bundle's manifest will need to import, at a minimum:
|
||||
|
||||
* javax.persistence
|
||||
* `javax.persistence`
|
||||
* `org.hibernate.proxy` and `javassist.util.proxy`, due to Hibernate's ability to return proxies for lazy initialization (Javassist enhancement occurs on the entity's `ClassLoader` during runtime)
|
||||
* JDBC driver package (example: `org.h2`)
|
||||
* `org.osgi.framework`, necessary to discover the `EntityManagerFactory` (described below)
|
||||
|
@ -150,13 +150,13 @@ include::{sourcedir}/jpa/HibernateUtil.java[tag=osgi-discover-EntityManagerFacto
|
|||
[[osgi-unmanaged-native]]
|
||||
=== Unmanaged Native
|
||||
|
||||
Native Hibernate use is also supported. The client bundle is responsible for managing the `SessionFactory` and `Session`s.
|
||||
Native Hibernate use is also supported. The client bundle is responsible for managing the ``SessionFactory`` and ``Session``s.
|
||||
|
||||
=== Bundle Package Imports
|
||||
|
||||
Your bundle's manifest will need to import, at a minimum,
|
||||
Your bundle's manifest will need to import, at a minimum:
|
||||
|
||||
* javax.persistence
|
||||
* `javax.persistence`
|
||||
* `org.hibernate.proxy` and `javassist.util.proxy`, due to Hibernate's ability to return proxies for lazy initialization (Javassist enhancement occurs on the entity's `ClassLoader` during runtime)
|
||||
* JDBC driver package (example: `org.h2`)
|
||||
* `org.osgi.framework`, necessary to discover the `SessionFactory` (described below)
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
:sourcedir: ../../../../../test/java/org/hibernate/userguide/pc
|
||||
|
||||
Hibernate "grew up" not supporting bytecode enhancement at all.
|
||||
At that time, Hibernate only supported proxy-based for lazy loading and always used diff-based dirty calculation.
|
||||
At that time, Hibernate only supported a proxy-based alternative for lazy loading and always used diff-based dirty calculation.
|
||||
Hibernate 3.x saw the first attempts at bytecode enhancement support in Hibernate.
|
||||
We consider those initial attempts (up until 5.0) completely as an incubation.
|
||||
The support for bytecode enhancement in 5.0 onward is what we are discussing here.
|
||||
|
@ -145,7 +145,7 @@ enableLazyInitialization:: Whether enhancement for lazy attribute loading should
|
|||
enableDirtyTracking:: Whether enhancement for self-dirty tracking should be done.
|
||||
enableAssociationManagement:: Whether enhancement for bi-directional association management should be done.
|
||||
|
||||
The default value for all 3 configuration settings is `false`
|
||||
The default value for all 3 configuration settings is `false`.
|
||||
|
||||
The `enhance { }` block is required in order for enhancement to occur.
|
||||
Enhancement is disabled by default in preparation for additional capabilities (hbm2ddl, etc.) in the plugin.
|
||||
|
|
|
@ -10,12 +10,12 @@ Persistent data has a state in relation to both a persistence context and the un
|
|||
|
||||
`transient`:: the entity has just been instantiated and is not associated with a persistence context.
|
||||
It has no persistent representation in the database and typically no identifier value has been assigned (unless the _assigned_ generator was used).
|
||||
`managed`, or `persistent`:: the entity has an associated identifier and is associated with a persistence context.
|
||||
`managed` or `persistent`:: the entity has an associated identifier and is associated with a persistence context.
|
||||
It may or may not physically exist in the database yet.
|
||||
`detached`:: the entity has an associated identifier but is no longer associated with a persistence context (usually because the persistence context was closed or the instance was evicted from the context)
|
||||
`removed`:: the entity has an associated identifier and is associated with a persistence context, however, it is scheduled for removal from the database.
|
||||
|
||||
Much of the `org.hibernate.Session` and `javax.persistence.EntityManager` methods deal with moving entities between these states.
|
||||
Much of the `org.hibernate.Session` and `javax.persistence.EntityManager` methods deal with moving entities among these states.
|
||||
|
||||
[[pc-unwrap]]
|
||||
=== Accessing Hibernate APIs from JPA
|
||||
|
@ -37,7 +37,7 @@ include::BytecodeEnhancement.adoc[]
|
|||
=== Making entities persistent
|
||||
|
||||
Once you've created a new entity instance (using the standard `new` operator) it is in `new` state.
|
||||
You can make it persistent by associating it to either a `org.hibernate.Session` or `javax.persistence.EntityManager`.
|
||||
You can make it persistent by associating it to either an `org.hibernate.Session` or a `javax.persistence.EntityManager`.
|
||||
|
||||
[[pc-persist-jpa-example]]
|
||||
.Making an entity persistent with JPA
|
||||
|
@ -57,7 +57,7 @@ include::{sourcedir}/PersistenceContextTest.java[tags=pc-persist-native-example]
|
|||
----
|
||||
====
|
||||
|
||||
`org.hibernate.Session` also has a method named persist which follows the exact semantic defined in the JPA specification for the persist method.
|
||||
`org.hibernate.Session` also has a method named persist which follows the exact semantics defined in the JPA specification for the persist method.
|
||||
It is this `org.hibernate.Session` method to which the Hibernate `javax.persistence.EntityManager` implementation delegates.
|
||||
|
||||
If the `DomesticCat` entity type has a generated identifier, the value is associated with the instance when the save or persist is called.
|
||||
|
@ -442,7 +442,7 @@ include::{sourcedir}/WhereJoinTableTest.java[tags=pc-where-join-table-persist-ex
|
|||
----
|
||||
====
|
||||
|
||||
When fetching the `currentWeekReaders` collection, Hibernate is going to find one one entry:
|
||||
When fetching the `currentWeekReaders` collection, Hibernate is going to find only one entry:
|
||||
|
||||
[[pc-where-join-table-fetch-example]]
|
||||
.`@WhereJoinTable` fetch example
|
||||
|
@ -981,7 +981,7 @@ The possible values are:
|
|||
disallow (the default):: throws `IllegalStateException` if an entity copy is detected
|
||||
allow:: performs the merge operation on each entity copy that is detected
|
||||
log:: (provided for testing only) performs the merge operation on each entity copy that is detected and logs information about the entity copies.
|
||||
This setting requires DEBUG logging be enabled for `org.hibernate.event.internal.EntityCopyAllowedLoggedObserver`.
|
||||
This setting requires DEBUG logging be enabled for `org.hibernate.event.internal.EntityCopyAllowedLoggedObserver`
|
||||
|
||||
In addition, the application may customize the behavior by providing an implementation of `org.hibernate.event.spi.EntityCopyObserver` and setting `hibernate.event.merge.entity_copy_observer` to the class name.
|
||||
When this property is set to `allow` or `log`, Hibernate will merge each entity copy detected while cascading the merge operation.
|
||||
|
@ -1132,7 +1132,7 @@ include::{sourcedir-caching}/FirstLevelCacheTest.java[tags=caching-management-co
|
|||
JPA allows you to propagate the state transition from a parent entity to a child.
|
||||
For this purpose, the JPA `javax.persistence.CascadeType` defines various cascade types:
|
||||
|
||||
`ALL`:: cascades all entity state transitions
|
||||
`ALL`:: cascades all entity state transitions.
|
||||
`PERSIST`:: cascades the entity persist operation.
|
||||
`MERGE`:: cascades the entity merge operation.
|
||||
`REMOVE`:: cascades the entity remove operation.
|
||||
|
@ -1401,7 +1401,7 @@ attached to the current `SessionFactory`.
|
|||
|
||||
By default, the `SQLExceptionConverter` is defined by the configured Hibernate `Dialect` via the
|
||||
https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/dialect/Dialect.html#buildSQLExceptionConversionDelegate--[`buildSQLExceptionConversionDelegate`] method
|
||||
which is overridden by several database-specific `Dialects`.
|
||||
which is overridden by several database-specific ``Dialect``s.
|
||||
|
||||
However, it is also possible to plug in a custom implementation. See the
|
||||
<<appendices/Configurations.adoc#configurations-exception-handling,`hibernate.jdbc.sql_exception_converter`>> configuration property for more details.
|
||||
|
|
|
@ -11,9 +11,9 @@ buildscript {
|
|||
}
|
||||
|
||||
hibernate {
|
||||
enhance {
|
||||
enhance {
|
||||
enableLazyInitialization = true
|
||||
enableDirtyTracking = true
|
||||
enableAssociationManagement = true
|
||||
}
|
||||
}
|
||||
}
|
|
@ -26,7 +26,7 @@ Generally, this required their users to configure the Hibernate dialect or defin
|
|||
Starting with version 3.2, Hibernate introduced the notion of automatically detecting the dialect to use based on the `java.sql.DatabaseMetaData` obtained from a `java.sql.Connection` to that database.
|
||||
This was much better, except that this resolution was limited to databases Hibernate knew about ahead of time and was in no way configurable or overridable.
|
||||
|
||||
Starting with version 3.3, Hibernate has a fare more powerful way to automatically determine which dialect to should be used by relying on a series of delegates which implement the `org.hibernate.dialect.resolver.DialectResolver` which defines only a single method:
|
||||
Starting with version 3.3, Hibernate has a far more powerful way to automatically determine which dialect should be used by relying on a series of delegates which implement the `org.hibernate.dialect.resolver.DialectResolver` which defines only a single method:
|
||||
|
||||
[source,java]
|
||||
----
|
||||
|
@ -41,7 +41,7 @@ All other exceptions result in a warning and continuing on to the next resolver.
|
|||
The cool part about these resolvers is that users can also register their own custom resolvers which will be processed ahead of the built-in Hibernate ones.
|
||||
This might be useful in a number of different situations:
|
||||
|
||||
* it allows easy integration for auto-detection of dialects beyond those shipped with Hibernate itself
|
||||
* it allows easy integration for auto-detection of dialects beyond those shipped with Hibernate itself.
|
||||
* it allows you to specify to use a custom dialect when a particular database is recognized.
|
||||
|
||||
To register one or more resolvers, simply specify them (separated by commas, tabs or spaces) using the 'hibernate.dialect_resolvers' configuration setting (see the `DIALECT_RESOLVERS` constant on `org.hibernate.cfg.Environment`).
|
||||
|
@ -91,7 +91,7 @@ In terms of portability concerns, the function handling currently works pretty w
|
|||
|
||||
SQL functions can be referenced in many ways by users.
|
||||
However, not all databases support the same set of functions.
|
||||
Hibernate, provides a means of mapping a _logical_ function name to a delegate which knows how to render that particular function, perhaps even using a totally different physical function call.
|
||||
Hibernate provides a means of mapping a _logical_ function name to a delegate which knows how to render that particular function, perhaps even using a totally different physical function call.
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
|
|
|
@ -21,7 +21,7 @@ Users of the older Hibernate `org.hibernate.Criteria` query API will recognize t
|
|||
|
||||
Criteria queries are essentially an object graph, where each part of the graph represents an increasingly more atomic part of the query (as we navigate down this graph).
|
||||
The first step in performing a criteria query is building this graph.
|
||||
The `javax.persistence.criteria.CriteriaBuilder` interface is the first thing with which you need to become acquainted with begin using criteria queries.
|
||||
The `javax.persistence.criteria.CriteriaBuilder` interface is the first thing with which you need to become acquainted before using criteria queries.
|
||||
Its role is that of a factory for all the individual pieces of the criteria.
|
||||
You obtain a `javax.persistence.criteria.CriteriaBuilder` instance by calling the `getCriteriaBuilder()` method of either `javax.persistence.EntityManagerFactory` or `javax.persistence.EntityManager`.
|
||||
|
||||
|
@ -183,10 +183,10 @@ positional::
|
|||
The simple _Object get(int position)_ form is very similar to the access illustrated in <<criteria-typedquery-multiselect-array-explicit-example>> and <<criteria-typedquery-multiselect-array-implicit-example>>.
|
||||
The _<X> X get(int position, Class<X> type)_ form allows typed positional access, but based on the explicitly supplied type which the tuple value must be type-assignable to.
|
||||
aliased::
|
||||
Allows access to the underlying tuple values based an (optionally) assigned alias.
|
||||
Allows access to the underlying tuple values based on (optionally) assigned alias.
|
||||
The example query did not apply an alias.
|
||||
An alias would be applied via the alias method on `javax.persistence.criteria.Selection`.
|
||||
Just like `positional` access, there is both a typed (__Object get(String alias)__) and an untyped (__<X> X get(String alias, Class<X> type__ form.
|
||||
Just like `positional` access, there is both a typed (__Object get(String alias)__) and an untyped (__<X> X get(String alias, Class<X> type)__) form.
|
||||
|
||||
[[criteria-from]]
|
||||
=== FROM clause
|
||||
|
@ -207,7 +207,7 @@ All the individual parts of the FROM clause (roots, joins, paths) implement the
|
|||
=== Roots
|
||||
|
||||
Roots define the basis from which all joins, paths and attributes are available in the query.
|
||||
A root is always an entity type. Roots are defined and added to the criteria by the overloaded from methods on `javax.persistence.criteria.CriteriaQuery`:
|
||||
A root is always an entity type. Roots are defined and added to the criteria by the overloaded __from__ methods on `javax.persistence.criteria.CriteriaQuery`:
|
||||
|
||||
[[criteria-from-root-methods-example]]
|
||||
.Root methods
|
||||
|
@ -245,7 +245,7 @@ include::{sourcedir}/CriteriaTest.java[tags=criteria-from-multiple-root-example]
|
|||
=== Joins
|
||||
|
||||
Joins allow navigation from other `javax.persistence.criteria.From` to either association or embedded attributes.
|
||||
Joins are created by the numerous overloaded join methods of the `javax.persistence.criteria.From` interface.
|
||||
Joins are created by the numerous overloaded __join__ methods of the `javax.persistence.criteria.From` interface.
|
||||
|
||||
[[criteria-from-join-example]]
|
||||
.Join example
|
||||
|
@ -260,7 +260,7 @@ include::{sourcedir}/CriteriaTest.java[tags=criteria-from-join-example]
|
|||
=== Fetches
|
||||
|
||||
Just like in HQL and JPQL, criteria queries can specify that associated data be fetched along with the owner.
|
||||
Fetches are created by the numerous overloaded fetch methods of the `javax.persistence.criteria.From` interface.
|
||||
Fetches are created by the numerous overloaded __fetch__ methods of the `javax.persistence.criteria.From` interface.
|
||||
|
||||
[[criteria-from-fetch-example]]
|
||||
.Join fetch example
|
||||
|
|
|
@ -129,10 +129,10 @@ Relying on provider specific hints limits your applications portability to some
|
|||
`org.hibernate.comment`::
|
||||
Defines the comment to apply to the generated SQL. See `org.hibernate.query.Query#setComment`.
|
||||
`org.hibernate.fetchSize`::
|
||||
Defines the JDBC fetch-size to use. See `org.hibernate.query.Query#setFetchSize`
|
||||
Defines the JDBC fetch-size to use. See `org.hibernate.query.Query#setFetchSize`.
|
||||
`org.hibernate.flushMode`::
|
||||
Defines the Hibernate-specific `FlushMode` to use. See `org.hibernate.query.Query#setFlushMode.` If possible, prefer using `javax.persistence.Query#setFlushMode` instead.
|
||||
`org.hibernate.readOnly`:: Defines that entities and collections loaded by this query should be marked as read-only. See `org.hibernate.query.Query#setReadOnly`
|
||||
`org.hibernate.readOnly`:: Defines that entities and collections loaded by this query should be marked as read-only. See `org.hibernate.query.Query#setReadOnly`.
|
||||
|
||||
The final thing that needs to happen before the query can be executed is to bind the values for any defined parameters.
|
||||
JPA defines a simplified set of parameter binding methods.
|
||||
|
@ -577,9 +577,9 @@ Superclass properties are not allowed and subclass properties do not make sense.
|
|||
In other words, `INSERT` statements are inherently non-polymorphic.
|
||||
|
||||
`select_statement` can be any valid HQL select query, with the caveat that the return types must match the types expected by the insert.
|
||||
Currently, this is checked during query compilation rather than allowing the check to relegate to the database.
|
||||
Currently, this is checked during query compilation rather than allowing the check to delegate to the database.
|
||||
This may cause problems between Hibernate Types which are _equivalent_ as opposed to __equal__.
|
||||
For example, this might cause lead to issues with mismatches between an attribute mapped as a `org.hibernate.type.DateType` and an attribute defined as a `org.hibernate.type.TimestampType`,
|
||||
For example, this might lead to issues with mismatches between an attribute mapped as a `org.hibernate.type.DateType` and an attribute defined as a `org.hibernate.type.TimestampType`,
|
||||
even though the database might not make a distinction or might be able to handle the conversion.
|
||||
|
||||
For the id attribute, the insert statement gives you two options.
|
||||
|
@ -622,7 +622,7 @@ In other words, JPQL says they _can be_ case-insensitive and so Hibernate must b
|
|||
=== Root entity references
|
||||
|
||||
A root entity reference, or what JPA calls a `range variable declaration`, is specifically a reference to a mapped entity type from the application.
|
||||
It cannot name component/ embeddable types.
|
||||
It cannot name component/embeddable types.
|
||||
And associations, including collections, are handled in a different manner, as later discussed.
|
||||
|
||||
The BNF for a root entity reference is:
|
||||
|
@ -648,7 +648,7 @@ We see that the query is defining a root entity reference to the `org.hibernate.
|
|||
Additionally, it declares an alias of `p` to that `org.hibernate.userguide.model.Person` reference, which is the identification variable.
|
||||
|
||||
Usually, the root entity reference represents just the `entity name` rather than the entity class FQN (fully-qualified name).
|
||||
By default, the entity name is the unqualified entity class name, here `Person`
|
||||
By default, the entity name is the unqualified entity class name, here `Person`.
|
||||
|
||||
[[hql-root-reference-jpql-example]]
|
||||
.Simple query using entity name for root entity reference
|
||||
|
@ -737,7 +737,7 @@ Explicit joins may reference association or component/embedded attributes.
|
|||
In the case of component/embedded attributes, the join is simply logical and does not correlate to a physical (SQL) join.
|
||||
For further information about collection-valued association references, see <<hql-collection-valued-associations>>.
|
||||
|
||||
An important use case for explicit joins is to define `FETCH JOINS` which override the laziness of the joined association.
|
||||
An important use case for explicit joins is to define ``FETCH JOIN``s which override the laziness of the joined association.
|
||||
As an example, given an entity named `Person` with a collection-valued association named `phones`, the `JOIN FETCH` will also load the child collection in the same SQL query:
|
||||
|
||||
[[hql-explicit-fetch-join-example]]
|
||||
|
@ -864,7 +864,7 @@ include::{sourcedir}/SelectDistinctTest.java[tags=hql-distinct-entity-query-exam
|
|||
====
|
||||
|
||||
In this case, `DISTINCT` is used because there can be multiple `Books` entities associated with a given `Person`.
|
||||
If in the database there are 3 `Persons` in the database and each person has 2 `Books`, without `DISTINCT` this query will return 6 `Persons` since
|
||||
If in the database there are 3 ``Person``s in the database and each person has 2 ``Book``s, without `DISTINCT` this query will return 6 ``Person``s since
|
||||
the SQL-level result-set size is given by the number of joined `Book` records.
|
||||
|
||||
However, the `DISTINCT` keyword is passed to the database as well:
|
||||
|
@ -923,7 +923,7 @@ Which form an application chooses to use is simply a matter of taste.
|
|||
=== Special case - qualified path expressions
|
||||
|
||||
We said earlier that collection-valued associations actually refer to the _values_ of that collection.
|
||||
Based on the type of collection, there are also available a set of explicit qualification expressions.
|
||||
Based on the type of collection, there are also a set of explicit qualification expressions available.
|
||||
|
||||
[[hql-collection-qualification-example]]
|
||||
.Qualified collection references example
|
||||
|
@ -974,8 +974,8 @@ and the query would return instances of all three.
|
|||
|
||||
This behavior can be altered in two ways:
|
||||
|
||||
- by limiting the query to select only from the subclass entity
|
||||
- by using either the `org.hibernate.annotations.Polymorphism` annotation (global, and Hibernate-specific). See the <<chapters/domain/inheritance.adoc#entity-inheritance-polymorphism, `@Polymorphism` section for more info about this use case>>.
|
||||
- by limiting the query to select only from the subclass entity.
|
||||
- by using either the `org.hibernate.annotations.Polymorphism` annotation (global, and Hibernate-specific). See the <<chapters/domain/inheritance.adoc#entity-inheritance-polymorphism, `@Polymorphism` section>> for more info about this use case.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
|
@ -1091,13 +1091,13 @@ include::{sourcedir}/HQLTest.java[tags=hql-concatenation-example]
|
|||
----
|
||||
====
|
||||
|
||||
See <<hql-exp-functions>> for details on the `concat()` function
|
||||
See <<hql-exp-functions>> for details on the `concat()` function.
|
||||
|
||||
[[hql-aggregate-functions]]
|
||||
=== Aggregate functions
|
||||
|
||||
Aggregate functions are also valid expressions in HQL and JPQL.
|
||||
The semantic is the same as their SQL counterpart.
|
||||
The semantics is the same as their SQL counterpart.
|
||||
The supported aggregate functions are:
|
||||
|
||||
`COUNT` (including distinct/all qualifiers)::
|
||||
|
@ -1161,7 +1161,7 @@ include::{sourcedir}/HQLTest.java[tags=hql-substring-function-example]
|
|||
====
|
||||
|
||||
UPPER::
|
||||
Upper cases the specified string
|
||||
Upper cases the specified string.
|
||||
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
|
@ -1171,7 +1171,7 @@ include::{sourcedir}/HQLTest.java[tags=hql-upper-function-example]
|
|||
====
|
||||
|
||||
LOWER::
|
||||
Lower cases the specified string
|
||||
Lower cases the specified string.
|
||||
|
||||
====
|
||||
[source, JAVA, indent=0]
|
||||
|
@ -1345,7 +1345,7 @@ include::{sourcedir}/HQLTest.java[tags=hql-str-function-example]
|
|||
=== User-defined functions
|
||||
|
||||
Hibernate Dialects can register additional functions known to be available for that particular database product.
|
||||
These functions are also available in HQL (and JPQL, though only when using Hibernate as the JPA provider obviously).
|
||||
These functions are also available in HQL (and JPQL, though only when using Hibernate as the JPA provider, obviously).
|
||||
However, they would only be available when using that database Dialect.
|
||||
Applications that aim for database portability should avoid using functions in this category.
|
||||
|
||||
|
@ -1779,7 +1779,7 @@ The types of the `single_valued_expression` and the individual values in the `si
|
|||
JPQL limits the valid types here to string, numeric, date, time, timestamp, and enum types, and, in JPQL, `single_valued_expression` can only refer to:
|
||||
|
||||
* "state fields", which is its term for simple attributes. Specifically, this excludes association and component/embedded attributes.
|
||||
* entity type expressions. See <<hql-entity-type-exp>>
|
||||
* entity type expressions. See <<hql-entity-type-exp>>.
|
||||
|
||||
In HQL, `single_valued_expression` can refer to a far more broad set of expression types.
|
||||
Single-valued association are allowed, and so are component/embedded attributes, although that feature depends on the level of support for tuple or "row value constructor syntax" in the underlying database.
|
||||
|
@ -1985,7 +1985,7 @@ The query plan cache can be configured via the following configuration propertie
|
|||
`hibernate.query.plan_cache_max_size`::
|
||||
This setting gives the maximum number of entries of the plan cache. The default value is 2048.
|
||||
`hibernate.query.plan_parameter_metadata_max_size`::
|
||||
The setting gives the maximum number of `ParameterMetadataImpl` instances maintained by the query plan cache. The `ParameterMetadataImpl` object encapsulates metadata about parameters encountered within a query. The default is 128.
|
||||
The setting gives the maximum number of `ParameterMetadataImpl` instances maintained by the query plan cache. The `ParameterMetadataImpl` object encapsulates metadata about parameters encountered within a query. The default value is 128.
|
||||
|
||||
Now, if you have many JPQL or Criteria API queries, it's a good idea to increase the query plan cache size so that the vast majority of executing entity queries can skip the compilation phase, therefore reducing execution time.
|
||||
|
||||
|
|
|
@ -297,7 +297,7 @@ Please note that the alias names in the result are simply examples, each alias w
|
|||
[width="100%",cols="23%,22%,55%",options="header",]
|
||||
|=======================================================================
|
||||
|Description |Syntax |Example
|
||||
|A simple property |`{[aliasname].[propertyname]`
|
||||
|A simple property |`{[aliasname].[propertyname]}`
|
||||
|`A_NAME as {item.name}`
|
||||
|
||||
|A composite property |`{[aliasname].[componentname].[propertyname]}`
|
||||
|
@ -608,7 +608,7 @@ include::{sourcedir}/SQLTest.java[tags=sql-hibernate-entity-associations_named-q
|
|||
----
|
||||
====
|
||||
|
||||
Finally, if the association to a related entity involve a composite primary key, a `@FieldResult` element should be used for each foreign key column.
|
||||
Finally, if the association to a related entity involves a composite primary key, a `@FieldResult` element should be used for each foreign key column.
|
||||
The `@FieldResult` name is composed of the property name for the relationship, followed by a dot ("."), followed by the name or the field or property of the primary key.
|
||||
For this example, the following entities are going to be used:
|
||||
|
||||
|
@ -900,8 +900,8 @@ The only requirement is to set the `callable` attribute to `true`.
|
|||
|
||||
To check that the execution happens correctly, Hibernate allows you to define one of those three strategies:
|
||||
|
||||
* none: no check is performed; the store procedure is expected to fail upon constraint violations
|
||||
* count: use of row-count returned by the `executeUpdate()` method call to check that the update was successful
|
||||
* none: no check is performed; the store procedure is expected to fail upon constraint violations.
|
||||
* count: use of row-count returned by the `executeUpdate()` method call to check that the update was successful.
|
||||
* param: like count but using a `CallableStatement` output parameter.
|
||||
|
||||
To define the result check style, use the `check` parameter.
|
||||
|
|
|
@ -56,7 +56,7 @@ include::{resourcesdir}/schema-generation.sql[]
|
|||
If we configure Hibernate to import the script above:
|
||||
|
||||
[[schema-generation-import-file-configuration-example]]
|
||||
.Enabling query cache
|
||||
.Enabling schema generation import file
|
||||
====
|
||||
[source, XML, indent=0]
|
||||
----
|
||||
|
@ -204,7 +204,7 @@ The second INSERT statement fails because of the unique constraint violation.
|
|||
|
||||
The https://javaee.github.io/javaee-spec/javadocs/javax/persistence/Index.html[`@Index`] annotation is used by the automated schema generation tool to create a database index.
|
||||
|
||||
Considering the following entity mapping, Hibernate generates the index when creating the database schema:
|
||||
Considering the following entity mapping. Hibernate generates the index when creating the database schema:
|
||||
|
||||
[[schema-generation-columns-index-mapping-example]]
|
||||
.`@Index` mapping example
|
||||
|
|
|
@ -43,7 +43,7 @@ https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/
|
|||
`getDomainDataRegionStatistics(String regionName)`:: Get the second-level cache statistics per domain data (entity, collection, natural-id) region.
|
||||
`getQueryRegionStatistics(String regionName)`:: Get the second-level cache statistics per query region.
|
||||
`getCacheRegionStatistics(String regionName)`:: Get statistics for either a domain-data or query-result region
|
||||
(this method checks both, preferring domain data region if one).
|
||||
(this method checks both, preferring domain data region if one exists).
|
||||
|
||||
[[statistics-session-factory]]
|
||||
==== SessionFactory statistics methods
|
||||
|
@ -74,7 +74,7 @@ https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/
|
|||
[[statistics-concurrency-control]]
|
||||
==== Concurrency Control statistics methods
|
||||
|
||||
`getOptimisticFailureCount`:: The number of Hibernate `StaleObjectStateException`s or JPA `OptimisticLockException`s that occurred.
|
||||
`getOptimisticFailureCount`:: The number of Hibernate ``StaleObjectStateException``s or JPA ``OptimisticLockException``s that occurred.
|
||||
|
||||
[[statistics-entity]]
|
||||
==== Entity statistics methods
|
||||
|
@ -158,7 +158,7 @@ to make room for new query entries.
|
|||
[[statistics-query-plan-cache]]
|
||||
=== Query plan cache statistics
|
||||
|
||||
Every entity query, be it JPQL/HQL or Criteria API is compiled to an AST (Abstract Syntax Tree),
|
||||
Every entity query, be it JPQL/HQL or Criteria API, is compiled to an AST (Abstract Syntax Tree),
|
||||
and this process is resource-intensive.
|
||||
To speed up the entity query executions, Hibernate offers a query plan cache so that compiled plans can be reused.
|
||||
|
||||
|
|
|
@ -5,9 +5,9 @@
|
|||
It is important to understand that the term transaction has many different yet related meanings in regards to persistence and Object/Relational Mapping.
|
||||
In most use-cases these definitions align, but that is not always the case.
|
||||
|
||||
* Might refer to the physical transaction with the database.
|
||||
* Might refer to the logical notion of a transaction as related to a persistence context.
|
||||
* Might refer to the application notion of a Unit-of-Work, as defined by the archetypal pattern.
|
||||
* It might refer to the physical transaction with the database.
|
||||
* It might refer to the logical notion of a transaction as related to a persistence context.
|
||||
* It might refer to the application notion of a Unit-of-Work, as defined by the archetypal pattern.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
|
@ -96,12 +96,12 @@ Hibernate provides many implementations of the `JtaPlatform` contract, all with
|
|||
|
||||
Hibernate provides an API for helping to isolate applications from the differences in the underlying physical transaction system in use.
|
||||
Based on the configured `TransactionCoordinatorBuilder`, Hibernate will simply do the right thing when this transaction API is used by the application.
|
||||
This allows your applications and components to be more portable move around into different environments.
|
||||
This allows your applications and components to be more portable to move around into different environments.
|
||||
|
||||
To use this API, you would obtain the `org.hibernate.Transaction` from the Session. `Transaction` allows for all the normal operations you'd expect: `begin`, `commit` and `rollback`, and it even exposes some cool methods like:
|
||||
|
||||
`markRollbackOnly`:: that works in both JTA and JDBC
|
||||
`getTimeout` and `setTimeout`:: that again work in both JTA and JDBC
|
||||
`markRollbackOnly`:: that works in both JTA and JDBC.
|
||||
`getTimeout` and `setTimeout`:: that again work in both JTA and JDBC.
|
||||
`registerSynchronization`:: that allows you to register JTA Synchronizations even in non-JTA environments.
|
||||
In fact, in both JTA and JDBC environments, these `Synchronizations` are kept locally by Hibernate.
|
||||
In JTA environments, Hibernate will only ever register one single `Synchronization` with the `TransactionManager` to avoid ordering problems.
|
||||
|
@ -183,7 +183,7 @@ This is also known and used as __session-per-request__.
|
|||
The beginning and end of a Hibernate session is defined by the duration of a database transaction.
|
||||
If you use programmatic transaction demarcation in plain Java SE without JTA, you are advised to use the Hibernate `Transaction` API to hide the underlying transaction system from your code.
|
||||
If you use JTA, you can utilize the JTA interfaces to demarcate transactions.
|
||||
If you execute in an EJB container that supports CMT, transaction boundaries are defined declaratively and you do not need any transaction or session demarcation operations in your code. Refer to <<transactions>> for more information and code examples.
|
||||
If you execute in an EJB container that supports CMT, transaction boundaries are defined declaratively and you do not need any transaction or session demarcation operations in your code.
|
||||
|
||||
The `hibernate.current_session_context_class` configuration parameter defines which `org.hibernate.context.spi.CurrentSessionContext` implementation should be used.
|
||||
For backward compatibility, if this configuration parameter is not set but a `org.hibernate.engine.transaction.jta.platform.spi.JtaPlatform` is configured, Hibernate will use the `org.hibernate.context.internal.JTASessionContext`.
|
||||
|
@ -306,7 +306,7 @@ Rolling back the database transaction does not put your business objects back in
|
|||
This means that the database state and the business objects will be out of sync.
|
||||
Usually, this is not a problem because exceptions are not recoverable and you will have to start over after rollback anyway.
|
||||
|
||||
For more details, check out the <<chapters/pc/PersistenceContext.adoc#pc-exception-handling, _exception handling_>> chapter.
|
||||
For more details, check out the <<chapters/pc/PersistenceContext.adoc#pc-exception-handling, _exception handling_>> section in <<chapters/pc/PersistenceContext.adoc#batch,Persistence Context chapter>>.
|
||||
|
||||
The `Session` caches every object that is in a persistent state (watched and checked for dirty state by Hibernate).
|
||||
If you keep it open for a long time or simply load too much data, it will grow endlessly until you get an `OutOfMemoryException`.
|
||||
|
|
|
@ -396,3 +396,4 @@ p{margin-bottom:1.25rem}
|
|||
.print-only{display:block!important}
|
||||
.hide-for-print{display:none!important}
|
||||
.show-for-print{display:inherit!important}}
|
||||
#toc>a{margin-left:5px}
|
||||
|
|
|
@ -98,7 +98,8 @@ h4,h5,h6{
|
|||
background-color:gainsboro !important;
|
||||
border-radius: 1em !important;
|
||||
width: 100% !important;
|
||||
padding: 2em 0em 2em 0em !important;
|
||||
padding: 1em 0em 1em 0em !important;
|
||||
margin: 1em 0em 1em 0em !important;
|
||||
}
|
||||
td.icon {
|
||||
display:block !important;
|
||||
|
@ -218,3 +219,8 @@ code{
|
|||
font-weight:400 !important;
|
||||
color:rgba(0,0,0,.9) !important
|
||||
}
|
||||
.image img{
|
||||
display: block;
|
||||
margin-left: auto;
|
||||
margin-right: auto;
|
||||
}
|
|
@ -1,5 +1,7 @@
|
|||
var versions = {
|
||||
'current' : '/current/userguide/html_single/Hibernate_User_Guide.html',
|
||||
'5.4' : '/5.4/userguide/html_single/Hibernate_User_Guide.html',
|
||||
'5.3' : '/5.3/userguide/html_single/Hibernate_User_Guide.html',
|
||||
'5.2' : '/5.2/userguide/html_single/Hibernate_User_Guide.html',
|
||||
'5.1' : '/5.1/userguide/html_single/Hibernate_User_Guide.html',
|
||||
'5.0' : '/5.0/userguide/html_single/Hibernate_User_Guide.html',
|
||||
|
@ -49,4 +51,4 @@ $(document).ready(function() {
|
|||
$('#tocsearch').after('<a href="#" id="toctreeexpand" title="Expand"><i class="fa fa-plus-square" aria-hidden="true"></i></a><a href="#" id="toctreecollapse" title="Collapse"><i class="fa fa-minus-square" aria-hidden="true"></i></a>');
|
||||
$('#toctreeexpand').click(function() { $('#toctree').jstree('open_all'); });
|
||||
$('#toctreecollapse').click(function() { $('#toctree').jstree('close_all'); });
|
||||
});
|
||||
});
|
||||
|
|
|
@ -16,27 +16,25 @@ import org.jboss.logging.Logger;
|
|||
*
|
||||
* @author Steve Ebersole
|
||||
*/
|
||||
public class Version {
|
||||
private static String version;
|
||||
public final class Version {
|
||||
|
||||
private static final String VERSION = initVersion();
|
||||
|
||||
private static String initVersion() {
|
||||
final String version = Version.class.getPackage().getImplementationVersion();
|
||||
return version != null ? version : "[WORKING]";
|
||||
}
|
||||
|
||||
private Version() {
|
||||
}
|
||||
|
||||
/**
|
||||
* Access to the Hibernate version.
|
||||
*
|
||||
* IMPL NOTE : Real value is injected by the build.
|
||||
* Access to the Hibernate ORM version.
|
||||
*
|
||||
* @return The Hibernate version
|
||||
*/
|
||||
public static String getVersionString() {
|
||||
if ( version == null ) {
|
||||
version = Version.class.getPackage().getImplementationVersion();
|
||||
if ( version == null ) {
|
||||
version = "[WORKING]";
|
||||
}
|
||||
}
|
||||
return version;
|
||||
return VERSION;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -28,6 +28,7 @@ import org.hibernate.dialect.Cache71Dialect;
|
|||
import org.hibernate.dialect.DB2390Dialect;
|
||||
import org.hibernate.dialect.DB2390V8Dialect;
|
||||
import org.hibernate.dialect.DB2400Dialect;
|
||||
import org.hibernate.dialect.DB2400V7R3Dialect;
|
||||
import org.hibernate.dialect.DB2Dialect;
|
||||
import org.hibernate.dialect.DerbyTenFiveDialect;
|
||||
import org.hibernate.dialect.DerbyTenSevenDialect;
|
||||
|
@ -192,6 +193,7 @@ public class StrategySelectorBuilder {
|
|||
addDialect( strategySelector, DB2390Dialect.class );
|
||||
addDialect( strategySelector, DB2390V8Dialect.class );
|
||||
addDialect( strategySelector, DB2400Dialect.class );
|
||||
addDialect( strategySelector, DB2400V7R3Dialect.class );
|
||||
addDialect( strategySelector, DerbyTenFiveDialect.class );
|
||||
addDialect( strategySelector, DerbyTenSixDialect.class );
|
||||
addDialect( strategySelector, DerbyTenSevenDialect.class );
|
||||
|
|
|
@ -18,6 +18,7 @@ import javax.persistence.ManyToOne;
|
|||
import javax.persistence.OneToMany;
|
||||
import javax.persistence.OneToOne;
|
||||
|
||||
import net.bytebuddy.utility.OpenedClassReader;
|
||||
import org.hibernate.bytecode.enhance.internal.bytebuddy.EnhancerImpl.AnnotatedFieldDescription;
|
||||
import org.hibernate.bytecode.enhance.spi.EnhancementException;
|
||||
import org.hibernate.bytecode.enhance.spi.EnhancerConstants;
|
||||
|
@ -296,7 +297,7 @@ final class BiDirectionalAssociationHandler implements Implementation {
|
|||
@Override
|
||||
public Size apply(
|
||||
MethodVisitor methodVisitor, Context implementationContext, MethodDescription instrumentedMethod) {
|
||||
return delegate.apply( new MethodVisitor( Opcodes.ASM5, methodVisitor ) {
|
||||
return delegate.apply( new MethodVisitor( OpenedClassReader.ASM_API, methodVisitor ) {
|
||||
|
||||
@Override
|
||||
public void visitMethodInsn(int opcode, String owner, String name, String desc, boolean itf) {
|
||||
|
|
|
@ -11,6 +11,7 @@ import static net.bytebuddy.matcher.ElementMatchers.named;
|
|||
|
||||
import javax.persistence.Id;
|
||||
|
||||
import net.bytebuddy.utility.OpenedClassReader;
|
||||
import org.hibernate.bytecode.enhance.internal.bytebuddy.EnhancerImpl.AnnotatedFieldDescription;
|
||||
import org.hibernate.bytecode.enhance.spi.EnhancementException;
|
||||
import org.hibernate.bytecode.enhance.spi.EnhancerConstants;
|
||||
|
@ -54,7 +55,7 @@ final class FieldAccessEnhancer implements AsmVisitorWrapper.ForDeclaredMethods.
|
|||
TypePool typePool,
|
||||
int writerFlags,
|
||||
int readerFlags) {
|
||||
return new MethodVisitor( Opcodes.ASM5, methodVisitor ) {
|
||||
return new MethodVisitor( OpenedClassReader.ASM_API, methodVisitor ) {
|
||||
@Override
|
||||
public void visitFieldInsn(int opcode, String owner, String name, String desc) {
|
||||
if ( opcode != Opcodes.GETFIELD && opcode != Opcodes.PUTFIELD ) {
|
||||
|
|
|
@ -18,6 +18,7 @@ import java.util.Objects;
|
|||
|
||||
import javax.persistence.Embedded;
|
||||
|
||||
import net.bytebuddy.utility.OpenedClassReader;
|
||||
import org.hibernate.bytecode.enhance.internal.bytebuddy.EnhancerImpl.AnnotatedFieldDescription;
|
||||
import org.hibernate.bytecode.enhance.spi.EnhancerConstants;
|
||||
import org.hibernate.engine.spi.CompositeOwner;
|
||||
|
@ -134,7 +135,7 @@ final class PersistentAttributeTransformer implements AsmVisitorWrapper.ForDecla
|
|||
TypePool typePool,
|
||||
int writerFlags,
|
||||
int readerFlags) {
|
||||
return new MethodVisitor( Opcodes.ASM5, methodVisitor ) {
|
||||
return new MethodVisitor( OpenedClassReader.ASM_API, methodVisitor ) {
|
||||
@Override
|
||||
public void visitFieldInsn(int opcode, String owner, String name, String desc) {
|
||||
if ( isEnhanced( owner, name, desc ) ) {
|
||||
|
|
|
@ -0,0 +1,63 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.dialect;
|
||||
|
||||
import org.hibernate.dialect.identity.DB2IdentityColumnSupport;
|
||||
import org.hibernate.dialect.identity.IdentityColumnSupport;
|
||||
import org.hibernate.dialect.unique.DefaultUniqueDelegate;
|
||||
import org.hibernate.dialect.unique.UniqueDelegate;
|
||||
|
||||
/**
|
||||
* An SQL dialect for i. This class provides support for DB2 Universal Database for i V7R1 and
|
||||
* later, also known as DB2/400.
|
||||
*
|
||||
* @author Pierrick Rouxel (pierrickrouxel)
|
||||
*/
|
||||
public class DB2400V7R3Dialect extends DB2400Dialect {
|
||||
|
||||
private final UniqueDelegate uniqueDelegate;
|
||||
|
||||
public DB2400V7R3Dialect() {
|
||||
super();
|
||||
|
||||
uniqueDelegate = new DefaultUniqueDelegate(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public UniqueDelegate getUniqueDelegate() {
|
||||
return uniqueDelegate;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean supportsSequences() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getQuerySequencesString() {
|
||||
return "select distinct sequence_name from qsys2.syssequences " +
|
||||
"where ( current_schema = '*LIBL' and sequence_schema in ( select schema_name from qsys2.library_list_info ) ) " +
|
||||
"or sequence_schema = current_schema";
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("deprecation")
|
||||
public String getLimitString(String sql, int offset, int limit) {
|
||||
if ( offset == 0 ) {
|
||||
return sql + " fetch first " + limit + " rows only";
|
||||
}
|
||||
//nest the main query in an outer select
|
||||
return "select * from ( select inner2_.*, rownumber() over(order by order of inner2_) as rownumber_ from ( "
|
||||
+ sql + " fetch first " + limit + " rows only ) as inner2_ ) as inner1_ where rownumber_ > "
|
||||
+ offset + " order by rownumber_";
|
||||
}
|
||||
|
||||
@Override
|
||||
public IdentityColumnSupport getIdentityColumnSupport() {
|
||||
return new DB2IdentityColumnSupport();
|
||||
}
|
||||
}
|
|
@ -62,7 +62,15 @@ public enum Database {
|
|||
case "DSN": // z/OS
|
||||
return new DB2390Dialect(info);
|
||||
case "QSQ": // i
|
||||
return new DB2400Dialect();
|
||||
final int majorVersion = info.getDatabaseMajorVersion();
|
||||
final int minorVersion = info.getDatabaseMinorVersion();
|
||||
|
||||
if ( majorVersion > 7 || ( majorVersion == 7 && minorVersion >= 3 ) ) {
|
||||
return new DB2400V7R3Dialect();
|
||||
}
|
||||
else {
|
||||
return new DB2400Dialect();
|
||||
}
|
||||
default:
|
||||
return null;
|
||||
}
|
||||
|
|
|
@ -80,7 +80,7 @@ public class MariaDBDialect extends MySQLDialect {
|
|||
@Override
|
||||
public String getQuerySequencesString() {
|
||||
return getSequenceSupport().supportsSequences()
|
||||
? "select table_name from information_schema.TABLES where table_type='SEQUENCE'"
|
||||
? "select table_name from information_schema.TABLES where table_schema = database() and table_type = 'SEQUENCE'"
|
||||
: super.getQuerySequencesString(); //fancy way to write "null"
|
||||
}
|
||||
|
||||
|
|
|
@ -30,6 +30,8 @@ import org.hibernate.event.spi.PostLoadEvent;
|
|||
import org.hibernate.event.spi.PostLoadEventListener;
|
||||
import org.hibernate.event.spi.PreLoadEvent;
|
||||
import org.hibernate.event.spi.PreLoadEventListener;
|
||||
import org.hibernate.graph.spi.AttributeNodeImplementor;
|
||||
import org.hibernate.graph.spi.GraphImplementor;
|
||||
import org.hibernate.internal.CoreMessageLogger;
|
||||
import org.hibernate.persister.entity.EntityPersister;
|
||||
import org.hibernate.pretty.MessageHelper;
|
||||
|
@ -181,6 +183,9 @@ public final class TwoPhaseLoad {
|
|||
String entityName = persister.getEntityName();
|
||||
String[] propertyNames = persister.getPropertyNames();
|
||||
final Type[] types = persister.getPropertyTypes();
|
||||
|
||||
final GraphImplementor<?> fetchGraphContext = session.getFetchGraphLoadContext();
|
||||
|
||||
for ( int i = 0; i < hydratedState.length; i++ ) {
|
||||
final Object value = hydratedState[i];
|
||||
if ( debugEnabled ) {
|
||||
|
@ -204,7 +209,7 @@ public final class TwoPhaseLoad {
|
|||
// IMPLEMENTATION NOTE: this is a lazy collection property on a bytecode-enhanced entity.
|
||||
// HHH-10989: We need to resolve the collection so that a CollectionReference is added to StatefulPersistentContext.
|
||||
// As mentioned above, hydratedState[i] needs to remain LazyPropertyInitializer.UNFETCHED_PROPERTY
|
||||
// so do not assign the resolved, unitialized PersistentCollection back to hydratedState[i].
|
||||
// so do not assign the resolved, uninitialized PersistentCollection back to hydratedState[i].
|
||||
Boolean overridingEager = getOverridingEager( session, entityName, propertyNames[i], types[i], debugEnabled );
|
||||
types[i].resolve( value, session, entity, overridingEager );
|
||||
}
|
||||
|
@ -227,6 +232,10 @@ public final class TwoPhaseLoad {
|
|||
LOG.debugf( "Skipping <unknown> attribute : `%s`", propertyNames[i] );
|
||||
}
|
||||
}
|
||||
|
||||
if ( session.getFetchGraphLoadContext() != fetchGraphContext ) {
|
||||
session.setFetchGraphLoadContext( fetchGraphContext );
|
||||
}
|
||||
}
|
||||
|
||||
//Must occur after resolving identifiers!
|
||||
|
@ -364,27 +373,51 @@ public final class TwoPhaseLoad {
|
|||
}
|
||||
|
||||
/**
|
||||
* Check if eager of the association is overriden by anything.
|
||||
* Check if eager of the association is overridden (i.e. skipping metamodel strategy), including (order sensitive):
|
||||
* <ol>
|
||||
* <li>fetch graph</li>
|
||||
* <li>fetch profile</li>
|
||||
* </ol>
|
||||
*
|
||||
* @param session session
|
||||
* @param entityName entity name
|
||||
* @param associationName association name
|
||||
*
|
||||
* @param associationType association type
|
||||
* @param isDebugEnabled if debug log level enabled
|
||||
* @return null if there is no overriding, true if it is overridden to eager and false if it is overridden to lazy
|
||||
*/
|
||||
private static Boolean getOverridingEager(
|
||||
final SharedSessionContractImplementor session,
|
||||
final String entityName,
|
||||
final String associationName,
|
||||
final Type type,
|
||||
final Type associationType,
|
||||
final boolean isDebugEnabled) {
|
||||
// Performance: check type.isCollectionType() first, as type.isAssociationType() is megamorphic
|
||||
if ( type.isCollectionType() || type.isAssociationType() ) {
|
||||
final Boolean overridingEager = isEagerFetchProfile( session, entityName, associationName );
|
||||
if ( associationType.isCollectionType() || associationType.isAssociationType() ) {
|
||||
|
||||
//This method is very hot, and private so let's piggy back on the fact that the caller already knows the debugging state.
|
||||
if ( isDebugEnabled ) {
|
||||
if ( overridingEager != null ) {
|
||||
// check 'fetch graph' first; skip 'fetch profile' if 'fetch graph' takes effect
|
||||
Boolean overridingEager = isEagerFetchGraph( session, associationName, associationType );
|
||||
|
||||
if ( overridingEager != null ) {
|
||||
//This method is very hot, and private so let's piggy back on the fact that the caller already knows the debugging state.
|
||||
if ( isDebugEnabled ) {
|
||||
LOG.debugf(
|
||||
"Overriding eager fetching using fetch graph. EntityName: %s, associationName: %s, eager fetching: %s",
|
||||
entityName,
|
||||
associationName,
|
||||
overridingEager
|
||||
);
|
||||
}
|
||||
|
||||
return overridingEager;
|
||||
}
|
||||
|
||||
// check 'fetch profile' next; skip 'metamodel' if 'fetch profile' takes effect
|
||||
overridingEager = isEagerFetchProfile( session, entityName, associationName );
|
||||
|
||||
if ( overridingEager != null ) {
|
||||
//This method is very hot, and private so let's piggy back on the fact that the caller already knows the debugging state.
|
||||
if ( isDebugEnabled ) {
|
||||
LOG.debugf(
|
||||
"Overriding eager fetching using active fetch profile. EntityName: %s, associationName: %s, eager fetching: %s",
|
||||
entityName,
|
||||
|
@ -392,10 +425,10 @@ public final class TwoPhaseLoad {
|
|||
overridingEager
|
||||
);
|
||||
}
|
||||
return overridingEager;
|
||||
}
|
||||
|
||||
return overridingEager;
|
||||
}
|
||||
// let 'metamodel' decide eagerness
|
||||
return null;
|
||||
}
|
||||
|
||||
|
@ -418,6 +451,39 @@ public final class TwoPhaseLoad {
|
|||
|
||||
return null;
|
||||
}
|
||||
|
||||
private static Boolean isEagerFetchGraph(SharedSessionContractImplementor session, String associationName, Type associationType) {
|
||||
final GraphImplementor<?> context = session.getFetchGraphLoadContext();
|
||||
|
||||
if ( context != null ) {
|
||||
// 'fetch graph' is in effect, so null should not be returned
|
||||
final AttributeNodeImplementor<Object> attributeNode = context.findAttributeNode( associationName );
|
||||
if ( attributeNode != null ) {
|
||||
if ( associationType.isCollectionType() ) {
|
||||
// to do: deal with Map's key and value
|
||||
session.setFetchGraphLoadContext( null );
|
||||
}
|
||||
else {
|
||||
// set 'fetchGraphContext' to sub-graph so graph is explored further (internal loading)
|
||||
final GraphImplementor<?> subContext = attributeNode.getSubGraphMap().get( associationType.getReturnedClass() );
|
||||
if ( subContext != null ) {
|
||||
session.setFetchGraphLoadContext( subContext );
|
||||
}
|
||||
else {
|
||||
session.setFetchGraphLoadContext( null );
|
||||
}
|
||||
}
|
||||
// explicit 'fetch graph' applies, so fetch eagerly
|
||||
return true;
|
||||
}
|
||||
else {
|
||||
// implicit 'fetch graph' applies, so fetch lazily
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* PostLoad cannot occur during initializeEntity, as that call occurs *before*
|
||||
|
|
|
@ -26,6 +26,7 @@ import org.hibernate.engine.jdbc.LobCreationContext;
|
|||
import org.hibernate.engine.jdbc.spi.JdbcCoordinator;
|
||||
import org.hibernate.engine.jdbc.spi.JdbcServices;
|
||||
import org.hibernate.engine.query.spi.sql.NativeSQLQuerySpecification;
|
||||
import org.hibernate.graph.spi.GraphImplementor;
|
||||
import org.hibernate.internal.util.config.ConfigurationHelper;
|
||||
import org.hibernate.persister.entity.EntityPersister;
|
||||
import org.hibernate.query.spi.QueryProducerImplementor;
|
||||
|
@ -457,4 +458,31 @@ public interface SharedSessionContractImplementor
|
|||
*/
|
||||
PersistenceContext getPersistenceContextInternal();
|
||||
|
||||
/**
|
||||
* Get the current fetch graph context (either {@link org.hibernate.graph.spi.RootGraphImplementor} or {@link org.hibernate.graph.spi.SubGraphImplementor}.
|
||||
* Suppose fetch graph is "a(b(c))", then during {@link org.hibernate.engine.internal.TwoPhaseLoad}:
|
||||
* <ul>
|
||||
* <li>when loading root</li>: {@link org.hibernate.graph.spi.RootGraphImplementor root} will be returned
|
||||
* <li>when internally loading 'a'</li>: {@link org.hibernate.graph.spi.SubGraphImplementor subgraph} of 'a' will be returned
|
||||
* <li>when internally loading 'b'</li>: {@link org.hibernate.graph.spi.SubGraphImplementor subgraph} of 'a(b)' will be returned
|
||||
* <li>when internally loading 'c'</li>: {@link org.hibernate.graph.spi.SubGraphImplementor subgraph} of 'a(b(c))' will be returned
|
||||
* </ul>
|
||||
*
|
||||
* @return current fetch graph context; can be null if fetch graph is not effective or the graph eager loading is done.
|
||||
* @see #setFetchGraphLoadContext(GraphImplementor)
|
||||
* @see org.hibernate.engine.internal.TwoPhaseLoad
|
||||
*/
|
||||
default GraphImplementor getFetchGraphLoadContext() {
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the current fetch graph context (either {@link org.hibernate.graph.spi.RootGraphImplementor} or {@link org.hibernate.graph.spi.SubGraphImplementor}.
|
||||
*
|
||||
* @param fetchGraphLoadContext new fetch graph context; can be null (this field will be set to null after root entity loading is done).
|
||||
* @see #getFetchGraphLoadContext()
|
||||
*/
|
||||
default void setFetchGraphLoadContext(GraphImplementor fetchGraphLoadContext) {
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -7,14 +7,12 @@
|
|||
package org.hibernate.event.service.internal;
|
||||
|
||||
import java.lang.reflect.Array;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.LinkedHashSet;
|
||||
import java.util.List;
|
||||
import java.util.ListIterator;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
import org.hibernate.event.service.spi.DuplicationStrategy;
|
||||
|
@ -25,25 +23,22 @@ import org.hibernate.event.service.spi.JpaBootstrapSensitive;
|
|||
import org.hibernate.event.spi.EventType;
|
||||
import org.hibernate.jpa.event.spi.CallbackRegistryConsumer;
|
||||
|
||||
import org.jboss.logging.Logger;
|
||||
|
||||
/**
|
||||
* @author Steve Ebersole
|
||||
* @author Sanne Grinovero
|
||||
*/
|
||||
class EventListenerGroupImpl<T> implements EventListenerGroup<T> {
|
||||
private EventType<T> eventType;
|
||||
private static final Logger log = Logger.getLogger( EventListenerGroupImpl.class );
|
||||
|
||||
private final EventType<T> eventType;
|
||||
private final EventListenerRegistryImpl listenerRegistry;
|
||||
|
||||
private final Set<DuplicationStrategy> duplicationStrategies = new LinkedHashSet<>();
|
||||
|
||||
// Performance: make sure iteration on this type is efficient; in particular we do not want to allocate iterators,
|
||||
// not having to capture state in lambdas.
|
||||
// So we keep the listeners in both a List (for convenience) and in an array (for iteration). Make sure
|
||||
// their content stays in synch!
|
||||
private T[] listeners = null;
|
||||
|
||||
//Update both fields when making changes!
|
||||
private List<T> listenersAsList;
|
||||
|
||||
public EventListenerGroupImpl(EventType<T> eventType, EventListenerRegistryImpl listenerRegistry) {
|
||||
this.eventType = eventType;
|
||||
this.listenerRegistry = listenerRegistry;
|
||||
|
@ -82,11 +77,8 @@ class EventListenerGroupImpl<T> implements EventListenerGroup<T> {
|
|||
|
||||
@Override
|
||||
public void clear() {
|
||||
if ( duplicationStrategies != null ) {
|
||||
duplicationStrategies.clear();
|
||||
}
|
||||
duplicationStrategies.clear();
|
||||
listeners = null;
|
||||
listenersAsList = null;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -94,8 +86,9 @@ class EventListenerGroupImpl<T> implements EventListenerGroup<T> {
|
|||
final T[] ls = listeners;
|
||||
if ( ls != null && ls.length != 0 ) {
|
||||
final U event = eventSupplier.get();
|
||||
for ( T listener : ls ) {
|
||||
actionOnEvent.accept( listener, event );
|
||||
//noinspection ForLoopReplaceableByForEach
|
||||
for ( int i = 0; i < ls.length; i++ ) {
|
||||
actionOnEvent.accept( ls[i], event );
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -104,8 +97,9 @@ class EventListenerGroupImpl<T> implements EventListenerGroup<T> {
|
|||
public final <U> void fireEventOnEachListener(final U event, final BiConsumer<T,U> actionOnEvent) {
|
||||
final T[] ls = listeners;
|
||||
if ( ls != null ) {
|
||||
for ( T listener : ls ) {
|
||||
actionOnEvent.accept( listener, event );
|
||||
//noinspection ForLoopReplaceableByForEach
|
||||
for ( int i = 0; i < ls.length; i++ ) {
|
||||
actionOnEvent.accept( ls[i], event );
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -114,8 +108,9 @@ class EventListenerGroupImpl<T> implements EventListenerGroup<T> {
|
|||
public <U,X> void fireEventOnEachListener(final U event, final X parameter, final EventActionWithParameter<T, U, X> actionOnEvent) {
|
||||
final T[] ls = listeners;
|
||||
if ( ls != null ) {
|
||||
for ( T listener : ls ) {
|
||||
actionOnEvent.applyEventToListener( listener, event, parameter );
|
||||
//noinspection ForLoopReplaceableByForEach
|
||||
for ( int i = 0; i < ls.length; i++ ) {
|
||||
actionOnEvent.applyEventToListener( ls[i], event, parameter );
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -125,118 +120,150 @@ class EventListenerGroupImpl<T> implements EventListenerGroup<T> {
|
|||
duplicationStrategies.add( strategy );
|
||||
}
|
||||
|
||||
/**
|
||||
* Implementation note: should be final for performance reasons.
|
||||
* @deprecated this is not the most efficient way for iterating the event listeners.
|
||||
* See {@link #fireEventOnEachListener(Object, BiConsumer)} and co. for better alternatives.
|
||||
*/
|
||||
@Override
|
||||
@Deprecated
|
||||
public final Iterable<T> listeners() {
|
||||
final List<T> ls = listenersAsList;
|
||||
return ls == null ? Collections.EMPTY_LIST : ls;
|
||||
public void appendListener(T listener) {
|
||||
handleListenerAddition( listener, this::internalAppend );
|
||||
}
|
||||
|
||||
@Override
|
||||
@SafeVarargs
|
||||
public final void appendListeners(T... listeners) {
|
||||
internalAppendListeners( listeners );
|
||||
checkForArrayRefresh();
|
||||
}
|
||||
|
||||
private void checkForArrayRefresh() {
|
||||
final List<T> list = listenersAsList;
|
||||
if ( this.listeners == null ) {
|
||||
T[] a = (T[]) Array.newInstance( eventType.baseListenerInterface(), list.size() );
|
||||
listeners = list.<T>toArray( a );
|
||||
//noinspection ForLoopReplaceableByForEach
|
||||
for ( int i = 0; i < listeners.length; i++ ) {
|
||||
handleListenerAddition( listeners[i], this::internalAppend );
|
||||
}
|
||||
}
|
||||
|
||||
private void internalAppendListeners(T[] listeners) {
|
||||
for ( T listener : listeners ) {
|
||||
internalAppendListener( listener );
|
||||
private void internalAppend(T listener) {
|
||||
prepareListener( listener );
|
||||
|
||||
if ( listeners == null ) {
|
||||
//noinspection unchecked
|
||||
this.listeners = (T[]) Array.newInstance( eventType.baseListenerInterface(), 1 );
|
||||
this.listeners[0] = listener;
|
||||
}
|
||||
else {
|
||||
final int size = this.listeners.length;
|
||||
|
||||
//noinspection unchecked
|
||||
final T[] newCopy = (T[]) Array.newInstance( eventType.baseListenerInterface(), size+1 );
|
||||
|
||||
// first copy the existing listeners
|
||||
System.arraycopy( this.listeners, 0, newCopy, 0, size );
|
||||
|
||||
// and then put the new one after them
|
||||
newCopy[size] = listener;
|
||||
|
||||
this.listeners = newCopy;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void appendListener(T listener) {
|
||||
internalAppendListener( listener );
|
||||
checkForArrayRefresh();
|
||||
}
|
||||
|
||||
private void internalAppendListener(T listener) {
|
||||
if ( listenerShouldGetAdded( listener ) ) {
|
||||
internalAppend( listener );
|
||||
}
|
||||
public void prependListener(T listener) {
|
||||
handleListenerAddition( listener, this::internalPrepend );
|
||||
}
|
||||
|
||||
@Override
|
||||
@SafeVarargs
|
||||
public final void prependListeners(T... listeners) {
|
||||
internalPrependListeners( listeners );
|
||||
checkForArrayRefresh();
|
||||
}
|
||||
|
||||
private void internalPrependListeners(T[] listeners) {
|
||||
for ( T listener : listeners ) {
|
||||
internalPreprendListener( listener );
|
||||
//noinspection ForLoopReplaceableByForEach
|
||||
for ( int i = 0; i < listeners.length; i++ ) {
|
||||
handleListenerAddition( listeners[i], this::internalPrepend );
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void prependListener(T listener) {
|
||||
internalPreprendListener( listener );
|
||||
checkForArrayRefresh();
|
||||
}
|
||||
private void internalPrepend(T listener) {
|
||||
prepareListener( listener );
|
||||
|
||||
private void internalPreprendListener(T listener) {
|
||||
if ( listenerShouldGetAdded( listener ) ) {
|
||||
internalPrepend( listener );
|
||||
if ( this.listeners == null ) {
|
||||
//noinspection unchecked
|
||||
this.listeners = (T[]) Array.newInstance( eventType.baseListenerInterface(), 1 );
|
||||
this.listeners[0] = listener;
|
||||
}
|
||||
else {
|
||||
final int size = this.listeners.length;
|
||||
|
||||
//noinspection unchecked
|
||||
final T[] newCopy = (T[]) Array.newInstance( eventType.baseListenerInterface(), size+1 );
|
||||
|
||||
// put the new one first
|
||||
newCopy[0] = listener;
|
||||
|
||||
// and copy the rest after it
|
||||
System.arraycopy( this.listeners, 0, newCopy, 1, size );
|
||||
|
||||
this.listeners = newCopy;
|
||||
}
|
||||
}
|
||||
|
||||
private boolean listenerShouldGetAdded(T listener) {
|
||||
final List<T> ts = listenersAsList;
|
||||
if ( ts == null ) {
|
||||
listenersAsList = new ArrayList<>();
|
||||
return true;
|
||||
// no need to do de-dup checks
|
||||
private void handleListenerAddition(T listener, Consumer<T> additionHandler) {
|
||||
if ( listeners == null ) {
|
||||
additionHandler.accept( listener );
|
||||
return;
|
||||
}
|
||||
|
||||
boolean doAdd = true;
|
||||
strategy_loop: for ( DuplicationStrategy strategy : duplicationStrategies ) {
|
||||
final ListIterator<T> itr = ts.listIterator();
|
||||
while ( itr.hasNext() ) {
|
||||
final T existingListener = itr.next();
|
||||
final T[] localListenersRef = this.listeners;
|
||||
final boolean debugEnabled = log.isDebugEnabled();
|
||||
|
||||
for ( DuplicationStrategy strategy : duplicationStrategies ) {
|
||||
|
||||
// for each strategy, see if the strategy indicates that any of the existing
|
||||
// listeners match the listener being added. If so, we want to apply that
|
||||
// strategy's action. Control it returned immediately after applying the action
|
||||
// on match - meaning no further strategies are checked...
|
||||
|
||||
for ( int i = 0; i < localListenersRef.length; i++ ) {
|
||||
final T existingListener = localListenersRef[i];
|
||||
if ( debugEnabled ) {
|
||||
log.debugf(
|
||||
"Checking incoming listener [`%s`] for match against existing listener [`%s`]",
|
||||
listener,
|
||||
existingListener
|
||||
);
|
||||
}
|
||||
|
||||
if ( strategy.areMatch( listener, existingListener ) ) {
|
||||
if ( debugEnabled ) {
|
||||
log.debugf( "Found listener match between `%s` and `%s`", listener, existingListener );
|
||||
}
|
||||
|
||||
switch ( strategy.getAction() ) {
|
||||
// todo : add debug logging of what happens here...
|
||||
case ERROR: {
|
||||
throw new EventListenerRegistrationException( "Duplicate event listener found" );
|
||||
}
|
||||
case KEEP_ORIGINAL: {
|
||||
doAdd = false;
|
||||
break strategy_loop;
|
||||
if ( debugEnabled ) {
|
||||
log.debugf( "Skipping listener registration (%s) : `%s`", strategy.getAction(), listener );
|
||||
}
|
||||
return;
|
||||
}
|
||||
case REPLACE_ORIGINAL: {
|
||||
checkAgainstBaseInterface( listener );
|
||||
performInjections( listener );
|
||||
itr.set( listener );
|
||||
doAdd = false;
|
||||
break strategy_loop;
|
||||
if ( debugEnabled ) {
|
||||
log.debugf( "Replacing listener registration (%s) : `%s` -> %s", strategy.getAction(), existingListener, listener );
|
||||
}
|
||||
prepareListener( listener );
|
||||
|
||||
listeners[i] = listener;
|
||||
}
|
||||
}
|
||||
|
||||
// we've found a match - we should return
|
||||
// - the match action has already been applied at this point
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
return doAdd;
|
||||
}
|
||||
|
||||
private void internalPrepend(T listener) {
|
||||
// we did not find any match.. add it
|
||||
checkAgainstBaseInterface( listener );
|
||||
performInjections( listener );
|
||||
additionHandler.accept( listener );
|
||||
}
|
||||
|
||||
private void prepareListener(T listener) {
|
||||
checkAgainstBaseInterface( listener );
|
||||
performInjections( listener );
|
||||
listenersAsList.add( 0, listener );
|
||||
listeners = null; //Marks it for refreshing
|
||||
}
|
||||
|
||||
private void performInjections(T listener) {
|
||||
|
@ -259,10 +286,20 @@ class EventListenerGroupImpl<T> implements EventListenerGroup<T> {
|
|||
}
|
||||
}
|
||||
|
||||
private void internalAppend(T listener) {
|
||||
checkAgainstBaseInterface( listener );
|
||||
performInjections( listener );
|
||||
listenersAsList.add( listener );
|
||||
listeners = null; //Marks it for refreshing
|
||||
|
||||
/**
|
||||
* Implementation note: should be final for performance reasons.
|
||||
* @deprecated this is not the most efficient way for iterating the event listeners.
|
||||
* See {@link #fireEventOnEachListener(Object, BiConsumer)} and co. for better alternatives.
|
||||
*/
|
||||
@Override
|
||||
@Deprecated
|
||||
public final Iterable<T> listeners() {
|
||||
if ( listeners == null ) {
|
||||
//noinspection unchecked
|
||||
return Collections.EMPTY_LIST;
|
||||
}
|
||||
|
||||
return Arrays.asList( listeners );
|
||||
}
|
||||
}
|
||||
|
|
|
@ -16,11 +16,8 @@ public enum GraphSemantic {
|
|||
/**
|
||||
* Indicates a "fetch graph" EntityGraph. Attributes explicitly specified
|
||||
* as AttributeNodes are treated as FetchType.EAGER (via join fetch or
|
||||
* subsequent select).
|
||||
* <p/>
|
||||
* Note: Currently, attributes that are not specified are treated as
|
||||
* FetchType.LAZY or FetchType.EAGER depending on the attribute's definition
|
||||
* in metadata, rather than forcing FetchType.LAZY.
|
||||
* subsequent select). Attributes that are not specified are treated as
|
||||
* FetchType.LAZY invariably.
|
||||
*/
|
||||
FETCH( "javax.persistence.fetchgraph" ),
|
||||
|
||||
|
@ -29,7 +26,7 @@ public enum GraphSemantic {
|
|||
* as AttributeNodes are treated as FetchType.EAGER (via join fetch or
|
||||
* subsequent select). Attributes that are not specified are treated as
|
||||
* FetchType.LAZY or FetchType.EAGER depending on the attribute's definition
|
||||
* in metadata
|
||||
* in metadata.
|
||||
*/
|
||||
LOAD( "javax.persistence.loadgraph" );
|
||||
|
||||
|
|
|
@ -0,0 +1,23 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
|
||||
package org.hibernate.hql.internal.ast.tree;
|
||||
|
||||
/**
|
||||
* @author Jan-Willem Gmelig Meyling
|
||||
*/
|
||||
public interface TableReferenceNode {
|
||||
|
||||
/**
|
||||
* Returns table names which are referenced by this node. If the tables
|
||||
* can not be determined it returns null.
|
||||
*
|
||||
* @return table names or null.
|
||||
*/
|
||||
public String[] getReferencedTables();
|
||||
|
||||
}
|
|
@ -237,7 +237,11 @@ public class SequenceStyleGenerator
|
|||
final int initialValue = determineInitialValue( params );
|
||||
int incrementSize = determineIncrementSize( params );
|
||||
|
||||
if ( isPhysicalSequence( jdbcEnvironment, forceTableUse ) ) {
|
||||
final String optimizationStrategy = determineOptimizationStrategy( params, incrementSize );
|
||||
|
||||
final boolean isPooledOptimizer = OptimizerFactory.isPooledOptimizer( optimizationStrategy );
|
||||
|
||||
if ( isPooledOptimizer && isPhysicalSequence( jdbcEnvironment, forceTableUse ) ) {
|
||||
String databaseSequenceName = sequenceName.getObjectName().getText();
|
||||
Long databaseIncrementValue = getSequenceIncrementValue( jdbcEnvironment, databaseSequenceName );
|
||||
|
||||
|
@ -268,7 +272,6 @@ public class SequenceStyleGenerator
|
|||
}
|
||||
}
|
||||
|
||||
final String optimizationStrategy = determineOptimizationStrategy( params, incrementSize );
|
||||
incrementSize = determineAdjustedIncrementSize( optimizationStrategy, incrementSize );
|
||||
|
||||
if ( dialect.getSequenceSupport().supportsSequences() && !forceTableUse ) {
|
||||
|
|
|
@ -39,6 +39,7 @@ import org.hibernate.event.spi.SaveOrUpdateEventListener;
|
|||
import org.hibernate.jpa.AvailableSettings;
|
||||
import org.hibernate.jpa.QueryHints;
|
||||
import org.hibernate.jpa.internal.util.CacheModeHelper;
|
||||
import org.hibernate.jpa.internal.util.ConfigurationHelper;
|
||||
import org.hibernate.jpa.internal.util.LockOptionsHelper;
|
||||
import org.hibernate.resource.transaction.spi.TransactionCoordinatorBuilder;
|
||||
import org.hibernate.service.spi.ServiceRegistryImplementor;
|
||||
|
@ -117,6 +118,7 @@ final class FastSessionServices {
|
|||
final JdbcServices jdbcServices;
|
||||
final boolean isJtaTransactionAccessible;
|
||||
final CacheMode initialSessionCacheMode;
|
||||
final FlushMode initialSessionFlushMode;
|
||||
final boolean discardOnClose;
|
||||
final BaselineSessionEventsListenerBuilder defaultSessionEventListeners;
|
||||
final LockOptions defaultLockOptions;
|
||||
|
@ -177,6 +179,12 @@ final class FastSessionServices {
|
|||
this.defaultJdbcObservers = new ConnectionObserverStatsBridge( sf );
|
||||
this.defaultSessionEventListeners = sessionFactoryOptions.getBaselineSessionEventsListenerBuilder();
|
||||
this.defaultLockOptions = initializeDefaultLockOptions( defaultSessionProperties );
|
||||
this.initialSessionFlushMode = initializeDefaultFlushMode( defaultSessionProperties );
|
||||
}
|
||||
|
||||
private static FlushMode initializeDefaultFlushMode(Map<String, Object> defaultSessionProperties) {
|
||||
Object setMode = defaultSessionProperties.get( AvailableSettings.FLUSH_MODE );
|
||||
return ConfigurationHelper.getFlushMode( setMode, FlushMode.AUTO );
|
||||
}
|
||||
|
||||
private static LockOptions initializeDefaultLockOptions(final Map<String, Object> defaultSessionProperties) {
|
||||
|
|
|
@ -113,6 +113,7 @@ import org.hibernate.event.spi.SaveOrUpdateEventListener;
|
|||
import org.hibernate.graph.GraphSemantic;
|
||||
import org.hibernate.graph.RootGraph;
|
||||
import org.hibernate.graph.internal.RootGraphImpl;
|
||||
import org.hibernate.graph.spi.GraphImplementor;
|
||||
import org.hibernate.graph.spi.RootGraphImplementor;
|
||||
import org.hibernate.jdbc.ReturningWork;
|
||||
import org.hibernate.jdbc.Work;
|
||||
|
@ -188,6 +189,8 @@ public class SessionImpl
|
|||
private transient LoadEvent loadEvent; //cached LoadEvent instance
|
||||
|
||||
private transient TransactionObserver transactionObserver;
|
||||
|
||||
private transient GraphImplementor fetchGraphLoadContext;
|
||||
|
||||
public SessionImpl(SessionFactoryImpl factory, SessionCreationOptions options) {
|
||||
super( factory, options );
|
||||
|
@ -226,6 +229,15 @@ public class SessionImpl
|
|||
// NOTE : pulse() already handles auto-join-ability correctly
|
||||
getTransactionCoordinator().pulse();
|
||||
|
||||
final FlushMode initialMode;
|
||||
if ( this.properties == null ) {
|
||||
initialMode = fastSessionServices.initialSessionFlushMode;
|
||||
}
|
||||
else {
|
||||
initialMode = ConfigurationHelper.getFlushMode( getSessionProperty( AvailableSettings.FLUSH_MODE ), FlushMode.AUTO );
|
||||
}
|
||||
getSession().setHibernateFlushMode( initialMode );
|
||||
|
||||
if ( log.isTraceEnabled() ) {
|
||||
log.tracef( "Opened Session [%s] at timestamp: %s", getSessionIdentifier(), getTimestamp() );
|
||||
}
|
||||
|
@ -2767,6 +2779,10 @@ public class SessionImpl
|
|||
lockOptions = buildLockOptions( lockModeType, properties );
|
||||
loadAccess.with( lockOptions );
|
||||
}
|
||||
|
||||
if ( getLoadQueryInfluencers().getEffectiveEntityGraph().getSemantic() == GraphSemantic.FETCH ) {
|
||||
setFetchGraphLoadContext( getLoadQueryInfluencers().getEffectiveEntityGraph().getGraph() );
|
||||
}
|
||||
|
||||
return loadAccess.load( (Serializable) primaryKey );
|
||||
}
|
||||
|
@ -2812,6 +2828,7 @@ public class SessionImpl
|
|||
finally {
|
||||
getLoadQueryInfluencers().getEffectiveEntityGraph().clear();
|
||||
getLoadQueryInfluencers().setReadOnly( null );
|
||||
setFetchGraphLoadContext( null );
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3175,6 +3192,16 @@ public class SessionImpl
|
|||
checkOpen();
|
||||
return getEntityManagerFactory().findEntityGraphsByType( entityClass );
|
||||
}
|
||||
|
||||
@Override
|
||||
public GraphImplementor getFetchGraphLoadContext() {
|
||||
return this.fetchGraphLoadContext;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setFetchGraphLoadContext(GraphImplementor fetchGraphLoadContext) {
|
||||
this.fetchGraphLoadContext = fetchGraphLoadContext;
|
||||
}
|
||||
|
||||
/**
|
||||
* Used by JDK serialization...
|
||||
|
|
|
@ -50,6 +50,7 @@ import static org.hibernate.internal.HEMLogging.messageLogger;
|
|||
* @author Steve Ebersole
|
||||
*/
|
||||
public class PersistenceXmlParser {
|
||||
|
||||
private static final EntityManagerMessageLogger LOG = messageLogger( PersistenceXmlParser.class );
|
||||
|
||||
/**
|
||||
|
@ -214,12 +215,16 @@ public class PersistenceXmlParser {
|
|||
private final PersistenceUnitTransactionType defaultTransactionType;
|
||||
private final Map<String, ParsedPersistenceXmlDescriptor> persistenceUnits;
|
||||
|
||||
private PersistenceXmlParser(ClassLoaderService classLoaderService, PersistenceUnitTransactionType defaultTransactionType) {
|
||||
protected PersistenceXmlParser(ClassLoaderService classLoaderService, PersistenceUnitTransactionType defaultTransactionType) {
|
||||
this.classLoaderService = classLoaderService;
|
||||
this.defaultTransactionType = defaultTransactionType;
|
||||
this.persistenceUnits = new ConcurrentHashMap<>();
|
||||
}
|
||||
|
||||
protected List<ParsedPersistenceXmlDescriptor> getResolvedPersistenceUnits() {
|
||||
return new ArrayList<>(persistenceUnits.values());
|
||||
}
|
||||
|
||||
private void doResolve(Map integration) {
|
||||
final List<URL> xmlUrls = classLoaderService.locateResources( "META-INF/persistence.xml" );
|
||||
if ( xmlUrls.isEmpty() ) {
|
||||
|
@ -236,8 +241,10 @@ public class PersistenceXmlParser {
|
|||
}
|
||||
}
|
||||
|
||||
private void parsePersistenceXml(URL xmlUrl, Map integration) {
|
||||
LOG.tracef( "Attempting to parse persistence.xml file : %s", xmlUrl.toExternalForm() );
|
||||
protected void parsePersistenceXml(URL xmlUrl, Map integration) {
|
||||
if ( LOG.isTraceEnabled() ) {
|
||||
LOG.tracef( "Attempting to parse persistence.xml file : %s", xmlUrl.toExternalForm() );
|
||||
}
|
||||
|
||||
final Document doc = loadUrl( xmlUrl );
|
||||
final Element top = doc.getDocumentElement();
|
||||
|
|
|
@ -1366,8 +1366,13 @@ public abstract class AbstractProducedQuery<R> implements QueryImplementor<R> {
|
|||
}
|
||||
|
||||
final CacheMode effectiveCacheMode = CacheMode.fromJpaModes( queryOptions.getCacheRetrieveMode(), queryOptions.getCacheStoreMode() );
|
||||
sessionCacheMode = getSession().getCacheMode();
|
||||
getSession().setCacheMode( effectiveCacheMode );
|
||||
if( effectiveCacheMode != null) {
|
||||
sessionCacheMode = getSession().getCacheMode();
|
||||
getSession().setCacheMode( effectiveCacheMode );
|
||||
}
|
||||
if ( entityGraphQueryHint != null && entityGraphQueryHint.getSemantic() == GraphSemantic.FETCH ) {
|
||||
getSession().setFetchGraphLoadContext( entityGraphQueryHint.getGraph() );
|
||||
}
|
||||
}
|
||||
|
||||
protected void afterQuery() {
|
||||
|
@ -1379,6 +1384,7 @@ public abstract class AbstractProducedQuery<R> implements QueryImplementor<R> {
|
|||
getSession().setCacheMode( sessionCacheMode );
|
||||
sessionCacheMode = null;
|
||||
}
|
||||
getSession().setFetchGraphLoadContext( null );
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -153,6 +153,7 @@ public class DialectFactoryTest extends BaseUnitTestCase {
|
|||
testDetermination( "DB2/LINUX390", DB2Dialect.class, resolver );
|
||||
testDetermination( "DB2/AIX64", DB2Dialect.class, resolver );
|
||||
testDetermination( "DB2 UDB for AS/400", DB2400Dialect.class, resolver );
|
||||
testDetermination( "DB2 UDB for AS/400", 7, 3, DB2400V7R3Dialect.class, resolver );
|
||||
testDetermination( "Oracle", 8, Oracle8iDialect.class, resolver );
|
||||
testDetermination( "Oracle", 9, Oracle9iDialect.class, resolver );
|
||||
testDetermination( "Oracle", 10, Oracle10gDialect.class, resolver );
|
||||
|
|
|
@ -0,0 +1,298 @@
|
|||
package org.hibernate.event.service.internal;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import org.hibernate.event.service.spi.DuplicationStrategy;
|
||||
import org.hibernate.event.service.spi.EventListenerGroup;
|
||||
import org.hibernate.event.service.spi.EventListenerRegistrationException;
|
||||
import org.hibernate.event.spi.ClearEvent;
|
||||
import org.hibernate.event.spi.ClearEventListener;
|
||||
import org.hibernate.event.spi.EventType;
|
||||
|
||||
import org.hibernate.testing.TestForIssue;
|
||||
import org.junit.Rule;
|
||||
import org.junit.Test;
|
||||
import org.junit.rules.ExpectedException;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
/**
|
||||
* Test that a listener replacing the original one is actually called when the event is fired for each listener.
|
||||
* <p>
|
||||
* Note: I'm using ClearEvent for the tests because it's the simpler one I've found.
|
||||
* </p>
|
||||
*/
|
||||
@TestForIssue(jiraKey = "HHH-13831")
|
||||
public class EventListenerDuplicationStrategyTest {
|
||||
|
||||
@Rule
|
||||
public ExpectedException thrown = ExpectedException.none();
|
||||
|
||||
Tracker tracker = new Tracker();
|
||||
ClearEvent event = new ClearEvent( null );
|
||||
EventListenerGroup<ClearEventListener> listenerGroup = new EventListenerGroupImpl( EventType.CLEAR, null );
|
||||
|
||||
@Test
|
||||
public void testListenersIterator() {
|
||||
listenerGroup.addDuplicationStrategy( ReplaceOriginalStrategy.INSTANCE );
|
||||
listenerGroup.appendListener( new OriginalListener( tracker ) );
|
||||
listenerGroup.listeners().forEach( listener -> listener.onClear( event ) );
|
||||
|
||||
assertThat( tracker.callers ).containsExactly( OriginalListener.class );
|
||||
|
||||
tracker.reset();
|
||||
listenerGroup.appendListener( new ExpectedListener( tracker ) );
|
||||
listenerGroup.listeners().forEach( listener -> listener.onClear( event ) );
|
||||
|
||||
assertThat( tracker.callers ).containsExactly( ExpectedListener.class );
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFireLazyEventOnEachListener() {
|
||||
listenerGroup.addDuplicationStrategy( ReplaceOriginalStrategy.INSTANCE );
|
||||
listenerGroup.appendListener( new OriginalListener( tracker ) );
|
||||
listenerGroup.fireLazyEventOnEachListener( () -> event, ClearEventListener::onClear );
|
||||
|
||||
assertThat( tracker.callers ).containsExactly( OriginalListener.class );
|
||||
|
||||
tracker.reset();
|
||||
listenerGroup.appendListener( new ExpectedListener( tracker ) );
|
||||
listenerGroup.fireLazyEventOnEachListener( () -> event, ClearEventListener::onClear );
|
||||
|
||||
assertThat( tracker.callers ).containsExactly( ExpectedListener.class );
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFireEventOnEachListener() {
|
||||
listenerGroup.addDuplicationStrategy( ReplaceOriginalStrategy.INSTANCE );
|
||||
listenerGroup.appendListener( new OriginalListener( tracker ) );
|
||||
listenerGroup.fireEventOnEachListener( event, ClearEventListener::onClear );
|
||||
|
||||
assertThat( tracker.callers ).containsExactly( OriginalListener.class );
|
||||
|
||||
tracker.reset();
|
||||
listenerGroup.appendListener( new ExpectedListener( tracker ) );
|
||||
listenerGroup.fireEventOnEachListener( event, ClearEventListener::onClear );
|
||||
|
||||
assertThat( tracker.callers ).containsExactly( ExpectedListener.class );
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testListenersIteratorWithMultipleListenersAndNoStrategy() {
|
||||
listenerGroup.appendListener( new OriginalListener( tracker ) );
|
||||
listenerGroup.appendListener( new ExpectedListener( tracker ) );
|
||||
listenerGroup.appendListener( new ExtraListener( tracker ) );
|
||||
listenerGroup.listeners().forEach( listener -> listener.onClear( event ) );
|
||||
|
||||
assertThat( tracker.callers ).containsExactly( OriginalListener.class, ExpectedListener.class, ExtraListener.class );
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFireLazyEventOnEachListenerWithMultipleListenersAndNoStrategy() {
|
||||
listenerGroup.appendListener( new OriginalListener( tracker ) );
|
||||
listenerGroup.appendListener( new ExpectedListener( tracker ) );
|
||||
listenerGroup.appendListener( new ExtraListener( tracker ) );
|
||||
listenerGroup.fireLazyEventOnEachListener( () -> event, ClearEventListener::onClear );
|
||||
|
||||
assertThat( tracker.callers ).containsExactly( OriginalListener.class, ExpectedListener.class, ExtraListener.class );
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFireEventOnEachListenerWithMultipleListenersAndNoStrategy() {
|
||||
listenerGroup.appendListener( new OriginalListener( tracker ) );
|
||||
listenerGroup.appendListener( new ExpectedListener( tracker ) );
|
||||
listenerGroup.appendListener( new ExtraListener( tracker ) );
|
||||
listenerGroup.fireEventOnEachListener( event, ClearEventListener::onClear );
|
||||
|
||||
assertThat( tracker.callers ).containsExactly( OriginalListener.class, ExpectedListener.class, ExtraListener.class );
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testListenersIteratorWithMultipleListenersAndReplacementStrategy() {
|
||||
listenerGroup.addDuplicationStrategy( ReplaceOriginalStrategy.INSTANCE );
|
||||
listenerGroup.appendListener( new OriginalListener( tracker ) );
|
||||
listenerGroup.appendListener( new ExpectedListener( tracker ) );
|
||||
listenerGroup.appendListener( new ExtraListener( tracker ) );
|
||||
listenerGroup.listeners().forEach( listener -> listener.onClear( event ) );
|
||||
|
||||
assertThat( tracker.callers ).containsExactly( ExpectedListener.class, ExtraListener.class );
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFireLazyEventOnEachListenerWithMultipleListenersAndReplacementStrategy() {
|
||||
listenerGroup.addDuplicationStrategy( ReplaceOriginalStrategy.INSTANCE );
|
||||
listenerGroup.appendListener( new OriginalListener( tracker ) );
|
||||
listenerGroup.appendListener( new ExpectedListener( tracker ) );
|
||||
listenerGroup.appendListener( new ExtraListener( tracker ) );
|
||||
listenerGroup.fireLazyEventOnEachListener( () -> event, ClearEventListener::onClear );
|
||||
|
||||
assertThat( tracker.callers ).containsExactly( ExpectedListener.class, ExtraListener.class );
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFireEventOnEachListenerWithMultipleListenersAndReplacementStrategy() {
|
||||
listenerGroup.addDuplicationStrategy( ReplaceOriginalStrategy.INSTANCE );
|
||||
listenerGroup.appendListener( new OriginalListener( tracker ) );
|
||||
listenerGroup.appendListener( new ExpectedListener( tracker ) );
|
||||
listenerGroup.appendListener( new ExtraListener( tracker ) );
|
||||
listenerGroup.fireEventOnEachListener( event, ClearEventListener::onClear );
|
||||
|
||||
assertThat( tracker.callers ).containsExactly( ExpectedListener.class, ExtraListener.class );
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testListenersIteratorWithMultipleListenersAndKeepOriginalStrategy() {
|
||||
listenerGroup.addDuplicationStrategy( KeepOriginalStrategy.INSTANCE );
|
||||
listenerGroup.appendListener( new OriginalListener( tracker ) );
|
||||
listenerGroup.appendListener( new ExpectedListener( tracker ) );
|
||||
listenerGroup.appendListener( new ExtraListener( tracker ) );
|
||||
listenerGroup.listeners().forEach( listener -> listener.onClear( event ) );
|
||||
|
||||
assertThat( tracker.callers ).containsExactly( OriginalListener.class, ExtraListener.class );
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFireLazyEventOnEachListenerWithMultipleListenersAndKeepOriginalStrategy() {
|
||||
listenerGroup.addDuplicationStrategy( KeepOriginalStrategy.INSTANCE );
|
||||
listenerGroup.appendListener( new OriginalListener( tracker ) );
|
||||
listenerGroup.appendListener( new ExpectedListener( tracker ) );
|
||||
listenerGroup.appendListener( new ExtraListener( tracker ) );
|
||||
listenerGroup.fireLazyEventOnEachListener( () -> event, ClearEventListener::onClear );
|
||||
|
||||
assertThat( tracker.callers ).containsExactly( OriginalListener.class, ExtraListener.class );
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFireEventOnEachListenerWithMultipleListenersAndKeepOriginalStrategy() {
|
||||
listenerGroup.addDuplicationStrategy( KeepOriginalStrategy.INSTANCE );
|
||||
listenerGroup.appendListener( new OriginalListener( tracker ) );
|
||||
listenerGroup.appendListener( new ExpectedListener( tracker ) );
|
||||
listenerGroup.appendListener( new ExtraListener( tracker ) );
|
||||
listenerGroup.fireEventOnEachListener( event, ClearEventListener::onClear );
|
||||
|
||||
assertThat( tracker.callers ).containsExactly( OriginalListener.class, ExtraListener.class );
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testErrorStrategyOnAppend() {
|
||||
thrown.expect( EventListenerRegistrationException.class );
|
||||
thrown.expectMessage( "Duplicate event listener found" );
|
||||
|
||||
listenerGroup.addDuplicationStrategy( ErrorStrategy.INSTANCE );
|
||||
listenerGroup.appendListener( new OriginalListener( tracker ) );
|
||||
listenerGroup.appendListener( new ExpectedListener( tracker ) );
|
||||
}
|
||||
|
||||
/**
|
||||
* Keep track of which listener is called and how many listeners are called.
|
||||
*/
|
||||
private class Tracker {
|
||||
private List<Class<?>> callers = new ArrayList<>();
|
||||
|
||||
public void calledBy(Class<?> caller) {
|
||||
callers.add( caller );
|
||||
}
|
||||
|
||||
public void reset() {
|
||||
callers.clear();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* The initial listener for the test
|
||||
*/
|
||||
private static class OriginalListener implements ClearEventListener {
|
||||
private final Tracker tracker;
|
||||
|
||||
public OriginalListener(Tracker tracker) {
|
||||
this.tracker = tracker;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onClear(ClearEvent event) {
|
||||
tracker.calledBy( this.getClass() );
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* The expected listener to be called if everything goes well
|
||||
*/
|
||||
private static class ExpectedListener implements ClearEventListener {
|
||||
private final Tracker tracker;
|
||||
|
||||
public ExpectedListener(Tracker tracker) {
|
||||
this.tracker = tracker;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onClear(ClearEvent event) {
|
||||
tracker.calledBy( this.getClass() );
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* An additional listener to test the case of multiple listeners registered for the same event
|
||||
*/
|
||||
private static class ExtraListener implements ClearEventListener {
|
||||
private final Tracker tracker;
|
||||
|
||||
public ExtraListener(Tracker tracker) {
|
||||
this.tracker = tracker;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onClear(ClearEvent event) {
|
||||
tracker.calledBy( this.getClass() );
|
||||
}
|
||||
}
|
||||
|
||||
private static class ReplaceOriginalStrategy implements DuplicationStrategy {
|
||||
|
||||
static final ReplaceOriginalStrategy INSTANCE = new ReplaceOriginalStrategy();
|
||||
|
||||
@Override
|
||||
public boolean areMatch(Object listener, Object original) {
|
||||
// We just want to replace the original listener with the extra so that we can test with multiple listeners
|
||||
return original instanceof OriginalListener && listener instanceof ExpectedListener;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Action getAction() {
|
||||
return Action.REPLACE_ORIGINAL;
|
||||
}
|
||||
}
|
||||
|
||||
private static class KeepOriginalStrategy implements DuplicationStrategy {
|
||||
|
||||
static final KeepOriginalStrategy INSTANCE = new KeepOriginalStrategy();
|
||||
|
||||
@Override
|
||||
public boolean areMatch(Object listener, Object original) {
|
||||
// We just want this to work for original and expected listener
|
||||
return original instanceof OriginalListener && listener instanceof ExpectedListener;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Action getAction() {
|
||||
return Action.KEEP_ORIGINAL;
|
||||
}
|
||||
}
|
||||
|
||||
private static class ErrorStrategy implements DuplicationStrategy {
|
||||
|
||||
static final ErrorStrategy INSTANCE = new ErrorStrategy();
|
||||
|
||||
@Override
|
||||
public boolean areMatch(Object listener, Object original) {
|
||||
// We just want this to work for original and expected listener
|
||||
return original instanceof OriginalListener && listener instanceof ExpectedListener;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Action getAction() {
|
||||
return Action.ERROR;
|
||||
}
|
||||
}
|
||||
}
|
|
@ -50,7 +50,7 @@ public class EntityGraphFunctionalTests extends BaseEntityManagerFunctionalTestC
|
|||
final Issue issue = session.find(
|
||||
Issue.class,
|
||||
1,
|
||||
Collections.singletonMap( GraphSemantic.FETCH.getJpaHintName(), graph )
|
||||
Collections.singletonMap( GraphSemantic.LOAD.getJpaHintName(), graph )
|
||||
);
|
||||
|
||||
assertTrue( Hibernate.isInitialized( issue ) );
|
||||
|
|
|
@ -0,0 +1,60 @@
|
|||
package org.hibernate.internal;
|
||||
|
||||
import org.hibernate.FlushMode;
|
||||
import org.hibernate.Session;
|
||||
import org.hibernate.SessionFactory;
|
||||
import org.hibernate.boot.MetadataSources;
|
||||
import org.hibernate.boot.registry.StandardServiceRegistryBuilder;
|
||||
import org.hibernate.boot.registry.internal.StandardServiceRegistryImpl;
|
||||
import org.hibernate.cfg.AvailableSettings;
|
||||
import org.hibernate.testing.TestForIssue;
|
||||
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.junit.runners.Parameterized;
|
||||
import org.junit.runners.Parameterized.Parameter;
|
||||
import org.junit.runners.Parameterized.Parameters;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
/**
|
||||
* @author Nathan Xu
|
||||
*/
|
||||
@TestForIssue( jiraKey = "HHH-13677" )
|
||||
@RunWith( Parameterized.class )
|
||||
public class FlushModeConfigTest {
|
||||
|
||||
@Parameters
|
||||
public static FlushMode[] parameters() {
|
||||
return FlushMode.values();
|
||||
}
|
||||
|
||||
@Parameter
|
||||
public FlushMode flushMode;
|
||||
|
||||
private StandardServiceRegistryImpl serviceRegistry;
|
||||
|
||||
@Before
|
||||
public void setUp() {
|
||||
serviceRegistry = (StandardServiceRegistryImpl) new StandardServiceRegistryBuilder()
|
||||
.applySetting( AvailableSettings.FLUSH_MODE, flushMode.name() )
|
||||
.build();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFlushModeSettingTakingEffect() {
|
||||
try ( final SessionFactory sessionFactory = new MetadataSources(serviceRegistry).buildMetadata().buildSessionFactory() ) {
|
||||
try ( final Session session = sessionFactory.openSession() ) {
|
||||
assertEquals( flushMode, session.getHibernateFlushMode() );
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() {
|
||||
serviceRegistry.destroy();
|
||||
}
|
||||
|
||||
}
|
|
@ -76,7 +76,6 @@ public class PluralAttributeExpressionsTest extends AbstractMetamodelSpecificTes
|
|||
|
||||
@Test
|
||||
@TestForIssue( jiraKey = "HHH-11225" )
|
||||
@FailureExpected( jiraKey = "HHH-6686")
|
||||
public void testElementMapIsEmptyHql() {
|
||||
doInJPA( this::entityManagerFactory, entityManager -> {
|
||||
entityManager.createQuery( "select m from MapEntity m where m.localized is empty" ).getResultList();
|
||||
|
@ -85,7 +84,6 @@ public class PluralAttributeExpressionsTest extends AbstractMetamodelSpecificTes
|
|||
|
||||
@Test
|
||||
@TestForIssue( jiraKey = "HHH-11225" )
|
||||
@FailureExpected( jiraKey = "HHH-6686")
|
||||
public void testElementMapIsEmptyCriteria() {
|
||||
doInJPA( this::entityManagerFactory, entityManager -> {
|
||||
final HibernateCriteriaBuilder cb = (HibernateCriteriaBuilder) entityManager.getCriteriaBuilder();
|
||||
|
|
|
@ -0,0 +1,50 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.jpa.test.graphs;
|
||||
|
||||
import org.hibernate.annotations.FetchMode;
|
||||
import org.hibernate.annotations.FetchProfile;
|
||||
|
||||
import javax.persistence.*;
|
||||
import java.util.HashSet;
|
||||
import java.util.Set;
|
||||
|
||||
|
||||
/**
|
||||
* @author Nathan Xu
|
||||
*/
|
||||
@Entity
|
||||
@FetchProfile(
|
||||
name = "company.location",
|
||||
fetchOverrides = {
|
||||
@FetchProfile.FetchOverride(
|
||||
entity = CompanyWithFetchProfile.class,
|
||||
association = "location",
|
||||
mode = FetchMode.JOIN
|
||||
)
|
||||
}
|
||||
)
|
||||
public class CompanyWithFetchProfile {
|
||||
@Id @GeneratedValue
|
||||
public long id;
|
||||
|
||||
@OneToMany
|
||||
public Set<Employee> employees = new HashSet<Employee>();
|
||||
|
||||
@OneToOne(fetch = FetchType.LAZY)
|
||||
public Location location;
|
||||
|
||||
@ElementCollection
|
||||
public Set<Market> markets = new HashSet<Market>();
|
||||
|
||||
@ElementCollection(fetch = FetchType.EAGER)
|
||||
public Set<String> phoneNumbers = new HashSet<String>();
|
||||
|
||||
public Location getLocation() {
|
||||
return location;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,213 @@
|
|||
package org.hibernate.jpa.test.graphs.mapped_by_id;
|
||||
|
||||
import org.hibernate.Hibernate;
|
||||
import org.hibernate.Session;
|
||||
import org.hibernate.jpa.test.BaseEntityManagerFunctionalTestCase;
|
||||
import org.hibernate.jpa.test.graphs.*;
|
||||
import org.hibernate.testing.TestForIssue;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
import javax.persistence.EntityGraph;
|
||||
import javax.persistence.EntityManager;
|
||||
import javax.persistence.Subgraph;
|
||||
import java.util.Collections;
|
||||
import java.util.Iterator;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
/**
|
||||
* @author Nathan Xu
|
||||
*/
|
||||
public class FetchGraphFindByIdTest extends BaseEntityManagerFunctionalTestCase {
|
||||
|
||||
private long companyId;
|
||||
|
||||
private long companyWithFetchProfileId;
|
||||
|
||||
@Test
|
||||
@TestForIssue(jiraKey = "HHH-8776")
|
||||
public void testFetchGraphByFind() {
|
||||
EntityManager entityManager = getOrCreateEntityManager();
|
||||
entityManager.getTransaction().begin();
|
||||
|
||||
EntityGraph<Company> entityGraph = entityManager.createEntityGraph( Company.class );
|
||||
entityGraph.addAttributeNodes( "location" );
|
||||
entityGraph.addAttributeNodes( "markets" );
|
||||
|
||||
Map<String, Object> properties = Collections.singletonMap( "javax.persistence.fetchgraph", entityGraph );
|
||||
|
||||
Company company = entityManager.find( Company.class, companyId, properties );
|
||||
|
||||
entityManager.getTransaction().commit();
|
||||
entityManager.close();
|
||||
|
||||
assertFalse( Hibernate.isInitialized( company.employees ) );
|
||||
assertTrue( Hibernate.isInitialized( company.location ) );
|
||||
assertTrue( Hibernate.isInitialized( company.markets ) );
|
||||
// With "fetchgraph", non-specified attributes effect 'lazy' mode. So, here,
|
||||
// @ElementCollection(fetch = FetchType.EAGER) should not be initialized.
|
||||
assertFalse( Hibernate.isInitialized( company.phoneNumbers ) );
|
||||
|
||||
entityManager = getOrCreateEntityManager();
|
||||
entityManager.getTransaction().begin();
|
||||
|
||||
Subgraph<Employee> subgraph = entityGraph.addSubgraph( "employees" );
|
||||
subgraph.addAttributeNodes( "managers" );
|
||||
subgraph.addAttributeNodes( "friends" );
|
||||
Subgraph<Manager> subSubgraph = subgraph.addSubgraph( "managers", Manager.class );
|
||||
subSubgraph.addAttributeNodes( "managers" );
|
||||
subSubgraph.addAttributeNodes( "friends" );
|
||||
|
||||
company = entityManager.find( Company.class, companyId, properties );
|
||||
|
||||
entityManager.getTransaction().commit();
|
||||
entityManager.close();
|
||||
|
||||
assertTrue( Hibernate.isInitialized( company.employees ) );
|
||||
assertTrue( Hibernate.isInitialized( company.location ) );
|
||||
assertEquals( 12345, company.location.zip );
|
||||
assertTrue( Hibernate.isInitialized( company.markets ) );
|
||||
// With "fetchgraph", non-specified attributes effect 'lazy' mode. So, here,
|
||||
// @ElementCollection(fetch = FetchType.EAGER) should not be initialized.
|
||||
assertFalse( Hibernate.isInitialized( company.phoneNumbers ) );
|
||||
|
||||
boolean foundManager = false;
|
||||
Iterator<Employee> employeeItr = company.employees.iterator();
|
||||
while (employeeItr.hasNext()) {
|
||||
Employee employee = employeeItr.next();
|
||||
assertTrue( Hibernate.isInitialized( employee.managers ) );
|
||||
assertTrue( Hibernate.isInitialized( employee.friends ) );
|
||||
// test 1 more level
|
||||
Iterator<Manager> managerItr = employee.managers.iterator();
|
||||
while (managerItr.hasNext()) {
|
||||
foundManager = true;
|
||||
Manager manager = managerItr.next();
|
||||
assertTrue( Hibernate.isInitialized( manager.managers ) );
|
||||
assertTrue( Hibernate.isInitialized( manager.friends ) );
|
||||
}
|
||||
}
|
||||
assertTrue(foundManager);
|
||||
}
|
||||
|
||||
@Test
|
||||
@TestForIssue(jiraKey = "HHH-8776")
|
||||
public void testFetchGraphByFindTakingPrecedenceOverFetchProfile() {
|
||||
EntityManager entityManager = getOrCreateEntityManager();
|
||||
entityManager.getTransaction().begin();
|
||||
|
||||
entityManager.unwrap( Session.class ).enableFetchProfile("company.location");
|
||||
|
||||
EntityGraph<CompanyWithFetchProfile> entityGraph = entityManager.createEntityGraph( CompanyWithFetchProfile.class );
|
||||
entityGraph.addAttributeNodes( "markets" );
|
||||
|
||||
Map<String, Object> properties = Collections.singletonMap( "javax.persistence.fetchgraph", entityGraph );
|
||||
|
||||
CompanyWithFetchProfile company = entityManager.find( CompanyWithFetchProfile.class, companyWithFetchProfileId, properties );
|
||||
|
||||
entityManager.getTransaction().commit();
|
||||
entityManager.close();
|
||||
|
||||
assertFalse( Hibernate.isInitialized( company.employees ) );
|
||||
assertFalse( Hibernate.isInitialized( company.location ) ); // should be initialized if 'company.location' fetch profile takes effect
|
||||
assertTrue( Hibernate.isInitialized( company.markets ) );
|
||||
// With "fetchgraph", non-specified attributes effect 'lazy' mode. So, here,
|
||||
// @ElementCollection(fetch = FetchType.EAGER) should not be initialized.
|
||||
assertFalse( Hibernate.isInitialized( company.phoneNumbers ) );
|
||||
|
||||
entityManager = getOrCreateEntityManager();
|
||||
entityManager.getTransaction().begin();
|
||||
|
||||
Subgraph<Employee> subgraph = entityGraph.addSubgraph( "employees" );
|
||||
subgraph.addAttributeNodes( "managers" );
|
||||
subgraph.addAttributeNodes( "friends" );
|
||||
Subgraph<Manager> subSubgraph = subgraph.addSubgraph( "managers", Manager.class );
|
||||
subSubgraph.addAttributeNodes( "managers" );
|
||||
subSubgraph.addAttributeNodes( "friends" );
|
||||
|
||||
company = entityManager.find( CompanyWithFetchProfile.class, companyWithFetchProfileId, properties );
|
||||
|
||||
entityManager.getTransaction().commit();
|
||||
entityManager.close();
|
||||
|
||||
assertTrue( Hibernate.isInitialized( company.employees ) );
|
||||
assertFalse( Hibernate.isInitialized( company.location ) ); // should be initialized if 'company.location' fetch profile takes effect
|
||||
assertTrue( Hibernate.isInitialized( company.markets ) );
|
||||
// With "fetchgraph", non-specified attributes effect 'lazy' mode. So, here,
|
||||
// @ElementCollection(fetch = FetchType.EAGER) should not be initialized.
|
||||
assertFalse( Hibernate.isInitialized( company.phoneNumbers ) );
|
||||
|
||||
boolean foundManager = false;
|
||||
Iterator<Employee> employeeItr = company.employees.iterator();
|
||||
while (employeeItr.hasNext()) {
|
||||
Employee employee = employeeItr.next();
|
||||
assertTrue( Hibernate.isInitialized( employee.managers ) );
|
||||
assertTrue( Hibernate.isInitialized( employee.friends ) );
|
||||
// test 1 more level
|
||||
Iterator<Manager> managerItr = employee.managers.iterator();
|
||||
while (managerItr.hasNext()) {
|
||||
foundManager = true;
|
||||
Manager manager = managerItr.next();
|
||||
assertTrue( Hibernate.isInitialized( manager.managers ) );
|
||||
assertTrue( Hibernate.isInitialized( manager.friends ) );
|
||||
}
|
||||
}
|
||||
assertTrue(foundManager);
|
||||
}
|
||||
|
||||
@Before
|
||||
public void createData() {
|
||||
EntityManager entityManager = getOrCreateEntityManager();
|
||||
entityManager.getTransaction().begin();
|
||||
|
||||
Manager manager1 = new Manager();
|
||||
entityManager.persist( manager1 );
|
||||
|
||||
Manager manager2 = new Manager();
|
||||
manager2.managers.add( manager1 );
|
||||
entityManager.persist( manager2 );
|
||||
|
||||
Employee employee = new Employee();
|
||||
employee.managers.add( manager1 );
|
||||
entityManager.persist( employee );
|
||||
|
||||
Location location = new Location();
|
||||
location.address = "123 somewhere";
|
||||
location.zip = 12345;
|
||||
entityManager.persist( location );
|
||||
|
||||
Company company = new Company();
|
||||
company.employees.add( employee );
|
||||
company.employees.add( manager1 );
|
||||
company.employees.add( manager2 );
|
||||
company.location = location;
|
||||
company.markets.add( Market.SERVICES );
|
||||
company.markets.add( Market.TECHNOLOGY );
|
||||
company.phoneNumbers.add( "012-345-6789" );
|
||||
company.phoneNumbers.add( "987-654-3210" );
|
||||
entityManager.persist( company );
|
||||
companyId = company.id;
|
||||
|
||||
CompanyWithFetchProfile companyWithFetchProfile = new CompanyWithFetchProfile();
|
||||
companyWithFetchProfile.employees.add( employee );
|
||||
companyWithFetchProfile.employees.add( manager1 );
|
||||
companyWithFetchProfile.employees.add( manager2 );
|
||||
companyWithFetchProfile.location = location;
|
||||
companyWithFetchProfile.markets.add( Market.SERVICES );
|
||||
companyWithFetchProfile.markets.add( Market.TECHNOLOGY );
|
||||
companyWithFetchProfile.phoneNumbers.add( "012-345-6789" );
|
||||
companyWithFetchProfile.phoneNumbers.add( "987-654-3210" );
|
||||
entityManager.persist( companyWithFetchProfile );
|
||||
companyWithFetchProfileId = companyWithFetchProfile.id;
|
||||
|
||||
entityManager.getTransaction().commit();
|
||||
entityManager.close();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Class<?>[] getAnnotatedClasses() {
|
||||
return new Class<?>[] { Company.class, CompanyWithFetchProfile.class, Employee.class, Manager.class, Location.class, Course.class, Student.class };
|
||||
}
|
||||
|
||||
}
|
|
@ -12,7 +12,6 @@ import javax.persistence.Entity;
|
|||
import javax.persistence.EntityGraph;
|
||||
import javax.persistence.EntityManager;
|
||||
import javax.persistence.FetchType;
|
||||
import javax.persistence.GeneratedValue;
|
||||
import javax.persistence.Id;
|
||||
import javax.persistence.MapsId;
|
||||
import javax.persistence.OneToOne;
|
||||
|
|
|
@ -6,7 +6,6 @@
|
|||
*/
|
||||
package org.hibernate.jpa.test.graphs.queryhint;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
|
@ -17,15 +16,10 @@ import javax.persistence.Query;
|
|||
import javax.persistence.Subgraph;
|
||||
|
||||
import org.hibernate.Hibernate;
|
||||
import org.hibernate.Session;
|
||||
import org.hibernate.jpa.QueryHints;
|
||||
import org.hibernate.jpa.test.BaseEntityManagerFunctionalTestCase;
|
||||
import org.hibernate.jpa.test.graphs.Company;
|
||||
import org.hibernate.jpa.test.graphs.Course;
|
||||
import org.hibernate.jpa.test.graphs.Employee;
|
||||
import org.hibernate.jpa.test.graphs.Location;
|
||||
import org.hibernate.jpa.test.graphs.Manager;
|
||||
import org.hibernate.jpa.test.graphs.Market;
|
||||
import org.hibernate.jpa.test.graphs.Student;
|
||||
import org.hibernate.jpa.test.graphs.*;
|
||||
|
||||
import org.hibernate.testing.TestForIssue;
|
||||
import org.junit.Before;
|
||||
|
@ -34,18 +28,14 @@ import org.junit.Test;
|
|||
import static org.hibernate.testing.transaction.TransactionUtil.doInJPA;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertSame;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
/**
|
||||
* @author Brett Meyer
|
||||
* @author Nathan Xu
|
||||
*/
|
||||
public class QueryHintEntityGraphTest extends BaseEntityManagerFunctionalTestCase {
|
||||
|
||||
// TODO: Currently, "loadgraph" and "fetchgraph" operate identically in JPQL. The spec states that "fetchgraph"
|
||||
// shall use LAZY for non-specified attributes, ignoring their metadata. Changes to ToOne select vs. join,
|
||||
// allowing queries to force laziness, etc. will require changes here and impl logic.
|
||||
|
||||
@Test
|
||||
public void testLoadGraph() {
|
||||
EntityManager entityManager = getOrCreateEntityManager();
|
||||
|
@ -111,6 +101,138 @@ public class QueryHintEntityGraphTest extends BaseEntityManagerFunctionalTestCas
|
|||
assertTrue(foundManager);
|
||||
}
|
||||
|
||||
@Test
|
||||
@TestForIssue(jiraKey = "HHH-8776")
|
||||
public void testFetchGraph() {
|
||||
EntityManager entityManager = getOrCreateEntityManager();
|
||||
entityManager.getTransaction().begin();
|
||||
|
||||
EntityGraph<Company> entityGraph = entityManager.createEntityGraph( Company.class );
|
||||
entityGraph.addAttributeNodes( "location" );
|
||||
entityGraph.addAttributeNodes( "markets" );
|
||||
Query query = entityManager.createQuery( "from " + Company.class.getName() );
|
||||
query.setHint( QueryHints.HINT_FETCHGRAPH, entityGraph );
|
||||
Company company = (Company) query.getSingleResult();
|
||||
|
||||
entityManager.getTransaction().commit();
|
||||
entityManager.close();
|
||||
|
||||
assertFalse( Hibernate.isInitialized( company.employees ) );
|
||||
assertTrue( Hibernate.isInitialized( company.location ) );
|
||||
assertTrue( Hibernate.isInitialized( company.markets ) );
|
||||
// With "fetchgraph", non-specified attributes effect 'lazy' mode. So, here,
|
||||
// @ElementCollection(fetch = FetchType.EAGER) should not be initialized.
|
||||
assertFalse( Hibernate.isInitialized( company.phoneNumbers ) );
|
||||
|
||||
entityManager = getOrCreateEntityManager();
|
||||
entityManager.getTransaction().begin();
|
||||
|
||||
Subgraph<Employee> subgraph = entityGraph.addSubgraph( "employees" );
|
||||
subgraph.addAttributeNodes( "managers" );
|
||||
subgraph.addAttributeNodes( "friends" );
|
||||
Subgraph<Manager> subSubgraph = subgraph.addSubgraph( "managers", Manager.class );
|
||||
subSubgraph.addAttributeNodes( "managers" );
|
||||
subSubgraph.addAttributeNodes( "friends" );
|
||||
|
||||
query = entityManager.createQuery( "from " + Company.class.getName() );
|
||||
query.setHint( QueryHints.HINT_FETCHGRAPH, entityGraph );
|
||||
company = (Company) query.getSingleResult();
|
||||
|
||||
entityManager.getTransaction().commit();
|
||||
entityManager.close();
|
||||
|
||||
assertTrue( Hibernate.isInitialized( company.employees ) );
|
||||
assertTrue( Hibernate.isInitialized( company.location ) );
|
||||
assertEquals( 12345, company.location.zip );
|
||||
assertTrue( Hibernate.isInitialized( company.markets ) );
|
||||
// With "fetchgraph", non-specified attributes effect 'lazy' mode. So, here,
|
||||
// @ElementCollection(fetch = FetchType.EAGER) should not be initialized.
|
||||
assertFalse( Hibernate.isInitialized( company.phoneNumbers ) );
|
||||
|
||||
boolean foundManager = false;
|
||||
Iterator<Employee> employeeItr = company.employees.iterator();
|
||||
while (employeeItr.hasNext()) {
|
||||
Employee employee = employeeItr.next();
|
||||
assertTrue( Hibernate.isInitialized( employee.managers ) );
|
||||
assertTrue( Hibernate.isInitialized( employee.friends ) );
|
||||
// test 1 more level
|
||||
Iterator<Manager> managerItr = employee.managers.iterator();
|
||||
while (managerItr.hasNext()) {
|
||||
foundManager = true;
|
||||
Manager manager = managerItr.next();
|
||||
assertTrue( Hibernate.isInitialized( manager.managers ) );
|
||||
assertTrue( Hibernate.isInitialized( manager.friends ) );
|
||||
}
|
||||
}
|
||||
assertTrue(foundManager);
|
||||
}
|
||||
|
||||
@Test
|
||||
@TestForIssue(jiraKey = "HHH-8776")
|
||||
public void testFetchGraphTakingPrecedenceOverFetchProfile() {
|
||||
EntityManager entityManager = getOrCreateEntityManager();
|
||||
entityManager.getTransaction().begin();
|
||||
|
||||
entityManager.unwrap( Session.class ).enableFetchProfile( "company.location" );
|
||||
|
||||
EntityGraph<CompanyWithFetchProfile> entityGraph = entityManager.createEntityGraph( CompanyWithFetchProfile.class );
|
||||
entityGraph.addAttributeNodes( "markets" );
|
||||
Query query = entityManager.createQuery( "from " + CompanyWithFetchProfile.class.getName() );
|
||||
query.setHint( QueryHints.HINT_FETCHGRAPH, entityGraph );
|
||||
CompanyWithFetchProfile company = (CompanyWithFetchProfile) query.getSingleResult();
|
||||
|
||||
entityManager.getTransaction().commit();
|
||||
entityManager.close();
|
||||
|
||||
assertFalse( Hibernate.isInitialized( company.employees ) );
|
||||
assertFalse( Hibernate.isInitialized( company.location ) ); // should be initialized if 'company.location' fetch profile takes effect
|
||||
assertTrue( Hibernate.isInitialized( company.markets ) );
|
||||
// With "fetchgraph", non-specified attributes effect 'lazy' mode. So, here,
|
||||
// @ElementCollection(fetch = FetchType.EAGER) should not be initialized.
|
||||
assertFalse( Hibernate.isInitialized( company.phoneNumbers ) );
|
||||
|
||||
entityManager = getOrCreateEntityManager();
|
||||
entityManager.getTransaction().begin();
|
||||
|
||||
Subgraph<Employee> subgraph = entityGraph.addSubgraph( "employees" );
|
||||
subgraph.addAttributeNodes( "managers" );
|
||||
subgraph.addAttributeNodes( "friends" );
|
||||
Subgraph<Manager> subSubgraph = subgraph.addSubgraph( "managers", Manager.class );
|
||||
subSubgraph.addAttributeNodes( "managers" );
|
||||
subSubgraph.addAttributeNodes( "friends" );
|
||||
|
||||
query = entityManager.createQuery( "from " + CompanyWithFetchProfile.class.getName() );
|
||||
query.setHint( QueryHints.HINT_FETCHGRAPH, entityGraph );
|
||||
company = (CompanyWithFetchProfile) query.getSingleResult();
|
||||
|
||||
entityManager.getTransaction().commit();
|
||||
entityManager.close();
|
||||
|
||||
assertTrue( Hibernate.isInitialized( company.employees ) );
|
||||
assertFalse( Hibernate.isInitialized( company.location ) ); // should be initialized if 'company.location' fetch profile takes effect
|
||||
assertTrue( Hibernate.isInitialized( company.markets ) );
|
||||
// With "fetchgraph", non-specified attributes effect 'lazy' mode. So, here,
|
||||
// @ElementCollection(fetch = FetchType.EAGER) should not be initialized.
|
||||
assertFalse( Hibernate.isInitialized( company.phoneNumbers ) );
|
||||
|
||||
boolean foundManager = false;
|
||||
Iterator<Employee> employeeItr = company.employees.iterator();
|
||||
while (employeeItr.hasNext()) {
|
||||
Employee employee = employeeItr.next();
|
||||
assertTrue( Hibernate.isInitialized( employee.managers ) );
|
||||
assertTrue( Hibernate.isInitialized( employee.friends ) );
|
||||
// test 1 more level
|
||||
Iterator<Manager> managerItr = employee.managers.iterator();
|
||||
while (managerItr.hasNext()) {
|
||||
foundManager = true;
|
||||
Manager manager = managerItr.next();
|
||||
assertTrue( Hibernate.isInitialized( manager.managers ) );
|
||||
assertTrue( Hibernate.isInitialized( manager.friends ) );
|
||||
}
|
||||
}
|
||||
assertTrue(foundManager);
|
||||
}
|
||||
|
||||
@Test
|
||||
@TestForIssue( jiraKey = "HHH-9457")
|
||||
public void testLoadGraphOrderByWithImplicitJoin() {
|
||||
|
@ -385,6 +507,17 @@ public class QueryHintEntityGraphTest extends BaseEntityManagerFunctionalTestCas
|
|||
company.phoneNumbers.add( "012-345-6789" );
|
||||
company.phoneNumbers.add( "987-654-3210" );
|
||||
entityManager.persist( company );
|
||||
|
||||
CompanyWithFetchProfile companyWithFetchProfile = new CompanyWithFetchProfile();
|
||||
companyWithFetchProfile.employees.add( employee );
|
||||
companyWithFetchProfile.employees.add( manager1 );
|
||||
companyWithFetchProfile.employees.add( manager2 );
|
||||
companyWithFetchProfile.location = location;
|
||||
companyWithFetchProfile.markets.add( Market.SERVICES );
|
||||
companyWithFetchProfile.markets.add( Market.TECHNOLOGY );
|
||||
companyWithFetchProfile.phoneNumbers.add( "012-345-6789" );
|
||||
companyWithFetchProfile.phoneNumbers.add( "987-654-3210" );
|
||||
entityManager.persist( companyWithFetchProfile );
|
||||
|
||||
entityManager.getTransaction().commit();
|
||||
entityManager.close();
|
||||
|
@ -392,6 +525,6 @@ public class QueryHintEntityGraphTest extends BaseEntityManagerFunctionalTestCas
|
|||
|
||||
@Override
|
||||
protected Class<?>[] getAnnotatedClasses() {
|
||||
return new Class<?>[] { Company.class, Employee.class, Manager.class, Location.class, Course.class, Student.class };
|
||||
return new Class<?>[] { Company.class, CompanyWithFetchProfile.class, Employee.class, Manager.class, Location.class, Course.class, Student.class };
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,188 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.jpa.test.jointable;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.Collections;
|
||||
import java.util.Objects;
|
||||
import javax.persistence.Embeddable;
|
||||
import javax.persistence.EmbeddedId;
|
||||
import javax.persistence.Entity;
|
||||
import javax.persistence.Inheritance;
|
||||
import javax.persistence.InheritanceType;
|
||||
import javax.persistence.JoinColumn;
|
||||
import javax.persistence.JoinColumns;
|
||||
import javax.persistence.ManyToOne;
|
||||
import javax.persistence.NamedQuery;
|
||||
import javax.persistence.SecondaryTable;
|
||||
|
||||
import org.hibernate.cfg.Configuration;
|
||||
import org.hibernate.engine.query.spi.HQLQueryPlan;
|
||||
import org.hibernate.hql.spi.QueryTranslator;
|
||||
|
||||
import org.hibernate.testing.junit4.BaseCoreFunctionalTestCase;
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.hibernate.testing.transaction.TransactionUtil.doInHibernate;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
|
||||
/**
|
||||
* @author Christian Beikov
|
||||
*/
|
||||
public class ManyToOneJoinTableTest extends BaseCoreFunctionalTestCase {
|
||||
@Override
|
||||
protected Class<?>[] getAnnotatedClasses() {
|
||||
return new Class[] {
|
||||
Person.class,
|
||||
Address.class,
|
||||
ResourceImpl.class,
|
||||
IssuerImpl.class
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void configure(Configuration configuration) {
|
||||
super.configure( configuration );
|
||||
// configuration.setProperty(AvailableSettings.OMIT_JOIN_OF_SUPERCLASS_TABLES, Boolean.FALSE.toString());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAvoidJoin() {
|
||||
final HQLQueryPlan plan = sessionFactory().getQueryPlanCache().getHQLQueryPlan(
|
||||
"SELECT e.id FROM Person e",
|
||||
false,
|
||||
Collections.EMPTY_MAP
|
||||
);
|
||||
assertEquals( 1, plan.getTranslators().length );
|
||||
final QueryTranslator translator = plan.getTranslators()[0];
|
||||
final String generatedSql = translator.getSQLString();
|
||||
// Ideally, we could detect that *ToOne join tables aren't used, but that requires tracking the uses of properties
|
||||
// Since *ToOne join tables are treated like secondary or subclass/superclass tables, the proper fix will allow many more optimizations
|
||||
assertFalse( generatedSql.contains( "join" ) );
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRegression() {
|
||||
doInHibernate( this::sessionFactory, session -> {
|
||||
session.createNamedQuery( IssuerImpl.SELECT_RESOURCES_BY_ISSUER )
|
||||
.setParameter( "issuer", session.getReference( IssuerImpl.class, new Identifier( 1l, "ABC" ) ) )
|
||||
.getResultList();
|
||||
} );
|
||||
}
|
||||
|
||||
public interface Issuer extends Resource {
|
||||
}
|
||||
|
||||
@Entity(name = IssuerImpl.ENTITY_NAME)
|
||||
@SecondaryTable(name = IssuerImpl.TABLE_NAME)
|
||||
@NamedQuery(name = IssuerImpl.SELECT_RESOURCES_BY_ISSUER, query = "SELECT resource.identifier FROM " + ResourceImpl.ENTITY_NAME + " resource WHERE resource.identifier.issuer IN (SELECT issuer.identifier.issuer FROM " + IssuerImpl.ENTITY_NAME + " issuer WHERE issuer.parentIssuer = :issuer OR issuer = :issuer)")
|
||||
public static class IssuerImpl extends ResourceImpl implements Issuer {
|
||||
|
||||
private static final String SELECT_RESOURCES_BY_ISSUER = "SELECT_RESOURCES_BY_ISSUER";
|
||||
private static final String ENTITY_NAME = "Issuer";
|
||||
public static final String PARENT_ISSUER_COLUMN = "parent_issuer";
|
||||
public static final String PARENT_IDENTIFIER_COLUMN = "parent_identifier";
|
||||
public static final String TABLE_NAME = "issuer_impl";
|
||||
|
||||
@ManyToOne(targetEntity = IssuerImpl.class)
|
||||
@JoinColumns({
|
||||
@JoinColumn(name = PARENT_ISSUER_COLUMN, table = TABLE_NAME, referencedColumnName = "issuer"),
|
||||
@JoinColumn(name = PARENT_IDENTIFIER_COLUMN, table = TABLE_NAME, referencedColumnName = "identifier")
|
||||
})
|
||||
private Issuer parentIssuer;
|
||||
|
||||
public Identifier getIdentifier() {
|
||||
return identifier;
|
||||
}
|
||||
|
||||
public void setIdentifier(Identifier identifier) {
|
||||
this.identifier = identifier;
|
||||
}
|
||||
|
||||
public Issuer getParentIssuer() {
|
||||
return parentIssuer;
|
||||
}
|
||||
|
||||
public void setParentIssuer(Issuer parentIssuer) {
|
||||
this.parentIssuer = parentIssuer;
|
||||
}
|
||||
}
|
||||
|
||||
@Embeddable
|
||||
public static class Identifier implements Serializable {
|
||||
Long issuer;
|
||||
String identifier;
|
||||
|
||||
public Long getIssuer() {
|
||||
return issuer;
|
||||
}
|
||||
|
||||
public void setIssuer(Long issuer) {
|
||||
this.issuer = issuer;
|
||||
}
|
||||
|
||||
public String getIdentifier() {
|
||||
return identifier;
|
||||
}
|
||||
|
||||
public void setIdentifier(String identifier) {
|
||||
this.identifier = identifier;
|
||||
}
|
||||
|
||||
public Identifier() {
|
||||
|
||||
}
|
||||
|
||||
public Identifier(Long issuer, String identifier) {
|
||||
this.issuer = issuer;
|
||||
this.identifier = identifier;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if ( this == o ) {
|
||||
return true;
|
||||
}
|
||||
if ( o == null || getClass() != o.getClass() ) {
|
||||
return false;
|
||||
}
|
||||
Identifier that = (Identifier) o;
|
||||
return Objects.equals( issuer, that.issuer ) &&
|
||||
Objects.equals( identifier, that.identifier );
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash( issuer, identifier );
|
||||
}
|
||||
}
|
||||
|
||||
public interface Resource {
|
||||
|
||||
}
|
||||
|
||||
@Entity(name = ResourceImpl.ENTITY_NAME)
|
||||
@Inheritance(strategy = InheritanceType.SINGLE_TABLE)
|
||||
public static class ResourceImpl implements Resource {
|
||||
|
||||
private static final String ENTITY_NAME = "TestResource";
|
||||
|
||||
@EmbeddedId
|
||||
Identifier identifier;
|
||||
|
||||
public Identifier getIdentifier() {
|
||||
return identifier;
|
||||
}
|
||||
|
||||
public void setIdentifier(Identifier identifier) {
|
||||
this.identifier = identifier;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
}
|
|
@ -0,0 +1,97 @@
|
|||
package org.hibernate.query;
|
||||
|
||||
import org.hibernate.jpa.test.BaseEntityManagerFunctionalTestCase;
|
||||
import org.hibernate.testing.TestForIssue;
|
||||
import org.junit.Test;
|
||||
|
||||
import javax.persistence.*;
|
||||
import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static org.hibernate.testing.transaction.TransactionUtil.doInJPA;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
/**
|
||||
* @author Nathan Xu
|
||||
*/
|
||||
@TestForIssue(jiraKey = "HHH-6686")
|
||||
public class IsEmptyJQLTest extends BaseEntityManagerFunctionalTestCase {
|
||||
|
||||
private Long personWithoutNicknameId = 1L;
|
||||
private Long personaWithSingleNicknameId = 2L;
|
||||
private Long personWithMultipleNicknamesId = 3L;
|
||||
|
||||
@Override
|
||||
public Class<?>[] getAnnotatedClasses() {
|
||||
return new Class[] { Person.class };
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testJQLContainingEmpty() {
|
||||
List<Person> personWithNicknames = doInJPA( this::entityManagerFactory, entityManager -> {
|
||||
return entityManager.createQuery(
|
||||
"select p from Person p where p.nicknames is not empty", Person.class )
|
||||
.getResultList();
|
||||
});
|
||||
|
||||
assertEquals( new HashSet<>( Arrays.asList(personaWithSingleNicknameId, personWithMultipleNicknamesId)),
|
||||
personWithNicknames.stream().map( Person::getId ).collect( Collectors.toSet() ));
|
||||
|
||||
List<Person> personWithOutNickname = doInJPA( this::entityManagerFactory, entityManager -> {
|
||||
return entityManager.createQuery(
|
||||
"select p from Person p where p.nicknames is empty", Person.class )
|
||||
.getResultList();
|
||||
});
|
||||
|
||||
assertEquals( Collections.singleton(personWithoutNicknameId),
|
||||
personWithOutNickname.stream().map( Person::getId ).collect( Collectors.toSet() ));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void afterEntityManagerFactoryBuilt() {
|
||||
doInJPA( this::entityManagerFactory, entityManager -> {
|
||||
Person personaWithoutNickname = new Person();
|
||||
personaWithoutNickname.setId(personWithoutNicknameId);
|
||||
|
||||
Person personaWithSingleNickname = new Person();
|
||||
personaWithSingleNickname.getNicknames().add( "nickname" );
|
||||
personaWithSingleNickname.setId(personaWithSingleNicknameId);
|
||||
|
||||
Person personWithMultipleNicknames = new Person();
|
||||
personWithMultipleNicknames.getNicknames().addAll( Arrays.asList( "nickName1", "nickName2" ) );
|
||||
personWithMultipleNicknames.setId(personWithMultipleNicknamesId);
|
||||
|
||||
entityManager.persist( personaWithoutNickname );
|
||||
entityManager.persist( personaWithSingleNickname );
|
||||
entityManager.persist( personWithMultipleNicknames );
|
||||
} );
|
||||
}
|
||||
|
||||
@Entity(name = "Person")
|
||||
public static class Person {
|
||||
|
||||
@Id
|
||||
private Long id;
|
||||
|
||||
@ElementCollection
|
||||
private List<String> nicknames = new ArrayList<>();
|
||||
|
||||
public Long getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
public void setId(Long id) {
|
||||
this.id = id;
|
||||
}
|
||||
|
||||
public List<String> getNicknames() {
|
||||
return nicknames;
|
||||
}
|
||||
|
||||
public void setNicknames(List<String> nicknames) {
|
||||
this.nicknames = nicknames;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -0,0 +1,84 @@
|
|||
package org.hibernate.test.dialect.functional;
|
||||
|
||||
import java.sql.Connection;
|
||||
import java.sql.DriverManager;
|
||||
import java.sql.ResultSet;
|
||||
import java.sql.SQLException;
|
||||
import java.sql.Statement;
|
||||
|
||||
import org.hibernate.cfg.Environment;
|
||||
import org.hibernate.dialect.MariaDB103Dialect;
|
||||
import org.hibernate.engine.jdbc.env.spi.JdbcEnvironment;
|
||||
|
||||
import org.hibernate.testing.BeforeClassOnce;
|
||||
import org.hibernate.testing.RequiresDialect;
|
||||
import org.hibernate.testing.TestForIssue;
|
||||
import org.hibernate.testing.junit4.BaseCoreFunctionalTestCase;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
|
||||
/**
|
||||
* @author Nathan Xu
|
||||
*/
|
||||
@RequiresDialect(MariaDB103Dialect.class)
|
||||
public class MariaDBExtractSequenceMatadataTest extends BaseCoreFunctionalTestCase {
|
||||
|
||||
private static String primaryDbName;
|
||||
private static String primarySequenceName = "seq_HHH13373";
|
||||
|
||||
private static String secondaryDbName = "secondary_db_HHH13373";
|
||||
private static String secondarySequenceName = "secondary_seq_HHH13373";
|
||||
|
||||
@BeforeClassOnce
|
||||
public static void setUpDBs() throws Exception {
|
||||
try (Connection conn = getConnection()) {
|
||||
try (Statement stmt = conn.createStatement()) {
|
||||
try (ResultSet resultSet = stmt.executeQuery( "SELECT DATABASE()" )) {
|
||||
assert resultSet.next();
|
||||
primaryDbName = resultSet.getString( 1 );
|
||||
}
|
||||
stmt.execute( "CREATE DATABASE IF NOT EXISTS " + secondaryDbName );
|
||||
stmt.execute( "USE " + secondaryDbName );
|
||||
stmt.execute( "CREATE SEQUENCE IF NOT EXISTS " + secondarySequenceName );
|
||||
stmt.execute( "USE " + primaryDbName );
|
||||
stmt.execute( "DROP SEQUENCE IF EXISTS " + secondarySequenceName );
|
||||
stmt.execute( "CREATE SEQUENCE IF NOT EXISTS " + primarySequenceName );
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
@TestForIssue(jiraKey = "HHH-13373")
|
||||
public void testHibernateLaunchedSuccessfully() {
|
||||
JdbcEnvironment jdbcEnvironment = serviceRegistry().getService( JdbcEnvironment.class );
|
||||
Assert.assertFalse( jdbcEnvironment.getExtractedDatabaseMetaData().getSequenceInformationList().isEmpty() );
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void tearDownDBs() throws SQLException {
|
||||
try (Connection conn = getConnection()) {
|
||||
try (Statement stmt = conn.createStatement()) {
|
||||
stmt.execute( "DROP DATABASE IF EXISTS " + secondaryDbName );
|
||||
}
|
||||
catch (Exception e) {
|
||||
// Ignore
|
||||
}
|
||||
|
||||
try (Statement stmt = conn.createStatement()) {
|
||||
stmt.execute( "DROP SEQUENCE IF EXISTS " + primarySequenceName );
|
||||
}
|
||||
catch (Exception e) {
|
||||
// Ignore
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static Connection getConnection() throws SQLException {
|
||||
String url = Environment.getProperties().getProperty( Environment.URL );
|
||||
String user = Environment.getProperties().getProperty( Environment.USER );
|
||||
String password = Environment.getProperties().getProperty( Environment.PASS );
|
||||
return DriverManager.getConnection( url, user, password );
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,118 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.test.idgen.enhanced.sequence;
|
||||
|
||||
import java.sql.Connection;
|
||||
import java.sql.SQLException;
|
||||
import java.sql.Statement;
|
||||
import javax.persistence.Entity;
|
||||
import javax.persistence.GeneratedValue;
|
||||
import javax.persistence.GenerationType;
|
||||
import javax.persistence.Id;
|
||||
|
||||
import org.hibernate.annotations.GenericGenerator;
|
||||
import org.hibernate.annotations.Parameter;
|
||||
import org.hibernate.cfg.AvailableSettings;
|
||||
import org.hibernate.cfg.Configuration;
|
||||
import org.hibernate.cfg.Environment;
|
||||
import org.hibernate.engine.jdbc.connections.internal.DriverManagerConnectionProviderImpl;
|
||||
import org.hibernate.id.SequenceMismatchStrategy;
|
||||
import org.hibernate.id.enhanced.HiLoOptimizer;
|
||||
import org.hibernate.id.enhanced.SequenceStyleGenerator;
|
||||
import org.hibernate.persister.entity.EntityPersister;
|
||||
|
||||
import org.hibernate.testing.DialectChecks;
|
||||
import org.hibernate.testing.RequiresDialectFeature;
|
||||
import org.hibernate.testing.TestForIssue;
|
||||
import org.hibernate.testing.junit4.BaseCoreFunctionalTestCase;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.hibernate.testing.junit4.ExtraAssertions.assertClassAssignability;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
/**
|
||||
* @author Nathan Xu
|
||||
*/
|
||||
@TestForIssue(jiraKey = "HHH-13783")
|
||||
@RequiresDialectFeature(DialectChecks.SupportsSequences.class)
|
||||
public class HiLoSequenceMismatchStrategyTest extends BaseCoreFunctionalTestCase {
|
||||
|
||||
public final static String sequenceName = "ID_SEQ_HILO_SEQ";
|
||||
|
||||
@Override
|
||||
protected Class<?>[] getAnnotatedClasses() {
|
||||
return new Class[]{ TestEntity.class };
|
||||
}
|
||||
|
||||
protected void configure(Configuration configuration) {
|
||||
configuration.setProperty(
|
||||
AvailableSettings.SEQUENCE_INCREMENT_SIZE_MISMATCH_STRATEGY,
|
||||
SequenceMismatchStrategy.EXCEPTION.toString() );
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void afterConfigurationBuilt(Configuration configuration) {
|
||||
DriverManagerConnectionProviderImpl connectionProvider = new DriverManagerConnectionProviderImpl();
|
||||
connectionProvider.configure( Environment.getProperties() );
|
||||
|
||||
String[] dropSequenceStatements = getDialect().getDropSequenceStrings( sequenceName );
|
||||
String[] createSequenceStatements = getDialect().getCreateSequenceStrings( sequenceName, 1, 1 );
|
||||
|
||||
try ( Connection connection = connectionProvider.getConnection();
|
||||
Statement statement = connection.createStatement() ) {
|
||||
|
||||
for ( String dropSequenceStatement : dropSequenceStatements ) {
|
||||
try {
|
||||
statement.execute( dropSequenceStatement );
|
||||
}
|
||||
catch (SQLException e) {
|
||||
// ignore
|
||||
}
|
||||
}
|
||||
|
||||
for ( String createSequenceStatement : createSequenceStatements ) {
|
||||
statement.execute( createSequenceStatement );
|
||||
}
|
||||
}
|
||||
catch (SQLException e) {
|
||||
fail( e.getMessage() );
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSequenceMismatchStrategyNotApplied() {
|
||||
EntityPersister persister = sessionFactory().getEntityPersister( TestEntity.class.getName() );
|
||||
assertClassAssignability( SequenceStyleGenerator.class, persister.getIdentifierGenerator().getClass() );
|
||||
|
||||
SequenceStyleGenerator generator = (SequenceStyleGenerator) persister.getIdentifierGenerator();
|
||||
assertClassAssignability( HiLoOptimizer.class, generator.getOptimizer().getClass() );
|
||||
|
||||
String sequenceName = generator.getDatabaseStructure().getName();
|
||||
Assert.assertEquals( this.sequenceName, sequenceName );
|
||||
|
||||
int incrementSize = generator.getOptimizer().getIncrementSize();
|
||||
Assert.assertNotEquals( 1, incrementSize );
|
||||
}
|
||||
|
||||
@Entity(name = "TestEntity")
|
||||
public static class TestEntity {
|
||||
|
||||
@Id
|
||||
@GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "hilo_sequence_generator")
|
||||
@GenericGenerator(name = "hilo_sequence_generator", strategy = "org.hibernate.id.enhanced.SequenceStyleGenerator", parameters = {
|
||||
@Parameter(name = "sequence_name", value = sequenceName),
|
||||
@Parameter(name = "initial_value", value = "1"),
|
||||
@Parameter(name = "increment_size", value = "10"),
|
||||
@Parameter(name = "optimizer", value = "hilo")
|
||||
})
|
||||
private Long id;
|
||||
|
||||
private String aString;
|
||||
}
|
||||
|
||||
}
|
|
@ -40,10 +40,11 @@ public class LegacyModifiedColumnNamingStrategy implements ModifiedColumnNamingS
|
|||
else {
|
||||
columnName = propertyAuditingData.getModifiedFlagName();
|
||||
}
|
||||
MetadataTools.addModifiedFlagProperty(
|
||||
MetadataTools.addModifiedFlagPropertyWithColumn(
|
||||
parent,
|
||||
propertyAuditingData.getName(),
|
||||
globalCfg.getModifiedFlagSuffix(),
|
||||
propertyAuditingData.getModifiedFlagName(),
|
||||
columnName
|
||||
);
|
||||
}
|
||||
|
|
|
@ -422,6 +422,14 @@ public class RevisionInfoConfiguration {
|
|||
revisionInfoXmlMapping = generateDefaultRevisionInfoXmlMapping();
|
||||
}
|
||||
|
||||
final RevisionInfoNumberReader revisionInfoNumberReader = new RevisionInfoNumberReader(
|
||||
revisionInfoClass,
|
||||
revisionInfoIdData,
|
||||
metadata.getMetadataBuildingOptions().getServiceRegistry()
|
||||
);
|
||||
|
||||
revisionInfoGenerator.setRevisionInfoNumberReader( revisionInfoNumberReader );
|
||||
|
||||
return new RevisionInfoConfigurationResult(
|
||||
revisionInfoGenerator, revisionInfoXmlMapping,
|
||||
new RevisionInfoQueryCreator(
|
||||
|
@ -429,7 +437,7 @@ public class RevisionInfoConfiguration {
|
|||
revisionInfoTimestampData.getName(), isTimestampAsDate()
|
||||
),
|
||||
generateRevisionInfoRelationMapping(),
|
||||
new RevisionInfoNumberReader( revisionInfoClass, revisionInfoIdData, metadata.getMetadataBuildingOptions().getServiceRegistry() ),
|
||||
revisionInfoNumberReader,
|
||||
globalCfg.isTrackEntitiesChangedInRevision()
|
||||
? new ModifiedEntityNamesReader( revisionInfoClass, modifiedEntityNamesData, metadata.getMetadataBuildingOptions().getServiceRegistry() )
|
||||
: null,
|
||||
|
|
|
@ -110,7 +110,7 @@ public final class AuditMetadataGenerator {
|
|||
this.auditStrategy = auditStrategy;
|
||||
this.revisionInfoRelationMapping = revisionInfoRelationMapping;
|
||||
|
||||
this.basicMetadataGenerator = new BasicMetadataGenerator();
|
||||
this.basicMetadataGenerator = new BasicMetadataGenerator( this );
|
||||
this.componentMetadataGenerator = new ComponentMetadataGenerator( this );
|
||||
this.idMetadataGenerator = new IdMetadataGenerator( this );
|
||||
this.toOneRelationMetadataGenerator = new ToOneRelationMetadataGenerator( this );
|
||||
|
@ -460,7 +460,7 @@ public final class AuditMetadataGenerator {
|
|||
}
|
||||
|
||||
final Element joinKey = joinElement.addElement( "key" );
|
||||
MetadataTools.addColumns( joinKey, join.getKey().getColumnIterator() );
|
||||
MetadataTools.addColumns( joinKey, join.getKey().getColumnIterator(), metadata );
|
||||
MetadataTools.addColumn( joinKey, verEntCfg.getRevisionFieldName(), null, null, null, null, null, null );
|
||||
}
|
||||
}
|
||||
|
@ -509,7 +509,7 @@ public final class AuditMetadataGenerator {
|
|||
if ( pc.getDiscriminator() != null ) {
|
||||
final Element discriminatorElement = classMapping.addElement( "discriminator" );
|
||||
// Database column or SQL formula allowed to distinguish entity types
|
||||
MetadataTools.addColumnsOrFormulas( discriminatorElement, pc.getDiscriminator().getColumnIterator() );
|
||||
MetadataTools.addColumnsOrFormulas( discriminatorElement, pc.getDiscriminator().getColumnIterator(), metadata );
|
||||
discriminatorElement.addAttribute( "type", pc.getDiscriminator().getType().getName() );
|
||||
}
|
||||
|
||||
|
@ -633,7 +633,7 @@ public final class AuditMetadataGenerator {
|
|||
|
||||
// Adding the "key" element with all id columns...
|
||||
final Element keyMapping = mappingData.getFirst().addElement( "key" );
|
||||
MetadataTools.addColumns( keyMapping, pc.getTable().getPrimaryKey().columnIterator() );
|
||||
MetadataTools.addColumns( keyMapping, pc.getTable().getPrimaryKey().columnIterator(), metadata );
|
||||
|
||||
// ... and the revision number column, read from the revision info relation mapping.
|
||||
keyMapping.add( (Element) cloneAndSetupRevisionInfoRelationMapping().element( "column" ).clone() );
|
||||
|
|
|
@ -8,6 +8,7 @@ package org.hibernate.envers.configuration.internal.metadata;
|
|||
|
||||
import java.util.Properties;
|
||||
|
||||
import org.hibernate.boot.Metadata;
|
||||
import org.hibernate.envers.configuration.internal.metadata.reader.PropertyAuditingData;
|
||||
import org.hibernate.envers.internal.entities.PropertyData;
|
||||
import org.hibernate.envers.internal.entities.mapper.SimpleMapperBuilder;
|
||||
|
@ -28,6 +29,12 @@ import org.dom4j.Element;
|
|||
*/
|
||||
public final class BasicMetadataGenerator {
|
||||
|
||||
private final Metadata metadata;
|
||||
|
||||
public BasicMetadataGenerator(AuditMetadataGenerator mainGenerator) {
|
||||
this.metadata = mainGenerator.getMetadata();
|
||||
}
|
||||
|
||||
boolean addBasic(
|
||||
Element parent,
|
||||
PropertyAuditingData propertyAuditingData,
|
||||
|
@ -109,7 +116,7 @@ public final class BasicMetadataGenerator {
|
|||
key
|
||||
);
|
||||
|
||||
MetadataTools.addColumns( propMapping, value.getColumnIterator() );
|
||||
MetadataTools.addColumns( propMapping, value.getColumnIterator(), metadata );
|
||||
|
||||
return propMapping;
|
||||
}
|
||||
|
|
|
@ -277,7 +277,9 @@ public final class CollectionMetadataGenerator {
|
|||
// The mapper will only be used to map from entity to map, so no need to provide other details
|
||||
// when constructing the PropertyData.
|
||||
new PropertyData( auditMappedBy, null, null, null ),
|
||||
referencingEntityName, false
|
||||
referencingEntityName,
|
||||
false,
|
||||
false
|
||||
);
|
||||
|
||||
final String positionMappedBy;
|
||||
|
|
|
@ -313,7 +313,7 @@ public final class IdMetadataGenerator {
|
|||
// schema and the base table schema when a @ManyToOne is present in an identifier.
|
||||
manyToOneElement.addAttribute( "foreign-key", "none" );
|
||||
|
||||
MetadataTools.addColumns( manyToOneElement, value.getColumnIterator() );
|
||||
MetadataTools.addColumns( manyToOneElement, value.getColumnIterator(), mainGenerator.getMetadata() );
|
||||
|
||||
return true;
|
||||
}
|
||||
|
|
|
@ -9,6 +9,10 @@ package org.hibernate.envers.configuration.internal.metadata;
|
|||
import java.util.Iterator;
|
||||
import javax.persistence.JoinColumn;
|
||||
|
||||
import org.hibernate.boot.Metadata;
|
||||
import org.hibernate.dialect.Dialect;
|
||||
import org.hibernate.engine.spi.Mapping;
|
||||
import org.hibernate.envers.internal.EnversMessageLogger;
|
||||
import org.hibernate.envers.internal.tools.StringTools;
|
||||
import org.hibernate.mapping.Column;
|
||||
import org.hibernate.mapping.Formula;
|
||||
|
@ -18,12 +22,20 @@ import org.dom4j.Attribute;
|
|||
import org.dom4j.Document;
|
||||
import org.dom4j.Element;
|
||||
|
||||
import org.jboss.logging.Logger;
|
||||
|
||||
/**
|
||||
* @author Adam Warski (adam at warski dot org)
|
||||
* @author Lukasz Antoniak (lukasz dot antoniak at gmail dot com)
|
||||
* @author Michal Skowronek (mskowr at o2 dot pl)
|
||||
*/
|
||||
public final class MetadataTools {
|
||||
|
||||
private static final EnversMessageLogger LOG = Logger.getMessageLogger(
|
||||
EnversMessageLogger.class,
|
||||
MetadataTools.class.getName()
|
||||
);
|
||||
|
||||
private MetadataTools() {
|
||||
}
|
||||
|
||||
|
@ -285,38 +297,68 @@ public final class MetadataTools {
|
|||
return joinMapping;
|
||||
}
|
||||
|
||||
public static void addColumns(Element anyMapping, Iterator selectables) {
|
||||
public static void addColumns(Element anyMapping, Iterator<?> selectables, Metadata metadata) {
|
||||
addColumns( anyMapping, selectables, metadata, metadata.getDatabase().getDialect() );
|
||||
}
|
||||
|
||||
public static void addColumns(Element anyMapping, Iterator<?> selectables, Mapping mapping, Dialect dialect) {
|
||||
while ( selectables.hasNext() ) {
|
||||
final Selectable selectable = (Selectable) selectables.next();
|
||||
if ( selectable.isFormula() ) {
|
||||
throw new FormulaNotSupportedException();
|
||||
}
|
||||
addColumn( anyMapping, (Column) selectable );
|
||||
addColumn( anyMapping, (Column) selectable, mapping, dialect );
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds <code>column</code> element with the following attributes (unless empty): <code>name</code>,
|
||||
* <code>length</code>, <code>scale</code>, <code>precision</code>, <code>sql-type</code>, <code>read</code>
|
||||
* and <code>write</code>.
|
||||
* Adds {@code column} element with the following attributes (unless empty):
|
||||
* <ul>
|
||||
* <li>name</li>>
|
||||
* <li>length</li>
|
||||
* <li>scale</li>
|
||||
* <li>precision</li>
|
||||
* <li>sql-type</li>
|
||||
* <li>read</li>
|
||||
* <li>write</li>
|
||||
*
|
||||
* @param anyMapping Parent element.
|
||||
* @param column Column descriptor.
|
||||
* </ul>
|
||||
*
|
||||
* @param anyMapping parent element
|
||||
* @param column column descriptor
|
||||
* @param mapping the metadata mapping
|
||||
* @param dialect the dialect
|
||||
*/
|
||||
public static void addColumn(Element anyMapping, Column column) {
|
||||
public static void addColumn(Element anyMapping, Column column, Mapping mapping, Dialect dialect) {
|
||||
addColumn(
|
||||
anyMapping,
|
||||
column.getName(),
|
||||
column.getLength(),
|
||||
column.getScale(),
|
||||
column.getPrecision(),
|
||||
column.getSqlType(),
|
||||
resolveSqlType( column, mapping, dialect ),
|
||||
column.getCustomRead(),
|
||||
column.getCustomWrite(),
|
||||
column.isQuoted()
|
||||
);
|
||||
}
|
||||
|
||||
private static String resolveSqlType(Column column, Mapping mapping, Dialect dialect) {
|
||||
String columnDefinition = column.getSqlType();
|
||||
if ( !StringTools.isEmpty( columnDefinition ) ) {
|
||||
final int sqlTypeCode = column.getSqlTypeCode( mapping );
|
||||
final String sqlType = dialect.getTypeName( sqlTypeCode, column.getLength(), column.getPrecision(), column.getScale() );
|
||||
LOG.infof(
|
||||
"Column [%s] uses a column-definition of [%s], resolved sql-type as [%s].",
|
||||
column.getName(),
|
||||
columnDefinition,
|
||||
sqlType
|
||||
);
|
||||
columnDefinition = sqlType;
|
||||
}
|
||||
return columnDefinition;
|
||||
}
|
||||
|
||||
@SuppressWarnings({"unchecked"})
|
||||
private static void changeNamesInColumnElement(Element element, ColumnNameIterator columnNameIterator) {
|
||||
final Iterator<Element> properties = element.elementIterator();
|
||||
|
@ -392,12 +434,13 @@ public final class MetadataTools {
|
|||
* @param element Parent element.
|
||||
* @param columnIterator Iterator pointing at {@link org.hibernate.mapping.Column} and/or
|
||||
* {@link org.hibernate.mapping.Formula} objects.
|
||||
* @param metadata The boot-time entity model metadata
|
||||
*/
|
||||
public static void addColumnsOrFormulas(Element element, Iterator columnIterator) {
|
||||
public static void addColumnsOrFormulas(Element element, Iterator columnIterator, Metadata metadata) {
|
||||
while ( columnIterator.hasNext() ) {
|
||||
final Object o = columnIterator.next();
|
||||
if ( o instanceof Column ) {
|
||||
addColumn( element, (Column) o );
|
||||
addColumn( element, (Column) o, metadata, metadata.getDatabase().getDialect() );
|
||||
}
|
||||
else if ( o instanceof Formula ) {
|
||||
addFormula( element, (Formula) o );
|
||||
|
|
|
@ -28,6 +28,7 @@ import org.dom4j.Element;
|
|||
*
|
||||
* @author Adam Warski (adam at warski dot org)
|
||||
* @author Lukasz Antoniak (lukasz dot antoniak at gmail dot com)
|
||||
* @author Chris Cranford
|
||||
*/
|
||||
public final class ToOneRelationMetadataGenerator {
|
||||
private final AuditMetadataGenerator mainGenerator;
|
||||
|
@ -99,11 +100,13 @@ public final class ToOneRelationMetadataGenerator {
|
|||
parent.add( element );
|
||||
}
|
||||
|
||||
boolean lazy = ( (ToOne) value ).isLazy();
|
||||
|
||||
// Adding mapper for the id
|
||||
final PropertyData propertyData = propertyAuditingData.getPropertyData();
|
||||
mapper.addComposite(
|
||||
propertyData,
|
||||
new ToOneIdMapper( relMapper, propertyData, referencedEntityName, nonInsertableFake )
|
||||
new ToOneIdMapper( relMapper, propertyData, referencedEntityName, nonInsertableFake, lazy )
|
||||
);
|
||||
}
|
||||
|
||||
|
|
|
@ -591,7 +591,7 @@ public class AuditedPropertiesReader {
|
|||
propertyData.setRelationTargetAuditMode( aud.targetAuditMode() );
|
||||
propertyData.setUsingModifiedFlag( checkUsingModifiedFlag( aud ) );
|
||||
propertyData.setModifiedFlagName( MetadataTools.getModifiedFlagPropertyName( propertyName, modifiedFlagSuffix ) );
|
||||
if( aud.modifiedColumnName() != null && !"".equals( aud.modifiedColumnName() ) ) {
|
||||
if ( !StringTools.isEmpty( aud.modifiedColumnName() ) ) {
|
||||
propertyData.setExplicitModifiedFlagName( aud.modifiedColumnName() );
|
||||
}
|
||||
return true;
|
||||
|
|
|
@ -19,26 +19,31 @@ import org.hibernate.envers.internal.reader.AuditReaderImplementor;
|
|||
import org.hibernate.envers.internal.tools.EntityTools;
|
||||
import org.hibernate.envers.internal.tools.query.Parameters;
|
||||
import org.hibernate.persister.entity.EntityPersister;
|
||||
import org.hibernate.proxy.HibernateProxy;
|
||||
|
||||
/**
|
||||
* @author Adam Warski (adam at warski dot org)
|
||||
* @author HernпїЅn Chanfreau
|
||||
* @author Michal Skowronek (mskowr at o2 dot pl)
|
||||
* @author Chris Cranford
|
||||
*/
|
||||
public class ToOneIdMapper extends AbstractToOneMapper {
|
||||
private final IdMapper delegate;
|
||||
private final String referencedEntityName;
|
||||
private final boolean nonInsertableFake;
|
||||
private final boolean lazyMapping;
|
||||
|
||||
public ToOneIdMapper(
|
||||
IdMapper delegate,
|
||||
PropertyData propertyData,
|
||||
String referencedEntityName,
|
||||
boolean nonInsertableFake) {
|
||||
boolean nonInsertableFake,
|
||||
boolean lazyMapping) {
|
||||
super( delegate.getServiceRegistry(), propertyData );
|
||||
this.delegate = delegate;
|
||||
this.referencedEntityName = referencedEntityName;
|
||||
this.nonInsertableFake = nonInsertableFake;
|
||||
this.lazyMapping = lazyMapping;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -50,9 +55,14 @@ public class ToOneIdMapper extends AbstractToOneMapper {
|
|||
final HashMap<String, Object> newData = new HashMap<>();
|
||||
|
||||
// If this property is originally non-insertable, but made insertable because it is in a many-to-one "fake"
|
||||
// bi-directional relation, we always store the "old", unchaged data, to prevent storing changes made
|
||||
// bi-directional relation, we always store the "old", unchanged data, to prevent storing changes made
|
||||
// to this field. It is the responsibility of the collection to properly update it if it really changed.
|
||||
delegate.mapToMapFromEntity( newData, nonInsertableFake ? oldObj : newObj );
|
||||
Object entity = nonInsertableFake ? oldObj : newObj;
|
||||
if ( lazyMapping && entity instanceof HibernateProxy ) {
|
||||
entity = ( (HibernateProxy) entity ).getHibernateLazyInitializer().getImplementation();
|
||||
}
|
||||
|
||||
delegate.mapToMapFromEntity( newData, entity );
|
||||
|
||||
for ( Map.Entry<String, Object> entry : newData.entrySet() ) {
|
||||
data.put( entry.getKey(), entry.getValue() );
|
||||
|
|
|
@ -13,6 +13,7 @@ import org.hibernate.Session;
|
|||
import org.hibernate.envers.EntityTrackingRevisionListener;
|
||||
import org.hibernate.envers.RevisionListener;
|
||||
import org.hibernate.envers.RevisionType;
|
||||
import org.hibernate.envers.exception.AuditException;
|
||||
import org.hibernate.envers.internal.entities.PropertyData;
|
||||
import org.hibernate.envers.internal.synchronization.SessionCacheCleaner;
|
||||
import org.hibernate.envers.internal.tools.ReflectionTools;
|
||||
|
@ -35,6 +36,8 @@ public class DefaultRevisionInfoGenerator implements RevisionInfoGenerator {
|
|||
private final Constructor<?> revisionInfoClassConstructor;
|
||||
private final SessionCacheCleaner sessionCacheCleaner;
|
||||
|
||||
private RevisionInfoNumberReader revisionInfoNumberReader;
|
||||
|
||||
public DefaultRevisionInfoGenerator(
|
||||
String revisionInfoEntityName,
|
||||
Class<?> revisionInfoClass,
|
||||
|
@ -53,9 +56,17 @@ public class DefaultRevisionInfoGenerator implements RevisionInfoGenerator {
|
|||
this.sessionCacheCleaner = new SessionCacheCleaner();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setRevisionInfoNumberReader(RevisionInfoNumberReader revisionInfoNumberReader) {
|
||||
this.revisionInfoNumberReader = revisionInfoNumberReader;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void saveRevisionData(Session session, Object revisionData) {
|
||||
session.save( revisionInfoEntityName, revisionData );
|
||||
if ( revisionInfoNumberReader != null && revisionInfoNumberReader.getRevisionNumber( revisionData ).longValue() < 0 ) {
|
||||
throw new AuditException( "Negative revision numbers are not allowed" );
|
||||
}
|
||||
sessionCacheCleaner.scheduleAuditDataRemoval( session, revisionData );
|
||||
}
|
||||
|
||||
|
|
|
@ -13,6 +13,11 @@ import org.hibernate.envers.RevisionType;
|
|||
* @author Adam Warski (adam at warski dot org)
|
||||
*/
|
||||
public interface RevisionInfoGenerator {
|
||||
/**
|
||||
* Set the revision entity number reader instance.
|
||||
*/
|
||||
void setRevisionInfoNumberReader(RevisionInfoNumberReader revisionInfoNumberReader);
|
||||
|
||||
void saveRevisionData(Session session, Object revisionData);
|
||||
|
||||
Object generate();
|
||||
|
|
|
@ -0,0 +1,156 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.envers.test.integration.basic;
|
||||
|
||||
import javax.persistence.Column;
|
||||
import javax.persistence.Entity;
|
||||
import javax.persistence.GeneratedValue;
|
||||
import javax.persistence.Id;
|
||||
|
||||
import org.hibernate.annotations.Generated;
|
||||
import org.hibernate.annotations.GenerationTime;
|
||||
import org.hibernate.dialect.H2Dialect;
|
||||
import org.hibernate.envers.Audited;
|
||||
import org.hibernate.envers.test.BaseEnversJPAFunctionalTestCase;
|
||||
import org.hibernate.envers.test.Priority;
|
||||
import org.hibernate.mapping.Table;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.hibernate.testing.RequiresDialect;
|
||||
import org.hibernate.testing.TestForIssue;
|
||||
|
||||
import static org.hibernate.boot.model.naming.Identifier.toIdentifier;
|
||||
import static org.hibernate.mapping.Column.DEFAULT_LENGTH;
|
||||
import static org.hibernate.mapping.Column.DEFAULT_PRECISION;
|
||||
import static org.hibernate.mapping.Column.DEFAULT_SCALE;
|
||||
import static org.hibernate.testing.transaction.TransactionUtil.doInJPA;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
/**
|
||||
* This test verifies that resolving a column mapping's {@code sql-type} for HBM XML is performed
|
||||
* correctly such that when a column supplies a {@code columnDefinition}, Envers properly builds
|
||||
* its schema based on the right type rather than directly using the column definition as-is.
|
||||
*
|
||||
* The following illustrate some examples of expected transformations:
|
||||
*
|
||||
* <li>{@code @Column(columnDefinition = "varchar(10) not null")} => {@code sql-type = "varchar(255)"}</li>
|
||||
* <li>{@code @Column(length = 10, columnDefinition = "varchar(10) not null")} => {@code sql-type = "varchar(10)"}</li>
|
||||
* <li>{@code @Column(columnDefinition = "integer not null auto_increment")} => {@code sql-type = "integer"}</li>
|
||||
*
|
||||
* It is important to point out that resolving the sql-types length/precision/scale is all based on the
|
||||
* values supplied as part of the {@link Column} annotation itself and not what is in the definition text.
|
||||
*
|
||||
* @author Chris Cranford
|
||||
*/
|
||||
@TestForIssue(jiraKey = "HHH-10844")
|
||||
@RequiresDialect(value = H2Dialect.class)
|
||||
public class BasicTypeColumnDefinitionTest extends BaseEnversJPAFunctionalTestCase {
|
||||
@Override
|
||||
protected Class<?>[] getAnnotatedClasses() {
|
||||
return new Class<?>[] { BasicTypeContainer.class };
|
||||
}
|
||||
|
||||
@Test
|
||||
@Priority(10)
|
||||
public void testMetadataBindings() {
|
||||
final Table auditTable = metadata().getEntityBinding( BasicTypeContainer.class.getName() + "_AUD" ).getTable();
|
||||
|
||||
final org.hibernate.mapping.Column caseNumber = auditTable.getColumn( toIdentifier( "caseNumber" ) );
|
||||
assertEquals( "integer", caseNumber.getSqlType() );
|
||||
assertEquals( DEFAULT_LENGTH, caseNumber.getLength() );
|
||||
assertEquals( DEFAULT_PRECISION, caseNumber.getPrecision() );
|
||||
assertEquals( DEFAULT_SCALE, caseNumber.getScale() );
|
||||
|
||||
final org.hibernate.mapping.Column colDef = auditTable.getColumn( toIdentifier( "columnWithDefinition" ) );
|
||||
assertEquals( "varchar(10)", colDef.getSqlType() );
|
||||
assertEquals( 10, colDef.getLength() );
|
||||
assertEquals( DEFAULT_PRECISION, colDef.getPrecision() );
|
||||
assertEquals( DEFAULT_SCALE, colDef.getScale() );
|
||||
}
|
||||
|
||||
@Test
|
||||
@Priority(10)
|
||||
public void initData() {
|
||||
final BasicTypeContainer detachedEntity = doInJPA( this::entityManagerFactory, entityManager -> {
|
||||
final BasicTypeContainer entity = new BasicTypeContainer();
|
||||
entity.setData( "test" );
|
||||
entity.setColumnWithDefinition( "1234567890" );
|
||||
entityManager.persist( entity );
|
||||
return entity;
|
||||
} );
|
||||
|
||||
doInJPA( this::entityManagerFactory, entityManager -> {
|
||||
final BasicTypeContainer entity = entityManager.find( BasicTypeContainer.class, detachedEntity.getId() );
|
||||
entity.setData( "test2" );
|
||||
entityManager.merge( entity );
|
||||
} );
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRevisionHistory() {
|
||||
assertEquals( 2, getAuditReader().getRevisions( BasicTypeContainer.class, 1 ).size() );
|
||||
|
||||
final BasicTypeContainer rev1 = getAuditReader().find( BasicTypeContainer.class, 1, 1 );
|
||||
assertEquals( "test", rev1.getData() );
|
||||
assertEquals( "1234567890", rev1.getColumnWithDefinition() );
|
||||
assertEquals( Integer.valueOf( 1 ), rev1.getCaseNumber() );
|
||||
|
||||
final BasicTypeContainer rev2 = getAuditReader().find( BasicTypeContainer.class, 1, 2 );
|
||||
assertEquals( "test2", rev2.getData() );
|
||||
assertEquals( "1234567890", rev2.getColumnWithDefinition() );
|
||||
assertEquals( Integer.valueOf( 1 ), rev2.getCaseNumber() );
|
||||
}
|
||||
|
||||
@Entity(name = "BasicTypeContainer")
|
||||
@Audited
|
||||
public static class BasicTypeContainer {
|
||||
@Id
|
||||
@GeneratedValue
|
||||
private Integer id;
|
||||
|
||||
@Generated(GenerationTime.INSERT)
|
||||
@Column(name = "caseNumber", columnDefinition = "integer not null auto_increment")
|
||||
private Integer caseNumber;
|
||||
|
||||
@Column(name = "columnWithDefinition", length = 10, nullable = false, columnDefinition = "varchar(10) not null")
|
||||
private String columnWithDefinition;
|
||||
|
||||
private String data;
|
||||
|
||||
public Integer getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
public void setId(Integer id) {
|
||||
this.id = id;
|
||||
}
|
||||
|
||||
public Integer getCaseNumber() {
|
||||
return caseNumber;
|
||||
}
|
||||
|
||||
public void setCaseNumber(Integer caseNumber) {
|
||||
this.caseNumber = caseNumber;
|
||||
}
|
||||
|
||||
public String getColumnWithDefinition() {
|
||||
return columnWithDefinition;
|
||||
}
|
||||
|
||||
public void setColumnWithDefinition(String columnWithDefinition) {
|
||||
this.columnWithDefinition = columnWithDefinition;
|
||||
}
|
||||
|
||||
public String getData() {
|
||||
return data;
|
||||
}
|
||||
|
||||
public void setData(String data) {
|
||||
this.data = data;
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,55 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.envers.test.integration.manytoone.lazy;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.util.Collection;
|
||||
import java.util.LinkedList;
|
||||
|
||||
import javax.persistence.CascadeType;
|
||||
import javax.persistence.Column;
|
||||
import javax.persistence.Entity;
|
||||
import javax.persistence.FetchType;
|
||||
import javax.persistence.OneToMany;
|
||||
import javax.persistence.Table;
|
||||
|
||||
/**
|
||||
* @author Chris Cranford
|
||||
*/
|
||||
@Entity
|
||||
@Table(name = "address")
|
||||
public class Address extends BaseDomainEntity {
|
||||
private static final long serialVersionUID = 7380477602657080463L;
|
||||
|
||||
@Column(name = "name")
|
||||
private String name;
|
||||
|
||||
@OneToMany(fetch = FetchType.LAZY, mappedBy = "id", cascade = CascadeType.ALL)
|
||||
Collection<AddressVersion> versions = new LinkedList<>();
|
||||
|
||||
Address() {
|
||||
}
|
||||
|
||||
Address(Instant when, String who, String name) {
|
||||
super( when, who );
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
public AddressVersion addInitialVersion(String description) {
|
||||
AddressVersion version = new AddressVersion( getCreatedAt(), getCreatedBy(), this, 0, description );
|
||||
versions.add( version );
|
||||
return version;
|
||||
}
|
||||
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
public Collection<AddressVersion> getVersions() {
|
||||
return versions;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,61 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.envers.test.integration.manytoone.lazy;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.util.Objects;
|
||||
|
||||
import javax.persistence.Column;
|
||||
import javax.persistence.Entity;
|
||||
import javax.persistence.FetchType;
|
||||
import javax.persistence.Id;
|
||||
import javax.persistence.JoinColumn;
|
||||
import javax.persistence.ManyToOne;
|
||||
import javax.persistence.Table;
|
||||
|
||||
/**
|
||||
* @author Chris Cranford
|
||||
*/
|
||||
@Entity
|
||||
@Table(name = "address_version")
|
||||
public class AddressVersion extends BaseDomainEntityVersion {
|
||||
private static final long serialVersionUID = 1100389518057335117L;
|
||||
|
||||
@Id
|
||||
@ManyToOne(optional = false, fetch = FetchType.LAZY)
|
||||
@JoinColumn(name = "id", referencedColumnName = "id", updatable = false, nullable = false)
|
||||
private Address id;
|
||||
|
||||
@Column(name = "description", updatable = false)
|
||||
private String description;
|
||||
|
||||
AddressVersion() {
|
||||
}
|
||||
|
||||
AddressVersion(Instant when, String who, Address id, long version, String description) {
|
||||
setCreatedAt( when );
|
||||
setCreatedBy( who );
|
||||
setVersion( version );
|
||||
this.id = Objects.requireNonNull(id );
|
||||
this.description = description;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Address getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
public String getDescription() {
|
||||
return description;
|
||||
}
|
||||
|
||||
public AddressVersion update(Instant when, String who, String description) {
|
||||
AddressVersion version = new AddressVersion( when, who, id, getVersion() + 1, description );
|
||||
id.versions.add( version );
|
||||
return version;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.envers.test.integration.manytoone.lazy;
|
||||
|
||||
import javax.persistence.Access;
|
||||
import javax.persistence.AccessType;
|
||||
import javax.persistence.MappedSuperclass;
|
||||
|
||||
/**
|
||||
* @author Chris Cranford
|
||||
*/
|
||||
@MappedSuperclass
|
||||
@Access(AccessType.FIELD)
|
||||
public class Base {
|
||||
}
|
|
@ -0,0 +1,56 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.envers.test.integration.manytoone.lazy;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.util.Objects;
|
||||
|
||||
import javax.persistence.GeneratedValue;
|
||||
import javax.persistence.GenerationType;
|
||||
import javax.persistence.Id;
|
||||
import javax.persistence.MappedSuperclass;
|
||||
|
||||
/**
|
||||
* @author Chris Cranford
|
||||
*/
|
||||
@MappedSuperclass
|
||||
public abstract class BaseDomainEntity extends BaseDomainEntityMetadata {
|
||||
private static final long serialVersionUID = 1023010094948580516L;
|
||||
|
||||
@Id
|
||||
@GeneratedValue(strategy = GenerationType.IDENTITY)
|
||||
protected long id = 0;
|
||||
|
||||
BaseDomainEntity() {
|
||||
|
||||
}
|
||||
|
||||
BaseDomainEntity(Instant timestamp, String who) {
|
||||
super( timestamp, who );
|
||||
}
|
||||
|
||||
public long getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if ( this == o ) {
|
||||
return true;
|
||||
}
|
||||
if ( o == null || getClass() != o.getClass() ) {
|
||||
return false;
|
||||
}
|
||||
BaseDomainEntity that = (BaseDomainEntity) o;
|
||||
return id == that.id;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash( id );
|
||||
}
|
||||
}
|
|
@ -0,0 +1,55 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.envers.test.integration.manytoone.lazy;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.time.Instant;
|
||||
|
||||
import javax.persistence.Column;
|
||||
import javax.persistence.MappedSuperclass;
|
||||
|
||||
import org.hibernate.annotations.CreationTimestamp;
|
||||
|
||||
/**
|
||||
* @author Chris Cranford
|
||||
*/
|
||||
@MappedSuperclass
|
||||
public abstract class BaseDomainEntityMetadata extends Base implements Serializable {
|
||||
private static final long serialVersionUID = 2765056578095518489L;
|
||||
|
||||
@Column(name = "created_by", nullable = false, updatable = false)
|
||||
private String createdBy;
|
||||
|
||||
@CreationTimestamp
|
||||
@Column(name = "created_at", nullable = false, updatable = false)
|
||||
private Instant createdAt;
|
||||
|
||||
BaseDomainEntityMetadata() {
|
||||
|
||||
}
|
||||
|
||||
BaseDomainEntityMetadata(Instant timestamp, String who) {
|
||||
this.createdBy = who;
|
||||
this.createdAt = timestamp;
|
||||
}
|
||||
|
||||
public String getCreatedBy() {
|
||||
return createdBy;
|
||||
}
|
||||
|
||||
public void setCreatedBy(String createdBy) {
|
||||
this.createdBy = createdBy;
|
||||
}
|
||||
|
||||
public Instant getCreatedAt() {
|
||||
return createdAt;
|
||||
}
|
||||
|
||||
public void setCreatedAt(Instant createdAt) {
|
||||
this.createdAt = createdAt;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,52 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.envers.test.integration.manytoone.lazy;
|
||||
|
||||
import java.util.Objects;
|
||||
|
||||
import javax.persistence.Column;
|
||||
import javax.persistence.Id;
|
||||
import javax.persistence.MappedSuperclass;
|
||||
|
||||
/**
|
||||
* @author Chris Cranford
|
||||
*/
|
||||
@MappedSuperclass
|
||||
public abstract class BaseDomainEntityVersion extends BaseDomainEntityMetadata {
|
||||
private static final long serialVersionUID = 1564895954324242368L;
|
||||
|
||||
@Id
|
||||
@Column(name = "version", nullable = false, updatable = false)
|
||||
private long version;
|
||||
|
||||
public long getVersion() {
|
||||
return version;
|
||||
}
|
||||
|
||||
public void setVersion(long version) {
|
||||
this.version = version;
|
||||
}
|
||||
|
||||
public abstract Object getId();
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if ( this == o ) {
|
||||
return true;
|
||||
}
|
||||
if ( o == null || getClass() != o.getClass() ) {
|
||||
return false;
|
||||
}
|
||||
BaseDomainEntityVersion that = (BaseDomainEntityVersion) o;
|
||||
return Objects.equals( getId(), that.getId() ) && version == that.version;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash( getId(), version );
|
||||
}
|
||||
}
|
|
@ -0,0 +1,78 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.envers.test.integration.manytoone.lazy;
|
||||
|
||||
import java.time.Duration;
|
||||
import java.time.Instant;
|
||||
import java.util.Arrays;
|
||||
|
||||
import org.hibernate.Hibernate;
|
||||
import org.hibernate.envers.test.BaseEnversFunctionalTestCase;
|
||||
import org.hibernate.envers.test.Priority;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.hibernate.testing.TestForIssue;
|
||||
|
||||
import static org.hibernate.testing.transaction.TransactionUtil.doInHibernate;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
/**
|
||||
* Tests that proxies are resolved correctly by the ToOneIdMapper such that when the values
|
||||
* are inserted for the join columns, they're resolved correclty avoiding ClassCastException
|
||||
*
|
||||
* @author Chris Cranford
|
||||
*/
|
||||
@TestForIssue(jiraKey = "HHH-13760")
|
||||
public class ManyToOneLazyFetchTest extends BaseEnversFunctionalTestCase {
|
||||
private Long shipmentId;
|
||||
|
||||
@Override
|
||||
protected Class<?>[] getAnnotatedClasses() {
|
||||
return new Class<?>[] { Shipment.class, Address.class, AddressVersion.class };
|
||||
}
|
||||
|
||||
@Test
|
||||
@Priority(10)
|
||||
public void initData() {
|
||||
this.shipmentId = doInHibernate( this::sessionFactory, session -> {
|
||||
final Shipment shipment = new Shipment( Instant.now(), "system", Instant.now().plus( Duration.ofDays( 3 ) ), "abcd123", null, null );
|
||||
session.persist( shipment );
|
||||
session.flush();
|
||||
|
||||
final Address origin = new Address( Instant.now(), "system", "Valencia#1" );
|
||||
final Address destination = new Address( Instant.now(), "system", "Madrid#3" );
|
||||
final AddressVersion originVersion0 = origin.addInitialVersion( "Poligono Manises" );
|
||||
final AddressVersion destinationVersion0 = destination.addInitialVersion( "Poligono Alcobendas" );
|
||||
session.persist( origin );
|
||||
session.persist( destination );
|
||||
session.flush();
|
||||
|
||||
shipment.setOrigin( originVersion0 );
|
||||
shipment.setDestination( destinationVersion0 );
|
||||
session.merge( shipment );
|
||||
session.flush();
|
||||
|
||||
return shipment.getId();
|
||||
} );
|
||||
|
||||
doInHibernate( this::sessionFactory, session -> {
|
||||
final Shipment shipment = session.get( Shipment.class, shipmentId );
|
||||
|
||||
Hibernate.initialize( shipment.getOrigin() );
|
||||
Hibernate.initialize( shipment.getDestination() );
|
||||
shipment.setClosed( true );
|
||||
|
||||
session.merge( shipment );
|
||||
session.flush();
|
||||
} );
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRevisionHistory() {
|
||||
assertEquals( Arrays.asList( 1, 2 ), getAuditReader().getRevisions( Shipment.class, shipmentId ) );
|
||||
}
|
||||
}
|
|
@ -0,0 +1,107 @@
|
|||
/*
|
||||
* Hibernate, Relational Persistence for Idiomatic Java
|
||||
*
|
||||
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
|
||||
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
|
||||
*/
|
||||
package org.hibernate.envers.test.integration.manytoone.lazy;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.util.Objects;
|
||||
|
||||
import javax.persistence.Column;
|
||||
import javax.persistence.Entity;
|
||||
import javax.persistence.FetchType;
|
||||
import javax.persistence.JoinColumn;
|
||||
import javax.persistence.JoinColumns;
|
||||
import javax.persistence.ManyToOne;
|
||||
import javax.persistence.Table;
|
||||
import javax.persistence.UniqueConstraint;
|
||||
import javax.persistence.Version;
|
||||
|
||||
import org.hibernate.envers.AuditTable;
|
||||
import org.hibernate.envers.Audited;
|
||||
import org.hibernate.envers.RelationTargetAuditMode;
|
||||
|
||||
/**
|
||||
* @author Chris Cranford
|
||||
*/
|
||||
@Entity
|
||||
@Table(name = "shipment", uniqueConstraints = @UniqueConstraint(columnNames = { "identifier" }))
|
||||
@Audited
|
||||
@AuditTable(value = "shipment_audit")
|
||||
public class Shipment extends BaseDomainEntity {
|
||||
private static final long serialVersionUID = 5061763935663020703L;
|
||||
|
||||
@Column(name = "due_date", nullable = false, updatable = false)
|
||||
private Instant dueDate;
|
||||
|
||||
@Column(name = "identifier", nullable = false, updatable = false)
|
||||
private String identifier;
|
||||
|
||||
@Version
|
||||
@Column(name = "mvc_version", nullable = false)
|
||||
private Long mvcVersion;
|
||||
|
||||
@Column(name = "closed")
|
||||
private Boolean closed;
|
||||
|
||||
@ManyToOne(optional = true, fetch = FetchType.LAZY, targetEntity = AddressVersion.class)
|
||||
@JoinColumns(value = {
|
||||
@JoinColumn(name = "origin_address_id", referencedColumnName = "id", nullable = true),
|
||||
@JoinColumn(name = "origin_address_version", referencedColumnName = "version", nullable = true)
|
||||
})
|
||||
@Audited(targetAuditMode = RelationTargetAuditMode.NOT_AUDITED)
|
||||
private AddressVersion origin;
|
||||
|
||||
@ManyToOne(optional = true, fetch = FetchType.LAZY, targetEntity = AddressVersion.class)
|
||||
@JoinColumns(value = {
|
||||
@JoinColumn(name = "destination_address_id", referencedColumnName = "id", nullable = true),
|
||||
@JoinColumn(name = "destination_address_version", referencedColumnName = "version", nullable = true)
|
||||
})
|
||||
@Audited(targetAuditMode = RelationTargetAuditMode.NOT_AUDITED)
|
||||
private AddressVersion destination;
|
||||
|
||||
Shipment() {
|
||||
}
|
||||
|
||||
Shipment(Instant when, String who, Instant dueDate, String identifier, AddressVersion origin, AddressVersion dest) {
|
||||
super( when, who );
|
||||
this.dueDate = dueDate;
|
||||
this.identifier = Objects.requireNonNull( identifier );
|
||||
this.origin = origin;
|
||||
this.destination = dest;
|
||||
}
|
||||
|
||||
public Instant getDueDate() {
|
||||
return dueDate;
|
||||
}
|
||||
|
||||
public String getIdentifier() {
|
||||
return identifier;
|
||||
}
|
||||
|
||||
public Boolean getClosed() {
|
||||
return closed;
|
||||
}
|
||||
|
||||
public void setClosed(Boolean closed) {
|
||||
this.closed = closed;
|
||||
}
|
||||
|
||||
public AddressVersion getOrigin() {
|
||||
return origin;
|
||||
}
|
||||
|
||||
public void setOrigin(AddressVersion origin) {
|
||||
this.origin = origin;
|
||||
}
|
||||
|
||||
public AddressVersion getDestination() {
|
||||
return destination;
|
||||
}
|
||||
|
||||
public void setDestination(AddressVersion destination) {
|
||||
this.destination = destination;
|
||||
}
|
||||
}
|
|
@ -14,8 +14,12 @@ import org.hibernate.envers.test.Priority;
|
|||
import org.hibernate.envers.test.integration.auditReader.AuditedTestEntity;
|
||||
import org.hibernate.envers.test.integration.auditReader.NotAuditedTestEntity;
|
||||
|
||||
import org.hibernate.envers.test.integration.modifiedflags.entities.EnumEntity;
|
||||
import org.hibernate.envers.test.integration.modifiedflags.entities.EnumOption;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.hibernate.testing.TestForIssue;
|
||||
|
||||
import static junit.framework.Assert.assertEquals;
|
||||
import static junit.framework.Assert.assertTrue;
|
||||
|
||||
|
@ -28,7 +32,7 @@ import static junit.framework.Assert.assertTrue;
|
|||
public class HasChangedAPITest extends AbstractModifiedFlagsEntityTest {
|
||||
@Override
|
||||
protected Class<?>[] getAnnotatedClasses() {
|
||||
return new Class[] {AuditedTestEntity.class, NotAuditedTestEntity.class};
|
||||
return new Class[] {AuditedTestEntity.class, NotAuditedTestEntity.class, EnumEntity.class};
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -38,17 +42,22 @@ public class HasChangedAPITest extends AbstractModifiedFlagsEntityTest {
|
|||
em.getTransaction().begin();
|
||||
AuditedTestEntity ent1 = new AuditedTestEntity( 1, "str1" );
|
||||
NotAuditedTestEntity ent2 = new NotAuditedTestEntity( 1, "str1" );
|
||||
EnumEntity ent3 = new EnumEntity( 1, EnumOption.A );
|
||||
|
||||
|
||||
em.persist( ent1 );
|
||||
em.persist( ent2 );
|
||||
em.persist( ent3 );
|
||||
em.getTransaction().commit();
|
||||
|
||||
em.getTransaction().begin();
|
||||
|
||||
ent1 = em.find( AuditedTestEntity.class, 1 );
|
||||
ent2 = em.find( NotAuditedTestEntity.class, 1 );
|
||||
ent3 = em.find( EnumEntity.class, 1 );
|
||||
ent1.setStr1( "str2" );
|
||||
ent2.setStr1( "str2" );
|
||||
ent3.setOption( EnumOption.B );
|
||||
em.getTransaction().commit();
|
||||
}
|
||||
|
||||
|
@ -65,4 +74,18 @@ public class HasChangedAPITest extends AbstractModifiedFlagsEntityTest {
|
|||
assertTrue( list.isEmpty() );
|
||||
}
|
||||
|
||||
@Test
|
||||
@TestForIssue(jiraKey = "HHH-13770")
|
||||
public void testHasChangedHasNotChangedEnum() {
|
||||
List list = getAuditReader().createQuery().forRevisionsOfEntity( EnumEntity.class, true, true )
|
||||
.add( AuditEntity.property( "option" ).hasChanged() ).getResultList();
|
||||
assertEquals( 2, list.size() );
|
||||
assertEquals( EnumOption.A, ( (EnumEntity) list.get( 0 ) ).getOption() );
|
||||
assertEquals( EnumOption.B, ( (EnumEntity) list.get( 1 ) ).getOption() );
|
||||
|
||||
list = getAuditReader().createQuery().forRevisionsOfEntity( EnumEntity.class, true, true )
|
||||
.add( AuditEntity.property( "option" ).hasNotChanged() ).getResultList();
|
||||
assertTrue( list.isEmpty() );
|
||||
}
|
||||
|
||||
}
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue