HHH-15178 Backport Jenkinsfile and GH actions

This commit is contained in:
Christian Beikov 2022-04-06 12:23:55 +02:00
parent 323f7253f5
commit ebfc6b4254
32 changed files with 814 additions and 267 deletions

View File

@ -1,4 +1,4 @@
# The main CI of Hibernate ORM is https://ci.hibernate.org/job/hibernate-orm-6.0-h2-main/.
# The main CI of Hibernate ORM is https://ci.hibernate.org/job/hibernate-orm-5.6-h2/.
# However, Hibernate ORM builds run on GitHub actions regularly
# to check that it still works and can be used in GitHub forks.
# See https://docs.github.com/en/free-pro-team@latest/actions
@ -13,35 +13,38 @@ on:
pull_request:
branches:
- '5.6'
# See https://github.com/hibernate/hibernate-orm/pull/4615 for a description of the behavior we're getting.
concurrency:
# Consider that two builds are in the same concurrency group (cannot run concurrently)
# if they use the same workflow and are about the same branch ("ref") or pull request.
group: "workflow = ${{ github.workflow }}, ref = ${{ github.event.ref }}, pr = ${{ github.event.pull_request.id }}"
# Cancel previous builds in the same concurrency group even if they are in process
# for pull requests or pushes to forks (not the upstream repository).
cancel-in-progress: ${{ github.event_name == 'pull_request' || github.repository != 'hibernate/hibernate-orm' }}
jobs:
build:
name: Java 8
runs-on: ubuntu-latest
# We want to know the test results of all matrix entries
continue-on-error: true
strategy:
fail-fast: false
matrix:
# When GitHub Actions supports it: https://github.com/actions/toolkit/issues/399
# We will use the experimental flag as indicator whether a failure should cause a workflow failure
include:
- rdbms: h2
experimental: false
# - rdbms: hsqldb
- rdbms: derby
experimental: true
- rdbms: mysql8
- rdbms: mariadb
experimental: true
- rdbms: postgresql
experimental: true
- rdbms: postgresql_9_5
- rdbms: postgresql_13
- rdbms: oracle
experimental: true
- rdbms: db2
experimental: true
- rdbms: mssql
experimental: true
# Testing against Sybase requires many backports so let's skip it for now
# - rdbms: sybase
# Running with HANA requires at least 8GB memory just for the database, which we don't have on GH Actions runners
# - rdbms: hana
# experimental: true
steps:
- uses: actions/checkout@v2
with:
@ -85,45 +88,4 @@ jobs:
./**/target/reports/tests/
./**/target/reports/checkstyle/
- name: Omit produced artifacts from build cache
run: ./ci/before-cache.sh
build11:
name: Java 11
runs-on: ubuntu-latest
# We want to know the test results of all matrix entries
continue-on-error: true
steps:
- uses: actions/checkout@v2
with:
persist-credentials: false
- name: Set up Java 11
uses: actions/setup-java@v1
with:
java-version: 11
- name: Get year/month for cache key
id: get-date
run: |
echo "::set-output name=yearmonth::$(/bin/date -u "+%Y-%m")"
shell: bash
- name: Cache Maven local repository
uses: actions/cache@v2
id: cache-maven
with:
path: |
~/.m2/repository
~/.gradle/caches/
~/.gradle/wrapper/
# refresh cache every month to avoid unlimited growth
key: maven-localrepo-${{ steps.get-date.outputs.yearmonth }}
- name: Run build script
run: ./ci/build-github.sh
shell: bash
- name: Upload test reports (if Gradle failed)
uses: actions/upload-artifact@v2
if: failure()
with:
name: test-reports-java11
path: |
./**/target/reports/tests/
./**/target/reports/checkstyle/
- name: Omit produced artifacts from build cache
run: ./ci/before-cache.sh

Jenkinsfile vendored Normal file (380 additions)
View File

@ -0,0 +1,380 @@
/*
* Hibernate, Relational Persistence for Idiomatic Java
*
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
*/
import groovy.transform.Field
import io.jenkins.blueocean.rest.impl.pipeline.PipelineNodeGraphVisitor
import io.jenkins.blueocean.rest.impl.pipeline.FlowNodeWrapper
import org.jenkinsci.plugins.workflow.support.steps.build.RunWrapper
/*
* See https://github.com/hibernate/hibernate-jenkins-pipeline-helpers
*/
@Library('hibernate-jenkins-pipeline-helpers@1.5') _
import org.hibernate.jenkins.pipeline.helpers.job.JobHelper
@Field final String NODE_PATTERN_BASE = 'Worker&&Containers'
@Field List<BuildEnvironment> environments
this.helper = new JobHelper(this)
helper.runWithNotification {
def defaultJdk = '8'
stage('Configure') {
this.environments = [
// buildEnv(defaultJdk, 'h2'),
// buildEnv(defaultJdk, 'hsqldb'),
// buildEnv(defaultJdk, 'derby'),
// buildEnv(defaultJdk, 'mysql8'),
// buildEnv(defaultJdk, 'mariadb'),
// buildEnv(defaultJdk, 'postgresql_9_5'),
// buildEnv(defaultJdk, 'postgresql_13'),
// buildEnv(defaultJdk, 'oracle'),
buildEnv(defaultJdk, 'oracle_ee'),
// buildEnv(defaultJdk, 'db2'),
// buildEnv(defaultJdk, 'mssql'),
// buildEnv(defaultJdk, 'sybase'),
buildEnv(defaultJdk, 'hana', 'HANA'),
// buildEnv(defaultJdk, 's390x', 's390x'),
// buildEnv(defaultJdk, 'tidb', 'tidb', 'tidb_hibernate@pingcap.com'),
// Disable EDB for now as the image is not available anymore
// buildEnv(defaultJdk, 'edb')
jdkBuildEnv(defaultJdk, '11'),
jdkBuildEnv(defaultJdk, '17'),
jdkBuildEnv(defaultJdk, '18'),
jdkBuildEnv(defaultJdk, '19'),
];
helper.configure {
file 'job-configuration.yaml'
// We don't require the following, but the build helper plugin apparently does
jdk {
defaultTool "OpenJDK ${defaultJdk} Latest"
}
maven {
defaultTool 'Apache Maven 3.8'
}
}
properties([
buildDiscarder(
logRotator(daysToKeepStr: '30', numToKeepStr: '10')
),
// If two builds are about the same branch or pull request,
// the older one will be aborted when the newer one starts.
disableConcurrentBuilds(abortPrevious: true),
helper.generateNotificationProperty()
])
}
// Avoid running the pipeline on branch indexing
if (currentBuild.getBuildCauses().toString().contains('BranchIndexingCause')) {
print "INFO: Build skipped due to trigger being Branch Indexing"
currentBuild.result = 'ABORTED'
return
}
stage('Build') {
Map<String, Closure> executions = [:]
Map<String, Map<String, String>> state = [:]
environments.each { BuildEnvironment buildEnv ->
// Don't build environments for newer JDKs when this is a PR
if ( buildEnv.getVersion() != defaultJdk ) {
if ( helper.scmSource.pullRequest ) {
return
}
}
state[buildEnv.tag] = [:]
executions.put(buildEnv.tag, {
runBuildOnNode(buildEnv.node) {
// Use withEnv instead of setting env directly, as that is global!
// See https://github.com/jenkinsci/pipeline-plugin/blob/master/TUTORIAL.md
withEnv(["JAVA_HOME=${tool buildEnv.buildJdkTool}", "PATH+JAVA=${tool buildEnv.buildJdkTool}/bin", "TEST_JAVA_HOME=${tool buildEnv.testJdkTool}"]) {
if ( buildEnv.getVersion() != defaultJdk ) {
state[buildEnv.tag]['additionalOptions'] = " -Ptest.jdk.version=${buildEnv.getTestVersion()} -Porg.gradle.java.installations.paths=${JAVA_HOME},${TEST_JAVA_HOME}";
}
else {
state[buildEnv.tag]['additionalOptions'] = "";
}
state[buildEnv.tag]['containerName'] = null;
stage('Checkout') {
checkout scm
}
try {
stage('Start database') {
switch (buildEnv.dbName) {
case "mysql8":
docker.withRegistry('https://index.docker.io/v1/', 'hibernateci.hub.docker.com') {
docker.image('mysql:8.0.21').pull()
}
sh "./docker_db.sh mysql_8_0"
state[buildEnv.tag]['containerName'] = "mysql"
break;
case "mariadb":
docker.withRegistry('https://index.docker.io/v1/', 'hibernateci.hub.docker.com') {
docker.image('mariadb:10.5.8').pull()
}
sh "./docker_db.sh mariadb"
state[buildEnv.tag]['containerName'] = "mariadb"
break;
case "postgresql_9_5":
// use the postgis image to enable the PGSQL GIS (spatial) extension
docker.withRegistry('https://index.docker.io/v1/', 'hibernateci.hub.docker.com') {
docker.image('postgis/postgis:9.5-2.5').pull()
}
sh "./docker_db.sh postgresql_9_5"
state[buildEnv.tag]['containerName'] = "postgres"
break;
case "postgresql_13":
// use the postgis image to enable the PGSQL GIS (spatial) extension
docker.withRegistry('https://index.docker.io/v1/', 'hibernateci.hub.docker.com') {
docker.image('postgis/postgis:13-3.1').pull()
}
sh "./docker_db.sh postgresql_13"
state[buildEnv.tag]['containerName'] = "postgres"
break;
case "oracle":
docker.withRegistry('https://index.docker.io/v1/', 'hibernateci.hub.docker.com') {
docker.image('quillbuilduser/oracle-18-xe').pull()
}
sh "./docker_db.sh oracle_18"
state[buildEnv.tag]['containerName'] = "oracle"
break;
case "db2":
docker.withRegistry('https://index.docker.io/v1/', 'hibernateci.hub.docker.com') {
docker.image('ibmcom/db2:11.5.7.0').pull()
}
sh "./docker_db.sh db2"
state[buildEnv.tag]['containerName'] = "db2"
break;
case "mssql":
docker.image('mcr.microsoft.com/mssql/server:2017-CU13').pull()
sh "./docker_db.sh mssql"
state[buildEnv.tag]['containerName'] = "mssql"
break;
case "sybase":
docker.withRegistry('https://index.docker.io/v1/', 'hibernateci.hub.docker.com') {
docker.image('nguoianphu/docker-sybase').pull()
}
sh "./docker_db.sh sybase"
state[buildEnv.tag]['containerName'] = "sybase"
break;
case "edb":
docker.withRegistry('https://containers.enterprisedb.com', 'hibernateci.containers.enterprisedb.com') {
// withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'hibernateci.containers.enterprisedb.com',
// usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]) {
// sh 'docker login -u "$USERNAME" -p "$PASSWORD" https://containers.enterprisedb.com'
docker.image('containers.enterprisedb.com/edb/edb-as-lite:v11').pull()
}
sh "./docker_db.sh edb"
state[buildEnv.tag]['containerName'] = "edb"
break;
}
}
stage('Test') {
switch (buildEnv.dbName) {
case "h2":
case "derby":
case "hsqldb":
runTest("-Pdb=${buildEnv.dbName}${state[buildEnv.tag]['additionalOptions']}")
break;
case "mysql8":
runTest("-Pdb=mysql_ci${state[buildEnv.tag]['additionalOptions']}")
break;
case "tidb":
runTest("-Pdb=tidb -DdbHost=localhost:4000${state[buildEnv.tag]['additionalOptions']}", 'TIDB')
break;
case "postgresql_9_5":
case "postgresql_13":
runTest("-Pdb=pgsql_ci${state[buildEnv.tag]['additionalOptions']}")
break;
case "oracle":
runTest("-Pdb=oracle_ci -PexcludeTests=**.LockTest.testQueryTimeout*${state[buildEnv.tag]['additionalOptions']}")
break;
case "oracle_ee":
runTest("-Pdb=oracle_jenkins${state[buildEnv.tag]['additionalOptions']}", 'ORACLE_RDS')
break;
case "hana":
runTest("-Pdb=hana_jenkins${state[buildEnv.tag]['additionalOptions']}", 'HANA')
break;
case "edb":
runTest("-Pdb=edb_ci -DdbHost=localhost:5433${state[buildEnv.tag]['additionalOptions']}")
break;
case "s390x":
runTest("-Pdb=h2${state[buildEnv.tag]['additionalOptions']}")
break;
default:
runTest("-Pdb=${buildEnv.dbName}_ci${state[buildEnv.tag]['additionalOptions']}")
break;
}
}
}
finally {
if ( state[buildEnv.tag]['containerName'] != null ) {
sh "docker rm -f ${state[buildEnv.tag]['containerName']}"
}
// Skip this for PRs
if ( !env.CHANGE_ID && buildEnv.notificationRecipients != null ) {
handleNotifications(currentBuild, buildEnv)
}
}
}
}
})
}
parallel(executions)
}
} // End of helper.runWithNotification
// Job-specific helpers
BuildEnvironment buildEnv(String version, String dbName) {
return new BuildEnvironment( version, version, dbName, NODE_PATTERN_BASE, null );
}
BuildEnvironment buildEnv(String version, String dbName, String node) {
return new BuildEnvironment( version, version, dbName, node, null );
}
BuildEnvironment buildEnv(String version, String dbName, String node, String notificationRecipients) {
return new BuildEnvironment( version, version, dbName, node, notificationRecipients );
}
BuildEnvironment jdkBuildEnv(String version, String testVersion) {
return new BuildEnvironment( version,testVersion, "h2", NODE_PATTERN_BASE, null );
}
BuildEnvironment jdkBuildEnv(String version, String testVersion, String notificationRecipients) {
return new BuildEnvironment( version,testVersion, "h2", NODE_PATTERN_BASE, notificationRecipients );
}
public class BuildEnvironment {
private String version;
private String testVersion;
private String buildJdkTool;
private String testJdkTool;
private String dbName;
private String node;
private String notificationRecipients;
public BuildEnvironment(String version, String testVersion, String dbName, String node, String notificationRecipients) {
this.version = version;
this.testVersion = testVersion;
this.dbName = dbName;
this.node = node;
this.notificationRecipients = notificationRecipients;
this.buildJdkTool = "OpenJDK ${version} Latest";
this.testJdkTool = "OpenJDK ${testVersion} Latest";
}
String toString() { getTag() }
String getTag() { "jdk_${testVersion}_${dbName}" }
String getNode() { node }
String getVersion() { version }
String getTestVersion() { testVersion }
String getNotificationRecipients() { notificationRecipients }
}
void runBuildOnNode(String label, Closure body) {
node( label ) {
pruneDockerContainers()
try {
timeout( [time: 200, unit: 'MINUTES'], body )
}
finally {
// If this is a PR, we clean the workspace at the end
if ( env.CHANGE_BRANCH != null ) {
cleanWs()
}
pruneDockerContainers()
}
}
}
void pruneDockerContainers() {
if ( !sh( script: 'command -v docker || true', returnStdout: true ).trim().isEmpty() ) {
sh 'docker container prune -f || true'
sh 'docker image prune -f || true'
sh 'docker network prune -f || true'
sh 'docker volume prune -f || true'
}
}
// Clean by default otherwise the PackagedEntityManager tests fail on a node that previously ran a different DB
void runTest(String goal, String lockableResource = null, boolean clean = true) {
String cmd = "./gradlew" + (clean ? " clean" : "") + " check ${goal} -Plog-test-progress=true --stacktrace";
try {
if (lockableResource == null) {
sh cmd
}
else {
lock(lockableResource) {
sh cmd
}
}
}
finally {
junit '**/target/test-results/test/*.xml,**/target/test-results/testKitTest/*.xml'
}
}
void handleNotifications(currentBuild, buildEnv) {
def currentResult = getParallelResult(currentBuild, buildEnv.tag)
boolean success = currentResult == 'SUCCESS' || currentResult == 'UNKNOWN'
def previousResult = currentBuild.previousBuild == null ? null : getParallelResult(currentBuild.previousBuild, buildEnv.tag)
// Ignore success after success
if ( !( success && previousResult == 'SUCCESS' ) ) {
def subject
def body
if ( success ) {
if ( previousResult != 'SUCCESS' && previousResult != null ) {
subject = "${env.JOB_NAME} - Build ${env.BUILD_NUMBER} - Fixed"
body = """<p>${env.JOB_NAME} - Build ${env.BUILD_NUMBER} - Fixed:</p>
<p>Check console output at <a href='${env.BUILD_URL}'>${env.BUILD_URL}</a> to view the results.</p>"""
}
else {
subject = "${env.JOB_NAME} - Build ${env.BUILD_NUMBER} - Success"
body = """<p>${env.JOB_NAME} - Build ${env.BUILD_NUMBER} - Success:</p>
<p>Check console output at <a href='${env.BUILD_URL}'>${env.BUILD_URL}</a> to view the results.</p>"""
}
}
else if ( currentResult == 'FAILURE' ) {
if ( previousResult != null && previousResult == "FAILURE" ) {
subject = "${env.JOB_NAME} - Build ${env.BUILD_NUMBER} - Still failing"
body = """<p>${env.JOB_NAME} - Build ${env.BUILD_NUMBER} - Still failing:</p>
<p>Check console output at <a href='${env.BUILD_URL}'>${env.BUILD_URL}</a> to view the results.</p>"""
}
else {
subject = "${env.JOB_NAME} - Build ${env.BUILD_NUMBER} - Failure"
body = """<p>${env.JOB_NAME} - Build ${env.BUILD_NUMBER} - Failure:</p>
<p>Check console output at <a href='${env.BUILD_URL}'>${env.BUILD_URL}</a> to view the results.</p>"""
}
}
else {
subject = "${env.JOB_NAME} - Build ${env.BUILD_NUMBER} - ${currentResult}"
body = """<p>${env.JOB_NAME} - Build ${env.BUILD_NUMBER} - ${currentResult}:</p>
<p>Check console output at <a href='${env.BUILD_URL}'>${env.BUILD_URL}</a> to view the results.</p>"""
}
emailext(
subject: subject,
body: body,
to: buildEnv.notificationRecipients
)
}
}
@NonCPS
String getParallelResult( RunWrapper build, String parallelBranchName ) {
def visitor = new PipelineNodeGraphVisitor( build.rawBuild )
def branch = visitor.pipelineNodes.find{ it.type == FlowNodeWrapper.NodeType.PARALLEL && parallelBranchName == it.displayName }
if ( branch == null ) {
echo "Couldn't find parallel branch name '$parallelBranchName'. Available parallel branch names:"
visitor.pipelineNodes.findAll{ it.type == FlowNodeWrapper.NodeType.PARALLEL }.each{
echo " - ${it.displayName}"
}
return null;
}
return branch.status.result
}

View File

@ -3,9 +3,17 @@
goal=
if [ "$RDBMS" == "derby" ]; then
goal="-Pdb=derby"
elif [ "$RDBMS" == "hsqldb" ]; then
goal="-Pdb=hsqldb"
elif [ "$RDBMS" == "mysql8" ]; then
goal="-Pdb=mysql_ci"
elif [ "$RDBMS" == "mysql" ]; then
goal="-Pdb=mysql_ci"
elif [ "$RDBMS" == "mariadb" ]; then
goal="-Pdb=mariadb_ci"
elif [ "$RDBMS" == "postgresql" ]; then
elif [ "$RDBMS" == "postgresql_9_5" ]; then
goal="-Pdb=pgsql_ci"
elif [ "$RDBMS" == "postgresql_13" ]; then
goal="-Pdb=pgsql_ci"
elif [ "$RDBMS" == "oracle" ]; then
# I have no idea why, but these tests don't work on GH Actions
@ -16,6 +24,8 @@ elif [ "$RDBMS" == "mssql" ]; then
goal="-Pdb=mssql_ci"
elif [ "$RDBMS" == "hana" ]; then
goal="-Pdb=hana_ci"
elif [ "$RDBMS" == "sybase" ]; then
goal="-Pdb=sybase_ci"
fi
exec ./gradlew check ${goal} -Plog-test-progress=true --stacktrace

View File

@ -8,14 +8,18 @@ elif [ "$RDBMS" == 'mysql8' ]; then
bash $DIR/../docker_db.sh mysql_8_0
elif [ "$RDBMS" == 'mariadb' ]; then
bash $DIR/../docker_db.sh mariadb
elif [ "$RDBMS" == 'postgresql' ]; then
elif [ "$RDBMS" == 'postgresql_9_5' ]; then
bash $DIR/../docker_db.sh postgresql_9_5
elif [ "$RDBMS" == 'postgresql_13' ]; then
bash $DIR/../docker_db.sh postgresql_13
elif [ "$RDBMS" == 'db2' ]; then
bash $DIR/../docker_db.sh db2
elif [ "$RDBMS" == 'oracle' ]; then
bash $DIR/../docker_db.sh oracle
bash $DIR/../docker_db.sh oracle_18
elif [ "$RDBMS" == 'mssql' ]; then
bash $DIR/../docker_db.sh mssql
elif [ "$RDBMS" == 'hana' ]; then
bash $DIR/../docker_db.sh hana
elif [ "$RDBMS" == 'sybase' ]; then
bash $DIR/../docker_db.sh sybase
fi

View File

@ -1,45 +1,123 @@
#! /bin/bash
if command -v podman > /dev/null; then
CONTAINER_CLI=$(command -v podman)
HEALTCHECK_PATH="{{.State.Healthcheck.Status}}"
# Only use sudo for podman
if command -v sudo > /dev/null; then
PRIVILEGED_CLI="sudo"
else
PRIVILEGED_CLI=""
fi
else
CONTAINER_CLI=$(command -v docker)
HEALTCHECK_PATH="{{.State.Health.Status}}"
PRIVILEGED_CLI=""
fi
mysql_5_7() {
docker rm -f mysql || true
docker run --name mysql -e MYSQL_USER=hibernate_orm_test -e MYSQL_PASSWORD=hibernate_orm_test -e MYSQL_DATABASE=hibernate_orm_test -p3306:3306 -d mysql:5.7 --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
$CONTAINER_CLI rm -f mysql || true
$CONTAINER_CLI run --name mysql -e MYSQL_USER=hibernate_orm_test -e MYSQL_PASSWORD=hibernate_orm_test -e MYSQL_DATABASE=hibernate_orm_test -e MYSQL_ROOT_PASSWORD=hibernate_orm_test -p3306:3306 -d docker.io/mysql:5.7 --character-set-server=utf8mb4 --collation-server=utf8mb4_bin --skip-character-set-client-handshake --log-bin-trust-function-creators=1
# Give the container some time to start
OUTPUT=
n=0
until [ "$n" -ge 5 ]
do
# Need to access STDERR. Thanks for the snippet https://stackoverflow.com/a/56577569/412446
{ OUTPUT="$( { $CONTAINER_CLI logs mysql; } 2>&1 1>&3 3>&- )"; } 3>&1;
if [[ $OUTPUT == *"ready for connections"* ]]; then
break;
fi
n=$((n+1))
echo "Waiting for MySQL to start..."
sleep 3
done
if [ "$n" -ge 5 ]; then
echo "MySQL failed to start and configure after 15 seconds"
else
echo "MySQL successfully started"
fi
}
mysql_8_0() {
docker rm -f mysql || true
docker run --name mysql -e MYSQL_USER=hibernate_orm_test -e MYSQL_PASSWORD=hibernate_orm_test -e MYSQL_ROOT_PASSWORD=hibernate_orm_test -e MYSQL_DATABASE=hibernate_orm_test -p3306:3306 -d mysql:8.0.21 --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
$CONTAINER_CLI rm -f mysql || true
$CONTAINER_CLI run --name mysql -e MYSQL_USER=hibernate_orm_test -e MYSQL_PASSWORD=hibernate_orm_test -e MYSQL_ROOT_PASSWORD=hibernate_orm_test -e MYSQL_DATABASE=hibernate_orm_test -p3306:3306 -d docker.io/mysql:8.0.21 --character-set-server=utf8mb4 --collation-server=utf8mb4_0900_as_cs --skip-character-set-client-handshake --log-bin-trust-function-creators=1
# Give the container some time to start
OUTPUT=
n=0
until [ "$n" -ge 5 ]
do
# Need to access STDERR. Thanks for the snippet https://stackoverflow.com/a/56577569/412446
{ OUTPUT="$( { $CONTAINER_CLI logs mysql; } 2>&1 1>&3 3>&- )"; } 3>&1;
if [[ $OUTPUT == *"ready for connections"* ]]; then
break;
fi
n=$((n+1))
echo "Waiting for MySQL to start..."
sleep 3
done
if [ "$n" -ge 5 ]; then
echo "MySQL failed to start and configure after 15 seconds"
else
echo "MySQL successfully started"
fi
}
mariadb() {
docker rm -f mariadb || true
docker run --name mariadb -e MYSQL_USER=hibernate_orm_test -e MYSQL_PASSWORD=hibernate_orm_test -e MYSQL_DATABASE=hibernate_orm_test -e MYSQL_ROOT_PASSWORD=hibernate_orm_test -p3306:3306 -d mariadb:10.5.8 --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
$CONTAINER_CLI rm -f mariadb || true
$CONTAINER_CLI run --name mariadb -e MYSQL_USER=hibernate_orm_test -e MYSQL_PASSWORD=hibernate_orm_test -e MYSQL_DATABASE=hibernate_orm_test -e MYSQL_ROOT_PASSWORD=hibernate_orm_test -p3306:3306 -d docker.io/mariadb:10.5.8 --character-set-server=utf8mb4 --collation-server=utf8mb4_bin --skip-character-set-client-handshake
OUTPUT=
n=0
until [ "$n" -ge 5 ]
do
# Need to access STDERR. Thanks for the snippet https://stackoverflow.com/a/56577569/412446
{ OUTPUT="$( { $CONTAINER_CLI logs mariadb; } 2>&1 1>&3 3>&- )"; } 3>&1;
if [[ $OUTPUT == *"ready for connections"* ]]; then
break;
fi
n=$((n+1))
echo "Waiting for MariaDB to start..."
sleep 3
done
if [ "$n" -ge 5 ]; then
echo "MariaDB failed to start and configure after 15 seconds"
else
echo "MariaDB successfully started"
fi
}
postgresql_9_5() {
docker rm -f postgres || true
docker run --name postgres -e POSTGRES_USER=hibernate_orm_test -e POSTGRES_PASSWORD=hibernate_orm_test -e POSTGRES_DB=hibernate_orm_test -p5432:5432 -d postgres:9.5
$CONTAINER_CLI rm -f postgres || true
$CONTAINER_CLI run --name postgres -e POSTGRES_USER=hibernate_orm_test -e POSTGRES_PASSWORD=hibernate_orm_test -e POSTGRES_DB=hibernate_orm_test -p5432:5432 -d docker.io/postgis/postgis:9.5-2.5
}
postgis(){
docker rm -f postgis || true
docker run --name postgis -e POSTGRES_USER=hibernate_orm_test -e POSTGRES_PASSWORD=hibernate_orm_test -e POSTGRES_DB=hibernate_orm_test -p5432:5432 -d postgis/postgis:11-2.5
postgresql_13() {
$CONTAINER_CLI rm -f postgres || true
$CONTAINER_CLI run --name postgres -e POSTGRES_USER=hibernate_orm_test -e POSTGRES_PASSWORD=hibernate_orm_test -e POSTGRES_DB=hibernate_orm_test -p5432:5432 -d docker.io/postgis/postgis:13-3.1
}
edb() {
#$CONTAINER_CLI login containers.enterprisedb.com
$CONTAINER_CLI rm -f edb || true
$CONTAINER_CLI run --name edb -e ACCEPT_EULA=Yes -e DATABASE_USER=hibernate_orm_test -e DATABASE_USER_PASSWORD=hibernate_orm_test -e ENTERPRISEDB_PASSWORD=hibernate_orm_test -e DATABASE_NAME=hibernate_orm_test -e PGPORT=5433 -p 5433:5433 --mount type=tmpfs,destination=/edbvolume -d containers.enterprisedb.com/edb/edb-as-lite:v11
}
db2() {
docker rm -f db2 || true
docker run --name db2 --privileged -e DB2INSTANCE=orm_test -e DB2INST1_PASSWORD=orm_test -e DBNAME=orm_test -e LICENSE=accept -e AUTOCONFIG=false -e ARCHIVE_LOGS=false -e TO_CREATE_SAMPLEDB=false -e REPODB=false -p 50000:50000 -d ibmcom/db2:11.5.5.0
echo $CONTAINER_CLI
$PRIVILEGED_CLI $CONTAINER_CLI rm -f db2 || true
$PRIVILEGED_CLI $CONTAINER_CLI run --name db2 --privileged -e DB2INSTANCE=orm_test -e DB2INST1_PASSWORD=orm_test -e DBNAME=orm_test -e LICENSE=accept -e AUTOCONFIG=false -e ARCHIVE_LOGS=false -e TO_CREATE_SAMPLEDB=false -e REPODB=false -p 50000:50000 -d docker.io/ibmcom/db2:11.5.7.0
# Give the container some time to start
OUTPUT=
while [[ $OUTPUT != *"INSTANCE"* ]]; do
echo "Waiting for DB2 to start..."
sleep 10
OUTPUT=$(docker logs db2)
OUTPUT=$($PRIVILEGED_CLI $CONTAINER_CLI logs db2)
done
docker exec -t db2 su - orm_test bash -c ". /database/config/orm_test/sqllib/db2profile && /database/config/orm_test/sqllib/bin/db2 'connect to orm_test' && /database/config/orm_test/sqllib/bin/db2 'CREATE USER TEMPORARY TABLESPACE usr_tbsp MANAGED BY AUTOMATIC STORAGE'"
$PRIVILEGED_CLI $CONTAINER_CLI exec -t db2 su - orm_test bash -c ". /database/config/orm_test/sqllib/db2profile && /database/config/orm_test/sqllib/bin/db2 'connect to orm_test' && /database/config/orm_test/sqllib/bin/db2 'CREATE USER TEMPORARY TABLESPACE usr_tbsp MANAGED BY AUTOMATIC STORAGE'"
}
db2_spatial() {
docker rm -f db2spatial || true
$PRIVILEGED_CLI $CONTAINER_CLI rm -f db2spatial || true
temp_dir=$(mktemp -d)
cat <<EOF >${temp_dir}/ewkt.sql
create or replace function db2gse.asewkt(geometry db2gse.st_geometry)
@ -78,35 +156,35 @@ CREATE TRANSFORM FOR db2gse.ST_Geometry DB2_PROGRAM (
TO SQL WITH FUNCTION db2gse.geomfromewkt(varchar(32000)) )
;
EOF
docker run --name db2spatial --privileged -e DB2INSTANCE=orm_test -e DB2INST1_PASSWORD=orm_test -e DBNAME=orm_test -e LICENSE=accept -e AUTOCONFIG=false -e ARCHIVE_LOGS=false -e TO_CREATE_SAMPLEDB=false -e REPODB=false \
$PRIVILEGED_CLI $CONTAINER_CLI run --name db2spatial --privileged -e DB2INSTANCE=orm_test -e DB2INST1_PASSWORD=orm_test -e DBNAME=orm_test -e LICENSE=accept -e AUTOCONFIG=false -e ARCHIVE_LOGS=false -e TO_CREATE_SAMPLEDB=false -e REPODB=false \
-v ${temp_dir}:/conf \
-p 50000:50000 -d ibmcom/db2:11.5.5.0
-p 50000:50000 -d docker.io/ibmcom/db2:11.5.5.0
# Give the container some time to start
OUTPUT=
while [[ $OUTPUT != *"Setup has completed."* ]]; do
echo "Waiting for DB2 to start..."
sleep 10
OUTPUT=$(docker logs db2spatial)
OUTPUT=$($PRIVILEGED_CLI $CONTAINER_CLI logs db2spatial)
done
sleep 10
echo "Enabling spatial extender"
docker exec -t db2spatial su - orm_test bash -c "/database/config/orm_test/sqllib/db2profile && /database/config/orm_test/sqllib/bin/db2se enable_db orm_test"
$PRIVILEGED_CLI $CONTAINER_CLI exec -t db2spatial su - orm_test bash -c "/database/config/orm_test/sqllib/db2profile && /database/config/orm_test/sqllib/bin/db2se enable_db orm_test"
echo "Installing required transform group"
docker exec -t db2spatial su - orm_test bash -c "/database/config/orm_test/sqllib/db2profile && /database/config/orm_test/sqllib/bin/db2 'connect to orm_test' && /database/config/orm_test/sqllib/bin/db2 -tvf /conf/ewkt.sql"
$PRIVILEGED_CLI $CONTAINER_CLI exec -t db2spatial su - orm_test bash -c "/database/config/orm_test/sqllib/db2profile && /database/config/orm_test/sqllib/bin/db2 'connect to orm_test' && /database/config/orm_test/sqllib/bin/db2 -tvf /conf/ewkt.sql"
}
mssql() {
docker rm -f mssql || true
docker run --name mssql -d -p 1433:1433 -e "SA_PASSWORD=Hibernate_orm_test" -e ACCEPT_EULA=Y mcr.microsoft.com/mssql/server:2017-CU13
$CONTAINER_CLI rm -f mssql || true
$CONTAINER_CLI run --name mssql -d -p 1433:1433 -e "SA_PASSWORD=Hibernate_orm_test" -e ACCEPT_EULA=Y mcr.microsoft.com/mssql/server:2017-CU13
sleep 5
n=0
until [ "$n" -ge 5 ]
do
# We need a database that uses a non-lock based MVCC approach
# https://github.com/microsoft/homebrew-mssql-release/issues/2#issuecomment-682285561
docker exec mssql bash -c 'echo "create database hibernate_orm_test collate SQL_Latin1_General_CP1_CI_AS; alter database hibernate_orm_test set READ_COMMITTED_SNAPSHOT ON" | /opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P Hibernate_orm_test -i /dev/stdin' && break
$CONTAINER_CLI exec mssql bash -c 'echo "create database hibernate_orm_test collate SQL_Latin1_General_CP1_CS_AS; alter database hibernate_orm_test set READ_COMMITTED_SNAPSHOT ON" | /opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P Hibernate_orm_test -i /dev/stdin' && break
echo "Waiting for SQL Server to start..."
n=$((n+1))
sleep 5
@ -118,19 +196,146 @@ mssql() {
fi
}
oracle() {
docker rm -f oracle || true
# We need to use the defaults
# SYSTEM/Oracle18
docker run --shm-size=1536m --name oracle -d -p 1521:1521 --ulimit nofile=1048576:1048576 quillbuilduser/oracle-18-xe
until [ "`docker inspect -f {{.State.Health.Status}} oracle`" == "healthy" ];
sybase() {
$CONTAINER_CLI rm -f sybase || true
# Yup, that sucks, but on ubuntu we need to use -T11889 as per: https://github.com/DataGrip/docker-env/issues/12
$CONTAINER_CLI run -d -p 5000:5000 -p 5001:5001 --name sybase --entrypoint /bin/bash docker.io/nguoianphu/docker-sybase -c "source /opt/sybase/SYBASE.sh
/opt/sybase/ASE-16_0/bin/dataserver \
-d/opt/sybase/data/master.dat \
-e/opt/sybase/ASE-16_0/install/MYSYBASE.log \
-c/opt/sybase/ASE-16_0/MYSYBASE.cfg \
-M/opt/sybase/ASE-16_0 \
-N/opt/sybase/ASE-16_0/sysam/MYSYBASE.properties \
-i/opt/sybase \
-sMYSYBASE \
-T11889
RET=\$?
exit 0
"
sybase_check() {
$CONTAINER_CLI exec sybase bash -c "source /opt/sybase/SYBASE.sh;
/opt/sybase/OCS-16_0/bin/isql -Usa -P myPassword -S MYSYBASE <<EOF
Select name from sysdatabases where status2 & 48 > 0
go
quit
EOF
"
}
START_STATUS=0
j=1
while (( $j < 30 )); do
echo "Waiting for Sybase to start..."
sleep 1
j=$((j+1))
START_STATUS=$(sybase_check | grep '(0 rows affected)' | wc -c)
if (( $START_STATUS > 0 )); then
break
fi
done
if (( $j == 30 )); then
echo "Failed starting Sybase"
$CONTAINER_CLI ps -a
$CONTAINER_CLI logs sybase
sybase_check
exit 1
fi
export SYBASE_DB=hibernate_orm_test
export SYBASE_USER=hibernate_orm_test
export SYBASE_PASSWORD=hibernate_orm_test
$CONTAINER_CLI exec sybase bash -c "source /opt/sybase/SYBASE.sh;
cat <<-EOSQL > init1.sql
use master
go
disk resize name='master', size='256m'
go
create database $SYBASE_DB on master = '96m'
go
sp_dboption $SYBASE_DB, \"single user\", true
go
alter database $SYBASE_DB log on master = '50m'
go
use $SYBASE_DB
go
exec sp_extendsegment logsegment, $SYBASE_DB, master
go
use master
go
sp_dboption $SYBASE_DB, \"single user\", false
go
use $SYBASE_DB
go
checkpoint
go
use master
go
create login $SYBASE_USER with password $SYBASE_PASSWORD
go
exec sp_dboption $SYBASE_DB, 'abort tran on log full', true
go
exec sp_dboption $SYBASE_DB, 'allow nulls by default', true
go
exec sp_dboption $SYBASE_DB, 'ddl in tran', true
go
exec sp_dboption $SYBASE_DB, 'trunc log on chkpt', true
go
exec sp_dboption $SYBASE_DB, 'full logging for select into', true
go
exec sp_dboption $SYBASE_DB, 'full logging for alter table', true
go
sp_dboption $SYBASE_DB, \"select into\", true
go
sp_dboption tempdb, 'ddl in tran', true
go
EOSQL
/opt/sybase/OCS-16_0/bin/isql -Usa -P myPassword -S MYSYBASE -i ./init1.sql
echo =============== CREATING DB ==========================
cat <<-EOSQL > init2.sql
use $SYBASE_DB
go
sp_adduser '$SYBASE_USER', '$SYBASE_USER', null
go
grant create default to $SYBASE_USER
go
grant create table to $SYBASE_USER
go
grant create view to $SYBASE_USER
go
grant create rule to $SYBASE_USER
go
grant create function to $SYBASE_USER
go
grant create procedure to $SYBASE_USER
go
commit
go
EOSQL
/opt/sybase/OCS-16_0/bin/isql -Usa -P myPassword -S MYSYBASE -i ./init2.sql"
echo "Sybase successfully started"
}
oracle_setup() {
HEALTHSTATUS=
until [ "$HEALTHSTATUS" == "healthy" ];
do
echo "Waiting for Oracle to start..."
sleep 10;
sleep 5;
# On WSL, health-check intervals don't work for Podman, so run the health checks manually
if command -v podman > /dev/null; then
$CONTAINER_CLI healthcheck run oracle > /dev/null
fi
HEALTHSTATUS="`$CONTAINER_CLI inspect -f $HEALTCHECK_PATH oracle`"
HEALTHSTATUS=${HEALTHSTATUS##+( )} #Remove longest matching series of spaces from the front
HEALTHSTATUS=${HEALTHSTATUS%%+( )} #Remove longest matching series of spaces from the back
done
sleep 2;
echo "Oracle successfully started"
# We increase file sizes to avoid online resizes as that requires lots of CPU which is restricted in XE
docker exec oracle bash -c "source /home/oracle/.bashrc; bash -c \"
$CONTAINER_CLI exec oracle bash -c "source /home/oracle/.bashrc; bash -c \"
cat <<EOF | \$ORACLE_HOME/bin/sqlplus sys/Oracle18@localhost/XE as sysdba
alter database tempfile '/opt/oracle/oradata/XE/temp01.dbf' resize 400M;
alter database datafile '/opt/oracle/oradata/XE/system01.dbf' resize 1000M;
@ -152,21 +357,73 @@ alter system set open_cursors=1000 sid='*' scope=both;
EOF\""
}
oracle_legacy() {
$CONTAINER_CLI rm -f oracle || true
# We need to use the defaults
# SYSTEM/Oracle18
$CONTAINER_CLI run --shm-size=1536m --name oracle -d -p 1521:1521 --ulimit nofile=1048576:1048576 docker.io/quillbuilduser/oracle-18-xe
oracle_setup
}
oracle() {
oracle_18
}
oracle_11() {
$CONTAINER_CLI rm -f oracle || true
# We need to use the defaults
# SYSTEM/Oracle18
$CONTAINER_CLI run --name oracle -d -p 1521:1521 -e ORACLE_PASSWORD=Oracle18 \
--health-cmd healthcheck.sh \
--health-interval 5s \
--health-timeout 5s \
--health-retries 10 \
docker.io/gvenzl/oracle-xe:11.2.0.2-full
oracle_setup
}
oracle_18() {
$CONTAINER_CLI rm -f oracle || true
# We need to use the defaults
# SYSTEM/Oracle18
$CONTAINER_CLI run --name oracle -d -p 1521:1521 -e ORACLE_PASSWORD=Oracle18 \
--health-cmd healthcheck.sh \
--health-interval 5s \
--health-timeout 5s \
--health-retries 10 \
docker.io/gvenzl/oracle-xe:18.4.0-full
oracle_setup
}
oracle_21() {
$CONTAINER_CLI rm -f oracle || true
# We need to use the defaults
# SYSTEM/Oracle18
$CONTAINER_CLI run --name oracle -d -p 1521:1521 -e ORACLE_PASSWORD=Oracle18 \
--health-cmd healthcheck.sh \
--health-interval 5s \
--health-timeout 5s \
--health-retries 10 \
docker.io/gvenzl/oracle-xe:21.3.0-full
oracle_setup
}
oracle_ee() {
docker rm -f oracle || true
#$CONTAINER_CLI login
$CONTAINER_CLI rm -f oracle || true
# We need to use the defaults
# sys as sysdba/Oradoc_db1
docker run --name oracle -d -p 1521:1521 store/oracle/database-enterprise:12.2.0.1-slim
$CONTAINER_CLI run --name oracle -d -p 1521:1521 docker.io/store/oracle/database-enterprise:12.2.0.1-slim
# Give the container some time to start
OUTPUT=
while [[ $OUTPUT != *"NLS_CALENDAR"* ]]; do
echo "Waiting for Oracle to start..."
sleep 10
OUTPUT=$(docker logs oracle)
OUTPUT=$($CONTAINER_CLI logs oracle)
done
echo "Oracle successfully started"
# We increase file sizes to avoid online resizes as that requires lots of CPU which is restricted in XE
docker exec oracle bash -c "source /home/oracle/.bashrc; \$ORACLE_HOME/bin/sqlplus sys/Oradoc_db1@ORCLCDB as sysdba <<EOF
$CONTAINER_CLI exec oracle bash -c "source /home/oracle/.bashrc; \$ORACLE_HOME/bin/sqlplus sys/Oradoc_db1@ORCLCDB as sysdba <<EOF
create user c##hibernate_orm_test identified by hibernate_orm_test container=all;
grant connect, resource, dba to c##hibernate_orm_test container=all;
alter database tempfile '/u02/app/oracle/oradata/ORCL/temp01.dbf' resize 400M;
@ -195,8 +452,8 @@ hana() {
temp_dir=$(mktemp -d)
echo '{"master_password" : "H1bernate_test"}' >$temp_dir/password.json
chmod 777 -R $temp_dir
docker rm -f hana || true
docker run -d --name hana -p 39013:39013 -p 39017:39017 -p 39041-39045:39041-39045 -p 1128-1129:1128-1129 -p 59013-59014:59013-59014 \
$CONTAINER_CLI rm -f hana || true
$CONTAINER_CLI run -d --name hana -p 39013:39013 -p 39017:39017 -p 39041-39045:39041-39045 -p 1128-1129:1128-1129 -p 59013-59014:59013-59014 \
--memory=8g \
--ulimit nofile=1048576:1048576 \
--sysctl kernel.shmmax=1073741824 \
@ -204,7 +461,7 @@ hana() {
--sysctl kernel.shmmni=4096 \
--sysctl kernel.shmall=8388608 \
-v $temp_dir:/config \
store/saplabs/hanaexpress:2.00.045.00.20200121.1 \
docker.io/store/saplabs/hanaexpress:2.00.045.00.20200121.1 \
--passwords-url file:///config/password.json \
--agree-to-sap-license
# Give the container some time to start
@ -212,22 +469,22 @@ hana() {
while [[ $OUTPUT != *"Startup finished"* ]]; do
echo "Waiting for HANA to start..."
sleep 10
OUTPUT=$(docker logs hana)
OUTPUT=$($CONTAINER_CLI logs hana)
done
echo "HANA successfully started"
}
cockroachdb() {
docker rm -f cockroach || true
docker run -d --name=cockroach -p 26257:26257 -p 8080:8080 cockroachdb/cockroach:v20.2.4 start-single-node --insecure
$CONTAINER_CLI rm -f cockroach || true
$CONTAINER_CLI run -d --name=cockroach -p 26257:26257 -p 8080:8080 docker.io/cockroachdb/cockroach:v20.2.4 start-single-node --insecure
OUTPUT=
while [[ $OUTPUT != *"CockroachDB node starting"* ]]; do
echo "Waiting for CockroachDB to start..."
sleep 10
OUTPUT=$(docker logs cockroach)
OUTPUT=$($CONTAINER_CLI logs cockroach)
done
echo "Enabling experimental box2d operators"
docker exec -it cockroach bash -c "cat <<EOF | ./cockroach sql --insecure
$CONTAINER_CLI exec -it cockroach bash -c "cat <<EOF | ./cockroach sql --insecure
SET CLUSTER SETTING sql.spatial.experimental_box2d_comparison_operators.enabled = on;
quit
EOF
@ -239,17 +496,24 @@ EOF
if [ -z ${1} ]; then
echo "No db name provided"
echo "Provide one of:"
echo -e "\tcockroachdb"
echo -e "\tdb2"
echo -e "\tdb2_spatial"
echo -e "\tedb"
echo -e "\thana"
echo -e "\tmariadb"
echo -e "\tmssql"
echo -e "\tmysql_5_7"
echo -e "\tmysql_8_0"
echo -e "\tmariadb"
echo -e "\tpostgresql_9_5"
echo -e "\tdb2"
echo -e "\tmssql"
echo -e "\toracle"
echo -e "\toracle_11"
echo -e "\toracle_18"
echo -e "\toracle_21"
echo -e "\toracle_ee"
echo -e "\tpostgis"
echo -e "\tdb2_spatial"
echo -e "\thana"
echo -e "\tcockroachdb"
echo -e "\tpostgresql_13"
echo -e "\tpostgresql_9_5"
echo -e "\tsybase"
else
${1}
fi

View File

@ -7,6 +7,7 @@
package org.hibernate.userguide.mapping.basic;
import java.util.BitSet;
import javax.persistence.Column;
import javax.persistence.ColumnResult;
import javax.persistence.ConstructorResult;
import javax.persistence.Entity;
@ -94,7 +95,7 @@ public class BitSetUserTypeTest extends BaseCoreFunctionalTestCase {
query =
"SELECT " +
" pr.id AS \"pr.id\", " +
" pr.bitset AS \"pr.bitset\" " +
" pr.bitset_col AS \"pr.bitset\" " +
"FROM Product pr " +
"WHERE pr.id = :id",
resultSetMapping = "Person"
@ -117,6 +118,7 @@ public class BitSetUserTypeTest extends BaseCoreFunctionalTestCase {
private Integer id;
@Type( type = "bitset" )
@Column(name = "bitset_col")
private BitSet bitSet;
//Constructors, getters, and setters are omitted for brevity

View File

@ -55,6 +55,15 @@ ext {
// Disable prepared statement caching due to https://www.postgresql.org/message-id/CAEcMXhmmRd4-%2BNQbnjDT26XNdUoXdmntV9zdr8%3DTu8PL9aVCYg%40mail.gmail.com
'jdbc.url' : 'jdbc:postgresql://' + dbHost + '/hibernate_orm_test?preparedStatementCacheQueries=0'
],
sybase_ci : [
'db.dialect' : 'org.hibernate.dialect.SybaseASE157Dialect',
'jdbc.driver': 'net.sourceforge.jtds.jdbc.Driver',
'jdbc.user' : 'hibernate_orm_test',
'jdbc.pass' : 'hibernate_orm_test',
// Disable prepared statement caching to avoid issues with changing schemas
'jdbc.url' : 'jdbc:jtds:sybase://' + dbHost + ':5000/hibernate_orm_test;maxStatements=0;cacheMetaData=false',
'connection.init_sql' : 'set ansinull on'
],
mysql : [
'db.dialect' : 'org.hibernate.dialect.MySQL57Dialect',
'jdbc.driver': 'com.mysql.jdbc.Driver',
@ -69,6 +78,14 @@ ext {
'jdbc.pass' : 'hibernate_orm_test',
'jdbc.url' : 'jdbc:mysql://' + dbHost + '/hibernate_orm_test?useSSL=false'
],
mysql_ci : [
'db.dialect' : 'org.hibernate.dialect.MySQL8Dialect',
'jdbc.driver': 'com.mysql.jdbc.Driver',
'jdbc.user' : 'hibernate_orm_test',
'jdbc.pass' : 'hibernate_orm_test',
'jdbc.url' : 'jdbc:mysql://' + dbHost + '/hibernate_orm_test?allowPublicKeyRetrieval=true',
'connection.init_sql' : ''
],
// uses docker mysql_8_0
mysql8_spatial_ci: [
'db.dialect' : 'org.hibernate.spatial.dialect.mysql.MySQL8SpatialDialect',
@ -113,6 +130,14 @@ ext {
'jdbc.pass' : 'hibernate_orm_test',
'jdbc.url' : 'jdbc:oracle:thin:@' + dbHost + ':1521/xe'
],
oracle_jenkins : [
'db.dialect' : 'org.hibernate.dialect.Oracle12cDialect',
'jdbc.driver': 'oracle.jdbc.OracleDriver',
'jdbc.user' : 'hibernate_orm_test',
'jdbc.pass' : 'hibernate_orm_test',
'jdbc.url' : 'jdbc:oracle:thin:@hibernate-testing-oracle-se.ccuzkqo3zqzq.us-east-1.rds.amazonaws.com:1521:ORCL',
'connection.init_sql' : ''
],
// Use ./docker_db.sh oracle_ee to start the database
oracle_docker : [
'db.dialect' : 'org.hibernate.dialect.Oracle12cDialect',
@ -147,7 +172,7 @@ ext {
'jdbc.driver': 'com.microsoft.sqlserver.jdbc.SQLServerDriver',
'jdbc.user' : 'sa',
'jdbc.pass' : 'Hibernate_orm_test',
'jdbc.url' : 'jdbc:sqlserver://' + dbHost + ';databaseName=hibernate_orm_test'
'jdbc.url' : 'jdbc:sqlserver://' + dbHost + ';databaseName=hibernate_orm_test;sendTimeAsDatetime=false'
],
mssql_spatial_ci : [
'db.dialect' : 'org.hibernate.spatial.dialect.sqlserver.SqlServer2012SpatialDialect',
@ -200,6 +225,15 @@ ext {
// Disable prepared statement caching due to https://help.sap.com/viewer/0eec0d68141541d1b07893a39944924e/2.0.04/en-US/78f2163887814223858e4369d18e2847.html
'jdbc.url' : 'jdbc:sap://' + dbHost + ':443/?encrypt=true&validateCertificate=false&statementCacheSize=0'
],
hana_jenkins : [
'db.dialect' : 'org.hibernate.dialect.HANAColumnStoreDialect',
'jdbc.driver': 'com.sap.db.jdbc.Driver',
'jdbc.user' : 'HIBERNATE_TEST',
'jdbc.pass' : 'H1bernate_test',
// Disable prepared statement caching due to https://help.sap.com/viewer/0eec0d68141541d1b07893a39944924e/2.0.04/en-US/78f2163887814223858e4369d18e2847.html
'jdbc.url' : 'jdbc:sap://' + dbHost + ':39015/?statementCacheSize=0',
'connection.init_sql' : ''
],
hana_vlad : [
'db.dialect' : 'org.hibernate.dialect.HANAColumnStoreDialect',
'jdbc.driver': 'com.sap.db.jdbc.Driver',

View File

@ -79,17 +79,14 @@ dependencies {
testRuntime( libraries.derby )
testRuntime( libraries.hsqldb )
testRuntime( libraries.postgresql )
testRuntime( libraries.mysql )
testRuntime( libraries.mariadb )
testRuntime( libraries.mssql )
testRuntime( libraries.informix )
testRuntime( libraries.hana )
testRuntime( libraries.cockroachdb )
testRuntime( libraries.oracle )
testRuntime( libraries.sybase )
asciidoclet 'org.asciidoctor:asciidoclet:1.+'
testRuntime( libraries.oracle )
// Since both the DB2 driver and HANA have a package "net.jpountz" we have to add dependencies conditionally
// This is due to the "no split-packages" requirement of Java 9+
@ -99,6 +96,12 @@ dependencies {
else if ( db.startsWith( 'hana' ) ) {
testRuntime( libraries.hana )
}
else if ( db.startsWith( 'mysql' ) ) {
testRuntimeOnly libraries.mysql
}
else if ( db.startsWith( 'mariadb' ) ) {
testRuntimeOnly libraries.mariadb
}
// Mac-specific
project.ext.toolsJar = file("${System.getProperty('java.home')}/../lib/tools.jar")

View File

@ -141,6 +141,7 @@ ext {
mssql: 'com.microsoft.sqlserver:mssql-jdbc:7.2.1.jre8',
db2: 'com.ibm.db2:jcc:11.5.4.0',
hana: 'com.sap.cloud.db.jdbc:ngdbc:2.4.59',
sybase: 'net.sourceforge.jtds:jtds:1.3.1',
jodaTime: "joda-time:joda-time:${jodaTimeVersion}",

View File

@ -109,7 +109,7 @@ public class EntityGraphAttributeResolutionTest extends BaseEntityManagerFunctio
attributeNodes = {
@NamedAttributeNode("permissions")
})
@Table(name = "groups") // Name 'group' not accepted by H2
@Table( name = "t_group") // Name 'group' not accepted by H2
public static class Group {
public static final String ENTITY_GRAPH = "group-with-permissions";

View File

@ -36,7 +36,7 @@ public class HHH14112Test extends BaseCoreFunctionalTestCase {
@Entity(name = "Super")
@Inheritance(strategy = InheritanceType.JOINED)
@Where(clause = "DELETED = false")
@Where(clause = "deleted = false")
public static class Super {
@Id
@GeneratedValue(strategy = GenerationType.AUTO)

View File

@ -67,7 +67,7 @@ public class EntityProxySerializationTest extends BaseCoreFunctionalTestCase {
final Transaction t = s.beginTransaction();
try {
final Number count = (Number) s.createQuery("SELECT count(ID) FROM SimpleEntity").getSingleResult();
final Number count = (Number) s.createQuery("SELECT count(e.id) FROM SimpleEntity e").getSingleResult();
if (count.longValue() > 0L) {
// entity already added previously
return;

View File

@ -55,6 +55,7 @@ import org.hibernate.cfg.Environment;
import org.hibernate.dialect.MariaDB102Dialect;
import org.hibernate.dialect.MariaDB10Dialect;
import org.hibernate.dialect.MariaDB53Dialect;
import org.hibernate.dialect.MySQLDialect;
import org.hibernate.dialect.SQLServer2012Dialect;
import org.hibernate.engine.config.spi.ConfigurationService;
import org.hibernate.engine.jdbc.env.spi.NameQualifierSupport;
@ -85,6 +86,7 @@ import org.junit.runners.Parameterized;
@RunWith(CustomParameterized.class)
@TestForIssue(jiraKey = { "HHH-14921", "HHH-14922" })
@SkipForDialect(value = MySQLDialect.class, comment = "MySQL doesn't support sequences")
@SkipForDialect(value = MariaDB53Dialect.class, strictMatching = true,
comment = "MariaDB < 10.3 doesn't support sequences")
@SkipForDialect(value = MariaDB10Dialect.class, strictMatching = true,

View File

@ -131,7 +131,7 @@ public class PersistentListTest extends BaseCoreFunctionalTestCase {
final QueryableCollection queryableCollection = (QueryableCollection) collectionPersister;
SimpleSelect select = new SimpleSelect( getDialect() )
.setTableName( queryableCollection.getTableName() )
.addColumn( "ORDER_ID" )
.addColumn( "order_id" )
.addColumn( "INDX" )
.addColumn( "PRD_CODE" );
PreparedStatement preparedStatement = ((SessionImplementor)session2).getJdbcCoordinator().getStatementPreparer().prepareStatement( select.toStatementString() );

View File

@ -24,7 +24,7 @@
<class name="Child">
<id name="name" column="NAME" type="string"/>
<many-to-one name="parent" class="Parent" cascade="none" />
<many-to-one name="parent" column="PARENT" class="Parent" cascade="none" />
</class>
</hibernate-mapping>

View File

@ -22,7 +22,7 @@
<class name="Child">
<id name="name" column="NAME" type="string"/>
<many-to-one name="parent" class="Parent" cascade="none" />
<many-to-one name="parent" column="PARENT" class="Parent" cascade="none" />
<property name="description" type="string"/>
</class>

View File

@ -22,7 +22,7 @@
<class name="Child">
<id name="name" column="NAME" type="string"/>
<many-to-one name="parent" class="Parent" cascade="none" lazy="false" />
<many-to-one name="parent" column="PARENT" class="Parent" cascade="none" lazy="false" />
<property name="description" type="string"/>
</class>

View File

@ -232,7 +232,7 @@ public class ComponentTest extends BaseNonConfigCoreFunctionalTestCase {
// Value returned by Oracle native query is a Types.NUMERIC, which is mapped to a BigDecimalType;
// Cast returned value to Number then call Number.doubleValue() so it works on all dialects.
Double heightViaSql =
( (Number)s.createSQLQuery("select height_centimeters from T_USER where T_USER.username='steve'").uniqueResult())
( (Number)s.createSQLQuery("select height_centimeters from T_USER where T_USER.userName='steve'").uniqueResult())
.doubleValue();
assertEquals(HEIGHT_CENTIMETERS, heightViaSql, 0.01d);
@ -257,7 +257,7 @@ public class ComponentTest extends BaseNonConfigCoreFunctionalTestCase {
u.getPerson().setHeightInches(1);
s.flush();
heightViaSql =
( (Number)s.createSQLQuery("select height_centimeters from T_USER where T_USER.username='steve'").uniqueResult() )
( (Number)s.createSQLQuery("select height_centimeters from T_USER where T_USER.userName='steve'").uniqueResult() )
.doubleValue();
assertEquals(2.54d, heightViaSql, 0.01d);
s.delete(u);

View File

@ -368,15 +368,11 @@ public class QueryCacheTest extends BaseNonConfigCoreFunctionalTestCase {
}
@Test
@RequiresDialectFeature(
value = DialectChecks.CaseSensitiveCheck.class,
comment = "i.name='widget' should not match on case sensitive database."
)
public void testCaseInsensitiveComparison() {
public void testComparison() {
Session s = openSession();
s.beginTransaction();
Item i = new Item();
i.setName( "Widget" );
i.setName( "widget" );
i.setDescription( "A really top-quality, full-featured widget." );
s.save( i );
s.getTransaction().commit();
@ -387,7 +383,7 @@ public class QueryCacheTest extends BaseNonConfigCoreFunctionalTestCase {
List result = s.createQuery( queryString ).list();
assertEquals(1, result.size());
i = (Item) s.get( Item.class, new Long(i.getId()) );
assertEquals( i.getName(), "Widget" );
assertEquals( i.getName(), "widget" );
s.delete(i);
s.getTransaction().commit();
s.close();

View File

@ -22,7 +22,7 @@
<class name="Child">
<id name="name" column="NAME" type="string"/>
<many-to-one name="parent" class="Parent" cascade="none" />
<many-to-one name="parent" column="PARENT" class="Parent" cascade="none" />
</class>
</hibernate-mapping>

View File

@ -33,7 +33,7 @@ import static org.junit.Assert.assertEquals;
*/
public class AutoDiscoveryTest extends BaseCoreFunctionalTestCase {
private static final String QUERY_STRING =
"select u.name as username, g.name as groupname, m.joindate " +
"select u.name as username, g.name as groupname, m.joinDate " +
"from t_membership m " +
" inner join t_user u on m.member_id = u.id " +
" inner join t_group g on m.group_id = g.id";

View File

@ -70,11 +70,11 @@
<sql-delete>DELETE FROM EMPLOYMENT WHERE EMPID=?</sql-delete>
</class>
<class name="TextHolder">
<id name="id" column="id">
<class name="TextHolder" table="TEXTHOLDER">
<id name="id" column="ID">
<generator class="increment"/>
</id>
<property name="description" type="text" length="15000"/>
<property name="description" column="DESCRIPTION" type="text" length="15000"/>
<loader query-ref="textholder"/>
<sql-insert>
INSERT INTO TEXTHOLDER
@ -85,11 +85,11 @@
<sql-delete>DELETE FROM TEXTHOLDER WHERE ID=?</sql-delete>
</class>
<class name="ImageHolder">
<id name="id" column="id">
<class name="ImageHolder" table="IMAGEHOLDER">
<id name="id" column="ID">
<generator class="increment"/>
</id>
<property name="photo" type="image" length="15000"/>
<property name="photo" column="PHOTO" type="image" length="15000"/>
<loader query-ref="imageholder"/>
<sql-insert>
INSERT INTO IMAGEHOLDER

View File

@ -127,14 +127,14 @@
<id name="id" column="id">
<generator class="increment"/>
</id>
<property name="description" type="text" length="15000"/>
<property name="description" column="DESCRIPTION" type="text" length="15000"/>
</class>
<class name="ImageHolder" table="IMAGE_HOLDER">
<id name="id" column="id">
<generator class="increment"/>
</id>
<property name="photo" type="image" length="15000"/>
<property name="photo" column="PHOTO" type="image" length="15000"/>
</class>
<resultset name="org-emp-regionCode">

View File

@ -103,7 +103,7 @@ public class NativeSQLQueriesTest extends BaseCoreFunctionalTestCase {
}
protected String getEmploymentSQLMixedScalarEntity() {
return "SELECT e.*, e.employer as employerid FROM EMPLOYMENT e" ;
return "SELECT e.*, e.EMPLOYER as employerid FROM EMPLOYMENT e" ;
}
protected String getOrgEmpRegionSQL() {

View File

@ -1,111 +0,0 @@
/*
* Hibernate, Relational Persistence for Idiomatic Java
*
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
*/
package org.hibernate.test.timestamp;
import java.time.LocalDate;
import java.util.Map;
import java.util.TimeZone;
import javax.persistence.Entity;
import javax.persistence.Id;
import org.hibernate.cfg.AvailableSettings;
import org.hibernate.dialect.MySQL5Dialect;
import org.hibernate.testing.RequiresDialect;
import org.hibernate.testing.TestForIssue;
import org.hibernate.testing.jdbc.ConnectionProviderDelegate;
import org.hibernate.testing.junit4.BaseNonConfigCoreFunctionalTestCase;
import org.junit.Test;
import static org.hibernate.testing.transaction.TransactionUtil.doInHibernateSessionBuilder;
import static org.junit.Assert.assertEquals;
/**
* @author Vlad Mihalcea
*/
@RequiresDialect(MySQL5Dialect.class)
public class LocalDateCustomSessionLevelTimeZoneTest
extends BaseNonConfigCoreFunctionalTestCase {
private static final TimeZone TIME_ZONE = TimeZone.getTimeZone(
"Europe/Berlin" );
private ConnectionProviderDelegate connectionProvider = new ConnectionProviderDelegate() {
@Override
public void configure(Map configurationValues) {
String url = (String) configurationValues.get( AvailableSettings.URL );
if(!url.contains( "?" )) {
url += "?";
}
else if(!url.endsWith( "&" )) {
url += "&";
}
url += "useUnicode=true&useJDBCCompliantTimezoneShift=true&useLegacyDatetimeCode=false&serverTimezone=Europe/Berlin";
configurationValues.put( AvailableSettings.URL, url);
super.configure( configurationValues );
}
};
@Override
protected Class<?>[] getAnnotatedClasses() {
return new Class<?>[] {
Person.class
};
}
@Override
protected void addSettings(Map settings) {
settings.put(
AvailableSettings.CONNECTION_PROVIDER,
connectionProvider
);
}
@Override
protected void releaseResources() {
super.releaseResources();
connectionProvider.stop();
}
@Test
@TestForIssue( jiraKey = "HHH-11396" )
public void testTimeZone() {
TimeZone old = TimeZone.getDefault();
try {
// The producer (MySQL) is in the Europe/Berlin time zone and returns 1980-01-01
TimeZone jdbcTimeZone = TimeZone.getTimeZone( "Europe/Berlin" );
TimeZone.setDefault( jdbcTimeZone );
//hibernate.connection.url jdbc:mysql://localhost/hibernate_orm_test
doInHibernateSessionBuilder( () -> sessionFactory().withOptions().jdbcTimeZone( TIME_ZONE ), s -> {
Person person = new Person();
person.id = 1L;
s.persist( person );
} );
doInHibernateSessionBuilder( () -> sessionFactory().withOptions().jdbcTimeZone( TIME_ZONE ), s -> {
Person person = s.find( Person.class, 1L );
assertEquals( LocalDate.of( 2017, 3, 7 ), person.createdOn );
} );
}
finally {
TimeZone.setDefault( old );
}
}
@Entity(name = "Person")
public static class Person {
@Id
private Long id;
private LocalDate createdOn = LocalDate.of( 2017, 3, 7 );
}
}

View File

@ -300,7 +300,7 @@ public class LazyManyToManyNonUniqueIdWhereTest extends BaseCoreFunctionalTestCa
inverseJoinColumns = { @JoinColumn( name = "ASSOCIATION_ID" ) }
)
@WhereJoinTable( clause = "MAIN_CODE='MATERIAL' AND ASSOCIATION_CODE='RATING'" )
@Where( clause = "name = 'high' or name = 'medium'" )
@Where( clause = "NAME = 'high' or NAME = 'medium'" )
@Immutable
public List<Rating> getMediumOrHighRatingsFromCombined() {
return mediumOrHighRatingsFromCombined;
@ -387,7 +387,7 @@ public class LazyManyToManyNonUniqueIdWhereTest extends BaseCoreFunctionalTestCa
joinColumns = { @JoinColumn( name = "BUILDING_ID") },
inverseJoinColumns = { @JoinColumn( name = "RATING_ID" ) }
)
@Where( clause = "name = 'high' or name = 'medium'" )
@Where( clause = "NAME = 'high' or NAME = 'medium'" )
@Immutable
public List<Rating> getMediumOrHighRatings() {
return mediumOrHighRatings;

View File

@ -197,7 +197,7 @@ public class LazyOneToManyNonUniqueIdWhereTest extends BaseCoreFunctionalTestCas
@OneToMany
@JoinColumn( name = "MATERIAL_OWNER_ID")
@Where( clause = "name = 'high' or name = 'medium'" )
@Where( clause = "NAME = 'high' or NAME = 'medium'" )
@Immutable
public List<Rating> getMediumOrHighRatingsFromCombined() {
return mediumOrHighRatingsFromCombined;

View File

@ -22,7 +22,7 @@
where="MAIN_CODE='MATERIAL' AND ASSOCIATION_CODE='RATING'">
<key column="MAIN_ID"/>
<many-to-many column="ASSOCIATION_ID" class="LazyManyToManyNonUniqueIdWhereTest$Rating"
where="name = 'high' or name = 'medium'"/>
where="NAME = 'high' or NAME = 'medium'"/>
</bag>
<set name="ratings" table="MATERIAL_RATINGS" lazy="true" mutable="false">
@ -54,7 +54,7 @@
<bag name="mediumOrHighRatings" table="BUILDING_RATINGS" lazy="true" mutable="false">
<key column="BUILDING_ID"/>
<many-to-many column="RATING_ID" class="LazyManyToManyNonUniqueIdWhereTest$Rating"
where="name = 'high' or name = 'medium'"/>
where="NAME = 'high' or NAME = 'medium'"/>
</bag>
</class>

View File

@ -18,7 +18,7 @@
</set>
<bag name="mediumOrHighRatingsFromCombined" lazy="true" mutable="false"
where="name = 'high' or name = 'medium'">
where="NAME = 'high' or NAME = 'medium'">
<key column="MATERIAL_OWNER_ID"/>
<one-to-many class="LazyOneToManyNonUniqueIdWhereTest$Rating"/>
</bag>

View File

@ -104,7 +104,7 @@ public class UnspecifiedEnumTypeTest extends BaseEnversFunctionalTestCase {
@SuppressWarnings("unchecked")
List<Object[]> values = session
.createNativeQuery( "SELECT enum1 e1, enum2 e2 FROM ENUM_ENTITY_AUD ORDER BY rev ASC" )
.createNativeQuery( "SELECT enum1 e1, enum2 e2 FROM ENUM_ENTITY_AUD ORDER BY REV ASC" )
.addScalar( "e1", IntegerType.INSTANCE )
.addScalar( "e2", IntegerType.INSTANCE )
.list();

View File

@ -89,7 +89,7 @@ public class GroupMemberTest extends BaseEnversJPAFunctionalTestCase {
return TransactionUtil.doInJPA( this::entityManagerFactory, entityManager -> {
final Session session = entityManager.unwrap( Session.class );
final Query query = session.createSQLQuery(
"SELECT uniqueGroup_id FROM GroupMember_AUD ORDER BY rev DESC" ).addScalar(
"SELECT uniqueGroup_id FROM GroupMember_AUD ORDER BY REV DESC" ).addScalar(
"uniqueGroup_id",
IntegerType.INSTANCE
).setMaxResults( 1 );

View File

@ -54,22 +54,22 @@ public class SQLServerDatabaseCleaner implements DatabaseCleaner {
LOG.log( Level.FINEST, "Collect schema objects: START" );
rs = s.executeQuery(
"SELECT 'ALTER TABLE [' + TABLE_SCHEMA + '].[' + TABLE_NAME + '] DROP CONSTRAINT [' + CONSTRAINT_NAME + ']' FROM INFORMATION_SCHEMA.CONSTRAINT_TABLE_USAGE " +
"WHERE EXISTS (SELECT 1 FROM sys.Tables t JOIN sys.Schemas s ON t.schema_id = s.schema_id WHERE t.is_ms_shipped = 0 AND s.name = TABLE_SCHEMA AND t.name = TABLE_NAME) " +
"AND EXISTS (SELECT 1 FROM sys.Foreign_keys WHERE name = CONSTRAINT_NAME)" );
"WHERE EXISTS (SELECT 1 FROM sys.tables t JOIN sys.schemas s ON t.schema_id = s.schema_id WHERE t.is_ms_shipped = 0 AND s.name = TABLE_SCHEMA AND t.name = TABLE_NAME) " +
"AND EXISTS (SELECT 1 FROM sys.foreign_keys WHERE name = CONSTRAINT_NAME)" );
while ( rs.next() ) {
sqls.add( rs.getString( 1 ) );
}
rs = s.executeQuery(
"SELECT 'DROP VIEW [' + TABLE_SCHEMA + '].[' + TABLE_NAME + ']' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE = 'VIEW' " +
"AND EXISTS (SELECT 1 FROM sys.Views t JOIN sys.Schemas s ON t.schema_id = s.schema_id WHERE t.is_ms_shipped = 0 AND s.name = TABLE_SCHEMA AND t.name = TABLE_NAME)" );
"AND EXISTS (SELECT 1 FROM sys.views t JOIN sys.schemas s ON t.schema_id = s.schema_id WHERE t.is_ms_shipped = 0 AND s.name = TABLE_SCHEMA AND t.name = TABLE_NAME)" );
while ( rs.next() ) {
sqls.add( rs.getString( 1 ) );
}
rs = s.executeQuery(
"SELECT 'DROP TABLE [' + TABLE_SCHEMA + '].[' + TABLE_NAME + ']' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE = 'BASE TABLE' " +
"AND EXISTS (SELECT 1 FROM sys.Tables t JOIN sys.Schemas s ON t.schema_id = s.schema_id WHERE t.is_ms_shipped = 0 AND s.name = TABLE_SCHEMA AND t.name = TABLE_NAME)" );
"AND EXISTS (SELECT 1 FROM sys.tables t JOIN sys.schemas s ON t.schema_id = s.schema_id WHERE t.is_ms_shipped = 0 AND s.name = TABLE_SCHEMA AND t.name = TABLE_NAME)" );
while ( rs.next() ) {
sqls.add( rs.getString( 1 ) );
}