HHH-15180 - Remove support for SQL Server versions older than 2008

Signed-off-by: Jan Schatteman <jschatte@redhat.com>
This commit is contained in:
Jan Schatteman 2022-06-15 22:54:17 +02:00 committed by Christian Beikov
parent 75f4c95274
commit 429ab5b936
11 changed files with 2165 additions and 196 deletions

View File

@ -0,0 +1,948 @@
/*
* Hibernate, Relational Persistence for Idiomatic Java
*
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
*/
package org.hibernate.community.dialect;
import org.hibernate.*;
import org.hibernate.boot.Metadata;
import org.hibernate.boot.model.TypeContributions;
import org.hibernate.boot.model.relational.QualifiedSequenceName;
import org.hibernate.boot.model.relational.Sequence;
import org.hibernate.boot.model.relational.SqlStringGenerationContext;
import org.hibernate.dialect.AbstractTransactSQLDialect;
import org.hibernate.dialect.DatabaseVersion;
import org.hibernate.dialect.Dialect;
import org.hibernate.dialect.Replacer;
import org.hibernate.dialect.TimeZoneSupport;
import org.hibernate.dialect.function.CommonFunctionFactory;
import org.hibernate.dialect.function.CountFunction;
import org.hibernate.dialect.function.SQLServerFormatEmulation;
import org.hibernate.dialect.identity.IdentityColumnSupport;
import org.hibernate.dialect.identity.SQLServerIdentityColumnSupport;
import org.hibernate.dialect.pagination.LimitHandler;
import org.hibernate.dialect.pagination.SQLServer2005LimitHandler;
import org.hibernate.dialect.pagination.SQLServer2012LimitHandler;
import org.hibernate.dialect.pagination.TopLimitHandler;
import org.hibernate.dialect.sequence.NoSequenceSupport;
import org.hibernate.dialect.sequence.SQLServer16SequenceSupport;
import org.hibernate.dialect.sequence.SQLServerSequenceSupport;
import org.hibernate.dialect.sequence.SequenceSupport;
import org.hibernate.engine.jdbc.dialect.spi.DialectResolutionInfo;
import org.hibernate.engine.jdbc.env.spi.IdentifierCaseStrategy;
import org.hibernate.engine.jdbc.env.spi.IdentifierHelper;
import org.hibernate.engine.jdbc.env.spi.IdentifierHelperBuilder;
import org.hibernate.engine.jdbc.env.spi.NameQualifierSupport;
import org.hibernate.engine.spi.SessionFactoryImplementor;
import org.hibernate.exception.LockTimeoutException;
import org.hibernate.exception.spi.SQLExceptionConversionDelegate;
import org.hibernate.internal.util.JdbcExceptionHelper;
import org.hibernate.query.sqm.CastType;
import org.hibernate.query.sqm.FetchClauseType;
import org.hibernate.query.sqm.IntervalType;
import org.hibernate.query.sqm.TemporalUnit;
import org.hibernate.query.spi.QueryEngine;
import org.hibernate.service.ServiceRegistry;
import org.hibernate.sql.ast.SqlAstNodeRenderingMode;
import org.hibernate.sql.ast.SqlAstTranslator;
import org.hibernate.sql.ast.SqlAstTranslatorFactory;
import org.hibernate.sql.ast.spi.SqlAppender;
import org.hibernate.sql.ast.spi.StandardSqlAstTranslatorFactory;
import org.hibernate.sql.ast.tree.Statement;
import org.hibernate.sql.exec.spi.JdbcOperation;
import org.hibernate.tool.schema.internal.StandardSequenceExporter;
import org.hibernate.tool.schema.spi.Exporter;
import org.hibernate.type.BasicType;
import org.hibernate.type.BasicTypeRegistry;
import org.hibernate.type.StandardBasicTypes;
import org.hibernate.type.descriptor.java.PrimitiveByteArrayJavaType;
import org.hibernate.type.descriptor.jdbc.SmallIntJdbcType;
import org.hibernate.type.descriptor.jdbc.XmlJdbcType;
import org.hibernate.type.descriptor.sql.internal.DdlTypeImpl;
import org.hibernate.type.descriptor.sql.spi.DdlTypeRegistry;
import java.sql.DatabaseMetaData;
import java.sql.SQLException;
import java.sql.Types;
import java.time.temporal.ChronoField;
import java.time.temporal.TemporalAccessor;
import java.util.Calendar;
import java.util.Date;
import java.util.TimeZone;
import jakarta.persistence.TemporalType;
import static org.hibernate.query.sqm.TemporalUnit.NANOSECOND;
import static org.hibernate.query.sqm.produce.function.FunctionParameterType.INTEGER;
import static org.hibernate.type.SqlTypes.*;
import static org.hibernate.type.descriptor.DateTimeUtils.appendAsDate;
import static org.hibernate.type.descriptor.DateTimeUtils.appendAsTime;
import static org.hibernate.type.descriptor.DateTimeUtils.appendAsTimestampWithMicros;
/**
* A dialect for Microsoft SQL Server 2000 and above
*
* @author Gavin King
*/
public class SQLServerLegacyDialect extends AbstractTransactSQLDialect {
private static final int PARAM_LIST_SIZE_LIMIT = 2100;
private final StandardSequenceExporter exporter;
/**
 * Constructs a dialect for the lowest supported version, SQL Server 2000 (8.0).
 */
public SQLServerLegacyDialect() {
	this( DatabaseVersion.make( 8, 0 ) );
}

/**
 * Constructs a dialect for the given SQL Server version.
 *
 * @param version the SQL Server major/minor version
 */
public SQLServerLegacyDialect(DatabaseVersion version) {
	super(version);
	exporter = createSequenceExporter(version);
}

/**
 * Constructs a dialect using version information resolved from JDBC metadata.
 *
 * @param info dialect resolution info obtained from the JDBC driver
 */
public SQLServerLegacyDialect(DialectResolutionInfo info) {
	super(info);
	exporter = createSequenceExporter(info);
}

// Sequences were introduced in SQL Server 2012 (version 11);
// for older versions there is nothing to export, so return null
// and fall back to the default exporter in getSequenceExporter().
private StandardSequenceExporter createSequenceExporter(DatabaseVersion version) {
	return version.isSameOrAfter(11) ? new SqlServerSequenceExporter(this) : null;
}
/**
 * Registers SQL Server reserved words in addition to the keywords
 * registered by the base Transact-SQL dialect.
 */
@Override
protected void registerDefaultKeywords() {
	super.registerDefaultKeywords();
	// 'top' and 'key' are reserved on SQL Server but not covered by the base dialect
	for ( String keyword : new String[] { "top", "key" } ) {
		registerKeyword( keyword );
	}
}
/**
 * The DDL column type for the given JDBC type code.
 * <p>
 * SQL Server has no {@code double} type ({@code float} is double precision
 * by default). From SQL Server 2005 (9) on, the deprecated TEXT/IMAGE types
 * are replaced with the 'max' variants, and from 2008 (10) on the dedicated
 * {@code date}, {@code time}, {@code datetime2} and {@code datetimeoffset}
 * types are available.
 */
@Override
protected String columnType(int sqlTypeCode) {
	// there is no 'double' type in SQL server
	// but 'float' is double precision by default
	if ( sqlTypeCode == DOUBLE ) {
		return "float";
	}
	if ( getVersion().isSameOrAfter( 9 ) ) {
		switch ( sqlTypeCode ) {
			// Prefer 'varchar(max)' and 'varbinary(max)' to
			// the deprecated TEXT and IMAGE types. Note that
			// the length of a VARCHAR or VARBINARY column must
			// be either between 1 and 8000 or exactly MAX, and
			// the length of an NVARCHAR column must be either
			// between 1 and 4000 or exactly MAX. (HHH-3965)
			case CLOB:
				return "varchar(max)";
			case NCLOB:
				return "nvarchar(max)";
			case BLOB:
				return "varbinary(max)";
			// date/time types below require SQL Server 2008 (10);
			// older versions fall back to datetime-based types
			case DATE:
				return getVersion().isSameOrAfter( 10 ) ? "date" : super.columnType( sqlTypeCode );
			case TIME:
				return getVersion().isSameOrAfter( 10 ) ? "time" : super.columnType( sqlTypeCode );
			case TIMESTAMP:
				return getVersion().isSameOrAfter( 10 ) ? "datetime2($p)" : super.columnType( sqlTypeCode );
			case TIMESTAMP_WITH_TIMEZONE:
				return getVersion().isSameOrAfter( 10 ) ? "datetimeoffset($p)" : super.columnType( sqlTypeCode );
		}
	}
	return super.columnType( sqlTypeCode );
}
/**
 * The target SQL type used in {@code cast()} expressions.
 * <p>
 * From SQL Server 2005 (9) on, character, national character and binary
 * types (including LOBs and the LONG32 variants) are cast to the
 * corresponding 'max' types so that values of any length can be handled.
 */
@Override
protected String castType(int sqlTypeCode) {
	if ( getVersion().isBefore( 9 ) ) {
		return super.castType( sqlTypeCode );
	}
	switch ( sqlTypeCode ) {
		case VARCHAR:
		case LONG32VARCHAR:
		case CLOB:
			return "varchar(max)";
		case NVARCHAR:
		case LONG32NVARCHAR:
		case NCLOB:
			return "nvarchar(max)";
		case VARBINARY:
		case LONG32VARBINARY:
		case BLOB:
			return "varbinary(max)";
		default:
			return super.castType( sqlTypeCode );
	}
}
/**
 * Registers DDL types not covered by {@link #columnType(int)}:
 * the spatial types (SQL Server 2008+) and the {@code xml} type.
 */
@Override
protected void registerColumnTypes(TypeContributions typeContributions, ServiceRegistry serviceRegistry) {
	super.registerColumnTypes( typeContributions, serviceRegistry );
	final DdlTypeRegistry ddlTypeRegistry = typeContributions.getTypeConfiguration().getDdlTypeRegistry();
	if ( getVersion().isSameOrAfter( 10 ) ) {
		// geometry/geography were introduced in SQL Server 2008
		ddlTypeRegistry.addDescriptor( new DdlTypeImpl( GEOMETRY, "geometry", this ) );
		ddlTypeRegistry.addDescriptor( new DdlTypeImpl( GEOGRAPHY, "geography", this ) );
	}
	ddlTypeRegistry.addDescriptor( new DdlTypeImpl( SQLXML, "xml", this ) );
}
/**
 * VARCHAR/VARBINARY columns are limited to 8000 bytes;
 * anything longer must be declared as 'max'.
 */
@Override
public int getMaxVarcharLength() {
	return 8000;
}

/**
 * NVARCHAR columns are limited to 4000 characters
 * (each character occupies two bytes).
 */
@Override
public int getMaxNVarcharLength() {
	return 4000;
}

/**
 * Native time zone support via {@code datetimeoffset} is only
 * available from SQL Server 2008 (10) on.
 */
@Override
public TimeZoneSupport getTimeZoneSupport() {
	return getVersion().isSameOrAfter( 10 ) ? TimeZoneSupport.NATIVE : TimeZoneSupport.NONE;
}

@Override
public long getDefaultLobLength() {
	// this is essentially the only legal length for
	// a "lob" in SQL Server, i.e. the value of MAX
	// (caveat: for NVARCHAR it is half this value)
	return 2_147_483_647;
}

/**
 * SQL Server identifiers ({@code sysname}) are limited to 128 characters.
 */
@Override
public int getMaxIdentifierLength() {
	return 128;
}
/**
 * Contributes SQL Server specific JDBC type mappings: TINYINT is read
 * and written as SMALLINT (presumably because SQL Server's tinyint is
 * unsigned and does not fit a signed Java byte — TODO confirm), plus
 * the {@code xml} type.
 */
@Override
public void contributeTypes(TypeContributions typeContributions, ServiceRegistry serviceRegistry) {
	super.contributeTypes( typeContributions, serviceRegistry );
	typeContributions.getTypeConfiguration().getJdbcTypeRegistry().addDescriptor(
			Types.TINYINT,
			SmallIntJdbcType.INSTANCE
	);
	typeContributions.contributeJdbcType( XmlJdbcType.INSTANCE );
}
/**
 * Registers SQL Server specific SQM function implementations and
 * emulations, gated on the server version where each function or
 * feature became available.
 */
@Override
public void initializeFunctionRegistry(QueryEngine queryEngine) {
	super.initializeFunctionRegistry(queryEngine);
	final BasicTypeRegistry basicTypeRegistry = queryEngine.getTypeConfiguration().getBasicTypeRegistry();
	BasicType<Date> dateType = basicTypeRegistry.resolve( StandardBasicTypes.DATE );
	BasicType<Date> timeType = basicTypeRegistry.resolve( StandardBasicTypes.TIME );
	BasicType<Date> timestampType = basicTypeRegistry.resolve( StandardBasicTypes.TIMESTAMP );
	CommonFunctionFactory functionFactory = new CommonFunctionFactory(queryEngine);
	// For SQL-Server we need to cast certain arguments to varchar(max) to be able to concat them
	queryEngine.getSqmFunctionRegistry().register(
			"count",
			new CountFunction(
					this,
					queryEngine.getTypeConfiguration(),
					SqlAstNodeRenderingMode.DEFAULT,
					"+",
					"varchar(max)",
					false
			)
	);
	// AVG by default uses the input type, so we possibly need to cast the argument type, hence a special function
	functionFactory.avg_castingNonDoubleArguments( this, SqlAstNodeRenderingMode.DEFAULT );
	functionFactory.truncate_round();
	functionFactory.everyAny_minMaxIif();
	// datalength() reports the byte length of its argument
	functionFactory.octetLength_pattern( "datalength(?1)" );
	functionFactory.bitLength_pattern( "datalength(?1)*8" );
	if ( getVersion().isSameOrAfter( 10 ) ) {
		// functions available from SQL Server 2008 on
		functionFactory.locate_charindex();
		functionFactory.stddevPopSamp_stdevp();
		functionFactory.varPopSamp_varp();
	}
	if ( getVersion().isSameOrAfter( 11 ) ) {
		// functions available from SQL Server 2012 on
		queryEngine.getSqmFunctionRegistry().register(
				"format",
				new SQLServerFormatEmulation( queryEngine.getTypeConfiguration() )
		);
		//actually translate() was added in 2017 but
		//it's not worth adding a new dialect for that!
		functionFactory.translate();
		functionFactory.median_percentileCont( true );
		// the xxxfromparts() constructor functions, all taking integer arguments
		queryEngine.getSqmFunctionRegistry().namedDescriptorBuilder( "datefromparts" )
				.setInvariantType( dateType )
				.setExactArgumentCount( 3 )
				.setParameterTypes(INTEGER)
				.register();
		queryEngine.getSqmFunctionRegistry().namedDescriptorBuilder( "timefromparts" )
				.setInvariantType( timeType )
				.setExactArgumentCount( 5 )
				.setParameterTypes(INTEGER)
				.register();
		queryEngine.getSqmFunctionRegistry().namedDescriptorBuilder( "smalldatetimefromparts" )
				.setInvariantType( timestampType )
				.setExactArgumentCount( 5 )
				.setParameterTypes(INTEGER)
				.register();
		queryEngine.getSqmFunctionRegistry().namedDescriptorBuilder( "datetimefromparts" )
				.setInvariantType( timestampType )
				.setExactArgumentCount( 7 )
				.setParameterTypes(INTEGER)
				.register();
		queryEngine.getSqmFunctionRegistry().namedDescriptorBuilder( "datetime2fromparts" )
				.setInvariantType( timestampType )
				.setExactArgumentCount( 8 )
				.setParameterTypes(INTEGER)
				.register();
		queryEngine.getSqmFunctionRegistry().namedDescriptorBuilder( "datetimeoffsetfromparts" )
				.setInvariantType( timestampType )
				.setExactArgumentCount( 10 )
				.setParameterTypes(INTEGER)
				.register();
	}
	functionFactory.windowFunctions();
	functionFactory.inverseDistributionOrderedSetAggregates_windowEmulation();
	functionFactory.hypotheticalOrderedSetAggregates_windowEmulation();
	if ( getVersion().isSameOrAfter( 14 ) ) {
		// string_agg() was added in SQL Server 2017
		functionFactory.listagg_stringAggWithinGroup( "varchar(max)" );
	}
}
/**
 * Uses {@link SQLServerLegacySqlAstTranslator} to render SQL AST trees.
 */
@Override
public SqlAstTranslatorFactory getSqlAstTranslatorFactory() {
	return new StandardSqlAstTranslatorFactory() {
		@Override
		protected <T extends JdbcOperation> SqlAstTranslator<T> buildTranslator(
				SessionFactoryImplementor sessionFactory, Statement statement) {
			return new SQLServerLegacySqlAstTranslator<>( sessionFactory, statement );
		}
	};
}
/**
 * Pattern for {@code cast()} expressions, overridden for temporal-to-string
 * casts: SQL Server's default cast renders the full fractional-second
 * precision, while only second precision is wanted here.
 */
@Override
public String castPattern(CastType from, CastType to) {
	if ( to == CastType.STRING && from == CastType.TIMESTAMP ) {
		// SQL Server uses yyyy-MM-dd HH:mm:ss.nnnnnnn by default when doing a cast, but only need second precision
		return "format(?1,'yyyy-MM-dd HH:mm:ss')";
	}
	if ( to == CastType.STRING && from == CastType.TIME ) {
		// SQL Server requires quoting of ':' in time formats and the use of 'hh' instead of 'HH'
		return "format(?1,'hh\\:mm\\:ss')";
	}
	return super.castPattern( from, to );
}
/**
 * {@code sysdatetime()} returns a {@code datetime2} with full precision.
 */
@Override
public String currentTimestamp() {
	return "sysdatetime()";
}

/**
 * When no database metadata is available, fall back to treating both
 * quoted and unquoted identifiers as mixed case.
 */
@Override
public IdentifierHelper buildIdentifierHelper(
		IdentifierHelperBuilder builder, DatabaseMetaData dbMetaData) throws SQLException {
	if ( dbMetaData == null ) {
		// TODO: if DatabaseMetaData != null, unquoted case strategy is set to IdentifierCaseStrategy.UPPER
		// Check to see if this setting is correct.
		builder.setUnquotedCaseStrategy( IdentifierCaseStrategy.MIXED );
		builder.setQuotedCaseStrategy( IdentifierCaseStrategy.MIXED );
	}
	return super.buildIdentifierHelper( builder, dbMetaData );
}

/**
 * There is no dedicated current-time function; convert {@code getdate()}.
 */
@Override
public String currentTime() {
	return "convert(time,getdate())";
}

/**
 * There is no dedicated current-date function; convert {@code getdate()}.
 */
@Override
public String currentDate() {
	return "convert(date,getdate())";
}

/**
 * {@code sysdatetimeoffset()} returns a {@code datetimeoffset} including
 * the server's time zone offset.
 */
@Override
public String currentTimestampWithTimeZone() {
	return "sysdatetimeoffset()";
}

/**
 * SQL Server supports {@code insert into t default values}.
 */
@Override
public String getNoColumnsInsertString() {
	return "default values";
}
/**
 * The pagination strategy depends on the server version:
 * OFFSET/FETCH (2012+), ROW_NUMBER() emulation (2005+),
 * or plain TOP for older versions.
 */
@Override
public LimitHandler getLimitHandler() {
	if ( getVersion().isSameOrAfter( 11 ) ) {
		return SQLServer2012LimitHandler.INSTANCE;
	}
	else if ( getVersion().isSameOrAfter( 9 ) ) {
		//this is a stateful class, don't cache
		//it in the Dialect!
		return new SQLServer2005LimitHandler();
	}
	else {
		return new TopLimitHandler(false);
	}
}
/**
 * The VALUES list constructor is supported from SQL Server 2008 (10) on.
 */
@Override
public boolean supportsValuesList() {
	return getVersion().isSameOrAfter( 10 );
}

// SQL Server quotes identifiers with square brackets: [name]
@Override
public char closeQuote() {
	return ']';
}

@Override
public String getCurrentSchemaCommand() {
	return "select schema_name()";
}

/**
 * {@code drop table if exists} is supported from SQL Server 2022 (16) on.
 */
@Override
public boolean supportsIfExistsBeforeTableName() {
	if ( getVersion().isSameOrAfter( 16 ) ) {
		return true;
	}
	return super.supportsIfExistsBeforeTableName();
}

/**
 * {@code drop constraint if exists} is supported from SQL Server 2022 (16) on.
 */
@Override
public boolean supportsIfExistsBeforeConstraintName() {
	if ( getVersion().isSameOrAfter( 16 ) ) {
		return true;
	}
	return super.supportsIfExistsBeforeConstraintName();
}

// SQL Server quotes identifiers with square brackets: [name]
@Override
public char openQuote() {
	return '[';
}
/**
 * Appends a SQL Server table hint ({@code with (...)}) expressing the
 * requested lock mode. From 2005 (9) on, the hint also encodes the
 * timeout behavior (NOWAIT/READPAST); older versions only get the
 * basic UPDLOCK/HOLDLOCK/ROWLOCK hints.
 */
@Override
public String appendLockHint(LockOptions lockOptions, String tableName) {
	if ( getVersion().isSameOrAfter( 9 ) ) {
		// prefer an alias-specific lock mode when one was requested for this table
		LockMode lockMode = lockOptions.getAliasSpecificLockMode( tableName );
		if (lockMode == null) {
			lockMode = lockOptions.getLockMode();
		}
		// with SKIP_LOCKED, drop HOLDLOCK so that locked rows can be skipped
		final String writeLockStr = lockOptions.getTimeOut() == LockOptions.SKIP_LOCKED ? "updlock" : "updlock,holdlock";
		final String readLockStr = lockOptions.getTimeOut() == LockOptions.SKIP_LOCKED ? "updlock" : "holdlock";
		final String noWaitStr = lockOptions.getTimeOut() == LockOptions.NO_WAIT ? ",nowait" : "";
		final String skipLockStr = lockOptions.getTimeOut() == LockOptions.SKIP_LOCKED ? ",readpast" : "";
		switch ( lockMode ) {
			case PESSIMISTIC_WRITE:
			case WRITE:
				return tableName + " with (" + writeLockStr + ",rowlock" + noWaitStr + skipLockStr + ")";
			case PESSIMISTIC_READ:
				return tableName + " with (" + readLockStr + ",rowlock" + noWaitStr + skipLockStr + ")";
			case UPGRADE_SKIPLOCKED:
				return tableName + " with (updlock,rowlock,readpast" + noWaitStr + ")";
			case UPGRADE_NOWAIT:
				return tableName + " with (updlock,holdlock,rowlock,nowait)";
			default:
				return tableName;
		}
	}
	else {
		// pre-2005: no NOWAIT/READPAST support in hints
		switch ( lockOptions.getLockMode() ) {
			case UPGRADE_NOWAIT:
			case PESSIMISTIC_WRITE:
			case WRITE:
				return tableName + " with (updlock,rowlock)";
			case PESSIMISTIC_READ:
				return tableName + " with (holdlock,rowlock)";
			case UPGRADE_SKIPLOCKED:
				return tableName + " with (updlock,rowlock,readpast)";
			default:
				return tableName;
		}
	}
}
/**
 * The current_timestamp is more accurate, but only known to be supported in SQL Server 7.0 and later and
 * Sybase not known to support it at all
 * <p/>
 * {@inheritDoc}
 */
@Override
public String getCurrentTimestampSelectString() {
	return "select current_timestamp";
}
// Overridden informational metadata ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@Override
public boolean supportsResultSetPositionQueryMethodsOnForwardOnlyCursor() {
	return false;
}

@Override
public boolean supportsCircularCascadeDeleteConstraints() {
	// SQL Server (at least up through 2005) does not support defining
	// cascade delete constraints which can circle back to the mutating
	// table
	return false;
}

@Override
public boolean supportsLobValueChangePropagation() {
	// note: at least my local SQL Server 2005 Express shows this not working...
	return false;
}

@Override
public boolean doesReadCommittedCauseWritersToBlockReaders() {
	// here assume SQLServer2005 using snapshot isolation, which does not have this problem
	return false;
}

@Override
public boolean doesRepeatableReadCauseReadersToBlockWriters() {
	// here assume SQLServer2005 using snapshot isolation, which does not have this problem
	return false;
}

/**
 * JDBC parameter lists are limited to 2100 parameters on SQL Server,
 * so larger IN lists must be split.
 */
@Override
public int getInExpressionCountLimit() {
	return PARAM_LIST_SIZE_LIMIT;
}

@Override
public IdentityColumnSupport getIdentityColumnSupport() {
	return new SQLServerIdentityColumnSupport();
}

// CTEs in insert/update/delete statements require SQL Server 2005 (9)
@Override
public boolean supportsNonQueryWithCTE() {
	return getVersion().isSameOrAfter( 9 );
}

// READPAST hint (skip locked rows) requires SQL Server 2005 (9)
@Override
public boolean supportsSkipLocked() {
	return getVersion().isSameOrAfter( 9 );
}

// NOWAIT hint requires SQL Server 2005 (9)
@Override
public boolean supportsNoWait() {
	return getVersion().isSameOrAfter( 9 );
}

// no support for an explicit lock wait timeout in the hint syntax
@Override
public boolean supportsWait() {
	return false;
}
/**
 * Sequences arrived in SQL Server 2012 (11); SQL Server 2022 (16)
 * additionally supports {@code drop sequence if exists}.
 */
@Override
public SequenceSupport getSequenceSupport() {
	if ( getVersion().isSameOrAfter( 16 ) ) {
		return SQLServer16SequenceSupport.INSTANCE;
	}
	return getVersion().isSameOrAfter( 11 )
			? SQLServerSequenceSupport.INSTANCE
			: NoSequenceSupport.INSTANCE;
}
/**
 * Query listing all sequences, or the default (null) for versions
 * without sequence support.
 */
@Override
public String getQuerySequencesString() {
	if ( getVersion().isBefore( 11 ) ) {
		// no sequences before SQL Server 2012; the default implementation returns null
		return super.getQuerySequencesString();
	}
	// The upper-case name should work on both case-sensitive
	// and case-insensitive collations.
	return "select * from INFORMATION_SCHEMA.SEQUENCES";
}
/**
 * Appends an {@code OPTION (...)} query hint clause to the statement,
 * inserting it before a trailing semicolon when one is present.
 * Query hints require SQL Server 2012 (11).
 */
@Override
public String getQueryHintString(String sql, String hints) {
	if ( getVersion().isBefore( 11 ) ) {
		return super.getQueryHintString( sql, hints );
	}
	final int statementEnd = sql.indexOf( ';' );
	final StringBuilder hinted = new StringBuilder( sql.length() + hints.length() + 12 );
	if ( statementEnd > -1 ) {
		// strip the terminator, append the hint, then restore the terminator
		hinted.append( sql, 0, statementEnd );
	}
	else {
		hinted.append( sql );
	}
	hinted.append( " OPTION (" ).append( hints ).append( ")" );
	if ( statementEnd > -1 ) {
		hinted.append( ";" );
	}
	return hinted.toString();
}
// SQL Server has no NULLS FIRST/LAST clause in ORDER BY
@Override
public boolean supportsNullPrecedence() {
	return false;
}

@Override
public boolean supportsOffsetInSubquery() {
	return true;
}

@Override
public boolean supportsWindowFunctions() {
	return true;
}

// lateral joins are emulated via CROSS/OUTER APPLY, available from 2005 (9)
@Override
public boolean supportsLateral() {
	return getVersion().isSameOrAfter( 9 );
}

// OFFSET ... FETCH requires SQL Server 2012 (11)
@Override
public boolean supportsFetchClause(FetchClauseType type) {
	return getVersion().isSameOrAfter( 11 );
}
/**
 * Converts driver exceptions: SQL state HY008 (operation canceled,
 * e.g. query timeout) to {@link QueryTimeoutException} and error
 * code 1222 (lock request time out) to {@link LockTimeoutException}.
 * Returns the default (null) delegate for versions before 2005 (9).
 */
@Override
public SQLExceptionConversionDelegate buildSQLExceptionConversionDelegate() {
	if ( getVersion().isBefore( 9 ) ) {
		return super.buildSQLExceptionConversionDelegate(); //null
	}
	return (sqlException, message, sql) -> {
		final String sqlState = JdbcExceptionHelper.extractSqlState( sqlException );
		final int errorCode = JdbcExceptionHelper.extractErrorCode( sqlException );
		if ( "HY008".equals( sqlState ) ) {
			throw new QueryTimeoutException( message, sqlException, sql );
		}
		if ( 1222 == errorCode ) {
			throw new LockTimeoutException( message, sqlException, sql );
		}
		// unrecognized: let the fallback conversion handle it
		return null;
	};
}
/**
 * SQL server supports up to 7 decimal digits of
 * fractional second precision in a datetime2,
 * but since its duration arithmetic functions
 * try to fit durations into an int,
 * which is impossible with such high precision,
 * so default to generating {@code datetime2(6)}
 * (microsecond precision) columns, matching the
 * value returned here.
 */
@Override
public int getDefaultTimestampPrecision() {
	return 6; //microseconds!
}

/**
 * SQL server supports up to 7 decimal digits of
 * fractional second precision in a datetime2,
 * but unfortunately its duration arithmetic
 * functions have a nasty habit of overflowing.
 * So to give ourselves a little extra headroom,
 * we will use {@code microsecond} as the native
 * unit of precision (but even then we have to
 * use tricks when calling {@code dateadd()}).
 */
@Override
public long getFractionalSecondPrecisionInNanos() {
	return 1_000; //microseconds!
}
/**
 * Pattern for {@code extract()}, mapped onto {@code datepart()}.
 * <p>
 * Time zone fields are derived from {@code datepart(tz)} (offset in
 * minutes), SECOND includes the fractional nanoseconds, and for
 * pre-2008 versions (which lack {@code datepart(isowk)}) the ISO week
 * number is computed arithmetically.
 */
@Override
public String extractPattern(TemporalUnit unit) {
	switch (unit) {
		case TIMEZONE_HOUR:
			return "(datepart(tz,?2)/60)";
		case TIMEZONE_MINUTE:
			return "(datepart(tz,?2)%60)";
		//currently Dialect.extract() doesn't need
		//to handle NANOSECOND (might change that?)
		case SECOND:
			//this should evaluate to a floating point type
			return "(datepart(second,?2)+datepart(nanosecond,?2)/1e9)";
		case WEEK:
			// Thanks https://www.sqlservercentral.com/articles/a-simple-formula-to-calculate-the-iso-week-number
			if ( getVersion().isBefore( 10 ) ) {
				// Fixed: the original pattern had an unbalanced extra closing parenthesis
				return "((DATEPART(dy,DATEADD(dd,DATEDIFF(dd,'17530101',?2)/7*7,'17530104'))+6)/7)";
			}
			// 2008+ falls through to datepart(isowk,...) via translateExtractField()
		default:
			return "datepart(?1,?2)";
	}
}
/**
 * Pattern for {@code timestampadd()}, mapped onto {@code dateadd()}.
 */
@Override
public String timestampaddPattern(TemporalUnit unit, TemporalType temporalType, IntervalType intervalType) {
	// dateadd() supports only especially small magnitudes
	// since it casts its argument to int (and unfortunately
	// there's no dateadd_big()) so here we need to use two
	// calls to dateadd() to add a whole duration
	switch (unit) {
		case NANOSECOND:
			//Java Durations are usually the only thing
			//we find expressed in nanosecond precision,
			//and they can easily be very large
			return "dateadd(nanosecond,?2%1000000000,dateadd(second,?2/1000000000,?3))";
		case NATIVE:
			//microsecond is the "native" precision
			return "dateadd(microsecond,?2%1000000,dateadd(second,?2/1000000,?3))";
		default:
			return "dateadd(?1,?2,?3)";
	}
}

/**
 * Pattern for {@code timestampdiff()}, mapped onto {@code datediff()}
 * or {@code datediff_big()} depending on overflow risk.
 */
@Override
public String timestampdiffPattern(TemporalUnit unit, TemporalType fromTemporalType, TemporalType toTemporalType) {
	if ( unit == TemporalUnit.NATIVE ) {//use microsecond as the "native" precision
		return "datediff_big(microsecond,?2,?3)";
	}
	//datediff() returns an int, and can easily
	//overflow when dealing with "physical"
	//durations, so use datediff_big()
	return unit.normalized() == NANOSECOND
			? "datediff_big(?1,?2,?3)"
			: "datediff(?1,?2,?3)";
}

/**
 * Renders the NATIVE duration unit as {@code microsecond};
 * everything else uses the default translation.
 */
@Override
public String translateDurationField(TemporalUnit unit) {
	//use microsecond as the "native" precision
	if ( unit == TemporalUnit.NATIVE ) {
		return "microsecond";
	}
	return super.translateDurationField( unit );
}
/**
 * Maps extract fields to SQL Server {@code datepart()} field names:
 * the ISO week number and the time zone offset have dialect-specific names.
 */
@Override
public String translateExtractField(TemporalUnit unit) {
	if ( unit == TemporalUnit.WEEK ) {
		//the ISO week number (behavior of "week" depends on a system property)
		return "isowk";
	}
	if ( unit == TemporalUnit.OFFSET ) {
		return "tz";
	}
	return super.translateExtractField( unit );
}
/**
 * Renders a java.time-style datetime format pattern translated to the
 * pattern syntax understood by SQL Server's {@code format()} function.
 */
@Override
public void appendDatetimeFormat(SqlAppender appender, String format) {
	appender.appendSql( datetimeFormat(format).result() );
}

/**
 * Translates a java.time {@code DateTimeFormatter}-style pattern into a
 * SQL Server {@code format()} pattern. Fields with no SQL Server
 * equivalent (week-of-year, day-of-year, week-based-year, ...) are left
 * untranslated; literal text quoting switches from single to double quotes.
 */
public static Replacer datetimeFormat(String format) {
	return new Replacer( format, "'", "\"" )
			//era
			.replace("G", "g")
			//y nothing to do
			//M nothing to do
			//w no equivalent
			//W no equivalent
			//Y no equivalent
			//day of week
			.replace("EEEE", "dddd")
			.replace("EEE", "ddd")
			//e no equivalent
			//d nothing to do
			//D no equivalent
			//am pm
			.replace("a", "tt")
			//h nothing to do
			//H nothing to do
			//m nothing to do
			//s nothing to do
			//fractional seconds
			.replace("S", "F")
			//timezones
			.replace("XXX", "K") //UTC represented as "Z"
			.replace("xxx", "zzz")
			.replace("x", "zz");
}
/**
 * Binary literals use SQL Server's {@code 0x...} hexadecimal form.
 */
@Override
public void appendBinaryLiteral(SqlAppender appender, byte[] bytes) {
	appender.appendSql( "0x" );
	PrimitiveByteArrayJavaType.INSTANCE.appendString( appender, bytes );
}
/**
 * Renders a temporal literal as an explicit {@code cast('...' as <type>)}
 * expression, because the JDBC escape syntax is unreliable on SQL Server
 * (see inline comments).
 */
@Override
public void appendDateTimeLiteral(
		SqlAppender appender,
		TemporalAccessor temporalAccessor,
		TemporalType precision,
		TimeZone jdbcTimeZone) {
	switch ( precision ) {
		case DATE:
			appender.appendSql( "cast('" );
			appendAsDate( appender, temporalAccessor );
			appender.appendSql( "' as date)" );
			break;
		case TIME:
			//needed because the {t ... } JDBC is just buggy
			appender.appendSql( "cast('" );
			appendAsTime( appender, temporalAccessor, supportsTemporalLiteralOffset(), jdbcTimeZone );
			appender.appendSql( "' as time)" );
			break;
		case TIMESTAMP:
			appender.appendSql( "cast('" );
			appendAsTimestampWithMicros( appender, temporalAccessor, supportsTemporalLiteralOffset(), jdbcTimeZone );
			//needed because the {ts ... } JDBC escape chokes on microseconds
			// use datetimeoffset only when the value actually carries an offset
			if ( supportsTemporalLiteralOffset() && temporalAccessor.isSupported( ChronoField.OFFSET_SECONDS ) ) {
				appender.appendSql( "' as datetimeoffset)" );
			}
			else {
				appender.appendSql( "' as datetime2)" );
			}
			break;
		default:
			throw new IllegalArgumentException();
	}
}
/**
 * Renders a {@link Date} literal as an explicit cast expression,
 * mirroring the {@code TemporalAccessor} overload.
 */
@Override
public void appendDateTimeLiteral(SqlAppender appender, Date date, TemporalType precision, TimeZone jdbcTimeZone) {
	switch ( precision ) {
		case DATE:
			appender.appendSql( "cast('" );
			appendAsDate( appender, date );
			appender.appendSql( "' as date)" );
			break;
		case TIME:
			//needed because the {t ... } JDBC is just buggy
			appender.appendSql( "cast('" );
			appendAsTime( appender, date );
			appender.appendSql( "' as time)" );
			break;
		case TIMESTAMP:
			appender.appendSql( "cast('" );
			appendAsTimestampWithMicros( appender, date, jdbcTimeZone );
			// NOTE(review): unconditionally casts to datetimeoffset here, while the
			// Calendar overload uses datetime2 — confirm this asymmetry is intended
			appender.appendSql( "' as datetimeoffset)" );
			break;
		default:
			throw new IllegalArgumentException();
	}
}
/**
 * Renders a {@link Calendar} literal as an explicit cast expression,
 * mirroring the {@code TemporalAccessor} overload.
 */
@Override
public void appendDateTimeLiteral(
		SqlAppender appender,
		Calendar calendar,
		TemporalType precision,
		TimeZone jdbcTimeZone) {
	switch ( precision ) {
		case DATE:
			appender.appendSql( "cast('" );
			appendAsDate( appender, calendar );
			appender.appendSql( "' as date)" );
			break;
		case TIME:
			//needed because the {t ... } JDBC is just buggy
			appender.appendSql( "cast('" );
			appendAsTime( appender, calendar );
			appender.appendSql( "' as time)" );
			break;
		case TIMESTAMP:
			appender.appendSql( "cast('" );
			appendAsTimestampWithMicros( appender, calendar, jdbcTimeZone );
			appender.appendSql( "' as datetime2)" );
			break;
		default:
			throw new IllegalArgumentException();
	}
}
/**
 * Character-typed columns in temporary tables are declared with
 * {@code collate database_default} so that comparisons against user
 * tables do not fail when tempdb uses a different collation.
 */
@Override
public String getCreateTemporaryTableColumnAnnotation(int sqlTypeCode) {
	if ( sqlTypeCode == Types.CHAR
			|| sqlTypeCode == Types.NCHAR
			|| sqlTypeCode == Types.VARCHAR
			|| sqlTypeCode == Types.NVARCHAR
			|| sqlTypeCode == Types.LONGVARCHAR
			|| sqlTypeCode == Types.LONGNVARCHAR ) {
		return "collate database_default";
	}
	return "";
}
/**
 * {@code drop schema if exists} is supported from SQL Server 2022 (16) on.
 */
@Override
public String[] getDropSchemaCommand(String schemaName) {
	if ( getVersion().isSameOrAfter( 16 ) ) {
		return new String[] { "drop schema if exists " + schemaName };
	}
	return super.getDropSchemaCommand( schemaName );
}

@Override
public NameQualifierSupport getNameQualifierSupport() {
	return NameQualifierSupport.BOTH;
}

/**
 * Returns the catalog-stripping sequence exporter for versions with
 * sequence support (2012+), or the default exporter otherwise.
 */
// Fixed: added the missing @Override annotation for consistency with
// every other overridden Dialect method in this class.
@Override
public Exporter<Sequence> getSequenceExporter() {
	if ( exporter == null ) {
		return super.getSequenceExporter();
	}
	return exporter;
}
/**
 * Sequence exporter that formats sequence names without the catalog,
 * which SQL Server does not allow in {@code create sequence}.
 */
private static class SqlServerSequenceExporter extends StandardSequenceExporter {

	public SqlServerSequenceExporter(Dialect dialect) {
		super( dialect );
	}

	@Override
	protected String getFormattedSequenceName(QualifiedSequenceName name, Metadata metadata, SqlStringGenerationContext context) {
		// SQL Server does not allow the catalog in the sequence name.
		// See https://docs.microsoft.com/en-us/sql/t-sql/statements/create-sequence-transact-sql?view=sql-server-ver15&viewFallbackFrom=sql-server-ver12
		// Keeping the catalog in the name does not break on ORM, but it fails using Vert.X for Reactive.
		return context.formatWithoutCatalog( name );
	}
}
@Override
public boolean supportsNamedParameters(DatabaseMetaData databaseMetaData) {
	// Not sure if it's a JDBC driver issue, but it doesn't work
	return false;
}

/**
 * Generated columns use the {@code as (...) persisted} syntax.
 */
@Override
public String generatedAs(String generatedAs) {
	return " as (" + generatedAs + ") persisted";
}

// SQL Server computed columns are declared without a preceding data type
@Override
public boolean hasDataTypeBeforeGeneratedAs() {
	return false;
}
}

View File

@ -0,0 +1,439 @@
/*
* Hibernate, Relational Persistence for Idiomatic Java
*
* License: GNU Lesser General Public License (LGPL), version 2.1 or later
* See the lgpl.txt file in the root directory or http://www.gnu.org/licenses/lgpl-2.1.html
*/
package org.hibernate.community.dialect;
import java.util.List;
import org.hibernate.LockMode;
import org.hibernate.LockOptions;
import org.hibernate.dialect.DatabaseVersion;
import org.hibernate.query.sqm.FetchClauseType;
import org.hibernate.engine.spi.SessionFactoryImplementor;
import org.hibernate.query.sqm.ComparisonOperator;
import org.hibernate.sql.ast.Clause;
import org.hibernate.sql.ast.SqlAstJoinType;
import org.hibernate.sql.ast.spi.AbstractSqlAstTranslator;
import org.hibernate.sql.ast.spi.SqlSelection;
import org.hibernate.sql.ast.tree.Statement;
import org.hibernate.sql.ast.tree.cte.CteStatement;
import org.hibernate.sql.ast.tree.expression.Expression;
import org.hibernate.sql.ast.tree.expression.Literal;
import org.hibernate.sql.ast.tree.expression.SqlTuple;
import org.hibernate.sql.ast.tree.expression.Summarization;
import org.hibernate.sql.ast.tree.from.NamedTableReference;
import org.hibernate.sql.ast.tree.from.TableGroup;
import org.hibernate.sql.ast.tree.from.TableGroupJoin;
import org.hibernate.sql.ast.tree.from.TableReference;
import org.hibernate.sql.ast.tree.from.UnionTableReference;
import org.hibernate.sql.ast.tree.predicate.Predicate;
import org.hibernate.sql.ast.tree.select.QueryGroup;
import org.hibernate.sql.ast.tree.select.QueryPart;
import org.hibernate.sql.ast.tree.select.QuerySpec;
import org.hibernate.sql.ast.tree.select.SelectClause;
import org.hibernate.sql.ast.tree.select.SortSpecification;
import org.hibernate.sql.exec.spi.JdbcOperation;
/**
* A SQL AST translator for SQL Server.
*
* @author Christian Beikov
*/
public class SQLServerLegacySqlAstTranslator<T extends JdbcOperation> extends AbstractSqlAstTranslator<T> {
private static final String UNION_ALL = " union all ";
private Predicate lateralPredicate;
/**
 * Creates a translator for the given session factory and statement.
 */
public SQLServerLegacySqlAstTranslator(SessionFactoryImplementor sessionFactory, Statement statement) {
	super( sessionFactory, statement );
}
/**
 * Renders a table group join. Lateral joins are rendered using SQL
 * Server's {@code outer apply} (for left joins) or {@code cross apply}
 * syntax; for lateral joins the join predicate cannot appear in an ON
 * clause and is instead injected into the sub-query via
 * {@code lateralPredicate}.
 */
@Override
protected void renderTableGroupJoin(TableGroupJoin tableGroupJoin, List<TableGroupJoin> tableGroupJoinCollector) {
	appendSql( WHITESPACE );
	if ( tableGroupJoin.getJoinedGroup().isLateral() ) {
		if ( tableGroupJoin.getJoinType() == SqlAstJoinType.LEFT ) {
			appendSql( "outer apply " );
		}
		else {
			appendSql( "cross apply " );
		}
	}
	else {
		appendSql( tableGroupJoin.getJoinType().getText() );
		appendSql( "join " );
	}

	final Predicate predicate = tableGroupJoin.getPredicate();
	if ( predicate != null && !predicate.isEmpty() ) {
		if ( tableGroupJoin.getJoinedGroup().isLateral() ) {
			// We have to inject the lateral predicate into the sub-query
			// save and restore the field so that nested joins are unaffected
			final Predicate lateralPredicate = this.lateralPredicate;
			this.lateralPredicate = predicate;
			renderTableGroup( tableGroupJoin.getJoinedGroup(), null, tableGroupJoinCollector );
			this.lateralPredicate = lateralPredicate;
		}
		else {
			renderTableGroup( tableGroupJoin.getJoinedGroup(), predicate, tableGroupJoinCollector );
		}
	}
	else {
		renderTableGroup( tableGroupJoin.getJoinedGroup(), null, tableGroupJoinCollector );
	}
}
/**
 * Renders the primary table reference of a table group. Named table
 * references are delegated to {@link #renderNamedTableReference} so a
 * lock hint can be appended; other references (e.g. derived tables)
 * are rendered directly.
 */
// Fixed: added the missing @Override annotation for consistency with
// the other overridden translator methods in this class.
@Override
protected boolean renderPrimaryTableReference(TableGroup tableGroup, LockMode lockMode) {
	final TableReference tableReference = tableGroup.getPrimaryTableReference();
	if ( tableReference instanceof NamedTableReference ) {
		return renderNamedTableReference( (NamedTableReference) tableReference, lockMode );
	}
	tableReference.accept( this );
	return false;
}
/**
 * Renders a named table reference, appending SQL Server lock hints
 * ({@code with (...)}) as needed. For union table references the hint
 * must be pushed down onto each individual table name inside the
 * union-all expression.
 */
@Override
protected boolean renderNamedTableReference(NamedTableReference tableReference, LockMode lockMode) {
	final String tableExpression = tableReference.getTableExpression();
	if ( tableReference instanceof UnionTableReference && lockMode != LockMode.NONE && tableExpression.charAt( 0 ) == '(' ) {
		// SQL Server requires to push down the lock hint to the actual table names
		int searchIndex = 0;
		int unionIndex;
		while ( ( unionIndex = tableExpression.indexOf( UNION_ALL, searchIndex ) ) != -1 ) {
			append( tableExpression, searchIndex, unionIndex );
			renderLockHint( lockMode );
			appendSql( UNION_ALL );
			searchIndex = unionIndex + UNION_ALL.length();
		}
		// render the final branch; the `- 2` presumably strips a trailing " )"
		// from the union expression so the hint lands before it — TODO confirm
		append( tableExpression, searchIndex, tableExpression.length() - 2 );
		renderLockHint( lockMode );
		appendSql( " )" );

		registerAffectedTable( tableReference );
		final Clause currentClause = getClauseStack().getCurrent();
		if ( rendersTableReferenceAlias( currentClause ) ) {
			final String identificationVariable = tableReference.getIdentificationVariable();
			if ( identificationVariable != null ) {
				appendSql( ' ' );
				appendSql( identificationVariable );
			}
		}
	}
	else {
		super.renderNamedTableReference( tableReference, lockMode );
		renderLockHint( lockMode );
	}
	// Just always return true because SQL Server doesn't support the FOR UPDATE clause
	return true;
}
/**
 * Appends the SQL Server table hint corresponding to the given lock mode, e.g.
 * {@code with (updlock,holdlock,rowlock)}. On version 9 (SQL Server 2005) and later,
 * the effective lock timeout is consulted to add {@code readpast} (skip locked) or
 * {@code nowait}; older versions fall back to simpler hints without timeout handling.
 * Lock modes not handled by a case produce no hint.
 */
private void renderLockHint(LockMode lockMode) {
	if ( getDialect().getVersion().isSameOrAfter( 9 ) ) {
		final int effectiveLockTimeout = getEffectiveLockTimeout( lockMode );
		switch ( lockMode ) {
			case PESSIMISTIC_WRITE:
			case WRITE: {
				// Write locks: updlock; holdlock is dropped for SKIP_LOCKED to allow skipping rows
				switch ( effectiveLockTimeout ) {
					case LockOptions.SKIP_LOCKED:
						appendSql( " with (updlock,rowlock,readpast)" );
						break;
					case LockOptions.NO_WAIT:
						appendSql( " with (updlock,holdlock,rowlock,nowait)" );
						break;
					default:
						appendSql( " with (updlock,holdlock,rowlock)" );
						break;
				}
				break;
			}
			case PESSIMISTIC_READ: {
				// Read locks: holdlock, except SKIP_LOCKED which needs updlock to combine with readpast
				switch ( effectiveLockTimeout ) {
					case LockOptions.SKIP_LOCKED:
						appendSql( " with (updlock,rowlock,readpast)" );
						break;
					case LockOptions.NO_WAIT:
						appendSql( " with (holdlock,rowlock,nowait)" );
						break;
					default:
						appendSql( " with (holdlock,rowlock)" );
						break;
				}
				break;
			}
			case UPGRADE_SKIPLOCKED: {
				if ( effectiveLockTimeout == LockOptions.NO_WAIT ) {
					appendSql( " with (updlock,rowlock,readpast,nowait)" );
				}
				else {
					appendSql( " with (updlock,rowlock,readpast)" );
				}
				break;
			}
			case UPGRADE_NOWAIT: {
				appendSql( " with (updlock,holdlock,rowlock,nowait)" );
				break;
			}
		}
	}
	else {
		// Pre-2005 versions: no timeout-sensitive variants, only the basic hints
		switch ( lockMode ) {
			case UPGRADE_NOWAIT:
			case PESSIMISTIC_WRITE:
			case WRITE: {
				appendSql( " with (updlock,rowlock)" );
				break;
			}
			case PESSIMISTIC_READ: {
				appendSql( " with (holdlock,rowlock)" );
				break;
			}
			case UPGRADE_SKIPLOCKED: {
				appendSql( " with (updlock,rowlock,readpast)" );
				break;
			}
		}
	}
}
/**
 * Intentionally renders nothing: locking is expressed through table hints
 * (see {@code renderLockHint}) rather than a trailing FOR UPDATE clause.
 */
@Override
protected void renderForUpdateClause(QuerySpec querySpec, ForUpdateClause forUpdateClause) {
	// SQL Server does not support the FOR UPDATE clause
}
/**
 * Decides how the offset/fetch limits of a query part should be rendered:
 * {@code STANDARD} (native offset/fetch, version 11+ with rows-only fetch),
 * {@code TOP_ONLY} (a TOP clause, when no offset is involved),
 * {@code EMULATED} (row_number() window function), or {@code null} when
 * no limit handling is needed at all.
 */
protected OffsetFetchClauseMode getOffsetFetchClauseMode(QueryPart queryPart) {
	final DatabaseVersion version = getDialect().getVersion();
	final boolean hasLimit;
	final boolean hasOffset;
	if ( queryPart.isRoot() && hasLimit() ) {
		// The root query part takes its limits from the query options
		hasLimit = getLimit().getMaxRows() != null;
		hasOffset = getLimit().getFirstRow() != null;
	}
	else {
		hasLimit = queryPart.getFetchClauseExpression() != null;
		hasOffset = queryPart.getOffsetClauseExpression() != null;
	}
	if ( queryPart instanceof QueryGroup ) {
		// We can't use TOP for set operations
		if ( hasOffset || hasLimit ) {
			return version.isBefore( 11 ) || !isRowsOnlyFetchClauseType( queryPart )
					? OffsetFetchClauseMode.EMULATED
					: OffsetFetchClauseMode.STANDARD;
		}
		return null;
	}
	if ( version.isBefore( 9 ) || !hasOffset ) {
		return hasLimit ? OffsetFetchClauseMode.TOP_ONLY : null;
	}
	return version.isBefore( 11 ) || !isRowsOnlyFetchClauseType( queryPart )
			? OffsetFetchClauseMode.EMULATED
			: OffsetFetchClauseMode.STANDARD;
}
@Override
protected boolean supportsSimpleQueryGrouping() {
	// SQL Server is quite strict i.e. it requires `select .. union all select * from (select ...)`
	// rather than `select .. union all (select ...)` because parenthesis followed by select
	// is always treated as a subquery, which is not supported in a set operation
	return false;
}
/**
 * Whether the fetch/offset of this query part must be emulated with row numbering.
 */
protected boolean shouldEmulateFetchClause(QueryPart queryPart) {
	// The query part currently being row-numbered must not trigger emulation again,
	// otherwise we would recurse infinitely
	if ( getQueryPartForRowNumbering() == queryPart ) {
		return false;
	}
	return getOffsetFetchClauseMode( queryPart ) == OffsetFetchClauseMode.EMULATED;
}
/**
 * Visits a query group, first flushing any stashed lateral-join predicate into
 * the where clause, then either emulating offset/fetch via window functions or
 * delegating to the default rendering.
 */
@Override
public void visitQueryGroup(QueryGroup queryGroup) {
	final Predicate pendingLateralPredicate = this.lateralPredicate;
	if ( pendingLateralPredicate != null ) {
		// Hand the lateral predicate over to the where clause of this query group
		this.lateralPredicate = null;
		addAdditionalWherePredicate( pendingLateralPredicate );
	}
	if ( !shouldEmulateFetchClause( queryGroup ) ) {
		super.visitQueryGroup( queryGroup );
	}
	else {
		emulateFetchOffsetWithWindowFunctions( queryGroup, !isRowsOnlyFetchClauseType( queryGroup ) );
	}
}
/**
 * Visits a query spec, emulating offset/fetch via row_number() window functions
 * when necessary and otherwise delegating to the default rendering.
 */
@Override
public void visitQuerySpec(QuerySpec querySpec) {
	if ( !shouldEmulateFetchClause( querySpec ) ) {
		super.visitQuerySpec( querySpec );
	}
	else {
		emulateFetchOffsetWithWindowFunctions( querySpec, !isRowsOnlyFetchClauseType( querySpec ) );
	}
}
/**
 * Visits the select clause, first flushing any stashed lateral-join predicate
 * into the current where clause.
 */
@Override
public void visitSelectClause(SelectClause selectClause) {
	final Predicate pendingLateralPredicate = lateralPredicate;
	if ( pendingLateralPredicate != null ) {
		lateralPredicate = null;
		addAdditionalWherePredicate( pendingLateralPredicate );
	}
	super.visitSelectClause( selectClause );
}
/**
 * Pre-2005 versions have no way to express an offset in SQL, so the rows
 * to skip must be handled by the translator.
 */
@Override
protected boolean needsRowsToSkip() {
	return getDialect().getVersion().isBefore( 9 );
}
/**
 * Renders the sum of fetch and offset as a single parameter, since SQL Server's
 * TOP clause takes one combined row count rather than separate expressions.
 */
@Override
protected void renderFetchPlusOffsetExpression(
		Expression fetchClauseExpression,
		Expression offsetClauseExpression,
		int offset) {
	renderFetchPlusOffsetExpressionAsSingleParameter( fetchClauseExpression, offsetClauseExpression, offset );
}
/**
 * Renders the select list, prefixing it with a TOP clause when the current
 * query spec's limits are expressed via TOP (either directly or as part of
 * the row-number emulation).
 */
@Override
protected void visitSqlSelections(SelectClause selectClause) {
	final QuerySpec currentQuerySpec = (QuerySpec) getQueryPartStack().getCurrent();
	final OffsetFetchClauseMode mode = getOffsetFetchClauseMode( currentQuerySpec );
	if ( mode == OffsetFetchClauseMode.TOP_ONLY ) {
		renderTopClause( currentQuerySpec, true, true );
	}
	else if ( mode == OffsetFetchClauseMode.EMULATED ) {
		renderTopClause( currentQuerySpec, isRowsOnlyFetchClauseType( currentQuerySpec ), true );
	}
	super.visitSqlSelections( selectClause );
}
/**
 * Renders the order by clause. When there are no sort specifications but we are
 * inside an OVER clause, a dummy order by is emitted because SQL Server requires
 * one for window functions.
 */
@Override
protected void renderOrderBy(boolean addWhitespace, List<SortSpecification> sortSpecifications) {
	final boolean hasSortSpecifications = sortSpecifications != null && !sortSpecifications.isEmpty();
	if ( hasSortSpecifications ) {
		super.renderOrderBy( addWhitespace, sortSpecifications );
	}
	else if ( getClauseStack().getCurrent() == Clause.OVER ) {
		if ( addWhitespace ) {
			appendSql( ' ' );
		}
		renderEmptyOrderBy();
	}
}
/**
 * Emits a constant-ish order by expression for contexts that syntactically
 * require an order by clause but have no meaningful sort.
 */
protected void renderEmptyOrderBy() {
	// Always need an order by clause: https://blog.jooq.org/2014/05/13/sql-server-trick-circumvent-missing-order-by-clause/
	appendSql( "order by @@version" );
}
/**
 * Renders the offset/fetch clause of a query part. In STANDARD mode the native
 * {@code offset ? rows fetch next ? rows only} syntax is emitted (with a dummy
 * order by when the part has no sort specifications, since SQL Server requires
 * one). In TOP_ONLY mode only the dummy order by may be needed here — the TOP
 * clause itself is rendered in {@code visitSqlSelections}. Nothing is rendered
 * while the current query part is being row-numbered, as the limits are then
 * applied through the row-number predicate instead.
 *
 * @throws IllegalArgumentException if a subquery offset must be emulated on a
 * pre-2005 version, where that is impossible
 */
@Override
public void visitOffsetFetchClause(QueryPart queryPart) {
	if ( !isRowNumberingCurrentQueryPart() ) {
		if ( getDialect().getVersion().isBefore( 9 ) && !queryPart.isRoot() && queryPart.getOffsetClauseExpression() != null ) {
			throw new IllegalArgumentException( "Can't emulate offset clause in subquery" );
		}
		// Note that SQL Server is very strict i.e. it requires an order by clause for TOP or OFFSET
		final OffsetFetchClauseMode offsetFetchClauseMode = getOffsetFetchClauseMode( queryPart );
		if ( offsetFetchClauseMode == OffsetFetchClauseMode.STANDARD ) {
			if ( !queryPart.hasSortSpecifications() ) {
				appendSql( ' ' );
				renderEmptyOrderBy();
			}
			final Expression offsetExpression;
			final Expression fetchExpression;
			final FetchClauseType fetchClauseType;
			if ( queryPart.isRoot() && hasLimit() ) {
				// Root query part: limits come from the query options as JDBC parameters
				prepareLimitOffsetParameters();
				offsetExpression = getOffsetParameter();
				fetchExpression = getLimitParameter();
				fetchClauseType = FetchClauseType.ROWS_ONLY;
			}
			else {
				offsetExpression = queryPart.getOffsetClauseExpression();
				fetchExpression = queryPart.getFetchClauseExpression();
				fetchClauseType = queryPart.getFetchClauseType();
			}
			if ( offsetExpression == null ) {
				// The fetch clause is only valid after an offset clause, so emit a zero offset
				appendSql( " offset 0 rows" );
			}
			else {
				renderOffset( offsetExpression, true );
			}
			if ( fetchExpression != null ) {
				renderFetch( fetchExpression, null, fetchClauseType );
			}
		}
		else if ( offsetFetchClauseMode == OffsetFetchClauseMode.TOP_ONLY && !queryPart.hasSortSpecifications() ) {
			appendSql( ' ' );
			renderEmptyOrderBy();
		}
	}
}
/**
 * Intentionally renders nothing for the CTE search clause.
 */
@Override
protected void renderSearchClause(CteStatement cte) {
	// SQL Server does not support this, but it's just a hint anyway
}
/**
 * Intentionally renders nothing for the CTE cycle clause.
 */
@Override
protected void renderCycleClause(CteStatement cte) {
	// SQL Server does not support this, but it can be emulated
}
/**
 * Renders a comparison, emulating distinct-from semantics via intersect
 * since SQL Server lacks a native IS DISTINCT FROM operator.
 */
@Override
protected void renderComparison(Expression lhs, ComparisonOperator operator, Expression rhs) {
	renderComparisonEmulateIntersect( lhs, operator, rhs );
}
/**
 * Renders a tuple comparison by expanding it into per-element comparisons,
 * since SQL Server has no row value constructor syntax (see the
 * supportsRowValueConstructorSyntax* overrides below).
 */
@Override
protected void renderSelectTupleComparison(
		List<SqlSelection> lhsExpressions,
		SqlTuple tuple,
		ComparisonOperator operator) {
	emulateSelectTupleComparison( lhsExpressions, tuple.getExpressions(), operator, true );
}
/**
 * Renders a single group-by/partition item. Literals are replaced with an empty
 * grouping set, summarizations (rollup/cube) use SQL Server's trailing
 * {@code with rollup}/{@code with cube} syntax, and everything else is visited
 * as a regular expression.
 */
@Override
protected void renderPartitionItem(Expression expression) {
	if ( expression instanceof Summarization ) {
		final Summarization summarization = (Summarization) expression;
		renderCommaSeparated( summarization.getGroupings() );
		appendSql( " with " );
		appendSql( summarization.getKind().sqlText() );
	}
	else if ( expression instanceof Literal ) {
		// Grouping by a literal is meaningless; render an empty grouping instead
		appendSql( "()" );
	}
	else {
		expression.accept( this );
	}
}
/**
 * SQL Server has no row value constructor syntax, e.g. {@code (a,b) = (1,2)}.
 */
@Override
protected boolean supportsRowValueConstructorSyntax() {
	return false;
}
/**
 * Row value constructors are also unavailable inside IN lists.
 */
@Override
protected boolean supportsRowValueConstructorSyntaxInInList() {
	return false;
}
/**
 * Row value constructors are also unavailable in quantified predicates (ANY/ALL).
 */
@Override
protected boolean supportsRowValueConstructorSyntaxInQuantifiedPredicates() {
	return false;
}
/**
 * Strategies for rendering a query part's offset/fetch limits on SQL Server.
 */
enum OffsetFetchClauseMode {
	// Native "offset ? rows fetch next ? rows only" syntax (version 11+, rows-only fetch)
	STANDARD,
	// A TOP clause in the select list; only usable when there is no offset
	TOP_ONLY,
	// Emulation through a row_number() window function wrapper
	EMULATED;
}
}

View File

@ -4,12 +4,13 @@
* License: GNU Lesser General Public License (LGPL), version 2.1 or later. * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>. * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
*/ */
package org.hibernate.orm.test.dialect; package org.hibernate.community.dialect;
import java.util.Locale; import java.util.Locale;
import org.hibernate.LockMode; import org.hibernate.LockMode;
import org.hibernate.LockOptions; import org.hibernate.LockOptions;
import org.hibernate.dialect.DatabaseVersion;
import org.hibernate.dialect.SQLServer2005Dialect; import org.hibernate.dialect.SQLServer2005Dialect;
import org.hibernate.query.spi.Limit; import org.hibernate.query.spi.Limit;
@ -29,11 +30,11 @@ import static org.junit.Assert.assertEquals;
* @author Chris Cranford * @author Chris Cranford
*/ */
public class SQLServer2005DialectTestCase extends BaseUnitTestCase { public class SQLServer2005DialectTestCase extends BaseUnitTestCase {
private SQLServer2005Dialect dialect; private SQLServerLegacyDialect dialect;
@Before @Before
public void setup() { public void setup() {
dialect = new SQLServer2005Dialect(); dialect = new SQLServerLegacyDialect( DatabaseVersion.make( 9 ) );
} }
@After @After

View File

@ -4,18 +4,21 @@
* License: GNU Lesser General Public License (LGPL), version 2.1 or later. * License: GNU Lesser General Public License (LGPL), version 2.1 or later.
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>. * See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
*/ */
package org.hibernate.orm.test.dialect.unit.lockhint; package org.hibernate.community.dialect.unit.lockhint;
import org.hibernate.LockMode; import org.hibernate.LockMode;
import org.hibernate.LockOptions; import org.hibernate.LockOptions;
import org.hibernate.community.dialect.SQLServerLegacyDialect;
import org.hibernate.dialect.DatabaseVersion;
import org.hibernate.dialect.Dialect; import org.hibernate.dialect.Dialect;
import org.hibernate.dialect.SQLServer2005Dialect; import org.hibernate.dialect.SQLServer2005Dialect;
import org.hibernate.orm.test.dialect.unit.lockhint.AbstractLockHintTest;
/** /**
* @author Vlad Mihalcea * @author Vlad Mihalcea
*/ */
public class SQLServer2005LockHintsTest extends AbstractLockHintTest { public class SQLServer2005LockHintsTest extends AbstractLockHintTest {
public static final Dialect DIALECT = new SQLServer2005Dialect(); public static final Dialect DIALECT = new SQLServerLegacyDialect( DatabaseVersion.make( 9 ) );
protected String getLockHintUsed() { protected String getLockHintUsed() {
return "with (updlock,holdlock,rowlock,nowait)"; return "with (updlock,holdlock,rowlock,nowait)";

View File

@ -82,12 +82,13 @@ import static org.hibernate.type.descriptor.DateTimeUtils.appendAsTimestampWithM
* @author Gavin King * @author Gavin King
*/ */
public class SQLServerDialect extends AbstractTransactSQLDialect { public class SQLServerDialect extends AbstractTransactSQLDialect {
private final static DatabaseVersion MINIMUM_VERSION = DatabaseVersion.make( 10, 0 );
private static final int PARAM_LIST_SIZE_LIMIT = 2100; private static final int PARAM_LIST_SIZE_LIMIT = 2100;
private final StandardSequenceExporter exporter; private final StandardSequenceExporter exporter;
public SQLServerDialect() { public SQLServerDialect() {
this( DatabaseVersion.make( 8, 0 ) ); this( MINIMUM_VERSION );
} }
public SQLServerDialect(DatabaseVersion version) { public SQLServerDialect(DatabaseVersion version) {
@ -104,6 +105,11 @@ public class SQLServerDialect extends AbstractTransactSQLDialect {
return version.isSameOrAfter(11) ? new SqlServerSequenceExporter(this) : null; return version.isSameOrAfter(11) ? new SqlServerSequenceExporter(this) : null;
} }
@Override
protected DatabaseVersion getMinimumSupportedVersion() {
return MINIMUM_VERSION;
}
@Override @Override
protected void registerDefaultKeywords() { protected void registerDefaultKeywords() {
super.registerDefaultKeywords(); super.registerDefaultKeywords();
@ -113,55 +119,50 @@ public class SQLServerDialect extends AbstractTransactSQLDialect {
@Override @Override
protected String columnType(int sqlTypeCode) { protected String columnType(int sqlTypeCode) {
// there is no 'double' type in SQL server switch ( sqlTypeCode ) {
// but 'float' is double precision by default // there is no 'double' type in SQL server
if ( sqlTypeCode == DOUBLE ) { // but 'float' is double precision by default
return "float"; case DOUBLE:
} return "float";
if ( getVersion().isSameOrAfter( 9 ) ) { // Prefer 'varchar(max)' and 'varbinary(max)' to
switch ( sqlTypeCode ) { // the deprecated TEXT and IMAGE types. Note that
// Prefer 'varchar(max)' and 'varbinary(max)' to // the length of a VARCHAR or VARBINARY column must
// the deprecated TEXT and IMAGE types. Note that // be either between 1 and 8000 or exactly MAX, and
// the length of a VARCHAR or VARBINARY column must // the length of an NVARCHAR column must be either
// be either between 1 and 8000 or exactly MAX, and // between 1 and 4000 or exactly MAX. (HHH-3965)
// the length of an NVARCHAR column must be either case CLOB:
// between 1 and 4000 or exactly MAX. (HHH-3965) return "varchar(max)";
case CLOB: case NCLOB:
return "varchar(max)"; return "nvarchar(max)";
case NCLOB: case BLOB:
return "nvarchar(max)"; return "varbinary(max)";
case BLOB: case DATE:
return "varbinary(max)"; return "date";
case DATE: case TIME:
return getVersion().isSameOrAfter( 10 ) ? "date" : super.columnType( sqlTypeCode ); return "time";
case TIME: case TIMESTAMP:
return getVersion().isSameOrAfter( 10 ) ? "time" : super.columnType( sqlTypeCode ); return "datetime2($p)";
case TIMESTAMP: case TIMESTAMP_WITH_TIMEZONE:
return getVersion().isSameOrAfter( 10 ) ? "datetime2($p)" : super.columnType( sqlTypeCode ); return "datetimeoffset($p)";
case TIMESTAMP_WITH_TIMEZONE:
return getVersion().isSameOrAfter( 10 ) ? "datetimeoffset($p)" : super.columnType( sqlTypeCode );
}
} }
return super.columnType( sqlTypeCode ); return super.columnType( sqlTypeCode );
} }
@Override @Override
protected String castType(int sqlTypeCode) { protected String castType(int sqlTypeCode) {
if ( getVersion().isSameOrAfter( 9 ) ) { switch ( sqlTypeCode ) {
switch ( sqlTypeCode ) { case VARCHAR:
case VARCHAR: case LONG32VARCHAR:
case LONG32VARCHAR: case CLOB:
case CLOB: return "varchar(max)";
return "varchar(max)"; case NVARCHAR:
case NVARCHAR: case LONG32NVARCHAR:
case LONG32NVARCHAR: case NCLOB:
case NCLOB: return "nvarchar(max)";
return "nvarchar(max)"; case VARBINARY:
case VARBINARY: case LONG32VARBINARY:
case LONG32VARBINARY: case BLOB:
case BLOB: return "varbinary(max)";
return "varbinary(max)";
}
} }
return super.castType( sqlTypeCode ); return super.castType( sqlTypeCode );
} }
@ -170,10 +171,8 @@ public class SQLServerDialect extends AbstractTransactSQLDialect {
protected void registerColumnTypes(TypeContributions typeContributions, ServiceRegistry serviceRegistry) { protected void registerColumnTypes(TypeContributions typeContributions, ServiceRegistry serviceRegistry) {
super.registerColumnTypes( typeContributions, serviceRegistry ); super.registerColumnTypes( typeContributions, serviceRegistry );
final DdlTypeRegistry ddlTypeRegistry = typeContributions.getTypeConfiguration().getDdlTypeRegistry(); final DdlTypeRegistry ddlTypeRegistry = typeContributions.getTypeConfiguration().getDdlTypeRegistry();
if ( getVersion().isSameOrAfter( 10 ) ) { ddlTypeRegistry.addDescriptor( new DdlTypeImpl( GEOMETRY, "geometry", this ) );
ddlTypeRegistry.addDescriptor( new DdlTypeImpl( GEOMETRY, "geometry", this ) ); ddlTypeRegistry.addDescriptor( new DdlTypeImpl( GEOGRAPHY, "geography", this ) );
ddlTypeRegistry.addDescriptor( new DdlTypeImpl( GEOGRAPHY, "geography", this ) );
}
ddlTypeRegistry.addDescriptor( new DdlTypeImpl( SQLXML, "xml", this ) ); ddlTypeRegistry.addDescriptor( new DdlTypeImpl( SQLXML, "xml", this ) );
} }
@ -189,7 +188,7 @@ public class SQLServerDialect extends AbstractTransactSQLDialect {
@Override @Override
public TimeZoneSupport getTimeZoneSupport() { public TimeZoneSupport getTimeZoneSupport() {
return getVersion().isSameOrAfter( 10 ) ? TimeZoneSupport.NATIVE : TimeZoneSupport.NONE; return TimeZoneSupport.NATIVE;
} }
@Override @Override
@ -248,16 +247,14 @@ public class SQLServerDialect extends AbstractTransactSQLDialect {
functionFactory.octetLength_pattern( "datalength(?1)" ); functionFactory.octetLength_pattern( "datalength(?1)" );
functionFactory.bitLength_pattern( "datalength(?1)*8" ); functionFactory.bitLength_pattern( "datalength(?1)*8" );
if ( getVersion().isSameOrAfter( 10 ) ) { functionFactory.locate_charindex();
functionFactory.locate_charindex(); functionFactory.stddevPopSamp_stdevp();
functionFactory.stddevPopSamp_stdevp(); functionFactory.varPopSamp_varp();
functionFactory.varPopSamp_varp();
}
if ( getVersion().isSameOrAfter( 11 ) ) { if ( getVersion().isSameOrAfter( 11 ) ) {
queryEngine.getSqmFunctionRegistry().register( queryEngine.getSqmFunctionRegistry().register(
"format", "format",
new SQLServerFormatEmulation( this, queryEngine.getTypeConfiguration() ) new SQLServerFormatEmulation( queryEngine.getTypeConfiguration() )
); );
//actually translate() was added in 2017 but //actually translate() was added in 2017 but
@ -376,19 +373,16 @@ public class SQLServerDialect extends AbstractTransactSQLDialect {
if ( getVersion().isSameOrAfter( 11 ) ) { if ( getVersion().isSameOrAfter( 11 ) ) {
return SQLServer2012LimitHandler.INSTANCE; return SQLServer2012LimitHandler.INSTANCE;
} }
else if ( getVersion().isSameOrAfter( 9 ) ) { else {
//this is a stateful class, don't cache //this is a stateful class, don't cache
//it in the Dialect! //it in the Dialect!
return new SQLServer2005LimitHandler(); return new SQLServer2005LimitHandler();
} }
else {
return new TopLimitHandler(false);
}
} }
@Override @Override
public boolean supportsValuesList() { public boolean supportsValuesList() {
return getVersion().isSameOrAfter( 10 ); return true;
} }
@Override @Override
@ -424,45 +418,29 @@ public class SQLServerDialect extends AbstractTransactSQLDialect {
@Override @Override
public String appendLockHint(LockOptions lockOptions, String tableName) { public String appendLockHint(LockOptions lockOptions, String tableName) {
if ( getVersion().isSameOrAfter( 9 ) ) { LockMode lockMode = lockOptions.getAliasSpecificLockMode( tableName );
LockMode lockMode = lockOptions.getAliasSpecificLockMode( tableName ); if (lockMode == null) {
if (lockMode == null) { lockMode = lockOptions.getLockMode();
lockMode = lockOptions.getLockMode();
}
final String writeLockStr = lockOptions.getTimeOut() == LockOptions.SKIP_LOCKED ? "updlock" : "updlock,holdlock";
final String readLockStr = lockOptions.getTimeOut() == LockOptions.SKIP_LOCKED ? "updlock" : "holdlock";
final String noWaitStr = lockOptions.getTimeOut() == LockOptions.NO_WAIT ? ",nowait" : "";
final String skipLockStr = lockOptions.getTimeOut() == LockOptions.SKIP_LOCKED ? ",readpast" : "";
switch ( lockMode ) {
case PESSIMISTIC_WRITE:
case WRITE:
return tableName + " with (" + writeLockStr + ",rowlock" + noWaitStr + skipLockStr + ")";
case PESSIMISTIC_READ:
return tableName + " with (" + readLockStr + ",rowlock" + noWaitStr + skipLockStr + ")";
case UPGRADE_SKIPLOCKED:
return tableName + " with (updlock,rowlock,readpast" + noWaitStr + ")";
case UPGRADE_NOWAIT:
return tableName + " with (updlock,holdlock,rowlock,nowait)";
default:
return tableName;
}
} }
else {
switch ( lockOptions.getLockMode() ) { final String writeLockStr = lockOptions.getTimeOut() == LockOptions.SKIP_LOCKED ? "updlock" : "updlock,holdlock";
case UPGRADE_NOWAIT: final String readLockStr = lockOptions.getTimeOut() == LockOptions.SKIP_LOCKED ? "updlock" : "holdlock";
case PESSIMISTIC_WRITE:
case WRITE: final String noWaitStr = lockOptions.getTimeOut() == LockOptions.NO_WAIT ? ",nowait" : "";
return tableName + " with (updlock,rowlock)"; final String skipLockStr = lockOptions.getTimeOut() == LockOptions.SKIP_LOCKED ? ",readpast" : "";
case PESSIMISTIC_READ:
return tableName + " with (holdlock,rowlock)"; switch ( lockMode ) {
case UPGRADE_SKIPLOCKED: case PESSIMISTIC_WRITE:
return tableName + " with (updlock,rowlock,readpast)"; case WRITE:
default: return tableName + " with (" + writeLockStr + ",rowlock" + noWaitStr + skipLockStr + ")";
return tableName; case PESSIMISTIC_READ:
} return tableName + " with (" + readLockStr + ",rowlock" + noWaitStr + skipLockStr + ")";
case UPGRADE_SKIPLOCKED:
return tableName + " with (updlock,rowlock,readpast" + noWaitStr + ")";
case UPGRADE_NOWAIT:
return tableName + " with (updlock,holdlock,rowlock,nowait)";
default:
return tableName;
} }
} }
@ -523,17 +501,17 @@ public class SQLServerDialect extends AbstractTransactSQLDialect {
@Override @Override
public boolean supportsNonQueryWithCTE() { public boolean supportsNonQueryWithCTE() {
return getVersion().isSameOrAfter( 9 ); return true;
} }
@Override @Override
public boolean supportsSkipLocked() { public boolean supportsSkipLocked() {
return getVersion().isSameOrAfter( 9 ); return true;
} }
@Override @Override
public boolean supportsNoWait() { public boolean supportsNoWait() {
return getVersion().isSameOrAfter( 9 ); return true;
} }
@Override @Override
@ -605,7 +583,7 @@ public class SQLServerDialect extends AbstractTransactSQLDialect {
@Override @Override
public boolean supportsLateral() { public boolean supportsLateral() {
return getVersion().isSameOrAfter( 9 ); return true;
} }
@Override @Override
@ -615,9 +593,6 @@ public class SQLServerDialect extends AbstractTransactSQLDialect {
@Override @Override
public SQLExceptionConversionDelegate buildSQLExceptionConversionDelegate() { public SQLExceptionConversionDelegate buildSQLExceptionConversionDelegate() {
if ( getVersion().isBefore( 9 ) ) {
return super.buildSQLExceptionConversionDelegate(); //null
}
return (sqlException, message, sql) -> { return (sqlException, message, sql) -> {
final String sqlState = JdbcExceptionHelper.extractSqlState( sqlException ); final String sqlState = JdbcExceptionHelper.extractSqlState( sqlException );
final int errorCode = JdbcExceptionHelper.extractErrorCode( sqlException ); final int errorCode = JdbcExceptionHelper.extractErrorCode( sqlException );
@ -675,11 +650,6 @@ public class SQLServerDialect extends AbstractTransactSQLDialect {
case SECOND: case SECOND:
//this should evaluate to a floating point type //this should evaluate to a floating point type
return "(datepart(second,?2)+datepart(nanosecond,?2)/1e9)"; return "(datepart(second,?2)+datepart(nanosecond,?2)/1e9)";
case WEEK:
// Thanks https://www.sqlservercentral.com/articles/a-simple-formula-to-calculate-the-iso-week-number
if ( getVersion().isBefore( 10 ) ) {
return "(DATEPART(dy,DATEADD(dd,DATEDIFF(dd,'17530101',?2)/7*7,'17530104'))+6)/7)";
}
default: default:
return "datepart(?1,?2)"; return "datepart(?1,?2)";
} }

View File

@ -130,69 +130,49 @@ public class SQLServerSqlAstTranslator<T extends JdbcOperation> extends Abstract
} }
private void renderLockHint(LockMode lockMode) { private void renderLockHint(LockMode lockMode) {
if ( getDialect().getVersion().isSameOrAfter( 9 ) ) { final int effectiveLockTimeout = getEffectiveLockTimeout( lockMode );
final int effectiveLockTimeout = getEffectiveLockTimeout( lockMode ); switch ( lockMode ) {
switch ( lockMode ) { case PESSIMISTIC_WRITE:
case PESSIMISTIC_WRITE: case WRITE: {
case WRITE: { switch ( effectiveLockTimeout ) {
switch ( effectiveLockTimeout ) { case LockOptions.SKIP_LOCKED:
case LockOptions.SKIP_LOCKED:
appendSql( " with (updlock,rowlock,readpast)" );
break;
case LockOptions.NO_WAIT:
appendSql( " with (updlock,holdlock,rowlock,nowait)" );
break;
default:
appendSql( " with (updlock,holdlock,rowlock)" );
break;
}
break;
}
case PESSIMISTIC_READ: {
switch ( effectiveLockTimeout ) {
case LockOptions.SKIP_LOCKED:
appendSql( " with (updlock,rowlock,readpast)" );
break;
case LockOptions.NO_WAIT:
appendSql( " with (holdlock,rowlock,nowait)" );
break;
default:
appendSql( " with (holdlock,rowlock)" );
break;
}
break;
}
case UPGRADE_SKIPLOCKED: {
if ( effectiveLockTimeout == LockOptions.NO_WAIT ) {
appendSql( " with (updlock,rowlock,readpast,nowait)" );
}
else {
appendSql( " with (updlock,rowlock,readpast)" ); appendSql( " with (updlock,rowlock,readpast)" );
} break;
break; case LockOptions.NO_WAIT:
} appendSql( " with (updlock,holdlock,rowlock,nowait)" );
case UPGRADE_NOWAIT: { break;
appendSql( " with (updlock,holdlock,rowlock,nowait)" ); default:
break; appendSql( " with (updlock,holdlock,rowlock)" );
break;
} }
break;
} }
} case PESSIMISTIC_READ: {
else { switch ( effectiveLockTimeout ) {
switch ( lockMode ) { case LockOptions.SKIP_LOCKED:
case UPGRADE_NOWAIT: appendSql( " with (updlock,rowlock,readpast)" );
case PESSIMISTIC_WRITE: break;
case WRITE: { case LockOptions.NO_WAIT:
appendSql( " with (updlock,rowlock)" ); appendSql( " with (holdlock,rowlock,nowait)" );
break; break;
default:
appendSql( " with (holdlock,rowlock)" );
break;
} }
case PESSIMISTIC_READ: { break;
appendSql( " with (holdlock,rowlock)" ); }
break; case UPGRADE_SKIPLOCKED: {
if ( effectiveLockTimeout == LockOptions.NO_WAIT ) {
appendSql( " with (updlock,rowlock,readpast,nowait)" );
} }
case UPGRADE_SKIPLOCKED: { else {
appendSql( " with (updlock,rowlock,readpast)" ); appendSql( " with (updlock,rowlock,readpast)" );
break;
} }
break;
}
case UPGRADE_NOWAIT: {
appendSql( " with (updlock,holdlock,rowlock,nowait)" );
break;
} }
} }
} }
@ -228,7 +208,7 @@ public class SQLServerSqlAstTranslator<T extends JdbcOperation> extends Abstract
return null; return null;
} }
else { else {
if ( version.isBefore( 9 ) || !hasOffset ) { if ( !hasOffset ) {
return hasLimit ? OffsetFetchClauseMode.TOP_ONLY : null; return hasLimit ? OffsetFetchClauseMode.TOP_ONLY : null;
} }
else if ( version.isBefore( 11 ) || !isRowsOnlyFetchClauseType( queryPart ) ) { else if ( version.isBefore( 11 ) || !isRowsOnlyFetchClauseType( queryPart ) ) {
@ -289,7 +269,7 @@ public class SQLServerSqlAstTranslator<T extends JdbcOperation> extends Abstract
@Override @Override
protected boolean needsRowsToSkip() { protected boolean needsRowsToSkip() {
return getDialect().getVersion().isBefore( 9 ); return false;
} }
@Override @Override
@ -334,9 +314,6 @@ public class SQLServerSqlAstTranslator<T extends JdbcOperation> extends Abstract
@Override @Override
public void visitOffsetFetchClause(QueryPart queryPart) { public void visitOffsetFetchClause(QueryPart queryPart) {
if ( !isRowNumberingCurrentQueryPart() ) { if ( !isRowNumberingCurrentQueryPart() ) {
if ( getDialect().getVersion().isBefore( 9 ) && !queryPart.isRoot() && queryPart.getOffsetClauseExpression() != null ) {
throw new IllegalArgumentException( "Can't emulate offset clause in subquery" );
}
// Note that SQL Server is very strict i.e. it requires an order by clause for TOP or OFFSET // Note that SQL Server is very strict i.e. it requires an order by clause for TOP or OFFSET
final OffsetFetchClauseMode offsetFetchClauseMode = getOffsetFetchClauseMode( queryPart ); final OffsetFetchClauseMode offsetFetchClauseMode = getOffsetFetchClauseMode( queryPart );
if ( offsetFetchClauseMode == OffsetFetchClauseMode.STANDARD ) { if ( offsetFetchClauseMode == OffsetFetchClauseMode.STANDARD ) {

View File

@ -9,21 +9,12 @@ package org.hibernate.dialect.function;
import java.util.List; import java.util.List;
import jakarta.persistence.TemporalType; import jakarta.persistence.TemporalType;
import org.hibernate.dialect.SQLServerDialect;
import org.hibernate.query.sqm.function.AbstractSqmSelfRenderingFunctionDescriptor;
import org.hibernate.query.sqm.produce.function.StandardFunctionArgumentTypeResolvers;
import org.hibernate.query.sqm.produce.function.StandardFunctionReturnTypeResolvers;
import org.hibernate.sql.ast.SqlAstTranslator; import org.hibernate.sql.ast.SqlAstTranslator;
import org.hibernate.sql.ast.spi.SqlAppender; import org.hibernate.sql.ast.spi.SqlAppender;
import org.hibernate.sql.ast.tree.SqlAstNode; import org.hibernate.sql.ast.tree.SqlAstNode;
import org.hibernate.sql.ast.tree.expression.Expression; import org.hibernate.sql.ast.tree.expression.Expression;
import org.hibernate.sql.ast.tree.expression.Format;
import org.hibernate.type.StandardBasicTypes;
import org.hibernate.type.spi.TypeConfiguration; import org.hibernate.type.spi.TypeConfiguration;
import static org.hibernate.query.sqm.produce.function.FunctionParameterType.STRING;
import static org.hibernate.query.sqm.produce.function.FunctionParameterType.TEMPORAL;
/** /**
* SQL Server behaves strangely when the first argument to format is of the type time, so we cast to datetime. * SQL Server behaves strangely when the first argument to format is of the type time, so we cast to datetime.
* *
@ -31,11 +22,8 @@ import static org.hibernate.query.sqm.produce.function.FunctionParameterType.TEM
*/ */
public class SQLServerFormatEmulation extends FormatFunction { public class SQLServerFormatEmulation extends FormatFunction {
private final SQLServerDialect dialect; public SQLServerFormatEmulation(TypeConfiguration typeConfiguration) {
public SQLServerFormatEmulation(SQLServerDialect dialect, TypeConfiguration typeConfiguration) {
super( "format", typeConfiguration ); super( "format", typeConfiguration );
this.dialect = dialect;
} }
@Override @Override

View File

@ -0,0 +1,644 @@
/*
* Hibernate, Relational Persistence for Idiomatic Java
*
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
*/
package org.hibernate.orm.test.dialect;
import java.util.Locale;
import org.hibernate.LockMode;
import org.hibernate.LockOptions;
import org.hibernate.dialect.SQLServerDialect;
import org.hibernate.query.spi.Limit;
import org.hibernate.testing.TestForIssue;
import org.hibernate.testing.junit4.BaseUnitTestCase;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
/**
* Unit test of the behavior of the SQLServerDialect utility methods
*
* @author Valotasion Yoryos
* @author Lukasz Antoniak (lukasz dot antoniak at gmail dot com)
* @author Chris Cranford
*/
public class SQLServer2008DialectTestCase extends BaseUnitTestCase {
	// Dialect under test; a fresh instance is created for every test method.
	private SQLServerDialect dialect;

	@Before
	public void setup() {
		dialect = new SQLServerDialect();
	}

	@After
	public void tearDown() {
		dialect = null;
	}

	@Test
	public void testGetLimitString() {
		// Offset paging wraps the query in a CTE with a row_number() column and
		// filters on the computed row number; a top(?) clause bounds the inner query.
		String input = "select distinct f1 as f53245 from table849752 order by f234, f67 desc";
		assertEquals(
				"with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				"select distinct top(?) f1 as f53245 from table849752 order by f234, f67 desc) row_)" +
				" select f53245 from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( input, toRowSelection( 10, 15 ) ).toLowerCase(Locale.ROOT) );
	}

	@Test
	@TestForIssue(jiraKey = "HHH-10736")
	public void testGetLimitStringWithNewlineAfterSelect() {
		// A newline directly after the select keyword must not break keyword detection.
		final String query = "select" + System.lineSeparator() + "* FROM Employee E WHERE E.firstName = :firstName";
		assertEquals(
				"with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				query + ") row_) select * from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( query, toRowSelection( 1, 25 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-10736")
	public void testGetLimitStringWithNewlineAfterSelectWithMultipleSpaces() {
		// Same as above, but with trailing spaces before the newline.
		final String query = "select " + System.lineSeparator() + "* FROM Employee E WHERE E.firstName = :firstName";
		assertEquals(
				"with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				query + ") row_) select * from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( query, toRowSelection( 1, 25 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-8507")
	public void testGetLimitStringWithNewlineAfterColumnList() {
		// CRLF between the column list and FROM: generated aliases are inserted
		// after the line break that terminates each selected column.
		final String query = "select E.fieldA,E.fieldB\r\nFROM Employee E WHERE E.firstName = :firstName";
		assertEquals(
				"with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				"select E.fieldA as col0_,E.fieldB\r\n as col1_" +
				"FROM Employee E WHERE E.firstName = :firstName) row_) select col0_,col1_ from query_ " +
				"where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( query, toRowSelection( 1, 25 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-6950")
	public void testGetLimitStringWithFromColumnName() {
		// A column alias containing the character sequence "from" must not be
		// mistaken for the FROM keyword when locating the select list.
		final String fromColumnNameSQL = "select persistent0_.rid as rid1688_, " +
				"persistent0_.deviationfromtarget as deviati16_1688_, " + // "from" character sequence as a part of the column name
				"persistent0_.sortindex as sortindex1688_ " +
				"from m_evalstate persistent0_ " +
				"where persistent0_.customerid=?";
		assertEquals(
				"with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				fromColumnNameSQL + ") row_) " +
				"select rid1688_,deviati16_1688_,sortindex1688_ from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( fromColumnNameSQL, toRowSelection( 1, 10 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-8301")
	public void testGetLimitStringAliasGeneration() {
		// Columns without aliases get synthetic col0_, col1_, ... aliases so the
		// outer select of the CTE can reference them.
		final String notAliasedSQL = "select column1, column2, column3, column4 from table1";
		assertEquals(
				"with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				"select column1 as col0_, column2 as col1_, column3 as col2_, column4 as col3_ from table1) row_) " +
				"select col0_,col1_,col2_,col3_ from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( notAliasedSQL, toRowSelection( 3, 5 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-10994")
	public void testGetLimitStringAliasGenerationWithAliasesNoAs() {
		// Aliases declared without the AS keyword are recognized and reused as-is.
		final String aliasedSQLNoAs = "select column1 c1, column c2, column c3, column c4 from table1";
		assertEquals(
				"with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				"select column1 c1, column c2, column c3, column c4 from table1) row_) " +
				"select c1,c2,c3,c4 from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( aliasedSQLNoAs, toRowSelection( 3, 5 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-11352")
	public void testPagingWithColumnNameStartingWithFrom() {
		// A column whose name starts with "from_" must not be parsed as the FROM clause.
		final String sql = "select column1 c1, from_column c2 from table1";
		assertEquals( "with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				"select column1 c1, from_column c2 from table1) row_) " +
				"select c1,c2 from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql(sql, toRowSelection(3, 5)));
	}

	@Test
	@TestForIssue(jiraKey = "HHH-7019")
	public void testGetLimitStringWithSubselect() {
		// A subselect inside the select clause must be wrapped untouched; its
		// existing col_x_y_ aliases are propagated to the outer select.
		final String subselectInSelectClauseSQL = "select persistent0_.id as col_0_0_, " +
				"(select max(persistent1_.acceptancedate) " +
				"from av_advisoryvariant persistent1_ " +
				"where persistent1_.clientid=persistent0_.id) as col_1_0_ " +
				"from c_customer persistent0_ " +
				"where persistent0_.type='v'";
		assertEquals(
				"with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				subselectInSelectClauseSQL + ") row_) " +
				"select col_0_0_,col_1_0_ from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( subselectInSelectClauseSQL, toRowSelection( 2, 5 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-11084")
	public void testGetLimitStringWithSelectDistinctSubselect() {
		// No offset (first row 0): only top(?) is injected, and a DISTINCT in a
		// nested subselect must not trigger the distinct-handling of the outer query.
		final String selectDistinctSubselectSQL = "select col0_.CONTENTID as CONTENT1_12_ " +
				"where col0_.CONTENTTYPE='PAGE' and (col0_.CONTENTID in " +
				"(select distinct col2_.PREVVER from CONTENT col2_ where (col2_.PREVVER is not null)))";
		assertEquals(
				"select top(?) col0_.CONTENTID as CONTENT1_12_ " +
				"where col0_.CONTENTTYPE='PAGE' and (col0_.CONTENTID in " +
				"(select distinct col2_.PREVVER from CONTENT col2_ where (col2_.PREVVER is not null)))",
				dialect.getLimitHandler().processSql( selectDistinctSubselectSQL, toRowSelection( 0, 5 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-11084")
	public void testGetLimitStringWithSelectDistinctSubselectNotFirst() {
		// With an offset, the same query is wrapped in the row_number() CTE instead.
		final String selectDistinctSubselectSQL = "select col0_.CONTENTID as CONTENT1_12_ FROM CONTEXT col0_ " +
				"where col0_.CONTENTTYPE='PAGE' and (col0_.CONTENTID in " +
				"(select distinct col2_.PREVVER from CONTENT col2_ where (col2_.PREVVER is not null)))";
		assertEquals(
				"with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ " +
				"from (" + selectDistinctSubselectSQL + ") row_) " +
				"select CONTENT1_12_ from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( selectDistinctSubselectSQL, toRowSelection( 1, 5 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-6728")
	public void testGetLimitStringCaseSensitive() {
		// Keyword detection is case-insensitive, while identifier casing and the
		// mixed-case "AS" alias are preserved verbatim.
		final String caseSensitiveSQL = "select persistent0_.id, persistent0_.uid AS tmp1, " +
				"(select case when persistent0_.name = 'Smith' then 'Neo' else persistent0_.id end) " +
				"from C_Customer persistent0_ " +
				"where persistent0_.type='Va' " +
				"order by persistent0_.Order";
		assertEquals(
				"with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				"select top(?) persistent0_.id as col0_, persistent0_.uid AS tmp1, " +
				"(select case when persistent0_.name = 'Smith' then 'Neo' else persistent0_.id end) as col1_ " +
				"from C_Customer persistent0_ where persistent0_.type='Va' order by persistent0_.Order) " +
				"row_) select col0_,tmp1,col1_ from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( caseSensitiveSQL, toRowSelection( 1, 2 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-6310")
	public void testGetLimitStringDistinctWithinAggregation() {
		// DISTINCT inside an aggregate call must not be treated as a select-distinct.
		final String distinctInAggregateSQL = "select aggregate_function(distinct p.n) as f1 from table849752 p order by f1";
		assertEquals(
				"with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				"select top(?) aggregate_function(distinct p.n) as f1 from table849752 p order by f1) row_) " +
				"select f1 from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( distinctInAggregateSQL, toRowSelection( 2, 5 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-10994")
	public void testGetLimitStringDistinctWithinAggregationWithoutAlias() {
		// Aggregate expression without an alias gets a synthetic col0_ alias.
		final String distinctInAggregateSQL = "select aggregate_function(distinct p.n) from table849752 p order by f1";
		assertEquals(
				"with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				"select top(?) aggregate_function(distinct p.n) as col0_ from table849752 p order by f1) row_) " +
				"select col0_ from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( distinctInAggregateSQL, toRowSelection( 2, 5 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-10994")
	public void testGetLimitStringDistinctWithinAggregationWithAliasNoAs() {
		// Aggregate expression with a no-AS alias keeps that alias.
		final String distinctInAggregateSQL = "select aggregate_function(distinct p.n) f1 from table849752 p order by f1";
		assertEquals(
				"with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				"select top(?) aggregate_function(distinct p.n) f1 from table849752 p order by f1) row_) " +
				"select f1 from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( distinctInAggregateSQL, toRowSelection( 2, 5 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-7370")
	public void testGetLimitStringWithMaxOnly() {
		// Max-rows-only paging (offset 0) injects top(?) right after select,
		// or after "select distinct" when present — no CTE wrapping needed.
		final String query = "select product2x0_.id as id0_, product2x0_.description as descript2_0_ " +
				"from Product2 product2x0_ order by product2x0_.id";
		assertEquals(
				"select top(?) product2x0_.id as id0_, product2x0_.description as descript2_0_ " +
				"from Product2 product2x0_ order by product2x0_.id",
				dialect.getLimitHandler().processSql( query, toRowSelection( 0, 1 ) )
		);

		final String distinctQuery = "select distinct product2x0_.id as id0_, product2x0_.description as descript2_0_ " +
				"from Product2 product2x0_ order by product2x0_.id";
		assertEquals(
				"select distinct top(?) product2x0_.id as id0_, product2x0_.description as descript2_0_ " +
				"from Product2 product2x0_ order by product2x0_.id",
				dialect.getLimitHandler().processSql( distinctQuery, toRowSelection( 0, 5 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-7781")
	public void testGetLimitStringWithCastOperator() {
		// The "as" inside cast(... as varchar(255)) must not be parsed as an alias marker.
		final String query = "select cast(lc302_doku6_.redniBrojStavke as varchar(255)) as col_0_0_, lc302_doku6_.dokumentiID as col_1_0_ " +
				"from LC302_Dokumenti lc302_doku6_ order by lc302_doku6_.dokumentiID DESC";
		assertEquals(
				"with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				"select top(?) cast(lc302_doku6_.redniBrojStavke as varchar(255)) as col_0_0_, lc302_doku6_.dokumentiID as col_1_0_ " +
				"from LC302_Dokumenti lc302_doku6_ order by lc302_doku6_.dokumentiID DESC) row_) " +
				"select col_0_0_,col_1_0_ from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( query, toRowSelection( 1, 3 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-10994")
	public void testGetLimitStringWithCastOperatorWithAliasNoAs() {
		// cast(...) followed by a no-AS alias keeps the given alias.
		final String query = "select cast(lc302_doku6_.redniBrojStavke as varchar(255)) f1, lc302_doku6_.dokumentiID f2 " +
				"from LC302_Dokumenti lc302_doku6_ order by lc302_doku6_.dokumentiID DESC";
		assertEquals(
				"with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				"select top(?) cast(lc302_doku6_.redniBrojStavke as varchar(255)) f1, lc302_doku6_.dokumentiID f2 " +
				"from LC302_Dokumenti lc302_doku6_ order by lc302_doku6_.dokumentiID DESC) row_) " +
				"select f1,f2 from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( query, toRowSelection( 1, 3 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-10994")
	public void testGetLimitStringWithCastOperatorWithoutAliases() {
		// Unaliased cast(...) expressions get synthetic col0_, col1_ aliases.
		final String query = "select cast(lc302_doku6_.redniBrojStavke as varchar(255)), lc302_doku6_.dokumentiID " +
				"from LC302_Dokumenti lc302_doku6_ order by lc302_doku6_.dokumentiID DESC";
		assertEquals(
				"with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				"select top(?) cast(lc302_doku6_.redniBrojStavke as varchar(255)) as col0_, lc302_doku6_.dokumentiID as col1_ " +
				"from LC302_Dokumenti lc302_doku6_ order by lc302_doku6_.dokumentiID DESC) row_) " +
				"select col0_,col1_ from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( query, toRowSelection( 1, 3 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-8007")
	public void testGetLimitStringSelectingMultipleColumnsFromSeveralTables() {
		// t1.*, t2.* projections collapse to a plain "select *" in the outer query.
		final String query = "select t1.*, t2.* from tab1 t1, tab2 t2 where t1.ref = t2.ref order by t1.id desc";
		assertEquals(
				"with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				"select top(?) t1.*, t2.* from tab1 t1, tab2 t2 where t1.ref = t2.ref order by t1.id desc) row_) " +
				"select * from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( query, toRowSelection( 1, 3 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-8007")
	public void testGetLimitStringSelectingAllColumns() {
		// A bare "select *" stays "select *" in the outer query as well.
		final String query = "select * from tab1 t1, tab2 t2 where t1.ref = t2.ref order by t1.id desc";
		assertEquals(
				"with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				"select top(?) * from tab1 t1, tab2 t2 where t1.ref = t2.ref order by t1.id desc) row_) " +
				"select * from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( query, toRowSelection( 1, 3 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-11145")
	public void testGetLimitStringWithFromInColumnName() {
		// A bracket-quoted column name containing "From" must not be parsed as FROM.
		final String query = "select [Created From Nonstock Item], field2 from table1";
		assertEquals( "with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				"select [Created From Nonstock Item] as col0_, field2 as col1_ from table1) row_) " +
				"select col0_,col1_ from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( query, toRowSelection( 1, 5 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-11145")
	public void testGetLimitStringWithQuotedColumnNamesAndAlias() {
		// Bracket-quoted column with a no-AS alias keeps its alias; the unaliased
		// second column gets the first synthetic alias (col0_).
		final String query = "select [Created From Item] c1, field2 from table1";
		assertEquals( "with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				"select [Created From Item] c1, field2 as col0_ from table1) row_) " +
				"select c1,col0_ from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( query, toRowSelection( 1, 5 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-11145")
	public void testGetLimitStringWithQuotedColumnNamesAndAliasWithAs() {
		// Same as above, with an explicit AS before the alias.
		final String query = "select [Created From Item] as c1, field2 from table1";
		assertEquals( "with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				"select [Created From Item] as c1, field2 as col0_ from table1) row_) " +
				"select c1,col0_ from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( query, toRowSelection( 1, 5 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-11324")
	public void testGetLimitStringWithSelectClauseNestedQueryUsingParenthesis() {
		// Nested parenthesized subquery in the select clause survives CTE wrapping intact.
		final String query = "select t1.c1 as col_0_0, (select case when count(t2.c1)>0 then 'ADDED' else 'UNMODIFIED' end from table2 t2 WHERE (t2.c1 in (?))) as col_1_0 from table1 t1 WHERE 1=1 ORDER BY t1.c1 ASC";
		assertEquals(
				"with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				"select top(?) t1.c1 as col_0_0, (select case when count(t2.c1)>0 then 'ADDED' else 'UNMODIFIED' end from table2 t2 WHERE (t2.c1 in (?))) as col_1_0 from table1 t1 WHERE 1=1 ORDER BY t1.c1 ASC) row_) " +
				"select col_0_0,col_1_0 from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( query, toRowSelection( 1, 5 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-11650")
	public void testGetLimitWithStringValueContainingParenthesis() {
		// An unbalanced parenthesis inside a string literal must not confuse
		// the parenthesis tracking of the limit handler.
		final String query = "select t1.c1 as col_0_0 FROM table1 t1 where t1.c1 = '(123' ORDER BY t1.c1 ASC";
		assertEquals(
				"with query_ as (select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				"select top(?) t1.c1 as col_0_0 FROM table1 t1 where t1.c1 = '(123' ORDER BY t1.c1 ASC) row_) select col_0_0 from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( query, toRowSelection( 1, 5 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-11324")
	public void testGetLimitStringWithSelectClauseNestedQueryUsingParenthesisOnlyTop() {
		// With no offset, the nested-subquery case only gains a top(?) clause.
		final String query = "select t1.c1 as col_0_0, (select case when count(t2.c1)>0 then 'ADDED' else 'UNMODIFIED' end from table2 t2 WHERE (t2.c1 in (?))) as col_1_0 from table1 t1 WHERE 1=1 ORDER BY t1.c1 ASC";
		assertEquals(
				"select top(?) t1.c1 as col_0_0, (select case when count(t2.c1)>0 then 'ADDED' else 'UNMODIFIED' end from table2 t2 WHERE (t2.c1 in (?))) as col_1_0 from table1 t1 WHERE 1=1 ORDER BY t1.c1 ASC",
				dialect.getLimitHandler().processSql( query, toRowSelection( 0, 5 ) )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-8916")
	public void testGetLimitStringUsingCTEQueryNoOffset() {
		// Queries that already start with a WITH clause, no offset: top(?) is
		// injected into the final SELECT, leaving the CTE definitions untouched.
		Limit selection = toRowSelection( 0, 5 );

		// test top-based CTE with single CTE query_ definition with no odd formatting
		final String query1 = "WITH a (c1, c2) AS (SELECT c1, c2 FROM t) SELECT c1, c2 FROM a";
		assertEquals(
				"WITH a (c1, c2) AS (SELECT c1, c2 FROM t) SELECT top(?) c1, c2 FROM a",
				dialect.getLimitHandler().processSql( query1, selection )
		);

		// test top-based CTE with single CTE query_ definition and various tab, newline spaces
		final String query2 = "  \n\tWITH a (c1\n\t,c2)\t\nAS (SELECT\n\tc1,c2 FROM t)\t\nSELECT c1, c2 FROM a";
		assertEquals(
				"WITH a (c1\n\t,c2)\t\nAS (SELECT\n\tc1,c2 FROM t)\t\nSELECT top(?) c1, c2 FROM a",
				dialect.getLimitHandler().processSql( query2, selection )
		);

		// test top-based CTE with multiple CTE query_ definitions with no odd formatting
		final String query3 = "WITH a (c1, c2) AS (SELECT c1, c2 FROM t1), b (b1, b2) AS (SELECT b1, b2 FROM t2) " +
				"SELECT c1, c2, b1, b2 FROM t1, t2 WHERE t1.c1 = t2.b1";
		assertEquals(
				"WITH a (c1, c2) AS (SELECT c1, c2 FROM t1), b (b1, b2) AS (SELECT b1, b2 FROM t2) " +
				"SELECT top(?) c1, c2, b1, b2 FROM t1, t2 WHERE t1.c1 = t2.b1",
				dialect.getLimitHandler().processSql( query3, selection )
		);

		// test top-based CTE with multiple CTE query_ definitions and various tab, newline spaces
		final String query4 = "  \n\r\tWITH a (c1, c2) AS\n\r (SELECT c1, c2 FROM t1)\n\r, b (b1, b2)\tAS\t" +
				"(SELECT b1, b2 FROM t2) SELECT c1, c2, b1, b2 FROM t1, t2 WHERE t1.c1 = t2.b1";
		assertEquals(
				"WITH a (c1, c2) AS\n\r (SELECT c1, c2 FROM t1)\n\r, b (b1, b2)\tAS\t(SELECT b1, b2 FROM t2)" +
				" SELECT top(?) c1, c2, b1, b2 FROM t1, t2 WHERE t1.c1 = t2.b1",
				dialect.getLimitHandler().processSql( query4, selection )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-8916")
	public void testGetLimitStringUsingCTEQueryWithOffset() {
		// Queries that already start with a WITH clause, with offset: the paging
		// CTE (query_) is appended after the existing CTE definitions.
		Limit selection = toRowSelection( 1, 5 );

		// test non-top based CTE with single CTE query_ definition with no odd formatting
		final String query1 = "WITH a (c1, c2) AS (SELECT c1, c2 FROM t) SELECT c1, c2 FROM a";
		assertEquals(
				"WITH a (c1, c2) AS (SELECT c1, c2 FROM t) , query_ as (select row_.*,row_number() over " +
				"(order by current_timestamp) as rownumber_ from (SELECT c1 as col0_, c2 as col1_ " +
				"FROM a) row_) select col0_,col1_ from query_ where rownumber_>=? " +
				"and rownumber_<?",
				dialect.getLimitHandler().processSql( query1, selection )
		);

		// test non-top based CTE with single CTE query_ definition and various tab, newline spaces
		final String query2 = "  \n\tWITH a (c1\n\t,c2)\t\nAS (SELECT\n\tc1,c2 FROM t)\t\nSELECT c1, c2 FROM a";
		assertEquals(
				"WITH a (c1\n\t,c2)\t\nAS (SELECT\n\tc1,c2 FROM t)\t\n, query_ as (select row_.*,row_number()" +
				" over (order by current_timestamp) as rownumber_ from (SELECT c1 as col0_, c2 " +
				"as col1_ FROM a) row_) select col0_,col1_ from query_ where rownumber_>=" +
				"? and rownumber_<?",
				dialect.getLimitHandler().processSql( query2, selection )
		);

		// test non-top based CTE with multiple CTE query_ definitions with no odd formatting
		final String query3 = "WITH a (c1, c2) AS (SELECT c1, c2 FROM t1), b (b1, b2) AS (SELECT b1, b2 FROM t2) " +
				" SELECT c1, c2, b1, b2 FROM t1, t2 WHERE t1.c1 = t2.b1";
		assertEquals(
				"WITH a (c1, c2) AS (SELECT c1, c2 FROM t1), b (b1, b2) AS (SELECT b1, b2 FROM t2) , query_ as (" +
				"select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				"SELECT c1 as col0_, c2 as col1_, b1 as col2_, b2 as col3_ FROM t1, t2 WHERE t1.c1 = t2.b1) row_)" +
				" select col0_,col1_,col2_,col3_ from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( query3, selection )
		);

		// test top-based CTE with multiple CTE query_ definitions and various tab, newline spaces
		final String query4 = "  \n\r\tWITH a (c1, c2) AS\n\r (SELECT c1, c2 FROM t1)\n\r, b (b1, b2)\tAS\t(SELECT b1, " +
				"b2 FROM t2) SELECT c1, c2, b1, b2 FROM t1, t2 WHERE t1.c1 = t2.b1";
		assertEquals(
				"WITH a (c1, c2) AS\n\r (SELECT c1, c2 FROM t1)\n\r, b (b1, b2)\tAS\t(SELECT b1, b2 FROM t2) , query_ as (" +
				"select row_.*,row_number() over (order by current_timestamp) as rownumber_ from (" +
				"SELECT c1 as col0_, c2 as col1_, b1 as col2_, b2 as col3_ FROM t1, t2 WHERE t1.c1 = t2.b1) row_)" +
				" select col0_,col1_,col2_,col3_ from query_ where rownumber_>=? and rownumber_<?",
				dialect.getLimitHandler().processSql( query4, selection )
		);
	}

	@Test
	@TestForIssue(jiraKey = "HHH-9635")
	public void testAppendLockHintReadPastLocking() {
		// UPGRADE_SKIPLOCKED maps to updlock+rowlock+readpast table hints.
		final String expectedLockHint = "tab1 with (updlock,rowlock,readpast)";

		LockOptions lockOptions = new LockOptions( LockMode.UPGRADE_SKIPLOCKED );
		String lockHint = dialect.appendLockHint( lockOptions, "tab1" );

		assertEquals( expectedLockHint, lockHint );
	}

	@Test
	@TestForIssue(jiraKey = "HHH-9635")
	public void testAppendLockHintReadPastLockingNoTimeOut() {
		// NO_WAIT timeout additionally appends the nowait hint.
		final String expectedLockHint = "tab1 with (updlock,rowlock,readpast,nowait)";

		LockOptions lockOptions = new LockOptions( LockMode.UPGRADE_SKIPLOCKED );
		lockOptions.setTimeOut( LockOptions.NO_WAIT );
		String lockHint = dialect.appendLockHint( lockOptions, "tab1" );

		assertEquals( expectedLockHint, lockHint );
	}

	@Test
	@TestForIssue(jiraKey = "HHH-9635")
	public void testAppendLockHintPessimisticRead() {
		// PESSIMISTIC_READ maps to holdlock+rowlock.
		final String expectedLockHint = "tab1 with (holdlock,rowlock)";

		LockOptions lockOptions = new LockOptions( LockMode.PESSIMISTIC_READ );
		String lockHint = dialect.appendLockHint( lockOptions, "tab1" );

		assertEquals( expectedLockHint, lockHint );
	}

	@Test
	@TestForIssue(jiraKey = "HHH-9635")
	public void testAppendLockHintPessimisticReadNoTimeOut() {
		final String expectedLockHint = "tab1 with (holdlock,rowlock,nowait)";

		LockOptions lockOptions = new LockOptions( LockMode.PESSIMISTIC_READ );
		lockOptions.setTimeOut( LockOptions.NO_WAIT );
		String lockHint = dialect.appendLockHint( lockOptions, "tab1" );

		assertEquals( expectedLockHint, lockHint );
	}

	@Test
	@TestForIssue(jiraKey = "HHH-9635")
	public void testAppendLockHintWrite() {
		// WRITE maps to updlock+holdlock+rowlock.
		final String expectedLockHint = "tab1 with (updlock,holdlock,rowlock)";

		LockOptions lockOptions = new LockOptions( LockMode.WRITE );
		String lockHint = dialect.appendLockHint( lockOptions, "tab1" );

		assertEquals( expectedLockHint, lockHint );
	}

	@Test
	@TestForIssue(jiraKey = "HHH-9635")
	public void testAppendLockHintWriteWithNoTimeOut() {
		final String expectedLockHint = "tab1 with (updlock,holdlock,rowlock,nowait)";

		LockOptions lockOptions = new LockOptions( LockMode.WRITE );
		lockOptions.setTimeOut( LockOptions.NO_WAIT );
		String lockHint = dialect.appendLockHint( lockOptions, "tab1" );

		assertEquals( expectedLockHint, lockHint );
	}

	@Test
	@TestForIssue(jiraKey = "HHH-9635")
	public void testAppendLockHintUpgradeNoWait() {
		// UPGRADE_NOWAIT always includes nowait, regardless of timeout.
		final String expectedLockHint = "tab1 with (updlock,holdlock,rowlock,nowait)";

		LockOptions lockOptions = new LockOptions( LockMode.UPGRADE_NOWAIT );
		String lockHint = dialect.appendLockHint( lockOptions, "tab1" );

		assertEquals( expectedLockHint, lockHint );
	}

	@Test
	@TestForIssue(jiraKey = "HHH-9635")
	public void testAppendLockHintUpgradeNoWaitNoTimeout() {
		final String expectedLockHint = "tab1 with (updlock,holdlock,rowlock,nowait)";

		LockOptions lockOptions = new LockOptions( LockMode.UPGRADE_NOWAIT );
		lockOptions.setTimeOut( LockOptions.NO_WAIT );
		String lockHint = dialect.appendLockHint( lockOptions, "tab1" );

		assertEquals( expectedLockHint, lockHint );
	}

	@Test
	@TestForIssue(jiraKey = "HHH-9635")
	public void testAppendLockHintUpgrade() {
		// PESSIMISTIC_WRITE maps to updlock+holdlock+rowlock.
		final String expectedLockHint = "tab1 with (updlock,holdlock,rowlock)";

		LockOptions lockOptions = new LockOptions( LockMode.PESSIMISTIC_WRITE );
		String lockHint = dialect.appendLockHint( lockOptions, "tab1" );

		assertEquals( expectedLockHint, lockHint );
	}

	@Test
	@TestForIssue(jiraKey = "HHH-9635")
	public void testAppendLockHintUpgradeNoTimeout() {
		final String expectedLockHint = "tab1 with (updlock,holdlock,rowlock,nowait)";

		LockOptions lockOptions = new LockOptions( LockMode.PESSIMISTIC_WRITE );
		lockOptions.setTimeOut( LockOptions.NO_WAIT );
		String lockHint = dialect.appendLockHint( lockOptions, "tab1" );

		assertEquals( expectedLockHint, lockHint );
	}

	@Test
	@TestForIssue(jiraKey = "HHH-9635")
	public void testAppendLockHintPessimisticWrite() {
		final String expectedLockHint = "tab1 with (updlock,holdlock,rowlock)";

		LockOptions lockOptions = new LockOptions( LockMode.PESSIMISTIC_WRITE );
		String lockHint = dialect.appendLockHint( lockOptions, "tab1" );

		assertEquals( expectedLockHint, lockHint );
	}

	@Test
	@TestForIssue(jiraKey = "HHH-9635")
	public void testAppendLockHintPessimisticWriteNoTimeOut() {
		final String expectedLockHint = "tab1 with (updlock,holdlock,rowlock,nowait)";

		LockOptions lockOptions = new LockOptions( LockMode.PESSIMISTIC_WRITE );
		lockOptions.setTimeOut( LockOptions.NO_WAIT );
		String lockHint = dialect.appendLockHint( lockOptions, "tab1" );

		assertEquals( expectedLockHint, lockHint );
	}

	// Builds a Limit with the given first row (0-based offset) and max row count,
	// mirroring what the query translator would pass to the limit handler.
	private Limit toRowSelection(int firstRow, int maxRows) {
		Limit selection = new Limit();
		selection.setFirstRow( firstRow );
		selection.setMaxRows( maxRows );
		return selection;
	}
}

View File

@ -16,7 +16,7 @@ public class SQLServerLockHintsTest extends AbstractLockHintTest {
public static final Dialect DIALECT = new SQLServerDialect(); public static final Dialect DIALECT = new SQLServerDialect();
protected String getLockHintUsed() { protected String getLockHintUsed() {
return "with (updlock,rowlock)"; return "with (updlock,holdlock,rowlock)";
} }
protected Dialect getDialectUnderTest() { protected Dialect getDialectUnderTest() {

View File

@ -29,12 +29,12 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
* @author Vlad Mihalcea * @author Vlad Mihalcea
*/ */
@TestForIssue(jiraKey = "HHH-10183") @TestForIssue(jiraKey = "HHH-10183")
@RequiresDialect(value = SQLServerDialect.class, majorVersion = 10) @RequiresDialect(value = SQLServerDialect.class)
@DomainModel( @DomainModel(
annotatedClasses = SQLServer2008NationalizedScalarQueryTest.User.class annotatedClasses = SQLServerNationalizedScalarQueryTest.User.class
) )
@SessionFactory @SessionFactory
public class SQLServer2008NationalizedScalarQueryTest { public class SQLServerNationalizedScalarQueryTest {
@Test @Test

View File

@ -22,7 +22,6 @@ import org.hibernate.tool.hbm2ddl.SchemaExport;
import org.hibernate.tool.schema.TargetType; import org.hibernate.tool.schema.TargetType;
import org.hibernate.testing.TestForIssue; import org.hibernate.testing.TestForIssue;
import org.hibernate.testing.junit4.BaseUnitTestCase;
import org.hibernate.testing.orm.junit.RequiresDialect; import org.hibernate.testing.orm.junit.RequiresDialect;
import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeEach;
@ -32,8 +31,8 @@ import org.junit.jupiter.api.Test;
* @author Andrea Boriero * @author Andrea Boriero
*/ */
@TestForIssue(jiraKey = "HHH-10529") @TestForIssue(jiraKey = "HHH-10529")
@RequiresDialect(value = SQLServerDialect.class, majorVersion = 10) @RequiresDialect(value = SQLServerDialect.class)
public class SQLServer2008NVarCharTypeTest { public class SQLServerNVarCharTypeTest {
private StandardServiceRegistry ssr; private StandardServiceRegistry ssr;
private MetadataImplementor metadata; private MetadataImplementor metadata;
private SchemaExport schemaExport; private SchemaExport schemaExport;