HHH-18043 Change SQL Server default timestamp precision to 7

Christian Beikov authored on 2024-04-30 23:06:56 +02:00; committed by Steve Ebersole
parent e414373f85
commit 405d80bd29
3 changed files with 4 additions and 21 deletions


@@ -812,18 +812,9 @@ public class SQLServerLegacyDialect extends AbstractTransactSQLDialect {
 		};
 	}
 
-	/**
-	 * SQL server supports up to 7 decimal digits of
-	 * fractional second precision in a datetime2,
-	 * but since its duration arithmetic functions
-	 * try to fit durations into an int,
-	 * which is impossible with such high precision,
-	 * so default to generating {@code datetime2(3)}
-	 * columns.
-	 */
 	@Override
 	public int getDefaultTimestampPrecision() {
-		return 6; //microseconds!
+		return 7;
 	}
 
 	@Override
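
For context, and not part of the commit itself: with the new default, schema export for a temporal attribute that carries no explicit precision should now produce a datetime2(7) column instead of datetime2(6). A minimal sketch, assuming a hypothetical Event entity and standard Hibernate 6 schema generation:

import java.time.Instant;

import jakarta.persistence.Entity;
import jakarta.persistence.Id;

// Hypothetical entity, used only to illustrate the new default.
@Entity
public class Event {
	@Id
	Long id;

	// No explicit precision: with this commit applied, schema export on
	// SQL Server should emit `createdAt datetime2(7)` where it previously
	// emitted `datetime2(6)`.
	Instant createdAt;
}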


@@ -815,18 +815,9 @@ public class SQLServerDialect extends AbstractTransactSQLDialect {
 		};
 	}
 
-	/**
-	 * SQL server supports up to 7 decimal digits of
-	 * fractional second precision in a datetime2,
-	 * but since its duration arithmetic functions
-	 * try to fit durations into an int,
-	 * which is impossible with such high precision,
-	 * so default to generating {@code datetime2(3)}
-	 * columns.
-	 */
 	@Override
 	public int getDefaultTimestampPrecision() {
-		return 6; //microseconds!
+		return 7;
 	}
 
 	/**
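
Applications that relied on the old microsecond default can restore it without patching Hibernate, since getDefaultTimestampPrecision() is a public, overridable dialect method, as the hunk above shows. A sketch, not from the commit; MicrosecondSQLServerDialect is a made-up name, to be selected via the hibernate.dialect setting:

import org.hibernate.dialect.SQLServerDialect;

// Hypothetical subclass restoring the pre-HHH-18043 default of 6
// fractional digits (microseconds).
public class MicrosecondSQLServerDialect extends SQLServerDialect {
	@Override
	public int getDefaultTimestampPrecision() {
		return 6;
	}
}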


@@ -132,9 +132,10 @@ String isDefault();
 
 [[ddl-implicit-datatype-timestamp]]
-== Default precision for Oracle timestamp
+== Default precision for timestamp on some databases
 
 The default precision for Oracle timestamps was changed to 9 i.e. nanosecond precision.
+The default precision for SQL Server timestamps was changed to 7 i.e. 100 nanosecond precision.
 
 [[todo]]
 == Todos (dev)
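
The migration note leaves implicit how to pin a precision for individual mappings. The sketch below assumes that @Column(precision = ...) is interpreted as fractional-seconds precision for temporal attributes; that behavior is not shown by this commit, so treat it as an assumption to verify:

import java.time.Instant;

import jakarta.persistence.Column;
import jakarta.persistence.Entity;
import jakarta.persistence.Id;

// Hypothetical entity: pinning the precision per column, so the DDL
// stays `datetime2(6)` regardless of the dialect default.
@Entity
public class AuditRecord {
	@Id
	Long id;

	// Assumption: `precision` on a temporal attribute is applied as
	// fractional-seconds precision when the column type is generated.
	@Column(precision = 6)
	Instant loggedAt;
}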