HHH-18043 Change SQL Server default timestamp precision to 7
parent e414373f85
commit 405d80bd29
@@ -812,18 +812,9 @@ public class SQLServerLegacyDialect extends AbstractTransactSQLDialect {
 		};
 	}
 
-	/**
-	 * SQL server supports up to 7 decimal digits of
-	 * fractional second precision in a datetime2,
-	 * but since its duration arithmetic functions
-	 * try to fit durations into an int,
-	 * which is impossible with such high precision,
-	 * so default to generating {@code datetime2(3)}
-	 * columns.
-	 */
 	@Override
 	public int getDefaultTimestampPrecision() {
-		return 6; //microseconds!
+		return 7;
 	}
 
 	@Override
@@ -815,18 +815,9 @@ public class SQLServerDialect extends AbstractTransactSQLDialect {
 		};
 	}
 
-	/**
-	 * SQL server supports up to 7 decimal digits of
-	 * fractional second precision in a datetime2,
-	 * but since its duration arithmetic functions
-	 * try to fit durations into an int,
-	 * which is impossible with such high precision,
-	 * so default to generating {@code datetime2(3)}
-	 * columns.
-	 */
 	@Override
 	public int getDefaultTimestampPrecision() {
-		return 6; //microseconds!
+		return 7;
 	}
 
 	/**
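
Both hunks make the same change, in the legacy and current SQL Server dialects. As a minimal sketch of the effect (the Event entity below is illustrative, not part of this commit), a timestamp property mapped without an explicit precision now yields a datetime2(7) column on SQL Server:

    import java.time.Instant;

    import jakarta.persistence.Entity;
    import jakarta.persistence.Id;

    @Entity
    public class Event {
        @Id
        Long id;

        // No precision given, so the dialect default applies:
        // previously the generated column was datetime2(6),
        // with this commit it becomes datetime2(7).
        Instant createdAt;
    }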
@@ -132,9 +132,10 @@ String isDefault();
 
 [[ddl-implicit-datatype-timestamp]]
-== Default precision for Oracle timestamp
+== Default precision for timestamp on some databases
 
 The default precision for Oracle timestamps was changed to 9 i.e. nanosecond precision.
+The default precision for SQL Server timestamps was changed to 7 i.e. 100 nanosecond precision.
 
 [[todo]]
 == Todos (dev)
 
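
Applications that depended on the old microsecond default can restore it by overriding the dialect method touched in this commit; a hedged sketch (the subclass name is hypothetical, not part of the commit):

    import org.hibernate.dialect.SQLServerDialect;

    // Hypothetical dialect subclass restoring the previous default
    // of microsecond-precision (datetime2(6)) timestamp columns.
    public class MicrosecondSQLServerDialect extends SQLServerDialect {
        @Override
        public int getDefaultTimestampPrecision() {
            return 6; // the pre-commit default: microseconds
        }
    }

The subclass would then be selected through the usual hibernate.dialect configuration setting.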