x86/vdso: Make delta calculation overflow safe
Kernel timekeeping is designed to keep the change in cycles (since the last
timer interrupt) below max_cycles, which prevents multiplication overflow
when converting cycles to nanoseconds. However, if timer interrupts stop,
the calculation will eventually overflow.

Add protection against that. Select GENERIC_VDSO_OVERFLOW_PROTECT so that
max_cycles is made available in the VDSO data page. Check against
max_cycles, falling back to a slower higher precision calculation. Take
advantage of the opportunity to move masking and negative motion check
into the slow path.

The result is a calculation that has similar performance as before. Newer
machines showed performance benefit, whereas older Skylake-based hardware
such as Intel Kaby Lake was seen <1% worse.

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240325064023.2997-9-adrian.hunter@intel.com
parent 456e3788bc
commit 7e90ffb716
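For intuition on the failure mode being fixed: the fast path computes
((delta * vd->mult) + base) >> vd->shift entirely in 64 bits, which
silently wraps once delta exceeds roughly U64_MAX / mult. Below is a
minimal userspace sketch of that wraparound; the mult and shift values
are made up for illustration, not real vdso data, and the kernel's
actual max_cycles is computed by the timekeeping core with a safety
margin rather than by this exact division.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* hypothetical multiplier/shift, not a real clocksource's values */
		uint32_t mult = 5592405;
		unsigned int shift = 25;

		/* largest delta whose 64-bit product cannot wrap */
		uint64_t max_cycles = UINT64_MAX / mult;

		uint64_t ok  = (max_cycles * (uint64_t)mult) >> shift;
		uint64_t bad = ((max_cycles + 1) * (uint64_t)mult) >> shift; /* wraps */

		/* exact 128-bit reference for comparison */
		uint64_t ref = (uint64_t)(((unsigned __int128)(max_cycles + 1) * mult) >> shift);

		printf("delta = max_cycles:     %llu ns\n", (unsigned long long)ok);
		printf("delta = max_cycles + 1: %llu ns (128-bit says %llu)\n",
		       (unsigned long long)bad, (unsigned long long)ref);
		return 0;
	}

One timer tick past the safe bound, the 64-bit product wraps and the
reported time collapses to a tiny value, which is exactly the cliff the
patch guards against.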
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -168,6 +168,7 @@ config X86
 	select GENERIC_TIME_VSYSCALL
 	select GENERIC_GETTIMEOFDAY
 	select GENERIC_VDSO_TIME_NS
+	select GENERIC_VDSO_OVERFLOW_PROTECT
 	select GUP_GET_PXX_LOW_HIGH if X86_PAE
 	select HARDIRQS_SW_RESEND
 	select HARDLOCKUP_CHECK_TIMESTAMP if X86_64
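For context on what the new select gives the x86 VDSO: with
GENERIC_VDSO_OVERFLOW_PROTECT enabled, the timekeeping core exports
max_cycles into the VDSO data page alongside the existing conversion
fields. The sketch below is an abridged stand-in covering only the
fields vdso_calc_ns() reads; names follow the kernel's, but this is a
simplification, not the real struct vdso_data from
include/vdso/datapage.h.

	#include <stdint.h>

	typedef uint32_t u32;
	typedef uint64_t u64;

	/* Abridged stand-in: only the fields vdso_calc_ns() reads. The
	 * real layout lives in include/vdso/datapage.h (struct vdso_data). */
	struct vdso_data_abridged {
		u64	cycle_last;	/* clocksource cycles at the last update */
	#ifdef CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT
		u64	max_cycles;	/* present only when the option is selected */
	#endif
		u64	mask;		/* clocksource mask */
		u32	mult;		/* cycles-to-nanoseconds multiplier */
		u32	shift;		/* cycles-to-nanoseconds shift */
	};

Guarding the field with the config option keeps the data page layout
unchanged for architectures that do not opt in.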
--- a/arch/x86/include/asm/vdso/gettimeofday.h
+++ b/arch/x86/include/asm/vdso/gettimeofday.h
@@ -319,18 +319,31 @@ static inline bool arch_vdso_cycles_ok(u64 cycles)
  */
 static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles, u64 base)
 {
-	/*
-	 * Due to the MSB/Sign-bit being used as invalid marker (see
-	 * arch_vdso_cycles_valid() above), the effective mask is S64_MAX.
-	 */
-	u64 delta = (cycles - vd->cycle_last) & S64_MAX;
+	u64 delta = cycles - vd->cycle_last;
 
 	/*
-	 * Due to the above mentioned TSC wobbles, filter out negative motion.
-	 * Per the above masking, the effective sign bit is now bit 62.
+	 * Negative motion and deltas which can cause multiplication
+	 * overflow require special treatment. This check covers both as
+	 * negative motion is guaranteed to be greater than @vd::max_cycles
+	 * due to unsigned comparison.
+	 *
+	 * Due to the MSB/Sign-bit being used as invalid marker (see
+	 * arch_vdso_cycles_valid() above), the effective mask is S64_MAX,
+	 * but that case is also unlikely and will also take the unlikely path
+	 * here.
 	 */
-	if (unlikely(delta & (1ULL << 62)))
-		return base >> vd->shift;
+	if (unlikely(delta > vd->max_cycles)) {
+		/*
+		 * Due to the above mentioned TSC wobbles, filter out
+		 * negative motion. Per the above masking, the effective
+		 * sign bit is now bit 62.
+		 */
+		if (delta & (1ULL << 62))
+			return base >> vd->shift;
+
+		/* Handle multiplication overflow gracefully */
+		return mul_u64_u32_add_u64_shr(delta & S64_MAX, vd->mult, base, vd->shift);
+	}
 
 	return ((delta * vd->mult) + base) >> vd->shift;
 }