Mirror of https://github.com/torvalds/linux.git
0898a16a36
Place the branch with no concurrent write before the contended case.

Performance numbers for Intel(R) Core(TM) i5-6300U CPU @ 2.40GHz
(more clock_gettime() cycles - the better):

        | before    | after
-----------------------------------
        | 150252214 | 153242367
        | 150301112 | 153324800
        | 150392773 | 153125401
        | 150373957 | 153399355
        | 150303157 | 153489417
        | 150365237 | 153494270
-----------------------------------
avg     | 150331408 | 153345935
diff %  |         2 |         0
-----------------------------------
stdev % |       0.3 |       0.1

Co-developed-by: Dmitry Safonov <dima@arista.com>
Signed-off-by: Andrei Vagin <avagin@gmail.com>
Signed-off-by: Dmitry Safonov <dima@arista.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Reviewed-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Link: https://lore.kernel.org/r/20191112012724.250792-2-dima@arista.com
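To make the ordering point concrete, here is a rough standalone userspace sketch of the seqcount read protocol that the helpers in this header implement: the uncontended check (even sequence count) is taken first, and only a writer in flight (odd count) makes the reader spin and later retry. All names (demo_data, demo_read_begin, demo_read_retry) are invented for this sketch, and the C11 atomics stand in for the kernel's READ_ONCE(), smp_rmb() and cpu_relax(); this is an illustration, not the kernel code.

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for struct vdso_data; all names in this sketch are invented. */
struct demo_data {
	_Atomic uint32_t seq;	/* even: data stable, odd: update in flight */
	uint64_t value;		/* payload guarded by the sequence counter */
};

static uint32_t demo_read_begin(struct demo_data *d)
{
	uint32_t seq;

	/*
	 * Uncontended case first: when seq is even the loop body is never
	 * entered; only a concurrent writer (odd seq) makes the reader spin.
	 */
	while ((seq = atomic_load_explicit(&d->seq, memory_order_acquire)) & 1)
		;	/* the kernel calls cpu_relax() here */
	return seq;
}

static int demo_read_retry(struct demo_data *d, uint32_t start)
{
	/* Acquire fence stands in for the kernel's smp_rmb(). */
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&d->seq, memory_order_relaxed) != start;
}

int main(void)
{
	struct demo_data d = { .value = 42 };
	uint32_t seq;
	uint64_t v;

	/* Typical reader loop: retry if a writer interleaved with the read. */
	do {
		seq = demo_read_begin(&d);
		v = d.value;
	} while (demo_read_retry(&d, seq));

	printf("read %" PRIu64 " at seq %" PRIu32 "\n", v, seq);
	return 0;
}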
57 lines, 1.3 KiB, C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __VDSO_HELPERS_H
#define __VDSO_HELPERS_H

#ifndef __ASSEMBLY__

#include <vdso/datapage.h>

static __always_inline u32 vdso_read_begin(const struct vdso_data *vd)
{
	u32 seq;

	while (unlikely((seq = READ_ONCE(vd->seq)) & 1))
		cpu_relax();

	smp_rmb();
	return seq;
}

static __always_inline u32 vdso_read_retry(const struct vdso_data *vd,
					    u32 start)
{
	u32 seq;

	smp_rmb();
	seq = READ_ONCE(vd->seq);
	return seq != start;
}

static __always_inline void vdso_write_begin(struct vdso_data *vd)
{
	/*
	 * WRITE_ONCE() is required, otherwise the compiler can validly tear
	 * updates to vd[x].seq and it is possible that the value seen by the
	 * reader is inconsistent.
	 */
	WRITE_ONCE(vd[CS_HRES_COARSE].seq, vd[CS_HRES_COARSE].seq + 1);
	WRITE_ONCE(vd[CS_RAW].seq, vd[CS_RAW].seq + 1);
	smp_wmb();
}

static __always_inline void vdso_write_end(struct vdso_data *vd)
{
	smp_wmb();
	/*
	 * WRITE_ONCE() is required, otherwise the compiler can validly tear
	 * updates to vd[x].seq and it is possible that the value seen by the
	 * reader is inconsistent.
	 */
	WRITE_ONCE(vd[CS_HRES_COARSE].seq, vd[CS_HRES_COARSE].seq + 1);
	WRITE_ONCE(vd[CS_RAW].seq, vd[CS_RAW].seq + 1);
}
#endif /* !__ASSEMBLY__ */
#endif /* __VDSO_HELPERS_H */
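For completeness, a similar standalone sketch of how the write-side helpers are meant to be paired: the kernel's timekeeping update path brackets its vdso_data updates with vdso_write_begin() and vdso_write_end(), so the sequence count is odd while the data is being changed and even again once it is stable. The names below are again invented for illustration, the release fences stand in for smp_wmb(), and a strictly portable C11 seqlock would also access the payload with relaxed atomics.

#include <stdatomic.h>
#include <stdint.h>

/* Same invented type as in the reader sketch above. */
struct demo_data {
	_Atomic uint32_t seq;	/* even: data stable, odd: update in flight */
	uint64_t value;		/* payload guarded by the sequence counter */
};

static void demo_write_begin(struct demo_data *d)
{
	uint32_t s = atomic_load_explicit(&d->seq, memory_order_relaxed);

	/* Make seq odd so that readers starting now will spin or retry. */
	atomic_store_explicit(&d->seq, s + 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* stands in for smp_wmb() */
}

static void demo_write_end(struct demo_data *d)
{
	uint32_t s = atomic_load_explicit(&d->seq, memory_order_relaxed);

	atomic_thread_fence(memory_order_release);	/* stands in for smp_wmb() */
	/* Back to even: a reader that saw the old count will retry. */
	atomic_store_explicit(&d->seq, s + 1, memory_order_relaxed);
}

static void demo_update(struct demo_data *d, uint64_t v)
{
	demo_write_begin(d);
	d->value = v;		/* payload change happens inside the bracket */
	demo_write_end(d);
}

int main(void)
{
	struct demo_data d = { .value = 0 };

	demo_update(&d, 42);		/* seq goes 0 -> 1 -> 2 around the update */
	return (int)d.value - 42;	/* exits 0 when the update landed */
}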