- Reorganize the perf LBR init code so that a TSX quirk is applied early
  enough in order for the LBR MSR access to not #GP
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEzv7L6UO9uDPlPSfHEsHwGGHeVUoFAmLdD5sACgkQEsHwGGHe
 VUoaDA//Yiir+zrTjbCOSnma/pgn3wbWQVe2Y1E1+Tj+av3hBuzTdoAqgDDrFTQ7
 zKwJBjGRb+yQduFbLpLCD9BittX2yk2WGIP3gvLB0ffCLMKU/NOCSKPDZ+HDtvPE
 MkJ1usBLhAYbExgoQVI0Cu9dBL5AR1hKy+ZAw/W7uUhRdR7g9B2gLd1Hg261jmLS
 +imOAQiUgbCAl8d7kMk9lB5gCl3j5DZ+FgpCTYWV4n87cvGBIjAESyjhfAKXRwmg
 Y7acUtOWB935LleA9su2HCtejeIhRoLlV5qUrc0oMUiM6GSymkd6PBRykQ16kajL
 MzKb56daTiK8q4sEhmte0grNNNdyY0nVp2YmvzQ1Cf16Au0XvHIjhLg9WWI+WYmd
 rSP1OXrGoUQ9C2Vex008ajIPQFVDCgRV4euzE8CveiBnNzVp6JCScjNlvVpOgaxM
 04Yesok4nAhJYSYLg1IWXuF5caOyNODJYRNj18ypt2FCEJdDxuDwqVL8DW/oqFz5
 JxTRAuyvojhir/0ztDdMiQ9IkMTjdSboIp1gbjkj0C1J0gJq8Bh9tqNK/uChw58J
 UY9rojGnAa2JEXxQ3P5Dj/zSfw5FoCe7VgACrw+lXw0sJ3CKh08ojRjofP4ET+s1
 77ru8deYJ4sy0IyGLml6WpA3hWwUvkjxWVx1cdnyeBHW9oc7fGQ=
 =nYW6
 -----END PGP SIGNATURE-----

Merge tag 'perf_urgent_for_v5.19_rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fix from Borislav Petkov:

 - Reorganize the perf LBR init code so that a TSX quirk is applied
   early enough in order for the LBR MSR access to not #GP

* tag 'perf_urgent_for_v5.19_rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/intel/lbr: Fix unchecked MSR access error on HSW
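
For context, a small standalone model of the ordering the patch establishes: the sign-extension quirk is only evaluated once the LBR format is known to be LBR_FORMAT_EIP_FLAGS2, instead of in the earlier HSW-specific init path. This is an illustrative sketch, not the kernel sources; struct pmu_caps, lbr_init() and quirk_enabled are stand-ins for x86_pmu, intel_pmu_lbr_init() and the lbr_from_quirk static key.

#include <stdbool.h>
#include <stdio.h>

/* LBR format IDs below are illustrative; the kernel defines its own. */
enum lbr_format {
	LBR_FORMAT_EIP_FLAGS  = 0x03,
	LBR_FORMAT_EIP_FLAGS2 = 0x04,
};

struct pmu_caps {                /* stand-in for the relevant x86_pmu state */
	enum lbr_format lbr_format;
	bool has_hle, has_rtm;   /* TSX feature flags (HLE/RTM) */
};

static bool quirk_enabled;       /* models the lbr_from_quirk static branch */

/* Only called for the TSX-flag format, so only TSX support is checked. */
static bool lbr_from_signext_quirk_needed(const struct pmu_caps *c)
{
	return !(c->has_hle || c->has_rtm);
}

static void lbr_init(struct pmu_caps *c)
{
	switch (c->lbr_format) {
	case LBR_FORMAT_EIP_FLAGS2:
		/* The format is known here, so the quirk can be enabled
		 * before any LBR "from" MSR is ever written. */
		if (lbr_from_signext_quirk_needed(c))
			quirk_enabled = true;
		break;
	case LBR_FORMAT_EIP_FLAGS:
		break;
	}
}

int main(void)
{
	/* A Haswell-like setup: TSX-flag LBR format, but TSX fused off. */
	struct pmu_caps hsw = { .lbr_format = LBR_FORMAT_EIP_FLAGS2 };

	lbr_init(&hsw);
	printf("sign-extension quirk enabled: %s\n", quirk_enabled ? "yes" : "no");
	return 0;
}
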
commit af2c9ac240
Author: Linus Torvalds
Date:   2022-07-24 09:55:53 -07:00

@@ -278,9 +278,9 @@ enum {
 };
 
 /*
- * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
- * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
- * TSX is not supported they have no consistent behavior:
+ * For format LBR_FORMAT_EIP_FLAGS2, bits 61:62 in MSR_LAST_BRANCH_FROM_x
+ * are the TSX flags when TSX is supported, but when TSX is not supported
+ * they have no consistent behavior:
  *
  * - For wrmsr(), bits 61:62 are considered part of the sign extension.
  * - For HW updates (branch captures) bits 61:62 are always OFF and are not
@@ -288,7 +288,7 @@ enum {
  *
  * Therefore, if:
  *
- *   1) LBR has TSX format
+ *   1) LBR format LBR_FORMAT_EIP_FLAGS2
  *   2) CPU has no TSX support enabled
  *
  * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
@@ -300,7 +300,7 @@ static inline bool lbr_from_signext_quirk_needed(void)
 	bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
 			   boot_cpu_has(X86_FEATURE_RTM);
 
-	return !tsx_support && x86_pmu.lbr_has_tsx;
+	return !tsx_support;
 }
 
 static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
@@ -1609,9 +1609,6 @@ void intel_pmu_lbr_init_hsw(void)
 	x86_pmu.lbr_sel_map = hsw_lbr_sel_map;
 
 	x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);
-
-	if (lbr_from_signext_quirk_needed())
-		static_branch_enable(&lbr_from_quirk_key);
 }
 
 /* skylake */
@@ -1702,7 +1699,11 @@ void intel_pmu_lbr_init(void)
 	switch (x86_pmu.intel_cap.lbr_format) {
 	case LBR_FORMAT_EIP_FLAGS2:
 		x86_pmu.lbr_has_tsx = 1;
-		fallthrough;
+		x86_pmu.lbr_from_flags = 1;
+		if (lbr_from_signext_quirk_needed())
+			static_branch_enable(&lbr_from_quirk_key);
+		break;
+
 	case LBR_FORMAT_EIP_FLAGS:
 		x86_pmu.lbr_from_flags = 1;
 		break;
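
The comment the patch adjusts describes why the quirk exists: with the TSX-flag LBR format on a CPU without TSX, values written to MSR_LAST_BRANCH_FROM_x must look sign-extended across bits 61:62 or the wrmsr() #GPs, and values read back should not expose those bits as TSX flags. The following is a rough, self-contained illustration of that fixup; the helper names lbr_from_fixup_wr()/lbr_from_fixup_rd() and the exact bit handling are assumptions for the sketch, not the kernel implementation.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Before a write: fill the would-be TSX flag bits 61:62 with copies of
 * bit 60, i.e. make the value look properly sign extended. */
static uint64_t lbr_from_fixup_wr(uint64_t val)
{
	if (val & (1ULL << 60))
		val |= (1ULL << 61) | (1ULL << 62);
	return val;
}

/* After a read: TSX is off, so the flag bits must always read as zero. */
static uint64_t lbr_from_fixup_rd(uint64_t val)
{
	return val & ~((1ULL << 61) | (1ULL << 62));
}

int main(void)
{
	/* A captured LBR "from" value: kernel-style address with the
	 * TSX flag bits 61:62 cleared by hardware. */
	uint64_t captured = 0x9fff880123456789ULL;

	/* Writing it back verbatim would not be a valid sign extension. */
	printf("wr value: %#" PRIx64 "\n", lbr_from_fixup_wr(captured));
	/* Reading must hide the flag bits again. */
	printf("rd value: %#" PRIx64 "\n", lbr_from_fixup_rd(0xffff880123456789ULL));
	return 0;
}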