x86/mce: Detect and use SMCA-specific msr_ops
Replace all calls to MCx_IA32_{CTL,ADDR,MISC,STATUS} with the appropriate
msr_ops.

Use SMCA-specific msr_ops when on an SMCA-enabled processor.

Carved out from a patch by Aravind Gopalakrishnan
<Aravind.Gopalakrishnan@amd.com>.

Signed-off-by: Yazen Ghannam <Yazen.Ghannam@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Aravind Gopalakrishnan <aravindksg.lkml@gmail.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: linux-edac <linux-edac@vger.kernel.org>
Link: http://lkml.kernel.org/r/1462019637-16474-6-git-send-email-bp@alien8.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent a9750a31ef
commit d9d73fcc87
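
For context, the msr_ops indirection this diff switches to was introduced by the parent commit (a9750a31ef). Below is a minimal sketch of that indirection, assuming the shape used in that patch series; the struct and accessor names (struct mca_msr_regs, the smca_*_reg() helpers, the MSR_AMD64_SMCA_MCx_* macros) follow it, but treat the exact definitions here as an approximation rather than the authoritative code.

/*
 * Sketch only (not part of this diff): the per-bank MSR accessor
 * indirection assumed by the changes below, roughly as introduced
 * by the parent commit.
 */
struct mca_msr_regs {
	u32 (*ctl)(int bank);
	u32 (*status)(int bank);
	u32 (*addr)(int bank);
	u32 (*misc)(int bank);
};

/* Legacy MCA: the classic MSR_IA32_MCx_* register layout. */
static u32 ctl_reg(int bank)		{ return MSR_IA32_MCx_CTL(bank); }
static u32 status_reg(int bank)		{ return MSR_IA32_MCx_STATUS(bank); }
static u32 addr_reg(int bank)		{ return MSR_IA32_MCx_ADDR(bank); }
static u32 misc_reg(int bank)		{ return MSR_IA32_MCx_MISC(bank); }

/* Scalable MCA: per-bank registers live in the separate SMCA MSR space. */
static u32 smca_ctl_reg(int bank)	{ return MSR_AMD64_SMCA_MCx_CTL(bank); }
static u32 smca_status_reg(int bank)	{ return MSR_AMD64_SMCA_MCx_STATUS(bank); }
static u32 smca_addr_reg(int bank)	{ return MSR_AMD64_SMCA_MCx_ADDR(bank); }
static u32 smca_misc_reg(int bank)	{ return MSR_AMD64_SMCA_MCx_MISC(bank); }

/*
 * Defaults to the legacy accessors; __mcheck_cpu_init_vendor() (see the
 * hunk at line 1684 below) repoints these at the smca_* variants when
 * CPUID reports SMCA support (mce_flags.smca).
 */
struct mca_msr_regs msr_ops = {
	.ctl	= ctl_reg,
	.status	= status_reg,
	.addr	= addr_reg,
	.misc	= misc_reg
};

With that in place, the diff itself is mostly mechanical: every hard-coded MSR_IA32_MCx_{CTL,STATUS,ADDR,MISC}(bank) lookup goes through msr_ops instead, so the same code paths address the right registers on both legacy and SMCA systems.
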
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -403,11 +403,11 @@ static int msr_to_offset(u32 msr)
 	if (msr == mca_cfg.rip_msr)
 		return offsetof(struct mce, ip);
-	if (msr == MSR_IA32_MCx_STATUS(bank))
+	if (msr == msr_ops.status(bank))
 		return offsetof(struct mce, status);
-	if (msr == MSR_IA32_MCx_ADDR(bank))
+	if (msr == msr_ops.addr(bank))
 		return offsetof(struct mce, addr);
-	if (msr == MSR_IA32_MCx_MISC(bank))
+	if (msr == msr_ops.misc(bank))
 		return offsetof(struct mce, misc);
 	if (msr == MSR_IA32_MCG_STATUS)
 		return offsetof(struct mce, mcgstatus);
@@ -570,9 +570,9 @@ static struct notifier_block mce_srao_nb = {
 static void mce_read_aux(struct mce *m, int i)
 {
 	if (m->status & MCI_STATUS_MISCV)
-		m->misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
+		m->misc = mce_rdmsrl(msr_ops.misc(i));
 	if (m->status & MCI_STATUS_ADDRV) {
-		m->addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));
+		m->addr = mce_rdmsrl(msr_ops.addr(i));

 		/*
 		 * Mask the reported address by the reported granularity.
@@ -654,7 +654,7 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 		m.tsc = 0;

 		barrier();
-		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
+		m.status = mce_rdmsrl(msr_ops.status(i));
 		if (!(m.status & MCI_STATUS_VAL))
 			continue;

@@ -701,7 +701,7 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 		/*
 		 * Clear state for this bank.
 		 */
-		mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
+		mce_wrmsrl(msr_ops.status(i), 0);
 	}

 	/*
@@ -726,7 +726,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
 	char *tmp;

 	for (i = 0; i < mca_cfg.banks; i++) {
-		m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
+		m->status = mce_rdmsrl(msr_ops.status(i));
 		if (m->status & MCI_STATUS_VAL) {
 			__set_bit(i, validp);
 			if (quirk_no_way_out)
@@ -1004,7 +1004,7 @@ static void mce_clear_state(unsigned long *toclear)

 	for (i = 0; i < mca_cfg.banks; i++) {
 		if (test_bit(i, toclear))
-			mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
+			mce_wrmsrl(msr_ops.status(i), 0);
 	}
 }

@@ -1123,7 +1123,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 		m.addr = 0;
 		m.bank = i;

-		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
+		m.status = mce_rdmsrl(msr_ops.status(i));
 		if ((m.status & MCI_STATUS_VAL) == 0)
 			continue;

@@ -1493,8 +1493,8 @@ static void __mcheck_cpu_init_clear_banks(void)

 		if (!b->init)
 			continue;
-		wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
-		wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
+		wrmsrl(msr_ops.ctl(i), b->ctl);
+		wrmsrl(msr_ops.status(i), 0);
 	}
 }

@@ -1684,6 +1684,16 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
 		mce_flags.overflow_recov = !!(ebx & BIT(0));
 		mce_flags.succor = !!(ebx & BIT(1));
 		mce_flags.smca = !!(ebx & BIT(3));
+
+		/*
+		 * Install proper ops for Scalable MCA enabled processors
+		 */
+		if (mce_flags.smca) {
+			msr_ops.ctl = smca_ctl_reg;
+			msr_ops.status = smca_status_reg;
+			msr_ops.addr = smca_addr_reg;
+			msr_ops.misc = smca_misc_reg;
+		}
 		mce_amd_feature_init(c);

 		break;
@@ -2134,7 +2144,7 @@ static void mce_disable_error_reporting(void)
 		struct mce_bank *b = &mce_banks[i];

 		if (b->init)
-			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
+			wrmsrl(msr_ops.ctl(i), 0);
 	}
 	return;
 }
@@ -2467,7 +2477,7 @@ static void mce_reenable_cpu(void *h)
 		struct mce_bank *b = &mce_banks[i];

 		if (b->init)
-			wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
+			wrmsrl(msr_ops.ctl(i), b->ctl);
 	}
 }

--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -333,7 +333,7 @@ static u32 get_block_address(u32 current_addr, u32 low, u32 high,
 	/* Fall back to method we used for older processors: */
 	switch (block) {
 	case 0:
-		addr = MSR_IA32_MCx_MISC(bank);
+		addr = msr_ops.misc(bank);
 		break;
 	case 1:
 		offset = ((low & MASK_BLKPTR_LO) >> 21);
@@ -435,7 +435,7 @@ static void __log_error(unsigned int bank, bool threshold_err, u64 misc)
 	struct mce m;
 	u64 status;

-	rdmsrl(MSR_IA32_MCx_STATUS(bank), status);
+	rdmsrl(msr_ops.status(bank), status);
 	if (!(status & MCI_STATUS_VAL))
 		return;

@@ -448,10 +448,10 @@ static void __log_error(unsigned int bank, bool threshold_err, u64 misc)
 		m.misc = misc;

 	if (m.status & MCI_STATUS_ADDRV)
-		rdmsrl(MSR_IA32_MCx_ADDR(bank), m.addr);
+		rdmsrl(msr_ops.addr(bank), m.addr);

 	mce_log(&m);
-	wrmsrl(MSR_IA32_MCx_STATUS(bank), 0);
+	wrmsrl(msr_ops.status(bank), 0);
 }

 static inline void __smp_deferred_error_interrupt(void)
@@ -483,7 +483,7 @@ static void amd_deferred_error_interrupt(void)
 	unsigned int bank;

 	for (bank = 0; bank < mca_cfg.banks; ++bank) {
-		rdmsrl(MSR_IA32_MCx_STATUS(bank), status);
+		rdmsrl(msr_ops.status(bank), status);

 		if (!(status & MCI_STATUS_VAL) ||
 		    !(status & MCI_STATUS_DEFERRED))