aa8a5e0062
On some CPUs we can prevent the Meltdown vulnerability by flushing the L1-D cache on exit from kernel to user mode, and from hypervisor to guest. This is known to be the case on at least Power7, Power8 and Power9. At this time we do not know the status of the vulnerability on other CPUs such as the 970 (Apple G5), pasemi CPUs (AmigaOne X1000) or Freescale CPUs. As more information comes to light we can enable this, or other mechanisms on those CPUs. The vulnerability occurs when the load of an architecturally inaccessible memory region (e.g. userspace load of kernel memory) is speculatively executed to the point where its result can influence the address of a subsequent speculatively executed load. In order for that to happen, the first load must hit in the L1, because before the load is sent to the L2 the permission check is performed. Therefore if no kernel addresses hit in the L1 the vulnerability cannot occur. We can ensure that is the case by flushing the L1 whenever we return to userspace. Similarly for hypervisor vs guest. In order to flush the L1-D cache on exit, we add a section of nops at each (h)rfi location that returns to a lower privileged context, and patch that with some sequence. Newer firmware is able to advertise to us that there is a special nop instruction that flushes the L1-D. If we do not see that advertised, we fall back to doing a displacement flush in software. For guest kernels we support migration between some CPU versions, and different CPUs may use different flush instructions. So that we are prepared to migrate to a machine with a different flush instruction activated, we may have to patch more than one flush instruction at boot if the hypervisor tells us to. In the end this patch is mostly the work of Nicholas Piggin and Michael Ellerman. However a cast of thousands contributed to analysis of the issue, earlier versions of the patch, backports, testing, etc. Many thanks to all of them. 
Tested-by: Jon Masters <jcm@redhat.com> Signed-off-by: Nicholas Piggin <npiggin@gmail.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
59 lines
1.6 KiB
C
59 lines
1.6 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_SETUP_H
#define _ASM_POWERPC_SETUP_H

#include <uapi/asm/setup.h>

#ifndef __ASSEMBLY__

/* Early-boot progress output (string plus a numeric progress code). */
extern void ppc_printk_progress(char *s, unsigned short hex);

extern unsigned int rtas_data;
extern unsigned long long memory_limit;
extern unsigned long klimit;

/*
 * Zeroed allocation helper.
 * NOTE(review): name suggests it falls back to bootmem when the slab
 * allocator is not yet available — confirm against the definition.
 */
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);

/* Forward declaration only; a pointer is all note_scsi_host() needs. */
struct device_node;
extern void note_scsi_host(struct device_node *, void *);

/* Used in very early kernel initialization. */
extern unsigned long reloc_offset(void);
extern unsigned long add_reloc_offset(unsigned long);
extern void reloc_got2(unsigned long);

/* Adjust a pointer by the current relocation offset, preserving its type. */
#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))

void check_for_initrd(void);
void initmem_init(void);
void setup_panic(void);

/* Architecture default panic timeout, in seconds. */
#define ARCH_PANIC_TIMEOUT 180

#ifdef CONFIG_PPC_PSERIES
extern void pseries_enable_reloc_on_exc(void);
extern void pseries_disable_reloc_on_exc(void);
extern void pseries_big_endian_exceptions(void);
extern void pseries_little_endian_exceptions(void);
#else
/* No-op stubs so callers don't need #ifdefs when pseries is not built. */
static inline void pseries_enable_reloc_on_exc(void) {}
static inline void pseries_disable_reloc_on_exc(void) {}
static inline void pseries_big_endian_exceptions(void) {}
static inline void pseries_little_endian_exceptions(void) {}
#endif /* CONFIG_PPC_PSERIES */

/* Enable/disable the L1-D cache flush performed on return from (h)rfi. */
void rfi_flush_enable(bool enable);

/* These are bit flags */
enum l1d_flush_type {
	L1D_FLUSH_NONE		= 0x1,	/* no flush required */
	L1D_FLUSH_FALLBACK	= 0x2,	/* software displacement flush */
	L1D_FLUSH_ORI		= 0x4,	/* firmware-advertised special nop (ori-form) flush */
	L1D_FLUSH_MTTRIG	= 0x8,	/* NOTE(review): presumably an mtspr-triggered flush — confirm */
};

/* Select flush type(s) and optionally enable the rfi flush at boot. */
void __init setup_rfi_flush(enum l1d_flush_type, bool enable);
/*
 * Patch the nop sections at the (h)rfi return sites with the flush
 * sequence(s) for the given types (more than one may be patched, to
 * support guest migration between CPUs with different flush methods).
 */
void do_rfi_flush_fixups(enum l1d_flush_type types);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_SETUP_H */