Merge remote-tracking branch 'tip/x86/urgent' into edac-for-5.1
... to pick up the dependent change:
00ae831dfe ("x86/cpu: Add Atom Tremont (Jacobsville)")
introducing the model number define which will be needed by the new
i10nm_edac driver for 10nm Intel Atoms.
Signed-off-by: Borislav Petkov <bp@suse.de>
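
For background on how such a model define is consumed, here is a minimal, illustrative sketch (not the actual i10nm_edac probe code; the names example_cpuids/example_init are made up) of the usual pattern by which an x86 driver gates itself on an INTEL_FAM6_* model number via x86_match_cpu():

/*
 * Illustrative sketch only -- not the real i10nm_edac sources. It shows the
 * common pattern for keying a driver off a model define such as
 * INTEL_FAM6_ATOM_TREMONT_X using x86_match_cpu().
 */
#include <linux/module.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>

static const struct x86_cpu_id example_cpuids[] = {
	/* vendor, family, model, feature, driver_data */
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_TREMONT_X, X86_FEATURE_ANY, 0 },
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, example_cpuids);

static int __init example_init(void)
{
	/* Bail out early when not running on a matching CPU model. */
	if (!x86_match_cpu(example_cpuids))
		return -ENODEV;

	/* ... register the EDAC memory controllers here ... */
	return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
	/* ... tear down what example_init() registered ... */
}
module_exit(example_exit);

MODULE_LICENSE("GPL v2");
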
commit 84ba10d633
@@ -9,7 +9,7 @@ Fenghua Yu <fenghua.yu@intel.com>
 Tony Luck <tony.luck@intel.com>
 Vikas Shivappa <vikas.shivappa@intel.com>
 
-This feature is enabled by the CONFIG_RESCTRL and the X86 /proc/cpuinfo
+This feature is enabled by the CONFIG_X86_CPU_RESCTRL and the x86 /proc/cpuinfo
 flag bits:
 RDT (Resource Director Technology) Allocation - "rdt_a"
 CAT (Cache Allocation Technology) - "cat_l3", "cat_l2"
@@ -16638,6 +16638,15 @@ S: Maintained
 F: drivers/platform/x86/
 F: drivers/platform/olpc/
 
+X86 PLATFORM DRIVERS - ARCH
+R: Darren Hart <dvhart@infradead.org>
+R: Andy Shevchenko <andy@infradead.org>
+L: platform-driver-x86@vger.kernel.org
+L: x86@kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
+S: Maintained
+F: arch/x86/platform
+
 X86 VDSO
 M: Andy Lutomirski <luto@kernel.org>
 L: linux-kernel@vger.kernel.org
@@ -1,4 +1,5 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += kvm_para.h
+generic-y += shmparam.h
 generic-y += ucontext.h
@@ -1,4 +1,5 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += kvm_para.h
+generic-y += shmparam.h
 generic-y += ucontext.h
@@ -1,3 +1,4 @@
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += shmparam.h
 generic-y += ucontext.h
@@ -2,3 +2,4 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generated-y += unistd_32.h
 generic-y += kvm_para.h
+generic-y += shmparam.h
@@ -2,4 +2,5 @@ include include/uapi/asm-generic/Kbuild.asm
 
 generated-y += unistd_32.h
 generic-y += kvm_para.h
+generic-y += shmparam.h
 generic-y += ucontext.h
@@ -1,4 +1,5 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += kvm_para.h
+generic-y += shmparam.h
 generic-y += ucontext.h
@@ -1,4 +1,5 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += kvm_para.h
+generic-y += shmparam.h
 generic-y += ucontext.h
@@ -198,7 +198,7 @@ config X86
 	select IRQ_FORCED_THREADING
 	select NEED_SG_DMA_LENGTH
 	select PCI_DOMAINS if PCI
-	select PCI_LOCKLESS_CONFIG
+	select PCI_LOCKLESS_CONFIG if PCI
 	select PERF_EVENTS
 	select RTC_LIB
 	select RTC_MC146818_LIB
@@ -446,12 +446,12 @@ config RETPOLINE
 	  branches. Requires a compiler with -mindirect-branch=thunk-extern
 	  support for full protection. The kernel may run slower.
 
-config RESCTRL
-	bool "Resource Control support"
+config X86_CPU_RESCTRL
+	bool "x86 CPU resource control support"
 	depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
 	select KERNFS
 	help
-	  Enable Resource Control support.
+	  Enable x86 CPU resource control support.
 
 	  Provide support for the allocation and monitoring of system resources
 	  usage by the CPU.
@@ -617,7 +617,7 @@ config X86_INTEL_QUARK
 
 config X86_INTEL_LPSS
 	bool "Intel Low Power Subsystem Support"
-	depends on X86 && ACPI
+	depends on X86 && ACPI && PCI
 	select COMMON_CLK
 	select PINCTRL
 	select IOSF_MBI
@@ -600,6 +600,14 @@ ENTRY(trampoline_32bit_src)
 	leal	TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
 	movl	%eax, %cr3
 3:
+	/* Set EFER.LME=1 as a precaution in case hypervsior pulls the rug */
+	pushl	%ecx
+	movl	$MSR_EFER, %ecx
+	rdmsr
+	btsl	$_EFER_LME, %eax
+	wrmsr
+	popl	%ecx
+
 	/* Enable PAE and LA57 (if required) paging modes */
 	movl	$X86_CR4_PAE, %eax
 	cmpl	$0, %edx
@@ -6,7 +6,7 @@
 #define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0
 
 #define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE
-#define TRAMPOLINE_32BIT_CODE_SIZE 0x60
+#define TRAMPOLINE_32BIT_CODE_SIZE 0x70
 
 #define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE
 
@@ -361,7 +361,8 @@ ENTRY(entry_INT80_compat)
 
 	/* Need to switch before accessing the thread stack. */
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
-	movq	%rsp, %rdi
+	/* In the Xen PV case we already run on the thread stack. */
+	ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
 	pushq	6*8(%rdi)	/* regs->ss */
@@ -370,8 +371,9 @@ ENTRY(entry_INT80_compat)
 	pushq	3*8(%rdi)	/* regs->cs */
 	pushq	2*8(%rdi)	/* regs->ip */
 	pushq	1*8(%rdi)	/* regs->orig_ax */
-
 	pushq	(%rdi)		/* pt_regs->di */
+.Lint80_keep_stack:
+
 	pushq	%rsi		/* pt_regs->si */
 	xorl	%esi, %esi	/* nospec si */
 	pushq	%rdx		/* pt_regs->dx */
@@ -6,7 +6,7 @@
  * "Big Core" Processors (Branded as Core, Xeon, etc...)
  *
  * The "_X" parts are generally the EP and EX Xeons, or the
- * "Extreme" ones, like Broadwell-E.
+ * "Extreme" ones, like Broadwell-E, or Atom microserver.
  *
  * While adding a new CPUID for a new microarchitecture, add a new
  * group to keep logically sorted out in chronological order. Within
@@ -71,6 +71,7 @@
 #define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */
 #define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */
 #define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */
+#define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */
 
 /* Xeon Phi */
 
@@ -178,6 +178,10 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
 
 void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
 
+/*
+ * Init a new mm. Used on mm copies, like at fork()
+ * and on mm's that are brand-new, like at execve().
+ */
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
@@ -228,8 +232,22 @@ do { \
 } while (0)
 #endif
 
+static inline void arch_dup_pkeys(struct mm_struct *oldmm,
+				  struct mm_struct *mm)
+{
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
+		return;
+
+	/* Duplicate the oldmm pkey state in mm: */
+	mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
+	mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
+#endif
+}
+
 static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 {
+	arch_dup_pkeys(oldmm, mm);
 	paravirt_arch_dup_mmap(oldmm, mm);
 	return ldt_dup_context(oldmm, mm);
 }
@@ -2,7 +2,7 @@
 #ifndef _ASM_X86_RESCTRL_SCHED_H
 #define _ASM_X86_RESCTRL_SCHED_H
 
-#ifdef CONFIG_RESCTRL
+#ifdef CONFIG_X86_CPU_RESCTRL
 
 #include <linux/sched.h>
 #include <linux/jump_label.h>
@@ -88,6 +88,6 @@ static inline void resctrl_sched_in(void)
 
 static inline void resctrl_sched_in(void) {}
 
-#endif /* CONFIG_RESCTRL */
+#endif /* CONFIG_X86_CPU_RESCTRL */
 
 #endif /* _ASM_X86_RESCTRL_SCHED_H */
@@ -39,7 +39,7 @@ obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
 obj-$(CONFIG_X86_MCE) += mce/
 obj-$(CONFIG_MTRR) += mtrr/
 obj-$(CONFIG_MICROCODE) += microcode/
-obj-$(CONFIG_RESCTRL) += resctrl/
+obj-$(CONFIG_X86_CPU_RESCTRL) += resctrl/
 
 obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
 
@@ -215,7 +215,7 @@ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
 static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
 	SPECTRE_V2_USER_NONE;
 
-#ifdef RETPOLINE
+#ifdef CONFIG_RETPOLINE
 static bool spectre_v2_bad_module;
 
 bool retpoline_module_ok(bool has_retpoline)
@@ -855,7 +855,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
 	if (!p) {
 		return ret;
 	} else {
-		if (boot_cpu_data.microcode == p->patch_id)
+		if (boot_cpu_data.microcode >= p->patch_id)
 			return ret;
 
 		ret = UCODE_NEW;
@@ -1,4 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_RESCTRL) += core.o rdtgroup.o monitor.o
-obj-$(CONFIG_RESCTRL) += ctrlmondata.o pseudo_lock.o
+obj-$(CONFIG_X86_CPU_RESCTRL) += core.o rdtgroup.o monitor.o
+obj-$(CONFIG_X86_CPU_RESCTRL) += ctrlmondata.o pseudo_lock.o
 CFLAGS_pseudo_lock.o = -I$(src)
@@ -470,6 +470,7 @@ int crash_load_segments(struct kimage *image)
 
 	kbuf.memsz = kbuf.bufsz;
 	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
+	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
 	ret = kexec_add_buffer(&kbuf);
 	if (ret) {
 		vfree((void *)image->arch.elf_headers);
@@ -167,6 +167,9 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
 	struct efi_info *current_ei = &boot_params.efi_info;
 	struct efi_info *ei = &params->efi_info;
 
+	if (!efi_enabled(EFI_RUNTIME_SERVICES))
+		return 0;
+
 	if (!current_ei->efi_memmap_size)
 		return 0;
 
@@ -434,6 +437,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
 	kbuf.memsz = PAGE_ALIGN(header->init_size);
 	kbuf.buf_align = header->kernel_alignment;
 	kbuf.buf_min = MIN_KERNEL_LOAD_ADDR;
+	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
 	ret = kexec_add_buffer(&kbuf);
 	if (ret)
 		goto out_free_params;
@@ -448,6 +452,7 @@
 	kbuf.bufsz = kbuf.memsz = initrd_len;
 	kbuf.buf_align = PAGE_SIZE;
 	kbuf.buf_min = MIN_INITRD_LOAD_ADDR;
+	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
 	ret = kexec_add_buffer(&kbuf);
 	if (ret)
 		goto out_free_params;
@@ -36,8 +36,8 @@ static inline u16 i8254(void)
 	u16 status, timer;
 
 	do {
-		outb(I8254_PORT_CONTROL,
-		     I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
+		outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
+		     I8254_PORT_CONTROL);
 		status = inb(I8254_PORT_COUNTER0);
 		timer = inb(I8254_PORT_COUNTER0);
 		timer |= inb(I8254_PORT_COUNTER0) << 8;
@@ -595,7 +595,7 @@ static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
 		return;
 	}
 
-	addr = desc.base0 | (desc.base1 << 16) | (desc.base2 << 24);
+	addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24);
 #ifdef CONFIG_X86_64
 	addr |= ((u64)desc.base3 << 32);
 #endif
@@ -158,8 +158,8 @@ static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
 	pmd = pmd_offset(pud, ppd->vaddr);
 	if (pmd_none(*pmd)) {
 		pte = ppd->pgtable_area;
-		memset(pte, 0, sizeof(pte) * PTRS_PER_PTE);
-		ppd->pgtable_area += sizeof(pte) * PTRS_PER_PTE;
+		memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
+		ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
 		set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
 	}
 
@@ -68,7 +68,7 @@
  */
 #define uninitialized_var(x) x = x
 
-#ifdef RETPOLINE
+#ifdef CONFIG_RETPOLINE
 #define __noretpoline __attribute__((__indirect_branch__("keep")))
 #endif
 
@@ -828,7 +828,7 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
 static inline void module_bug_cleanup(struct module *mod) {}
 #endif /* CONFIG_GENERIC_BUG */
 
-#ifdef RETPOLINE
+#ifdef CONFIG_RETPOLINE
 extern bool retpoline_module_ok(bool has_retpoline);
 #else
 static inline bool retpoline_module_ok(bool has_retpoline)
@@ -995,7 +995,7 @@ struct task_struct {
 	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
 	struct list_head cg_list;
 #endif
-#ifdef CONFIG_RESCTRL
+#ifdef CONFIG_X86_CPU_RESCTRL
 	u32 closid;
 	u32 rmid;
 #endif
@@ -34,6 +34,7 @@ HOSTCFLAGS_bpf-direct.o += $(MFLAG)
 HOSTCFLAGS_dropper.o += $(MFLAG)
 HOSTCFLAGS_bpf-helper.o += $(MFLAG)
 HOSTCFLAGS_bpf-fancy.o += $(MFLAG)
+HOSTCFLAGS_user-trap.o += $(MFLAG)
 HOSTLDLIBS_bpf-direct += $(MFLAG)
 HOSTLDLIBS_bpf-fancy += $(MFLAG)
 HOSTLDLIBS_dropper += $(MFLAG)
@@ -2185,7 +2185,7 @@ static void add_intree_flag(struct buffer *b, int is_intree)
 /* Cannot check for assembler */
 static void add_retpoline(struct buffer *b)
 {
-	buf_printf(b, "\n#ifdef RETPOLINE\n");
+	buf_printf(b, "\n#ifdef CONFIG_RETPOLINE\n");
 	buf_printf(b, "MODULE_INFO(retpoline, \"Y\");\n");
 	buf_printf(b, "#endif\n");
 }
@@ -1133,6 +1133,21 @@ void test_pkey_syscalls_bad_args(int *ptr, u16 pkey)
 	pkey_assert(err);
 }
 
+void become_child(void)
+{
+	pid_t forkret;
+
+	forkret = fork();
+	pkey_assert(forkret >= 0);
+	dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);
+
+	if (!forkret) {
+		/* in the child */
+		return;
+	}
+	exit(0);
+}
+
 /* Assumes that all pkeys other than 'pkey' are unallocated */
 void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
 {
@@ -1141,7 +1156,7 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
 	int nr_allocated_pkeys = 0;
 	int i;
 
-	for (i = 0; i < NR_PKEYS*2; i++) {
+	for (i = 0; i < NR_PKEYS*3; i++) {
 		int new_pkey;
 		dprintf1("%s() alloc loop: %d\n", __func__, i);
 		new_pkey = alloc_pkey();
@@ -1152,20 +1167,26 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
 		if ((new_pkey == -1) && (errno == ENOSPC)) {
 			dprintf2("%s() failed to allocate pkey after %d tries\n",
 				__func__, nr_allocated_pkeys);
-			break;
+		} else {
+			/*
+			 * Ensure the number of successes never
+			 * exceeds the number of keys supported
+			 * in the hardware.
+			 */
+			pkey_assert(nr_allocated_pkeys < NR_PKEYS);
+			allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
 		}
-		pkey_assert(nr_allocated_pkeys < NR_PKEYS);
-		allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
+
+		/*
+		 * Make sure that allocation state is properly
+		 * preserved across fork().
+		 */
+		if (i == NR_PKEYS*2)
+			become_child();
 	}
 
 	dprintf3("%s()::%d\n", __func__, __LINE__);
 
-	/*
-	 * ensure it did not reach the end of the loop without
-	 * failure:
-	 */
-	pkey_assert(i < NR_PKEYS*2);
-
 	/*
 	 * There are 16 pkeys supported in hardware. Three are
 	 * allocated by the time we get here: