Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Thomas Gleixner:
 "A set of fixes for x86:

   - Fix the swapped outb() parameters in the KASLR code

   - Fix the PKEY handling at fork(), which failed to preserve the pkey
     state for the child. Comes with a test case to validate that.

   - Fix the entry stack handling for XEN PV to respect that XEN PV
     systems enter the function already on the current thread stack and
     not on the trampoline.

   - Fix a kexec load failure caused by using a stale value when the
     kexec_buf structure is reused for subsequent allocations.

   - Fix a bogus sizeof() in the memory encryption code.

   - Make the PCI dependency of the Intel Low Power Subsystem explicit.

   - Select PCI_LOCKLESS_CONFIG only when PCI is enabled."

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/Kconfig: Select PCI_LOCKLESS_CONFIG if PCI is enabled
  x86/entry/64/compat: Fix stack switching for XEN PV
  x86/kexec: Fix a kexec_file_load() failure
  x86/mm/mem_encrypt: Fix erroneous sizeof()
  x86/selftests/pkeys: Fork() to check for state being preserved
  x86/pkeys: Properly copy pkey state at fork()
  x86/kaslr: Fix incorrect i8254 outb() parameters
  x86/intel/lpss: Make PCI dependency explicit
commit 8a5f06056a
arch/x86/Kconfig
@@ -198,7 +198,7 @@ config X86
 	select IRQ_FORCED_THREADING
 	select NEED_SG_DMA_LENGTH
 	select PCI_DOMAINS			if PCI
-	select PCI_LOCKLESS_CONFIG
+	select PCI_LOCKLESS_CONFIG		if PCI
 	select PERF_EVENTS
 	select RTC_LIB
 	select RTC_MC146818_LIB
arch/x86/entry/entry_64_compat.S
@@ -361,7 +361,8 @@ ENTRY(entry_INT80_compat)

 	/* Need to switch before accessing the thread stack. */
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
-	movq	%rsp, %rdi
+	/* In the Xen PV case we already run on the thread stack. */
+	ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

 	pushq	6*8(%rdi)		/* regs->ss */
@@ -370,8 +371,9 @@ ENTRY(entry_INT80_compat)
 	pushq	3*8(%rdi)		/* regs->cs */
 	pushq	2*8(%rdi)		/* regs->ip */
 	pushq	1*8(%rdi)		/* regs->orig_ax */
-
 	pushq	(%rdi)			/* pt_regs->di */
+.Lint80_keep_stack:
+
 	pushq	%rsi			/* pt_regs->si */
 	xorl	%esi, %esi		/* nospec   si */
 	pushq	%rdx			/* pt_regs->dx */
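The ALTERNATIVE above is resolved at boot: on bare metal and Xen HVM it stays the original movq %rsp, %rdi and the frame is copied from the entry stack, while on Xen PV it becomes a jump to .Lint80_keep_stack, because Xen PV enters the kernel already on the current thread stack. A rough, userspace-runnable C sketch of that decision; the names xen_pv and copy_frame_to_thread_stack are invented for illustration and are not kernel APIs:

#include <stdbool.h>
#include <stdio.h>

static bool xen_pv;                        /* stand-in for X86_FEATURE_XENPV */

static void copy_frame_to_thread_stack(void)
{
        puts("native path: copy the hardware frame from the entry stack");
}

static void int80_entry(void)
{
        if (!xen_pv)                       /* ALTERNATIVE: movq %rsp, %rdi ... */
                copy_frame_to_thread_stack();
        /* .Lint80_keep_stack: Xen PV arrived here already on the thread stack */
        puts("build the rest of pt_regs and call the C handler");
}

int main(void)
{
        xen_pv = false; int80_entry();     /* bare metal / HVM */
        xen_pv = true;  int80_entry();     /* Xen PV */
        return 0;
}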
arch/x86/include/asm/mmu_context.h
@@ -178,6 +178,10 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)

 void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

+/*
+ * Init a new mm. Used on mm copies, like at fork()
+ * and on mm's that are brand-new, like at execve().
+ */
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
@@ -228,8 +232,22 @@ do {						\
 } while (0)
 #endif

+static inline void arch_dup_pkeys(struct mm_struct *oldmm,
+				  struct mm_struct *mm)
+{
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
+		return;
+
+	/* Duplicate the oldmm pkey state in mm: */
+	mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
+	mm->context.execute_only_pkey   = oldmm->context.execute_only_pkey;
+#endif
+}
+
 static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 {
+	arch_dup_pkeys(oldmm, mm);
 	paravirt_arch_dup_mmap(oldmm, mm);
 	return ldt_dup_context(oldmm, mm);
 }
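The effect of arch_dup_pkeys() above is visible from userspace: a protection key allocated before fork() must still count as allocated in the child, so pkey_alloc() in the child may not hand the same key out again. A minimal sketch, assuming a pkeys-capable CPU and glibc 2.27+ for the pkey_alloc()/pkey_free() wrappers; it only prints what it observes and is not the kernel selftest:

#define _GNU_SOURCE
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
        int parent_key = pkey_alloc(0, 0);
        if (parent_key < 0) {
                perror("pkey_alloc (no pkey support?)");
                return 0;
        }

        pid_t pid = fork();
        if (pid == 0) {
                /* In the child: with the fix, this returns a different key,
                 * because the parent's allocation map was copied at fork(). */
                int child_key = pkey_alloc(0, 0);
                printf("parent key %d, child got %d (%s)\n", parent_key, child_key,
                       child_key == parent_key ? "allocation map lost" : "preserved");
                _exit(0);
        }
        wait(NULL);
        pkey_free(parent_key);
        return 0;
}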
arch/x86/kernel/crash.c
@@ -470,6 +470,7 @@ int crash_load_segments(struct kimage *image)

 	kbuf.memsz = kbuf.bufsz;
 	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
+	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
 	ret = kexec_add_buffer(&kbuf);
 	if (ret) {
 		vfree((void *)image->arch.elf_headers);
arch/x86/kernel/kexec-bzimage64.c
@@ -434,6 +434,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
 	kbuf.memsz = PAGE_ALIGN(header->init_size);
 	kbuf.buf_align = header->kernel_alignment;
 	kbuf.buf_min = MIN_KERNEL_LOAD_ADDR;
+	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
 	ret = kexec_add_buffer(&kbuf);
 	if (ret)
 		goto out_free_params;
@@ -448,6 +449,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
 		kbuf.bufsz = kbuf.memsz = initrd_len;
 		kbuf.buf_align = PAGE_SIZE;
 		kbuf.buf_min = MIN_INITRD_LOAD_ADDR;
+		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
 		ret = kexec_add_buffer(&kbuf);
 		if (ret)
 			goto out_free_params;
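Both kexec hunks above address the same pattern: struct kexec_buf is both input and output of kexec_add_buffer(), and the loaders reuse a single kbuf for the kernel and then the initrd, so the address chosen for the previous buffer is still sitting in kbuf.mem and is taken as if the caller had already picked a location. Resetting kbuf.mem to KEXEC_BUF_MEM_UNKNOWN before each call restores the "find a spot for me" behaviour. A toy, self-contained C sketch of that bug class; buf_desc and place_buffer are made-up names, not the kexec API:

#include <stdio.h>

#define MEM_UNKNOWN (~0UL)                 /* stand-in for KEXEC_BUF_MEM_UNKNOWN */

struct buf_desc {                          /* made-up miniature of struct kexec_buf */
        unsigned long size;
        unsigned long mem;                 /* in: fixed address or MEM_UNKNOWN; out: chosen address */
};

static unsigned long next_free = 0x100000;

static int place_buffer(struct buf_desc *b)
{
        if (b->mem == MEM_UNKNOWN) {       /* normal case: pick a free spot */
                b->mem = next_free;
                next_free += b->size;
                return 0;
        }
        /* caller "requested" an address; a stale one overlaps the previous buffer */
        return (b->mem < next_free) ? -1 : 0;
}

int main(void)
{
        struct buf_desc kbuf = { .size = 0x1000, .mem = MEM_UNKNOWN };

        place_buffer(&kbuf);               /* 1st allocation leaves its result in kbuf.mem */
        printf("first buffer placed at %#lx\n", kbuf.mem);

        kbuf.size = 0x2000;                /* reuse the struct for the next buffer ... */
        printf("stale kbuf.mem: %s\n", place_buffer(&kbuf) ? "load fails" : "ok");

        kbuf.mem = MEM_UNKNOWN;            /* the fix: reset before every placement call */
        printf("after reset:    %s\n", place_buffer(&kbuf) ? "load fails" : "ok");
        return 0;
}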
arch/x86/lib/kaslr.c
@@ -36,8 +36,8 @@ static inline u16 i8254(void)
 	u16 status, timer;

 	do {
-		outb(I8254_PORT_CONTROL,
-		     I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
+		outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
+		     I8254_PORT_CONTROL);
 		status = inb(I8254_PORT_COUNTER0);
 		timer  = inb(I8254_PORT_COUNTER0);
 		timer |= inb(I8254_PORT_COUNTER0) << 8;
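Linux's port-I/O helper is outb(value, port) — value first, port second — so the original call wrote the constant 0x43 to port 0xc2 instead of sending the read-back command to the PIT control port, and the counter read-back that follows was not what was intended. A small userspace sketch with a mocked outb(); the macro values mirror the standard i8254 definitions and nothing here touches real hardware:

#include <stdint.h>
#include <stdio.h>

#define I8254_PORT_CONTROL      0x43
#define I8254_CMD_READBACK      0xc0
#define I8254_SELECT_COUNTER0   0x02

/* Mock with the kernel helper's signature: value first, port second. */
static void outb(uint8_t value, uint16_t port)
{
        printf("write 0x%02x to port 0x%02x\n", value, port);
}

int main(void)
{
        /* swapped (buggy) call: 0x43 goes out on port 0xc2 */
        outb(I8254_PORT_CONTROL, I8254_CMD_READBACK | I8254_SELECT_COUNTER0);

        /* fixed call, as in the hunk above: the read-back command reaches port 0x43 */
        outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0, I8254_PORT_CONTROL);
        return 0;
}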
arch/x86/mm/mem_encrypt_identity.c
@@ -158,8 +158,8 @@ static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
 	pmd = pmd_offset(pud, ppd->vaddr);
 	if (pmd_none(*pmd)) {
 		pte = ppd->pgtable_area;
-		memset(pte, 0, sizeof(pte) * PTRS_PER_PTE);
-		ppd->pgtable_area += sizeof(pte) * PTRS_PER_PTE;
+		memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
+		ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
 		set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
 	}

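sizeof(pte) measures the pte_t * pointer, while sizeof(*pte) measures one page-table entry; the memset() and the pgtable_area advance above want the latter. On x86-64 both happen to be 8 bytes, which is why the old code worked by accident. A stand-alone illustration; the pte_t typedef below is a stand-in, not the kernel's:

#include <stdio.h>

typedef struct { unsigned long pte; } pte_t;   /* stand-in for the kernel's pte_t */
#define PTRS_PER_PTE 512

int main(void)
{
        pte_t table[PTRS_PER_PTE];
        pte_t *pte = table;

        printf("sizeof(pte)  = %zu bytes (the pointer)\n", sizeof(pte));
        printf("sizeof(*pte) = %zu bytes (one entry)\n", sizeof(*pte));
        printf("page table   = %zu bytes\n", sizeof(*pte) * PTRS_PER_PTE);
        return 0;
}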
tools/testing/selftests/x86/protection_keys.c
@@ -1133,6 +1133,21 @@ void test_pkey_syscalls_bad_args(int *ptr, u16 pkey)
 	pkey_assert(err);
 }

+void become_child(void)
+{
+	pid_t forkret;
+
+	forkret = fork();
+	pkey_assert(forkret >= 0);
+	dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);
+
+	if (!forkret) {
+		/* in the child */
+		return;
+	}
+	exit(0);
+}
+
 /* Assumes that all pkeys other than 'pkey' are unallocated */
 void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
 {
@@ -1141,7 +1156,7 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
 	int nr_allocated_pkeys = 0;
 	int i;

-	for (i = 0; i < NR_PKEYS*2; i++) {
+	for (i = 0; i < NR_PKEYS*3; i++) {
 		int new_pkey;
 		dprintf1("%s() alloc loop: %d\n", __func__, i);
 		new_pkey = alloc_pkey();
@@ -1152,20 +1167,26 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
 		if ((new_pkey == -1) && (errno == ENOSPC)) {
 			dprintf2("%s() failed to allocate pkey after %d tries\n",
 				__func__, nr_allocated_pkeys);
-			break;
+		} else {
+			/*
+			 * Ensure the number of successes never
+			 * exceeds the number of keys supported
+			 * in the hardware.
+			 */
+			pkey_assert(nr_allocated_pkeys < NR_PKEYS);
+			allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
 		}
-		pkey_assert(nr_allocated_pkeys < NR_PKEYS);
-		allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
+
+		/*
+		 * Make sure that allocation state is properly
+		 * preserved across fork().
+		 */
+		if (i == NR_PKEYS*2)
+			become_child();
 	}

 	dprintf3("%s()::%d\n", __func__, __LINE__);

-	/*
-	 * ensure it did not reach the end of the loop without
-	 * failure:
-	 */
-	pkey_assert(i < NR_PKEYS*2);
-
 	/*
 	 * There are 16 pkeys supported in hardware.  Three are
 	 * allocated by the time we get here: