Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Ingo Molnar:
 "Misc fixes"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Flush lazy MMU when DEBUG_PAGEALLOC is set
  x86/mm/cpa/selftest: Fix false positive in CPA self test
  x86/mm/cpa: Convert noop to functional fix
  x86, mm: Patch out arch_flush_lazy_mmu_mode() when running on bare metal
  x86, mm, paravirt: Fix vmalloc_fault oops during lazy MMU updates
commit 6c4c4d4bda
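The changes below all revolve around lazy MMU batching: a paravirtualized guest (Xen, lguest) may queue page-table updates while in PARAVIRT_LAZY_MMU mode and only apply them when the batch is left or explicitly flushed. As a rough, self-contained illustration of why a mid-batch flush operation is needed at all, here is a small user-space C model; every name in it (lazy_enter, set_entry, and so on) is invented for the sketch and is not a kernel API:

#include <stdbool.h>
#include <stdio.h>

#define MAX_PENDING 16

static unsigned long table[8];                      /* stand-in for a page table */
static struct { int idx; unsigned long val; } pending[MAX_PENDING];
static int npending;
static bool lazy;

static void lazy_apply(void)
{
        for (int i = 0; i < npending; i++)
                table[pending[i].idx] = pending[i].val;
        npending = 0;
}

static void lazy_enter(void) { lazy = true; }
static void lazy_leave(void) { lazy_apply(); lazy = false; }

/* Analogous to paravirt_flush_lazy_mmu(): apply pending updates, stay lazy. */
static void lazy_flush(void) { if (lazy) lazy_apply(); }

static void set_entry(int idx, unsigned long val)
{
        if (lazy && npending < MAX_PENDING) {
                pending[npending].idx = idx;
                pending[npending].val = val;
                npending++;
        } else {
                table[idx] = val;                   /* immediate on "bare metal" */
        }
}

int main(void)
{
        lazy_enter();
        set_entry(3, 0xdeadbeef);
        printf("before flush: %#lx\n", table[3]);   /* still 0: update only queued */
        lazy_flush();
        printf("after  flush: %#lx\n", table[3]);   /* now visible */
        lazy_leave();
        return 0;
}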
@@ -703,7 +703,10 @@ static inline void arch_leave_lazy_mmu_mode(void)
 	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
 }
 
-void arch_flush_lazy_mmu_mode(void);
+static inline void arch_flush_lazy_mmu_mode(void)
+{
+	PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
+}
 
 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 				phys_addr_t phys, pgprot_t flags)
@@ -91,6 +91,7 @@ struct pv_lazy_ops {
 	/* Set deferred update mode, used for batching operations. */
 	void (*enter)(void);
 	void (*leave)(void);
+	void (*flush)(void);
 };
 
 struct pv_time_ops {
@@ -679,6 +680,7 @@ void paravirt_end_context_switch(struct task_struct *next);
 
 void paravirt_enter_lazy_mmu(void);
 void paravirt_leave_lazy_mmu(void);
+void paravirt_flush_lazy_mmu(void);
 
 void _paravirt_nop(void);
 u32 _paravirt_ident_32(u32);
@@ -263,6 +263,18 @@ void paravirt_leave_lazy_mmu(void)
 	leave_lazy(PARAVIRT_LAZY_MMU);
 }
 
+void paravirt_flush_lazy_mmu(void)
+{
+	preempt_disable();
+
+	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+		arch_leave_lazy_mmu_mode();
+		arch_enter_lazy_mmu_mode();
+	}
+
+	preempt_enable();
+}
+
 void paravirt_start_context_switch(struct task_struct *prev)
 {
 	BUG_ON(preemptible());
@@ -292,18 +304,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 	return this_cpu_read(paravirt_lazy_mode);
 }
 
-void arch_flush_lazy_mmu_mode(void)
-{
-	preempt_disable();
-
-	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
-		arch_leave_lazy_mmu_mode();
-		arch_enter_lazy_mmu_mode();
-	}
-
-	preempt_enable();
-}
-
 struct pv_info pv_info = {
 	.name = "bare hardware",
 	.paravirt_enabled = 0,
@@ -475,6 +475,7 @@ struct pv_mmu_ops pv_mmu_ops = {
 	.lazy_mode = {
 		.enter = paravirt_nop,
 		.leave = paravirt_nop,
+		.flush = paravirt_nop,
 	},
 
 	.set_fixmap = native_set_fixmap,
@@ -1334,6 +1334,7 @@ __init void lguest_init(void)
 	pv_mmu_ops.read_cr3 = lguest_read_cr3;
 	pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
 	pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
+	pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
 	pv_mmu_ops.pte_update = lguest_pte_update;
 	pv_mmu_ops.pte_update_defer = lguest_pte_update;
 
@@ -378,10 +378,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
 	if (pgd_none(*pgd_ref))
 		return -1;
 
-	if (pgd_none(*pgd))
+	if (pgd_none(*pgd)) {
 		set_pgd(pgd, *pgd_ref);
-	else
+		arch_flush_lazy_mmu_mode();
+	} else {
 		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+	}
 
 	/*
 	 * Below here mismatches are bugs because these lower tables
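The vmalloc_fault() hunk above is the core of the "Fix vmalloc_fault oops during lazy MMU updates" change: the fault handler copies the missing top-level entry from the reference page table, but if the fault happened inside a lazy MMU section the set_pgd() may only be queued, so the rest of the walk still sees a hole. Calling arch_flush_lazy_mmu_mode() right after the copy forces the queued update out. A toy user-space model of that ordering problem (all names here are invented for the sketch, not kernel code):

#include <stdbool.h>
#include <stdio.h>

static long ref_pgd = 42;   /* reference (init_mm) entry, always populated  */
static long task_pgd;       /* faulting task's entry, initially empty       */
static long queued;         /* a pending lazy write, 0 = nothing queued     */
static bool lazy = true;    /* the fault happens inside a lazy MMU section  */

static void set_pgd_model(long val) { if (lazy) queued = val; else task_pgd = val; }
static void flush_lazy_model(void)  { if (queued) { task_pgd = queued; queued = 0; } }

/* Returns 0 if the fault was resolved, -1 if the walk still sees a hole. */
static int vmalloc_fault_model(bool do_flush)
{
        if (!task_pgd) {
                set_pgd_model(ref_pgd);
                if (do_flush)
                        flush_lazy_model();  /* the added arch_flush_lazy_mmu_mode() */
        }
        return task_pgd ? 0 : -1;
}

int main(void)
{
        printf("without flush: %d\n", vmalloc_fault_model(false));  /* -1 */
        task_pgd = 0;
        queued = 0;
        printf("with flush:    %d\n", vmalloc_fault_model(true));   /*  0 */
        return 0;
}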
@@ -68,7 +68,7 @@ static int print_split(struct split_state *s)
 			s->gpg++;
 			i += GPS/PAGE_SIZE;
 		} else if (level == PG_LEVEL_2M) {
-			if (!(pte_val(*pte) & _PAGE_PSE)) {
+			if ((pte_val(*pte) & _PAGE_PRESENT) && !(pte_val(*pte) & _PAGE_PSE)) {
 				printk(KERN_ERR
 					"%lx level %d but not PSE %Lx\n",
 					addr, level, (u64)pte_val(*pte));
@@ -467,7 +467,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 * We are safe now. Check whether the new pgprot is the same:
 	 */
 	old_pte = *kpte;
-	old_prot = new_prot = req_prot = pte_pgprot(old_pte);
+	old_prot = req_prot = pte_pgprot(old_pte);
 
 	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
 	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
@@ -478,12 +478,12 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
 	 * for the ancient hardware that doesn't support it.
 	 */
-	if (pgprot_val(new_prot) & _PAGE_PRESENT)
-		pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
+	if (pgprot_val(req_prot) & _PAGE_PRESENT)
+		pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
 	else
-		pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
+		pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
 
-	new_prot = canon_pgprot(new_prot);
+	req_prot = canon_pgprot(req_prot);
 
 	/*
 	 * old_pte points to the large page base address. So we need
@@ -1413,6 +1413,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 	 * but that can deadlock->flush only current cpu:
 	 */
 	__flush_tlb_all();
+
+	arch_flush_lazy_mmu_mode();
 }
 
 #ifdef CONFIG_HIBERNATION
@@ -2200,6 +2200,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 	.lazy_mode = {
 		.enter = paravirt_enter_lazy_mmu,
 		.leave = xen_leave_lazy_mmu,
+		.flush = paravirt_flush_lazy_mmu,
 	},
 
 	.set_fixmap = xen_set_fixmap,