s390/mm: use set_pXd()/set_pte() helper functions everywhere
Use the new set_pXd()/set_pte() helper functions at all places where
page table entries are modified.

Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
commit b8e3b37900
parent f29111f117
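For reference, the helpers this diff switches to are small inline functions; the set_pte() signature even appears as hunk context below. The following is a minimal sketch of their shape, assuming the definitions added by the preceding helper commit — the bodies are an approximation for illustration, not part of this diff:

	/*
	 * Sketch (assumed, not from this diff) of the s390 page table entry
	 * helpers this commit adopts: a single store of the new entry value,
	 * plus wrappers that return a copy with protection bits set/cleared.
	 */
	static inline void set_pte(pte_t *ptep, pte_t pte)
	{
		WRITE_ONCE(*ptep, pte);
	}

	static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
	{
		return __pte(pte_val(pte) | pgprot_val(prot));
	}

	static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
	{
		return __pte(pte_val(pte) & ~pgprot_val(prot));
	}

The pmd/pud/p4d/pgd variants follow the same pattern, which is why an open-coded update such as pte_val(*ptep) |= _PAGE_YOUNG becomes set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_YOUNG))) throughout the hunks below.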
@@ -45,9 +45,9 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
 				  pte_t *ptep, unsigned long sz)
 {
 	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
-		pte_val(*ptep) = _REGION3_ENTRY_EMPTY;
+		set_pte(ptep, __pte(_REGION3_ENTRY_EMPTY));
 	else
-		pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY;
+		set_pte(ptep, __pte(_SEGMENT_ENTRY_EMPTY));
 }
 
 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
@@ -103,17 +103,17 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
 {
-	pgd_val(*pgd) = _REGION1_ENTRY | __pa(p4d);
+	set_pgd(pgd, __pgd(_REGION1_ENTRY | __pa(p4d)));
 }
 
 static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
 {
-	p4d_val(*p4d) = _REGION2_ENTRY | __pa(pud);
+	set_p4d(p4d, __p4d(_REGION2_ENTRY | __pa(pud)));
 }
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
-	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
+	set_pud(pud, __pud(_REGION3_ENTRY | __pa(pmd)));
 }
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
@@ -129,7 +129,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 static inline void pmd_populate(struct mm_struct *mm,
 				pmd_t *pmd, pgtable_t pte)
 {
-	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
+	set_pmd(pmd, __pmd(_SEGMENT_ENTRY | __pa(pte)));
 }
 
 #define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
@@ -939,29 +939,29 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 static inline void pgd_clear(pgd_t *pgd)
 {
 	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
-		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
+		set_pgd(pgd, __pgd(_REGION1_ENTRY_EMPTY));
 }
 
 static inline void p4d_clear(p4d_t *p4d)
 {
 	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
-		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
+		set_p4d(p4d, __p4d(_REGION2_ENTRY_EMPTY));
 }
 
 static inline void pud_clear(pud_t *pud)
 {
 	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
-		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
+		set_pud(pud, __pud(_REGION3_ENTRY_EMPTY));
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
 {
-	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+	set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
 }
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-	pte_val(*ptep) = _PAGE_INVALID;
+	set_pte(ptep, __pte(_PAGE_INVALID));
 }
 
 /*
@@ -1169,7 +1169,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 
 	if (full) {
 		res = *ptep;
-		*ptep = __pte(_PAGE_INVALID);
+		set_pte(ptep, __pte(_PAGE_INVALID));
 	} else {
 		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
 	}
@@ -1257,7 +1257,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 	if (mm_has_pgste(mm))
 		ptep_set_pte_at(mm, addr, ptep, entry);
 	else
-		*ptep = entry;
+		set_pte(ptep, entry);
 }
 
 /*
@@ -1641,7 +1641,7 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 {
 	if (!MACHINE_HAS_NX)
 		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
-	*pmdp = entry;
+	set_pmd(pmdp, entry);
 }
 
 static inline pmd_t pmd_mkhuge(pmd_t pmd)
@@ -1666,7 +1666,7 @@ static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
 {
 	if (full) {
 		pmd_t pmd = *pmdp;
-		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
+		set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
 		return pmd;
 	}
 	return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
@@ -985,7 +985,7 @@ static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
 	}
 
 	if (bits & GMAP_NOTIFY_MPROT)
-		pmd_val(*pmdp) |= _SEGMENT_ENTRY_GMAP_IN;
+		set_pmd(pmdp, set_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_IN)));
 
 	/* Shadow GMAP protection needs split PMDs */
 	if (bits & GMAP_NOTIFY_SHADOW)
@@ -1151,7 +1151,7 @@ int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
 			address = pte_val(pte) & PAGE_MASK;
 			address += gaddr & ~PAGE_MASK;
 			*val = *(unsigned long *) address;
-			pte_val(*ptep) |= _PAGE_YOUNG;
+			set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_YOUNG)));
 			/* Do *NOT* clear the _PAGE_INVALID bit! */
 			rc = 0;
 		}
@@ -2275,7 +2275,7 @@ EXPORT_SYMBOL_GPL(ptep_notify);
 static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp,
 			     unsigned long gaddr)
 {
-	pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_IN;
+	set_pmd(pmdp, clear_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_IN)));
 	gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
 }
 
@@ -2302,7 +2302,7 @@ static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
 		__pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
 	else
 		__pmdp_csp(pmdp);
-	*pmdp = new;
+	set_pmd(pmdp, new);
 }
 
 static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
@@ -2324,7 +2324,7 @@ static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
 						   _SEGMENT_ENTRY_GMAP_UC));
 			if (purge)
 				__pmdp_csp(pmdp);
-			pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+			set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
 		}
 		spin_unlock(&gmap->guest_table_lock);
 	}
@@ -2447,7 +2447,7 @@ static bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
 		return false;
 
 	/* Clear UC indication and reset protection */
-	pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_UC;
+	set_pmd(pmdp, clear_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_UC)));
 	gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0);
 	return true;
 }
@@ -168,7 +168,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		rste |= _SEGMENT_ENTRY_LARGE;
 
 	clear_huge_pte_skeys(mm, rste);
-	pte_val(*ptep) = rste;
+	set_pte(ptep, __pte(rste));
 }
 
 pte_t huge_ptep_get(pte_t *ptep)
@@ -175,7 +175,7 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
 					page = kasan_early_alloc_segment();
 					memset(page, 0, _SEGMENT_SIZE);
 				}
-				pmd_val(*pm_dir) = __pa(page) | sgt_prot;
+				set_pmd(pm_dir, __pmd(__pa(page) | sgt_prot));
 				address = (address + PMD_SIZE) & PMD_MASK;
 				continue;
 			}
@@ -194,16 +194,16 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
 		switch (mode) {
 		case POPULATE_ONE2ONE:
 			page = (void *)address;
-			pte_val(*pt_dir) = __pa(page) | pgt_prot;
+			set_pte(pt_dir, __pte(__pa(page) | pgt_prot));
 			break;
 		case POPULATE_MAP:
 			page = kasan_early_alloc_pages(0);
 			memset(page, 0, PAGE_SIZE);
-			pte_val(*pt_dir) = __pa(page) | pgt_prot;
+			set_pte(pt_dir, __pte(__pa(page) | pgt_prot));
 			break;
 		case POPULATE_ZERO_SHADOW:
 			page = kasan_early_shadow_page;
-			pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
+			set_pte(pt_dir, __pte(__pa(page) | pgt_prot_zero));
 			break;
 		case POPULATE_SHALLOW:
 			/* should never happen */
@@ -127,7 +127,7 @@ static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
 		prot &= ~_PAGE_NOEXEC;
 	ptep = pt_dir;
 	for (i = 0; i < PTRS_PER_PTE; i++) {
-		pte_val(*ptep) = pte_addr | prot;
+		set_pte(ptep, __pte(pte_addr | prot));
 		pte_addr += PAGE_SIZE;
 		ptep++;
 	}
@@ -208,7 +208,7 @@ static int split_pud_page(pud_t *pudp, unsigned long addr)
 		prot &= ~_SEGMENT_ENTRY_NOEXEC;
 	pmdp = pm_dir;
 	for (i = 0; i < PTRS_PER_PMD; i++) {
-		pmd_val(*pmdp) = pmd_addr | prot;
+		set_pmd(pmdp, __pmd(pmd_addr | prot));
 		pmd_addr += PMD_SIZE;
 		pmdp++;
 	}
@@ -347,23 +347,24 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr)
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	unsigned long address;
+	pte_t *ptep, pte;
 	int nr, i, j;
-	pte_t *pte;
 
 	for (i = 0; i < numpages;) {
 		address = (unsigned long)page_to_virt(page + i);
-		pte = virt_to_kpte(address);
-		nr = (unsigned long)pte >> ilog2(sizeof(long));
+		ptep = virt_to_kpte(address);
+		nr = (unsigned long)ptep >> ilog2(sizeof(long));
 		nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
 		nr = min(numpages - i, nr);
 		if (enable) {
 			for (j = 0; j < nr; j++) {
-				pte_val(*pte) &= ~_PAGE_INVALID;
+				pte = clear_pte_bit(*ptep, __pgprot(_PAGE_INVALID));
+				set_pte(ptep, pte);
 				address += PAGE_SIZE;
-				pte++;
+				ptep++;
 			}
 		} else {
-			ipte_range(pte, address, nr);
+			ipte_range(ptep, address, nr);
 		}
 		i += nr;
 	}
@@ -115,7 +115,7 @@ static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
 	atomic_inc(&mm->context.flush_count);
 	if (cpumask_equal(&mm->context.cpu_attach_mask,
 			  cpumask_of(smp_processor_id()))) {
-		pte_val(*ptep) |= _PAGE_INVALID;
+		set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_INVALID)));
 		mm->context.flush_mm = 1;
 	} else
 		ptep_ipte_global(mm, addr, ptep, nodat);
@@ -232,7 +232,7 @@ static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
 			pgste_val(pgste) |= PGSTE_UC_BIT;
 	}
 #endif
-	*ptep = entry;
+	set_pte(ptep, entry);
 	return pgste;
 }
 
@@ -280,7 +280,7 @@ static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
 		pgste = pgste_set_pte(ptep, pgste, new);
 		pgste_set_unlock(ptep, pgste);
 	} else {
-		*ptep = new;
+		set_pte(ptep, new);
 	}
 	return old;
 }
@@ -352,7 +352,7 @@ void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
 		pgste = pgste_set_pte(ptep, pgste, pte);
 		pgste_set_unlock(ptep, pgste);
 	} else {
-		*ptep = pte;
+		set_pte(ptep, pte);
 	}
 	preempt_enable();
 }
@@ -417,7 +417,7 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
 	atomic_inc(&mm->context.flush_count);
 	if (cpumask_equal(&mm->context.cpu_attach_mask,
 			  cpumask_of(smp_processor_id()))) {
-		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
+		set_pmd(pmdp, set_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_INVALID)));
 		mm->context.flush_mm = 1;
 		if (mm_has_pgste(mm))
 			gmap_pmdp_invalidate(mm, addr);
@@ -469,7 +469,7 @@ pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
 
 	preempt_disable();
 	old = pmdp_flush_direct(mm, addr, pmdp);
-	*pmdp = new;
+	set_pmd(pmdp, new);
 	preempt_enable();
 	return old;
 }
@@ -482,7 +482,7 @@ pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
 
 	preempt_disable();
 	old = pmdp_flush_lazy(mm, addr, pmdp);
-	*pmdp = new;
+	set_pmd(pmdp, new);
 	preempt_enable();
 	return old;
 }
@@ -539,7 +539,7 @@ pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
 
 	preempt_disable();
 	old = pudp_flush_direct(mm, addr, pudp);
-	*pudp = new;
+	set_pud(pudp, new);
 	preempt_enable();
 	return old;
 }
@@ -579,9 +579,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
 		list_del(lh);
 	}
 	ptep = (pte_t *) pgtable;
-	pte_val(*ptep) = _PAGE_INVALID;
+	set_pte(ptep, __pte(_PAGE_INVALID));
 	ptep++;
-	pte_val(*ptep) = _PAGE_INVALID;
+	set_pte(ptep, __pte(_PAGE_INVALID));
 	return pgtable;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -776,7 +776,7 @@ bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
 			pte_val(pte) |= _PAGE_PROTECT;
 		else
 			pte_val(pte) |= _PAGE_INVALID;
-		*ptep = pte;
+		set_pte(ptep, pte);
 	}
 	pgste_set_unlock(ptep, pgste);
 	return dirty;
@@ -174,9 +174,9 @@ static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
 
 				if (!new_page)
 					goto out;
-				pte_val(*pte) = __pa(new_page) | prot;
+				set_pte(pte, __pte(__pa(new_page) | prot));
 			} else {
-				pte_val(*pte) = __pa(addr) | prot;
+				set_pte(pte, __pte(__pa(addr) | prot));
 			}
 		} else {
 			continue;
@@ -242,7 +242,7 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
 			    IS_ALIGNED(next, PMD_SIZE) &&
 			    MACHINE_HAS_EDAT1 && addr && direct &&
 			    !debug_pagealloc_enabled()) {
-				pmd_val(*pmd) = __pa(addr) | prot;
+				set_pmd(pmd, __pmd(__pa(addr) | prot));
 				pages++;
 				continue;
 			} else if (!direct && MACHINE_HAS_EDAT1) {
@@ -257,7 +257,7 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
 				 */
 				new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
 				if (new_page) {
-					pmd_val(*pmd) = __pa(new_page) | prot;
+					set_pmd(pmd, __pmd(__pa(new_page) | prot));
 					if (!IS_ALIGNED(addr, PMD_SIZE) ||
 					    !IS_ALIGNED(next, PMD_SIZE)) {
 						vmemmap_use_new_sub_pmd(addr, next);
@@ -338,7 +338,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
 			    IS_ALIGNED(next, PUD_SIZE) &&
 			    MACHINE_HAS_EDAT2 && addr && direct &&
 			    !debug_pagealloc_enabled()) {
-				pud_val(*pud) = __pa(addr) | prot;
+				set_pud(pud, __pud(__pa(addr) | prot));
 				pages++;
 				continue;
 			}