s390/mm: remove page table downgrade support

This update consolidates the page table handling code. Since there
are hardly any 31-bit binaries left, there is no need to optimize
for them.

No extra effort is needed to ensure that a compat task does not map
anything above 2GB: the TASK_SIZE limit for 31-bit tasks is already
2GB, and the generic code checks that a resulting mapping address
does not exceed that limit.
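
For illustration, that generic bound check amounts to roughly the
following (a minimal sketch of the logic in get_unmapped_area() in
mm/mmap.c; check_map_limit is a hypothetical helper name used here,
not a kernel function):

	#include <errno.h>

	/* Reject any request that would place a mapping above task_size;
	 * for a 31-bit compat task, task_size is already 2GB. */
	static int check_map_limit(unsigned long addr, unsigned long len,
				   unsigned long task_size)
	{
		if (len > task_size)
			return -ENOMEM;	/* larger than the whole address space */
		if (addr > task_size - len)
			return -ENOMEM;	/* mapping would end above the limit */
		return 0;		/* mapping fits below task_size */
	}

Since a compat task can never pass this check with a mapping that
reaches 2GB, a downgraded two-level page table guarantees nothing
that the TASK_SIZE check does not already guarantee.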

Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Alexander Gordeev, 2020-02-28 11:32:01 +01:00 (committed by Vasily Gorbik)
parent b2745655be
commit 6a3eb35e56
5 files changed, 1 insertion(+), 47 deletions(-)

diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h

@@ -32,8 +32,6 @@ typedef struct {
 	unsigned int uses_cmm:1;
 	/* The gmaps associated with this context are allowed to use huge pages. */
 	unsigned int allow_gmap_hpage_1m:1;
-	/* The mmu context is for compat task */
-	unsigned int compat_mm:1;
 } mm_context_t;
 
 #define INIT_MM_CONTEXT(name) \

diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h

@@ -25,7 +25,6 @@ static inline int init_new_context(struct task_struct *tsk,
 	atomic_set(&mm->context.flush_count, 0);
 	mm->context.gmap_asce = 0;
 	mm->context.flush_mm = 0;
-	mm->context.compat_mm = test_thread_flag(TIF_31BIT);
 #ifdef CONFIG_PGSTE
 	mm->context.alloc_pgste = page_table_allocate_pgste ||
 		test_thread_flag(TIF_PGSTE) ||
@@ -57,10 +56,6 @@ static inline int init_new_context(struct task_struct *tsk,
 		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
 				   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
 		break;
-	case _REGION3_SIZE:
-		/* forked 2-level compat task, set new asce with new mm->pgd */
-		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
-				   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
 	}
 	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
 	return 0;

diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h

@@ -46,7 +46,6 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 }
 
 int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
-void crst_table_downgrade(struct mm_struct *);
 
 static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long addr,
 					     unsigned long len)
@@ -130,24 +129,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	unsigned long *table = crst_table_alloc(mm);
-
-	if (!table)
-		return NULL;
-	if (mm->context.asce_limit == _REGION3_SIZE) {
-		/* Forking a compat process with 2 page table levels */
-		if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
-			crst_table_free(mm, table);
-			return NULL;
-		}
-	}
-	return (pgd_t *) table;
+	return (pgd_t *) crst_table_alloc(mm);
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-	if (mm->context.asce_limit == _REGION3_SIZE)
-		pgtable_pmd_page_dtor(virt_to_page(pgd));
 	crst_table_free(mm, (unsigned long *) pgd);
 }
 

diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h

@@ -179,7 +179,6 @@ typedef struct thread_struct thread_struct;
 	regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA;			\
 	regs->psw.addr = new_psw;					\
 	regs->gprs[15] = new_stackp;					\
-	crst_table_downgrade(current->mm);				\
 	execve_tail();							\
 } while (0)
 

diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c

@@ -138,30 +138,6 @@ err_p4d:
 	return -ENOMEM;
 }
 
-void crst_table_downgrade(struct mm_struct *mm)
-{
-	pgd_t *pgd;
-
-	/* downgrade should only happen from 3 to 2 levels (compat only) */
-	VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);
-
-	if (current->active_mm == mm) {
-		clear_user_asce();
-		__tlb_flush_mm(mm);
-	}
-
-	pgd = mm->pgd;
-	mm_dec_nr_pmds(mm);
-	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
-	mm->context.asce_limit = _REGION3_SIZE;
-	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
-			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
-	crst_table_free(mm, (unsigned long *) pgd);
-
-	if (current->active_mm == mm)
-		set_user_asce(mm);
-}
-
 static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
 {
 	unsigned int old, new;