mirror of https://github.com/torvalds/linux.git (synced 2024-12-25 20:32:22 +00:00)
commit 043d07084b
The noexec support on s390 does not rely on a bit in the page table entry but utilizes the secondary address space mode to distinguish between memory accesses for instructions and for data. The noexec code relies on the assumption that the CPU will always use the secondary space page table for data accesses while it is running in secondary space mode. Up to the z9-109 class of machines this has been the case. Unfortunately this is not true anymore with z10 and later machines: the load-relative-long instructions lrl, lgrl and lgfrl access their memory operand using the same addressing-space mode that was used to fetch the instruction. This breaks the noexec mode for all user space binaries compiled with -march=z10 or later. The only option is to remove the current noexec support.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
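To make the failure mode concrete, here is a minimal hedged sketch (not part of the patch; the function and variable names are made up). Compiled with gcc -O2 -march=z10 on s390x, a PC-relative load of a global like the one below is typically emitted as a load-relative-long (lgrl), whose operand access uses the instruction-fetch addressing mode and therefore bypasses the secondary-space (data) page table that the old noexec scheme depended on:

/* Hypothetical example, not from the kernel tree. With
 * gcc -O2 -march=z10 the load of 'flag' is typically emitted as
 *
 *	lgrl	%r2,flag
 *
 * lgrl fetches its operand in the same addressing-space mode that was
 * used to fetch the instruction itself (the primary space), so the
 * access never goes through the secondary-space page table and the
 * old instruction-vs-data distinction is lost.
 */
long flag;

long get_flag(void)
{
	return flag;
}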
131 lines
2.5 KiB
C
/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright 2007 IBM Corp.
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *pteptr, pte_t pteval)
{
	pmd_t *pmdp = (pmd_t *) pteptr;
	unsigned long mask;

	if (!MACHINE_HAS_HPAGE) {
		/*
		 * Software emulation: point the segment table entry at
		 * the page table that arch_prepare_hugepage() stashed
		 * in page[1].index, preserving the protection bits.
		 */
		pteptr = (pte_t *) pte_page(pteval)[1].index;
		mask = pte_val(pteval) &
				(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
		pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
	}

	pmd_val(*pmdp) = pte_val(pteval);
}

int arch_prepare_hugepage(struct page *page)
{
	unsigned long addr = page_to_phys(page);
	pte_t pte;
	pte_t *ptep;
	int i;

	if (MACHINE_HAS_HPAGE)
		return 0;

	/*
	 * No hardware large pages: pre-allocate a page table that maps
	 * the huge page with normal ptes for the software emulation.
	 */
	ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
	if (!ptep)
		return -ENOMEM;

	pte = mk_pte(page, PAGE_RW);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
		pte_val(pte) += PAGE_SIZE;
	}
	page[1].index = (unsigned long) ptep;
	return 0;
}

void arch_release_hugepage(struct page *page)
{
	pte_t *ptep;

	if (MACHINE_HAS_HPAGE)
		return;

	ptep = (pte_t *) page[1].index;
	if (!ptep)
		return;
	page_table_free(&init_mm, (unsigned long *) ptep);
	page[1].index = 0;
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	pudp = pud_alloc(mm, pgdp, addr);
	if (pudp)
		pmdp = pmd_alloc(mm, pudp, addr);
	return (pte_t *) pmdp;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	if (pgd_present(*pgdp)) {
		pudp = pud_offset(pgdp, addr);
		if (pud_present(*pudp))
			pmdp = pmd_offset(pudp, addr);
	}
	return (pte_t *) pmdp;
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	if (!MACHINE_HAS_HPAGE)
		return 0;

	return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmdp, int write)
{
	struct page *page;

	if (!MACHINE_HAS_HPAGE)
		return NULL;

	page = pmd_page(*pmdp);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}