// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>

pgprot_t pgprot_writecombine(pgprot_t prot)
{
	/*
	 * mio_wb_bit_mask may be set on a different CPU, but it is only set
	 * once at init and only read afterwards.
	 */
	return __pgprot(pgprot_val(prot) | mio_wb_bit_mask);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);

pgprot_t pgprot_writethrough(pgprot_t prot)
{
	/*
	 * mio_wb_bit_mask may be set on a different CPU, but it is only set
	 * once at init and only read afterwards.
	 */
	return __pgprot(pgprot_val(prot) & ~mio_wb_bit_mask);
}
EXPORT_SYMBOL_GPL(pgprot_writethrough);

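/*
 * IPTE helpers: invalidate a single pte and flush the corresponding
 * TLB entries, either on the local CPU only (IPTE_LOCAL) or on all
 * CPUs (IPTE_GLOBAL). On machines with guest-tagged TLB entries the
 * nodat and guest-ASCE options are derived from mm->context.gmap_asce
 * so that only the relevant entries are hit.
 */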
static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, int nodat)
{
	unsigned long opt, asce;

	if (MACHINE_HAS_TLB_GUEST) {
		opt = 0;
		asce = READ_ONCE(mm->context.gmap_asce);
		if (asce == 0UL || nodat)
			opt |= IPTE_NODAT;
		if (asce != -1UL) {
			asce = asce ? : mm->context.asce;
			opt |= IPTE_GUEST_ASCE;
		}
		__ptep_ipte(addr, ptep, opt, asce, IPTE_LOCAL);
	} else {
		__ptep_ipte(addr, ptep, 0, 0, IPTE_LOCAL);
	}
}

static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, int nodat)
{
	unsigned long opt, asce;

	if (MACHINE_HAS_TLB_GUEST) {
		opt = 0;
		asce = READ_ONCE(mm->context.gmap_asce);
		if (asce == 0UL || nodat)
			opt |= IPTE_NODAT;
		if (asce != -1UL) {
			asce = asce ? : mm->context.asce;
			opt |= IPTE_GUEST_ASCE;
		}
		__ptep_ipte(addr, ptep, opt, asce, IPTE_GLOBAL);
	} else {
		__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	}
}

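/*
 * ptep_flush_direct() invalidates the pte right away, using the local
 * IPTE form when the mm is attached to this CPU only. ptep_flush_lazy()
 * avoids the expensive instruction in that case: it only marks the pte
 * invalid in memory and sets flush_mm, deferring the real TLB flush to
 * the next context switch.
 */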
static inline pte_t ptep_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep,
				      int nodat)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		ptep_ipte_local(mm, addr, ptep, nodat);
	else
		ptep_ipte_global(mm, addr, ptep, nodat);
	atomic_dec(&mm->context.flush_count);
	return old;
}

static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep,
				    int nodat)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_INVALID)));
		mm->context.flush_mm = 1;
	} else
		ptep_ipte_global(mm, addr, ptep, nodat);
	atomic_dec(&mm->context.flush_count);
	return old;
}

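/*
 * For mms with PGSTEs each page table is followed by an array of page
 * status table entries, one per pte, located at ptep + PTRS_PER_PTE.
 * The PCL bit (0x0080 in the high halfword) of a PGSTE serves as a
 * per-entry spinlock: pgste_get_lock() loops on compare-and-swap until
 * it has set the bit, pgste_set_unlock() stores the new value with the
 * bit cleared again.
 */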
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
#endif
}

static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_uses_skeys(mm) || pte_val(pte) & _PAGE_INVALID)
		return pgste;
	address = pte_val(pte) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_uses_skeys(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}

static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			entry = set_pte_bit(entry, __pgprot(_PAGE_DIRTY));
			entry = clear_pte_bit(entry, __pgprot(_PAGE_PROTECT));
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
#endif
	set_pte(ptep, entry);
	return pgste;
}

static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long bits;

	bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
	if (bits) {
		pgste_val(pgste) ^= bits;
		ptep_notify(mm, addr, ptep, bits);
	}
#endif
	return pgste;
}

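/*
 * Exchanging a pte is done in three steps: ptep_xchg_start() takes the
 * PGSTE lock and forwards pending notification bits, one of the flush
 * helpers invalidates the old pte, and ptep_xchg_commit() transfers the
 * storage key state and installs the new pte. The callers below all
 * follow the same pattern:
 *
 *	preempt_disable();
 *	pgste = ptep_xchg_start(mm, addr, ptep);
 *	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 *	old = ptep_flush_direct(mm, addr, ptep, nodat);
 *	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
 *	preempt_enable();
 *
 * with ptep_flush_lazy() used instead in the lazy variant.
 */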
static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pgste_t pgste = __pgste(0);

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
	}
	return pgste;
}

static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
				     unsigned long addr, pte_t *ptep,
				     pgste_t pgste, pte_t old, pte_t new)
{
	if (mm_has_pgste(mm)) {
		if (pte_val(old) & _PAGE_INVALID)
			pgste_set_key(ptep, pgste, new, mm);
		if (pte_val(new) & _PAGE_INVALID) {
			pgste = pgste_update_all(old, pgste, mm);
			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
			    _PGSTE_GPS_USAGE_UNUSED)
				old = set_pte_bit(old, __pgprot(_PAGE_UNUSED));
		}
		pgste = pgste_set_pte(ptep, pgste, new);
		pgste_set_unlock(ptep, pgste);
	} else {
		set_pte(ptep, new);
	}
	return old;
}

pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;
	int nodat;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	old = ptep_flush_direct(mm, addr, ptep, nodat);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_direct);

pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;
	int nodat;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	old = ptep_flush_lazy(mm, addr, ptep, nodat);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_lazy);

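/*
 * ptep_modify_prot_start() and ptep_modify_prot_commit() form a pair:
 * start disables preemption and returns with the pte invalidated and
 * the PGSTE saved next to it, commit installs the new pte and enables
 * preemption again. The entry must not be modified by anything else
 * in between.
 */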
pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep)
{
	pgste_t pgste;
	pte_t old;
	int nodat;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	old = ptep_flush_lazy(mm, addr, ptep, nodat);
	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(old, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return old;
}

void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep, pte_t old_pte, pte_t pte)
{
	pgste_t pgste;
	struct mm_struct *mm = vma->vm_mm;

	if (!MACHINE_HAS_NX)
		pte = clear_pte_bit(pte, __pgprot(_PAGE_NOEXEC));
	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else {
		set_pte(ptep, pte);
	}
	preempt_enable();
}

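/*
 * The pmd invalidation helpers mirror the pte variants but use the
 * IDTE instruction. For mms with PGSTEs that allow 1M gmap huge pages
 * the invalidation is also propagated to the guest mappings via the
 * gmap_pmdp_* helpers.
 */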
static inline void pmdp_idte_local(struct mm_struct *mm,
				   unsigned long addr, pmd_t *pmdp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_LOCAL);
	else
		__pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
	if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
		gmap_pmdp_idte_local(mm, addr);
}

static inline void pmdp_idte_global(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	if (MACHINE_HAS_TLB_GUEST) {
		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_GLOBAL);
		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
			gmap_pmdp_idte_global(mm, addr);
	} else if (MACHINE_HAS_IDTE) {
		__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
			gmap_pmdp_idte_global(mm, addr);
	} else {
		__pmdp_csp(pmdp);
		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
			gmap_pmdp_csp(mm, addr);
	}
}

static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		pmdp_idte_local(mm, addr, pmdp);
	else
		pmdp_idte_global(mm, addr, pmdp);
	atomic_dec(&mm->context.flush_count);
	return old;
}

static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		set_pmd(pmdp, set_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_INVALID)));
		mm->context.flush_mm = 1;
		if (mm_has_pgste(mm))
			gmap_pmdp_invalidate(mm, addr);
	} else {
		pmdp_idte_global(mm, addr, pmdp);
	}
	atomic_dec(&mm->context.flush_count);
	return old;
}

#ifdef CONFIG_PGSTE
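/*
 * Walk the page table hierarchy down to the pmd level. Returns 0 and
 * sets *pmdp on success, -ENOENT if a table in the hierarchy is not
 * present, and -EFAULT if the address is not covered by a VMA or is
 * mapped by an (unsupported) large pud.
 */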
static int pmd_lookup(struct mm_struct *mm, unsigned long addr, pmd_t **pmdp)
{
	struct vm_area_struct *vma;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	/* We need a valid VMA, otherwise this is clearly a fault. */
	vma = vma_lookup(mm, addr);
	if (!vma)
		return -EFAULT;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return -ENOENT;

	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(*p4d))
		return -ENOENT;

	pud = pud_offset(p4d, addr);
	if (!pud_present(*pud))
		return -ENOENT;

	/* Large PUDs are not supported yet. */
	if (pud_large(*pud))
		return -EFAULT;

	*pmdp = pmd_offset(pud, addr);
	return 0;
}
#endif

pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_direct(mm, addr, pmdp);
	set_pmd(pmdp, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_direct);

pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_lazy(mm, addr, pmdp);
	set_pmd(pmdp, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_lazy);

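/*
 * The pud variants mirror the pmd helpers above, minus the gmap
 * propagation. For machines without IDTE the invalidation falls back
 * to __pmdp_csp(), which works because the invalid bit is located at
 * the same position in pmd and pud entries.
 */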
static inline void pudp_idte_local(struct mm_struct *mm,
				   unsigned long addr, pud_t *pudp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_LOCAL);
	else
		__pudp_idte(addr, pudp, 0, 0, IDTE_LOCAL);
}

static inline void pudp_idte_global(struct mm_struct *mm,
				    unsigned long addr, pud_t *pudp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_GLOBAL);
	else if (MACHINE_HAS_IDTE)
		__pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL);
	else
		/*
		 * Invalid bit position is the same for pmd and pud, so we can
		 * re-use __pmdp_csp() here
		 */
		__pmdp_csp((pmd_t *) pudp);
}

static inline pud_t pudp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pud_t *pudp)
{
	pud_t old;

	old = *pudp;
	if (pud_val(old) & _REGION_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		pudp_idte_local(mm, addr, pudp);
	else
		pudp_idte_global(mm, addr, pudp);
	atomic_dec(&mm->context.flush_count);
	return old;
}

pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pud_t *pudp, pud_t new)
{
	pud_t old;

	preempt_disable();
	old = pudp_flush_direct(mm, addr, pudp);
	set_pud(pudp, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pudp_xchg_direct);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
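/*
 * For transparent huge pages the pte page table that was replaced by
 * the huge pmd is not freed but deposited on a list anchored at
 * pmd_huge_pte(), so that it can be withdrawn again when the huge
 * mapping is split. Both functions rely on the caller holding the pmd
 * lock, see the assert_spin_locked() checks.
 */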
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	set_pte(ptep, __pte(_PAGE_INVALID));
	ptep++;
	set_pte(ptep, __pte(_PAGE_INVALID));
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_PGSTE
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	/* the mm_has_pgste() check is done in set_pte_at() */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
	pgste_set_key(ptep, pgste, entry, mm);
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;

	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) |= PGSTE_IN_BIT;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

/**
 * ptep_force_prot - change access rights of a locked pte
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the guest address space
 * @ptep: pointer to the page table entry
 * @prot: indicates guest access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bit: pgste bit to set (e.g. for notification)
 *
 * Returns 0 if the access rights were changed and -EAGAIN if the current
 * and requested access rights are incompatible.
 */
int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, int prot, unsigned long bit)
{
	pte_t entry;
	pgste_t pgste;
	int pte_i, pte_p, nodat;

	pgste = pgste_get_lock(ptep);
	entry = *ptep;
	/* Check pte entry after all locks have been acquired */
	pte_i = pte_val(entry) & _PAGE_INVALID;
	pte_p = pte_val(entry) & _PAGE_PROTECT;
	if ((pte_i && (prot != PROT_NONE)) ||
	    (pte_p && (prot & PROT_WRITE))) {
		pgste_set_unlock(ptep, pgste);
		return -EAGAIN;
	}
	/* Change access rights and set pgste bit */
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	if (prot == PROT_NONE && !pte_i) {
		ptep_flush_direct(mm, addr, ptep, nodat);
		pgste = pgste_update_all(entry, pgste, mm);
		entry = set_pte_bit(entry, __pgprot(_PAGE_INVALID));
	}
	if (prot == PROT_READ && !pte_p) {
		ptep_flush_direct(mm, addr, ptep, nodat);
		entry = clear_pte_bit(entry, __pgprot(_PAGE_INVALID));
		entry = set_pte_bit(entry, __pgprot(_PAGE_PROTECT));
	}
	pgste_val(pgste) |= bit;
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	return 0;
}

int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte)
{
	pgste_t spgste, tpgste;
	pte_t spte, tpte;
	int rc = -EAGAIN;

	if (!(pte_val(*tptep) & _PAGE_INVALID))
		return 0;	/* already shadowed */
	spgste = pgste_get_lock(sptep);
	spte = *sptep;
	if (!(pte_val(spte) & _PAGE_INVALID) &&
	    !((pte_val(spte) & _PAGE_PROTECT) &&
	      !(pte_val(pte) & _PAGE_PROTECT))) {
		pgste_val(spgste) |= PGSTE_VSIE_BIT;
		tpgste = pgste_get_lock(tptep);
		tpte = __pte((pte_val(spte) & PAGE_MASK) |
			     (pte_val(pte) & _PAGE_PROTECT));
		/* don't touch the storage key - it belongs to parent pgste */
		tpgste = pgste_set_pte(tptep, tpgste, tpte);
		pgste_set_unlock(tptep, tpgste);
		rc = 1;
	}
	pgste_set_unlock(sptep, spgste);
	return rc;
}

void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
{
	pgste_t pgste;
	int nodat;

	pgste = pgste_get_lock(ptep);
	/* notifier is called by the caller */
	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
	ptep_flush_direct(mm, saddr, ptep, nodat);
	/* don't touch the storage key - it belongs to parent pgste */
	pgste = pgste_set_pte(ptep, pgste, __pte(_PAGE_INVALID));
	pgste_set_unlock(ptep, pgste);
}

static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
{
	if (!non_swap_entry(entry))
		dec_mm_counter(mm, MM_SWAPENTS);
	else if (is_migration_entry(entry)) {
		struct page *page = pfn_swap_entry_to_page(entry);

		dec_mm_counter(mm, mm_counter(page));
	}
	free_swap_and_cache(entry);
}

void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset)
{
	unsigned long pgstev;
	pgste_t pgste;
	pte_t pte;

	/* Zap unused and logically-zero pages */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	pte = *ptep;
	if (!reset && pte_swap(pte) &&
	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
	     (pgstev & _PGSTE_GPS_ZERO))) {
		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
		pte_clear(mm, addr, ptep);
	}
	if (reset)
		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long ptev;
	pgste_t pgste;

	/* Clear storage key ACC and F, but set R/C */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
	ptev = pte_val(*ptep);
	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

/*
 * Test and reset if a guest page is dirty
 */
bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;
	bool dirty;
	int nodat;

	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
		nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
		ptep_ipte_global(mm, addr, ptep, nodat);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte = set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
		else
			pte = set_pte_bit(pte, __pgprot(_PAGE_INVALID));
		set_pte(ptep, pte);
	}
	pgste_set_unlock(ptep, pgste);
	return dirty;
}
EXPORT_SYMBOL_GPL(ptep_test_and_clear_uc);

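/*
 * Set the guest storage key at @addr. Without a mapped pte the key is
 * kept in the PGSTE only; for a huge pmd the real storage key of the
 * backing page is updated directly. @nq selects the non-quiescing form
 * of the key update where the architecture permits it.
 */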
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq)
{
	unsigned long keyul, paddr;
	spinlock_t *ptl;
	pgste_t old, new;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If we don't have a PTE table and if there is no huge page mapped,
	 * we can ignore attempts to set the key to 0, because it already is 0.
	 */
	switch (pmd_lookup(mm, addr, &pmdp)) {
	case -ENOENT:
		return key ? -EFAULT : 0;
	case 0:
		break;
	default:
		return -EFAULT;
	}

	ptl = pmd_lock(mm, pmdp);
	if (!pmd_present(*pmdp)) {
		spin_unlock(ptl);
		return key ? -EFAULT : 0;
	}

	if (pmd_large(*pmdp)) {
		paddr = pmd_val(*pmdp) & HPAGE_MASK;
		paddr |= addr & ~HPAGE_MASK;
		/*
		 * Huge pmds need quiescing operations, they are
		 * always mapped.
		 */
		page_set_storage_key(paddr, key, 1);
		spin_unlock(ptl);
		return 0;
	}
	spin_unlock(ptl);

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	keyul = (unsigned long) key;
	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long bits, skey;

		paddr = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(paddr);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(paddr, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);

/*
 * Conditionally set a guest storage key (handling csske).
 * oldkey will be updated when either mr or mc is set and a pointer is given.
 *
 * Returns 0 if a guest's storage key update wasn't necessary, 1 if the guest
 * storage key was updated and -EFAULT on access errors.
 */
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc)
{
	unsigned char tmp, mask = _PAGE_ACC_BITS | _PAGE_FP_BIT;
	int rc;

	/* we can drop the pgste lock between getting and setting the key */
	if (mr | mc) {
		rc = get_guest_storage_key(current->mm, addr, &tmp);
		if (rc)
			return rc;
		if (oldkey)
			*oldkey = tmp;
		if (!mr)
			mask |= _PAGE_REFERENCED;
		if (!mc)
			mask |= _PAGE_CHANGED;
		if (!((tmp ^ key) & mask))
			return 0;
	}
	rc = set_guest_storage_key(current->mm, addr, key, nq);
	return rc < 0 ? rc : 1;
}
EXPORT_SYMBOL(cond_set_guest_storage_key);

/*
 * Reset a guest reference bit (rrbe), returning the reference and changed bit.
 *
 * Returns < 0 in case of error, otherwise the cc to be reported to the guest.
 */
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	unsigned long paddr;
	pgste_t old, new;
	pmd_t *pmdp;
	pte_t *ptep;
	int cc = 0;

	/*
	 * If we don't have a PTE table and if there is no huge page mapped,
	 * the storage key is 0 and there is nothing for us to do.
	 */
	switch (pmd_lookup(mm, addr, &pmdp)) {
	case -ENOENT:
		return 0;
	case 0:
		break;
	default:
		return -EFAULT;
	}

	ptl = pmd_lock(mm, pmdp);
	if (!pmd_present(*pmdp)) {
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_large(*pmdp)) {
		paddr = pmd_val(*pmdp) & HPAGE_MASK;
		paddr |= addr & ~HPAGE_MASK;
		cc = page_reset_referenced(paddr);
		spin_unlock(ptl);
		return cc;
	}
	spin_unlock(ptl);

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	new = old = pgste_get_lock(ptep);
	/* Reset guest reference bit only */
	pgste_val(new) &= ~PGSTE_GR_BIT;

	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		paddr = pte_val(*ptep) & PAGE_MASK;
		cc = page_reset_referenced(paddr);
		/* Merge real referenced bit into host-set */
		pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
	}
	/* Reflect guest's logical view, not physical */
	cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49;
	/* Changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT)
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return cc;
}
EXPORT_SYMBOL(reset_guest_reference_bit);

int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key)
{
	unsigned long paddr;
	spinlock_t *ptl;
	pgste_t pgste;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If we don't have a PTE table and if there is no huge page mapped,
	 * the storage key is 0.
	 */
	*key = 0;

	switch (pmd_lookup(mm, addr, &pmdp)) {
	case -ENOENT:
		return 0;
	case 0:
		break;
	default:
		return -EFAULT;
	}

	ptl = pmd_lock(mm, pmdp);
	if (!pmd_present(*pmdp)) {
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_large(*pmdp)) {
		paddr = pmd_val(*pmdp) & HPAGE_MASK;
		paddr |= addr & ~HPAGE_MASK;
		*key = page_get_storage_key(paddr);
		spin_unlock(ptl);
		return 0;
	}
	spin_unlock(ptl);

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	pgste = pgste_get_lock(ptep);
	*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	paddr = pte_val(*ptep) & PAGE_MASK;
	if (!(pte_val(*ptep) & _PAGE_INVALID))
		*key = page_get_storage_key(paddr);
	/* Reflect guest's logical view, not physical */
	*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(get_guest_storage_key);

/**
 * pgste_perform_essa - perform ESSA actions on the PGSTE.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @orc: the specific action to perform, see the ESSA_SET_* macros.
 * @oldpte: the PTE will be saved there if the pointer is not NULL.
 * @oldpgste: the old PGSTE will be saved there if the pointer is not NULL.
 *
 * Return: 1 if the page is to be added to the CBRL, otherwise 0,
 *	   or < 0 in case of error. -EINVAL is returned for invalid values
 *	   of orc, -EFAULT for invalid addresses.
 */
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste)
{
	struct vm_area_struct *vma;
	unsigned long pgstev;
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;
	int res = 0;

	WARN_ON_ONCE(orc > ESSA_MAX);
	if (unlikely(orc > ESSA_MAX))
		return -EINVAL;

	vma = vma_lookup(mm, hva);
	if (!vma || is_vm_hugetlb_page(vma))
		return -EFAULT;
	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	if (oldpte)
		*oldpte = pte_val(*ptep);
	if (oldpgste)
		*oldpgste = pgstev;

	switch (orc) {
	case ESSA_GET_STATE:
		break;
	case ESSA_SET_STABLE:
		pgstev &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
		pgstev |= _PGSTE_GPS_USAGE_STABLE;
		break;
	case ESSA_SET_UNUSED:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_UNUSED;
		if (pte_val(*ptep) & _PAGE_INVALID)
			res = 1;
		break;
	case ESSA_SET_VOLATILE:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
		if (pte_val(*ptep) & _PAGE_INVALID)
			res = 1;
		break;
	case ESSA_SET_POT_VOLATILE:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
			pgstev |= _PGSTE_GPS_USAGE_POT_VOLATILE;
			break;
		}
		if (pgstev & _PGSTE_GPS_ZERO) {
			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
			break;
		}
		if (!(pgstev & PGSTE_GC_BIT)) {
			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
			res = 1;
			break;
		}
		break;
	case ESSA_SET_STABLE_RESIDENT:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_STABLE;
		/*
		 * Since the resident state can go away any time after this
		 * call, we will not make this page resident. We can revisit
		 * this decision if a guest will ever start using this.
		 */
		break;
	case ESSA_SET_STABLE_IF_RESIDENT:
		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
			pgstev &= ~_PGSTE_GPS_USAGE_MASK;
			pgstev |= _PGSTE_GPS_USAGE_STABLE;
		}
		break;
	case ESSA_SET_STABLE_NODAT:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_STABLE | _PGSTE_GPS_NODAT;
		break;
	default:
		/* we should never get here! */
		break;
	}
	/* If we are discarding a page, set it to logical zero */
	if (res)
		pgstev |= _PGSTE_GPS_ZERO;

	pgste_val(pgste) = pgstev;
	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	return res;
}
EXPORT_SYMBOL(pgste_perform_essa);

/**
 * set_pgste_bits - set specific PGSTE bits.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @bits: a bitmask representing the bits that will be touched
 * @value: the values of the bits to be written. Only the bits in the mask
 *	   will be written.
 *
 * Return: 0 on success, < 0 in case of error.
 */
int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
		   unsigned long bits, unsigned long value)
{
	struct vm_area_struct *vma;
	spinlock_t *ptl;
	pgste_t new;
	pte_t *ptep;

	vma = vma_lookup(mm, hva);
	if (!vma || is_vm_hugetlb_page(vma))
		return -EFAULT;
	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	new = pgste_get_lock(ptep);

	pgste_val(new) &= ~bits;
	pgste_val(new) |= value & bits;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(set_pgste_bits);

/**
 * get_pgste - get the current PGSTE for the given address.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @pgstep: will be written with the current PGSTE for the given address.
 *
 * Return: 0 on success, < 0 in case of error.
 */
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
{
	struct vm_area_struct *vma;
	spinlock_t *ptl;
	pte_t *ptep;

	vma = vma_lookup(mm, hva);
	if (!vma || is_vm_hugetlb_page(vma))
		return -EFAULT;
	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	*pgstep = pgste_val(pgste_get(ptep));
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(get_pgste);
#endif