Merge branch 'for-next/mte' into for-next/core
KASAN optimisations for the hardware tagging (MTE) implementation.

* for-next/mte:
  kasan: disable freed user page poisoning with HW tags
  arm64: mte: handle tags zeroing at page allocation time
  kasan: use separate (un)poison implementation for integrated init
  mm: arch: remove indirection level in alloc_zeroed_user_highpage_movable()
  kasan: speed up mte_set_mem_tag_range
commit fdceddb06a
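The key change behind "kasan: speed up mte_set_mem_tag_range" is a three-phase tagging loop: tag granule by granule up to the first DC G(Z)VA block boundary, tag whole blocks with the block-wide instruction, then tag the remaining tail granule by granule. The standalone C sketch below only illustrates that loop structure; set_tag_granule()/set_tag_block(), the example block size and the program itself are placeholders for the real STG/STZG and DC GVA/GZVA instructions, not kernel APIs — the actual implementation is in the mte_set_mem_tag_range() hunk further down.

#include <stdint.h>
#include <stdio.h>

#define GRANULE_SIZE 16u        /* MTE granule size: one tag per 16 bytes */

/* Placeholders for the real instructions (STG/STZG and DC GVA/GZVA). */
static void set_tag_granule(uint64_t p) { printf("granule %#llx\n", (unsigned long long)p); }
static void set_tag_block(uint64_t p)   { printf("block   %#llx\n", (unsigned long long)p); }

static void set_mem_tag_range(uint64_t curr, uint64_t size, uint64_t block_size)
{
        uint64_t mask = block_size - 1;
        uint64_t end1 = curr | mask;    /* end of the first (partial) block */
        uint64_t end3 = curr + size;    /* end of the whole range */
        uint64_t end2 = end3 & ~mask;   /* last whole-block boundary */

        if (size >= 2 * block_size) {
                /* Phase 1: granule stores up to the first block boundary. */
                do {
                        set_tag_granule(curr);
                        curr += GRANULE_SIZE;
                } while (curr < end1);

                /* Phase 2: whole blocks with the much cheaper block operation. */
                do {
                        set_tag_block(curr);
                        curr += block_size;
                } while (curr < end2);
        }

        /* Phase 3: the tail, and any range shorter than two blocks. */
        while (curr < end3) {
                set_tag_granule(curr);
                curr += GRANULE_SIZE;
        }
}

int main(void)
{
        /* Example: 64-byte block size, 256-byte range starting mid-block. */
        set_mem_tag_range(0x1010, 256, 64);
        return 0;
}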
@@ -17,9 +17,9 @@
extern void clear_page(void *page);
#define clear_user_page(page, vaddr, pg)	clear_page(page)

#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vmaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vmaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE

extern void copy_page(void * _to, void * _from);
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
@@ -48,43 +48,84 @@ static inline u8 mte_get_random_tag(void)
	return mte_get_ptr_tag(addr);
}

static inline u64 __stg_post(u64 p)
{
	asm volatile(__MTE_PREAMBLE "stg %0, [%0], #16"
		     : "+r"(p)
		     :
		     : "memory");
	return p;
}

static inline u64 __stzg_post(u64 p)
{
	asm volatile(__MTE_PREAMBLE "stzg %0, [%0], #16"
		     : "+r"(p)
		     :
		     : "memory");
	return p;
}

static inline void __dc_gva(u64 p)
{
	asm volatile(__MTE_PREAMBLE "dc gva, %0" : : "r"(p) : "memory");
}

static inline void __dc_gzva(u64 p)
{
	asm volatile(__MTE_PREAMBLE "dc gzva, %0" : : "r"(p) : "memory");
}

/*
 * Assign allocation tags for a region of memory based on the pointer tag.
 * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
 * size must be non-zero and MTE_GRANULE_SIZE aligned.
 * size must be MTE_GRANULE_SIZE aligned.
 */
static inline void mte_set_mem_tag_range(void *addr, size_t size,
					 u8 tag, bool init)
static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
					 bool init)
{
	u64 curr, end;
	u64 curr, mask, dczid_bs, end1, end2, end3;

	if (!size)
		return;
	/* Read DC G(Z)VA block size from the system register. */
	dczid_bs = 4ul << (read_cpuid(DCZID_EL0) & 0xf);

	curr = (u64)__tag_set(addr, tag);
	end = curr + size;
	mask = dczid_bs - 1;
	/* STG/STZG up to the end of the first block. */
	end1 = curr | mask;
	end3 = curr + size;
	/* DC GVA / GZVA in [end1, end2) */
	end2 = end3 & ~mask;

	/*
	 * 'asm volatile' is required to prevent the compiler to move
	 * the statement outside of the loop.
	 * The following code uses STG on the first DC GVA block even if the
	 * start address is aligned - it appears to be faster than an alignment
	 * check + conditional branch. Also, if the range size is at least 2 DC
	 * GVA blocks, the first two loops can use post-condition to save one
	 * branch each.
	 */
	if (init) {
		do {
			asm volatile(__MTE_PREAMBLE "stzg %0, [%0]"
				     :
				     : "r" (curr)
				     : "memory");
			curr += MTE_GRANULE_SIZE;
		} while (curr != end);
	} else {
		do {
			asm volatile(__MTE_PREAMBLE "stg %0, [%0]"
				     :
				     : "r" (curr)
				     : "memory");
			curr += MTE_GRANULE_SIZE;
		} while (curr != end);
	}
#define SET_MEMTAG_RANGE(stg_post, dc_gva)		\
	do {						\
		if (size >= 2 * dczid_bs) {		\
			do {				\
				curr = stg_post(curr);	\
			} while (curr < end1);		\
							\
			do {				\
				dc_gva(curr);		\
				curr += dczid_bs;	\
			} while (curr < end2);		\
		}					\
							\
		while (curr < end3)			\
			curr = stg_post(curr);		\
	} while (0)

	if (init)
		SET_MEMTAG_RANGE(__stzg_post, __dc_gzva);
	else
		SET_MEMTAG_RANGE(__stg_post, __dc_gva);
#undef SET_MEMTAG_RANGE
}

void mte_enable_kernel_sync(void);
@@ -37,6 +37,7 @@ void mte_free_tag_storage(char *storage);
/* track which pages have valid allocation tags */
#define PG_mte_tagged	PG_arch_2

void mte_zero_clear_page_tags(void *addr);
void mte_sync_tags(pte_t *ptep, pte_t pte);
void mte_copy_page_tags(void *kto, const void *kfrom);
void mte_thread_init_user(void);
@@ -53,6 +54,9 @@ int mte_ptrace_copy_tags(struct task_struct *child, long request,
/* unused if !CONFIG_ARM64_MTE, silence the compiler */
#define PG_mte_tagged	0

static inline void mte_zero_clear_page_tags(void *addr)
{
}
static inline void mte_sync_tags(pte_t *ptep, pte_t pte)
{
}
@@ -13,6 +13,7 @@
#ifndef __ASSEMBLY__

#include <linux/personality.h> /* for READ_IMPLIES_EXEC */
#include <linux/types.h> /* for gfp_t */
#include <asm/pgtable-types.h>

struct page;
@@ -28,9 +29,12 @@ void copy_user_highpage(struct page *to, struct page *from,
void copy_highpage(struct page *to, struct page *from);
#define __HAVE_ARCH_COPY_HIGHPAGE

#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
						unsigned long vaddr);
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE

void tag_clear_highpage(struct page *to);
#define __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
@@ -36,6 +36,26 @@ SYM_FUNC_START(mte_clear_page_tags)
	ret
SYM_FUNC_END(mte_clear_page_tags)

/*
 * Zero the page and tags at the same time
 *
 * Parameters:
 *	x0 - address to the beginning of the page
 */
SYM_FUNC_START(mte_zero_clear_page_tags)
	mrs	x1, dczid_el0
	and	w1, w1, #0xf
	mov	x2, #4
	lsl	x1, x2, x1
	and	x0, x0, #(1 << MTE_TAG_SHIFT) - 1 // clear the tag

1:	dc	gzva, x0
	add	x0, x0, x1
	tst	x0, #(PAGE_SIZE - 1)
	b.ne	1b
	ret
SYM_FUNC_END(mte_zero_clear_page_tags)

/*
 * Copy the tags from the source page to the destination one
 *   x0 - address of the destination page
@@ -919,3 +919,29 @@ void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
	debug_exception_exit(regs);
}
NOKPROBE_SYMBOL(do_debug_exception);

/*
 * Used during anonymous page fault handling.
 */
struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
						unsigned long vaddr)
{
	gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO;

	/*
	 * If the page is mapped with PROT_MTE, initialise the tags at the
	 * point of allocation and page zeroing as this is usually faster than
	 * separate DC ZVA and STGM.
	 */
	if (vma->vm_flags & VM_MTE)
		flags |= __GFP_ZEROTAGS;

	return alloc_page_vma(flags, vma, vaddr);
}

void tag_clear_highpage(struct page *page)
{
	mte_zero_clear_page_tags(page_address(page));
	page_kasan_tag_reset(page);
	set_bit(PG_mte_tagged, &page->flags);
}
@@ -46,9 +46,13 @@
#endif

#ifdef CONFIG_KASAN_HW_TAGS
#define TCR_KASAN_HW_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
#define TCR_MTE_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
#else
#define TCR_KASAN_HW_FLAGS 0
/*
 * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
 * TBI being enabled at EL1.
 */
#define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1
#endif

/*
@@ -454,7 +458,7 @@ SYM_FUNC_START(__cpu_setup)
	msr_s	SYS_TFSRE0_EL1, xzr

	/* set the TCR_EL1 bits */
	mov_q	x10, TCR_KASAN_HW_FLAGS
	mov_q	x10, TCR_MTE_FLAGS
	orr	tcr, tcr, x10
1:
#endif
@@ -82,16 +82,16 @@ do { \
} while (0)


#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr)		\
#define alloc_zeroed_user_highpage_movable(vma, vaddr)			\
({									\
	struct page *page = alloc_page_vma(				\
		GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr);	\
		GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr);		\
	if (page)							\
		flush_dcache_page(page);				\
	page;								\
})

#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE

#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
@@ -13,9 +13,9 @@ extern unsigned long memory_end;
#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE

#define __pa(vaddr)		((unsigned long)(vaddr))
#define __va(paddr)		((void *)((unsigned long)(paddr)))
@@ -68,9 +68,9 @@ static inline void copy_page(void *to, void *from)
#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE

/*
 * These are used to make use of C type-checking..
@@ -34,9 +34,9 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
	copy_page(to, from);
}

#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE

#ifndef __pa
#define __pa(x)		__phys_addr((unsigned long)(x))
@@ -53,8 +53,10 @@ struct vm_area_struct;
#define ___GFP_HARDWALL		0x100000u
#define ___GFP_THISNODE		0x200000u
#define ___GFP_ACCOUNT		0x400000u
#define ___GFP_ZEROTAGS		0x800000u
#define ___GFP_SKIP_KASAN_POISON	0x1000000u
#ifdef CONFIG_LOCKDEP
#define ___GFP_NOLOCKDEP	0x800000u
#define ___GFP_NOLOCKDEP	0x2000000u
#else
#define ___GFP_NOLOCKDEP	0
#endif
@@ -229,16 +231,25 @@ struct vm_area_struct;
 * %__GFP_COMP address compound page metadata.
 *
 * %__GFP_ZERO returns a zeroed page on success.
 *
 * %__GFP_ZEROTAGS returns a page with zeroed memory tags on success, if
 * __GFP_ZERO is set.
 *
 * %__GFP_SKIP_KASAN_POISON returns a page which does not need to be poisoned
 * on deallocation. Typically used for userspace pages. Currently only has an
 * effect in HW tags mode.
 */
#define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)
#define __GFP_COMP	((__force gfp_t)___GFP_COMP)
#define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)
#define __GFP_ZEROTAGS	((__force gfp_t)___GFP_ZEROTAGS)
#define __GFP_SKIP_KASAN_POISON	((__force gfp_t)___GFP_SKIP_KASAN_POISON)

/* Disable lockdep for GFP context tracking */
#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)

/* Room for N __GFP_FOO bits */
#define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_SHIFT (25 + IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/**
@@ -319,7 +330,8 @@ struct vm_area_struct;
#define GFP_DMA		__GFP_DMA
#define GFP_DMA32	__GFP_DMA32
#define GFP_HIGHUSER	(GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE	(GFP_HIGHUSER | __GFP_MOVABLE)
#define GFP_HIGHUSER_MOVABLE	(GFP_HIGHUSER | __GFP_MOVABLE | \
			 __GFP_SKIP_KASAN_POISON)
#define GFP_TRANSHUGE_LIGHT	((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
			 __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
#define GFP_TRANSHUGE	(GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
@@ -152,28 +152,24 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or reclaimed
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);
	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);
@@ -182,21 +178,6 @@ __alloc_zeroed_user_highpage(gfp_t movableflags,
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or reclaimed
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
@@ -204,6 +185,14 @@ static inline void clear_highpage(struct page *page)
	kunmap_atomic(kaddr);
}

#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

static inline void tag_clear_highpage(struct page *page)
{
}

#endif

/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
@@ -2,6 +2,7 @@
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/static_key.h>
#include <linux/types.h>
@@ -79,14 +80,6 @@ static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
	bool is_kmalloc;
};

#ifdef CONFIG_KASAN_HW_TAGS

DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
@@ -101,11 +94,14 @@ static inline bool kasan_has_integrated_init(void)
	return kasan_enabled();
}

void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
void kasan_free_pages(struct page *page, unsigned int order);

#else /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_enabled(void)
{
	return true;
	return IS_ENABLED(CONFIG_KASAN);
}

static inline bool kasan_has_integrated_init(void)
@@ -113,8 +109,30 @@ static inline bool kasan_has_integrated_init(void)
	return false;
}

static __always_inline void kasan_alloc_pages(struct page *page,
					      unsigned int order, gfp_t flags)
{
	/* Only available for integrated init. */
	BUILD_BUG();
}

static __always_inline void kasan_free_pages(struct page *page,
					     unsigned int order)
{
	/* Only available for integrated init. */
	BUILD_BUG();
}

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
	bool is_kmalloc;
};

slab_flags_t __kasan_never_merge(void);
static __always_inline slab_flags_t kasan_never_merge(void)
{
@@ -130,20 +148,20 @@ static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
		__kasan_unpoison_range(addr, size);
}

void __kasan_alloc_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_alloc_pages(struct page *page,
void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_alloc_pages(page, order, init);
		__kasan_poison_pages(page, order, init);
}

void __kasan_free_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_free_pages(struct page *page,
void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_free_pages(page, order, init);
		__kasan_unpoison_pages(page, order, init);
}

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
@@ -285,21 +303,15 @@ void kasan_restore_multi_shot(bool enabled);

#else /* CONFIG_KASAN */

static inline bool kasan_enabled(void)
{
	return false;
}
static inline bool kasan_has_integrated_init(void)
{
	return false;
}
static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_alloc_pages(struct page *page, unsigned int order, bool init) {}
static inline void kasan_free_pages(struct page *page, unsigned int order, bool init) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}
@@ -137,6 +137,9 @@ enum pageflags {
#endif
#ifdef CONFIG_64BIT
	PG_arch_2,
#endif
#ifdef CONFIG_KASAN_HW_TAGS
	PG_skip_kasan_poison,
#endif
	__NR_PAGEFLAGS,
@@ -443,6 +446,12 @@ TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

#ifdef CONFIG_KASAN_HW_TAGS
PAGEFLAG(SkipKASanPoison, skip_kasan_poison, PF_HEAD)
#else
PAGEFLAG_FALSE(SkipKASanPoison)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
@@ -85,6 +85,12 @@
#define IF_HAVE_PG_ARCH_2(flag,string)
#endif

#ifdef CONFIG_KASAN_HW_TAGS
#define IF_HAVE_PG_SKIP_KASAN_POISON(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_SKIP_KASAN_POISON(flag,string)
#endif

#define __def_pageflag_names						\
	{1UL << PG_locked,		"locked"	},		\
	{1UL << PG_waiters,		"waiters"	},		\
@@ -112,7 +118,8 @@ IF_HAVE_PG_UNCACHED(PG_uncached,	"uncached"	) \
IF_HAVE_PG_HWPOISON(PG_hwpoison,	"hwpoison"	) \
IF_HAVE_PG_IDLE(PG_young,		"young"		) \
IF_HAVE_PG_IDLE(PG_idle,		"idle"		) \
IF_HAVE_PG_ARCH_2(PG_arch_2,		"arch_2"	)
IF_HAVE_PG_ARCH_2(PG_arch_2,		"arch_2"	) \
IF_HAVE_PG_SKIP_KASAN_POISON(PG_skip_kasan_poison, "skip_kasan_poison")

#define show_page_flags(flags)						\
	(flags) ? __print_flags(flags, "|",				\
@@ -97,7 +97,7 @@ slab_flags_t __kasan_never_merge(void)
	return 0;
}

void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
	u8 tag;
	unsigned long i;
@@ -111,7 +111,7 @@ void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
	kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
}

void __kasan_free_pages(struct page *page, unsigned int order, bool init)
void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
	if (likely(!PageHighMem(page)))
		kasan_poison(page_address(page), PAGE_SIZE << order,
@@ -238,6 +238,38 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
	return &alloc_meta->free_track[0];
}

void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags)
{
	/*
	 * This condition should match the one in post_alloc_hook() in
	 * page_alloc.c.
	 */
	bool init = !want_init_on_free() && want_init_on_alloc(flags);

	if (flags & __GFP_SKIP_KASAN_POISON)
		SetPageSkipKASanPoison(page);

	if (flags & __GFP_ZEROTAGS) {
		int i;

		for (i = 0; i != 1 << order; ++i)
			tag_clear_highpage(page + i);
	} else {
		kasan_unpoison_pages(page, order, init);
	}
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	/*
	 * This condition should match the one in free_pages_prepare() in
	 * page_alloc.c.
	 */
	bool init = want_init_on_free();

	kasan_poison_pages(page, order, init);
}

#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)

void kasan_set_tagging_report_once(bool state)
@@ -106,7 +106,8 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_slab_free_mempool(element);
	else if (pool->alloc == mempool_alloc_pages)
		kasan_free_pages(element, (unsigned long)pool->pool_data, false);
		kasan_poison_pages(element, (unsigned long)pool->pool_data,
				   false);
}

static void kasan_unpoison_element(mempool_t *pool, void *element)
@@ -114,7 +115,8 @@ static void kasan_unpoison_element(mempool_t *pool, void *element)
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_unpoison_range(element, __ksize(element));
	else if (pool->alloc == mempool_alloc_pages)
		kasan_alloc_pages(element, (unsigned long)pool->pool_data, false);
		kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
				     false);
}

static __always_inline void add_element(mempool_t *pool, void *element)
@@ -382,7 +382,7 @@ int page_group_by_mobility_disabled __read_mostly;
static DEFINE_STATIC_KEY_TRUE(deferred_pages);

/*
 * Calling kasan_free_pages() only after deferred memory initialization
 * Calling kasan_poison_pages() only after deferred memory initialization
 * has completed. Poisoning pages during deferred memory init will greatly
 * lengthen the process and cause problem in large memory systems as the
 * deferred pages initialization is done with interrupt disabled.
@@ -394,15 +394,12 @@ static DEFINE_STATIC_KEY_TRUE(deferred_pages);
 * on-demand allocation and then freed again before the deferred pages
 * initialization is done, but this is not likely to happen.
 */
static inline void kasan_free_nondeferred_pages(struct page *page, int order,
						bool init, fpi_t fpi_flags)
static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
{
	if (static_branch_unlikely(&deferred_pages))
		return;
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
			(fpi_flags & FPI_SKIP_KASAN_POISON))
		return;
	kasan_free_pages(page, order, init);
	return static_branch_unlikely(&deferred_pages) ||
	       (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
		(fpi_flags & FPI_SKIP_KASAN_POISON)) ||
	       PageSkipKASanPoison(page);
}

/* Returns true if the struct page for the pfn is uninitialised */
@@ -453,13 +450,11 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
	return false;
}
#else
static inline void kasan_free_nondeferred_pages(struct page *page, int order,
						bool init, fpi_t fpi_flags)
static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
{
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
			(fpi_flags & FPI_SKIP_KASAN_POISON))
		return;
	kasan_free_pages(page, order, init);
	return (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
		(fpi_flags & FPI_SKIP_KASAN_POISON)) ||
	       PageSkipKASanPoison(page);
}

static inline bool early_page_uninitialised(unsigned long pfn)
@@ -1226,10 +1221,16 @@ out:
	return ret;
}

static void kernel_init_free_pages(struct page *page, int numpages)
static void kernel_init_free_pages(struct page *page, int numpages, bool zero_tags)
{
	int i;

	if (zero_tags) {
		for (i = 0; i < numpages; i++)
			tag_clear_highpage(page + i);
		return;
	}

	/* s390's use of memset() could override KASAN redzones. */
	kasan_disable_current();
	for (i = 0; i < numpages; i++) {
@@ -1245,7 +1246,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
			unsigned int order, bool check_free, fpi_t fpi_flags)
{
	int bad = 0;
	bool init;
	bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);

	VM_BUG_ON_PAGE(PageTail(page), page);
@@ -1314,10 +1315,17 @@ static __always_inline bool free_pages_prepare(struct page *page,
	 * With hardware tag-based KASAN, memory tags must be set before the
	 * page becomes unavailable via debug_pagealloc or arch_free_page.
	 */
	init = want_init_on_free();
	if (init && !kasan_has_integrated_init())
		kernel_init_free_pages(page, 1 << order);
	kasan_free_nondeferred_pages(page, order, init, fpi_flags);
	if (kasan_has_integrated_init()) {
		if (!skip_kasan_poison)
			kasan_free_pages(page, order);
	} else {
		bool init = want_init_on_free();

		if (init)
			kernel_init_free_pages(page, 1 << order, false);
		if (!skip_kasan_poison)
			kasan_poison_pages(page, order, init);
	}

	/*
	 * arch_free_page() can make the page's contents inaccessible. s390
|
||||
inline void post_alloc_hook(struct page *page, unsigned int order,
|
||||
gfp_t gfp_flags)
|
||||
{
|
||||
bool init;
|
||||
|
||||
set_page_private(page, 0);
|
||||
set_page_refcounted(page);
|
||||
|
||||
@ -2344,10 +2350,16 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
|
||||
* kasan_alloc_pages and kernel_init_free_pages must be
|
||||
* kept together to avoid discrepancies in behavior.
|
||||
*/
|
||||
init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
|
||||
kasan_alloc_pages(page, order, init);
|
||||
if (init && !kasan_has_integrated_init())
|
||||
kernel_init_free_pages(page, 1 << order);
|
||||
if (kasan_has_integrated_init()) {
|
||||
kasan_alloc_pages(page, order, gfp_flags);
|
||||
} else {
|
||||
bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
|
||||
|
||||
kasan_unpoison_pages(page, order, init);
|
||||
if (init)
|
||||
kernel_init_free_pages(page, 1 << order,
|
||||
gfp_flags & __GFP_ZEROTAGS);
|
||||
}
|
||||
|
||||
set_page_owner(page, order, gfp_flags);
|
||||
}
|
||||
|