mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 20:22:09 +00:00
2d1494fb31
Making virt_to_pfn() a static inline taking a strongly typed (const void *) makes the contract of passing a pointer of that type to the function explicit and exposes any misuse of the macro virt_to_pfn() acting polymorphic and accepting many types such as (void *), (uintptr_t) or (unsigned long) as arguments without warnings. For symmetry do the same with pfn_to_virt() reflecting the current layout in asm-generic/page.h. Doing this reveals a number of offenders in the arch code and the S390-specific drivers, so just bite the bullet and fix up all of those as well. Signed-off-by: Linus Walleij <linus.walleij@linaro.org> Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com> Link: https://lore.kernel.org/r/20230812-virt-to-phys-s390-v2-1-6c40f31fe36f@linaro.org Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
43 lines
1.1 KiB
C
43 lines
1.1 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
|
|
#ifndef _ASM_S390_KFENCE_H
|
|
#define _ASM_S390_KFENCE_H
|
|
|
|
#include <linux/mm.h>
|
|
#include <linux/kfence.h>
|
|
#include <asm/set_memory.h>
|
|
#include <asm/page.h>
|
|
|
|
void __kernel_map_pages(struct page *page, int numpages, int enable);
|
|
|
|
/*
 * Nothing to do at pool-init time on s390: the pool mapping is already
 * split earlier via kfence_split_mapping() (see comment below in this
 * file), so simply report success.
 */
static __always_inline bool arch_kfence_init_pool(void)
{
	return true;
}
|
|
|
|
/* KFENCE tracks protection per page; strip the sub-page offset. */
#define arch_kfence_test_address(addr) ((addr) & PAGE_MASK)
|
|
|
|
/*
 * Do not split the kfence pool to a 4k mapping in arch_kfence_init_pool(),
 * but earlier, while page table allocations still happen with memblock.
 * The reason is that arch_kfence_init_pool() gets called when the system
 * is still in a limbo state - disabling and enabling bottom halves is
 * not yet allowed, but that is what our page_table_alloc() would do.
 */
|
|
/*
 * Split the KFENCE pool mapping down to 4k pages. Compiles to a no-op
 * when CONFIG_KFENCE is disabled.
 */
static __always_inline void kfence_split_mapping(void)
{
#ifdef CONFIG_KFENCE
	/* Number of 4k pages covering the entire KFENCE pool. */
	unsigned long nr_pages = KFENCE_POOL_SIZE >> PAGE_SHIFT;

	set_memory_4k((unsigned long)__kfence_pool, nr_pages);
#endif
}
|
|
|
|
/*
 * Protect or unprotect a single KFENCE page by unmapping/remapping it
 * via __kernel_map_pages(). Always reports success.
 */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	/* Protecting means unmapping, so the enable flag is the inverse. */
	int enable = !protect;

	__kernel_map_pages(virt_to_page((void *)addr), 1, enable);
	return true;
}
|
|
|
|
#endif /* _ASM_S390_KFENCE_H */
|