kasan, x86, arm64, s390: rename functions for modules shadow
Rename kasan_free_shadow to kasan_free_module_shadow and kasan_module_alloc to kasan_alloc_module_shadow.

These functions are used to allocate/free shadow memory for kernel modules when KASAN_VMALLOC is not enabled. The new names better reflect their purpose.

Also reword the comment next to their declaration to improve clarity.

Link: https://lkml.kernel.org/r/36db32bde765d5d0b856f77d2d806e838513fe84.1643047180.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marco Elver <elver@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Collingbourne <pcc@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 63840de296
parent 00a756133b
committed by Linus Torvalds
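For readers unfamiliar with the non-vmalloc KASAN module path, the sketch below illustrates what the renamed pair accomplishes: kasan_alloc_module_shadow() backs a freshly vmalloc'ed module mapping with real shadow pages and tags the mapping with VM_KASAN, and kasan_free_module_shadow() releases that shadow when the vm area is torn down. This is an illustrative simplification written for this note, not the code from mm/kasan/shadow.c: the example_* names are hypothetical, and the rounding, alignment checks, and error handling of the real helpers are reduced to the essentials.

#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical, simplified stand-ins for the renamed helpers. */
static int example_alloc_module_shadow(void *addr, size_t size)
{
	unsigned long shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	size_t shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT, PAGE_SIZE);
	void *shadow;

	/* Back the module mapping's shadow region with real pages. */
	shadow = __vmalloc_node_range(shadow_size, 1, shadow_start,
				      shadow_start + shadow_size, GFP_KERNEL,
				      PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
				      __builtin_return_address(0));
	if (!shadow)
		return -ENOMEM;

	/*
	 * The caller has just vmalloc'ed addr, so a vm area exists.
	 * Mark it so the shadow is freed when the mapping goes away.
	 */
	find_vm_area(addr)->flags |= VM_KASAN;
	return 0;
}

static void example_free_module_shadow(const struct vm_struct *vm)
{
	/* Only mappings that received shadow above carry VM_KASAN. */
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}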
arch/arm64/kernel/module.c
@@ -58,7 +58,7 @@ void *module_alloc(unsigned long size)
 				PAGE_KERNEL, 0, NUMA_NO_NODE,
 				__builtin_return_address(0));
 
-	if (p && (kasan_module_alloc(p, size, gfp_mask) < 0)) {
+	if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
 		vfree(p);
 		return NULL;
 	}

arch/s390/kernel/module.c
@@ -45,7 +45,7 @@ void *module_alloc(unsigned long size)
 	p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
 				 gfp_mask, PAGE_KERNEL_EXEC, VM_DEFER_KMEMLEAK, NUMA_NO_NODE,
 				 __builtin_return_address(0));
-	if (p && (kasan_module_alloc(p, size, gfp_mask) < 0)) {
+	if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
 		vfree(p);
 		return NULL;
 	}

arch/x86/kernel/module.c
@@ -78,7 +78,7 @@ void *module_alloc(unsigned long size)
 				    MODULES_END, gfp_mask,
 				    PAGE_KERNEL, VM_DEFER_KMEMLEAK, NUMA_NO_NODE,
 				    __builtin_return_address(0));
-	if (p && (kasan_module_alloc(p, size, gfp_mask) < 0)) {
+	if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
 		vfree(p);
 		return NULL;
 	}

include/linux/kasan.h
@@ -433,17 +433,17 @@ static inline void kasan_populate_early_vm_area_shadow(void *start,
 		!defined(CONFIG_KASAN_VMALLOC)
 
 /*
- * These functions provide a special case to support backing module
- * allocations with real shadow memory. With KASAN vmalloc, the special
- * case is unnecessary, as the work is handled in the generic case.
+ * These functions allocate and free shadow memory for kernel modules.
+ * They are only required when KASAN_VMALLOC is not supported, as otherwise
+ * shadow memory is allocated by the generic vmalloc handlers.
  */
-int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask);
-void kasan_free_shadow(const struct vm_struct *vm);
+int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
+void kasan_free_module_shadow(const struct vm_struct *vm);
 
 #else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
 
-static inline int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
-static inline void kasan_free_shadow(const struct vm_struct *vm) {}
+static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
+static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}
 
 #endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
 

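Because the #else branch above keeps no-op stubs for both helpers, arch module_alloc() implementations can call kasan_alloc_module_shadow() unconditionally, whether or not KASAN_VMALLOC is in effect. A minimal sketch of that caller pattern follows; example_module_alloc() is a hypothetical allocator written for this note that mirrors the arch hunks earlier in the diff, not any arch's actual code.

#include <linux/kasan.h>
#include <linux/vmalloc.h>

/* Hypothetical allocator following the pattern of the arch hunks above. */
static void *example_module_alloc(unsigned long size)
{
	void *p = __vmalloc(size, GFP_KERNEL);

	/*
	 * With KASAN_VMALLOC the call below is a stub that returns 0;
	 * otherwise it backs the new mapping with real shadow memory.
	 */
	if (p && (kasan_alloc_module_shadow(p, size, GFP_KERNEL) < 0)) {
		vfree(p);
		return NULL;
	}
	return p;
}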
mm/kasan/shadow.c
@@ -498,7 +498,7 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
 
 #else /* CONFIG_KASAN_VMALLOC */
 
-int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask)
+int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask)
 {
 	void *ret;
 	size_t scaled_size;
@@ -534,7 +534,7 @@ int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask)
 	return -ENOMEM;
 }
 
-void kasan_free_shadow(const struct vm_struct *vm)
+void kasan_free_module_shadow(const struct vm_struct *vm)
 {
 	if (vm->flags & VM_KASAN)
 		vfree(kasan_mem_to_shadow(vm->addr));

mm/vmalloc.c
@@ -2547,7 +2547,7 @@ struct vm_struct *remove_vm_area(const void *addr)
 		va->vm = NULL;
 		spin_unlock(&vmap_area_lock);
 
-		kasan_free_shadow(vm);
+		kasan_free_module_shadow(vm);
 		free_unmap_vmap_area(va);
 
 		return vm;