mm: add comments for allocation helpers explaining why they are macros

A number of allocation helper functions were converted into macros to
account them at the call sites. Add a comment for each converted
allocation helper explaining why it has to be a macro and why we typecast
the return value wherever required. The patch also moves
acpi_os_acquire_object() closer to other allocation helpers to group them
together under the same comment. The patch has no functional changes.
Link: https://lkml.kernel.org/r/20240703174225.3891393-1-surenb@google.com
Fixes: 2c321f3f70 ("mm: change inlined allocation helpers to account at the call site")
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Suggested-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Christian König <christian.koenig@amd.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Jan Kara <jack@suse.cz>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Thorsten Blum <thorsten.blum@toblux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

commit 3b0ba54d5f (parent cd1e0dac3a)
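
For context on why these helpers cannot stay inline functions: with memory
allocation profiling enabled, the kmalloc()-family wrappers expand through
alloc_hooks(), which defines a static alloc_tag at each expansion site (the
__dev_alloc_pages()/alloc_hooks() context lines further down show that
pattern). If a helper is a static inline function, that expansion happens
exactly once, inside the helper, so every caller is charged to the same tag;
making the helper a macro pushes the expansion out to each caller. Below is a
minimal userspace sketch of the idea, not kernel code; counted_alloc(),
site_counter, helper_fn() and helper_macro() are invented names.

#include <stdio.h>
#include <stdlib.h>

/* One counter per expansion site, standing in for a kernel alloc_tag. */
struct site_counter {
        const char *file;
        int line;
        unsigned long calls;
};

/* Every place this macro expands gets its own static counter. */
#define counted_alloc(size) ({ \
        static struct site_counter __site = { __FILE__, __LINE__, 0 }; \
        __site.calls++; \
        printf("%s:%d -> %lu call(s)\n", __site.file, __site.line, __site.calls); \
        malloc(size); \
})

/* Helper as a function: the macro expands once, all callers share one counter. */
static inline void *helper_fn(size_t size)
{
        return counted_alloc(size);
}

/* Helper as a macro: the expansion (and its counter) moves to each call site. */
#define helper_macro(size) counted_alloc(size)

int main(void)
{
        free(helper_fn(16));    /* both charged to the line inside helper_fn() */
        free(helper_fn(32));
        free(helper_macro(16)); /* charged to this line */
        free(helper_macro(32)); /* charged to this line */
        return 0;
}

Running it, both helper_fn() calls report the same file:line (counts 1 and 2),
while each helper_macro() call reports its own line with its own count; that
per-call-site attribution is what the alloc_tag machinery needs, and it is
lost as soon as the helper becomes a function.
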
@@ -46,6 +46,10 @@ static inline void nfs_add_stats(const struct inode *inode,
 	nfs_add_server_stats(NFS_SERVER(inode), stat, addend);
 }
 
+/*
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag).
+ */
 #define nfs_alloc_iostats() alloc_percpu(struct nfs_iostats)
 
 static inline void nfs_free_iostats(struct nfs_iostats __percpu *stats)

@@ -46,6 +46,9 @@ acpi_status acpi_os_terminate(void);
  * Interrupts are off during resume, just like they are for boot.
  * However, boot has (system_state != SYSTEM_RUNNING)
  * to quiet __might_sleep() in kmalloc() and resume does not.
+ *
+ * These specialized allocators have to be macros for their allocations to be
+ * accounted separately (to have separate alloc_tag).
  */
 #define acpi_os_allocate(_size) \
 	kmalloc(_size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL)

@@ -53,14 +56,14 @@ acpi_status acpi_os_terminate(void);
 #define acpi_os_allocate_zeroed(_size) \
 	kzalloc(_size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL)
 
+#define acpi_os_acquire_object(_cache) \
+	kmem_cache_zalloc(_cache, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL)
+
 static inline void acpi_os_free(void *memory)
 {
 	kfree(memory);
 }
 
-#define acpi_os_acquire_object(_cache) \
-	kmem_cache_zalloc(_cache, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL)
-
 static inline acpi_thread_id acpi_os_get_thread_id(void)
 {
 	return (acpi_thread_id) (unsigned long)current;

@@ -2261,6 +2261,10 @@ void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
 				    size_t align, gfp_t flags);
 #else
+/*
+ * These specialized allocators have to be macros for their allocations to be
+ * accounted separately (to have separate alloc_tag).
+ */
 #define bpf_map_kmalloc_node(_map, _size, _flags, _node) \
 		kmalloc_node(_size, _flags, _node)
 #define bpf_map_kzalloc(_map, _size, _flags) \

@@ -85,6 +85,10 @@ dma_fence_chain_contained(struct dma_fence *fence)
  * dma_fence_chain_alloc
  *
  * Returns a new struct dma_fence_chain object or NULL on failure.
+ *
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag). The typecast is
+ * intentional to enforce typesafety.
  */
 #define dma_fence_chain_alloc() \
 	((struct dma_fence_chain *)kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL))

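The typecast note above comes down to C's implicit void * conversion:
kmalloc() and kmem_cache_zalloc() return void *, which silently converts to
any object pointer type, so a bare macro would let a mismatched assignment
compile without complaint. Casting the result inside the macro restores the
type checking the original inline helper gave for free. A self-contained
illustration (alloc_handle_typed(), alloc_handle_untyped() and both structs
are invented names, not kernel code):

#include <stdlib.h>

struct handle { int id; };
struct other  { int x;  };

/* Without a cast, the void * from malloc() converts to any pointer type. */
#define alloc_handle_untyped()	malloc(sizeof(struct handle))
/* With the cast, a mismatched assignment draws a compiler warning. */
#define alloc_handle_typed()	((struct handle *)malloc(sizeof(struct handle)))

int main(void)
{
        struct other  *a = alloc_handle_untyped(); /* wrong type, compiles silently */
        struct other  *b = alloc_handle_typed();   /* compiler warns: incompatible pointer types */
        struct handle *c = alloc_handle_typed();   /* intended use, no warning */

        free(a);
        free(b);
        free(c);
        return 0;
}
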
@@ -152,6 +152,11 @@ static inline int hid_bpf_connect_device(struct hid_device *hdev) { return 0; }
 static inline void hid_bpf_disconnect_device(struct hid_device *hdev) {}
 static inline void hid_bpf_destroy_device(struct hid_device *hid) {}
 static inline void hid_bpf_device_init(struct hid_device *hid) {}
+/*
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag). The typecast is
+ * intentional to enforce typesafety.
+ */
 #define call_hid_bpf_rdesc_fixup(_hdev, _rdesc, _size) \
 	((u8 *)kmemdup(_rdesc, *(_size), GFP_KERNEL))
 

@@ -1588,6 +1588,11 @@ void jbd2_journal_put_journal_head(struct journal_head *jh);
  */
 extern struct kmem_cache *jbd2_handle_cache;
 
+/*
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag). The typecast is
+ * intentional to enforce typesafety.
+ */
 #define jbd2_alloc_handle(_gfp_flags) \
 	((handle_t *)kmem_cache_zalloc(jbd2_handle_cache, _gfp_flags))
 

@@ -1602,6 +1607,11 @@ static inline void jbd2_free_handle(handle_t *handle)
  */
 extern struct kmem_cache *jbd2_inode_cache;
 
+/*
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag). The typecast is
+ * intentional to enforce typesafety.
+ */
 #define jbd2_alloc_inode(_gfp_flags) \
 	((struct jbd2_inode *)kmem_cache_alloc(jbd2_inode_cache, _gfp_flags))
 

@@ -3400,6 +3400,10 @@ static inline struct page *__dev_alloc_pages_noprof(gfp_t gfp_mask,
 }
 #define __dev_alloc_pages(...) alloc_hooks(__dev_alloc_pages_noprof(__VA_ARGS__))
 
+/*
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag).
+ */
 #define dev_alloc_pages(_order) __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, _order)
 
 /**

@@ -3416,6 +3420,10 @@ static inline struct page *__dev_alloc_page_noprof(gfp_t gfp_mask)
 }
 #define __dev_alloc_page(...) alloc_hooks(__dev_alloc_page_noprof(__VA_ARGS__))
 
+/*
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag).
+ */
 #define dev_alloc_page() dev_alloc_pages(0)
 
 /**

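For dev_alloc_pages()/dev_alloc_page() the requirement is inherited: as the
context lines above show, __dev_alloc_pages() is itself a macro wrapping the
_noprof variant in alloc_hooks(), which is where the per-call-site tag is
defined, so the convenience wrappers must stay macros for that expansion to
land in the calling driver. A hedged sketch of such a caller; struct
my_rx_ring and my_refill_rx() are hypothetical names, not an existing driver:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

/* Hypothetical receive-ring state, for illustration only. */
struct my_rx_ring {
        struct page *pages[256];
        unsigned int head;
};

static int my_refill_rx(struct my_rx_ring *ring)
{
        /*
         * dev_alloc_page() expands right here, so this call site gets its
         * own alloc_tag and is accounted separately from every other
         * dev_alloc_page() user.
         */
        struct page *page = dev_alloc_page();

        if (!page)
                return -ENOMEM;

        ring->pages[ring->head++ % ARRAY_SIZE(ring->pages)] = page;
        return 0;
}

With CONFIG_MEM_ALLOC_PROFILING enabled, such a call site then appears as its
own entry in /proc/allocinfo instead of being merged into a single line for
the helper.
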
@@ -414,6 +414,11 @@ void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);
 int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
 			 struct sk_msg *msg);
 
+/*
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag). The typecast is
+ * intentional to enforce typesafety.
+ */
 #define sk_psock_init_link() \
 	((struct sk_psock_link *)kzalloc(sizeof(struct sk_psock_link), \
 					 GFP_ATOMIC | __GFP_NOWARN))