rcu: Refactor kvfree_call_rcu() and high-level helpers
Currently, kvfree_call_rcu() takes an offset within a structure as
its second parameter, so a helper such as kvfree_rcu_arg_2() has to
convert the rcu_head and the pointer being freed into an offset in
order to pass it. That costs an extra conversion on macro entry.

Instead of converting, refactor the code so that the pointer to be
freed is passed directly to kvfree_call_rcu().

This patch makes no functional change and is transparent to all
kvfree_rcu() users.

Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
This commit is contained in:
parent 1b929c02af
commit 04a522b7da
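
At the call-site level the refactor looks as follows. A minimal sketch, assuming a hypothetical struct foo with an embedded rcu_head; the two expansions are taken from the rcupdate.h hunks below:

    struct foo {
            int data;
            struct rcu_head rcu;
    };

    void drop_foo(struct foo *fp)
    {
            /* Caller code is unchanged by this patch: */
            kvfree_rcu(fp, rcu);

            /*
             * Before: the macro passed the field offset, cast to a callback:
             *     kvfree_call_rcu(&fp->rcu, (rcu_callback_t)(unsigned long)
             *                     offsetof(struct foo, rcu));
             *
             * After: it passes the pointer to be freed directly:
             *     kvfree_call_rcu(&fp->rcu, (void *) fp);
             */
    }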
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -1011,8 +1011,7 @@ do { \
 	\
 	if (___p) { \
 		BUILD_BUG_ON(!__is_kvfree_rcu_offset(offsetof(typeof(*(ptr)), rhf))); \
-		kvfree_call_rcu(&((___p)->rhf), (rcu_callback_t)(unsigned long) \
-			(offsetof(typeof(*(ptr)), rhf))); \
+		kvfree_call_rcu(&((___p)->rhf), (void *) (___p)); \
 	} \
 } while (0)
 
@@ -1021,7 +1020,7 @@ do { \
 	typeof(ptr) ___p = (ptr); \
 	\
 	if (___p) \
-		kvfree_call_rcu(NULL, (rcu_callback_t) (___p)); \
+		kvfree_call_rcu(NULL, (void *) (___p)); \
 } while (0)
 
 /*
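
The second rcupdate.h hunk is the head-less, single-argument form. It can block (it may fall back to synchronize_rcu() plus kvfree() when queueing fails), so it is legal only from sleepable context. A hedged usage sketch; struct blob and drop_blob are hypothetical:

    /* Object with no embedded rcu_head: */
    struct blob {
            size_t len;
            u8 data[];
    };

    void drop_blob(struct blob *b)
    {
            /*
             * Head-less variant, sleepable context only; now expands to
             * kvfree_call_rcu(NULL, (void *) b) instead of casting b to
             * an rcu_callback_t.
             */
            kvfree_rcu(b);
    }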
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -98,25 +98,25 @@ static inline void synchronize_rcu_expedited(void)
  */
 extern void kvfree(const void *addr);
 
-static inline void __kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+static inline void __kvfree_call_rcu(struct rcu_head *head, void *ptr)
 {
 	if (head) {
-		call_rcu(head, func);
+		call_rcu(head, (rcu_callback_t) ((void *) head - ptr));
 		return;
 	}
 
 	// kvfree_rcu(one_arg) call.
 	might_sleep();
 	synchronize_rcu();
-	kvfree((void *) func);
+	kvfree(ptr);
 }
 
 #ifdef CONFIG_KASAN_GENERIC
-void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
+void kvfree_call_rcu(struct rcu_head *head, void *ptr);
 #else
-static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+static inline void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 {
-	__kvfree_call_rcu(head, func);
+	__kvfree_call_rcu(head, ptr);
 }
 #endif
 
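
Since the macro no longer supplies the offset, Tiny RCU now re-derives it inside __kvfree_call_rcu(): (void *) head - ptr is the byte offset of the embedded rcu_head within the object, still smuggled through the rcu_callback_t argument of call_rcu(). A standalone userspace sketch of that arithmetic, assuming stub stand-ins for the kernel types (and GCC's void * arithmetic extension, which the kernel also relies on):

    #include <stdio.h>

    struct rcu_head_stub { void *next; void *func; };

    struct obj {
            long payload;
            struct rcu_head_stub rcu;
    };

    int main(void)
    {
            struct obj o;
            void *ptr = &o;          /* pointer that will be freed */
            void *head = &o.rcu;     /* address of the embedded rcu_head */

            /* Encode, as in call_rcu(head, (rcu_callback_t)((void *) head - ptr)): */
            unsigned long offset = head - ptr;

            /* Decode, recovering the object pointer from head and offset: */
            void *recovered = head - offset;

            printf("offset=%lu ok=%d\n", offset, recovered == ptr);
            return 0;
    }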
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -33,7 +33,7 @@ static inline void rcu_virt_note_context_switch(void)
 }
 
 void synchronize_rcu_expedited(void);
-void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
+void kvfree_call_rcu(struct rcu_head *head, void *ptr);
 
 void rcu_barrier(void);
 bool rcu_eqs_special_set(int cpu);
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -246,15 +246,12 @@ bool poll_state_synchronize_rcu(unsigned long oldstate)
 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
 
 #ifdef CONFIG_KASAN_GENERIC
-void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 {
-	if (head) {
-		void *ptr = (void *) head - (unsigned long) func;
-
+	if (head)
 		kasan_record_aux_stack_noalloc(ptr);
-	}
-
-	__kvfree_call_rcu(head, func);
+
+	__kvfree_call_rcu(head, ptr);
 }
 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
 #endif
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3103,8 +3103,8 @@ static void kfree_rcu_work(struct work_struct *work)
 	 * This list is named "Channel 3".
 	 */
 	for (; head; head = next) {
-		unsigned long offset = (unsigned long)head->func;
-		void *ptr = (void *)head - offset;
+		void *ptr = (void *) head->func;
+		unsigned long offset = (void *) head - ptr;
 
 		next = head->next;
 		debug_rcu_head_unqueue((struct rcu_head *)ptr);
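
The decode swap in kfree_rcu_work() above works because both encodings preserve the same relation between the object pointer, the embedded rcu_head, and the offset:

    (void *) head == ptr + offset,    with __is_kvfree_rcu_offset(offset)

Before the patch, head->func stored offset and ptr was derived; afterwards it stores ptr and offset is derived. (In current sources __is_kvfree_rcu_offset() simply checks offset < 4096, so in practice a real object pointer is never mistaken for an offset.)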
@@ -3342,26 +3342,21 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
  * be free'd in workqueue context. This allows us to: batch requests together to
  * reduce the number of grace periods during heavy kfree_rcu()/kvfree_rcu() load.
  */
-void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 {
 	unsigned long flags;
 	struct kfree_rcu_cpu *krcp;
 	bool success;
-	void *ptr;
-
-	if (head) {
-		ptr = (void *) head - (unsigned long) func;
-	} else {
-		/*
-		 * Please note there is a limitation for the head-less
-		 * variant, that is why there is a clear rule for such
-		 * objects: it can be used from might_sleep() context
-		 * only. For other places please embed an rcu_head to
-		 * your data.
-		 */
-		might_sleep();
-		ptr = (unsigned long *) func;
-	}
+
+	/*
+	 * Please note there is a limitation for the head-less
+	 * variant, that is why there is a clear rule for such
+	 * objects: it can be used from might_sleep() context
+	 * only. For other places please embed an rcu_head to
+	 * your data.
+	 */
+	if (!head)
+		might_sleep();
 
 	// Queue the object but don't yet schedule the batch.
 	if (debug_rcu_head_queue(ptr)) {
@@ -3382,7 +3377,7 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 		// Inline if kvfree_rcu(one_arg) call.
 		goto unlock_return;
 
-	head->func = func;
+	head->func = ptr;
 	head->next = krcp->head;
 	krcp->head = head;
 	success = true;