linux/lib/refcount.c
Will Deacon 77e9971c79 locking/refcount: Move the bulk of the REFCOUNT_FULL implementation into the <linux/refcount.h> header
In an effort to improve performance of the REFCOUNT_FULL implementation,
move the bulk of its functions into linux/refcount.h. This allows them
to be inlined in the same way as if they had been provided via
CONFIG_ARCH_HAS_REFCOUNT.

Signed-off-by: Will Deacon <will@kernel.org>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Tested-by: Hanjun Guo <guohanjun@huawei.com>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Elena Reshetova <elena.reshetova@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20191121115902.2551-5-will@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2019-11-25 09:15:06 +01:00

// SPDX-License-Identifier: GPL-2.0
/*
 * Out-of-line refcount functions common to all refcount implementations.
 */
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/bug.h>

/**
 * refcount_dec_if_one - decrement a refcount if it is 1
 * @r: the refcount
 *
 * No atomic_t counterpart; it attempts a 1 -> 0 transition and returns the
 * success thereof.
 *
 * Like all decrement operations, it provides release memory ordering and a
 * control dependency.
 *
 * It can be used like a try-delete operator; this explicit case is provided
 * rather than a generic cmpxchg, because the latter would allow implementing
 * unsafe operations.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_if_one(refcount_t *r)
{
	int val = 1;

	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL(refcount_dec_if_one);
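
/*
 * Example (illustrative sketch, not part of this file's API): a try-delete
 * operation that tears an object down only when the caller holds the sole
 * remaining reference. The struct and helper names below are hypothetical.
 *
 *	struct cache_entry {
 *		refcount_t refcount;
 *		struct list_head node;
 *	};
 *
 *	static bool cache_entry_try_delete(struct cache_entry *e)
 *	{
 *		if (!refcount_dec_if_one(&e->refcount))
 *			return false;	// other references remain; count untouched
 *		list_del(&e->node);	// 1 -> 0 succeeded; we own the teardown
 *		kfree(e);
 *		return true;
 *	}
 */
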
/**
 * refcount_dec_not_one - decrement a refcount if it is not 1
 * @r: the refcount
 *
 * No atomic_t counterpart; it decrements unless the value is 1, in which case
 * it will return false.
 *
 * Was often done like: atomic_add_unless(&var, -1, 1)
 *
 * Return: true if the decrement operation was successful, false otherwise
 */
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == REFCOUNT_SATURATED))
			return true;

		if (val == 1)
			return false;

		new = val - 1;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return true;
}
EXPORT_SYMBOL(refcount_dec_not_one);
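
/*
 * Example (illustrative sketch): refcount_dec_not_one() is typically the
 * lockless fast path of a put operation, exactly as the
 * refcount_dec_and_*lock() helpers below use it: drop the reference without
 * taking the lock unless this put might be the one that reaches zero.
 * 'obj', 'obj_lock' and obj_destroy() are hypothetical.
 *
 *	if (refcount_dec_not_one(&obj->refcount))
 *		return;				// not the last reference
 *	mutex_lock(&obj_lock);			// could hit zero; serialize
 *	if (refcount_dec_and_test(&obj->refcount))
 *		obj_destroy(obj);
 *	mutex_unlock(&obj_lock);
 */
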
/**
 * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
 *                               refcount to 0
 * @r: the refcount
 * @lock: the mutex to be locked
 *
 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
 * to decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold mutex if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	mutex_lock(lock);
	if (!refcount_dec_and_test(r)) {
		mutex_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
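
/*
 * Example (illustrative sketch): the typical caller is a put operation on an
 * object living on a mutex-protected list, where the final reference must
 * unlink and free it. All names below are hypothetical.
 *
 *	static void widget_put(struct widget *w)
 *	{
 *		if (!refcount_dec_and_mutex_lock(&w->refcount, &widget_list_lock))
 *			return;			// not the final reference
 *		// Final reference: the mutex is held at this point.
 *		list_del(&w->node);
 *		mutex_unlock(&widget_list_lock);
 *		kfree(w);
 *	}
 */
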
/**
 * refcount_dec_and_lock - return holding spinlock if able to decrement
 *                         refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 *
 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock(lock);
	if (!refcount_dec_and_test(r)) {
		spin_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock);

/**
 * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
 *                                 interrupts if able to decrement refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 * @flags: saved IRQ-flags if the lock is acquired
 *
 * Same as refcount_dec_and_lock() above except that the spinlock is acquired
 * with disabled interrupts.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
				   unsigned long *flags)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock_irqsave(lock, *flags);
	if (!refcount_dec_and_test(r)) {
		spin_unlock_irqrestore(lock, *flags);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
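
/*
 * Example (illustrative sketch): same pattern as refcount_dec_and_lock(), for
 * objects whose lock may also be taken from interrupt context, so the final
 * put must disable interrupts while holding it. All names are hypothetical.
 *
 *	static void widget_put_irq(struct widget *w)
 *	{
 *		unsigned long flags;
 *
 *		if (!refcount_dec_and_lock_irqsave(&w->refcount, &widget_lock,
 *						   &flags))
 *			return;			// not the final reference
 *		list_del(&w->node);
 *		spin_unlock_irqrestore(&widget_lock, flags);
 *		kfree(w);
 *	}
 */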