mirror of https://github.com/torvalds/linux.git
185 lines · 3.9 KiB · C
#include <linux/export.h>
#include <linux/lockref.h>

#ifdef CONFIG_CMPXCHG_LOCKREF
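
/*
 * Background note (describing <linux/lockref.h>, which is not shown here):
 * struct lockref pairs a spinlock_t with an unsigned int reference count,
 * and when CONFIG_CMPXCHG_LOCKREF is available the lock/count pair is also
 * union-aliased as a single aligned 64-bit "lock_count" word, so both can
 * be updated with one cmpxchg64().  That layout is what the
 * BUILD_BUG_ON(sizeof(old) != 8) check below relies on.
 */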

/*
 * Allow weakly-ordered memory architectures to provide barrier-less
 * cmpxchg semantics for lockref updates.
 */
#ifndef cmpxchg64_relaxed
# define cmpxchg64_relaxed cmpxchg64
#endif

/*
 * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
 * This is useful for architectures with an expensive cpu_relax().
 */
#ifndef arch_mutex_cpu_relax
# define arch_mutex_cpu_relax() cpu_relax()
#endif

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { \
        struct lockref old; \
        BUILD_BUG_ON(sizeof(old) != 8); \
        old.lock_count = ACCESS_ONCE(lockref->lock_count); \
        while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
                struct lockref new = old, prev = old; \
                CODE \
                old.lock_count = cmpxchg64_relaxed(&lockref->lock_count, \
                                                   old.lock_count, \
                                                   new.lock_count); \
                if (likely(old.lock_count == prev.lock_count)) { \
                        SUCCESS; \
                } \
                arch_mutex_cpu_relax(); \
        } \
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
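
/*
 * Usage sketch: each function below invokes the macro along the lines of
 *
 *         CMPXCHG_LOOP(
 *                 new.count++;
 *         ,
 *                 return 1;
 *         );
 *
 * It snapshots the combined lock + count word, retries a cmpxchg64() on it
 * for as long as the snapshot shows the spinlock unlocked, runs the SUCCESS
 * code once a cmpxchg goes through, and falls through to the spinlock slow
 * path when the lock is seen to be held (or when CODE executes "break").
 * With CONFIG_CMPXCHG_LOCKREF disabled the macro is empty and every call
 * simply takes the spinlock.
 */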

/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count++;
        ,
                return;
        );

        spin_lock(&lockref->lock);
        lockref->count++;
        spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);

/**
 * lockref_get_not_zero - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
        int retval;

        CMPXCHG_LOOP(
                new.count++;
                if (!old.count)
                        return 0;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        retval = 0;
        if (lockref->count) {
                lockref->count++;
                retval = 1;
        }
        spin_unlock(&lockref->lock);
        return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
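
/*
 * Typical caller pattern (hypothetical object, for illustration): during a
 * lookup that can race with the final put, a zero count means the object is
 * already on its way out, so the lookup simply fails:
 *
 *         if (!lockref_get_not_zero(&obj->ref))
 *                 return NULL;
 */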

/**
 * lockref_get_or_lock - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count++;
                if (!old.count)
                        break;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        if (!lockref->count)
                return 0;
        lockref->count++;
        spin_unlock(&lockref->lock);
        return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
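
/*
 * Note for callers (illustrative): when this returns 0 the spinlock is held
 * and the count is still zero, so the caller decides what to do under the
 * lock and must release it itself, e.g.
 *
 *         if (!lockref_get_or_lock(&obj->ref)) {
 *                 ... resurrect the object or fail the lookup ...
 *                 spin_unlock(&obj->ref.lock);
 *         }
 */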

/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count--;
                if (old.count <= 1)
                        break;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        if (lockref->count <= 1)
                return 0;
        lockref->count--;
        spin_unlock(&lockref->lock);
        return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
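
/*
 * Note for callers (illustrative): a return of 0 means the count was <= 1
 * and was left untouched, with the spinlock now held; this is typically
 * where last-reference teardown happens, e.g.
 *
 *         if (lockref_put_or_lock(&obj->ref))
 *                 return;
 *         ... obj->ref.lock is held, the count is still at most 1 ...
 *         spin_unlock(&obj->ref.lock);
 */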

/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
        assert_spin_locked(&lockref->lock);
        lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);
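
/*
 * A dead lockref is encoded as a large negative count (-128), so deadness
 * can be detected with a simple signed comparison; that is what the
 * (int)count < 0 test in lockref_get_not_dead() below relies on.  The
 * caller must hold lockref->lock, as the assert above enforces.
 */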

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
        int retval;

        CMPXCHG_LOOP(
                new.count++;
                if ((int)old.count < 0)
                        return 0;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        retval = 0;
        if ((int) lockref->count >= 0) {
                lockref->count++;
                retval = 1;
        }
        spin_unlock(&lockref->lock);
        return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
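
For illustration, here is a minimal sketch (not from the kernel tree) of how an
object might embed a struct lockref. The structure and helper names are
hypothetical, and the interaction between teardown and concurrent lockless
lookups (e.g. an RCU grace period before freeing) is left out for brevity.

#include <linux/lockref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical object whose lifetime is managed by a lockref. */
struct myobj {
        struct lockref ref;     /* spinlock + reference count */
        /* ... payload ... */
};

/* Creation: start with a single reference held by the creator. */
static struct myobj *myobj_alloc(void)
{
        struct myobj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return NULL;
        spin_lock_init(&obj->ref.lock);
        obj->ref.count = 1;
        return obj;
}

/* Lookup path: take a reference unless the object is already dead. */
static inline bool myobj_tryget(struct myobj *obj)
{
        return lockref_get_not_dead(&obj->ref) != 0;
}

/* Release path: drop a reference, tearing the object down on the last put. */
static void myobj_put(struct myobj *obj)
{
        if (lockref_put_or_lock(&obj->ref))
                return;                 /* fast path: count was > 1 */

        /* Slow path: count <= 1 and obj->ref.lock is now held. */
        lockref_mark_dead(&obj->ref);
        spin_unlock(&obj->ref.lock);
        kfree(obj);
}

The put path mirrors the dcache-style pattern the helpers were designed for:
the common case avoids the spinlock entirely, and only the last reference
falls back to lock-protected teardown.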