[XFS] Unwrap mru_lock.

Un-obfuscate mru_lock: remove the mutex_spinlock()/mutex_spinunlock()
wrapper macros, call spin_lock()/spin_unlock() directly, and drop the
extraneous lock-cookie argument held over from old XFS code.

SGI-PV: 970382
SGI-Modid: xfs-linux-melb:xfs-kern:29745a

Signed-off-by: Eric Sandeen <sandeen@sandeen.net>
Signed-off-by: Donald Douwsma <donaldd@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
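
For context, a minimal sketch of the kind of wrapper being removed; the exact
definitions lived in XFS's old Linux compatibility headers and may have
differed slightly. mutex_spinlock() took the spinlock and returned a dummy
"cookie", and mutex_spinunlock() dropped the lock while ignoring the cookie
handed back to it, which is why every unlock site below carries a spurious 0
argument:

	/* Approximate shape of the old wrappers (illustration only). */
	#define mutex_spinlock(lock)		({ spin_lock(lock); 0; })
	#define mutex_spinunlock(lock, s)	do { spin_unlock(lock); } while (0)

	/* After this patch, callers use the spinlock primitives directly: */
	spin_lock(&mru->lock);
	/* ... manipulate the radix tree and MRU lists ... */
	spin_unlock(&mru->lock);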
Author:    Eric Sandeen, 2007-10-11 17:42:10 +10:00
Committer: Lachlan McIlroy
parent 703e1f0fd2
commit ba74d0cba5

@@ -245,7 +245,7 @@ _xfs_mru_cache_clear_reap_list(
 		 */
 		list_move(&elem->list_node, &tmp);
 	}
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 
 	list_for_each_entry_safe(elem, next, &tmp, list_node) {
 
@@ -259,7 +259,7 @@ _xfs_mru_cache_clear_reap_list(
 		kmem_zone_free(xfs_mru_elem_zone, elem);
 	}
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 }
 
 /*
@@ -280,7 +280,7 @@ _xfs_mru_cache_reap(
 	if (!mru || !mru->lists)
 		return;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	next = _xfs_mru_cache_migrate(mru, jiffies);
 	_xfs_mru_cache_clear_reap_list(mru);
 
@@ -294,7 +294,7 @@ _xfs_mru_cache_reap(
 		queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);
 	}
 
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 }
 
 int
@@ -398,17 +398,17 @@ xfs_mru_cache_flush(
 	if (!mru || !mru->lists)
 		return;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	if (mru->queued) {
-		mutex_spinunlock(&mru->lock, 0);
+		spin_unlock(&mru->lock);
 		cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
-		mutex_spinlock(&mru->lock);
+		spin_lock(&mru->lock);
 	}
 
 	_xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time);
 	_xfs_mru_cache_clear_reap_list(mru);
 
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 }
 
 void
@@ -454,13 +454,13 @@ xfs_mru_cache_insert(
 	elem->key = key;
 	elem->value = value;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 
 	radix_tree_insert(&mru->store, key, elem);
 	radix_tree_preload_end();
 	_xfs_mru_cache_list_insert(mru, elem);
 
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 
 	return 0;
 }
@@ -483,14 +483,14 @@ xfs_mru_cache_remove(
 	if (!mru || !mru->lists)
 		return NULL;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	elem = radix_tree_delete(&mru->store, key);
 	if (elem) {
 		value = elem->value;
 		list_del(&elem->list_node);
 	}
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 
 	if (elem)
 		kmem_zone_free(xfs_mru_elem_zone, elem);
 
@@ -540,14 +540,14 @@ xfs_mru_cache_lookup(
 	if (!mru || !mru->lists)
 		return NULL;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	elem = radix_tree_lookup(&mru->store, key);
 	if (elem) {
 		list_del(&elem->list_node);
 		_xfs_mru_cache_list_insert(mru, elem);
 	}
 	else
-		mutex_spinunlock(&mru->lock, 0);
+		spin_unlock(&mru->lock);
 
 	return elem ? elem->value : NULL;
 }
@@ -571,10 +571,10 @@ xfs_mru_cache_peek(
 	if (!mru || !mru->lists)
 		return NULL;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	elem = radix_tree_lookup(&mru->store, key);
 	if (!elem)
-		mutex_spinunlock(&mru->lock, 0);
+		spin_unlock(&mru->lock);
 
 	return elem ? elem->value : NULL;
 }
@@ -588,5 +588,5 @@ void
 xfs_mru_cache_done(
 	xfs_mru_cache_t	*mru)
 {
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 }
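
Note the asymmetry preserved by the xfs_mru_cache_lookup(), xfs_mru_cache_peek()
and xfs_mru_cache_done() hunks: on a hit, lookup and peek return with mru->lock
still held (only the miss path unlocks), and the caller releases it through
xfs_mru_cache_done(). A rough usage sketch of that contract follows; the caller
and the use_cached_value() helper are hypothetical:

	/* Hypothetical caller illustrating the lookup/done locking contract. */
	void *value = xfs_mru_cache_lookup(mru, key);
	if (value) {
		/* mru->lock is still held here, so keep this section short. */
		use_cached_value(value);	/* hypothetical helper */
		xfs_mru_cache_done(mru);	/* drops mru->lock */
	}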