staging/lustre/libcfs: remove init_waitqueue_entry_current
Cc: Andreas Dilger <andreas.dilger@intel.com>
Cc: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: Peng Tao <bergwolf@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 6d56be08f7
commit 9e795d3581
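
The change is mechanical: init_waitqueue_entry_current() was a one-line Lustre-private wrapper around init_waitqueue_entry() (its definition is deleted in the @@ -46,13 +46,6 @@ hunk below), so each caller now passes current itself. A minimal sketch of the per-call-site conversion, using a local wait_queue_t named wait as in most of the hunks below:

        wait_queue_t wait;

        /* before: Lustre-private wrapper */
        init_waitqueue_entry_current(&wait);

        /* after: the standard kernel helper, called directly */
        init_waitqueue_entry(&wait, current);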
@@ -51,7 +51,6 @@ void cfs_pause(cfs_duration_t ticks);
 typedef void (cfs_timer_func_t)(ulong_ptr_t);
 void schedule_timeout_and_set_state(long, int64_t);
 
-void init_waitqueue_entry_current(wait_queue_t *link);
 int64_t waitq_timedwait(wait_queue_t *, long, int64_t);
 void waitq_wait(wait_queue_t *, long);
 void add_wait_queue_exclusive_head(wait_queue_head_t *, wait_queue_t *);
@@ -3127,7 +3127,7 @@ kiblnd_connd (void *arg)
 
         cfs_block_allsigs ();
 
-        init_waitqueue_entry_current (&wait);
+        init_waitqueue_entry(&wait, current);
         kiblnd_data.kib_connd = current;
 
         spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
@@ -3324,7 +3324,7 @@ kiblnd_scheduler(void *arg)
 
         cfs_block_allsigs();
 
-        init_waitqueue_entry_current(&wait);
+        init_waitqueue_entry(&wait, current);
 
         sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
 
@@ -3450,7 +3450,7 @@ kiblnd_failover_thread(void *arg)
 
         cfs_block_allsigs ();
 
-        init_waitqueue_entry_current(&wait);
+        init_waitqueue_entry(&wait, current);
         write_lock_irqsave(glock, flags);
 
         while (!kiblnd_data.kib_shutdown) {
@@ -2140,7 +2140,7 @@ ksocknal_connd (void *arg)
 
         cfs_block_allsigs ();
 
-        init_waitqueue_entry_current (&wait);
+        init_waitqueue_entry(&wait, current);
 
         spin_lock_bh(connd_lock);
 
@@ -2532,7 +2532,7 @@ ksocknal_reaper (void *arg)
         cfs_block_allsigs ();
 
         INIT_LIST_HEAD(&enomem_conns);
-        init_waitqueue_entry_current (&wait);
+        init_waitqueue_entry(&wait, current);
 
         spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 
@@ -334,7 +334,7 @@ lnet_eq_wait_locked(int *timeout_ms)
         if (tms == 0)
                 return -1; /* don't want to wait and no new event */
 
-        init_waitqueue_entry_current(&wl);
+        init_waitqueue_entry(&wl, current);
         set_current_state(TASK_INTERRUPTIBLE);
         add_wait_queue(&the_lnet.ln_eq_waitq, &wl);
 
@@ -256,7 +256,7 @@ int seq_client_get_seq(const struct lu_env *env,
 
         LASSERT(seqnr != NULL);
         mutex_lock(&seq->lcs_mutex);
-        init_waitqueue_entry_current(&link);
+        init_waitqueue_entry(&link, current);
 
         while (1) {
                 rc = seq_fid_alloc_prep(seq, &link);
@@ -306,7 +306,7 @@ int seq_client_alloc_fid(const struct lu_env *env,
         LASSERT(seq != NULL);
         LASSERT(fid != NULL);
 
-        init_waitqueue_entry_current(&link);
+        init_waitqueue_entry(&link, current);
         mutex_lock(&seq->lcs_mutex);
 
         if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_EXHAUST))
@@ -370,7 +370,7 @@ void seq_client_flush(struct lu_client_seq *seq)
         wait_queue_t link;
 
         LASSERT(seq != NULL);
-        init_waitqueue_entry_current(&link);
+        init_waitqueue_entry(&link, current);
         mutex_lock(&seq->lcs_mutex);
 
         while (seq->lcs_update) {
@@ -536,7 +536,7 @@ do { \
         if (condition) \
                 break; \
 \
-        init_waitqueue_entry_current(&__wait); \
+        init_waitqueue_entry(&__wait, current); \
         l_add_wait(&wq, &__wait); \
 \
         /* Block all signals (just the non-fatal ones if no timeout). */ \
@@ -1196,7 +1196,7 @@ static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
 
                 bkt = lu_site_bkt_from_fid(site, &header->loh_fid);
 
-                init_waitqueue_entry_current(&waiter);
+                init_waitqueue_entry(&waiter, current);
                 add_wait_queue(&bkt->lsb_marche_funebre, &waiter);
 
                 while (1) {
@@ -368,7 +368,7 @@ void libcfs_debug_dumplog(void)
         /* we're being careful to ensure that the kernel thread is
          * able to set our state to running as it exits before we
          * get to schedule() */
-        init_waitqueue_entry_current(&wait);
+        init_waitqueue_entry(&wait, current);
         set_current_state(TASK_INTERRUPTIBLE);
         add_wait_queue(&debug_ctlwq, &wait);
 
@@ -46,13 +46,6 @@
 #include <asm/kgdb.h>
 #endif
 
-void
-init_waitqueue_entry_current(wait_queue_t *link)
-{
-        init_waitqueue_entry(link, current);
-}
-EXPORT_SYMBOL(init_waitqueue_entry_current);
-
 /**
  * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
  * waiting threads, which is not always desirable because all threads will
@@ -1076,7 +1076,7 @@ end_loop:
                                 break;
                         }
                 }
-                init_waitqueue_entry_current(&__wait);
+                init_waitqueue_entry(&__wait, current);
                 add_wait_queue(&tctl->tctl_waitq, &__wait);
                 set_current_state(TASK_INTERRUPTIBLE);
                 waitq_timedwait(&__wait, TASK_INTERRUPTIBLE,
@@ -218,7 +218,7 @@ find_again:
                         MAX_SCHEDULE_TIMEOUT;
                 long left;
 
-                init_waitqueue_entry_current(&wait);
+                init_waitqueue_entry(&wait, current);
                 add_wait_queue(&entry->ue_waitq, &wait);
                 set_current_state(TASK_INTERRUPTIBLE);
                 spin_unlock(&cache->uc_lock);
@@ -310,7 +310,7 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
          * ->lo_sub[] slot in lovsub_object_fini() */
         if (r0->lo_sub[idx] == los) {
                 waiter = &lov_env_info(env)->lti_waiter;
-                init_waitqueue_entry_current(waiter);
+                init_waitqueue_entry(waiter, current);
                 add_wait_queue(&bkt->lsb_marche_funebre, waiter);
                 set_current_state(TASK_UNINTERRUPTIBLE);
                 while (1) {
@@ -932,7 +932,7 @@ int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
          * LU-305 */
         blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
 
-        init_waitqueue_entry_current(&waiter);
+        init_waitqueue_entry(&waiter, current);
         add_wait_queue(&lock->cll_wq, &waiter);
         set_current_state(TASK_INTERRUPTIBLE);
         cl_lock_mutex_put(env, lock);
@@ -571,7 +571,7 @@ static struct lu_object *htable_lookup(struct lu_site *s,
          * drained), and moreover, lookup has to wait until object is freed.
          */
 
-        init_waitqueue_entry_current(waiter);
+        init_waitqueue_entry(waiter, current);
         add_wait_queue(&bkt->lsb_marche_funebre, waiter);
         set_current_state(TASK_UNINTERRUPTIBLE);
         lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
@@ -880,7 +880,7 @@ int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
 
         cache_get(&rsip->h); /* take an extra ref */
         init_waitqueue_head(&rsip->waitq);
-        init_waitqueue_entry_current(&wait);
+        init_waitqueue_entry(&wait, current);
         add_wait_queue(&rsip->waitq, &wait);
 
 cache_check:
@@ -545,7 +545,7 @@ again:
                                 page_pools.epp_waitqlen;
 
         set_current_state(TASK_UNINTERRUPTIBLE);
-        init_waitqueue_entry_current(&waitlink);
+        init_waitqueue_entry(&waitlink, current);
         add_wait_queue(&page_pools.epp_waitq, &waitlink);
 
         spin_unlock(&page_pools.epp_lock);