staging: lustre: discard cfs_time_seconds()

cfs_time_seconds() converts a number of seconds to the
matching number of jiffies. The standard way to do this in
Linux is to multiply by HZ, so discard cfs_time_seconds()
and use "* HZ" instead.
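
For reference, a minimal sketch (not part of the patch) of the
removed helper and why the replacement is exact: for a whole second,
msecs_to_jiffies(MSEC_PER_SEC) always evaluates to HZ, so
cfs_time_seconds(n) == n * HZ under every CONFIG_HZ setting.

	#include <linux/jiffies.h>	/* HZ, msecs_to_jiffies() */
	#include <linux/time64.h>	/* MSEC_PER_SEC */

	/* The helper this patch removes: */
	static inline long cfs_time_seconds(int seconds)
	{
		return ((long)seconds) * msecs_to_jiffies(MSEC_PER_SEC);
	}

	/* Equivalent open-coded form used throughout the patch: */
	unsigned long timeout = 5 * HZ;	/* was cfs_time_seconds(5) */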

Reviewed-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: NeilBrown <neilb@suse.com>
Reviewed-by: Patrick Farrell <paf@cray.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
NeilBrown, 2018-02-13 08:22:36 +11:00 (committed by Greg Kroah-Hartman)
parent db1d6cbc2a, commit 672b63e55b
46 changed files with 96 additions and 106 deletions


@@ -66,8 +66,8 @@ extern unsigned int libcfs_panic_on_lbug;
 # define DEBUG_SUBSYSTEM S_UNDEFINED
 #endif
-#define CDEBUG_DEFAULT_MAX_DELAY (cfs_time_seconds(600)) /* jiffies */
-#define CDEBUG_DEFAULT_MIN_DELAY ((cfs_time_seconds(1) + 1) / 2) /* jiffies */
+#define CDEBUG_DEFAULT_MAX_DELAY (600 * HZ) /* jiffies */
+#define CDEBUG_DEFAULT_MIN_DELAY ((HZ + 1) / 2) /* jiffies */
 #define CDEBUG_DEFAULT_BACKOFF 2
 struct cfs_debug_limit_state {
 	unsigned long cdls_next;


@@ -62,7 +62,7 @@ static inline int cfs_time_aftereq(unsigned long t1, unsigned long t2)
 static inline unsigned long cfs_time_shift(int seconds)
 {
-	return cfs_time_add(cfs_time_current(), cfs_time_seconds(seconds));
+	return cfs_time_add(cfs_time_current(), seconds * HZ);
 }
 /*


@@ -65,11 +65,6 @@ static inline unsigned long cfs_time_current(void)
 	return jiffies;
 }
-static inline long cfs_time_seconds(int seconds)
-{
-	return ((long)seconds) * msecs_to_jiffies(MSEC_PER_SEC);
-}
 static inline long cfs_duration_sec(long d)
 {
 	return d / msecs_to_jiffies(MSEC_PER_SEC);
@@ -85,7 +80,7 @@ static inline u64 cfs_time_add_64(u64 t, u64 d)
 static inline u64 cfs_time_shift_64(int seconds)
 {
 	return cfs_time_add_64(cfs_time_current_64(),
-			       cfs_time_seconds(seconds));
+			       seconds * HZ);
 }
 static inline int cfs_time_before_64(u64 t1, u64 t2)


@@ -1211,7 +1211,7 @@ static struct kib_hca_dev *kiblnd_current_hdev(struct kib_dev *dev)
 		CDEBUG(D_NET, "%s: Wait for failover\n",
 		       dev->ibd_ifname);
 		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1) / 100);
+		schedule_timeout(HZ / 100);
 		read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 	}
@@ -1921,7 +1921,7 @@ struct list_head *kiblnd_pool_alloc_node(struct kib_poolset *ps)
 	set_current_state(TASK_INTERRUPTIBLE);
 	schedule_timeout(interval);
-	if (interval < cfs_time_seconds(1))
+	if (interval < HZ)
 		interval *= 2;
 	goto again;
@@ -2541,7 +2541,7 @@ static void kiblnd_base_shutdown(void)
 			       "Waiting for %d threads to terminate\n",
 			       atomic_read(&kiblnd_data.kib_nthreads));
 			set_current_state(TASK_UNINTERRUPTIBLE);
-			schedule_timeout(cfs_time_seconds(1));
+			schedule_timeout(HZ);
 		}
 		/* fall through */
@@ -2592,7 +2592,7 @@ static void kiblnd_shutdown(struct lnet_ni *ni)
 			       libcfs_nid2str(ni->ni_nid),
 			       atomic_read(&net->ibn_npeers));
 			set_current_state(TASK_UNINTERRUPTIBLE);
-			schedule_timeout(cfs_time_seconds(1));
+			schedule_timeout(HZ);
 		}
 		kiblnd_net_fini_pools(net);


@@ -3728,8 +3728,8 @@ kiblnd_failover_thread(void *arg)
 		add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
 		write_unlock_irqrestore(glock, flags);
-		rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
-				      cfs_time_seconds(1));
+		rc = schedule_timeout(long_sleep ? 10 * HZ :
+				      HZ);
 		remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
 		write_lock_irqsave(glock, flags);


@@ -1677,7 +1677,7 @@ ksocknal_destroy_conn(struct ksock_conn *conn)
 		switch (conn->ksnc_rx_state) {
 		case SOCKNAL_RX_LNET_PAYLOAD:
 			last_rcv = conn->ksnc_rx_deadline -
-				   cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
+				   *ksocknal_tunables.ksnd_timeout * HZ;
 			CERROR("Completing partial receive from %s[%d], ip %pI4h:%d, with error, wanted: %zd, left: %d, last alive is %ld secs ago\n",
 			       libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
 			       &conn->ksnc_ipaddr, conn->ksnc_port,
@@ -2356,7 +2356,7 @@ ksocknal_base_shutdown(void)
 			       ksocknal_data.ksnd_nthreads);
 			read_unlock(&ksocknal_data.ksnd_global_lock);
 			set_current_state(TASK_UNINTERRUPTIBLE);
-			schedule_timeout(cfs_time_seconds(1));
+			schedule_timeout(HZ);
 			read_lock(&ksocknal_data.ksnd_global_lock);
 		}
 		read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -2599,7 +2599,7 @@ ksocknal_shutdown(struct lnet_ni *ni)
 		       "waiting for %d peers to disconnect\n",
 		       net->ksnn_npeers);
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1));
+		schedule_timeout(HZ);
 		ksocknal_debug_peerhash(ni);


@@ -189,7 +189,7 @@ ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
 	if (ksocknal_data.ksnd_stall_tx) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
+		schedule_timeout(ksocknal_data.ksnd_stall_tx * HZ);
 	}
 	LASSERT(tx->tx_resid);
@@ -294,7 +294,7 @@ ksocknal_receive(struct ksock_conn *conn)
 	if (ksocknal_data.ksnd_stall_rx) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
+		schedule_timeout(ksocknal_data.ksnd_stall_rx * HZ);
 	}
 	rc = ksocknal_connsock_addref(conn);
@@ -1780,7 +1780,7 @@ ksocknal_connect(struct ksock_route *route)
 	int rc = 0;
 	deadline = cfs_time_add(cfs_time_current(),
-				cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));
+				*ksocknal_tunables.ksnd_timeout * HZ);
 	write_lock_bh(&ksocknal_data.ksnd_global_lock);
@@ -1878,7 +1878,7 @@ ksocknal_connect(struct ksock_route *route)
 		 * so min_reconnectms should be good heuristic
 		 */
 		route->ksnr_retry_interval =
-			cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms) / 1000;
+			*ksocknal_tunables.ksnd_min_reconnectms * HZ / 1000;
 		route->ksnr_timeout = cfs_time_add(cfs_time_current(),
 						   route->ksnr_retry_interval);
 	}
@@ -1899,10 +1899,10 @@ ksocknal_connect(struct ksock_route *route)
 	route->ksnr_retry_interval *= 2;
 	route->ksnr_retry_interval =
 		max(route->ksnr_retry_interval,
-		    cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms) / 1000);
+		    (long)*ksocknal_tunables.ksnd_min_reconnectms * HZ / 1000);
 	route->ksnr_retry_interval =
 		min(route->ksnr_retry_interval,
-		    cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms) / 1000);
+		    (long)*ksocknal_tunables.ksnd_max_reconnectms * HZ / 1000);
 	LASSERT(route->ksnr_retry_interval);
 	route->ksnr_timeout = cfs_time_add(cfs_time_current(),
@@ -1972,7 +1972,7 @@ ksocknal_connd_check_start(time64_t sec, long *timeout)
 		if (sec - ksocknal_data.ksnd_connd_failed_stamp <= 1) {
 			/* may run out of resource, retry later */
-			*timeout = cfs_time_seconds(1);
+			*timeout = HZ;
 			return 0;
 		}
@@ -2031,8 +2031,8 @@ ksocknal_connd_check_stop(time64_t sec, long *timeout)
 	val = (int)(ksocknal_data.ksnd_connd_starting_stamp +
 		    SOCKNAL_CONND_TIMEOUT - sec);
-	*timeout = (val > 0) ? cfs_time_seconds(val) :
-			       cfs_time_seconds(SOCKNAL_CONND_TIMEOUT);
+	*timeout = (val > 0) ? val * HZ :
+			       SOCKNAL_CONND_TIMEOUT * HZ;
 	if (val > 0)
 		return 0;
@@ -2307,7 +2307,7 @@ ksocknal_send_keepalive_locked(struct ksock_peer *peer)
 	if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
 	    time_before(cfs_time_current(),
 			cfs_time_add(peer->ksnp_last_alive,
-				     cfs_time_seconds(*ksocknal_tunables.ksnd_keepalive))))
+				     *ksocknal_tunables.ksnd_keepalive * HZ)))
 		return 0;
 	if (time_before(cfs_time_current(), peer->ksnp_send_keepalive))
@@ -2563,7 +2563,7 @@ ksocknal_reaper(void *arg)
 					ksocknal_data.ksnd_peer_hash_size;
 			}
-			deadline = cfs_time_add(deadline, cfs_time_seconds(p));
+			deadline = cfs_time_add(deadline, p * HZ);
 		}
 		if (nenomem_conns) {


@@ -113,7 +113,7 @@ static int param_set_delay_minmax(const char *val,
 	if (rc)
 		return -EINVAL;
-	d = cfs_time_seconds(sec) / 100;
+	d = sec * HZ / 100;
 	if (d < min || d > max)
 		return -EINVAL;


@@ -134,7 +134,7 @@ int __cfs_fail_timeout_set(u32 id, u32 value, int ms, int set)
 		CERROR("cfs_fail_timeout id %x sleeping for %dms\n",
 		       id, ms);
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(ms) / 1000);
+		schedule_timeout(ms * HZ / 1000);
 		CERROR("cfs_fail_timeout id %x awake\n", id);
 	}
 	return ret;


@@ -441,7 +441,7 @@ console:
 		if (cfs_time_after(cfs_time_current(),
 				   cdls->cdls_next + libcfs_console_max_delay +
-				   cfs_time_seconds(10))) {
+				   10 * HZ)) {
 			/* last timeout was a long time ago */
 			cdls->cdls_delay /= libcfs_console_backoff * 4;
 		} else {
@@ -1071,7 +1071,7 @@ end_loop:
 			init_waitqueue_entry(&__wait, current);
 			add_wait_queue(&tctl->tctl_waitq, &__wait);
 			set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(cfs_time_seconds(1));
+			schedule_timeout(HZ);
 			remove_wait_queue(&tctl->tctl_waitq, &__wait);
 		}
 		complete(&tctl->tctl_stop);


@@ -365,7 +365,7 @@ lnet_acceptor(void *arg)
 			if (rc != -EAGAIN) {
 				CWARN("Accept error %d: pausing...\n", rc);
 				set_current_state(TASK_UNINTERRUPTIBLE);
-				schedule_timeout(cfs_time_seconds(1));
+				schedule_timeout(HZ);
 			}
 			continue;
 		}


@@ -970,7 +970,7 @@ lnet_ping_md_unlink(struct lnet_ping_info *pinfo,
 	while (pinfo->pi_features != LNET_PING_FEAT_INVAL) {
 		CDEBUG(D_NET, "Still waiting for ping MD to unlink\n");
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1));
+		schedule_timeout(HZ);
 	}
 	cfs_restore_sigs(blocked);
@@ -1109,7 +1109,7 @@ lnet_clear_zombies_nis_locked(void)
 			       libcfs_nid2str(ni->ni_nid));
 		}
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1));
+		schedule_timeout(HZ);
 		lnet_net_lock(LNET_LOCK_EX);
 		continue;
 	}


@@ -524,7 +524,7 @@ lnet_peer_is_alive(struct lnet_peer *lp, unsigned long now)
 		return 0;
 	deadline = cfs_time_add(lp->lp_last_alive,
-				cfs_time_seconds(lp->lp_ni->ni_peertimeout));
+				lp->lp_ni->ni_peertimeout * HZ);
 	alive = cfs_time_after(deadline, now);
 	/* Update obsolete lp_alive except for routers assumed to be dead
@@ -562,7 +562,7 @@ lnet_peer_alive_locked(struct lnet_peer *lp)
 		unsigned long next_query =
 			cfs_time_add(lp->lp_last_query,
-				     cfs_time_seconds(lnet_queryinterval));
+				     lnet_queryinterval * HZ);
 		if (time_before(now, next_query)) {
 			if (lp->lp_alive)


@@ -315,9 +315,8 @@ drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src,
 			rule->dr_time_base = now;
 		rule->dr_drop_time = rule->dr_time_base +
-				     cfs_time_seconds(
-					prandom_u32_max(attr->u.drop.da_interval));
-		rule->dr_time_base += cfs_time_seconds(attr->u.drop.da_interval);
+				     prandom_u32_max(attr->u.drop.da_interval) * HZ;
+		rule->dr_time_base += attr->u.drop.da_interval * HZ;
 		CDEBUG(D_NET, "Drop Rule %s->%s: next drop : %lu\n",
 		       libcfs_nid2str(attr->fa_src),
@@ -440,8 +439,7 @@ static struct delay_daemon_data delay_dd;
 static unsigned long
 round_timeout(unsigned long timeout)
 {
-	return cfs_time_seconds((unsigned int)
-				cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
+	return (unsigned int)rounddown(timeout, HZ) + HZ;
 }
 static void
@@ -483,10 +481,8 @@ delay_rule_match(struct lnet_delay_rule *rule, lnet_nid_t src,
 			rule->dl_time_base = now;
 		rule->dl_delay_time = rule->dl_time_base +
-				      cfs_time_seconds(
-					prandom_u32_max(
-						attr->u.delay.la_interval));
-		rule->dl_time_base += cfs_time_seconds(attr->u.delay.la_interval);
+				      prandom_u32_max(attr->u.delay.la_interval) * HZ;
+		rule->dl_time_base += attr->u.delay.la_interval * HZ;
 		CDEBUG(D_NET, "Delay Rule %s->%s: next delay : %lu\n",
 		       libcfs_nid2str(attr->fa_src),


@@ -137,7 +137,7 @@ lnet_peer_table_deathrow_wait_locked(struct lnet_peer_table *ptable,
 			       ptable->pt_zombies);
 		}
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1) >> 1);
+		schedule_timeout(HZ >> 1);
 		lnet_net_lock(cpt_locked);
 	}
 }


@@ -808,7 +808,7 @@ lnet_wait_known_routerstate(void)
 			return;
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1));
+		schedule_timeout(HZ);
 	}
 }
@@ -1011,7 +1011,7 @@ lnet_ping_router_locked(struct lnet_peer *rtr)
 	if (secs && !rtr->lp_ping_notsent &&
 	    cfs_time_after(now, cfs_time_add(rtr->lp_ping_timestamp,
-					     cfs_time_seconds(secs)))) {
+					     secs * HZ))) {
 		int rc;
 		struct lnet_process_id id;
 		struct lnet_handle_md mdh;
@@ -1185,7 +1185,7 @@ lnet_prune_rc_data(int wait_unlink)
 		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
 		       "Waiting for rc buffers to unlink\n");
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1) / 4);
+		schedule_timeout(HZ / 4);
 		lnet_net_lock(LNET_LOCK_EX);
 	}
@@ -1282,7 +1282,7 @@ rescan:
 		else
 			wait_event_interruptible_timeout(the_lnet.ln_rc_waitq,
 							 false,
-							 cfs_time_seconds(1));
+							 HZ);
 	}
 	lnet_prune_rc_data(1); /* wait for UNLINK */


@@ -359,7 +359,7 @@ lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout)
 	rc = wait_event_interruptible_timeout(trans->tas_waitq,
 					      lstcon_rpc_trans_check(trans),
-					      cfs_time_seconds(timeout));
+					      timeout * HZ);
 	rc = (rc > 0) ? 0 : ((rc < 0) ? -EINTR : -ETIMEDOUT);
 	mutex_lock(&console_session.ses_mutex);
@@ -1350,7 +1350,7 @@ lstcon_rpc_cleanup_wait(void)
 		CWARN("Session is shutting down, waiting for termination of transactions\n");
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1));
+		schedule_timeout(HZ);
 		mutex_lock(&console_session.ses_mutex);
 	}


@@ -1604,7 +1604,7 @@ srpc_startup(void)
 	/* 1 second pause to avoid timestamp reuse */
 	set_current_state(TASK_UNINTERRUPTIBLE);
-	schedule_timeout(cfs_time_seconds(1));
+	schedule_timeout(HZ);
 	srpc_data.rpc_matchbits = ((__u64)ktime_get_real_seconds()) << 48;
 	srpc_data.rpc_state = SRPC_STATE_NONE;


@@ -575,7 +575,7 @@ swi_state2str(int state)
 #define selftest_wait_events()					\
 	do {							\
 		set_current_state(TASK_UNINTERRUPTIBLE);	\
-		schedule_timeout(cfs_time_seconds(1) / 10);	\
+		schedule_timeout(HZ / 10);			\
 	} while (0)
 #define lst_wait_until(cond, lock, fmt, ...)				\


@@ -177,7 +177,7 @@ stt_timer_main(void *arg)
 		rc = wait_event_timeout(stt_data.stt_waitq,
 					stt_data.stt_shuttingdown,
-					cfs_time_seconds(STTIMER_SLOTTIME));
+					STTIMER_SLOTTIME * HZ);
 	}
 	spin_lock(&stt_data.stt_lock);


@@ -60,7 +60,7 @@ struct obd_device;
 #define OBD_LDLM_DEVICENAME "ldlm"
 #define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
-#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(3900)) /* 65 min */
+#define LDLM_DEFAULT_MAX_ALIVE (65 * 60 * HZ) /* 65 min */
 #define LDLM_DEFAULT_PARALLEL_AST_LIMIT 1024
 /**


@@ -124,7 +124,7 @@ static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck,
 	 */
 	while (unlikely(lck->rpcl_it == MDC_FAKE_RPCL_IT)) {
 		mutex_unlock(&lck->rpcl_mutex);
-		schedule_timeout(cfs_time_seconds(1) / 4);
+		schedule_timeout(HZ / 4);
 		goto again;
 	}


@@ -2262,7 +2262,7 @@ static inline int ptlrpc_send_limit_expired(struct ptlrpc_request *req)
 {
 	if (req->rq_delay_limit != 0 &&
 	    time_before(cfs_time_add(req->rq_queued_time,
-				     cfs_time_seconds(req->rq_delay_limit)),
+				     req->rq_delay_limit * HZ),
 			cfs_time_current())) {
 		return 1;
 	}


@@ -1366,7 +1366,7 @@ out:
 		}
 	}
-	lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
+	lwi = LWI_TIMEOUT_INTR(obd_timeout * HZ,
 			       NULL, LWI_ON_SIGNAL_NOOP, NULL);
 	/* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */


@@ -163,7 +163,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
 	LDLM_DEBUG(lock, "client completion callback handler START");
 	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
-		int to = cfs_time_seconds(1);
+		int to = HZ;
 		while (to > 0) {
 			set_current_state(TASK_INTERRUPTIBLE);
@@ -327,7 +327,7 @@ static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
 	    !lock->l_readers && !lock->l_writers &&
 	    cfs_time_after(cfs_time_current(),
 			   cfs_time_add(lock->l_last_used,
-					cfs_time_seconds(10)))) {
+					10 * HZ))) {
 		unlock_res_and_lock(lock);
 		if (ldlm_bl_to_thread_lock(ns, NULL, lock))
 			ldlm_handle_bl_callback(ns, NULL, lock);


@@ -1008,7 +1008,7 @@ static int ldlm_pools_thread_main(void *arg)
 		 * Wait until the next check time, or until we're
 		 * stopped.
 		 */
-		lwi = LWI_TIMEOUT(cfs_time_seconds(c_time),
+		lwi = LWI_TIMEOUT(c_time * HZ,
 				  NULL, NULL);
 		l_wait_event(thread->t_ctl_waitq,
 			     thread_is_stopping(thread) ||


@@ -288,7 +288,7 @@ noreproc:
 		LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT");
 		lwi = LWI_INTR(interrupted_completion_wait, &lwd);
 	} else {
-		lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
+		lwi = LWI_TIMEOUT_INTR(timeout * HZ,
 				       ldlm_expired_completion_wait,
 				       interrupted_completion_wait, &lwd);
 	}


@@ -799,7 +799,7 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
 			LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
 			if (lock->l_flags & LDLM_FL_FAIL_LOC) {
 				set_current_state(TASK_UNINTERRUPTIBLE);
-				schedule_timeout(cfs_time_seconds(4));
+				schedule_timeout(4 * HZ);
 				set_current_state(TASK_RUNNING);
 			}
 			if (lock->l_completion_ast)


@@ -2026,8 +2026,8 @@ void ll_umount_begin(struct super_block *sb)
 	 * to decrement mnt_cnt and hope to finish it within 10sec.
 	 */
 	init_waitqueue_head(&waitq);
-	lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(10),
-				   cfs_time_seconds(1), NULL, NULL);
+	lwi = LWI_TIMEOUT_INTERVAL(10 * HZ,
+				   HZ, NULL, NULL);
 	l_wait_event(waitq, may_umount(sbi->ll_mnt.mnt), &lwi);
 	schedule();


@@ -1424,7 +1424,7 @@ static int revalidate_statahead_dentry(struct inode *dir,
 		spin_lock(&lli->lli_sa_lock);
 		sai->sai_index_wait = entry->se_index;
 		spin_unlock(&lli->lli_sa_lock);
-		lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
+		lwi = LWI_TIMEOUT_INTR(30 * HZ, NULL,
 				       LWI_ON_SIGNAL_NOOP, NULL);
 		rc = l_wait_event(sai->sai_waitq, sa_ready(entry), &lwi);
 		if (rc < 0) {


@@ -126,8 +126,8 @@ static int lov_check_and_wait_active(struct lov_obd *lov, int ost_idx)
 	mutex_unlock(&lov->lov_lock);
 	init_waitqueue_head(&waitq);
-	lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(obd_timeout),
-				   cfs_time_seconds(1), NULL, NULL);
+	lwi = LWI_TIMEOUT_INTERVAL(obd_timeout * HZ,
+				   HZ, NULL, NULL);
 	rc = l_wait_event(waitq, lov_check_set(lov, ost_idx), &lwi);
 	if (tgt->ltd_active)


@@ -888,7 +888,7 @@ restart_bulk:
 		       exp->exp_obd->obd_name, -EIO);
 		return -EIO;
 	}
-	lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(resends), NULL, NULL,
+	lwi = LWI_TIMEOUT_INTR(resends * HZ, NULL, NULL,
 			       NULL);
 	l_wait_event(waitq, 0, &lwi);


@@ -1628,7 +1628,7 @@ restart:
 	if (rcl == -ESHUTDOWN &&
 	    atomic_read(&mgc->u.cli.cl_mgc_refcount) > 0 && !retry) {
-		int secs = cfs_time_seconds(obd_timeout);
+		int secs = obd_timeout * HZ;
 		struct obd_import *imp;
 		struct l_wait_info lwi;


@@ -1097,7 +1097,7 @@ EXPORT_SYMBOL(cl_sync_io_init);
 int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
 		    long timeout)
 {
-	struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
+	struct l_wait_info lwi = LWI_TIMEOUT_INTR(timeout * HZ,
 						  NULL, NULL, NULL);
 	int rc;


@@ -752,7 +752,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
 		spin_unlock(&ec->ec_lock);
 		CERROR("echo_client still has objects at cleanup time, wait for 1 second\n");
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1));
+		schedule_timeout(HZ);
 		lu_site_purge(env, ed->ed_site, -1);
 		spin_lock(&ec->ec_lock);
 	}


@@ -934,7 +934,7 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
 			   enum osc_extent_state state)
 {
 	struct osc_object *obj = ext->oe_obj;
-	struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(600), NULL,
+	struct l_wait_info lwi = LWI_TIMEOUT_INTR(600 * HZ, NULL,
 						  LWI_ON_SIGNAL_NOOP, NULL);
 	int rc = 0;
@@ -1571,7 +1571,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
 	struct l_wait_info lwi;
 	int rc = -EDQUOT;
-	lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(AT_OFF ? obd_timeout : at_max),
+	lwi = LWI_TIMEOUT_INTR((AT_OFF ? obd_timeout : at_max) * HZ,
 			       NULL, LWI_ON_SIGNAL_NOOP, NULL);
 	OSC_DUMP_GRANT(D_CACHE, cli, "need:%d\n", bytes);


@@ -328,7 +328,7 @@ int osc_object_is_contended(struct osc_object *obj)
 	 * ll_file_is_contended.
 	 */
 	retry_time = cfs_time_add(obj->oo_contention_time,
-				  cfs_time_seconds(osc_contention_time));
+				  osc_contention_time * HZ);
 	if (cfs_time_after(cur_time, retry_time)) {
 		osc_object_clear_contended(obj);
 		return 0;


@@ -766,7 +766,7 @@ int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
 			 * fail_loc
 			 */
 			set_current_state(TASK_UNINTERRUPTIBLE);
-			schedule_timeout(cfs_time_seconds(2));
+			schedule_timeout(2 * HZ);
 			set_current_state(TASK_RUNNING);
 		}
 	}
@@ -2284,7 +2284,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
 			 * We still want to block for a limited time,
 			 * so we allow interrupts during the timeout.
 			 */
-			lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1),
+			lwi = LWI_TIMEOUT_INTR_ALL(HZ,
 						   ptlrpc_expired_set,
 						   ptlrpc_interrupted_set, set);
 		else
@@ -2293,7 +2293,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
 			 * interrupts are allowed. Wait until all
 			 * complete, or an in-flight req times out.
 			 */
-			lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
+			lwi = LWI_TIMEOUT((timeout ? timeout : 1) * HZ,
 					  ptlrpc_expired_set, set);
 		rc = l_wait_event(set->set_waitq, ptlrpc_check_set(NULL, set), &lwi);
@@ -2538,8 +2538,8 @@ static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
 	 * Network access will complete in finite time but the HUGE
 	 * timeout lets us CWARN for visibility of sluggish NALs
 	 */
-	lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
-				   cfs_time_seconds(1), NULL, NULL);
+	lwi = LWI_TIMEOUT_INTERVAL(LONG_UNLINK * HZ,
+				   HZ, NULL, NULL);
 	rc = l_wait_event(*wq, !ptlrpc_client_recv_or_unlink(request),
 			  &lwi);
 	if (rc == 0) {


@@ -517,7 +517,7 @@ static void ptlrpc_ni_fini(void)
 			/* Wait for a bit */
 			init_waitqueue_head(&waitq);
-			lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
+			lwi = LWI_TIMEOUT(2 * HZ, NULL, NULL);
 			l_wait_event(waitq, 0, &lwi);
 			break;
 		}


@@ -307,9 +307,9 @@ void ptlrpc_invalidate_import(struct obd_import *imp)
 		 * have been locally cancelled by ptlrpc_abort_inflight.
 		 */
 		lwi = LWI_TIMEOUT_INTERVAL(
-			cfs_timeout_cap(cfs_time_seconds(timeout)),
-			(timeout > 1) ? cfs_time_seconds(1) :
-			cfs_time_seconds(1) / 2,
+			cfs_timeout_cap(timeout * HZ),
+			(timeout > 1) ? HZ :
+			HZ / 2,
 			NULL, NULL);
 		rc = l_wait_event(imp->imp_recovery_waitq,
 				  (atomic_read(&imp->imp_inflight) == 0),
@@ -431,7 +431,7 @@ void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt)
 int ptlrpc_reconnect_import(struct obd_import *imp)
 {
 	struct l_wait_info lwi;
-	int secs = cfs_time_seconds(obd_timeout);
+	int secs = obd_timeout * HZ;
 	int rc;
 	ptlrpc_pinger_force(imp);
@@ -1508,14 +1508,13 @@ int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
 	if (AT_OFF) {
 		if (imp->imp_server_timeout)
-			timeout = cfs_time_seconds(obd_timeout / 2);
+			timeout = obd_timeout * HZ / 2;
 		else
-			timeout = cfs_time_seconds(obd_timeout);
+			timeout = obd_timeout * HZ;
 	} else {
 		int idx = import_at_get_index(imp,
 					      imp->imp_client->cli_request_portal);
-		timeout = cfs_time_seconds(
-			at_get(&imp->imp_at.iat_service_estimate[idx]));
+		timeout = at_get(&imp->imp_at.iat_service_estimate[idx]) * HZ;
 	}
 	lwi = LWI_TIMEOUT_INTR(cfs_timeout_cap(timeout),


@@ -270,8 +270,8 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
 		/* Network access will complete in finite time but the HUGE
 		 * timeout lets us CWARN for visibility of sluggish LNDs
 		 */
-		lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
-					   cfs_time_seconds(1), NULL, NULL);
+		lwi = LWI_TIMEOUT_INTERVAL(LONG_UNLINK * HZ,
+					   HZ, NULL, NULL);
 		rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi);
 		if (rc == 0) {
 			ptlrpc_rqphase_move(req, req->rq_next_phase);


@@ -267,7 +267,7 @@ lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt)
 		/* If we cannot get anything for some long time, we better
 		 * bail out instead of waiting infinitely
 		 */
-		lwi = LWI_TIMEOUT(cfs_time_seconds(10), NULL, NULL);
+		lwi = LWI_TIMEOUT(10 * HZ, NULL, NULL);
 		rc = l_wait_event(svcpt->scp_rep_waitq,
 				  !list_empty(&svcpt->scp_rep_idle), &lwi);
 		if (rc != 0)


@@ -141,7 +141,7 @@ static long pinger_check_timeout(unsigned long time)
 	}
 	mutex_unlock(&pinger_mutex);
-	return cfs_time_sub(cfs_time_add(time, cfs_time_seconds(timeout)),
+	return cfs_time_sub(cfs_time_add(time, timeout * HZ),
 			    cfs_time_current());
 }
@@ -247,7 +247,7 @@ static int ptlrpc_pinger_main(void *arg)
 			if (imp->imp_pingable && imp->imp_next_ping &&
 			    cfs_time_after(imp->imp_next_ping,
 					   cfs_time_add(this_ping,
-							cfs_time_seconds(PING_INTERVAL))))
+							PING_INTERVAL * HZ)))
 				ptlrpc_update_next_ping(imp, 0);
 		}
 		mutex_unlock(&pinger_mutex);
@@ -264,10 +264,10 @@ static int ptlrpc_pinger_main(void *arg)
 		CDEBUG(D_INFO, "next wakeup in " CFS_DURATION_T " (%ld)\n",
 		       time_to_next_wake,
 		       cfs_time_add(this_ping,
-				    cfs_time_seconds(PING_INTERVAL)));
+				    PING_INTERVAL * HZ));
 		if (time_to_next_wake > 0) {
 			lwi = LWI_TIMEOUT(max_t(long, time_to_next_wake,
-						cfs_time_seconds(1)),
+						HZ),
 					  NULL, NULL);
 			l_wait_event(thread->t_ctl_waitq,
 				     thread_is_stopping(thread) ||


@@ -230,7 +230,7 @@ void ptlrpcd_add_req(struct ptlrpc_request *req)
 	spin_lock(&req->rq_lock);
 	if (req->rq_invalid_rqset) {
-		struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
+		struct l_wait_info lwi = LWI_TIMEOUT(5 * HZ,
 						     back_to_sleep, NULL);
 		req->rq_invalid_rqset = 0;
@@ -438,7 +438,7 @@ static int ptlrpcd(void *arg)
 		int timeout;
 		timeout = ptlrpc_set_next_timeout(set);
-		lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
+		lwi = LWI_TIMEOUT((timeout ? timeout : 1) * HZ,
 				  ptlrpc_expired_set, set);
 		lu_context_enter(&env.le_ctx);


@@ -347,7 +347,7 @@ int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async)
 	if (!async) {
 		struct l_wait_info lwi;
-		int secs = cfs_time_seconds(obd_timeout);
+		int secs = obd_timeout * HZ;
 		CDEBUG(D_HA, "%s: recovery started, waiting %u seconds\n",
 		       obd2cli_tgt(imp->imp_obd), secs);


@@ -2149,7 +2149,7 @@ static int ptlrpc_main(void *arg)
 				 * Wait for a timeout (unless something else
 				 * happens) before I try again
 				 */
-				svcpt->scp_rqbd_timeout = cfs_time_seconds(1) / 10;
+				svcpt->scp_rqbd_timeout = HZ / 10;
 				CDEBUG(D_RPCTRACE, "Posted buffers: %d\n",
 				       svcpt->scp_nrqbds_posted);
 			}
@@ -2588,7 +2588,7 @@ static void ptlrpc_wait_replies(struct ptlrpc_service_part *svcpt)
 {
 	while (1) {
 		int rc;
-		struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(10),
+		struct l_wait_info lwi = LWI_TIMEOUT(10 * HZ,
 						     NULL, NULL);
 		rc = l_wait_event(svcpt->scp_waitq,
@@ -2660,8 +2660,8 @@ ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc)
 			 * of sluggish LNDs
 			 */
 			lwi = LWI_TIMEOUT_INTERVAL(
-					cfs_time_seconds(LONG_UNLINK),
-					cfs_time_seconds(1), NULL, NULL);
+					LONG_UNLINK * HZ,
+					HZ, NULL, NULL);
 			rc = l_wait_event(svcpt->scp_waitq,
 					  svcpt->scp_nrqbds_posted == 0, &lwi);
 			if (rc == -ETIMEDOUT) {