block: add blk_time_get_ns() and blk_time_get() helpers
Convert any user of ktime_get_ns() to use blk_time_get_ns(), and
ktime_get() to blk_time_get(), so that we have a unified API for
querying the current time in nanoseconds or as a ktime. No functional
changes intended; this patch just wraps ktime_get_ns() and ktime_get()
with a block helper.

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 08420cf70c (parent c4e47bbb00)
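For orientation before the diff: the helpers this patch adds to block/blk.h (full hunk at the bottom) are plain pass-through wrappers for now, so every conversion below is mechanical. A sketch of the two helpers and a representative before/after, taken from the hunks in this commit:

	static inline u64 blk_time_get_ns(void)
	{
		return ktime_get_ns();
	}

	static inline ktime_t blk_time_get(void)
	{
		return ns_to_ktime(blk_time_get_ns());
	}

	/* callers convert one-for-one, e.g. in bfq_add_request(): */
	u64 now_ns = blk_time_get_ns();	/* was: ktime_get_ns() */

	/* ktime_to_ns(ktime_get()) callers collapse to the ns helper: */
	u64 now = blk_time_get_ns();	/* was: ktime_to_ns(ktime_get()) */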
block/bfq-cgroup.c

@@ -127,7 +127,7 @@ static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
 	if (!bfqg_stats_waiting(stats))
 		return;
 
-	now = ktime_get_ns();
+	now = blk_time_get_ns();
 	if (now > stats->start_group_wait_time)
 		bfq_stat_add(&stats->group_wait_time,
 			      now - stats->start_group_wait_time);
@@ -144,7 +144,7 @@ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
 		return;
 	if (bfqg == curr_bfqg)
 		return;
-	stats->start_group_wait_time = ktime_get_ns();
+	stats->start_group_wait_time = blk_time_get_ns();
 	bfqg_stats_mark_waiting(stats);
 }
 
@@ -156,7 +156,7 @@ static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
 	if (!bfqg_stats_empty(stats))
 		return;
 
-	now = ktime_get_ns();
+	now = blk_time_get_ns();
 	if (now > stats->start_empty_time)
 		bfq_stat_add(&stats->empty_time,
 			      now - stats->start_empty_time);
@@ -183,7 +183,7 @@ void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
 	if (bfqg_stats_empty(stats))
 		return;
 
-	stats->start_empty_time = ktime_get_ns();
+	stats->start_empty_time = blk_time_get_ns();
 	bfqg_stats_mark_empty(stats);
 }
 
@@ -192,7 +192,7 @@ void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
 	struct bfqg_stats *stats = &bfqg->stats;
 
 	if (bfqg_stats_idling(stats)) {
-		u64 now = ktime_get_ns();
+		u64 now = blk_time_get_ns();
 
 		if (now > stats->start_idle_time)
 			bfq_stat_add(&stats->idle_time,
@@ -205,7 +205,7 @@ void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
 {
 	struct bfqg_stats *stats = &bfqg->stats;
 
-	stats->start_idle_time = ktime_get_ns();
+	stats->start_idle_time = blk_time_get_ns();
 	bfqg_stats_mark_idling(stats);
 }
 
@@ -242,7 +242,7 @@ void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
 				  u64 io_start_time_ns, blk_opf_t opf)
 {
 	struct bfqg_stats *stats = &bfqg->stats;
-	u64 now = ktime_get_ns();
+	u64 now = blk_time_get_ns();
 
 	if (now > io_start_time_ns)
 		blkg_rwstat_add(&stats->service_time, opf,
block/bfq-iosched.c

@@ -1005,7 +1005,7 @@ static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
 
 	rq = rq_entry_fifo(bfqq->fifo.next);
 
-	if (rq == last || ktime_get_ns() < rq->fifo_time)
+	if (rq == last || blk_time_get_ns() < rq->fifo_time)
 		return NULL;
 
 	bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
@@ -1829,7 +1829,7 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
 	 * bfq_bfqq_update_budg_for_activation for
 	 * details on the usage of the next variable.
 	 */
-	arrived_in_time = ktime_get_ns() <=
+	arrived_in_time = blk_time_get_ns() <=
 		bfqq->ttime.last_end_request +
 		bfqd->bfq_slice_idle * 3;
 	unsigned int act_idx = bfq_actuator_index(bfqd, rq->bio);
@@ -2208,7 +2208,7 @@ static void bfq_add_request(struct request *rq)
 	struct request *next_rq, *prev;
 	unsigned int old_wr_coeff = bfqq->wr_coeff;
 	bool interactive = false;
-	u64 now_ns = ktime_get_ns();
+	u64 now_ns = blk_time_get_ns();
 
 	bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
 	bfqq->queued[rq_is_sync(rq)]++;
@@ -2262,7 +2262,7 @@ static void bfq_add_request(struct request *rq)
 	     bfqd->rqs_injected && bfqd->tot_rq_in_driver > 0)) &&
 	    time_is_before_eq_jiffies(bfqq->decrease_time_jif +
 				      msecs_to_jiffies(10))) {
-		bfqd->last_empty_occupied_ns = ktime_get_ns();
+		bfqd->last_empty_occupied_ns = blk_time_get_ns();
 		/*
 		 * Start the state machine for measuring the
 		 * total service time of rq: setting
@@ -3294,7 +3294,7 @@ static void bfq_set_budget_timeout(struct bfq_data *bfqd,
 	else
 		timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
 
-	bfqd->last_budget_start = ktime_get();
+	bfqd->last_budget_start = blk_time_get();
 
 	bfqq->budget_timeout = jiffies +
 		bfqd->bfq_timeout * timeout_coeff;
@@ -3394,7 +3394,7 @@ static void bfq_arm_slice_timer(struct bfq_data *bfqd)
 	else if (bfqq->wr_coeff > 1)
 		sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC);
 
-	bfqd->last_idling_start = ktime_get();
+	bfqd->last_idling_start = blk_time_get();
 	bfqd->last_idling_start_jiffies = jiffies;
 
 	hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
@@ -3433,7 +3433,7 @@ static void bfq_reset_rate_computation(struct bfq_data *bfqd,
 				       struct request *rq)
 {
 	if (rq != NULL) { /* new rq dispatch now, reset accordingly */
-		bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
+		bfqd->last_dispatch = bfqd->first_dispatch = blk_time_get_ns();
 		bfqd->peak_rate_samples = 1;
 		bfqd->sequential_samples = 0;
 		bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
@@ -3590,7 +3590,7 @@ reset_computation:
  */
 static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
 {
-	u64 now_ns = ktime_get_ns();
+	u64 now_ns = blk_time_get_ns();
 
 	if (bfqd->peak_rate_samples == 0) { /* first dispatch */
 		bfq_log(bfqd, "update_peak_rate: goto reset, samples %d",
@@ -4162,7 +4162,7 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 	if (compensate)
 		delta_ktime = bfqd->last_idling_start;
 	else
-		delta_ktime = ktime_get();
+		delta_ktime = blk_time_get();
 	delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
 	delta_usecs = ktime_to_us(delta_ktime);
 
@@ -5591,7 +5591,7 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 			  struct bfq_io_cq *bic, pid_t pid, int is_sync,
 			  unsigned int act_idx)
 {
-	u64 now_ns = ktime_get_ns();
+	u64 now_ns = blk_time_get_ns();
 
 	bfqq->actuator_idx = act_idx;
 	RB_CLEAR_NODE(&bfqq->entity.rb_node);
@@ -5903,7 +5903,7 @@ static void bfq_update_io_thinktime(struct bfq_data *bfqd,
 	 */
 	if (bfqq->dispatched || bfq_bfqq_busy(bfqq))
 		return;
-	elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;
+	elapsed = blk_time_get_ns() - bfqq->ttime.last_end_request;
 	elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);
 
 	ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
@@ -6194,7 +6194,7 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
 	bfq_add_request(rq);
 	idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq);
 
-	rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
+	rq->fifo_time = blk_time_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
 	list_add_tail(&rq->queuelist, &bfqq->fifo);
 
 	bfq_rq_enqueued(bfqd, bfqq, rq);
@@ -6370,7 +6370,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
 		bfq_weights_tree_remove(bfqq);
 	}
 
-	now_ns = ktime_get_ns();
+	now_ns = blk_time_get_ns();
 
 	bfqq->ttime.last_end_request = now_ns;
 
@@ -6585,7 +6585,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
 static void bfq_update_inject_limit(struct bfq_data *bfqd,
 				    struct bfq_queue *bfqq)
 {
-	u64 tot_time_ns = ktime_get_ns() - bfqd->last_empty_occupied_ns;
+	u64 tot_time_ns = blk_time_get_ns() - bfqd->last_empty_occupied_ns;
 	unsigned int old_limit = bfqq->inject_limit;
 
 	if (bfqq->last_serv_time_ns > 0 && bfqd->rqs_injected) {
block/blk-cgroup.c

@@ -1846,7 +1846,7 @@ static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
 {
 	unsigned long pflags;
 	bool clamp;
-	u64 now = ktime_to_ns(ktime_get());
+	u64 now = blk_time_get_ns();
 	u64 exp;
 	u64 delay_nsec = 0;
 	int tok;
block/blk-flush.c

@@ -143,7 +143,7 @@ static void blk_account_io_flush(struct request *rq)
 	part_stat_lock();
 	part_stat_inc(part, ios[STAT_FLUSH]);
 	part_stat_add(part, nsecs[STAT_FLUSH],
-		      ktime_get_ns() - rq->start_time_ns);
+		      blk_time_get_ns() - rq->start_time_ns);
 	part_stat_unlock();
 }
 
block/blk-iocost.c

@@ -829,7 +829,7 @@ static int ioc_autop_idx(struct ioc *ioc, struct gendisk *disk)
 
 	/* step up/down based on the vrate */
 	vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
-	now_ns = ktime_get_ns();
+	now_ns = blk_time_get_ns();
 
 	if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
 		if (!ioc->autop_too_fast_at)
@@ -1044,7 +1044,7 @@ static void ioc_now(struct ioc *ioc, struct ioc_now *now)
 	unsigned seq;
 	u64 vrate;
 
-	now->now_ns = ktime_get();
+	now->now_ns = blk_time_get_ns();
 	now->now = ktime_to_us(now->now_ns);
 	vrate = atomic64_read(&ioc->vtime_rate);
 
@@ -2810,7 +2810,7 @@ static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
 		return;
 	}
 
-	on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
+	on_q_ns = blk_time_get_ns() - rq->alloc_time_ns;
 	rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
 	size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
 
@@ -2893,7 +2893,7 @@ static int blk_iocost_init(struct gendisk *disk)
 	ioc->vtime_base_rate = VTIME_PER_USEC;
 	atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
 	seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
-	ioc->period_at = ktime_to_us(ktime_get());
+	ioc->period_at = ktime_to_us(blk_time_get());
 	atomic64_set(&ioc->cur_period, 0);
 	atomic_set(&ioc->hweight_gen, 0);
 
block/blk-iolatency.c

@@ -609,7 +609,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
 	if (!iolat->blkiolat->enabled)
 		return;
 
-	now = ktime_to_ns(ktime_get());
+	now = blk_time_get_ns();
 	while (blkg && blkg->parent) {
 		iolat = blkg_to_lat(blkg);
 		if (!iolat) {
@@ -661,7 +661,7 @@ static void blkiolatency_timer_fn(struct timer_list *t)
 	struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
 	struct blkcg_gq *blkg;
 	struct cgroup_subsys_state *pos_css;
-	u64 now = ktime_to_ns(ktime_get());
+	u64 now = blk_time_get_ns();
 
 	rcu_read_lock();
 	blkg_for_each_descendant_pre(blkg, pos_css,
@@ -985,7 +985,7 @@ static void iolatency_pd_init(struct blkg_policy_data *pd)
 	struct blkcg_gq *blkg = lat_to_blkg(iolat);
 	struct rq_qos *rqos = iolat_rq_qos(blkg->q);
 	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
-	u64 now = ktime_to_ns(ktime_get());
+	u64 now = blk_time_get_ns();
 	int cpu;
 
 	if (blk_queue_nonrot(blkg->q))
block/blk-mq.c

@@ -322,7 +322,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	RB_CLEAR_NODE(&rq->rb_node);
 	rq->tag = BLK_MQ_NO_TAG;
 	rq->internal_tag = BLK_MQ_NO_TAG;
-	rq->start_time_ns = ktime_get_ns();
+	rq->start_time_ns = blk_time_get_ns();
 	rq->part = NULL;
 	blk_crypto_rq_set_defaults(rq);
 }
@@ -332,7 +332,7 @@ EXPORT_SYMBOL(blk_rq_init);
 static inline void blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns)
 {
 	if (blk_mq_need_time_stamp(rq))
-		rq->start_time_ns = ktime_get_ns();
+		rq->start_time_ns = blk_time_get_ns();
 	else
 		rq->start_time_ns = 0;
 
@@ -443,7 +443,7 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 
 	/* alloc_time includes depth and tag waits */
 	if (blk_queue_rq_alloc_time(q))
-		alloc_time_ns = ktime_get_ns();
+		alloc_time_ns = blk_time_get_ns();
 
 	if (data->cmd_flags & REQ_NOWAIT)
 		data->flags |= BLK_MQ_REQ_NOWAIT;
@@ -628,7 +628,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 
 	/* alloc_time includes depth and tag waits */
 	if (blk_queue_rq_alloc_time(q))
-		alloc_time_ns = ktime_get_ns();
+		alloc_time_ns = blk_time_get_ns();
 
 	/*
 	 * If the tag allocator sleeps we could get an allocation for a
@@ -1041,7 +1041,7 @@ static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
 {
 	if (blk_mq_need_time_stamp(rq))
-		__blk_mq_end_request_acct(rq, ktime_get_ns());
+		__blk_mq_end_request_acct(rq, blk_time_get_ns());
 
 	blk_mq_finish_request(rq);
 
@@ -1084,7 +1084,7 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)
 	u64 now = 0;
 
 	if (iob->need_ts)
-		now = ktime_get_ns();
+		now = blk_time_get_ns();
 
 	while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
 		prefetch(rq->bio);
@@ -1254,7 +1254,7 @@ void blk_mq_start_request(struct request *rq)
 
 	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags) &&
 	    !blk_rq_is_passthrough(rq)) {
-		rq->io_start_time_ns = ktime_get_ns();
+		rq->io_start_time_ns = blk_time_get_ns();
 		rq->stats_sectors = blk_rq_sectors(rq);
 		rq->rq_flags |= RQF_STATS;
 		rq_qos_issue(q, rq);
@@ -3104,7 +3104,7 @@ blk_status_t blk_insert_cloned_request(struct request *rq)
 	blk_mq_run_dispatch_ops(q,
 			ret = blk_mq_request_issue_directly(rq, true));
 	if (ret)
-		blk_account_io_done(rq, ktime_get_ns());
+		blk_account_io_done(rq, blk_time_get_ns());
 	return ret;
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
block/blk-throttle.c

@@ -1815,7 +1815,7 @@ static bool throtl_tg_is_idle(struct throtl_grp *tg)
 	time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
 	ret = tg->latency_target == DFL_LATENCY_TARGET ||
 	      tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
-	      (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
+	      (blk_time_get_ns() >> 10) - tg->last_finish_time > time ||
 	      tg->avg_idletime > tg->idletime_threshold ||
 	      (tg->latency_target && tg->bio_cnt &&
 	       tg->bad_bio_cnt * 5 < tg->bio_cnt);
@@ -2060,7 +2060,7 @@ static void blk_throtl_update_idletime(struct throtl_grp *tg)
 	if (last_finish_time == 0)
 		return;
 
-	now = ktime_get_ns() >> 10;
+	now = blk_time_get_ns() >> 10;
 	if (now <= last_finish_time ||
 	    last_finish_time == tg->checked_last_finish_time)
 		return;
@@ -2327,7 +2327,7 @@ void blk_throtl_bio_endio(struct bio *bio)
 	if (!tg->td->limit_valid[LIMIT_LOW])
 		return;
 
-	finish_time_ns = ktime_get_ns();
+	finish_time_ns = blk_time_get_ns();
 	tg->last_finish_time = finish_time_ns >> 10;
 
 	start_time = bio_issue_time(&bio->bi_issue) >> 10;
block/blk-wbt.c

@@ -29,6 +29,7 @@
 #include "blk-wbt.h"
 #include "blk-rq-qos.h"
 #include "elevator.h"
+#include "blk.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/wbt.h>
@@ -274,13 +275,12 @@ static inline bool stat_sample_valid(struct blk_rq_stat *stat)
 
 static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
 {
-	u64 now, issue = READ_ONCE(rwb->sync_issue);
+	u64 issue = READ_ONCE(rwb->sync_issue);
 
 	if (!issue || !rwb->sync_cookie)
 		return 0;
 
-	now = ktime_to_ns(ktime_get());
-	return now - issue;
+	return blk_time_get_ns() - issue;
 }
 
 static inline unsigned int wbt_inflight(struct rq_wb *rwb)
block/blk.h

@@ -4,6 +4,7 @@
 
 #include <linux/blk-crypto.h>
 #include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
+#include <linux/timekeeping.h>
 #include <xen/xen.h>
 #include "blk-crypto-internal.h"
 
@@ -516,6 +517,16 @@ static inline int req_ref_read(struct request *req)
 	return atomic_read(&req->ref);
 }
 
+static inline u64 blk_time_get_ns(void)
+{
+	return ktime_get_ns();
+}
+
+static inline ktime_t blk_time_get(void)
+{
+	return ns_to_ktime(blk_time_get_ns());
+}
+
 /*
  * From most significant bit:
  * 1 bit: reserved for other usage, see below
@@ -554,7 +565,7 @@ static inline void bio_issue_init(struct bio_issue *issue,
 {
 	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
 	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
-			(ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
+			(blk_time_get_ns() & BIO_ISSUE_TIME_MASK) |
 			((u64)size << BIO_ISSUE_SIZE_SHIFT));
 }
 
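A side note on the design: blk_time_get() is defined as ns_to_ktime(blk_time_get_ns()) rather than wrapping ktime_get() directly, so every block-layer time query funnels through the single blk_time_get_ns() entry point. One plausible follow-on (not part of this commit; the plug field and task flag below are assumed names for illustration) is caching the timestamp for the duration of a plugged batch to avoid repeated clock reads on hot paths:

	/* sketch only: cur_ktime in struct blk_plug and PF_BLOCK_TS are
	 * assumptions, not introduced by this patch */
	static inline u64 blk_time_get_ns(void)
	{
		struct blk_plug *plug = current->plug;

		if (!plug)
			return ktime_get_ns();

		/* first query in this plug window reads the clock and caches it */
		if (!plug->cur_ktime) {
			plug->cur_ktime = ktime_get_ns();
			current->flags |= PF_BLOCK_TS;	/* tells plug flush to clear the cache */
		}
		return plug->cur_ktime;
	}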