blktrace: Use the new blk_opf_t type
Improve static type checking by using the new blk_opf_t type for a function
argument that represents a combination of a request operation and request
flags. Rename that argument from 'op' into 'opf' to make its role more clear.

Cc: Christoph Hellwig <hch@lst.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20220714180729.1065367-12-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 22c80aac88
commit 919dbca867
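For context, blk_opf_t is the sparse __bitwise typedef introduced earlier in this series (in include/linux/blk_types.h), so passing a bare integer where a combined operation/flags value is expected now draws a sparse warning. The sketch below illustrates that mechanism only; submit_opf() and the flag values are made up for illustration, and solely the typedef shape and the __bitwise/__force pattern mirror the kernel's definitions.

/* Userspace analogue of the kernel's __bitwise annotation: only sparse
 * (__CHECKER__) sees the attribute, regular compilers see a plain typedef. */
#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise blk_opf_t;

/* Illustrative values only, not the kernel's actual bit layout. */
#define REQ_OP_WRITE	((__force blk_opf_t)1)
#define REQ_SYNC	((__force blk_opf_t)(1u << 11))

/* Hypothetical consumer: sparse complains if a bare int is passed here. */
static void submit_opf(blk_opf_t opf) { (void)opf; }

int main(void)
{
	submit_opf(REQ_OP_WRITE | REQ_SYNC);	/* ok: both operands are blk_opf_t */
	/* submit_opf(123); */			/* would trigger a sparse warning  */
	return 0;
}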
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -7,6 +7,7 @@
 #include <linux/compat.h>
 #include <uapi/linux/blktrace_api.h>
 #include <linux/list.h>
+#include <linux/blk_types.h>
 
 #if defined(CONFIG_BLK_DEV_IO_TRACE)
 
@@ -105,7 +106,7 @@ struct compat_blk_user_trace_setup {
 
 #endif
 
-void blk_fill_rwbs(char *rwbs, unsigned int op);
+void blk_fill_rwbs(char *rwbs, blk_opf_t opf);
 
 static inline sector_t blk_rq_trace_sector(struct request *rq)
 {
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -205,7 +205,7 @@ static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
 #define BLK_TC_PREFLUSH		BLK_TC_FLUSH
 
 /* The ilog2() calls fall out because they're constant */
-#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
+#define MASK_TC_BIT(rw, __name) ((__force u32)(rw & REQ_ ## __name) << \
 		  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
 
 /*
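The __force u32 cast added above is what lets the __bitwise opf value be shifted into the plain u32 'what' action mask without a sparse warning. Schematically, using the BLK_TC_*/__REQ_* names this file already relies on, MASK_TC_BIT(opf, SYNC) expands to:

((__force u32)(opf & REQ_SYNC) <<
	(ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT - __REQ_SYNC))

i.e. the REQ_SYNC bit is relocated from its request-flag position to the BLK_TC_SYNC position inside the trace action word.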
@@ -213,8 +213,8 @@ static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
  * blk_io_trace structure and places it in a per-cpu subbuffer.
  */
 static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
-		     int op, int op_flags, u32 what, int error, int pdu_len,
-		     void *pdu_data, u64 cgid)
+		     const blk_opf_t opf, u32 what, int error,
+		     int pdu_len, void *pdu_data, u64 cgid)
 {
 	struct task_struct *tsk = current;
 	struct ring_buffer_event *event = NULL;
@@ -227,16 +227,17 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	int cpu;
 	bool blk_tracer = blk_tracer_enabled;
 	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
+	const enum req_op op = opf & REQ_OP_MASK;
 
 	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
 		return;
 
 	what |= ddir_act[op_is_write(op) ? WRITE : READ];
-	what |= MASK_TC_BIT(op_flags, SYNC);
-	what |= MASK_TC_BIT(op_flags, RAHEAD);
-	what |= MASK_TC_BIT(op_flags, META);
-	what |= MASK_TC_BIT(op_flags, PREFLUSH);
-	what |= MASK_TC_BIT(op_flags, FUA);
+	what |= MASK_TC_BIT(opf, SYNC);
+	what |= MASK_TC_BIT(opf, RAHEAD);
+	what |= MASK_TC_BIT(opf, META);
+	what |= MASK_TC_BIT(opf, PREFLUSH);
+	what |= MASK_TC_BIT(opf, FUA);
 	if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
 		what |= BLK_TC_ACT(BLK_TC_DISCARD);
 	if (op == REQ_OP_FLUSH)
@@ -842,9 +843,8 @@ static void blk_add_trace_rq(struct request *rq, blk_status_t error,
 	else
 		what |= BLK_TC_ACT(BLK_TC_FS);
 
-	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
-			rq->cmd_flags, what, blk_status_to_errno(error), 0,
-			NULL, cgid);
+	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, rq->cmd_flags,
+			what, blk_status_to_errno(error), 0, NULL, cgid);
 	rcu_read_unlock();
 }
 
@@ -903,7 +903,7 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
 	}
 
 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-			bio_op(bio), bio->bi_opf, what, error, 0, NULL,
+			bio->bi_opf, what, error, 0, NULL,
 			blk_trace_bio_get_cgid(q, bio));
 	rcu_read_unlock();
 }
@@ -949,7 +949,7 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
 	rcu_read_lock();
 	bt = rcu_dereference(q->blk_trace);
 	if (bt)
-		__blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
+		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
 	rcu_read_unlock();
 }
 
@@ -969,7 +969,7 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
 		else
 			what = BLK_TA_UNPLUG_TIMER;
 
-		__blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
+		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
 	}
 	rcu_read_unlock();
 }
@@ -985,8 +985,7 @@ static void blk_add_trace_split(void *ignore, struct bio *bio, unsigned int pdu)
 		__be64 rpdu = cpu_to_be64(pdu);
 
 		__blk_add_trace(bt, bio->bi_iter.bi_sector,
-				bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
-				BLK_TA_SPLIT,
+				bio->bi_iter.bi_size, bio->bi_opf, BLK_TA_SPLIT,
 				blk_status_to_errno(bio->bi_status),
 				sizeof(rpdu), &rpdu,
 				blk_trace_bio_get_cgid(q, bio));
@@ -1022,7 +1021,7 @@ static void blk_add_trace_bio_remap(void *ignore, struct bio *bio, dev_t dev,
 	r.sector_from = cpu_to_be64(from);
 
 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-			bio_op(bio), bio->bi_opf, BLK_TA_REMAP,
+			bio->bi_opf, BLK_TA_REMAP,
 			blk_status_to_errno(bio->bi_status),
 			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
 	rcu_read_unlock();
@@ -1058,7 +1057,7 @@ static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev,
 	r.sector_from = cpu_to_be64(from);
 
 	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
-			req_op(rq), rq->cmd_flags, BLK_TA_REMAP, 0,
+			rq->cmd_flags, BLK_TA_REMAP, 0,
 			sizeof(r), &r, blk_trace_request_get_cgid(rq));
 	rcu_read_unlock();
 }
@@ -1084,7 +1083,7 @@ void blk_add_driver_data(struct request *rq, void *data, size_t len)
 		return;
 	}
 
-	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
+	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0,
 			BLK_TA_DRV_DATA, 0, len, data,
 			blk_trace_request_get_cgid(rq));
 	rcu_read_unlock();
@@ -1881,14 +1880,14 @@ out:
  * caller with resulting string.
  *
  **/
-void blk_fill_rwbs(char *rwbs, unsigned int op)
+void blk_fill_rwbs(char *rwbs, blk_opf_t opf)
 {
 	int i = 0;
 
-	if (op & REQ_PREFLUSH)
+	if (opf & REQ_PREFLUSH)
 		rwbs[i++] = 'F';
 
-	switch (op & REQ_OP_MASK) {
+	switch (opf & REQ_OP_MASK) {
 	case REQ_OP_WRITE:
 		rwbs[i++] = 'W';
 		break;
@@ -1909,13 +1908,13 @@ void blk_fill_rwbs(char *rwbs, unsigned int op)
 		rwbs[i++] = 'N';
 	}
 
-	if (op & REQ_FUA)
+	if (opf & REQ_FUA)
 		rwbs[i++] = 'F';
-	if (op & REQ_RAHEAD)
+	if (opf & REQ_RAHEAD)
 		rwbs[i++] = 'A';
-	if (op & REQ_SYNC)
+	if (opf & REQ_SYNC)
 		rwbs[i++] = 'S';
-	if (op & REQ_META)
+	if (opf & REQ_META)
 		rwbs[i++] = 'M';
 
 	rwbs[i] = '\0';
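As a usage illustration of the updated signature (a hypothetical caller; the 8-byte buffer follows the RWBS_LEN convention used by the block tracepoints):

char rwbs[8];	/* RWBS_LEN */

/* A synchronous FUA write: operation and flags travel in one blk_opf_t. */
blk_fill_rwbs(rwbs, REQ_OP_WRITE | REQ_SYNC | REQ_FUA);
/* rwbs now holds "WFS": 'W' for the write op, then 'F' (FUA) and 'S' (sync). */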