block: kill request ->cpu member

This was used for completion placement for the legacy path,
but for mq we have rq->mq_ctx->cpu for that. Add a helper
to get the request CPU assignment, as the mq_ctx type is
private to blk-mq.

Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Jens Axboe 2018-10-31 17:01:22 -06:00
parent c7bb9ad174
commit 9cf2bab630
9 changed files with 12 additions and 23 deletions

View File

@ -145,7 +145,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
INIT_LIST_HEAD(&rq->queuelist); INIT_LIST_HEAD(&rq->queuelist);
INIT_LIST_HEAD(&rq->timeout_list); INIT_LIST_HEAD(&rq->timeout_list);
rq->cpu = -1;
rq->q = q; rq->q = q;
rq->__sector = (sector_t) -1; rq->__sector = (sector_t) -1;
INIT_HLIST_NODE(&rq->hash); INIT_HLIST_NODE(&rq->hash);
@ -1770,7 +1769,6 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
*/ */
static void __blk_rq_prep_clone(struct request *dst, struct request *src) static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{ {
dst->cpu = src->cpu;
dst->__sector = blk_rq_pos(src); dst->__sector = blk_rq_pos(src);
dst->__data_len = blk_rq_bytes(src); dst->__data_len = blk_rq_bytes(src);
if (src->rq_flags & RQF_SPECIAL_PAYLOAD) { if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {

View File

@ -806,8 +806,6 @@ static struct request *attempt_merge(struct request_queue *q,
blk_account_io_merge(next); blk_account_io_merge(next);
req->ioprio = ioprio_best(req->ioprio, next->ioprio); req->ioprio = ioprio_best(req->ioprio, next->ioprio);
if (blk_rq_cpu_valid(next))
req->cpu = next->cpu;
/* /*
* ownership of bio passed from next to req, return 'next' for * ownership of bio passed from next to req, return 'next' for

View File

@ -297,7 +297,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->q = data->q; rq->q = data->q;
rq->mq_ctx = data->ctx; rq->mq_ctx = data->ctx;
rq->rq_flags = rq_flags; rq->rq_flags = rq_flags;
rq->cpu = -1;
rq->cmd_flags = op; rq->cmd_flags = op;
if (data->flags & BLK_MQ_REQ_PREEMPT) if (data->flags & BLK_MQ_REQ_PREEMPT)
rq->rq_flags |= RQF_PREEMPT; rq->rq_flags |= RQF_PREEMPT;
@ -3282,6 +3281,12 @@ static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
return __blk_mq_poll(hctx, rq); return __blk_mq_poll(hctx, rq);
} }
/**
 * blk_mq_rq_cpu - return the CPU this request was assigned to
 * @rq: request to query
 *
 * Exposes the softirq/completion CPU for @rq to callers outside blk-mq
 * (e.g. SCSI LLDs), since the struct blk_mq_ctx type behind rq->mq_ctx
 * is private to blk-mq and cannot be dereferenced by drivers directly.
 */
unsigned int blk_mq_rq_cpu(struct request *rq)
{
return rq->mq_ctx->cpu;
}
EXPORT_SYMBOL(blk_mq_rq_cpu);
static int __init blk_mq_init(void) static int __init blk_mq_init(void)
{ {
cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL, cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,

View File

@ -98,7 +98,7 @@ static int blk_softirq_cpu_dead(unsigned int cpu)
void __blk_complete_request(struct request *req) void __blk_complete_request(struct request *req)
{ {
struct request_queue *q = req->q; struct request_queue *q = req->q;
int cpu, ccpu = q->mq_ops ? req->mq_ctx->cpu : req->cpu; int cpu, ccpu = req->mq_ctx->cpu;
unsigned long flags; unsigned long flags;
bool shared = false; bool shared = false;

View File

@ -1906,7 +1906,6 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
struct iscsi_task *task; struct iscsi_task *task;
struct scsi_cmnd *sc; struct scsi_cmnd *sc;
int rc = 0; int rc = 0;
int cpu;
spin_lock(&session->back_lock); spin_lock(&session->back_lock);
task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data, task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
@ -1917,14 +1916,9 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
} }
sc = task->sc; sc = task->sc;
if (!blk_rq_cpu_valid(sc->request))
cpu = smp_processor_id();
else
cpu = sc->request->cpu;
spin_unlock(&session->back_lock); spin_unlock(&session->back_lock);
p = &per_cpu(bnx2i_percpu, cpu); p = &per_cpu(bnx2i_percpu, blk_mq_rq_cpu(sc->request));
spin_lock(&p->p_work_lock); spin_lock(&p->p_work_lock);
if (unlikely(!p->iothread)) { if (unlikely(!p->iothread)) {
rc = -EINVAL; rc = -EINVAL;

View File

@ -1780,16 +1780,10 @@ csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
int nsge = 0; int nsge = 0;
int rv = SCSI_MLQUEUE_HOST_BUSY, nr; int rv = SCSI_MLQUEUE_HOST_BUSY, nr;
int retval; int retval;
int cpu;
struct csio_scsi_qset *sqset; struct csio_scsi_qset *sqset;
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
if (!blk_rq_cpu_valid(cmnd->request)) sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(cmnd->request)];
cpu = smp_processor_id();
else
cpu = cmnd->request->cpu;
sqset = &hw->sqset[ln->portid][cpu];
nr = fc_remote_port_chkready(rport); nr = fc_remote_port_chkready(rport);
if (nr) { if (nr) {

View File

@ -1460,7 +1460,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
goto eh_reset_failed; goto eh_reset_failed;
} }
err = 2; err = 2;
if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1) if (do_reset(fcport, cmd->device->lun, blk_mq_rq_cpu(cmd->request) + 1)
!= QLA_SUCCESS) { != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800c, ql_log(ql_log_warn, vha, 0x800c,
"do_reset failed for cmd=%p.\n", cmd); "do_reset failed for cmd=%p.\n", cmd);

View File

@ -300,6 +300,8 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
void blk_mq_quiesce_queue_nowait(struct request_queue *q); void blk_mq_quiesce_queue_nowait(struct request_queue *q);
unsigned int blk_mq_rq_cpu(struct request *rq);
/** /**
* blk_mq_mark_complete() - Set request state to complete * blk_mq_mark_complete() - Set request state to complete
* @rq: request to set to complete state * @rq: request to set to complete state

View File

@ -130,7 +130,6 @@ struct request {
struct request_queue *q; struct request_queue *q;
struct blk_mq_ctx *mq_ctx; struct blk_mq_ctx *mq_ctx;
int cpu;
unsigned int cmd_flags; /* op and common flags */ unsigned int cmd_flags; /* op and common flags */
req_flags_t rq_flags; req_flags_t rq_flags;
@ -669,7 +668,6 @@ static inline bool blk_account_rq(struct request *rq)
return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq); return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
} }
#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
#define blk_bidi_rq(rq) ((rq)->next_rq != NULL) #define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)