Mirror of https://github.com/torvalds/linux.git
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "A set of fixes for this series. This contains:

   - Set of fixes for the nvme target code

   - A revert of a patch from this merge window, causing a regression
     with WRITE_SAME on iSCSI targets at least.

   - A fix for a use-after-free in the new O_DIRECT bdev code.

   - Two fixes for the xen-blkfront driver"

* 'for-linus' of git://git.kernel.dk/linux-block:
  Revert "sd: remove __data_len hack for WRITE SAME"
  nvme-fc: use blk_rq_nr_phys_segments
  nvmet-rdma: Fix missing dma sync to nvme data structures
  nvmet: Call fatal_error from keep-alive timout expiration
  nvmet: cancel fatal error and flush async work before free controller
  nvmet: delete controllers deletion upon subsystem release
  nvmet_fc: correct logic in disconnect queue LS handling
  block: fix use after free in __blkdev_direct_IO
  xen-blkfront: correct maximum segment accounting
  xen-blkfront: feature flags handling adjustments
commit 2fb78e8940
@@ -197,13 +197,13 @@ struct blkfront_info
 	/* Number of pages per ring buffer. */
 	unsigned int nr_ring_pages;
 	struct request_queue *rq;
-	unsigned int feature_flush;
-	unsigned int feature_fua;
+	unsigned int feature_flush:1;
+	unsigned int feature_fua:1;
 	unsigned int feature_discard:1;
 	unsigned int feature_secdiscard:1;
+	unsigned int feature_persistent:1;
 	unsigned int discard_granularity;
 	unsigned int discard_alignment;
-	unsigned int feature_persistent:1;
 	/* Number of 4KB segments handled */
 	unsigned int max_indirect_segments;
 	int is_ready;
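The flush and FUA flags above become single-bit fields. One consequence, which the blkfront_gather_backend_features hunk further down accounts for with the `!!` conversion, is that assigning an arbitrary xenstore value to a `:1` field only keeps its lowest bit. A minimal userspace sketch of that truncation (not kernel code, names and values are illustrative):

#include <stdio.h>

struct features {
	unsigned int flush:1;
	unsigned int fua:1;
};

int main(void)
{
	struct features f = { 0 };

	/* A backend could report e.g. "2"; without normalization only
	 * bit 0 survives the 1-bit field. */
	unsigned int raw = 2;

	f.flush = raw;		/* stores 0: only the low bit is kept */
	f.fua = !!raw;		/* stores 1: value normalized to 0/1 first */

	printf("flush=%u fua=%u\n", f.flush, f.fua);
	return 0;
}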
@@ -2223,7 +2223,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
 	}
 	else
 		grants = info->max_indirect_segments;
-	psegs = grants / GRANTS_PER_PSEG;
+	psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG);

 	err = fill_grant_buffer(rinfo,
 				(grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
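The segment-accounting fix replaces a truncating division with a round-up: when grants is not an exact multiple of GRANTS_PER_PSEG, plain division under-counts the segments that must be covered. A small sketch of the difference, using a DIV_ROUND_UP macro written the same way as the kernel's (the numbers are made up for illustration):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int grants = 11, grants_per_pseg = 4;

	printf("truncating: %u\n", grants / grants_per_pseg);			/* 2 */
	printf("round up:   %u\n", DIV_ROUND_UP(grants, grants_per_pseg));	/* 3 */
	return 0;
}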
@@ -2323,13 +2323,16 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
 		blkfront_setup_discard(info);

 	info->feature_persistent =
-		xenbus_read_unsigned(info->xbdev->otherend,
+		!!xenbus_read_unsigned(info->xbdev->otherend,
 				     "feature-persistent", 0);

 	indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
 					"feature-max-indirect-segments", 0);
-	info->max_indirect_segments = min(indirect_segments,
-					  xen_blkif_max_segments);
+	if (indirect_segments > xen_blkif_max_segments)
+		indirect_segments = xen_blkif_max_segments;
+	if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
+		indirect_segments = 0;
+	info->max_indirect_segments = indirect_segments;
 }

 /*
@@ -2652,6 +2655,9 @@ static int __init xlblk_init(void)
 	if (!xen_domain())
 		return -ENODEV;

+	if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
+		xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
 	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
 		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
 			xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
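Taken together, the feature-gathering and module-init hunks above clamp the indirect-segment count from both sides: module init raises the xen_blkif_max_segments parameter to at least BLKIF_MAX_SEGMENTS_PER_REQUEST, and feature gathering caps the backend's advertised value at that maximum while treating anything at or below the base per-request limit as "no indirect segments". A hedged userspace sketch of that clamping logic (the constant value and names are placeholders, not the Xen headers):

#include <stdio.h>

#define BLKIF_MAX_SEGMENTS_PER_REQUEST	11	/* placeholder value */

static unsigned int clamp_indirect_segments(unsigned int backend,
					    unsigned int module_max)
{
	/* the module parameter is never allowed below the base limit */
	if (module_max < BLKIF_MAX_SEGMENTS_PER_REQUEST)
		module_max = BLKIF_MAX_SEGMENTS_PER_REQUEST;

	if (backend > module_max)
		backend = module_max;
	/* indirect descriptors only help beyond the base request size */
	if (backend <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
		backend = 0;
	return backend;
}

int main(void)
{
	printf("%u\n", clamp_indirect_segments(256, 32));	/* 32 */
	printf("%u\n", clamp_indirect_segments(8, 32));		/* 0  */
	return 0;
}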
@@ -1663,13 +1663,13 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
 		return 0;

 	freq->sg_table.sgl = freq->first_sgl;
-	ret = sg_alloc_table_chained(&freq->sg_table, rq->nr_phys_segments,
-			freq->sg_table.sgl);
+	ret = sg_alloc_table_chained(&freq->sg_table,
+			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
 	if (ret)
 		return -ENOMEM;

 	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
-	WARN_ON(op->nents > rq->nr_phys_segments);
+	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
 	dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
 				op->nents, dir);
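The nvme-fc change stops reading rq->nr_phys_segments directly and goes through the blk_rq_nr_phys_segments() accessor, whose usual purpose is to also account for requests that carry a driver-built payload rather than segments derived from the bio, so the SG table is sized correctly for those. A rough userspace model of that distinction (a sketch of the idea only, not the real block-layer definition):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct request: the real fields and flags differ. */
struct request {
	unsigned short nr_phys_segments;
	bool has_special_payload;	/* e.g. a driver-provided buffer */
};

static unsigned short rq_nr_phys_segments(const struct request *rq)
{
	/* A special payload contributes one segment even though the
	 * bio-derived segment count is zero. */
	if (rq->has_special_payload)
		return 1;
	return rq->nr_phys_segments;
}

int main(void)
{
	struct request normal = { .nr_phys_segments = 4 };
	struct request special = { .nr_phys_segments = 0,
				   .has_special_payload = true };

	printf("%u %u\n", rq_nr_phys_segments(&normal),
	       rq_nr_phys_segments(&special));	/* 4 1 */
	return 0;
}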
@@ -631,6 +631,7 @@ static void nvmet_subsys_release(struct config_item *item)
 {
 	struct nvmet_subsys *subsys = to_subsys(item);

+	nvmet_subsys_del_ctrls(subsys);
 	nvmet_subsys_put(subsys);
 }

@@ -200,7 +200,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
 	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
 		ctrl->cntlid, ctrl->kato);

-	ctrl->ops->delete_ctrl(ctrl);
+	nvmet_ctrl_fatal_error(ctrl);
 }

 static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
@@ -816,6 +816,9 @@ static void nvmet_ctrl_free(struct kref *ref)
 	list_del(&ctrl->subsys_entry);
 	mutex_unlock(&subsys->lock);

+	flush_work(&ctrl->async_event_work);
+	cancel_work_sync(&ctrl->fatal_err_work);
+
 	ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
 	nvmet_subsys_put(subsys);

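The two added calls make sure the controller's asynchronous-event work has finished and the fatal-error work can no longer run before the controller memory is released, so neither work item can touch freed state. A minimal pthread sketch of the same ordering rule, waiting for a worker before freeing what it uses (purely illustrative userspace code, no kernel APIs, build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ctrl {
	int id;
};

static void *event_work(void *arg)
{
	struct ctrl *c = arg;

	/* pretend to process a pending event that dereferences *c */
	printf("handling event for ctrl %d\n", c->id);
	return NULL;
}

int main(void)
{
	struct ctrl *c = malloc(sizeof(*c));
	pthread_t worker;

	c->id = 1;
	pthread_create(&worker, NULL, event_work, c);

	/* Equivalent of flush_work()/cancel_work_sync(): wait until the
	 * worker can no longer touch *c before freeing it. */
	pthread_join(worker, NULL);
	free(c);
	return 0;
}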
@@ -935,6 +938,16 @@ static void nvmet_subsys_free(struct kref *ref)
 	kfree(subsys);
 }

+void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
+{
+	struct nvmet_ctrl *ctrl;
+
+	mutex_lock(&subsys->lock);
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+		ctrl->ops->delete_ctrl(ctrl);
+	mutex_unlock(&subsys->lock);
+}
+
 void nvmet_subsys_put(struct nvmet_subsys *subsys)
 {
 	kref_put(&subsys->ref, nvmet_subsys_free);
@@ -1314,7 +1314,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
 			(struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
 	struct fcnvme_ls_disconnect_acc *acc =
 			(struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
-	struct nvmet_fc_tgt_queue *queue;
+	struct nvmet_fc_tgt_queue *queue = NULL;
 	struct nvmet_fc_tgt_assoc *assoc;
 	int ret = 0;
 	bool del_assoc = false;
@@ -1348,7 +1348,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
 		assoc = nvmet_fc_find_target_assoc(tgtport,
 				be64_to_cpu(rqst->associd.association_id));
 		iod->assoc = assoc;
-		if (!assoc)
+		if (assoc) {
+			if (rqst->discon_cmd.scope ==
+					FCNVME_DISCONN_CONNECTION) {
+				queue = nvmet_fc_find_target_queue(tgtport,
+						be64_to_cpu(
+							rqst->discon_cmd.id));
+				if (!queue) {
+					nvmet_fc_tgt_a_put(assoc);
+					ret = VERR_NO_CONN;
+				}
+			}
+		} else
 			ret = VERR_NO_ASSOC;
 	}

@@ -1373,21 +1384,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
 			FCNVME_LS_DISCONNECT);


-	if (rqst->discon_cmd.scope == FCNVME_DISCONN_CONNECTION) {
-		queue = nvmet_fc_find_target_queue(tgtport,
-				be64_to_cpu(rqst->discon_cmd.id));
-		if (queue) {
-			int qid = queue->qid;
+	/* are we to delete a Connection ID (queue) */
+	if (queue) {
+		int qid = queue->qid;

 		nvmet_fc_delete_target_queue(queue);

 		/* release the get taken by find_target_queue */
 		nvmet_fc_tgt_q_put(queue);

 		/* tear association down if io queue terminated */
 		if (!qid)
 			del_assoc = true;
-		}
 	}

 	/* release get taken in nvmet_fc_find_target_assoc */
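The reworked disconnect handling moves the queue lookup into the validation branch: a connection-scoped request resolves its queue while the association reference is still held, records VERR_NO_CONN when it is missing, and the later teardown only has to test the already-looked-up queue pointer. A condensed userspace sketch of that control flow, with invented names standing in for the FC target structures:

#include <stddef.h>
#include <stdio.h>

enum { OK, NO_ASSOC, NO_CONN };

struct queue { int qid; };

static int disconnect(struct queue *assoc_queue, int have_assoc,
		      int connection_scope)
{
	struct queue *queue = NULL;
	int ret = OK;

	if (have_assoc) {
		if (connection_scope) {
			queue = assoc_queue;	/* lookup while assoc is valid */
			if (!queue)
				ret = NO_CONN;
		}
	} else {
		ret = NO_ASSOC;
	}
	if (ret != OK)
		return ret;

	if (queue)	/* teardown path only checks the cached pointer */
		printf("delete queue %d\n", queue->qid);
	return OK;
}

int main(void)
{
	struct queue q = { .qid = 0 };

	printf("%d\n", disconnect(&q, 1, 1));	/* OK, deletes queue 0 */
	printf("%d\n", disconnect(NULL, 1, 1));	/* NO_CONN */
	printf("%d\n", disconnect(NULL, 0, 0));	/* NO_ASSOC */
	return 0;
}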
@@ -282,6 +282,7 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
 struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
 		enum nvme_subsys_type type);
 void nvmet_subsys_put(struct nvmet_subsys *subsys);
+void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

 struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
 void nvmet_put_namespace(struct nvmet_ns *ns);
@@ -438,6 +438,10 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
 {
 	struct ib_recv_wr *bad_wr;

+	ib_dma_sync_single_for_device(ndev->device,
+		cmd->sge[0].addr, cmd->sge[0].length,
+		DMA_FROM_DEVICE);
+
 	if (ndev->srq)
 		return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
 	return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
@@ -538,6 +542,11 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
 		first_wr = &rsp->send_wr;

 	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
+
+	ib_dma_sync_single_for_device(rsp->queue->dev->device,
+		rsp->send_sge.addr, rsp->send_sge.length,
+		DMA_TO_DEVICE);
+
 	if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
 		pr_err("sending cmd response failed\n");
 		nvmet_rdma_release_rsp(rsp);
|
|||||||
cmd->n_rdma = 0;
|
cmd->n_rdma = 0;
|
||||||
cmd->req.port = queue->port;
|
cmd->req.port = queue->port;
|
||||||
|
|
||||||
|
|
||||||
|
ib_dma_sync_single_for_cpu(queue->dev->device,
|
||||||
|
cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
|
||||||
|
DMA_FROM_DEVICE);
|
||||||
|
ib_dma_sync_single_for_cpu(queue->dev->device,
|
||||||
|
cmd->send_sge.addr, cmd->send_sge.length,
|
||||||
|
DMA_TO_DEVICE);
|
||||||
|
|
||||||
if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
|
if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
|
||||||
&queue->nvme_sq, &nvmet_rdma_ops))
|
&queue->nvme_sq, &nvmet_rdma_ops))
|
||||||
return;
|
return;
|
||||||
|
@@ -836,6 +836,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
 	struct bio *bio = rq->bio;
 	sector_t sector = blk_rq_pos(rq);
 	unsigned int nr_sectors = blk_rq_sectors(rq);
+	unsigned int nr_bytes = blk_rq_bytes(rq);
 	int ret;

 	if (sdkp->device->no_write_same)
|
|||||||
|
|
||||||
cmd->transfersize = sdp->sector_size;
|
cmd->transfersize = sdp->sector_size;
|
||||||
cmd->allowed = SD_MAX_RETRIES;
|
cmd->allowed = SD_MAX_RETRIES;
|
||||||
return scsi_init_io(cmd);
|
|
||||||
|
/*
|
||||||
|
* For WRITE SAME the data transferred via the DATA OUT buffer is
|
||||||
|
* different from the amount of data actually written to the target.
|
||||||
|
*
|
||||||
|
* We set up __data_len to the amount of data transferred via the
|
||||||
|
* DATA OUT buffer so that blk_rq_map_sg sets up the proper S/G list
|
||||||
|
* to transfer a single sector of data first, but then reset it to
|
||||||
|
* the amount of data to be written right after so that the I/O path
|
||||||
|
* knows how much to actually write.
|
||||||
|
*/
|
||||||
|
rq->__data_len = sdp->sector_size;
|
||||||
|
ret = scsi_init_io(cmd);
|
||||||
|
rq->__data_len = nr_bytes;
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
|
static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
|
||||||
|
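The revert restores the save/override/restore dance around scsi_init_io(): the request length is temporarily set to one sector so the S/G list maps only the single sector of DATA OUT payload, then put back so the rest of the I/O path still sees the full number of bytes being written. A generic userspace sketch of that pattern (field and function names are illustrative, not the SCSI layer's):

#include <stdio.h>

struct req {
	unsigned int data_len;	/* bytes the I/O path thinks are in flight */
};

/* Stand-in for scsi_init_io(): builds the S/G list from data_len. */
static int map_payload(struct req *rq)
{
	printf("mapping %u bytes of payload\n", rq->data_len);
	return 0;
}

static int setup_write_same(struct req *rq, unsigned int sector_size)
{
	unsigned int nr_bytes = rq->data_len;	/* remember the real length */
	int ret;

	rq->data_len = sector_size;	/* map only one sector of DATA OUT */
	ret = map_payload(rq);
	rq->data_len = nr_bytes;	/* restore before completion accounting */
	return ret;
}

int main(void)
{
	struct req rq = { .data_len = 8 * 512 };

	setup_write_same(&rq, 512);
	printf("request still covers %u bytes\n", rq.data_len);
	return 0;
}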
@@ -331,7 +331,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	struct blk_plug plug;
 	struct blkdev_dio *dio;
 	struct bio *bio;
-	bool is_read = (iov_iter_rw(iter) == READ);
+	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
 	loff_t pos = iocb->ki_pos;
 	blk_qc_t qc = BLK_QC_T_NONE;
 	int ret;
@@ -344,7 +344,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	bio_get(bio); /* extra ref for the completion handler */

 	dio = container_of(bio, struct blkdev_dio, bio);
-	dio->is_sync = is_sync_kiocb(iocb);
+	dio->is_sync = is_sync = is_sync_kiocb(iocb);
 	if (dio->is_sync)
 		dio->waiter = current;
 	else
@@ -398,7 +398,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	}
 	blk_finish_plug(&plug);

-	if (!dio->is_sync)
+	if (!is_sync)
 		return -EIOCBQUEUED;

 	for (;;) {
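The use-after-free fix caches is_sync in a local before the I/O is submitted, so the tail of the function never reads dio->is_sync again once the request may already have completed and dropped the last reference to the dio. A single-threaded userspace sketch of the pattern, where "submission" simulates async completion by freeing the object immediately (names are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct dio {
	bool is_sync;
};

/* Stand-in for bio submission: for async I/O, completion may already
 * have run (and freed the dio) by the time this returns. */
static void submit(struct dio *dio)
{
	if (!dio->is_sync)
		free(dio);
}

int main(void)
{
	struct dio *dio = malloc(sizeof(*dio));
	bool is_sync;

	dio->is_sync = false;
	is_sync = dio->is_sync;	/* cache before submission, like the fix */

	submit(dio);

	/* Safe: decide on the local copy, never on the possibly-freed dio. */
	if (!is_sync)
		printf("queued asynchronously\n");
	return 0;
}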