block-5.15-2021-09-17
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmFEiqgQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgppsyEACPxyCgNBhiRukleT1fyOcKW7W3AlEKGnvS
VnU4YadVGNaFgqQircD3eivB0BRsm5C5JmWOwTaLDyC2kL3SsiXczSDN9QFsibiB
3CkuE/enH/8//9GE7e+UgG1JHF526zI05ABTNL3vtiu2d/VvRsB+LmCkyni600wL
33uogXG7ruXBIydF4/IYj5K5zsJA6IRmkn8EALEpOuO87MpI8BwL9heKOl8Tt6Hi
H1vODm8ej4o1dIgvs0Q6VNZ+jxt6bxK8s1W8kkF0mbmH/XEksvX2HlSp2f8KnRQG
Ti6J0VLh07MMa1AbUkGiAjZ7BjnzBv5X2/EFDNQJa6lJ361rys7LBvR2qjYAeTLP
0rA3suEyvRPmO17ON+ELOZrYXZ3lpHotMoi1vYjXlAg6/mcWOXINUSqW5+G0BeDS
SA2EvbVNs+kmdqeKoGEU3KCt5zcaNgMqGzGXJEqeBwccVOa0qHxIBWYRTT/BgPdf
6qN+lxW1dKokDfTlHJffr/xNGpsLS1cF8nAnmI/3NeVoWePR8tAj2KFvMvuOFL1H
VJrxZA28gV131hjK9SJrW5xScdH/k3e4Rvan8/SXAW9IKIvaWrO+foZAr1cdEZu4
KUz1L2F+IHaW4XXPXf2LZPzYhOXZS0AU1uAmwhaOaWKQB9H2HkWxOoad24Ao18pT
NrTYxDEU1w==
=A2Zd
-----END PGP SIGNATURE-----

Merge tag 'block-5.15-2021-09-17' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - NVMe pull request via Christoph:
     - fix ANA state updates when a namespace is not present (Anton Eidelman)
     - nvmet: fix a width vs precision bug in nvmet_subsys_attr_serial_show (Dan Carpenter)
     - avoid race in shutdown namespace removal (Daniel Wagner)
     - fix io_work priority inversion in nvme-tcp (Keith Busch)
     - destroy cm id before destroy qp to avoid use after free (Ruozhu Li)

 - blk-integrity profile registration fixes (Christoph, Lihong)

 - blk-cgroup UAF fix (Li)

 - blk-mq tag iterator fix (Ming)

 - blkcg memory leak fix (Yanfei)

* tag 'block-5.15-2021-09-17' of git://git.kernel.dk/linux-block:
  blk-cgroup: fix UAF by grabbing blkcg lock before destroying blkg pd
  blkcg: fix memory leak in blk_iolatency_init
  nvme: remove the call to nvme_update_disk_info in nvme_ns_remove
  block: flush the integrity workqueue in blk_integrity_unregister
  block: check if a profile is actually registered in blk_integrity_unregister
  nvme-tcp: fix io_work priority inversion
  nvme-rdma: destroy cm id before destroy qp to avoid use after free
  nvme-multipath: fix ANA state updates when a namespace is not present
  nvme: avoid race in shutdown namespace removal
  nvmet: fix a width vs precision bug in nvmet_subsys_attr_serial_show()
  blk-mq: avoid to iterate over stale request
commit 36d6753bc2
@@ -1182,10 +1182,6 @@ int blkcg_init_queue(struct request_queue *q)
 	if (preloaded)
 		radix_tree_preload_end();
 
-	ret = blk_iolatency_init(q);
-	if (ret)
-		goto err_destroy_all;
-
 	ret = blk_ioprio_init(q);
 	if (ret)
 		goto err_destroy_all;
@@ -1194,6 +1190,12 @@ int blkcg_init_queue(struct request_queue *q)
 	if (ret)
 		goto err_destroy_all;
 
+	ret = blk_iolatency_init(q);
+	if (ret) {
+		blk_throtl_exit(q);
+		goto err_destroy_all;
+	}
+
 	return 0;
 
 err_destroy_all:
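The two hunks above are the blkcg memory-leak fix in block/blk-cgroup.c: blk_iolatency_init() moves to the end of blkcg_init_queue(), so no later step can fail and strand its allocation, and its own failure now unwinds blk_throtl_init() explicitly before taking the common error path. A minimal userspace sketch of that unwinding rule, with illustrative names rather than the kernel API:

#include <stdbool.h>

static bool ioprio_init(void)    { return true; }
static bool throtl_init(void)    { return true; }
static bool iolatency_init(void) { return true; }  /* allocates state */
static void throtl_exit(void)    { /* undo throtl_init() */ }
static void destroy_all(void)    { /* common unwind for earlier steps */ }

static int init_queue_policies(void)
{
        if (!ioprio_init())
                goto err_destroy_all;
        if (!throtl_init())
                goto err_destroy_all;
        /*
         * Last step: nothing after it can fail and leak what it
         * allocated, but the common unwind below does not cover
         * throtl_init(), so its failure must undo that explicitly.
         */
        if (!iolatency_init()) {
                throtl_exit();
                goto err_destroy_all;
        }
        return 0;

err_destroy_all:
        destroy_all();
        return -1;
}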
@@ -1364,10 +1366,14 @@ enomem:
 	/* alloc failed, nothing's initialized yet, free everything */
 	spin_lock_irq(&q->queue_lock);
 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
+		struct blkcg *blkcg = blkg->blkcg;
+
+		spin_lock(&blkcg->lock);
 		if (blkg->pd[pol->plid]) {
 			pol->pd_free_fn(blkg->pd[pol->plid]);
 			blkg->pd[pol->plid] = NULL;
 		}
+		spin_unlock(&blkcg->lock);
 	}
 	spin_unlock_irq(&q->queue_lock);
 	ret = -ENOMEM;
@@ -1399,12 +1405,16 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	__clear_bit(pol->plid, q->blkcg_pols);
 
 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
+		struct blkcg *blkcg = blkg->blkcg;
+
+		spin_lock(&blkcg->lock);
 		if (blkg->pd[pol->plid]) {
 			if (pol->pd_offline_fn)
 				pol->pd_offline_fn(blkg->pd[pol->plid]);
 			pol->pd_free_fn(blkg->pd[pol->plid]);
 			blkg->pd[pol->plid] = NULL;
 		}
+		spin_unlock(&blkcg->lock);
 	}
 
 	spin_unlock_irq(&q->queue_lock);
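Both blk-cgroup hunks apply the same UAF fix: the per-blkcg lock is now taken, nested inside the queue lock, around freeing a policy's per-(cgroup, queue) data, so a concurrent blkcg_destroy_blkgs() cannot free the blkg pd underneath the iteration. A userspace sketch of the nesting, with pthread spinlocks standing in for the kernel's spinlocks and all names illustrative:

#include <pthread.h>
#include <stddef.h>

struct blkcg { pthread_spinlock_t lock; };
struct blkg  { struct blkcg *blkcg; void *pd; };

static void pd_free(void *pd) { (void)pd; /* free policy data */ }

static void deactivate_policy(struct blkg **blkgs, int n,
                              pthread_spinlock_t *queue_lock)
{
        pthread_spin_lock(queue_lock);           /* outer: queue lock */
        for (int i = 0; i < n; i++) {
                struct blkg *blkg = blkgs[i];
                struct blkcg *blkcg = blkg->blkcg;

                /* inner: cgroup lock serializes against cgroup teardown */
                pthread_spin_lock(&blkcg->lock);
                if (blkg->pd) {
                        pd_free(blkg->pd);
                        blkg->pd = NULL;
                }
                pthread_spin_unlock(&blkcg->lock);
        }
        pthread_spin_unlock(queue_lock);
}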
@@ -426,8 +426,15 @@ EXPORT_SYMBOL(blk_integrity_register);
  */
 void blk_integrity_unregister(struct gendisk *disk)
 {
+	struct blk_integrity *bi = &disk->queue->integrity;
+
+	if (!bi->profile)
+		return;
+
+	/* ensure all bios are off the integrity workqueue */
+	blk_flush_integrity();
 	blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
-	memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
+	memset(bi, 0, sizeof(*bi));
 }
 EXPORT_SYMBOL(blk_integrity_unregister);
 
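This hunk combines the two blk-integrity fixes in block/blk-integrity.c: unregistering becomes a no-op when no profile was ever registered, and pending integrity work is flushed before the profile is zeroed so no queued bio can touch cleared state. A userspace sketch of the pattern, with illustrative names:

#include <string.h>

struct integrity { const void *profile; /* NULL => never registered */ };

static void flush_pending_work(void) { /* drain the work queue */ }

static void integrity_unregister(struct integrity *bi)
{
        if (!bi->profile)       /* nothing registered: no-op */
                return;

        /*
         * Drain asynchronous users first (the integrity workqueue in
         * the kernel) so none can dereference the state cleared below.
         */
        flush_pending_work();
        memset(bi, 0, sizeof(*bi));
}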
@@ -208,7 +208,7 @@ static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
 
 	spin_lock_irqsave(&tags->lock, flags);
 	rq = tags->rqs[bitnr];
-	if (!rq || !refcount_inc_not_zero(&rq->ref))
+	if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
 		rq = NULL;
 	spin_unlock_irqrestore(&tags->lock, flags);
 	return rq;
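The blk-mq fix above hardens the tag iterator: tags->rqs[bitnr] may still hold a stale pointer from a request whose tag has since been released and reused, so the lookup now also requires rq->tag == bitnr before taking a reference. A userspace sketch of the "reference only if still live and still owning the slot" pattern, with C11 atomics standing in for refcount_inc_not_zero(); names are illustrative, and the caller is assumed to hold the lock protecting rqs[], mirroring tags->lock:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct req {
        atomic_int ref;  /* 0 => request is being freed */
        int tag;         /* tag slot this request currently owns */
};

/* Equivalent of refcount_inc_not_zero(). */
static bool ref_inc_not_zero(atomic_int *ref)
{
        int old = atomic_load(ref);

        while (old != 0)
                if (atomic_compare_exchange_weak(ref, &old, old + 1))
                        return true;
        return false;
}

static struct req *find_and_get_req(struct req **rqs, int bitnr)
{
        struct req *rq = rqs[bitnr];

        /* rq->tag != bitnr => the slot holds a stale leftover pointer */
        if (!rq || rq->tag != bitnr || !ref_inc_not_zero(&rq->ref))
                return NULL;
        return rq;
}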
@@ -3524,7 +3524,9 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
 	lockdep_assert_held(&subsys->lock);
 
 	list_for_each_entry(h, &subsys->nsheads, entry) {
-		if (h->ns_id == nsid && nvme_tryget_ns_head(h))
+		if (h->ns_id != nsid)
+			continue;
+		if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
 			return h;
 	}
 
@@ -3843,6 +3845,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 
 	mutex_lock(&ns->ctrl->subsys->lock);
 	list_del_rcu(&ns->siblings);
+	if (list_empty(&ns->head->list)) {
+		list_del_init(&ns->head->entry);
+		last_path = true;
+	}
 	mutex_unlock(&ns->ctrl->subsys->lock);
 
 	/* guarantee not available in head->list */
@@ -3856,20 +3862,11 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 	nvme_cdev_del(&ns->cdev, &ns->cdev_device);
 	del_gendisk(ns->disk);
 	blk_cleanup_queue(ns->queue);
-	if (blk_get_integrity(ns->disk))
-		blk_integrity_unregister(ns->disk);
 
 	down_write(&ns->ctrl->namespaces_rwsem);
 	list_del_init(&ns->list);
 	up_write(&ns->ctrl->namespaces_rwsem);
 
-	/* Synchronize with nvme_init_ns_head() */
-	mutex_lock(&ns->head->subsys->lock);
-	if (list_empty(&ns->head->list)) {
-		list_del_init(&ns->head->entry);
-		last_path = true;
-	}
-	mutex_unlock(&ns->head->subsys->lock);
 	if (last_path)
 		nvme_mpath_shutdown_disk(ns->head);
 	nvme_put_ns(ns);
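Together with the nvme_find_ns_head() hunk further up, these two hunks close the shutdown race: detaching the ns_head from the subsystem now happens in the same subsys->lock critical section that unlinks the namespace, instead of later after queue teardown, and lookups skip heads whose path list is already empty. A compressed userspace sketch of the rule, with a pthread mutex standing in for subsys->lock and illustrative names:

#include <pthread.h>
#include <stdbool.h>

struct head { int nr_paths; bool linked; };

static pthread_mutex_t subsys_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns true when the caller removed the last path. */
static bool ns_remove(struct head *h)
{
        bool last_path = false;

        pthread_mutex_lock(&subsys_lock);
        h->nr_paths--;                 /* list_del_rcu(&ns->siblings) */
        if (h->nr_paths == 0) {        /* list_empty(&ns->head->list) */
                h->linked = false;     /* list_del_init(&ns->head->entry) */
                last_path = true;
        }
        pthread_mutex_unlock(&subsys_lock);
        return last_path;              /* caller shuts down the mpath disk */
}

/* Lookup under the same lock never returns a head losing its last path. */
static struct head *find_head(struct head *h)
{
        struct head *ret = NULL;

        pthread_mutex_lock(&subsys_lock);
        if (h->linked && h->nr_paths > 0)
                ret = h;
        pthread_mutex_unlock(&subsys_lock);
        return ret;
}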
@@ -600,14 +600,17 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
 		return 0;
 
 	down_read(&ctrl->namespaces_rwsem);
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		unsigned nsid = le32_to_cpu(desc->nsids[n]);
+		unsigned nsid;
+again:
+		nsid = le32_to_cpu(desc->nsids[n]);
 		if (ns->head->ns_id < nsid)
 			continue;
 		if (ns->head->ns_id == nsid)
 			nvme_update_ns_ana_state(desc, ns);
 		if (++n == nr_nsids)
 			break;
+		if (ns->head->ns_id > nsid)
+			goto again;
 	}
 	up_read(&ctrl->namespaces_rwsem);
 	return 0;
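The multipath hunk fixes the sorted two-list walk in nvme_update_ana_state(): the old code advanced through the descriptor's NSID array in lockstep with the namespace list, so an NSID with no attached namespace desynchronized the walk and every later namespace was compared against the wrong NSID. The goto re-checks the same namespace against the next NSID. A standalone demo of the corrected walk over plain arrays (values are made up):

#include <stdio.h>

int main(void)
{
        unsigned ns_ids[] = { 1, 3, 4 };    /* namespaces present, sorted */
        unsigned nsids[]  = { 1, 2, 3, 4 }; /* ANA descriptor NSIDs, sorted */
        unsigned nr_nsids = 4, n = 0;

        for (unsigned i = 0; i < 3; i++) {
                unsigned nsid;
again:
                nsid = nsids[n];
                if (ns_ids[i] < nsid)
                        continue;       /* namespace not in this descriptor */
                if (ns_ids[i] == nsid)
                        printf("update ANA state of nsid %u\n", nsid);
                if (++n == nr_nsids)
                        break;
                if (ns_ids[i] > nsid)
                        goto again;     /* NSID 2 has no namespace: skip it */
        }
        return 0;
}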
@@ -656,8 +656,8 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
 	if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
 		return;
 
-	nvme_rdma_destroy_queue_ib(queue);
 	rdma_destroy_id(queue->cm_id);
+	nvme_rdma_destroy_queue_ib(queue);
 	mutex_destroy(&queue->queue_lock);
 }
 
@@ -1815,14 +1815,10 @@ static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
 	for (i = 0; i < queue->queue_size; i++) {
 		ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
 		if (ret)
-			goto out_destroy_queue_ib;
+			return ret;
 	}
 
 	return 0;
-
-out_destroy_queue_ib:
-	nvme_rdma_destroy_queue_ib(queue);
-	return ret;
 }
 
 static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
@@ -1916,14 +1912,10 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
 	if (ret) {
 		dev_err(ctrl->ctrl.device,
 			"rdma_connect_locked failed (%d).\n", ret);
-		goto out_destroy_queue_ib;
+		return ret;
 	}
 
 	return 0;
-
-out_destroy_queue_ib:
-	nvme_rdma_destroy_queue_ib(queue);
-	return ret;
 }
 
 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
@@ -1954,8 +1946,6 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
 	case RDMA_CM_EVENT_ROUTE_ERROR:
 	case RDMA_CM_EVENT_CONNECT_ERROR:
 	case RDMA_CM_EVENT_UNREACHABLE:
-		nvme_rdma_destroy_queue_ib(queue);
-		fallthrough;
 	case RDMA_CM_EVENT_ADDR_ERROR:
 		dev_dbg(queue->ctrl->ctrl.device,
 			"CM error event %d\n", ev->event);
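The four nvme-rdma hunks are one fix: the CM ID is the source of asynchronous CM events whose handlers touch the QP, so nvme_rdma_free_queue() now destroys the CM ID first and becomes the only place that destroys the IB queue, replacing the scattered out_destroy_queue_ib error paths that could free it a second time. A minimal sketch of the ordering rule, with illustrative names rather than the RDMA CM API:

struct queue {
        void *cm_id;  /* event source: handlers may touch qp */
        void *qp;     /* resources referenced by event handlers */
};

static void destroy_cm_id(struct queue *q) { q->cm_id = 0; /* no more events */ }
static void destroy_qp(struct queue *q)    { q->qp = 0; }

static void free_queue(struct queue *q)
{
        /*
         * Destroy the event source first: once it returns, no handler
         * can run, so freeing the QP cannot race with one, and doing
         * it only here means it can never be freed twice.
         */
        destroy_cm_id(q);
        destroy_qp(q);
}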
@@ -274,6 +274,12 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
 	} while (ret > 0);
 }
 
+static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+{
+	return !list_empty(&queue->send_list) ||
+		!llist_empty(&queue->req_list) || queue->more_requests;
+}
+
 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 		bool sync, bool last)
 {
@@ -294,9 +300,10 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 		nvme_tcp_send_all(queue);
 		queue->more_requests = false;
 		mutex_unlock(&queue->send_mutex);
-	} else if (last) {
-		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
 	}
+
+	if (last && nvme_tcp_queue_more(queue))
+		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
 }
 
 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
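These two hunks, plus the two that follow, are the io_work priority-inversion fix: nvme_tcp_queue_more() moves up so nvme_tcp_queue_request() can use it, and the io worker is kicked only when work is actually left over after an inline send attempt, rather than unconditionally whenever the send mutex was busy. A userspace sketch of the queueing rule, with pthread trylock standing in for mutex_trylock and illustrative names:

#include <pthread.h>
#include <stdbool.h>

struct queue {
        pthread_mutex_t send_mutex;
        _Atomic int pending;  /* stand-in for send_list/req_list */
};

static void send_all(struct queue *q)     { q->pending = 0; }
static bool queue_more(struct queue *q)   { return q->pending > 0; }
static void kick_io_work(struct queue *q) { (void)q; /* schedule worker */ }

static void queue_request(struct queue *q, bool sync, bool last)
{
        q->pending++;
        if (sync && pthread_mutex_trylock(&q->send_mutex) == 0) {
                send_all(q);  /* direct, in-context send */
                pthread_mutex_unlock(&q->send_mutex);
        }
        /*
         * Kick the worker only if work is actually left over; kicking
         * it unconditionally let the worker thread monopolize the
         * send path and starve the direct (caller-context) sends.
         */
        if (last && queue_more(q))
                kick_io_work(q);
}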
@@ -906,12 +913,6 @@ done:
 	read_unlock_bh(&sk->sk_callback_lock);
 }
 
-static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
-{
-	return !list_empty(&queue->send_list) ||
-		!llist_empty(&queue->req_list) || queue->more_requests;
-}
-
 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
 {
 	queue->request = NULL;
@@ -1145,8 +1146,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
 				pending = true;
 			else if (unlikely(result < 0))
 				break;
-		} else
-			pending = !llist_empty(&queue->req_list);
+		}
 
 		result = nvme_tcp_try_recv(queue);
 		if (result > 0)
@@ -1067,7 +1067,7 @@ static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
 {
 	struct nvmet_subsys *subsys = to_subsys(item);
 
-	return snprintf(page, PAGE_SIZE, "%*s\n",
+	return snprintf(page, PAGE_SIZE, "%.*s\n",
 			NVMET_SN_MAX_SIZE, subsys->serial);
 }
 
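The nvmet hunk is the classic width-vs-precision printf bug: "%*s" takes a field width, which pads short strings but never limits how far the read runs, while "%.*s" takes a precision, which caps the bytes printed; that cap is what makes it safe for subsys->serial, a fixed-size buffer that need not be NUL-terminated. A standalone demo with stand-in values:

#include <stdio.h>

int main(void)
{
        char serial[20] = "SN1234";  /* fixed-size, possibly unterminated */

        /* Width: pads to 20 columns, but reads until a NUL byte. */
        printf("[%*s]\n", 20, serial);   /* "[              SN1234]" */

        /* Precision: prints at most 20 bytes, stopping early at a NUL. */
        printf("[%.*s]\n", 20, serial);  /* "[SN1234]" */
        return 0;
}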