Pull request for 5.3-rc3
- Fix a memory registration release flow issue that was causing a
  WARN_ON (mlx5)
- If the counters for a port aren't allocated, then we can't do
  operations on the non-existent counters (core)
- Check the right variable for the error code result (mlx5)
- Fix a use-after-free issue (mlx5)
- Fix an off-by-one memory leak (siw)
- Actually return an error code on error (core)
- Allow siw to be built on 32-bit arches (siw; an ABI change, but OK
  since siw was only just merged this merge window, there is no prior
  released kernel to maintain compatibility with, and the rdma-core
  user space package was updated to match)

Signed-off-by: Doug Ledford <dledford@redhat.com>

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Doug Ledford:
 "Fairly small pull request for -rc3. I'm out of town the rest of this
  week, so I made sure to clean out as much as possible from patchworks
  in enough time for 0-day to chew through it (yay for 0-day being back
  online!). Jason might send through any emergency stuff that could pop
  up; otherwise I'm back next week.

  The only real thing of note is the siw ABI change. Since we just
  merged siw *this* release, there are no prior kernel releases to
  maintain kernel ABI with. I told Bernard that if there is anything
  else about the siw ABI he thinks he might want to change before it is
  set in stone, he should get it in ASAP. The siw module was around for
  several years outside the kernel tree, and it had to be revamped
  considerably for inclusion upstream, so we are making no attempt to
  be backward compatible with the out-of-tree version. Once 5.3 is
  actually released, we will have our baseline ABI to maintain."
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/siw: Change CQ flags from 64->32 bits
  RDMA/core: Fix error code in stat_get_doit_qp()
  RDMA/siw: Fix a memory leak in siw_init_cpulist()
  IB/mlx5: Fix use-after-free error while accessing ev_file pointer
  IB/mlx5: Check the correct variable in error handling code
  RDMA/counter: Prevent QP counter binding if counters unsupported
  IB/mlx5: Fix implicit MR release flow
commit a8dba0531b
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -38,6 +38,9 @@ int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
 	int ret;
 
 	port_counter = &dev->port_data[port].port_counter;
+	if (!port_counter->hstats)
+		return -EOPNOTSUPP;
+
 	mutex_lock(&port_counter->lock);
 	if (on) {
 		ret = __counter_set_mode(&port_counter->mode,
@@ -509,6 +512,9 @@ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
 	if (!rdma_is_port_valid(dev, port))
 		return -EINVAL;
 
+	if (!dev->port_data[port].port_counter.hstats)
+		return -EOPNOTSUPP;
+
 	qp = rdma_counter_get_qp(dev, qp_num);
 	if (!qp)
 		return -ENOENT;
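Both hunks add the same guard: bail out with -EOPNOTSUPP before touching per-port counters that were never allocated. Below is a minimal userspace C sketch of that pattern; the names (port_counter, counter_set_mode) are illustrative stand-ins, not the kernel's actual types.

#include <errno.h>
#include <stdio.h>

struct port_counter {
	int *hstats;            /* NULL when the device allocated no stats */
	unsigned long packets;
};

static int counter_set_mode(struct port_counter *pc, int on)
{
	if (!pc->hstats)
		return -EOPNOTSUPP; /* refuse before touching counters */

	pc->packets = on ? 1 : 0;
	return 0;
}

int main(void)
{
	struct port_counter pc = { .hstats = 0 };

	printf("set_mode -> %d (expected -%d)\n",
	       counter_set_mode(&pc, 1), EOPNOTSUPP);
	return 0;
}

Checking the feature pointer once at the entry point keeps every later path free to dereference it unconditionally.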
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -1952,12 +1952,16 @@ static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 	if (fill_nldev_handle(msg, device) ||
 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
-	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode))
+	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) {
+		ret = -EMSGSIZE;
 		goto err_msg;
+	}
 
 	if ((mode == RDMA_COUNTER_MODE_AUTO) &&
-	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask))
+	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) {
+		ret = -EMSGSIZE;
+		goto err_msg;
+	}
 
 	nlmsg_end(msg, nlh);
 	ib_device_put(device);
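This is the "actually return an error code on error" fix from the summary: the failure branches jumped to err_msg while ret still held the value of a previous, successful step, so the function reported success on failure. A self-contained sketch of the bug class, with invented names:

#include <errno.h>
#include <stdio.h>

static int fill_message(int attr_fits)
{
	int ret = 0;    /* still 0 from the last successful step */

	if (!attr_fits) {
		ret = -EMSGSIZE;   /* the fix: set the code before jumping */
		goto err_msg;
	}
	return 0;

err_msg:
	/* ... unwind partial work here ... */
	return ret;     /* without the assignment above, this returns 0 */
}

int main(void)
{
	printf("fill_message(0) -> %d (expected -%d)\n",
	       fill_message(0), EMSGSIZE);
	return 0;
}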
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -112,10 +112,6 @@ static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
 	 * prevent any further fault handling on this MR.
 	 */
 	ib_umem_notifier_start_account(umem_odp);
-	umem_odp->dying = 1;
-	/* Make sure that the fact the umem is dying is out before we release
-	 * all pending page faults. */
-	smp_wmb();
 	complete_all(&umem_odp->notifier_completion);
 	umem_odp->umem.context->invalidate_range(
 		umem_odp, ib_umem_start(umem_odp), ib_umem_end(umem_odp));
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2026,7 +2026,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
 			event_sub->eventfd =
 				eventfd_ctx_fdget(redirect_fd);
 
-			if (IS_ERR(event_sub)) {
+			if (IS_ERR(event_sub->eventfd)) {
 				err = PTR_ERR(event_sub->eventfd);
 				event_sub->eventfd = NULL;
 				goto err;
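A classic copy-paste slip: the error test ran on event_sub, which is never an error pointer at this point, instead of the eventfd member the preceding call just assigned, so the failure was silently ignored. The standalone sketch below reimplements kernel-style error pointers (ERR_PTR/PTR_ERR/IS_ERR) minimally for illustration; everything else is hypothetical:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	/* error pointers live in the top MAX_ERRNO addresses */
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct subscription {
	void *eventfd;
};

static void *ctx_fdget(int fd)
{
	return fd < 0 ? ERR_PTR(-EBADF) : (void *)"ctx";
}

int main(void)
{
	struct subscription sub;

	sub.eventfd = ctx_fdget(-1);
	/* Wrong: IS_ERR(&sub) is never true, so the error is missed.
	 * Right: test the pointer the call just produced.           */
	if (IS_ERR(sub.eventfd)) {
		printf("fdget failed: %ld\n", PTR_ERR(sub.eventfd));
		sub.eventfd = NULL;
	}
	return 0;
}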
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2644,12 +2644,13 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
 	struct devx_async_event_file *ev_file = filp->private_data;
 	struct devx_event_subscription *event_sub, *event_sub_tmp;
 	struct devx_async_event_data *entry, *tmp;
+	struct mlx5_ib_dev *dev = ev_file->dev;
 
-	mutex_lock(&ev_file->dev->devx_event_table.event_xa_lock);
+	mutex_lock(&dev->devx_event_table.event_xa_lock);
 	/* delete the subscriptions which are related to this FD */
 	list_for_each_entry_safe(event_sub, event_sub_tmp,
 				 &ev_file->subscribed_events_list, file_list) {
-		devx_cleanup_subscription(ev_file->dev, event_sub);
+		devx_cleanup_subscription(dev, event_sub);
 		if (event_sub->eventfd)
 			eventfd_ctx_put(event_sub->eventfd);
 
@@ -2658,7 +2659,7 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
 		kfree_rcu(event_sub, rcu);
 	}
 
-	mutex_unlock(&ev_file->dev->devx_event_table.event_xa_lock);
+	mutex_unlock(&dev->devx_event_table.event_xa_lock);
 
 	/* free the pending events allocation */
 	if (!ev_file->omit_data) {
@@ -2670,7 +2671,7 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
 	}
 
 	uverbs_close_fd(filp);
-	put_device(&ev_file->dev->ib_dev.dev);
+	put_device(&dev->ib_dev.dev);
 	return 0;
 }
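The use-after-free: uverbs_close_fd() releases ev_file, so the subsequent put_device(&ev_file->dev->...) dereferenced freed memory. The patch caches the device pointer in a local before anything can free the container. The same discipline in a minimal userspace sketch, with illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct device { int refcount; };

struct event_file {
	struct device *dev;
};

static void close_file(struct event_file *ev_file)
{
	free(ev_file);              /* ev_file must not be used after this */
}

int main(void)
{
	struct device d = { .refcount = 1 };
	struct event_file *ev_file = malloc(sizeof(*ev_file));

	if (!ev_file)
		return 1;
	ev_file->dev = &d;

	struct device *dev = ev_file->dev; /* cache before the free */

	close_file(ev_file);
	dev->refcount--;            /* ev_file->dev here would be a UAF */

	printf("refcount now %d\n", d.refcount);
	return 0;
}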
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -579,7 +579,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 			u32 flags)
 {
 	int npages = 0, current_seq, page_shift, ret, np;
-	bool implicit = false;
 	struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
 	bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
 	bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
@@ -594,7 +593,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 		if (IS_ERR(odp))
 			return PTR_ERR(odp);
 		mr = odp->private;
-		implicit = true;
 	} else {
 		odp = odp_mr;
 	}
@@ -682,19 +680,15 @@ next_mr:
 
 out:
 	if (ret == -EAGAIN) {
-		if (implicit || !odp->dying) {
-			unsigned long timeout =
-				msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
+		unsigned long timeout = msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
 
-			if (!wait_for_completion_timeout(
-					&odp->notifier_completion,
-					timeout)) {
-				mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
-					     current_seq, odp->notifiers_seq, odp->notifiers_count);
-			}
-		} else {
-			/* The MR is being killed, kill the QP as well. */
-			ret = -EFAULT;
-		}
+		if (!wait_for_completion_timeout(&odp->notifier_completion,
+						 timeout)) {
+			mlx5_ib_warn(
+				dev,
+				"timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
+				current_seq, odp->notifiers_seq,
+				odp->notifiers_count);
+		}
 	}
 
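With the dying flag gone, the -EAGAIN path always performs the same bounded wait on the notifier completion. A rough userspace analogue of wait_for_completion_timeout() using C11 threads (threads.h needs glibc 2.28 or newer; struct completion and the helper are reimplemented here for illustration, not the kernel's versions):

#include <stdio.h>
#include <threads.h>
#include <time.h>

struct completion {
	mtx_t lock;
	cnd_t cond;
	int done;
};

/* Returns nonzero if completed, 0 on timeout (like the kernel helper). */
static int wait_for_completion_timeout(struct completion *c, int secs)
{
	struct timespec deadline;
	int ok = 1, done;

	timespec_get(&deadline, TIME_UTC);
	deadline.tv_sec += secs;

	mtx_lock(&c->lock);
	while (!c->done && ok)
		ok = (cnd_timedwait(&c->cond, &c->lock, &deadline) ==
		      thrd_success);
	done = c->done;
	mtx_unlock(&c->lock);

	return done;
}

int main(void)
{
	struct completion c = { .done = 0 };

	mtx_init(&c.lock, mtx_plain);
	cnd_init(&c.cond);

	/* nobody signals, so this warns after about one second */
	if (!wait_for_completion_timeout(&c, 1))
		fprintf(stderr, "timeout waiting for completion\n");

	cnd_destroy(&c.cond);
	mtx_destroy(&c.lock);
	return 0;
}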
--- a/drivers/infiniband/sw/siw/Kconfig
+++ b/drivers/infiniband/sw/siw/Kconfig
@@ -1,6 +1,6 @@
 config RDMA_SIW
 	tristate "Software RDMA over TCP/IP (iWARP) driver"
-	depends on INET && INFINIBAND && LIBCRC32C && 64BIT
+	depends on INET && INFINIBAND && LIBCRC32C
 	select DMA_VIRT_OPS
 	help
 	  This driver implements the iWARP RDMA transport over
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -214,7 +214,7 @@ struct siw_wqe {
 struct siw_cq {
	struct ib_cq base_cq;
	spinlock_t lock;
-	u64 *notify;
+	struct siw_cq_ctrl *notify;
	struct siw_cqe *queue;
	u32 cq_put;
	u32 cq_get;
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -160,10 +160,8 @@ static int siw_init_cpulist(void)
 
 out_err:
 	siw_cpu_info.num_nodes = 0;
-	while (i) {
+	while (--i >= 0)
 		kfree(siw_cpu_info.tx_valid_cpus[i]);
-		siw_cpu_info.tx_valid_cpus[i--] = NULL;
-	}
 	kfree(siw_cpu_info.tx_valid_cpus);
 	siw_cpu_info.tx_valid_cpus = NULL;
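The off-by-one leak from the summary: on a partial-allocation failure at index i, the `while (i)` loop stopped before freeing element 0; counting down with `while (--i >= 0)` visits exactly the entries that were successfully allocated. A compact sketch of the same cleanup shape (allocations assumed to succeed for brevity; names are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define N 4

int main(void)
{
	char **slots = calloc(N, sizeof(*slots));
	int i;

	if (!slots)
		return 1;

	/* Simulate the third allocation failing: only 0 and 1 exist. */
	for (i = 0; i < 2; i++)
		slots[i] = malloc(16);

	/*
	 * Buggy form: while (i) { free(slots[i]); slots[i--] = NULL; }
	 * frees slots[2] (NULL, harmless) and slots[1], then stops at
	 * i == 0 and leaks slots[0].  Counting down instead frees
	 * slots[1] and slots[0]:
	 */
	while (--i >= 0)
		free(slots[i]);

	free(slots);
	puts("cleanup done, nothing leaked");
	return 0;
}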
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -1013,18 +1013,24 @@ out:
  */
 static bool siw_cq_notify_now(struct siw_cq *cq, u32 flags)
 {
-	u64 cq_notify;
+	u32 cq_notify;
 
 	if (!cq->base_cq.comp_handler)
 		return false;
 
-	cq_notify = READ_ONCE(*cq->notify);
+	/* Read application shared notification state */
+	cq_notify = READ_ONCE(cq->notify->flags);
 
 	if ((cq_notify & SIW_NOTIFY_NEXT_COMPLETION) ||
 	    ((cq_notify & SIW_NOTIFY_SOLICITED) &&
 	     (flags & SIW_WQE_SOLICITED))) {
-		/* dis-arm CQ */
-		smp_store_mb(*cq->notify, SIW_NOTIFY_NOT);
+		/*
+		 * CQ notification is one-shot: since the
+		 * current CQE causes user notification,
+		 * the CQ gets dis-armed and must be re-armed
+		 * by the user for a new notification.
+		 */
+		WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT);
 
 		return true;
 	}
@@ -1049,7 +1049,7 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
 
 	spin_lock_init(&cq->lock);
 
-	cq->notify = &((struct siw_cq_ctrl *)&cq->queue[size])->notify;
+	cq->notify = (struct siw_cq_ctrl *)&cq->queue[size];
 
 	if (udata) {
 		struct siw_uresp_create_cq uresp = {};
@@ -1141,11 +1141,17 @@ int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags)
 	siw_dbg_cq(cq, "flags: 0x%02x\n", flags);
 
 	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
-		/* CQ event for next solicited completion */
-		smp_store_mb(*cq->notify, SIW_NOTIFY_SOLICITED);
+		/*
+		 * Enable CQ event for next solicited completion,
+		 * and make it visible to all associated producers.
+		 */
+		smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED);
 	else
-		/* CQ event for any signalled completion */
-		smp_store_mb(*cq->notify, SIW_NOTIFY_ALL);
+		/*
+		 * Enable CQ event for any signalled completion,
+		 * and make it visible to all associated producers.
+		 */
+		smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL);
 
 	if (flags & IB_CQ_REPORT_MISSED_EVENTS)
 		return cq->cq_put - cq->cq_get;
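Taken together, the siw_verbs.c hunks move the shared notification word from a bare u64 to the 32-bit flags field of struct siw_cq_ctrl; the arming protocol itself is unchanged. The sketch below models it with C11 atomics (atomic_load/atomic_store stand in for READ_ONCE/WRITE_ONCE/smp_store_mb; flag names and values are invented, this is not siw code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NOTIFY_NOT  0u
#define NOTIFY_NEXT (1u << 0)

static _Atomic uint32_t cq_flags = NOTIFY_NOT;

/* Consumer side: arm the CQ (compare siw_req_notify_cq()). */
static void arm_cq(void)
{
	atomic_store(&cq_flags, NOTIFY_NEXT); /* stand-in for smp_store_mb() */
}

/* Producer side: does this completion notify? (compare siw_cq_notify_now()) */
static bool notify_now(void)
{
	uint32_t f = atomic_load(&cq_flags);  /* stand-in for READ_ONCE() */

	if (f & NOTIFY_NEXT) {
		/* one-shot: dis-arm until the consumer re-arms */
		atomic_store(&cq_flags, NOTIFY_NOT);
		return true;
	}
	return false;
}

int main(void)
{
	arm_cq();
	printf("first completion notifies: %d\n", notify_now()); /* 1 */
	printf("second stays silent:      %d\n", notify_now()); /* 0 */
	return 0;
}

Note the disarm is a plain store, matching the patch's READ_ONCE/WRITE_ONCE pair rather than an atomic exchange.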
--- a/include/uapi/rdma/siw-abi.h
+++ b/include/uapi/rdma/siw-abi.h
@@ -180,6 +180,7 @@ struct siw_cqe {
  * to control CQ arming.
  */
 struct siw_cq_ctrl {
-	__aligned_u64 notify;
+	__u32 flags;
+	__u32 pad;
 };
 #endif
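This is the ABI side of the change: the 64-bit notify word becomes a 32-bit flags field plus explicit padding, so the control structure keeps its 8-byte footprint while the field the CPU must update atomically narrows to a width every 32-bit architecture can store in one instruction, which is what lets the Kconfig hunk drop the 64BIT dependency. A standalone sketch checking the size property (struct names here are illustrative stand-ins for the old and new layouts):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct cq_ctrl_old {
	uint64_t notify;
};

struct cq_ctrl_new {
	uint32_t flags;
	uint32_t pad;
};

int main(void)
{
	/* same footprint, so whatever follows it in the queue is unmoved */
	static_assert(sizeof(struct cq_ctrl_old) == sizeof(struct cq_ctrl_new),
		      "control word must stay 8 bytes");

	printf("old %zu bytes, new %zu bytes; live field now 32 bits\n",
	       sizeof(struct cq_ctrl_old), sizeof(struct cq_ctrl_new));
	return 0;
}

The meaning of the bytes still changed, which is why this counts as an ABI break and why the rdma-core user space package had to be updated in lockstep, as the pull message notes.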