v5.18 first rc pull request
Several bug fixes for old bugs:

 - Welcome Leon as co-maintainer for RDMA so we are back to having two people

 - Some corner cases are fixed in mlx5's MR code

 - Long standing CM bug where a DREQ at the wrong time can result in a long timeout

 - Missing locking and refcounting in hfi1

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEfB7FMLh+8QxL+6i3OG33FX4gmxoFAmJQgxoACgkQOG33FX4g
mxo+tA//W/DRl+ok8TexnWqhAbiKHx2IHpJQJIZVUdbi1y7gzaKbahJoXLm/rMJR
6RMQuvVotY5UWNMBBm0EBojklV2BoWOLYFCKmH/UPHXFlMkfSFW2GiO3mr+DmfyW
g8rWEcyx/MeJhaBpkrgUf+HdO7TIy8xJOMfYo4+Z8x8AHGJZ+ULFIxST5Z1IZV3D
j63AQgo4/qI2RlPQS58VfpihkZ+VDB7Q1whJBa87cqjM3R+Ll82DDYgvRpHFzRyA
Ql2Rwjo/La8diyS/uIdN4SR5EhLfxvoE/n5KJ//7FUv7rPEaslt7uUVmLm0cqlEs
gnQkj2hOrSzBUXBPC/H/bohyOe49g1iQQOLGC41tThgBWNT/XAJMQOeK0DB8kci/
bk8lmRJHZhRqMX5kG8DTkJZHfOC0rwJbA1X+50frLSYCK8ksjlAJfVUb2enVVn7h
TWUfwiL5R14mO/6nWhNRS9K7/oeweZpysLXFr97Gcb0WIUTIqfhWVIwZc04KK09u
vreaNpRiZjtgJd60NhXxSJ3CT4A6hAla1/0S3Spxa07rn9M4anZcOM/r6Ktmv5MQ
LAvt3OAfPWKIwTuD5BSRfFjawyKWKo1j5u/gVny16z8b9Y/pEBDole30X41+64aW
lMAoeQDBqDU0dQxo6BjNWGFRIxh+AXRPVaZ9N1Dq6Xpo4PbLy20=
=hqzf
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Several bug fixes for old bugs:

   - Welcome Leon as co-maintainer for RDMA so we are back to having two people

   - Some corner cases are fixed in mlx5's MR code

   - Long standing CM bug where a DREQ at the wrong time can result in a long timeout

   - Missing locking and refcounting in hfi1"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/hfi1: Fix use-after-free bug for mm struct
  IB/rdmavt: add lock to call to rvt_error_qp to prevent a race condition
  IB/cm: Cancel mad on the DREQ event when the state is MRA_REP_RCVD
  RDMA/mlx5: Add a missing update of cache->last_add
  RDMA/mlx5: Don't remove cache MRs when a delay is needed
  MAINTAINERS: Update qib and hfi1 related drivers
  MAINTAINERS: Add Leon Romanovsky to RDMA maintainers
commit f335af1048
MAINTAINERS
@@ -8676,7 +8676,6 @@ F: include/linux/cciss*.h
 F: include/uapi/linux/cciss*.h
 
 HFI1 DRIVER
-M: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
 M: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
 L: linux-rdma@vger.kernel.org
 S: Supported
@@ -9599,6 +9598,7 @@ F: drivers/iio/pressure/dps310.c
 
 INFINIBAND SUBSYSTEM
 M: Jason Gunthorpe <jgg@nvidia.com>
+M: Leon Romanovsky <leonro@nvidia.com>
 L: linux-rdma@vger.kernel.org
 S: Supported
 W: https://github.com/linux-rdma/rdma-core
@@ -14657,7 +14657,6 @@ F: drivers/rtc/rtc-optee.c
 
 OPA-VNIC DRIVER
 M: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
-M: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
 L: linux-rdma@vger.kernel.org
 S: Supported
 F: drivers/infiniband/ulp/opa_vnic
@@ -16099,7 +16098,6 @@ F: include/uapi/linux/qemu_fw_cfg.h
 
 QIB DRIVER
 M: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
-M: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
 L: linux-rdma@vger.kernel.org
 S: Supported
 F: drivers/infiniband/hw/qib/
@@ -16617,7 +16615,6 @@ F: drivers/net/ethernet/rdc/r6040.c
 
 RDMAVT - RDMA verbs software
 M: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
-M: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
 L: linux-rdma@vger.kernel.org
 S: Supported
 F: drivers/infiniband/sw/rdmavt
drivers/infiniband/core/cm.c
@@ -2824,6 +2824,7 @@ static int cm_dreq_handler(struct cm_work *work)
         switch (cm_id_priv->id.state) {
         case IB_CM_REP_SENT:
         case IB_CM_DREQ_SENT:
+        case IB_CM_MRA_REP_RCVD:
                 ib_cancel_mad(cm_id_priv->msg);
                 break;
         case IB_CM_ESTABLISHED:
@@ -2831,8 +2832,6 @@ static int cm_dreq_handler(struct cm_work *work)
                     cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
                         ib_cancel_mad(cm_id_priv->msg);
                 break;
-        case IB_CM_MRA_REP_RCVD:
-                break;
         case IB_CM_TIMEWAIT:
                 atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
                                 [CM_DREQ_COUNTER]);
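In short, the cm.c change groups IB_CM_MRA_REP_RCVD with the states whose outstanding MAD is canceled when a DREQ arrives, instead of leaving it in a do-nothing case that forces the peer to sit out the MAD retry timeout. Below is a minimal sketch of that switch restructuring; the demo_* enum and struct are hypothetical stand-ins, and only ib_cancel_mad() and struct ib_mad_send_buf are the real MAD-layer API.

#include <rdma/ib_mad.h>

/* Hypothetical connection states, standing in for the ib_cm state machine. */
enum demo_cm_state {
        DEMO_REP_SENT,
        DEMO_DREQ_SENT,
        DEMO_MRA_REP_RCVD,      /* previously fell through to a silent no-op */
        DEMO_ESTABLISHED,
};

struct demo_cm_id {
        enum demo_cm_state state;
        struct ib_mad_send_buf *msg;    /* outstanding MAD, if any */
};

static void demo_handle_dreq(struct demo_cm_id *id)
{
        switch (id->state) {
        case DEMO_REP_SENT:
        case DEMO_DREQ_SENT:
        case DEMO_MRA_REP_RCVD: /* the added case: cancel now, don't wait for the timeout */
                ib_cancel_mad(id->msg);
                break;
        default:
                break;
        }
}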
drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -80,6 +80,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
         unsigned long flags;
         struct list_head del_list;
 
+        /* Prevent freeing of mm until we are completely finished. */
+        mmgrab(handler->mn.mm);
+
         /* Unregister first so we don't get any more notifications. */
         mmu_notifier_unregister(&handler->mn, handler->mn.mm);
 
@@ -102,6 +105,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
 
         do_remove(handler, &del_list);
 
+        /* Now the mm may be freed. */
+        mmdrop(handler->mn.mm);
+
         kfree(handler);
 }
 
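The two mmu_rb.c hunks apply the usual pin-the-mm pattern: mmgrab() before the mmu notifier is torn down, mmdrop() only after the last dereference of handler->mn.mm. Here is a minimal sketch of the same teardown ordering, with a hypothetical demo_handler standing in for the driver's mmu_rb_handler; the mmgrab/mmdrop/mmu_notifier_unregister calls are the real mm APIs.

#include <linux/mmu_notifier.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* Hypothetical per-context handler, standing in for struct mmu_rb_handler. */
struct demo_handler {
        struct mmu_notifier mn;
        /* ... interval tree, lists, locks ... */
};

static void demo_handler_teardown(struct demo_handler *handler)
{
        /*
         * Pin the mm: unregistering the notifier releases the reference it
         * held, and we still dereference handler->mn.mm below.
         */
        mmgrab(handler->mn.mm);

        /* Stop further notifications before tearing down our own state. */
        mmu_notifier_unregister(&handler->mn, handler->mn.mm);

        /* ... flush and free anything that still uses the mm ... */

        /* Last use of handler->mn.mm is done; the mm may be freed now. */
        mmdrop(handler->mn.mm);

        kfree(handler);
}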
drivers/infiniband/hw/mlx5/mr.c
@@ -574,8 +574,10 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
                 spin_lock_irq(&ent->lock);
                 if (ent->disabled)
                         goto out;
-                if (need_delay)
+                if (need_delay) {
                         queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
+                        goto out;
+                }
                 remove_cache_mr_locked(ent);
                 queue_adjust_cache_locked(ent);
         }
@@ -625,6 +627,7 @@ static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
         struct mlx5_cache_ent *ent = mr->cache_ent;
 
+        WRITE_ONCE(dev->cache.last_add, jiffies);
         spin_lock_irq(&ent->lock);
         list_add_tail(&mr->list, &ent->head);
         ent->available_mrs++;
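The first mr.c hunk makes "need a delay" a real early exit: re-queue the delayed work and leave the cached MRs alone, rather than scheduling the retry and then shrinking the cache anyway. The second hunk refreshes cache->last_add whenever an MR is returned to the cache, so the shrinker's idle-time check sees recent activity. A minimal sketch of the corrected control flow follows, with hypothetical demo_* types rather than the mlx5 cache code itself.

#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/* Hypothetical cache entry, reduced to the fields the sketch needs. */
struct demo_cache_ent {
        spinlock_t lock;
        bool disabled;
        struct delayed_work dwork;
        struct workqueue_struct *wq;
};

static void demo_remove_one_locked(struct demo_cache_ent *ent)
{
        /* ... drop one cached entry; caller holds ent->lock ... */
}

static void demo_shrink_work(struct demo_cache_ent *ent, bool need_delay)
{
        spin_lock_irq(&ent->lock);
        if (ent->disabled)
                goto out;
        if (need_delay) {
                /*
                 * Bad time to shrink: schedule another attempt later and
                 * bail out without touching the cached entries.
                 */
                queue_delayed_work(ent->wq, &ent->dwork, 300 * HZ);
                goto out;
        }
        demo_remove_one_locked(ent);
out:
        spin_unlock_irq(&ent->lock);
}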
drivers/infiniband/sw/rdmavt/qp.c
@@ -3190,7 +3190,11 @@ serr_no_r_lock:
         spin_lock_irqsave(&sqp->s_lock, flags);
         rvt_send_complete(sqp, wqe, send_status);
         if (sqp->ibqp.qp_type == IB_QPT_RC) {
-                int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+                int lastwqe;
+
+                spin_lock(&sqp->r_lock);
+                lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+                spin_unlock(&sqp->r_lock);
 
                 sqp->s_flags &= ~RVT_S_BUSY;
                 spin_unlock_irqrestore(&sqp->s_lock, flags);
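The qp.c hunk closes a lock-coverage gap: rvt_error_qp() is documented to require both the QP's r_lock and s_lock, so the loopback error path now takes r_lock nested inside the already-held s_lock before making the call. A minimal sketch of that nesting order, with hypothetical demo_* names in place of the rdmavt types:

#include <linux/spinlock.h>

/* Hypothetical QP holding just the two locks of interest. */
struct demo_qp {
        spinlock_t s_lock;      /* send side; taken first, with IRQs saved */
        spinlock_t r_lock;      /* receive side; nested inside s_lock here */
        int in_error;
};

/* Stand-in for rvt_error_qp(): caller must hold r_lock and s_lock. */
static int demo_error_qp(struct demo_qp *qp)
{
        qp->in_error = 1;
        return 1;       /* "last WQE reached" indication in the real code */
}

static void demo_flush_on_error(struct demo_qp *qp)
{
        unsigned long flags;
        int lastwqe;

        spin_lock_irqsave(&qp->s_lock, flags);

        spin_lock(&qp->r_lock);
        lastwqe = demo_error_qp(qp);
        spin_unlock(&qp->r_lock);

        spin_unlock_irqrestore(&qp->s_lock, flags);

        if (lastwqe) {
                /* ... dispatch the last-WQE-reached event outside the locks ... */
        }
}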