RDMA subsystem updates for 5.4-rc
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "A number of bug fixes and a regression fix:

  - Various issues from static analysis in hfi1, uverbs, hns, and cxgb4

  - Fix for deadlock in a case when the new auto RDMA module loading is
    used

  - Missing _irq notation in a prior -rc patch found by lockdep

  - Fix a locking and lifetime issue in siw

  - Minor functional bug fixes in cxgb4, mlx5, qedr

  - Fix a regression where vlan interfaces no longer worked with RDMA
    CM in some cases"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/hns: Prevent memory leaks of eq->buf_list
  RDMA/iw_cxgb4: Avoid freeing skb twice in arp failure case
  RDMA/mlx5: Use irq xarray locking for mkey_table
  IB/core: Avoid deadlock during netlink message handling
  RDMA/nldev: Skip counter if port doesn't match
  RDMA/uverbs: Prevent potential underflow
  IB/core: Use rdma_read_gid_l2_fields to compare GID L2 fields
  RDMA/qedr: Fix reported firmware version
  RDMA/siw: free siw_base_qp in kref release routine
  RDMA/iwcm: move iw_rem_ref() calls out of spinlock
  iw_cxgb4: fix ECN check on the passive accept
  IB/hfi1: Use a common pad buffer for 9B and 16B packets
  IB/hfi1: Avoid excessive retry for TID RDMA READ request
  RDMA/mlx5: Clear old rate limit when closing QP
commit 4252a1a9b0
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -199,6 +199,7 @@ void ib_mad_cleanup(void);
 int ib_sa_init(void);
 void ib_sa_cleanup(void);
 
+void rdma_nl_init(void);
 void rdma_nl_exit(void);
 
 int ib_nl_handle_resolve_resp(struct sk_buff *skb,
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -2716,6 +2716,8 @@ static int __init ib_core_init(void)
                 goto err_comp_unbound;
         }
 
+        rdma_nl_init();
+
         ret = addr_init();
         if (ret) {
                 pr_warn("Could't init IB address resolution\n");
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -372,6 +372,7 @@ EXPORT_SYMBOL(iw_cm_disconnect);
 static void destroy_cm_id(struct iw_cm_id *cm_id)
 {
         struct iwcm_id_private *cm_id_priv;
+        struct ib_qp *qp;
         unsigned long flags;
 
         cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
@@ -389,6 +390,9 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
         set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);
 
         spin_lock_irqsave(&cm_id_priv->lock, flags);
+        qp = cm_id_priv->qp;
+        cm_id_priv->qp = NULL;
+
         switch (cm_id_priv->state) {
         case IW_CM_STATE_LISTEN:
                 cm_id_priv->state = IW_CM_STATE_DESTROYING;
@@ -401,7 +405,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
                 cm_id_priv->state = IW_CM_STATE_DESTROYING;
                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                 /* Abrupt close of the connection */
-                (void)iwcm_modify_qp_err(cm_id_priv->qp);
+                (void)iwcm_modify_qp_err(qp);
                 spin_lock_irqsave(&cm_id_priv->lock, flags);
                 break;
         case IW_CM_STATE_IDLE:
@@ -426,11 +430,9 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
                 BUG();
                 break;
         }
-        if (cm_id_priv->qp) {
-                cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
-                cm_id_priv->qp = NULL;
-        }
         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+        if (qp)
+                cm_id_priv->id.device->ops.iw_rem_ref(qp);
 
         if (cm_id->mapped) {
                 iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
@@ -671,11 +673,11 @@ int iw_cm_accept(struct iw_cm_id *cm_id,
                 BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
                 cm_id_priv->state = IW_CM_STATE_IDLE;
                 spin_lock_irqsave(&cm_id_priv->lock, flags);
-                if (cm_id_priv->qp) {
-                        cm_id->device->ops.iw_rem_ref(qp);
-                        cm_id_priv->qp = NULL;
-                }
+                qp = cm_id_priv->qp;
+                cm_id_priv->qp = NULL;
                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+                if (qp)
+                        cm_id->device->ops.iw_rem_ref(qp);
                 clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                 wake_up_all(&cm_id_priv->connect_wait);
         }
@@ -696,7 +698,7 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
         struct iwcm_id_private *cm_id_priv;
         int ret;
         unsigned long flags;
-        struct ib_qp *qp;
+        struct ib_qp *qp = NULL;
 
         cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 
@@ -730,13 +732,13 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
                 return 0;        /* success */
 
         spin_lock_irqsave(&cm_id_priv->lock, flags);
-        if (cm_id_priv->qp) {
-                cm_id->device->ops.iw_rem_ref(qp);
-                cm_id_priv->qp = NULL;
-        }
+        qp = cm_id_priv->qp;
+        cm_id_priv->qp = NULL;
         cm_id_priv->state = IW_CM_STATE_IDLE;
 err:
         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+        if (qp)
+                cm_id->device->ops.iw_rem_ref(qp);
         clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
         wake_up_all(&cm_id_priv->connect_wait);
         return ret;
@@ -878,6 +880,7 @@ static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
 static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
                                struct iw_cm_event *iw_event)
 {
+        struct ib_qp *qp = NULL;
         unsigned long flags;
         int ret;
 
@@ -896,11 +899,13 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
                 cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
         } else {
                 /* REJECTED or RESET */
-                cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
+                qp = cm_id_priv->qp;
                 cm_id_priv->qp = NULL;
                 cm_id_priv->state = IW_CM_STATE_IDLE;
         }
         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+        if (qp)
+                cm_id_priv->id.device->ops.iw_rem_ref(qp);
         ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
 
         if (iw_event->private_data_len)
@@ -942,21 +947,18 @@ static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
 static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
                             struct iw_cm_event *iw_event)
 {
+        struct ib_qp *qp;
         unsigned long flags;
-        int ret = 0;
+        int ret = 0, notify_event = 0;
         spin_lock_irqsave(&cm_id_priv->lock, flags);
+        qp = cm_id_priv->qp;
+        cm_id_priv->qp = NULL;
 
-        if (cm_id_priv->qp) {
-                cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
-                cm_id_priv->qp = NULL;
-        }
         switch (cm_id_priv->state) {
         case IW_CM_STATE_ESTABLISHED:
         case IW_CM_STATE_CLOSING:
                 cm_id_priv->state = IW_CM_STATE_IDLE;
-                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-                ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
-                spin_lock_irqsave(&cm_id_priv->lock, flags);
+                notify_event = 1;
                 break;
         case IW_CM_STATE_DESTROYING:
                 break;
@@ -965,6 +967,10 @@ static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
         }
         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
+        if (qp)
+                cm_id_priv->id.device->ops.iw_rem_ref(qp);
+        if (notify_event)
+                ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
         return ret;
 }
 
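Note: every iwcm.c hunk above is the same transformation. iw_rem_ref() can drop the last reference to the QP and end up in a driver teardown path that must not run under cm_id_priv->lock, so the QP pointer is snapshotted and cleared while the spinlock is held, and the reference is dropped only after the unlock (cm_close_handler additionally defers the cm_handler upcall the same way instead of dropping and retaking the lock). A minimal userspace sketch of the pattern, with a pthread mutex standing in for the spinlock and illustrative names throughout:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct conn {
            pthread_mutex_t lock;
            void *qp;              /* resource whose reference we must drop */
    };

    static void put_ref(void *qp)  /* stand-in for iw_rem_ref(); may block */
    {
            printf("releasing %p outside the lock\n", qp);
            free(qp);
    }

    static void teardown(struct conn *c)
    {
            void *qp;

            pthread_mutex_lock(&c->lock);
            qp = c->qp;            /* snapshot the pointer under the lock */
            c->qp = NULL;          /* no other path can reach it any more */
            pthread_mutex_unlock(&c->lock);

            if (qp)
                    put_ref(qp);   /* potentially blocking, lock not held */
    }

    int main(void)
    {
            struct conn c = { PTHREAD_MUTEX_INITIALIZER, malloc(16) };

            teardown(&c);
            return 0;
    }

The invariant that makes this safe is that clearing the field under the lock guarantees no other path can obtain the same pointer afterwards, so exactly one caller performs the release.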
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -42,9 +42,12 @@
 #include <linux/module.h>
 #include "core_priv.h"
 
-static DEFINE_MUTEX(rdma_nl_mutex);
 static struct {
         const struct rdma_nl_cbs *cb_table;
+        /* Synchronizes between ongoing netlink commands and netlink client
+         * unregistration.
+         */
+        struct rw_semaphore sem;
 } rdma_nl_types[RDMA_NL_NUM_CLIENTS];
 
 bool rdma_nl_chk_listeners(unsigned int group)
@@ -75,70 +78,53 @@ static bool is_nl_msg_valid(unsigned int type, unsigned int op)
         return (op < max_num_ops[type]) ? true : false;
 }
 
-static bool
-is_nl_valid(const struct sk_buff *skb, unsigned int type, unsigned int op)
+static const struct rdma_nl_cbs *
+get_cb_table(const struct sk_buff *skb, unsigned int type, unsigned int op)
 {
         const struct rdma_nl_cbs *cb_table;
 
-        if (!is_nl_msg_valid(type, op))
-                return false;
-
         /*
          * Currently only NLDEV client is supporting netlink commands in
          * non init_net net namespace.
          */
         if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV)
-                return false;
+                return NULL;
 
-        if (!rdma_nl_types[type].cb_table) {
-                mutex_unlock(&rdma_nl_mutex);
+        cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
+        if (!cb_table) {
+                /*
+                 * Didn't get valid reference of the table, attempt module
+                 * load once.
+                 */
+                up_read(&rdma_nl_types[type].sem);
+
                 request_module("rdma-netlink-subsys-%d", type);
-                mutex_lock(&rdma_nl_mutex);
+                down_read(&rdma_nl_types[type].sem);
+                cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
         }
 
-        cb_table = rdma_nl_types[type].cb_table;
-
         if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit))
-                return false;
-        return true;
+                return NULL;
+        return cb_table;
 }
 
 void rdma_nl_register(unsigned int index,
                       const struct rdma_nl_cbs cb_table[])
 {
-        mutex_lock(&rdma_nl_mutex);
-        if (!is_nl_msg_valid(index, 0)) {
-                /*
-                 * All clients are not interesting in success/failure of
-                 * this call. They want to see the print to error log and
-                 * continue their initialization. Print warning for them,
-                 * because it is programmer's error to be here.
-                 */
-                mutex_unlock(&rdma_nl_mutex);
-                WARN(true,
-                     "The not-valid %u index was supplied to RDMA netlink\n",
-                     index);
+        if (WARN_ON(!is_nl_msg_valid(index, 0)) ||
+            WARN_ON(READ_ONCE(rdma_nl_types[index].cb_table)))
                 return;
-        }
 
-        if (rdma_nl_types[index].cb_table) {
-                mutex_unlock(&rdma_nl_mutex);
-                WARN(true,
-                     "The %u index is already registered in RDMA netlink\n",
-                     index);
-                return;
-        }
-
-        rdma_nl_types[index].cb_table = cb_table;
-        mutex_unlock(&rdma_nl_mutex);
+        /* Pairs with the READ_ONCE in is_nl_valid() */
+        smp_store_release(&rdma_nl_types[index].cb_table, cb_table);
 }
 EXPORT_SYMBOL(rdma_nl_register);
 
 void rdma_nl_unregister(unsigned int index)
 {
-        mutex_lock(&rdma_nl_mutex);
+        down_write(&rdma_nl_types[index].sem);
         rdma_nl_types[index].cb_table = NULL;
-        mutex_unlock(&rdma_nl_mutex);
+        up_write(&rdma_nl_types[index].sem);
 }
 EXPORT_SYMBOL(rdma_nl_unregister);
 
@@ -170,15 +156,21 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
         unsigned int index = RDMA_NL_GET_CLIENT(type);
         unsigned int op = RDMA_NL_GET_OP(type);
         const struct rdma_nl_cbs *cb_table;
+        int err = -EINVAL;
 
-        if (!is_nl_valid(skb, index, op))
+        if (!is_nl_msg_valid(index, op))
                 return -EINVAL;
 
-        cb_table = rdma_nl_types[index].cb_table;
+        down_read(&rdma_nl_types[index].sem);
+        cb_table = get_cb_table(skb, index, op);
+        if (!cb_table)
+                goto done;
 
         if ((cb_table[op].flags & RDMA_NL_ADMIN_PERM) &&
-            !netlink_capable(skb, CAP_NET_ADMIN))
-                return -EPERM;
+            !netlink_capable(skb, CAP_NET_ADMIN)) {
+                err = -EPERM;
+                goto done;
+        }
 
         /*
          * LS responses overload the 0x100 (NLM_F_ROOT) flag. Don't
@@ -186,8 +178,8 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
          */
         if (index == RDMA_NL_LS) {
                 if (cb_table[op].doit)
-                        return cb_table[op].doit(skb, nlh, extack);
-                return -EINVAL;
+                        err = cb_table[op].doit(skb, nlh, extack);
+                goto done;
         }
         /* FIXME: Convert IWCM to properly handle doit callbacks */
         if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_IWCM) {
@@ -195,14 +187,15 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
                         .dump = cb_table[op].dump,
                 };
                 if (c.dump)
-                        return netlink_dump_start(skb->sk, skb, nlh, &c);
-                return -EINVAL;
+                        err = netlink_dump_start(skb->sk, skb, nlh, &c);
+                goto done;
         }
 
         if (cb_table[op].doit)
-                return cb_table[op].doit(skb, nlh, extack);
-
-        return 0;
+                err = cb_table[op].doit(skb, nlh, extack);
+done:
+        up_read(&rdma_nl_types[index].sem);
+        return err;
 }
 
 /*
@@ -263,9 +256,7 @@ skip:
 
 static void rdma_nl_rcv(struct sk_buff *skb)
 {
-        mutex_lock(&rdma_nl_mutex);
         rdma_nl_rcv_skb(skb, &rdma_nl_rcv_msg);
-        mutex_unlock(&rdma_nl_mutex);
 }
 
 int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid)
@@ -297,6 +288,14 @@ int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(rdma_nl_multicast);
 
+void rdma_nl_init(void)
+{
+        int idx;
+
+        for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
+                init_rwsem(&rdma_nl_types[idx].sem);
+}
+
 void rdma_nl_exit(void)
 {
         int idx;
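Note: the netlink.c rework replaces the single rdma_nl_mutex with a per-client rw_semaphore. rdma_nl_rcv_msg() runs under down_read(), rdma_nl_unregister() takes down_write() so it waits out in-flight handlers, registration publishes the table with smp_store_release()/READ_ONCE() instead of taking the semaphore, and get_cb_table() drops the read side around request_module() so the loaded module's own registration cannot deadlock against the caller. A compilable userspace analog of that locking shape (pthread rwlock standing in for the rw_semaphore; names are illustrative, not the kernel API):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t sem = PTHREAD_RWLOCK_INITIALIZER;
    static void *cb_table;          /* NULL until a "module" registers */

    static void handle_command(void)
    {
            pthread_rwlock_rdlock(&sem);
            if (!cb_table) {
                    /* Drop the read lock before the slow, re-entrant path,
                     * mirroring up_read() around request_module(). */
                    pthread_rwlock_unlock(&sem);
                    /* pretend the loaded module registered itself; the real
                     * code publishes with smp_store_release(), no lock */
                    cb_table = &sem;
                    pthread_rwlock_rdlock(&sem);
            }
            if (cb_table)
                    printf("dispatching with table %p\n", cb_table);
            pthread_rwlock_unlock(&sem);
    }

    static void unregister_client(void)
    {
            pthread_rwlock_wrlock(&sem);  /* waits for readers to finish */
            cb_table = NULL;
            pthread_rwlock_unlock(&sem);
    }

    int main(void)
    {
            handle_command();
            unregister_client();
            return 0;
    }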
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -778,7 +778,7 @@ static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin,
                 container_of(res, struct rdma_counter, res);
 
         if (port && port != counter->port)
-                return 0;
+                return -EAGAIN;
 
         /* Dump it even query failed */
         rdma_counter_query_stats(counter);
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -98,7 +98,7 @@ ib_uverbs_init_udata_buf_or_null(struct ib_udata *udata,
 
 struct ib_uverbs_device {
         atomic_t                        refcount;
-        int                             num_comp_vectors;
+        u32                             num_comp_vectors;
         struct completion               comp;
         struct device                   dev;
         /* First group for device attributes, NULL terminated array */
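Note: this int-to-u32 change (and the matching ib_cq_init_attr change at the bottom of this page) closes a signed/unsigned trap: comp_vector values come from user space as u32, and comparing or storing them through a signed int lets a huge value reappear as a negative one. A toy demonstration of the bug class on a typical two's-complement machine -- not the exact uverbs check:

    #include <stdio.h>

    int main(void)
    {
            unsigned int requested = 4294967295u;  /* (u32)-1 from user space */
            int available = 8;                     /* signed capacity field */

            /* Converting the user's u32 to int underflows to -1, which then
             * sails past a signed range check.  Keeping both sides u32
             * removes the ambiguity. */
            if ((int)requested < available)
                    printf("BUG: %u accepted by signed comparison\n", requested);
            if (requested < (unsigned int)available)
                    printf("never printed: unsigned check rejects it\n");
            return 0;
    }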
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -662,16 +662,17 @@ static bool find_gid_index(const union ib_gid *gid,
                            void *context)
 {
         struct find_gid_index_context *ctx = context;
+        u16 vlan_id = 0xffff;
+        int ret;
 
         if (ctx->gid_type != gid_attr->gid_type)
                 return false;
 
-        if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
-            (is_vlan_dev(gid_attr->ndev) &&
-             vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
+        ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
+        if (ret)
                 return false;
 
-        return true;
+        return ctx->vlan_id == vlan_id;
 }
 
 static const struct ib_gid_attr *
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -495,7 +495,6 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
 
         ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
         release_ep_resources(ep);
-        kfree_skb(skb);
         return 0;
 }
 
@@ -506,7 +505,6 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
         ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
         c4iw_put_ep(&ep->parent_ep->com);
         release_ep_resources(ep);
-        kfree_skb(skb);
         return 0;
 }
 
@@ -2424,20 +2422,6 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
         enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
 
         pr_debug("ep %p tid %u\n", ep, ep->hwtid);
-
-        skb_get(skb);
-        rpl = cplhdr(skb);
-        if (!is_t4(adapter_type)) {
-                skb_trim(skb, roundup(sizeof(*rpl5), 16));
-                rpl5 = (void *)rpl;
-                INIT_TP_WR(rpl5, ep->hwtid);
-        } else {
-                skb_trim(skb, sizeof(*rpl));
-                INIT_TP_WR(rpl, ep->hwtid);
-        }
-        OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
-                                                    ep->hwtid));
-
         cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
                       enable_tcp_timestamps && req->tcpopt.tstamp,
                       (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
@@ -2483,6 +2467,20 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
                 if (tcph->ece && tcph->cwr)
                         opt2 |= CCTRL_ECN_V(1);
         }
+
+        skb_get(skb);
+        rpl = cplhdr(skb);
+        if (!is_t4(adapter_type)) {
+                skb_trim(skb, roundup(sizeof(*rpl5), 16));
+                rpl5 = (void *)rpl;
+                INIT_TP_WR(rpl5, ep->hwtid);
+        } else {
+                skb_trim(skb, sizeof(*rpl));
+                INIT_TP_WR(rpl, ep->hwtid);
+        }
+        OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
+                                                    ep->hwtid));
+
         if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
                 u32 isn = (prandom_u32() & ~7UL) - 1;
                 opt2 |= T5_OPT_2_VALID_F;
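Note: in accept_cr() the reply CPL is built in the same skb that delivered the CPL_PASS_ACCEPT_REQ, so the old order (skb_trim/INIT_TP_WR/OPCODE_TID first) overwrote the request fields -- including the TCP option and header bits -- that the ECN check still had to read; the fix simply consumes the request before reusing the buffer. A self-contained miniature of that in-place-reply hazard (made-up field names):

    #include <stdio.h>
    #include <string.h>

    struct request { int ecn_echo; int timestamps; };
    struct reply   { char wr[16]; };

    /* One wire buffer reused for both directions, as accept_cr() reuses
     * the request skb for the reply. */
    union wire {
            struct request req;
            struct reply rpl;
    };

    int main(void)
    {
            union wire w;

            w.req.ecn_echo = 1;
            w.req.timestamps = 1;

            /* Wrong order: build the in-place reply first... */
            memset(w.rpl.wr, 0, sizeof(w.rpl.wr));
            /* ...then the later "ECN check" reads clobbered fields. */
            printf("ecn_echo now reads %d (was 1)\n", w.req.ecn_echo);
            return 0;
    }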
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -65,6 +65,7 @@
 #define SDMA_DESCQ_CNT 2048
 #define SDMA_DESC_INTR 64
 #define INVALID_TAIL 0xffff
+#define SDMA_PAD max_t(size_t, MAX_16B_PADDING, sizeof(u32))
 
 static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
 module_param(sdma_descq_cnt, uint, S_IRUGO);
@@ -1296,7 +1297,7 @@ void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
         struct sdma_engine *sde;
 
         if (dd->sdma_pad_dma) {
-                dma_free_coherent(&dd->pcidev->dev, 4,
+                dma_free_coherent(&dd->pcidev->dev, SDMA_PAD,
                                   (void *)dd->sdma_pad_dma,
                                   dd->sdma_pad_phys);
                 dd->sdma_pad_dma = NULL;
@@ -1491,7 +1492,7 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
         }
 
         /* Allocate memory for pad */
-        dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
+        dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, SDMA_PAD,
                                               &dd->sdma_pad_phys, GFP_KERNEL);
         if (!dd->sdma_pad_dma) {
                 dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -2736,11 +2736,6 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
                         diff = cmp_psn(psn,
                                        flow->flow_state.r_next_psn);
                         if (diff > 0) {
-                                if (!(qp->r_flags & RVT_R_RDMAR_SEQ))
-                                        restart_tid_rdma_read_req(rcd,
-                                                                  qp,
-                                                                  wqe);
-
                                 /* Drop the packet.*/
                                 goto s_unlock;
                         } else if (diff < 0) {
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -147,9 +147,6 @@ static int pio_wait(struct rvt_qp *qp,
 /* Length of buffer to create verbs txreq cache name */
 #define TXREQ_NAME_LEN 24
 
-/* 16B trailing buffer */
-static const u8 trail_buf[MAX_16B_PADDING];
-
 static uint wss_threshold = 80;
 module_param(wss_threshold, uint, S_IRUGO);
 MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
@@ -820,8 +817,8 @@ static int build_verbs_tx_desc(
 
         /* add icrc, lt byte, and padding to flit */
         if (extra_bytes)
-                ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
-                                        (void *)trail_buf, extra_bytes);
+                ret = sdma_txadd_daddr(sde->dd, &tx->txreq,
+                                       sde->dd->sdma_pad_phys, extra_bytes);
 
 bail_txadd:
         return ret;
@@ -1089,7 +1086,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
                 }
                 /* add icrc, lt byte, and padding to flit */
                 if (extra_bytes)
-                        seg_pio_copy_mid(pbuf, trail_buf, extra_bytes);
+                        seg_pio_copy_mid(pbuf, ppd->dd->sdma_pad_dma,
+                                         extra_bytes);
 
                 seg_pio_copy_end(pbuf);
         }
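Note: the hfi1 pair of changes retires the static const trail_buf -- rodata that was being handed to the DMA engine -- in favour of the driver's coherent pad allocation, and sizes that one allocation with SDMA_PAD so it covers both the 16B padding case and the old sizeof(u32) case. A sketch of the sizing idea with a userspace stand-in for max_t() (the padding value below is illustrative, not the hfi1 constant):

    #include <stdio.h>

    #define MAX_16B_PADDING 8   /* illustrative value, not the hfi1 constant */
    #define MAX_T(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))
    #define SDMA_PAD MAX_T(unsigned long, MAX_16B_PADDING, sizeof(unsigned int))

    int main(void)
    {
            /* One buffer sized for the worst case serves both packet
             * formats; in the kernel it is also allocated with
             * dma_alloc_coherent(), which rodata is not. */
            printf("pad buffer size: %lu bytes\n", (unsigned long)SDMA_PAD);
            return 0;
    }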
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -5389,9 +5389,9 @@ static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
                 return;
         }
 
-        if (eq->buf_list)
-                dma_free_coherent(hr_dev->dev, buf_chk_sz,
-                                  eq->buf_list->buf, eq->buf_list->map);
+        dma_free_coherent(hr_dev->dev, buf_chk_sz, eq->buf_list->buf,
+                          eq->buf_list->map);
+        kfree(eq->buf_list);
 }
 
 static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
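Note: eq->buf_list is a small kmalloc'ed tracking struct around the coherent DMA buffer; the old code freed the buffer (and only behind a condition) but never the struct itself, which is the leak the added kfree() closes. The mirror-image pairing, in a compilable miniature:

    #include <stdio.h>
    #include <stdlib.h>

    struct buf_list { void *buf; };     /* tracking struct + payload */

    int main(void)
    {
            struct buf_list *bl = malloc(sizeof(*bl)); /* kzalloc analog */
            if (!bl)
                    return 1;
            bl->buf = malloc(4096);     /* dma_alloc_coherent analog */
            if (!bl->buf) {
                    free(bl);
                    return 1;
            }

            /* teardown must free both layers, innermost first */
            free(bl->buf);              /* dma_free_coherent(...) */
            free(bl);                   /* the kfree(eq->buf_list) the fix adds */
            return 0;
    }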
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1967,8 +1967,8 @@ int mlx5_ib_dealloc_mw(struct ib_mw *mw)
         int err;
 
         if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
-                xa_erase(&dev->mdev->priv.mkey_table,
+                xa_erase_irq(&dev->mdev->priv.mkey_table,
                          mlx5_base_mkey(mmw->mmkey.key));
                 /*
                  * pagefault_single_data_segment() may be accessing mmw under
                  * SRCU if the user bound an ODP MR to this MW.
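Note: the mkey_table xarray is also locked from contexts with interrupts disabled, so erasing with the plain spinlock variant is the lock-state inconsistency that lockdep flagged (the "missing _irq notation" item in the pull text). For reference, xa_erase_irq() in include/linux/xarray.h is essentially the plain erase wrapped in the irq-disabling form of the array's lock:

    static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
    {
            void *entry;

            xa_lock_irq(xa);
            entry = __xa_erase(xa, index);
            xa_unlock_irq(xa);

            return entry;
    }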
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3249,10 +3249,12 @@ static int modify_raw_packet_qp_sq(
         }
 
         /* Only remove the old rate after new rate was set */
-        if ((old_rl.rate &&
-             !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
-            (new_state != MLX5_SQC_STATE_RDY))
+        if ((old_rl.rate && !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
+            (new_state != MLX5_SQC_STATE_RDY)) {
                 mlx5_rl_remove_rate(dev, &old_rl);
+                if (new_state != MLX5_SQC_STATE_RDY)
+                        memset(&new_rl, 0, sizeof(new_rl));
+        }
 
         ibqp->rl = new_rl;
         sq->state = new_state;
|
|||||||
struct qedr_dev *qedr = get_qedr_dev(ibdev);
|
struct qedr_dev *qedr = get_qedr_dev(ibdev);
|
||||||
u32 fw_ver = (u32)qedr->attr.fw_ver;
|
u32 fw_ver = (u32)qedr->attr.fw_ver;
|
||||||
|
|
||||||
snprintf(str, IB_FW_VERSION_NAME_MAX, "%d. %d. %d. %d",
|
snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
|
||||||
(fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
|
(fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
|
||||||
(fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
|
(fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
|
||||||
}
|
}
|
||||||
|
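Note: a pure format-string fix -- the old "%d. %d. %d. %d" printed a space after every dot in the reported firmware version. A two-line demonstration with a made-up version word:

    #include <stdio.h>

    int main(void)
    {
            unsigned int fw_ver = 0x08250b00;   /* made-up example value */

            /* old format: stray spaces after each dot -> "8. 37. 11. 0" */
            printf("%d. %d. %d. %d\n", (fw_ver >> 24) & 0xFF,
                   (fw_ver >> 16) & 0xFF, (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
            /* fixed format -> "8.37.11.0" */
            printf("%d.%d.%d.%d\n", (fw_ver >> 24) & 0xFF,
                   (fw_ver >> 16) & 0xFF, (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
            return 0;
    }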
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -1312,6 +1312,7 @@ int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp)
 void siw_free_qp(struct kref *ref)
 {
         struct siw_qp *found, *qp = container_of(ref, struct siw_qp, ref);
+        struct siw_base_qp *siw_base_qp = to_siw_base_qp(qp->ib_qp);
         struct siw_device *sdev = qp->sdev;
         unsigned long flags;
 
@@ -1334,4 +1335,5 @@ void siw_free_qp(struct kref *ref)
         atomic_dec(&sdev->num_qp);
         siw_dbg_qp(qp, "free QP\n");
         kfree_rcu(qp, rcu);
+        kfree(siw_base_qp);
 }
|
|||||||
int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
|
int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
|
||||||
{
|
{
|
||||||
struct siw_qp *qp = to_siw_qp(base_qp);
|
struct siw_qp *qp = to_siw_qp(base_qp);
|
||||||
struct siw_base_qp *siw_base_qp = to_siw_base_qp(base_qp);
|
|
||||||
struct siw_ucontext *uctx =
|
struct siw_ucontext *uctx =
|
||||||
rdma_udata_to_drv_context(udata, struct siw_ucontext,
|
rdma_udata_to_drv_context(udata, struct siw_ucontext,
|
||||||
base_ucontext);
|
base_ucontext);
|
||||||
@ -641,7 +640,6 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
|
|||||||
qp->scq = qp->rcq = NULL;
|
qp->scq = qp->rcq = NULL;
|
||||||
|
|
||||||
siw_qp_put(qp);
|
siw_qp_put(qp);
|
||||||
kfree(siw_base_qp);
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -366,7 +366,7 @@ struct ib_tm_caps {
|
|||||||
|
|
||||||
struct ib_cq_init_attr {
|
struct ib_cq_init_attr {
|
||||||
unsigned int cqe;
|
unsigned int cqe;
|
||||||
int comp_vector;
|
u32 comp_vector;
|
||||||
u32 flags;
|
u32 flags;
|
||||||
};
|
};
|
||||||
|
|
||||||
|