Merge branches 'cma', 'ipoib', 'misc', 'mlx4', 'ocrdma', 'qib' and 'srp' into for-next
commit c0369b296e
@@ -439,7 +439,7 @@ static int c2_rnic_close(struct c2_dev *c2dev)
 /*
  * Called by c2_probe to initialize the RNIC. This principally
- * involves initalizing the various limits and resouce pools that
+ * involves initializing the various limits and resource pools that
  * comprise the RNIC instance.
  */
 int __devinit c2_rnic_init(struct c2_dev *c2dev)
@@ -1680,7 +1680,7 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
  * T3A does 3 things when a TERM is received:
  * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
  * 2) generate an async event on the QP with the TERMINATE opcode
- * 3) post a TERMINATE opcde cqe into the associated CQ.
+ * 3) post a TERMINATE opcode cqe into the associated CQ.
  *
  * For (1), we save the message in the qp for later consumer consumption.
  * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
@@ -125,6 +125,7 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
 {
 	struct ib_ah *new_ah;
 	struct ib_ah_attr ah_attr;
+	unsigned long flags;
 
 	if (!dev->send_agent[port_num - 1][0])
 		return;
@@ -139,11 +140,11 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
 	if (IS_ERR(new_ah))
 		return;
 
-	spin_lock(&dev->sm_lock);
+	spin_lock_irqsave(&dev->sm_lock, flags);
 	if (dev->sm_ah[port_num - 1])
 		ib_destroy_ah(dev->sm_ah[port_num - 1]);
 	dev->sm_ah[port_num - 1] = new_ah;
-	spin_unlock(&dev->sm_lock);
+	spin_unlock_irqrestore(&dev->sm_lock, flags);
 }
 
 /*
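The sm_lock conversions in this and the following mad.c hunks all follow one pattern: the lock is also taken from the MAD completion path, which can run in interrupt context, so taking it with plain spin_lock() in process context leaves a window where the interrupt can arrive on the same CPU while the lock is held and then spin on it forever. A minimal kernel-style sketch of the hazard and the fix; the two function names are illustrative, not from the patch:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(sm_lock);

static void process_side(void)
{
	unsigned long flags;

	/* spin_lock(&sm_lock) would leave IRQs enabled; if the handler
	 * below then ran on this CPU, it would spin forever on a lock
	 * its own CPU holds.  The _irqsave variant closes that window. */
	spin_lock_irqsave(&sm_lock, flags);
	/* ... update the cached SM address handle ... */
	spin_unlock_irqrestore(&sm_lock, flags);
}

static void irq_side(void)	/* e.g. a MAD send/receive completion */
{
	unsigned long flags;

	spin_lock_irqsave(&sm_lock, flags);
	/* ... read the shared state ... */
	spin_unlock_irqrestore(&sm_lock, flags);
}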
@@ -197,13 +198,15 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
 static void node_desc_override(struct ib_device *dev,
 			       struct ib_mad *mad)
 {
+	unsigned long flags;
+
 	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
 	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
 	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
 	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
-		spin_lock(&to_mdev(dev)->sm_lock);
+		spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
 		memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
-		spin_unlock(&to_mdev(dev)->sm_lock);
+		spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
 	}
 }
 
@@ -213,6 +216,7 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad)
 	struct ib_mad_send_buf *send_buf;
 	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
 	int ret;
+	unsigned long flags;
 
 	if (agent) {
 		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
@@ -225,13 +229,13 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad)
 		 * wrong following the IB spec strictly, but we know
 		 * it's OK for our devices).
 		 */
-		spin_lock(&dev->sm_lock);
+		spin_lock_irqsave(&dev->sm_lock, flags);
 		memcpy(send_buf->mad, mad, sizeof *mad);
 		if ((send_buf->ah = dev->sm_ah[port_num - 1]))
 			ret = ib_post_send_mad(send_buf, NULL);
 		else
 			ret = -EINVAL;
-		spin_unlock(&dev->sm_lock);
+		spin_unlock_irqrestore(&dev->sm_lock, flags);
 
 		if (ret)
 			ib_free_send_mad(send_buf);
@@ -423,6 +423,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
 				 struct ib_device_modify *props)
 {
 	struct mlx4_cmd_mailbox *mailbox;
+	unsigned long flags;
 
 	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
 		return -EOPNOTSUPP;
@@ -430,9 +431,9 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
 	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
 		return 0;
 
-	spin_lock(&to_mdev(ibdev)->sm_lock);
+	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
 	memcpy(ibdev->node_desc, props->node_desc, 64);
-	spin_unlock(&to_mdev(ibdev)->sm_lock);
+	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
 
 	/*
 	 * If possible, pass node desc to FW, so it can generate
@@ -1407,6 +1407,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 	struct mlx4_wqe_mlx_seg *mlx = wqe;
 	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
 	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+	struct net_device *ndev;
 	union ib_gid sgid;
 	u16 pkey;
 	int send_size;
@@ -1483,7 +1484,10 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 
 		memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
 		/* FIXME: cache smac value? */
-		smac = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]->dev_addr;
+		ndev = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1];
+		if (!ndev)
+			return -ENODEV;
+		smac = ndev->dev_addr;
 		memcpy(sqp->ud_header.eth.smac_h, smac, 6);
 		if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
 			mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
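The smac fix above replaces a chained dereference with a snapshot-then-test: the iboe.netdevs[] slot can go NULL when the Ethernet device is unregistered, so the pointer is read once into a local and validated before its MAC address is touched. A hedged sketch of the shape of the fix; use_mac() is a hypothetical consumer, not from the patch:

/* Sketch: read the possibly-NULL slot exactly once, then test the
 * local copy.  use_mac() is hypothetical. */
struct net_device *ndev = ibdev->iboe.netdevs[port - 1];

if (!ndev)
	return -ENODEV;		/* port's netdev was unregistered */
use_mac(ndev->dev_addr);	/* safe: ndev checked above */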
@@ -161,7 +161,7 @@ static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
 	ocrdma_get_guid(dev, &sgid->raw[8]);
 }
 
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
 static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
 {
 	struct net_device *netdev, *tmp;
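IS_ENABLED(CONFIG_FOO) evaluates to 1 when the option is built in (=y) or built as a module (=m), so it subsumes the old two-macro test. A short illustration of the two forms:

#include <linux/kconfig.h>

/* Old form: a modular build defines CONFIG_VLAN_8021Q_MODULE rather
 * than CONFIG_VLAN_8021Q, so both symbols had to be tested. */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
/* ... VLAN-only code ... */
#endif

/* New form: true for both =y and =m builds. */
#if IS_ENABLED(CONFIG_VLAN_8021Q)
/* ... VLAN-only code ... */
#endif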
@@ -202,14 +202,13 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
 	return 0;
 }
 
-#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_VLAN_8021Q)
+#if IS_ENABLED(CONFIG_IPV6)
 
 static int ocrdma_inet6addr_event(struct notifier_block *notifier,
 				  unsigned long event, void *ptr)
 {
 	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
-	struct net_device *event_netdev = ifa->idev->dev;
-	struct net_device *netdev = NULL;
+	struct net_device *netdev = ifa->idev->dev;
 	struct ib_event gid_event;
 	struct ocrdma_dev *dev;
 	bool found = false;
@@ -217,11 +216,12 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
 	bool is_vlan = false;
 	u16 vid = 0;
 
-	netdev = vlan_dev_real_dev(event_netdev);
-	if (netdev != event_netdev) {
-		is_vlan = true;
-		vid = vlan_dev_vlan_id(event_netdev);
+	is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN;
+	if (is_vlan) {
+		vid = vlan_dev_vlan_id(netdev);
+		netdev = vlan_dev_real_dev(netdev);
 	}
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
 		if (dev->nic_info.netdev == netdev) {
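The reworked notifier no longer calls vlan_dev_real_dev() on an arbitrary device: that helper, like vlan_dev_vlan_id(), is only valid for VLAN devices, so the new code first checks IFF_802_1Q_VLAN in priv_flags and only then unwraps the device. A minimal sketch of the safe ordering, assuming a helper extracted purely for clarity:

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

/* Hypothetical helper: resolve a (possibly VLAN) device to its real
 * device, reporting the VLAN id if there was one. */
static struct net_device *resolve_real_dev(struct net_device *ndev, u16 *vid)
{
	if (ndev->priv_flags & IFF_802_1Q_VLAN) {
		*vid = vlan_dev_vlan_id(ndev);	/* valid only for VLAN devs */
		ndev = vlan_dev_real_dev(ndev);	/* ditto */
	}
	return ndev;
}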
@@ -6346,8 +6346,10 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
 			dd->piobcnt4k * dd->align4k;
 		dd->piovl15base = ioremap_nocache(vl15off,
 						  NUM_VL15_BUFS * dd->align4k);
-		if (!dd->piovl15base)
+		if (!dd->piovl15base) {
+			ret = -ENOMEM;
 			goto bail;
+		}
 	}
 	qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
 
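Before this hunk, a failed ioremap_nocache() jumped to bail with whatever value ret happened to hold at that point, so the caller could observe success for an out-of-memory condition. The generic shape of the fix, as a sketch:

/* Sketch: every early exit through a shared cleanup label must set
 * the error code first, or the label returns stale (possibly zero)
 * status to the caller. */
void __iomem *base = ioremap_nocache(addr, len);
if (!base) {
	ret = -ENOMEM;	/* previously missing: bail returned stale ret */
	goto bail;
}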
@@ -372,7 +372,7 @@ static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
 		/* Read CTRL reg for each channel to check TRIMDONE */
 		if (baduns & (1 << chn)) {
 			qib_dev_err(dd,
-				"Reseting TRIMDONE on chn %d (%s)\n",
+				"Resetting TRIMDONE on chn %d (%s)\n",
 				chn, where);
 			ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
 				IB_CTRL2(chn), 0x10, 0x10);
@@ -1271,12 +1271,15 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
 void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
+	unsigned long flags;
 	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
+		spin_lock_irqsave(&priv->lock, flags);
 		list_move(&tx->list, &priv->cm.reap_list);
 		queue_work(ipoib_workqueue, &priv->cm.reap_task);
 		ipoib_dbg(priv, "Reap connection for gid %pI6\n",
 			  tx->neigh->daddr + 4);
 		tx->neigh = NULL;
+		spin_unlock_irqrestore(&priv->lock, flags);
 	}
 }
 
@@ -1052,7 +1052,7 @@ void ipoib_neigh_free(struct ipoib_neigh *neigh)
 	for (n = rcu_dereference_protected(*np,
 					   lockdep_is_held(&ntbl->rwlock));
 	     n != NULL;
-	     n = rcu_dereference_protected(neigh->hnext,
+	     n = rcu_dereference_protected(*np,
 					   lockdep_is_held(&ntbl->rwlock))) {
 		if (n == neigh) {
 			/* found */
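The small fix above matters more than it looks: the loop advance re-read neigh->hnext, the next pointer of the node being searched for, instead of *np, the link slot the cursor is actually parked on, so the walk could follow the wrong chain once the two diverged. Sketched with its surrounding loop for context, abridged from the function and running under the table's write lock:

/* Abridged sketch of the corrected traversal: np points at the
 * current link slot, and the advance re-reads *np each iteration. */
for (n = rcu_dereference_protected(*np, lockdep_is_held(&ntbl->rwlock));
     n != NULL;
     n = rcu_dereference_protected(*np, lockdep_is_held(&ntbl->rwlock))) {
	if (n == neigh) {
		/* found: unlink by pointing *np past n, then stop */
		break;
	}
	np = &n->hnext;		/* advance: *np is now n's next slot */
}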
@@ -586,24 +586,62 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
 			scmnd->sc_data_direction);
 }
 
-static void srp_remove_req(struct srp_target_port *target,
-			   struct srp_request *req, s32 req_lim_delta)
+/**
+ * srp_claim_req - Take ownership of the scmnd associated with a request.
+ * @target: SRP target port.
+ * @req: SRP request.
+ * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
+ *         ownership of @req->scmnd if it equals @scmnd.
+ *
+ * Return value:
+ * Either NULL or a pointer to the SCSI command the caller became owner of.
+ */
+static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
+				       struct srp_request *req,
+				       struct scsi_cmnd *scmnd)
 {
 	unsigned long flags;
 
-	srp_unmap_data(req->scmnd, target, req);
+	spin_lock_irqsave(&target->lock, flags);
+	if (!scmnd) {
+		scmnd = req->scmnd;
+		req->scmnd = NULL;
+	} else if (req->scmnd == scmnd) {
+		req->scmnd = NULL;
+	} else {
+		scmnd = NULL;
+	}
+	spin_unlock_irqrestore(&target->lock, flags);
+
+	return scmnd;
+}
+
+/**
+ * srp_free_req() - Unmap data and add request to the free request list.
+ */
+static void srp_free_req(struct srp_target_port *target,
+			 struct srp_request *req, struct scsi_cmnd *scmnd,
+			 s32 req_lim_delta)
+{
+	unsigned long flags;
+
+	srp_unmap_data(scmnd, target, req);
+
 	spin_lock_irqsave(&target->lock, flags);
 	target->req_lim += req_lim_delta;
-	req->scmnd = NULL;
 	list_add_tail(&req->list, &target->free_reqs);
 	spin_unlock_irqrestore(&target->lock, flags);
 }
 
 static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
 {
-	req->scmnd->result = DID_RESET << 16;
-	req->scmnd->scsi_done(req->scmnd);
-	srp_remove_req(target, req, 0);
+	struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
+
+	if (scmnd) {
+		scmnd->result = DID_RESET << 16;
+		scmnd->scsi_done(scmnd);
+		srp_free_req(target, req, scmnd, 0);
+	}
 }
 
 static int srp_reconnect_target(struct srp_target_port *target)
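The claim/free split is the heart of this srp series: previously a normal completion racing with srp_abort() or srp_reset_req() could both act on req->scmnd. srp_claim_req() turns taking ownership of the command into an atomic test-and-clear under target->lock, so exactly one path completes it. A userspace sketch of the same idea, with a pthread mutex standing in for the kernel spinlock and all names illustrative:

#include <pthread.h>
#include <stddef.h>

struct request {
	pthread_mutex_t lock;
	void *cmd;		/* owned by whoever claims it */
};

/* Return cmd if the caller became its owner, NULL otherwise.  When
 * expected is non-NULL, claim only if the slot still matches it. */
static void *claim_cmd(struct request *req, void *expected)
{
	void *cmd = NULL;

	pthread_mutex_lock(&req->lock);
	if (!expected || req->cmd == expected) {
		cmd = req->cmd;	/* may itself be NULL: nothing to claim */
		req->cmd = NULL;
	}
	pthread_mutex_unlock(&req->lock);
	return cmd;
}

Both the response path and the error handlers in the hunks below call the claim first and only touch the command if it is returned, which is exactly the structure visible in srp_reset_req() and srp_abort().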
@@ -1073,11 +1111,18 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 		complete(&target->tsk_mgmt_done);
 	} else {
 		req = &target->req_ring[rsp->tag];
-		scmnd = req->scmnd;
-		if (!scmnd)
+		scmnd = srp_claim_req(target, req, NULL);
+		if (!scmnd) {
 			shost_printk(KERN_ERR, target->scsi_host,
 				     "Null scmnd for RSP w/tag %016llx\n",
 				     (unsigned long long) rsp->tag);
+
+			spin_lock_irqsave(&target->lock, flags);
+			target->req_lim += be32_to_cpu(rsp->req_lim_delta);
+			spin_unlock_irqrestore(&target->lock, flags);
+
+			return;
+		}
 		scmnd->result = rsp->status;
 
 		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
@@ -1092,7 +1137,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
 
-		srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
+		srp_free_req(target, req, scmnd,
+			     be32_to_cpu(rsp->req_lim_delta));
+
 		scmnd->host_scribble = NULL;
 		scmnd->scsi_done(scmnd);
 	}
@@ -1631,25 +1678,17 @@ static int srp_abort(struct scsi_cmnd *scmnd)
 {
 	struct srp_target_port *target = host_to_target(scmnd->device->host);
 	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
-	int ret = SUCCESS;
 
 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
 
-	if (!req || target->qp_in_error)
-		return FAILED;
-	if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
-			      SRP_TSK_ABORT_TASK))
+	if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd))
 		return FAILED;
+	srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
+			  SRP_TSK_ABORT_TASK);
+	srp_free_req(target, req, scmnd, 0);
+	scmnd->result = DID_ABORT << 16;
 
-	if (req->scmnd) {
-		if (!target->tsk_mgmt_status) {
-			srp_remove_req(target, req, 0);
-			scmnd->result = DID_ABORT << 16;
-		} else
-			ret = FAILED;
-	}
-
-	return ret;
+	return SUCCESS;
 }
 
 static int srp_reset_device(struct scsi_cmnd *scmnd)
@@ -1469,7 +1469,7 @@ static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
  *
  * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
  * the data that has been transferred via IB RDMA had to be postponed until the
- * check_stop_free() callback. None of this is nessecary anymore and needs to
+ * check_stop_free() callback. None of this is necessary anymore and needs to
  * be cleaned up.
  */
 static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
@@ -358,13 +358,14 @@ void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 }
 
 int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
-			u64 virt, int obj_size, int nobj, int reserved,
+			u64 virt, int obj_size, u32 nobj, int reserved,
 			int use_lowmem, int use_coherent)
 {
 	int obj_per_chunk;
 	int num_icm;
 	unsigned chunk_size;
 	int i;
+	u64 size;
 
 	obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
 	num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
@@ -380,10 +381,12 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 	table->coherent = use_coherent;
 	mutex_init(&table->mutex);
 
+	size = (u64) nobj * obj_size;
 	for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
 		chunk_size = MLX4_TABLE_CHUNK_SIZE;
-		if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size)
-			chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE);
+		if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > size)
+			chunk_size = PAGE_ALIGN(size -
+					i * MLX4_TABLE_CHUNK_SIZE);
 
 		table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
 					       (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
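Hoisting size = (u64) nobj * obj_size is an overflow fix, not a style change: with nobj now a u32, the products in the loop were evaluated in 32-bit arithmetic and wrap for ICM tables past 4 GB. A small userspace demonstration of the wraparound:

/* Compile-and-run demonstration of the 32-bit wrap the cast avoids. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t nobj = 1U << 27;	/* a large object count */
	int obj_size = 64;		/* bytes per object */

	uint32_t wrapped = nobj * obj_size;		/* 2^33 mod 2^32 = 0 */
	uint64_t correct = (uint64_t) nobj * obj_size;	/* 8 GiB */

	printf("32-bit product: %u\n", wrapped);	/* prints 0 */
	printf("64-bit product: %llu\n", (unsigned long long) correct);
	return 0;
}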
@@ -78,7 +78,7 @@ int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 			  int start, int end);
 int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
-			u64 virt, int obj_size, int nobj, int reserved,
+			u64 virt, int obj_size, u32 nobj, int reserved,
 			int use_lowmem, int use_coherent);
 void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
 void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle);
@@ -249,7 +249,7 @@ struct mlx4_bitmap {
 struct mlx4_buddy {
 	unsigned long	      **bits;
 	unsigned int	       *num_free;
-	int			max_order;
+	u32			max_order;
 	spinlock_t		lock;
 };
 
@@ -258,7 +258,7 @@ struct mlx4_icm;
 struct mlx4_icm_table {
 	u64			virt;
 	int			num_icm;
-	int			num_obj;
+	u32			num_obj;
 	int			obj_size;
 	int			lowmem;
 	int			coherent;
@@ -37,6 +37,7 @@
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
+#include <linux/vmalloc.h>
 
 #include <linux/mlx4/cmd.h>
 
@@ -120,7 +121,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
 	buddy->max_order = max_order;
 	spin_lock_init(&buddy->lock);
 
-	buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
+	buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *),
 			      GFP_KERNEL);
 	buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
 				  GFP_KERNEL);
@@ -129,10 +130,12 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
 
 	for (i = 0; i <= buddy->max_order; ++i) {
 		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
-		buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
-		if (!buddy->bits[i])
-			goto err_out_free;
-		bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
+		buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN);
+		if (!buddy->bits[i]) {
+			buddy->bits[i] = vzalloc(s * sizeof(long));
+			if (!buddy->bits[i])
+				goto err_out_free;
+		}
 	}
 
 	set_bit(0, buddy->bits[buddy->max_order]);
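For large max_order the per-level bitmaps outgrow what kmalloc can reliably hand out as physically contiguous memory, so the allocation first tries kcalloc() with __GFP_NOWARN (the failure is expected and handled, so the allocation-failure warning would only be noise) and falls back to vzalloc(); the matching frees in the two cleanup hunks below dispatch on is_vmalloc_addr(). The pattern, isolated into a pair of hypothetical helpers:

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

/* Hypothetical helpers isolating the alloc-with-fallback pattern. */
static void *bitmap_alloc_fallback(size_t nlongs)
{
	/* __GFP_NOWARN: a kmalloc failure here is expected, not a bug */
	void *p = kcalloc(nlongs, sizeof(long), GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(nlongs * sizeof(long));
	return p;
}

static void bitmap_free_fallback(void *p)
{
	if (is_vmalloc_addr(p))		/* false for NULL and kmalloc memory */
		vfree(p);
	else
		kfree(p);		/* kfree(NULL) is a no-op */
}

Later kernels wrap exactly this dance in kvmalloc()/kvfree().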
@@ -142,7 +145,10 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
 
 err_out_free:
 	for (i = 0; i <= buddy->max_order; ++i)
-		kfree(buddy->bits[i]);
+		if (buddy->bits[i] && is_vmalloc_addr(buddy->bits[i]))
+			vfree(buddy->bits[i]);
+		else
+			kfree(buddy->bits[i]);
 
 err_out:
 	kfree(buddy->bits);
@@ -156,7 +162,10 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
 	int i;
 
 	for (i = 0; i <= buddy->max_order; ++i)
-		kfree(buddy->bits[i]);
+		if (is_vmalloc_addr(buddy->bits[i]))
+			vfree(buddy->bits[i]);
+		else
+			kfree(buddy->bits[i]);
 
 	kfree(buddy->bits);
 	kfree(buddy->num_free);
@@ -668,7 +677,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
 		return err;
 
 	err = mlx4_buddy_init(&mr_table->mtt_buddy,
-			      ilog2(dev->caps.num_mtts /
+			      ilog2((u32)dev->caps.num_mtts /
 			      (1 << log_mtts_per_seg)));
 	if (err)
 		goto err_buddy;
@@ -678,7 +687,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
 		mlx4_alloc_mtt_range(dev,
 				     fls(dev->caps.reserved_mtts - 1));
 	if (priv->reserved_mtts < 0) {
-		mlx4_warn(dev, "MTT table of order %d is too small.\n",
+		mlx4_warn(dev, "MTT table of order %u is too small.\n",
 			  mr_table->mtt_buddy.max_order);
 		err = -ENOMEM;
 		goto err_reserve_mtts;
@@ -76,7 +76,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 		u64 size;
 		u64 start;
 		int type;
-		int num;
+		u32 num;
 		int log_num;
 	};
 
@@ -105,7 +105,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 	si_meminfo(&si);
 	request->num_mtt =
 		roundup_pow_of_two(max_t(unsigned, request->num_mtt,
-					 min(1UL << 31,
+					 min(1UL << (31 - log_mtts_per_seg),
 					     si.totalram >> (log_mtts_per_seg - 1))));
 
 	profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz;
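The profile change is also overflow avoidance: request->num_mtt counts MTT segments, and each segment holds 1 << log_mtts_per_seg entries, so capping the segment count at 1UL << 31 let the derived entry count exceed 32 bits. The arithmetic, spelled out assuming the module parameter's default log_mtts_per_seg of 3:

/* With log_mtts_per_seg == 3:
 *
 *   old cap:  (1UL << 31) segments * 8 entries/segment = 1 << 34 entries
 *             -- overflows a 32-bit entry count (cf. the (u32) cast in
 *                the mlx4_init_mr_table() hunk above)
 *
 *   new cap:  (1UL << (31 - 3)) segments * 8 = 1 << 31 entries
 *             -- stays within range
 */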