linux/drivers/infiniband/hw/hfi1/verbs_txreq.c
Michael J. Ruhl b697d7d8c7 IB/hfi1: Fix incorrect mixing of ERR_PTR and NULL return values
The __get_txreq() function can return a pointer, ERR_PTR(-EBUSY), or NULL.
All of the relevant call sites look for IS_ERR, so the NULL return would
lead to a NULL pointer dereference.

Do not use the ERR_PTR mechanism for this function.

Update all call sites to handle the return value correctly.

Clean up error paths to reflect return value.
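
A minimal sketch of the caller-side change this implies (illustrative only;
get_txreq() is the inline wrapper in verbs_txreq.h that falls back to
__get_txreq(), and the bail label and surrounding error handling here are
hypothetical, not the actual verbs.c hunk):

        /* Old contract: callers unwrapped an ERR_PTR and missed NULL. */
        tx = get_txreq(dev, qp);
        if (IS_ERR(tx)) {
                ret = PTR_ERR(tx);
                goto bail_txreq;        /* hypothetical label */
        }

        /* New contract: NULL is the only failure value. */
        tx = get_txreq(dev, qp);
        if (!tx)
                goto bail_txreq;        /* hypothetical label */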

Fixes: 45842abbb2 ("staging/rdma/hfi1: move txreq header code")
Cc: <stable@vger.kernel.org> # 4.9.x+
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Kamenee Arumugam <kamenee.arumugam@intel.com>
Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
2018-06-26 14:35:55 -06:00

/*
* Copyright(c) 2016 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "hfi.h"
#include "verbs_txreq.h"
#include "qp.h"
#include "trace.h"
#define TXREQ_LEN 24
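
/*
 * hfi1_put_txreq - release a verbs_txreq
 * @tx: the txreq to free
 *
 * Drop the MR reference (if any), clean up the sdma descriptors, and
 * return the txreq to the per-device slab cache.  If any QP is queued
 * on dev->txwait waiting for a txreq, remove the first waiter and wake
 * it so it can retry its allocation.
 */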
void hfi1_put_txreq(struct verbs_txreq *tx)
{
        struct hfi1_ibdev *dev;
        struct rvt_qp *qp;
        unsigned long flags;
        unsigned int seq;
        struct hfi1_qp_priv *priv;

        qp = tx->qp;
        dev = to_idev(qp->ibqp.device);

        if (tx->mr)
                rvt_put_mr(tx->mr);

        sdma_txclean(dd_from_dev(dev), &tx->txreq);

        /* Free verbs_txreq and return to slab cache */
        kmem_cache_free(dev->verbs_txreq_cache, tx);

        do {
                seq = read_seqbegin(&dev->txwait_lock);
                if (!list_empty(&dev->txwait)) {
                        struct iowait *wait;

                        write_seqlock_irqsave(&dev->txwait_lock, flags);
                        wait = list_first_entry(&dev->txwait, struct iowait,
                                                list);
                        qp = iowait_to_qp(wait);
                        priv = qp->priv;
                        list_del_init(&priv->s_iowait.list);
                        /* refcount held until actual wake up */
                        write_sequnlock_irqrestore(&dev->txwait_lock, flags);
                        hfi1_qp_wakeup(qp, RVT_S_WAIT_TX);
                        break;
                }
        } while (read_seqretry(&dev->txwait_lock, seq));
}
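
/*
 * __get_txreq - slow-path verbs_txreq allocation
 * @dev: the device owning the txreq cache
 * @qp: the QP requesting a txreq
 *
 * Called with qp->s_lock held when the fast-path allocation in
 * get_txreq() fails.  The allocation is retried under txwait_lock; if
 * it still fails and the QP state allows further processing, the QP is
 * queued on dev->txwait (holding a QP reference) and RVT_S_BUSY is
 * cleared so hfi1_put_txreq() can wake it when a txreq is freed.
 *
 * Returns a txreq on success or NULL on failure; ERR_PTR values are
 * never returned.
 */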
struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
                                struct rvt_qp *qp)
        __must_hold(&qp->s_lock)
{
        struct verbs_txreq *tx = NULL;

        write_seqlock(&dev->txwait_lock);
        if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
                struct hfi1_qp_priv *priv;

                tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
                if (tx)
                        goto out;
                priv = qp->priv;
                if (list_empty(&priv->s_iowait.list)) {
                        dev->n_txwait++;
                        qp->s_flags |= RVT_S_WAIT_TX;
                        list_add_tail(&priv->s_iowait.list, &dev->txwait);
                        priv->s_iowait.lock = &dev->txwait_lock;
                        trace_hfi1_qpsleep(qp, RVT_S_WAIT_TX);
                        rvt_get_qp(qp);
                }
                qp->s_flags &= ~RVT_S_BUSY;
        }
out:
        write_sequnlock(&dev->txwait_lock);
        return tx;
}
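
/*
 * verbs_txreq_init - create the per-device verbs_txreq slab cache
 * @dev: the verbs device
 *
 * The cache is named "hfi1_<unit>_vtxreq_cache" (TXREQ_LEN bounds the
 * formatted name).  Returns 0 on success or -ENOMEM if the cache could
 * not be created; verbs_txreq_exit() destroys it on teardown.
 */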
int verbs_txreq_init(struct hfi1_ibdev *dev)
{
        char buf[TXREQ_LEN];
        struct hfi1_devdata *dd = dd_from_dev(dev);

        snprintf(buf, sizeof(buf), "hfi1_%u_vtxreq_cache", dd->unit);
        dev->verbs_txreq_cache = kmem_cache_create(buf,
                                                   sizeof(struct verbs_txreq),
                                                   0, SLAB_HWCACHE_ALIGN,
                                                   NULL);
        if (!dev->verbs_txreq_cache)
                return -ENOMEM;
        return 0;
}

void verbs_txreq_exit(struct hfi1_ibdev *dev)
{
        kmem_cache_destroy(dev->verbs_txreq_cache);
        dev->verbs_txreq_cache = NULL;
}