crypto: qat - fix deadlock in backlog processing
If a request has the flag CRYPTO_TFM_REQ_MAY_BACKLOG set, the function
qat_alg_send_message_maybacklog() enqueues it on a backlog list if
either (1) there is already at least one request on the backlog list,
(2) the HW ring is nearly full, or (3) the enqueue to the HW ring fails.

If an interrupt occurs right before the lock in qat_alg_backlog_req() is
taken, and that interrupt drains the backlog queue, then the request is
backlogged even though there is no longer any request in the HW queues
that can trigger a subsequent interrupt to clear the backlog queue. In
addition, subsequent requests are enqueued to the backlog list and never
sent to the hardware.
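
Schematically (qat_alg_send_backlog() is the interrupt-context path
that drains the backlog; the exact timing is illustrative):

	CPU0 (submitter)			CPU1 (interrupt handler)
	----------------			------------------------
	qat_alg_send_message_maybacklog():
	  sees the ring nearly full (or the
	  backlog non-empty), decides to
	  backlog the request
						qat_alg_send_backlog():
						  drains the whole backlog
						  list; the last HW request
						  completes
	qat_alg_backlog_req():
	  takes backlog->lock, adds the
	  request to backlog->list

	No request is left in the HW ring, so no completion interrupt
	will ever fire to flush the newly backlogged request.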
Fix it by holding the lock while deciding whether the request needs to
be placed on the backlog queue or not. This synchronizes the flow with
the interrupt handler that drains the backlog queue.

For performance reasons, the logic first tries to enqueue without
holding the lock, and only falls back to the locked path if that fails.
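
The result is the usual check, lock, re-check pattern; condensed from
the new qat_alg_send_message_maybacklog() in the diff below:

	if (qat_alg_try_enqueue(req))		/* lockless fast path */
		return -EINPROGRESS;

	spin_lock_bh(&backlog->lock);
	if (!qat_alg_try_enqueue(req)) {	/* re-check under the lock */
		list_add_tail(&req->list, &backlog->list);
		ret = -EBUSY;
	}
	spin_unlock_bh(&backlog->lock);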
Fixes: 3868238397 ("crypto: qat - add backlog mechanism")
Reported-by: Mikulas Patocka <mpatocka@redhat.com>
Closes: https://lore.kernel.org/all/af9581e2-58f9-cc19-428f-6f18f1f83d54@redhat.com/T/
Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in: parent 03c76e8e7a, commit 203b01001c
@@ -40,40 +40,44 @@ void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
 	spin_unlock_bh(&backlog->lock);
 }
 
-static void qat_alg_backlog_req(struct qat_alg_req *req,
-				struct qat_instance_backlog *backlog)
-{
-	INIT_LIST_HEAD(&req->list);
-
-	spin_lock_bh(&backlog->lock);
-	list_add_tail(&req->list, &backlog->list);
-	spin_unlock_bh(&backlog->lock);
-}
-
-static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
+static bool qat_alg_try_enqueue(struct qat_alg_req *req)
 {
 	struct qat_instance_backlog *backlog = req->backlog;
 	struct adf_etr_ring_data *tx_ring = req->tx_ring;
 	u32 *fw_req = req->fw_req;
 
-	/* If any request is already backlogged, then add to backlog list */
+	/* Check if any request is already backlogged */
 	if (!list_empty(&backlog->list))
-		goto enqueue;
+		return false;
 
-	/* If ring is nearly full, then add to backlog list */
+	/* Check if ring is nearly full */
 	if (adf_ring_nearly_full(tx_ring))
-		goto enqueue;
+		return false;
 
-	/* If adding request to HW ring fails, then add to backlog list */
+	/* Try to enqueue to HW ring */
 	if (adf_send_message(tx_ring, fw_req))
-		goto enqueue;
+		return false;
 
-	return -EINPROGRESS;
+	return true;
+}
 
-enqueue:
-	qat_alg_backlog_req(req, backlog);
+static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
+{
+	struct qat_instance_backlog *backlog = req->backlog;
+	int ret = -EINPROGRESS;
 
-	return -EBUSY;
+	if (qat_alg_try_enqueue(req))
+		return ret;
+
+	spin_lock_bh(&backlog->lock);
+	if (!qat_alg_try_enqueue(req)) {
+		list_add_tail(&req->list, &backlog->list);
+		ret = -EBUSY;
+	}
+	spin_unlock_bh(&backlog->lock);
+
+	return ret;
 }
 
 int qat_alg_send_message(struct qat_alg_req *req)
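
For context (not part of this patch): a caller that sets
CRYPTO_TFM_REQ_MAY_BACKLOG must treat -EBUSY as "request backlogged,
completion callback will follow", which the generic crypto_wait_req()
helper already handles. A minimal caller-side sketch, assuming an
already set-up struct skcipher_request named req:

	DECLARE_CRYPTO_WAIT(wait);
	int err;

	skcipher_request_set_callback(req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);

	/* Waits out both -EINPROGRESS and -EBUSY (backlogged). */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);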