net: ipa: use a Boolean rather than count when replenishing

The count argument to ipa_endpoint_replenish() is only ever 0 or 1,
and always will be (because we always handle each receive buffer in
a single transaction).  Rename the argument to be add_one and change
it to be Boolean.

Update the function description to reflect the current code.

Signed-off-by: Alex Elder <elder@linaro.org>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Alex Elder 2021-02-05 16:10:58 -06:00 committed by Jakub Kicinski
parent d5bc5015eb
commit 9af5ccf323

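Before the diff, a rough user-space sketch of the interface change the message describes. This is not the driver code: it borrows the replenish_saved/replenish_backlog names, substitutes C11 atomics for the kernel's atomic_t helpers, and leaves out the actual buffer handling, purely to show the count argument collapsing into a Boolean.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the endpoint state the patch touches. */
struct fake_endpoint {
	bool replenish_enabled;
	atomic_uint replenish_saved;	/* requests noted while replenishing is disabled */
	atomic_uint replenish_backlog;	/* buffers the hardware could still accept */
};

/* Before the patch: a count that was only ever 0 or 1 in practice. */
static void replenish_old(struct fake_endpoint *ep, unsigned int count)
{
	if (!ep->replenish_enabled) {
		if (count)
			atomic_fetch_add(&ep->replenish_saved, count);
		return;
	}
	if (count)
		atomic_fetch_add(&ep->replenish_backlog, count);
}

/* After the patch: a Boolean meaning "a buffer was just consumed". */
static void replenish_new(struct fake_endpoint *ep, bool add_one)
{
	if (!ep->replenish_enabled) {
		if (add_one)
			atomic_fetch_add(&ep->replenish_saved, 1);
		return;
	}
	if (add_one)
		atomic_fetch_add(&ep->replenish_backlog, 1);
}

int main(void)
{
	struct fake_endpoint ep = { .replenish_enabled = true };

	replenish_old(&ep, 1);		/* old call style */
	replenish_new(&ep, true);	/* same intent, stated as a bool */

	printf("backlog = %u\n", atomic_load(&ep.replenish_backlog));
	return 0;
}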
@@ -1020,31 +1020,34 @@ err_free_pages:
 }
 
 /**
- * ipa_endpoint_replenish() - Replenish the Rx packets cache.
+ * ipa_endpoint_replenish() - Replenish endpoint receive buffers
  * @endpoint: Endpoint to be replenished
- * @count: Number of buffers to send to hardware
+ * @add_one: Whether this is replacing a just-consumed buffer
  *
- * Allocate RX packet wrapper structures with maximal socket buffers
- * for an endpoint. These are supplied to the hardware, which fills
- * them with incoming data.
+ * The IPA hardware can hold a fixed number of receive buffers for an RX
+ * endpoint, based on the number of entries in the underlying channel ring
+ * buffer. If an endpoint's "backlog" is non-zero, it indicates how many
+ * more receive buffers can be supplied to the hardware. Replenishing for
+ * an endpoint can be disabled, in which case requests to replenish a
+ * buffer are "saved", and transferred to the backlog once it is re-enabled
+ * again.
  */
-static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
+static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
 {
 	struct gsi *gsi;
 	u32 backlog;
 
 	if (!endpoint->replenish_enabled) {
-		if (count)
-			atomic_add(count, &endpoint->replenish_saved);
+		if (add_one)
+			atomic_inc(&endpoint->replenish_saved);
 		return;
 	}
 
 	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
 		if (ipa_endpoint_replenish_one(endpoint))
 			goto try_again_later;
-	if (count)
-		atomic_add(count, &endpoint->replenish_backlog);
+	if (add_one)
+		atomic_inc(&endpoint->replenish_backlog);
 
 	return;
@@ -1052,8 +1055,8 @@ try_again_later:
 	/* The last one didn't succeed, so fix the backlog */
 	backlog = atomic_inc_return(&endpoint->replenish_backlog);
 
-	if (count)
-		atomic_add(count, &endpoint->replenish_backlog);
+	if (add_one)
+		atomic_inc(&endpoint->replenish_backlog);
 
 	/* Whenever a receive buffer transaction completes we'll try to
 	 * replenish again. It's unlikely, but if we fail to supply even
@@ -1080,7 +1083,7 @@ static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
 	/* Start replenishing if hardware currently has no buffers */
 	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
 	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
-		ipa_endpoint_replenish(endpoint, 0);
+		ipa_endpoint_replenish(endpoint, false);
 }
 
 static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
@@ -1099,7 +1102,7 @@ static void ipa_endpoint_replenish_work(struct work_struct *work)
 	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
 
-	ipa_endpoint_replenish(endpoint, 0);
+	ipa_endpoint_replenish(endpoint, false);
 }
 
 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
@@ -1300,7 +1303,7 @@ static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
 {
 	struct page *page;
 
-	ipa_endpoint_replenish(endpoint, 1);
+	ipa_endpoint_replenish(endpoint, true);
 
 	if (trans->cancelled)
 		return;
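
The new function description above speaks of replenish requests being "saved" while replenishing is disabled and transferred to the backlog once it is re-enabled. A rough user-space illustration of that bookkeeping follows; the helper names (note_buffer_consumed, replenish_enable_sketch) are invented for the example and the atomics are plain C11, not the driver's actual enable path.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Same simplified endpoint state as in the earlier sketch. */
struct fake_endpoint {
	bool replenish_enabled;
	atomic_uint replenish_saved;
	atomic_uint replenish_backlog;
};

/* While disabled, a consumed buffer only bumps the "saved" count. */
static void note_buffer_consumed(struct fake_endpoint *ep)
{
	if (!ep->replenish_enabled)
		atomic_fetch_add(&ep->replenish_saved, 1);
	else
		atomic_fetch_add(&ep->replenish_backlog, 1);
}

/* Re-enabling transfers everything saved over to the backlog. */
static void replenish_enable_sketch(struct fake_endpoint *ep)
{
	unsigned int saved;

	ep->replenish_enabled = true;
	saved = atomic_exchange(&ep->replenish_saved, 0);
	atomic_fetch_add(&ep->replenish_backlog, saved);
}

int main(void)
{
	struct fake_endpoint ep = { .replenish_enabled = false };

	note_buffer_consumed(&ep);	/* saved becomes 1 */
	note_buffer_consumed(&ep);	/* saved becomes 2 */
	replenish_enable_sketch(&ep);	/* backlog becomes 2, saved 0 */

	printf("backlog = %u, saved = %u\n",
	       atomic_load(&ep.replenish_backlog),
	       atomic_load(&ep.replenish_saved));
	return 0;
}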