ath10k: cleanup copy engine receive next completion
The physical address needed to unmap DMA ('bufferp') is stored in ath10k_skb_cb as 'paddr'. For diag register read and write operations, 'paddr' is stored in the transfer context. ath10k does not rely on the meta/transfer_id. So the unused output arguments {bufferp, transfer_idp and flagsp} are removed from the CE recv_next completion functions.

Signed-off-by: Rajkumar Manoharan <rmanohar@qti.qualcomm.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
commit 24d9ef5eff
parent e3a91f877c
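To make the new contract concrete, here is a minimal sketch of a receive-completion consumer written against the simplified three-argument API (illustrative only, modelled on the ath10k_pci_process_rx_cb loop in the diff below; the function name example_rx_drain and its body are not part of the patch):

/*
 * Illustrative sketch only (not part of the patch): drain completed RX
 * buffers using the simplified API. The DMA address now comes from the
 * skb control block (ATH10K_SKB_RXCB(skb)->paddr) rather than from a
 * bufferp output argument, and transfer_id/flags are no longer reported.
 */
static void example_rx_drain(struct ath10k *ar, struct ath10k_ce_pipe *ce_state)
{
	void *transfer_context;
	unsigned int nbytes;
	struct sk_buff *skb;

	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &nbytes) == 0) {
		skb = transfer_context;

		/* unmap using the paddr stashed at rx-post time */
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		skb_put(skb, nbytes);

		/* hand the skb to the next layer here */
	}
}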
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -444,14 +444,10 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
  */
 int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
 					 void **per_transfer_contextp,
-					 u32 *bufferp,
-					 unsigned int *nbytesp,
-					 unsigned int *transfer_idp,
-					 unsigned int *flagsp)
+					 unsigned int *nbytesp)
 {
 	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
 	unsigned int nentries_mask = dest_ring->nentries_mask;
-	struct ath10k *ar = ce_state->ar;
 	unsigned int sw_index = dest_ring->sw_index;
 
 	struct ce_desc *base = dest_ring->base_addr_owner_space;
@@ -476,14 +472,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
 	desc->nbytes = 0;
 
 	/* Return data from completed destination descriptor */
-	*bufferp = __le32_to_cpu(sdesc.addr);
 	*nbytesp = nbytes;
-	*transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);
-
-	if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
-		*flagsp = CE_RECV_FLAG_SWAPPED;
-	else
-		*flagsp = 0;
 
 	if (per_transfer_contextp)
 		*per_transfer_contextp =
@@ -501,10 +490,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
 
 int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
 				  void **per_transfer_contextp,
-				  u32 *bufferp,
-				  unsigned int *nbytesp,
-				  unsigned int *transfer_idp,
-				  unsigned int *flagsp)
+				  unsigned int *nbytesp)
 {
 	struct ath10k *ar = ce_state->ar;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -513,8 +499,7 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
 	spin_lock_bh(&ar_pci->ce_lock);
 	ret = ath10k_ce_completed_recv_next_nolock(ce_state,
 						   per_transfer_contextp,
-						   bufferp, nbytesp,
-						   transfer_idp, flagsp);
+						   nbytesp);
 	spin_unlock_bh(&ar_pci->ce_lock);
 
 	return ret;
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -177,10 +177,7 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
  */
 int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
 				  void **per_transfer_contextp,
-				  u32 *bufferp,
-				  unsigned int *nbytesp,
-				  unsigned int *transfer_idp,
-				  unsigned int *flagsp);
+				  unsigned int *nbytesp);
 /*
  * Supply data for the next completed unprocessed send descriptor.
  * Pops 1 completed send buffer from Source ring.
@@ -212,10 +209,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
 
 int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
 					 void **per_transfer_contextp,
-					 u32 *bufferp,
-					 unsigned int *nbytesp,
-					 unsigned int *transfer_idp,
-					 unsigned int *flagsp);
+					 unsigned int *nbytesp);
 
 /*
  * Support clean shutdown by allowing the caller to cancel
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -870,10 +870,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	int ret = 0;
-	u32 buf;
+	u32 *buf;
 	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
-	unsigned int id;
-	unsigned int flags;
 	struct ath10k_ce_pipe *ce_diag;
 	/* Host buffer address in CE space */
 	u32 ce_data;
@@ -909,7 +907,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
 		nbytes = min_t(unsigned int, remaining_bytes,
 			       DIAG_TRANSFER_LIMIT);
 
-		ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
+		ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
 		if (ret != 0)
 			goto done;
 
@@ -940,9 +938,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
 		}
 
 		i = 0;
-		while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
-							    &completed_nbytes,
-							    &id, &flags) != 0) {
+		while (ath10k_ce_completed_recv_next_nolock(ce_diag,
+							    (void **)&buf,
+							    &completed_nbytes)
+							    != 0) {
 			mdelay(1);
 
 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -956,7 +955,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
 			goto done;
 		}
 
-		if (buf != ce_data) {
+		if (*buf != ce_data) {
 			ret = -EIO;
 			goto done;
 		}
@@ -1026,10 +1025,8 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	int ret = 0;
-	u32 buf;
+	u32 *buf;
 	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
-	unsigned int id;
-	unsigned int flags;
 	struct ath10k_ce_pipe *ce_diag;
 	void *data_buf = NULL;
 	u32 ce_data; /* Host buffer address in CE space */
@@ -1078,7 +1075,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
 		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
 
 		/* Set up to receive directly into Target(!) address */
-		ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
+		ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
 		if (ret != 0)
 			goto done;
 
@@ -1103,9 +1100,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
 		}
 
 		i = 0;
-		while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
-							    &completed_nbytes,
-							    &id, &flags) != 0) {
+		while (ath10k_ce_completed_recv_next_nolock(ce_diag,
+							    (void **)&buf,
+							    &completed_nbytes)
+							    != 0) {
 			mdelay(1);
 
 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -1119,7 +1117,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
 			goto done;
 		}
 
-		if (buf != address) {
+		if (*buf != address) {
 			ret = -EIO;
 			goto done;
 		}
@@ -1181,15 +1179,11 @@ static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
 	struct sk_buff *skb;
 	struct sk_buff_head list;
 	void *transfer_context;
-	u32 ce_data;
 	unsigned int nbytes, max_nbytes;
-	unsigned int transfer_id;
-	unsigned int flags;
 
 	__skb_queue_head_init(&list);
 	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
-					     &ce_data, &nbytes, &transfer_id,
-					     &flags) == 0) {
+					     &nbytes) == 0) {
 		skb = transfer_context;
 		max_nbytes = skb->len + skb_tailroom(skb);
 		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
@@ -1835,13 +1829,10 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
 {
 	struct ath10k *ar = ce_state->ar;
 	struct bmi_xfer *xfer;
-	u32 ce_data;
 	unsigned int nbytes;
-	unsigned int transfer_id;
-	unsigned int flags;
 
-	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
-					  &nbytes, &transfer_id, &flags))
+	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
+					  &nbytes))
 		return;
 
 	if (WARN_ON_ONCE(!xfer))