net: brcmfmac: Convey execution context via argument to brcmf_netif_rx()
brcmf_netif_rx() uses in_interrupt() to choose between netif_rx() and
netif_rx_ni(). The usage of in_interrupt() in drivers is being phased out.

Convey the execution mode via an 'inirq' argument through the various
call chains leading to brcmf_netif_rx():

  brcmf_pcie_isr_thread()                 <- Task context
    brcmf_proto_msgbuf_rx_trigger()
      brcmf_msgbuf_process_rx()
        brcmf_msgbuf_process_msgtype()
          brcmf_msgbuf_process_rx_complete()
            brcmf_netif_mon_rx()
              brcmf_netif_rx(inirq = false)
            brcmf_netif_rx(inirq = false)

  brcmf_sdio_readframes()                 <- Task context, sdio_claim_host()
    brcmf_rx_frame(inirq = false)            might sleep

  brcmf_sdio_rxglom()                     <- Task context, sdio_claim_host()
    brcmf_rx_frame(inirq = false)            might sleep

  brcmf_usb_rx_complete()                 <- Interrupt context
    brcmf_rx_frame(inirq = true)

  brcmf_rx_frame()
    brcmf_proto_rxreorder()
      brcmf_proto_bcdc_rxreorder()
        brcmf_fws_rxreorder()
          brcmf_netif_rx()
      brcmf_netif_rx()

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Arend van Spriel <arend.vanspriel@broadcom.com>
Cc: Kalle Valo <kvalo@codeaurora.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d067c0fa29
commit 687006e20c
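For illustration, a minimal sketch of the pattern the patch applies (the helper
name deliver_skb() is hypothetical, not part of the driver): the caller, which
knows whether it runs in hard-interrupt context, passes that knowledge down as
a bool, and the delivery path selects netif_rx() or netif_rx_ni() from that
argument instead of querying in_interrupt():

    /* Minimal sketch, not the driver's actual code. */
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static void deliver_skb(struct sk_buff *skb, bool inirq)
    {
            if (inirq) {
                    /* Hard-interrupt context: queue the skb for softirq
                     * processing on interrupt return.
                     */
                    netif_rx(skb);
            } else {
                    /* Task context: netif_rx_ni() wakes softirq processing
                     * explicitly to service NET_RX_SOFTIRQ.
                     */
                    netif_rx_ni(skb);
            }
    }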
@@ -397,9 +397,9 @@ brcmf_proto_bcdc_add_tdls_peer(struct brcmf_pub *drvr, int ifidx,
 }
 
 static void brcmf_proto_bcdc_rxreorder(struct brcmf_if *ifp,
-				       struct sk_buff *skb)
+				       struct sk_buff *skb, bool inirq)
 {
-	brcmf_fws_rxreorder(ifp, skb);
+	brcmf_fws_rxreorder(ifp, skb, inirq);
 }
 
 static void
@@ -249,7 +249,8 @@ int brcmf_bus_reset(struct brcmf_bus *bus)
  */
 
 /* Receive frame for delivery to OS. Callee disposes of rxp. */
-void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp, bool handle_event);
+void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp, bool handle_event,
+		    bool inirq);
 
 /* Receive async event packet from firmware. Callee disposes of rxp. */
 void brcmf_rx_event(struct device *dev, struct sk_buff *rxp);
@@ -395,7 +395,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
 	spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
 }
 
-void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
+void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
 {
 	/* Most of Broadcom's firmwares send 802.11f ADD frame every time a new
 	 * STA connects to the AP interface. This is an obsoleted standard most
@@ -418,14 +418,15 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
 	ifp->ndev->stats.rx_packets++;
 
 	brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
-	if (in_interrupt())
+	if (inirq) {
 		netif_rx(skb);
-	else
+	} else {
 		/* If the receive is not processed inside an ISR,
 		 * the softirqd must be woken explicitly to service
 		 * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
 		 */
 		netif_rx_ni(skb);
+	}
 }
 
 void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb)
@@ -474,7 +475,7 @@ void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb)
 	skb->pkt_type = PACKET_OTHERHOST;
 	skb->protocol = htons(ETH_P_802_2);
 
-	brcmf_netif_rx(ifp, skb);
+	brcmf_netif_rx(ifp, skb, false);
 }
 
 static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
@@ -496,7 +497,8 @@ static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
 	return 0;
 }
 
-void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event)
+void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event,
+		    bool inirq)
 {
 	struct brcmf_if *ifp;
 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
@@ -508,14 +510,14 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event)
 		return;
 
 	if (brcmf_proto_is_reorder_skb(skb)) {
-		brcmf_proto_rxreorder(ifp, skb);
+		brcmf_proto_rxreorder(ifp, skb, inirq);
 	} else {
 		/* Process special event packets */
 		if (handle_event)
 			brcmf_fweh_process_skb(ifp->drvr, skb,
 					       BCMILCP_SUBTYPE_VENDOR_LONG);
 
-		brcmf_netif_rx(ifp, skb);
+		brcmf_netif_rx(ifp, skb, inirq);
 	}
 }
 
@@ -208,7 +208,7 @@ void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked);
 void brcmf_txflowblock_if(struct brcmf_if *ifp,
 			  enum brcmf_netif_stop_reason reason, bool state);
 void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
-void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
+void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
 void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb);
 void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked);
 int brcmf_net_mon_attach(struct brcmf_if *ifp);
@@ -1664,7 +1664,7 @@ static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
 	rfi->pend_pkts -= skb_queue_len(skb_list);
 }
 
-void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
 {
 	struct brcmf_pub *drvr = ifp->drvr;
 	u8 *reorder_data;
@@ -1682,7 +1682,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
 	/* validate flags and flow id */
 	if (flags == 0xFF) {
 		bphy_err(drvr, "invalid flags...so ignore this packet\n");
-		brcmf_netif_rx(ifp, pkt);
+		brcmf_netif_rx(ifp, pkt, inirq);
 		return;
 	}
 
@@ -1694,7 +1694,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
 	if (rfi == NULL) {
 		brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
 			  flow_id);
-		brcmf_netif_rx(ifp, pkt);
+		brcmf_netif_rx(ifp, pkt, inirq);
 		return;
 	}
 
@@ -1719,7 +1719,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
 		rfi = kzalloc(buf_size, GFP_ATOMIC);
 		if (rfi == NULL) {
 			bphy_err(drvr, "failed to alloc buffer\n");
-			brcmf_netif_rx(ifp, pkt);
+			brcmf_netif_rx(ifp, pkt, inirq);
 			return;
 		}
 
@@ -1833,7 +1833,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
 netif_rx:
 	skb_queue_walk_safe(&reorder_list, pkt, pnext) {
 		__skb_unlink(pkt, &reorder_list);
-		brcmf_netif_rx(ifp, pkt);
+		brcmf_netif_rx(ifp, pkt, inirq);
 	}
 }
 
@@ -42,6 +42,6 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp);
 void brcmf_fws_del_interface(struct brcmf_if *ifp);
 void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb);
 void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked);
-void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb);
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
 
 #endif /* FWSIGNAL_H_ */
@@ -536,7 +536,8 @@ static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
 	return -ENODEV;
 }
 
-static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
+static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb,
+				   bool inirq)
 {
 }
 
@@ -1190,7 +1191,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
 	}
 
 	skb->protocol = eth_type_trans(skb, ifp->ndev);
-	brcmf_netif_rx(ifp, skb);
+	brcmf_netif_rx(ifp, skb, false);
 }
 
 static void brcmf_msgbuf_process_gen_status(struct brcmf_msgbuf *msgbuf,
@@ -32,7 +32,7 @@ struct brcmf_proto {
 			     u8 peer[ETH_ALEN]);
 	void (*add_tdls_peer)(struct brcmf_pub *drvr, int ifidx,
 			      u8 peer[ETH_ALEN]);
-	void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb);
+	void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
 	void (*add_if)(struct brcmf_if *ifp);
 	void (*del_if)(struct brcmf_if *ifp);
 	void (*reset_if)(struct brcmf_if *ifp);
@@ -109,9 +109,9 @@ static inline bool brcmf_proto_is_reorder_skb(struct sk_buff *skb)
 }
 
 static inline void
-brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
+brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
 {
-	ifp->drvr->proto->rxreorder(ifp, skb);
+	ifp->drvr->proto->rxreorder(ifp, skb, inirq);
 }
 
 static inline void
@@ -1704,7 +1704,7 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 			brcmf_rx_event(bus->sdiodev->dev, pfirst);
 		else
 			brcmf_rx_frame(bus->sdiodev->dev, pfirst,
-				       false);
+				       false, false);
 		bus->sdcnt.rxglompkts++;
 	}
 
@@ -2038,7 +2038,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 			brcmf_rx_event(bus->sdiodev->dev, pkt);
 		else
 			brcmf_rx_frame(bus->sdiodev->dev, pkt,
-				       false);
+				       false, false);
 
 		/* prepare the descriptor for the next read */
 		rd->len = rd->len_nxtfrm << 4;
@@ -532,7 +532,7 @@ static void brcmf_usb_rx_complete(struct urb *urb)
 	if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP ||
 	    devinfo->bus_pub.state == BRCMFMAC_USB_STATE_SLEEP) {
 		skb_put(skb, urb->actual_length);
-		brcmf_rx_frame(devinfo->dev, skb, true);
+		brcmf_rx_frame(devinfo->dev, skb, true, true);
 		brcmf_usb_rx_refill(devinfo, req);
 		usb_mark_last_busy(urb->dev);
 	} else {