Merge branch 'reduce-open-coded-skb-next-access-for-gso-segment-walking'
Jason A. Donenfeld says: ==================== reduce open coded skb->next access for gso segment walking This patchset introduces the skb_list_walk_safe helper macro, in order to add some sanity to the myriad ways drivers have of walking through gso segments. The goal is to reduce future bugs commonly caused by open coding these sorts of things, and to in the future make it easier to swap out the underlying list representation. This first patch series addresses the easy uses of drivers iterating over the returned list of skb_gso_segments, for drivers that live in drivers/net/*. There are still other use cases to tackle later for net/*, and after these low-hanging fruits are taken care of, I imagine there are more subtle cases of gso segment walking that aren't just a direct return value from skb_gso_segments, and eventually these will have to be tackled. This series is the first step in that direction. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
6ea0032f84
@ -7874,8 +7874,8 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
|
||||
static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
|
||||
struct netdev_queue *txq, struct sk_buff *skb)
|
||||
{
|
||||
struct sk_buff *segs, *nskb;
|
||||
u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
|
||||
struct sk_buff *segs, *seg, *next;
|
||||
|
||||
/* Estimate the number of fragments in the worst case */
|
||||
if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
|
||||
@ -7898,12 +7898,10 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
|
||||
if (IS_ERR(segs) || !segs)
|
||||
goto tg3_tso_bug_end;
|
||||
|
||||
do {
|
||||
nskb = segs;
|
||||
segs = segs->next;
|
||||
nskb->next = NULL;
|
||||
tg3_start_xmit(nskb, tp->dev);
|
||||
} while (segs);
|
||||
skb_list_walk_safe(segs, seg, next) {
|
||||
skb_mark_not_on_list(seg);
|
||||
tg3_start_xmit(seg, tp->dev);
|
||||
}
|
||||
|
||||
tg3_tso_bug_end:
|
||||
dev_consume_skb_any(skb);
|
||||
|
@ -2892,7 +2892,7 @@ drop:
|
||||
static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
|
||||
struct net_device *dev)
|
||||
{
|
||||
struct sk_buff *segs, *curr;
|
||||
struct sk_buff *segs, *curr, *next;
|
||||
struct myri10ge_priv *mgp = netdev_priv(dev);
|
||||
struct myri10ge_slice_state *ss;
|
||||
netdev_tx_t status;
|
||||
@ -2901,10 +2901,8 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
|
||||
if (IS_ERR(segs))
|
||||
goto drop;
|
||||
|
||||
while (segs) {
|
||||
curr = segs;
|
||||
segs = segs->next;
|
||||
curr->next = NULL;
|
||||
skb_list_walk_safe(segs, curr, next) {
|
||||
skb_mark_not_on_list(curr);
|
||||
status = myri10ge_xmit(curr, dev);
|
||||
if (status != 0) {
|
||||
dev_kfree_skb_any(curr);
|
||||
|
@ -307,12 +307,9 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
|
||||
dev_consume_skb_any(skb);
|
||||
skb = segments;
|
||||
|
||||
while (skb) {
|
||||
next = skb->next;
|
||||
skb->next = NULL;
|
||||
|
||||
skb_list_walk_safe(skb, skb, next) {
|
||||
skb_mark_not_on_list(skb);
|
||||
efx_enqueue_skb(tx_queue, skb);
|
||||
skb = next;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -1223,7 +1223,7 @@ vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
|
||||
{
|
||||
struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
|
||||
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
|
||||
struct sk_buff *segs;
|
||||
struct sk_buff *segs, *curr, *next;
|
||||
int maclen, datalen;
|
||||
int status;
|
||||
int gso_size, gso_type, gso_segs;
|
||||
@ -1282,11 +1282,8 @@ vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
|
||||
skb_reset_mac_header(skb);
|
||||
|
||||
status = 0;
|
||||
while (segs) {
|
||||
struct sk_buff *curr = segs;
|
||||
|
||||
segs = segs->next;
|
||||
curr->next = NULL;
|
||||
skb_list_walk_safe(segs, curr, next) {
|
||||
skb_mark_not_on_list(curr);
|
||||
if (port->tso && curr->len > dev->mtu) {
|
||||
skb_shinfo(curr)->gso_size = gso_size;
|
||||
skb_shinfo(curr)->gso_type = gso_type;
|
||||
|
@ -341,6 +341,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
|
||||
features |= tap->tap_features;
|
||||
if (netif_needs_gso(skb, features)) {
|
||||
struct sk_buff *segs = __skb_gso_segment(skb, features, false);
|
||||
struct sk_buff *next;
|
||||
|
||||
if (IS_ERR(segs))
|
||||
goto drop;
|
||||
@ -352,16 +353,13 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
|
||||
}
|
||||
|
||||
consume_skb(skb);
|
||||
while (segs) {
|
||||
struct sk_buff *nskb = segs->next;
|
||||
|
||||
segs->next = NULL;
|
||||
if (ptr_ring_produce(&q->ring, segs)) {
|
||||
kfree_skb(segs);
|
||||
kfree_skb_list(nskb);
|
||||
skb_list_walk_safe(segs, skb, next) {
|
||||
skb_mark_not_on_list(skb);
|
||||
if (ptr_ring_produce(&q->ring, skb)) {
|
||||
kfree_skb(skb);
|
||||
kfree_skb_list(next);
|
||||
break;
|
||||
}
|
||||
segs = nskb;
|
||||
}
|
||||
} else {
|
||||
/* If we receive a partial checksum and the tap side
|
||||
|
@ -1897,8 +1897,8 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
|
||||
{
|
||||
if (skb_shinfo(skb)->gso_size) {
|
||||
netdev_features_t features = tp->netdev->features;
|
||||
struct sk_buff *segs, *seg, *next;
|
||||
struct sk_buff_head seg_list;
|
||||
struct sk_buff *segs, *nskb;
|
||||
|
||||
features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
|
||||
segs = skb_gso_segment(skb, features);
|
||||
@ -1907,12 +1907,10 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
|
||||
|
||||
__skb_queue_head_init(&seg_list);
|
||||
|
||||
do {
|
||||
nskb = segs;
|
||||
segs = segs->next;
|
||||
nskb->next = NULL;
|
||||
__skb_queue_tail(&seg_list, nskb);
|
||||
} while (segs);
|
||||
skb_list_walk_safe(segs, seg, next) {
|
||||
skb_mark_not_on_list(seg);
|
||||
__skb_queue_tail(&seg_list, seg);
|
||||
}
|
||||
|
||||
skb_queue_splice(&seg_list, list);
|
||||
dev_kfree_skb(skb);
|
||||
|
@ -62,12 +62,4 @@ struct wg_device {
|
||||
int wg_device_init(void);
|
||||
void wg_device_uninit(void);
|
||||
|
||||
/* Later after the dust settles, this can be moved into include/linux/skbuff.h,
|
||||
* where virtually all code that deals with GSO segs can benefit, around ~30
|
||||
* drivers as of writing.
|
||||
*/
|
||||
#define skb_list_walk_safe(first, skb, next) \
|
||||
for (skb = first, next = skb->next; skb; \
|
||||
skb = next, next = skb ? skb->next : NULL)
|
||||
|
||||
#endif /* _WG_DEVICE_H */
|
||||
|
@ -847,10 +847,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
|
||||
else if (next)
|
||||
consume_skb(skb);
|
||||
|
||||
while (next) {
|
||||
tmp = next;
|
||||
next = tmp->next;
|
||||
|
||||
skb_list_walk_safe(next, tmp, next) {
|
||||
memcpy(tmp->cb, cb, sizeof(tmp->cb));
|
||||
/*
|
||||
* Compute the length of all the data added for the A-MSDU.
|
||||
@ -880,9 +877,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
|
||||
skb_shinfo(tmp)->gso_size = 0;
|
||||
}
|
||||
|
||||
tmp->prev = NULL;
|
||||
tmp->next = NULL;
|
||||
|
||||
skb_mark_not_on_list(tmp);
|
||||
__skb_queue_tail(mpdus_skb, tmp);
|
||||
i++;
|
||||
}
|
||||
|
@ -1478,6 +1478,11 @@ static inline void skb_mark_not_on_list(struct sk_buff *skb)
|
||||
skb->next = NULL;
|
||||
}
|
||||
|
||||
/* Iterate through singly-linked GSO fragments of an skb. */
|
||||
#define skb_list_walk_safe(first, skb, next) \
|
||||
for ((skb) = (first), (next) = (skb) ? (skb)->next : NULL; (skb); \
|
||||
(skb) = (next), (next) = (skb) ? (skb)->next : NULL)
|
||||
|
||||
static inline void skb_list_del_init(struct sk_buff *skb)
|
||||
{
|
||||
__list_del_entry(&skb->list);
|
||||
|
Loading…
Reference in New Issue
Block a user