xen: XSA-448 security patches for v6.8

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYIAB0WIQRTLbB6QfY48x44uB6AXGG7T9hjvgUCZZusZAAKCRCAXGG7T9hj
 vusnAQDZVJ2Tl+zhOQiEZFn8N+6NXtcCCHvV9UFYGfH1nOBagAD9Fe/RO8WSSfUM
 f4aPMZJC4UFeizGm4Fjg0TUOy66hTQg=
 =7F1F
 -----END PGP SIGNATURE-----

Merge tag 'xsa448-6.8-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen netback fix from Juergen Gross:
 "Transmit requests in Xen's virtual network protocol can consist of
  multiple parts. While not really useful, except for the initial part
  any of them may be of zero length, i.e. carry no data at all.

  Besides a certain initial portion of the to be transferred data, these
  parts are directly translated into what Linux calls SKB fragments.
  Such converted request parts can, when for a particular SKB they are
  all of length zero, lead to a de-reference of NULL in core networking
  code"

* tag 'xsa448-6.8-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen-netback: don't produce zero-size SKB frags
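
For reference, each transmit request part occupies one fixed-layout slot on
the shared TX ring. The structure below follows the layout published in
Xen's xen/include/public/io/netif.h; the typedef and the helper at the end
are simplified, hypothetical additions for this sketch only, not part of
the driver:

#include <stdint.h>
#include <stdbool.h>

typedef uint32_t grant_ref_t;	/* stand-in for Xen's grant reference type */

/* One TX ring slot. A multi-part packet occupies consecutive slots;
 * xen-netback copies an initial portion of the data and turns the
 * remaining parts into SKB fragments.
 */
struct xen_netif_tx_request {
	grant_ref_t gref;	/* grant reference of the data page */
	uint16_t offset;	/* offset within the granted page */
	uint16_t flags;		/* XEN_NETTXF_* flags */
	uint16_t id;		/* echoed back in the response */
	uint16_t size;		/* bytes in this part; may legally be 0 */
};

/* Hypothetical illustration of the invariant the fix enforces: a part
 * becomes an SKB fragment only if it actually carries data.
 */
static bool demo_part_becomes_frag(const struct xen_netif_tx_request *txp)
{
	return txp->size != 0;
}

A guest that submits a chain whose non-initial parts all have size zero is
protocol-compliant, which is why the backend, not the frontend, has to
filter such parts out.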
commit 0f0d819aef (Linus Torvalds, 2024-01-22 09:40:05 -08:00)

--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c

@@ -463,12 +463,25 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 	}
 
 	for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
-	     shinfo->nr_frags++, gop++, nr_slots--) {
+	     nr_slots--) {
+		if (unlikely(!txp->size)) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&queue->response_lock, flags);
+			make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
+			push_tx_responses(queue);
+			spin_unlock_irqrestore(&queue->response_lock, flags);
+			++txp;
+			continue;
+		}
+
 		index = pending_index(queue->pending_cons++);
 		pending_idx = queue->pending_ring[index];
 		xenvif_tx_create_map_op(queue, pending_idx, txp,
 					txp == first ? extra_count : 0, gop);
 		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+		++shinfo->nr_frags;
+		++gop;
 
 		if (txp == first)
 			txp = txfrags;
@@ -481,20 +494,39 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 		shinfo = skb_shinfo(nskb);
 		frags = shinfo->frags;
 
-		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
-		     shinfo->nr_frags++, txp++, gop++) {
+		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
+			if (unlikely(!txp->size)) {
+				unsigned long flags;
+
+				spin_lock_irqsave(&queue->response_lock, flags);
+				make_tx_response(queue, txp, 0,
+						 XEN_NETIF_RSP_OKAY);
+				push_tx_responses(queue);
+				spin_unlock_irqrestore(&queue->response_lock,
+						       flags);
+				continue;
+			}
+
 			index = pending_index(queue->pending_cons++);
 			pending_idx = queue->pending_ring[index];
 			xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
 						gop);
 			frag_set_pending_idx(&frags[shinfo->nr_frags],
 					     pending_idx);
+			++shinfo->nr_frags;
+			++gop;
 		}
 
-		skb_shinfo(skb)->frag_list = nskb;
+		if (shinfo->nr_frags) {
+			skb_shinfo(skb)->frag_list = nskb;
+			nskb = NULL;
+		}
 	}
 
 	if (nskb) {
 		/* A frag_list skb was allocated but it is no longer needed
-		 * because enough slots were converted to copy ops above.
+		 * because enough slots were converted to copy ops above or some
+		 * were empty.
 		 */
 		kfree_skb(nskb);
 	}
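
Reduced to a standalone sketch, the control-flow change in both loops looks
as follows. All demo_* names and types are simplified stand-ins invented for
this illustration; in the real driver the acknowledgement is
make_tx_response()/push_tx_responses() under queue->response_lock, and the
mapping is xenvif_tx_create_map_op():

#include <stddef.h>
#include <stdint.h>

struct demo_req  { uint16_t id; uint16_t size; };	/* one ring slot */
struct demo_frag { size_t pending_idx; };		/* one SKB frag slot */

static void demo_ack_empty(const struct demo_req *txp)
{
	(void)txp;	/* a real backend would queue an OKAY response here */
}

static size_t demo_map(const struct demo_req *txp)
{
	return txp->id;	/* a real backend would create a grant map op */
}

/* Mirrors the patched loops: the frag cursor advances only for slots
 * that carry data, so a chain of all-zero parts yields zero frags
 * instead of a set of empty ones that core networking code may choke on.
 */
static size_t demo_fill_frags(const struct demo_req *txp, size_t nr_slots,
			      struct demo_frag *frags)
{
	size_t nr_frags = 0;

	for (; nr_slots > 0; ++txp, --nr_slots) {
		if (txp->size == 0) {
			demo_ack_empty(txp);	/* respond immediately */
			continue;		/* no frag, no map op */
		}
		frags[nr_frags++].pending_idx = demo_map(txp);
	}

	return nr_frags;
}

The same reasoning explains the tail of the patch: nskb is attached as a
frag_list only when the second loop produced at least one fragment, and is
freed otherwise, covering the new case where every remaining slot was empty.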