Mirror of https://github.com/torvalds/linux.git (synced 2024-12-13 22:53:20 +00:00)
Merge branch 'skb-sgvec-overflow'
Jason A. Donenfeld says:

====================
net: Avoiding stack overflow in skb_to_sgvec

The recent bug with macsec and the historical one with virtio have indicated
that letting skb_to_sgvec trounce all over an sglist without checking the
length is probably a bad idea. And it's not necessary either: an sglist
already explicitly marks its last item, and the initialization functions are
diligent in doing so. Thus there's a clear way of avoiding future overflows.

So, this patchset, from a high level, makes skb_to_sgvec return a potential
error code, and then adjusts all callers to check for the error code. There
are two situations in which skb_to_sgvec might return such an error:

   1) When the passed in sglist is too small; and
   2) When the passed in skbuff is too deeply nested.

So, the first patch in this series handles the issues with skb_to_sgvec
directly, and the remaining ones then handle the call sites.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit a619cc8bed
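From a caller's point of view, the contract after this series is: initialize the scatterlist with sg_init_table() (which marks the final entry), call skb_to_sgvec(), and treat a negative return as an error to propagate rather than assuming success. Below is a minimal sketch of that pattern; the function example_map_skb and its parameters are illustrative only and do not appear in any of the patched files.

#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

/* Illustrative caller of the post-series API: skb_to_sgvec() now returns
 * the number of scatterlist entries it filled, or a negative error such as
 * -EMSGSIZE when the sglist is too small or the skb is nested too deeply.
 */
static int example_map_skb(struct sk_buff *skb, struct scatterlist *sg,
			   unsigned int nents)
{
	int nsg;

	sg_init_table(sg, nents);	/* marks sg[nents - 1] as the last entry */

	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(nsg < 0))
		return nsg;		/* propagate instead of overflowing */

	/* nsg entries of sg[] are now valid and end-marked */
	return nsg;
}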
@@ -740,7 +740,12 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 	macsec_fill_iv(iv, secy->sci, pn);
 
 	sg_init_table(sg, ret);
-	skb_to_sgvec(skb, sg, 0, skb->len);
+	ret = skb_to_sgvec(skb, sg, 0, skb->len);
+	if (unlikely(ret < 0)) {
+		macsec_txsa_put(tx_sa);
+		kfree_skb(skb);
+		return ERR_PTR(ret);
+	}
 
 	if (tx_sc->encrypt) {
 		int len = skb->len - macsec_hdr_len(sci_present) -
@@ -947,7 +952,11 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
 	macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));
 
 	sg_init_table(sg, ret);
-	skb_to_sgvec(skb, sg, 0, skb->len);
+	ret = skb_to_sgvec(skb, sg, 0, skb->len);
+	if (unlikely(ret < 0)) {
+		kfree_skb(skb);
+		return ERR_PTR(ret);
+	}
 
 	if (hdr->tci_an & MACSEC_TCI_E) {
 		/* confidentiality: ethernet + macsec header
@@ -1150,7 +1150,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
 	struct virtio_net_hdr_mrg_rxbuf *hdr;
 	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
 	struct virtnet_info *vi = sq->vq->vdev->priv;
-	unsigned num_sg;
+	int num_sg;
 	unsigned hdr_len = vi->hdr_len;
 	bool can_push;
 
@@ -1177,11 +1177,16 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
 	if (can_push) {
 		__skb_push(skb, hdr_len);
 		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
+		if (unlikely(num_sg < 0))
+			return num_sg;
 		/* Pull header back to avoid skew in tx bytes calculations. */
 		__skb_pull(skb, hdr_len);
 	} else {
 		sg_set_buf(sq->sg, hdr, hdr_len);
-		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
+		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
+		if (unlikely(num_sg < 0))
+			return num_sg;
+		num_sg++;
 	}
 	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
 }
@@ -953,10 +953,10 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
 				     unsigned int headroom);
 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
 				int newtailroom, gfp_t priority);
-int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
-			int offset, int len);
-int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
-		 int len);
+int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
+				     int offset, int len);
+int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
+			      int offset, int len);
 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
 int skb_pad(struct sk_buff *skb, int pad);
 #define dev_kfree_skb(a) consume_skb(a)
@@ -3508,24 +3508,18 @@ void __init skb_init(void)
 						NULL);
 }
 
-/**
- * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
- * @skb: Socket buffer containing the buffers to be mapped
- * @sg: The scatter-gather list to map into
- * @offset: The offset into the buffer's contents to start mapping
- * @len: Length of buffer space to be mapped
- *
- * Fill the specified scatter-gather list with mappings/pointers into a
- * region of the buffer space attached to a socket buffer.
- */
 static int
-__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
+	       unsigned int recursion_level)
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
 	struct sk_buff *frag_iter;
 	int elt = 0;
 
+	if (unlikely(recursion_level >= 24))
+		return -EMSGSIZE;
+
 	if (copy > 0) {
 		if (copy > len)
 			copy = len;
@@ -3544,6 +3538,8 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
 		if ((copy = end - offset) > 0) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+			if (unlikely(elt && sg_is_last(&sg[elt - 1])))
+				return -EMSGSIZE;
 
 			if (copy > len)
 				copy = len;
@@ -3558,16 +3554,22 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 	}
 
 	skb_walk_frags(skb, frag_iter) {
-		int end;
+		int end, ret;
 
 		WARN_ON(start > offset + len);
 
 		end = start + frag_iter->len;
 		if ((copy = end - offset) > 0) {
+			if (unlikely(elt && sg_is_last(&sg[elt - 1])))
+				return -EMSGSIZE;
+
 			if (copy > len)
 				copy = len;
-			elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
-					      copy);
+			ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
+					     copy, recursion_level + 1);
+			if (unlikely(ret < 0))
+				return ret;
+			elt += ret;
 			if ((len -= copy) == 0)
 				return elt;
 			offset += copy;
@@ -3578,6 +3580,31 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 	return elt;
 }
 
+/**
+ * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
+ * @skb: Socket buffer containing the buffers to be mapped
+ * @sg: The scatter-gather list to map into
+ * @offset: The offset into the buffer's contents to start mapping
+ * @len: Length of buffer space to be mapped
+ *
+ * Fill the specified scatter-gather list with mappings/pointers into a
+ * region of the buffer space attached to a socket buffer. Returns either
+ * the number of scatterlist items used, or -EMSGSIZE if the contents
+ * could not fit.
+ */
+int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+{
+	int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
+
+	if (nsg <= 0)
+		return nsg;
+
+	sg_mark_end(&sg[nsg - 1]);
+
+	return nsg;
+}
+EXPORT_SYMBOL_GPL(skb_to_sgvec);
+
 /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given
  * sglist without mark the sg which contain last skb data as the end.
  * So the caller can mannipulate sg list as will when padding new data after
@@ -3600,19 +3627,11 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
 			int offset, int len)
 {
-	return __skb_to_sgvec(skb, sg, offset, len);
+	return __skb_to_sgvec(skb, sg, offset, len, 0);
 }
 EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
 
-int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
-{
-	int nsg = __skb_to_sgvec(skb, sg, offset, len);
-
-	sg_mark_end(&sg[nsg - 1]);
-
-	return nsg;
-}
-EXPORT_SYMBOL_GPL(skb_to_sgvec);
-
 /**
  * skb_cow_data - Check that a socket buffer's data buffers are writable
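The sg_mark_end()/sg_is_last() pairing above is what turns an over-long skb into a clean -EMSGSIZE instead of a write past the end of the caller's array: sg_init_table() has already flagged the final entry, so the walker can refuse to step beyond it. A minimal standalone sketch of that idea follows, with a hypothetical helper name; the real logic lives in __skb_to_sgvec() above.

/* Hypothetical illustration of the end-marker check used by __skb_to_sgvec():
 * before writing entry 'elt', bail out if the previous entry is already the
 * one sg_init_table() marked as last, i.e. the table is full.
 */
static int example_fill_next(struct scatterlist *sg, int elt,
			     struct page *page, unsigned int len,
			     unsigned int offset)
{
	if (unlikely(elt && sg_is_last(&sg[elt - 1])))
		return -EMSGSIZE;	/* caller's sglist has no room left */

	sg_set_page(&sg[elt], page, len, offset);
	return elt + 1;			/* next free slot */
}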
@@ -220,7 +220,9 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
 	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
 
 	sg_init_table(sg, nfrags + sglists);
-	skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+	if (unlikely(err < 0))
+		goto out_free;
 
 	if (x->props.flags & XFRM_STATE_ESN) {
 		/* Attach seqhi sg right after packet payload */
@@ -393,7 +395,9 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
 	skb_push(skb, ihl);
 
 	sg_init_table(sg, nfrags + sglists);
-	skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+	if (unlikely(err < 0))
+		goto out_free;
 
 	if (x->props.flags & XFRM_STATE_ESN) {
 		/* Attach seqhi sg right after packet payload */
@@ -377,9 +377,11 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
 	esp->esph = esph;
 
 	sg_init_table(sg, esp->nfrags);
-	skb_to_sgvec(skb, sg,
-		     (unsigned char *)esph - skb->data,
-		     assoclen + ivlen + esp->clen + alen);
+	err = skb_to_sgvec(skb, sg,
+			   (unsigned char *)esph - skb->data,
+			   assoclen + ivlen + esp->clen + alen);
+	if (unlikely(err < 0))
+		goto error;
 
 	if (!esp->inplace) {
 		int allocsize;
@@ -403,9 +405,11 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
 		spin_unlock_bh(&x->lock);
 
 		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
-		skb_to_sgvec(skb, dsg,
-			     (unsigned char *)esph - skb->data,
-			     assoclen + ivlen + esp->clen + alen);
+		err = skb_to_sgvec(skb, dsg,
+				   (unsigned char *)esph - skb->data,
+				   assoclen + ivlen + esp->clen + alen);
+		if (unlikely(err < 0))
+			goto error;
 	}
 
 	if ((x->props.flags & XFRM_STATE_ESN))
@@ -690,7 +694,9 @@ skip_cow:
 	esp_input_set_header(skb, seqhi);
 
 	sg_init_table(sg, nfrags);
-	skb_to_sgvec(skb, sg, 0, skb->len);
+	err = skb_to_sgvec(skb, sg, 0, skb->len);
+	if (unlikely(err < 0))
+		goto out;
 
 	skb->ip_summed = CHECKSUM_NONE;
@@ -423,7 +423,9 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
 	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
 
 	sg_init_table(sg, nfrags + sglists);
-	skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+	if (unlikely(err < 0))
+		goto out_free;
 
 	if (x->props.flags & XFRM_STATE_ESN) {
 		/* Attach seqhi sg right after packet payload */
@@ -606,7 +608,9 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
 	ip6h->hop_limit = 0;
 
 	sg_init_table(sg, nfrags + sglists);
-	skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+	if (unlikely(err < 0))
+		goto out_free;
 
 	if (x->props.flags & XFRM_STATE_ESN) {
 		/* Attach seqhi sg right after packet payload */
@@ -346,9 +346,11 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
 	esph = esp_output_set_esn(skb, x, ip_esp_hdr(skb), seqhi);
 
 	sg_init_table(sg, esp->nfrags);
-	skb_to_sgvec(skb, sg,
-		     (unsigned char *)esph - skb->data,
-		     assoclen + ivlen + esp->clen + alen);
+	err = skb_to_sgvec(skb, sg,
+			   (unsigned char *)esph - skb->data,
+			   assoclen + ivlen + esp->clen + alen);
+	if (unlikely(err < 0))
+		goto error;
 
 	if (!esp->inplace) {
 		int allocsize;
@@ -372,9 +374,11 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
 		spin_unlock_bh(&x->lock);
 
 		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
-		skb_to_sgvec(skb, dsg,
-			     (unsigned char *)esph - skb->data,
-			     assoclen + ivlen + esp->clen + alen);
+		err = skb_to_sgvec(skb, dsg,
+				   (unsigned char *)esph - skb->data,
+				   assoclen + ivlen + esp->clen + alen);
+		if (unlikely(err < 0))
+			goto error;
 	}
 
 	if ((x->props.flags & XFRM_STATE_ESN))
@@ -618,7 +622,9 @@ skip_cow:
 	esp_input_set_header(skb, seqhi);
 
 	sg_init_table(sg, nfrags);
-	skb_to_sgvec(skb, sg, 0, skb->len);
+	ret = skb_to_sgvec(skb, sg, 0, skb->len);
+	if (unlikely(ret < 0))
+		goto out;
 
 	skb->ip_summed = CHECKSUM_NONE;
@@ -227,7 +227,9 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
 	len &= ~(call->conn->size_align - 1);
 
 	sg_init_table(sg, nsg);
-	skb_to_sgvec(skb, sg, 0, len);
+	err = skb_to_sgvec(skb, sg, 0, len);
+	if (unlikely(err < 0))
+		goto out;
 	skcipher_request_set_crypt(req, sg, sg, len, iv.x);
 	crypto_skcipher_encrypt(req);
@@ -324,7 +326,7 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
 	bool aborted;
 	u32 data_size, buf;
 	u16 check;
-	int nsg;
+	int nsg, ret;
 
 	_enter("");
@@ -342,7 +344,9 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
 		goto nomem;
 
 	sg_init_table(sg, nsg);
-	skb_to_sgvec(skb, sg, offset, 8);
+	ret = skb_to_sgvec(skb, sg, offset, 8);
+	if (unlikely(ret < 0))
+		return ret;
 
 	/* start the decryption afresh */
 	memset(&iv, 0, sizeof(iv));
@@ -409,7 +413,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
 	bool aborted;
 	u32 data_size, buf;
 	u16 check;
-	int nsg;
+	int nsg, ret;
 
 	_enter(",{%d}", skb->len);
@@ -434,7 +438,12 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
 	}
 
 	sg_init_table(sg, nsg);
-	skb_to_sgvec(skb, sg, offset, len);
+	ret = skb_to_sgvec(skb, sg, offset, len);
+	if (unlikely(ret < 0)) {
+		if (sg != _sg)
+			kfree(sg);
+		return ret;
+	}
 
 	/* decrypt from the session key */
 	token = call->conn->params.key->payload.data[0];