lib/scatterlist: Avoid potential scatterlist entry overflow
Since the scatterlist length field is an unsigned int, make sure that sg_alloc_table_from_pages does not overflow it while coalescing pages to a single entry.

v2: Drop reference to future use. Use UINT_MAX.
v3: max_segment must be page aligned.
v4: Do not rely on compiler to optimise out the rounddown. (Joonas Lahtinen)
v5: Simplified loops and use post-increments rather than pre-increments. Use PAGE_MASK and fix comment typo. (Andy Shevchenko)
v6: Commit spelling fix.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
Cc: linux-kernel@vger.kernel.org
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Andy Shevchenko <andy.shevchenko@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20170803091312.22875-1-tvrtko.ursulin@linux.intel.com
This commit is contained in:
parent c4860ad605
commit c125906b83
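To make the issue in the commit message concrete: with 4 KiB pages, a physically contiguous run of 2^20 pages already adds up to 4 GiB, which no longer fits in the unsigned int length field. The following is a minimal userspace sketch of that wrap-around (not kernel code; the MODEL_* constants are local stand-ins for PAGE_SIZE and the new page-aligned cap):

#include <limits.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel constants, assuming 4 KiB pages. */
#define MODEL_PAGE_SIZE    4096u
#define MODEL_MAX_SEGMENT  (UINT_MAX & ~(MODEL_PAGE_SIZE - 1))

int main(void)
{
	/* A contiguous run of exactly 4 GiB worth of pages. */
	unsigned long long n_pages = (1ULL << 32) / MODEL_PAGE_SIZE;
	unsigned int length = 0;	/* the scatterlist length field is an unsigned int */
	unsigned long long i;

	for (i = 0; i < n_pages; i++)
		length += MODEL_PAGE_SIZE;	/* silently wraps back to 0 */

	printf("%llu coalesced pages -> length field reads %u\n", n_pages, length);
	printf("capping each entry at %u bytes avoids the wrap\n", MODEL_MAX_SEGMENT);
	return 0;
}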
include/linux/scatterlist.h:

@@ -20,6 +20,12 @@ struct scatterlist {
 #endif
 };
 
+/*
+ * Since the above length field is an unsigned int, below we define the maximum
+ * length in bytes that can be stored in one scatterlist entry.
+ */
+#define SCATTERLIST_MAX_SEGMENT (UINT_MAX & PAGE_MASK)
+
 /*
  * These macros should be used after a dma_map_sg call has been done
  * to get bus addresses of each of the SG entries and their lengths.
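A quick note on the value this macro evaluates to (assuming the common 4 KiB PAGE_SIZE; the exact number depends on the architecture): it is simply UINT_MAX rounded down to a page boundary, i.e. the largest page-aligned length that still fits in the unsigned int length field. A tiny userspace check, with the mask spelled out locally:

#include <limits.h>
#include <stdio.h>

/* Local stand-in for PAGE_MASK, assuming 4 KiB pages. */
#define MODEL_PAGE_MASK (~(4096u - 1))

int main(void)
{
	unsigned int max_segment = UINT_MAX & MODEL_PAGE_MASK;

	/* Prints 0xfffff000: 4294963200 bytes, i.e. 1048575 whole pages. */
	printf("max segment = %#x (%u bytes, %u pages)\n",
	       max_segment, max_segment, max_segment / 4096u);
	return 0;
}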
lib/scatterlist.c:

@@ -394,17 +394,22 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
 				unsigned int offset, unsigned long size,
 				gfp_t gfp_mask)
 {
-	unsigned int chunks;
-	unsigned int i;
-	unsigned int cur_page;
+	const unsigned int max_segment = SCATTERLIST_MAX_SEGMENT;
+	unsigned int chunks, cur_page, seg_len, i;
 	int ret;
 	struct scatterlist *s;
 
 	/* compute number of contiguous chunks */
 	chunks = 1;
-	for (i = 1; i < n_pages; ++i)
-		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
-			++chunks;
+	seg_len = 0;
+	for (i = 1; i < n_pages; i++) {
+		seg_len += PAGE_SIZE;
+		if (seg_len >= max_segment ||
+		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
+			chunks++;
+			seg_len = 0;
+		}
+	}
 
 	ret = sg_alloc_table(sgt, chunks, gfp_mask);
 	if (unlikely(ret))
@@ -413,17 +418,21 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
 	/* merging chunks and putting them into the scatterlist */
 	cur_page = 0;
 	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
-		unsigned long chunk_size;
-		unsigned int j;
+		unsigned int j, chunk_size;
 
 		/* look for the end of the current chunk */
-		for (j = cur_page + 1; j < n_pages; ++j)
-			if (page_to_pfn(pages[j]) !=
+		seg_len = 0;
+		for (j = cur_page + 1; j < n_pages; j++) {
+			seg_len += PAGE_SIZE;
+			if (seg_len >= max_segment ||
+			    page_to_pfn(pages[j]) !=
 			    page_to_pfn(pages[j - 1]) + 1)
 				break;
+		}
 
 		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
-		sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
+		sg_set_page(s, pages[cur_page],
+			    min_t(unsigned long, size, chunk_size), offset);
 		size -= chunk_size;
 		offset = 0;
 		cur_page = j;
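For reference, the effect of the new seg_len bookkeeping can be modelled outside the kernel. In this sketch (a userspace approximation only; pfn_of() and the MODEL_* constants are invented stand-ins for page_to_pfn(), PAGE_SIZE and SCATTERLIST_MAX_SEGMENT, assuming 4 KiB pages) a fully contiguous 8 GiB run of pages is split into several chunks of at most max_segment bytes, instead of being counted as one entry whose length would overflow:

#include <limits.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE    4096ULL
#define MODEL_MAX_SEGMENT  ((unsigned long long)(UINT_MAX & ~(4096u - 1)))

/* Stand-in for page_to_pfn(): page i of a run that is contiguous throughout. */
static unsigned long long pfn_of(unsigned long long i)
{
	return 0x100000ULL + i;
}

/* Mirrors the chunk-counting loop added by this patch, but on plain numbers. */
static unsigned long long count_chunks(unsigned long long n_pages)
{
	unsigned long long chunks = 1, seg_len = 0, i;

	for (i = 1; i < n_pages; i++) {
		seg_len += MODEL_PAGE_SIZE;
		if (seg_len >= MODEL_MAX_SEGMENT ||
		    pfn_of(i) != pfn_of(i - 1) + 1) {
			chunks++;
			seg_len = 0;
		}
	}
	return chunks;
}

int main(void)
{
	/* 8 GiB of contiguous pages: one entry would overflow, so it splits. */
	unsigned long long n_pages = (8ULL << 30) / MODEL_PAGE_SIZE;

	printf("%llu pages -> %llu scatterlist chunks (<= %llu bytes each)\n",
	       n_pages, count_chunks(n_pages), MODEL_MAX_SEGMENT);
	return 0;
}

The same cap is applied again in the merging loop of the patch, so every entry passed to sg_set_page() stays within what its unsigned int length can represent.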