forked from Minki/linux
gve: fix dma sync bug where not all pages synced
The previous commit had a bug where the last page in the memory range
could not be synced. This change fixes the behavior so that all the
required pages are synced.
Fixes: 9cfeeb576d ("gve: Fixes DMA synchronization")
Signed-off-by: Adi Suresh <adisuresh@google.com>
Reviewed-by: Catherine Sullivan <csully@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in: commit db96c2cb48 (parent 075e238d12)
@ -393,12 +393,13 @@ static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
|
|||||||
/*
 * gve_dma_sync_for_device() - sync to device every page touched by an iov.
 * @dev:        device owning the DMA mappings
 * @page_buses: per-page array of DMA bus addresses
 * @iov_offset: byte offset of the iov into the mapped region
 * @iov_len:    length of the iov in bytes
 *
 * Walks the page range [iov_offset, iov_offset + iov_len) inclusively by
 * page index, so a range ending mid-page still syncs its final page (the
 * bug the original open-coded byte loop had).
 *
 * NOTE(review): assumes iov_len > 0 — with iov_len == 0 and iov_offset == 0
 * the last-page computation would underflow; callers appear to guarantee a
 * non-empty iov, but confirm at call sites.
 */
static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
				    u64 iov_offset, u64 iov_len)
{
	u64 first = iov_offset / PAGE_SIZE;
	/* -1 so an exactly page-aligned end does not pull in an extra page */
	u64 last = (iov_offset + iov_len - 1) / PAGE_SIZE;
	u64 i;

	for (i = first; i <= last; i++)
		dma_sync_single_for_device(dev, page_buses[i], PAGE_SIZE,
					   DMA_TO_DEVICE);
}
|
Loading…
Reference in New Issue
Block a user