* Don't assume that physical address 0 is invalid and fail the request.
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQEcBAABAgAGBQJTpJSbAAoJEFjIrFwIi8fJSDQH/3wm3iDUrW+yxxXofTG056v3
 mO4Yl2xAuP5bTVIQWEGU/3hxkfuHF4fKdHYULbZGmVMDfmT7SfnkT67i2uCFu3y5
 H7/GjZwbo5Oz6PCWrR+xAd2Xp3TuDhs24/wi7xFhWswgacZWEJzjC41jGqJEZbpB
 iidyI3z7/LppJcJAxzeBjCuqj//zL26Bk2nOKNtZIdRX4G+bEpRVRDLDBo7wSUmN
 BIZysIsQtid24GwiV3FsG5WQypdIFSDJRNlkIeUHOEpVd9QDuTZLU5xbtM0suwHy
 mAjaNTg3uGUoUbTrvf/lNmaxg1546moj2z2gKWCYJ9aEI28kNxI/G/mJ4ml2hWQ=
 =wH3p
 -----END PGP SIGNATURE-----

Merge tag 'stable/for-linus-3.16-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb

Pull swiotlb bugfix from Konrad Rzeszutek Wilk:
 "One bug-fix that had been in tree for quite some time.  We had assumed
  that the physical address zero was invalid and would fail it.  But
  that is not true and on some architectures it is not reserved and
  valid.  This fixes it"

* tag 'stable/for-linus-3.16-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb:
  swiotlb: don't assume PA 0 is invalid
Linus Torvalds 2014-06-27 18:04:22 -07:00
commit 772205d8e4
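
For context, the fix boils down to a sentinel-value problem: the array that remembers each bounce-buffer slot's original address used 0 to mean "slot unused", but 0 can itself be a valid physical address. Below is a minimal userspace sketch (not the kernel code; phys_addr_t and INVALID_PHYS_ADDR are redefined here only to mirror the patch) of why an all-ones sentinel works where 0 does not:

/*
 * Minimal sketch, assuming only that no real mapping ever starts at the
 * all-ones physical address.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

/* Same definition the patch introduces. */
#define INVALID_PHYS_ADDR (~(phys_addr_t)0)

int main(void)
{
	/* Old scheme: unused slots were simply zero-filled (memset),
	 * so a real mapping of physical address 0 looked "unused" too. */
	phys_addr_t unused_slot = 0;
	phys_addr_t mapped_pa_zero = 0;
	printf("old scheme tells them apart: %d\n",
	       unused_slot != mapped_pa_zero);		/* prints 0 */

	/* New scheme: unused slots hold an impossible address instead,
	 * so a mapping of physical address 0 is synced like any other. */
	unused_slot = INVALID_PHYS_ADDR;
	printf("new scheme tells them apart: %d\n",
	       unused_slot != mapped_pa_zero);		/* prints 1 */
	return 0;
}

That is the switch the diff below makes: seed io_tlb_orig_addr[] with INVALID_PHYS_ADDR instead of zero, and compare against the sentinel rather than testing the address for truth.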

@@ -86,6 +86,7 @@ static unsigned int io_tlb_index;
  * We need to save away the original address corresponding to a mapped entry
  * for the sync operations.
  */
+#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
 static phys_addr_t *io_tlb_orig_addr;
 
 /*
@@ -188,12 +189,14 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	io_tlb_list = memblock_virt_alloc(
 				PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
 				PAGE_SIZE);
-	for (i = 0; i < io_tlb_nslabs; i++)
-		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-	io_tlb_index = 0;
 	io_tlb_orig_addr = memblock_virt_alloc(
 				PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
 				PAGE_SIZE);
+	for (i = 0; i < io_tlb_nslabs; i++) {
+		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+	}
+	io_tlb_index = 0;
 
 	if (verbose)
 		swiotlb_print_info();
@@ -313,10 +316,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	if (!io_tlb_list)
 		goto cleanup3;
 
-	for (i = 0; i < io_tlb_nslabs; i++)
-		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-	io_tlb_index = 0;
-
 	io_tlb_orig_addr = (phys_addr_t *)
 		__get_free_pages(GFP_KERNEL,
 				 get_order(io_tlb_nslabs *
@@ -324,7 +323,11 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	if (!io_tlb_orig_addr)
 		goto cleanup4;
 
-	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));
+	for (i = 0; i < io_tlb_nslabs; i++) {
+		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+	}
+	io_tlb_index = 0;
 
 	swiotlb_print_info();
@@ -556,7 +559,8 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
 	/*
 	 * First, sync the memory before unmapping the entry
 	 */
-	if (orig_addr && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+	if (orig_addr != INVALID_PHYS_ADDR &&
+	    ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
 		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
 
 	/*
@@ -573,8 +577,10 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
 		 * Step 1: return the slots to the free list, merging the
 		 * slots with superceeding slots
 		 */
-		for (i = index + nslots - 1; i >= index; i--)
+		for (i = index + nslots - 1; i >= index; i--) {
 			io_tlb_list[i] = ++count;
+			io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+		}
 		/*
 		 * Step 2: merge the returned slots with the preceding slots,
 		 * if available (non zero)
@@ -593,6 +599,8 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
 	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
 	phys_addr_t orig_addr = io_tlb_orig_addr[index];
 
+	if (orig_addr == INVALID_PHYS_ADDR)
+		return;
 	orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
 
 	switch (target) {
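
Taken together, the hunks keep io_tlb_orig_addr[] filled with INVALID_PHYS_ADDR for every slot that is not currently in use: both init paths seed the whole array with the sentinel, swiotlb_tbl_unmap_single() writes it back as slots are returned to the free list, and the unmap and sync paths test against the sentinel instead of treating a zero original address as "nothing to do".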