/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup:
 *
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
 *
 * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
|
|
|
|
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/mm.h>
|
2012-07-25 21:20:03 +00:00
|
|
|
#include <linux/memblock.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/spinlock.h>
|
2011-05-27 18:25:11 +00:00
|
|
|
#include <linux/sched.h> /* for show_stack */
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/string.h>
|
|
|
|
#include <linux/pci.h>
|
|
|
|
#include <linux/dma-mapping.h>
|
2008-10-22 20:39:04 +00:00
|
|
|
#include <linux/crash_dump.h>
|
2011-02-10 09:10:47 +00:00
|
|
|
#include <linux/memory.h>
|
2012-10-02 16:57:57 +00:00
|
|
|
#include <linux/of.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <asm/io.h>
|
|
|
|
#include <asm/prom.h>
|
|
|
|
#include <asm/rtas.h>
|
|
|
|
#include <asm/iommu.h>
|
|
|
|
#include <asm/pci-bridge.h>
|
|
|
|
#include <asm/machdep.h>
|
2005-08-03 04:35:25 +00:00
|
|
|
#include <asm/firmware.h>
|
2005-09-20 03:45:41 +00:00
|
|
|
#include <asm/tce.h>
|
2005-09-27 16:50:25 +00:00
|
|
|
#include <asm/ppc-pci.h>
|
2005-11-07 02:18:13 +00:00
|
|
|
#include <asm/udbg.h>
|
2011-02-10 09:10:47 +00:00
|
|
|
#include <asm/mmzone.h>
|
2013-08-22 09:53:52 +00:00
|
|
|
#include <asm/plpar_wrappers.h>
|
2005-11-03 04:33:31 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-06-29 20:58:33 +00:00
|
|
|
/*
 * Software-invalidate a range of TCEs just written to the table.
 *
 * @tbl:    table whose it_index holds the (ioremapped) invalidate MMIO
 *          register address and whose it_busno may hold magic routing bits
 *          (both overloaded by iommu_table_setparms() when the device tree
 *          provides "linux,tce-sw-invalidate-info").
 * @startp: first TCE entry that was modified
 * @endp:   last TCE entry that was modified
 *
 * Writes the physical address of each affected cacheline of TCEs to the
 * invalidate register so the bridge re-fetches them.
 */
static void tce_invalidate_pSeries_sw(struct iommu_table *tbl,
				      __be64 *startp, __be64 *endp)
{
	u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
	unsigned long start, end, inc;

	start = __pa(startp);
	end = __pa(endp);
	inc = L1_CACHE_BYTES; /* invalidate a cacheline of TCEs at a time */

	/* If this is non-zero, change the format. We shift the
	 * address and or in the magic from the device tree. */
	if (tbl->it_busno) {
		start <<= 12;
		end <<= 12;
		inc <<= 12;
		start |= tbl->it_busno;
		end |= tbl->it_busno;
	}

	end |= inc - 1; /* round up end to be different than start */

	mb(); /* Make sure TCEs in memory are written */
	while (start <= end) {
		out_be64(invalidate, start);
		start += inc;
	}
}
|
|
|
|
|
2008-07-23 18:31:16 +00:00
|
|
|
/*
 * Build @npages TCE entries directly in the table for non-LPAR (bare
 * metal) pSeries, mapping the virtual range starting at @uaddr.
 *
 * @index:     first TCE entry number to write
 * @direction: DMA direction; anything other than DMA_TO_DEVICE also gets
 *             write permission in the TCE
 * @attrs:     DMA attributes (unused here)
 *
 * Returns 0 (this path cannot fail).
 */
static int tce_build_pSeries(struct iommu_table *tbl, long index,
			      long npages, unsigned long uaddr,
			      enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	u64 proto_tce;
	__be64 *tcep, *tces;
	u64 rpn;

	proto_tce = TCE_PCI_READ; // Read allowed

	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	tces = tcep = ((__be64 *)tbl->it_base) + index;

	while (npages--) {
		/* can't move this out since we might cross MEMBLOCK boundary */
		rpn = __pa(uaddr) >> TCE_SHIFT;
		*tcep = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);

		uaddr += TCE_PAGE_SIZE;
		tcep++;
	}

	/* Some bridges need the written range explicitly invalidated. */
	if (tbl->it_type & TCE_PCI_SWINV_CREATE)
		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
	return 0;
}
|
|
|
|
|
|
|
|
|
|
|
|
static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
|
|
|
|
{
|
2013-10-17 12:21:15 +00:00
|
|
|
__be64 *tcep, *tces;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2013-10-17 12:21:15 +00:00
|
|
|
tces = tcep = ((__be64 *)tbl->it_base) + index;
|
2006-04-29 03:51:59 +00:00
|
|
|
|
|
|
|
while (npages--)
|
|
|
|
*(tcep++) = 0;
|
2011-06-29 20:58:33 +00:00
|
|
|
|
2012-06-26 21:26:37 +00:00
|
|
|
if (tbl->it_type & TCE_PCI_SWINV_FREE)
|
2011-06-29 20:58:33 +00:00
|
|
|
tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2006-06-23 06:35:10 +00:00
|
|
|
static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
|
|
|
|
{
|
2013-10-17 12:21:15 +00:00
|
|
|
__be64 *tcep;
|
2006-06-23 06:35:10 +00:00
|
|
|
|
2013-10-17 12:21:15 +00:00
|
|
|
tcep = ((__be64 *)tbl->it_base) + index;
|
2006-06-23 06:35:10 +00:00
|
|
|
|
2013-10-17 12:21:15 +00:00
|
|
|
return be64_to_cpu(*tcep);
|
2006-06-23 06:35:10 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-07-23 18:31:16 +00:00
|
|
|
/* Forward declarations: used below to roll back partial mappings. */
static void tce_free_pSeriesLP(struct iommu_table*, long, long);
static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);

/*
 * Build @npages TCE entries one at a time under an LPAR, via the
 * plpar_tce_put() hypervisor call.
 *
 * On H_NOT_ENOUGH_RESOURCES the entries mapped so far are freed again
 * and the hypervisor return code is propagated (cast to int).  Other
 * hcall failures are only logged (rate-limited) and not propagated.
 *
 * Returns 0 on success.
 */
static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
			       long npages, unsigned long uaddr,
			       enum dma_data_direction direction,
			       struct dma_attrs *attrs)
{
	u64 rc = 0;
	u64 proto_tce, tce;
	u64 rpn;
	int ret = 0;
	/* Remembered so a partial mapping can be undone on failure. */
	long tcenum_start = tcenum, npages_start = npages;

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	while (npages--) {
		tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
		/* tcenum << 12: the hcall takes a byte offset into the window */
		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);

		if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
			ret = (int)rc;
			/* Undo the entries already written this call. */
			tce_free_pSeriesLP(tbl, tcenum_start,
					   (npages_start - (npages + 1)));
			break;
		}

		if (rc && printk_ratelimit()) {
			printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
			printk("\ttcenum = 0x%llx\n", (u64)tcenum);
			printk("\ttce val = 0x%llx\n", tce );
			show_stack(current, (unsigned long *)__get_SP());
		}

		tcenum++;
		rpn++;
	}
	return ret;
}
|
|
|
|
|
2013-10-17 12:21:15 +00:00
|
|
|
static DEFINE_PER_CPU(__be64 *, tce_page);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-07-23 18:31:16 +00:00
|
|
|
/*
 * Build @npages TCE entries under an LPAR using the batched
 * plpar_tce_put_indirect() hypervisor call: up to one page worth of TCEs
 * is staged in a per-CPU scratch page and handed to the hypervisor in a
 * single call.
 *
 * Falls back to the one-at-a-time tce_build_pSeriesLP() for single pages
 * or when the scratch page cannot be allocated.  On
 * H_NOT_ENOUGH_RESOURCES the partial mapping is freed and the error
 * propagated; other hcall failures are only logged (rate-limited).
 *
 * Returns 0 on success.
 */
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
				    long npages, unsigned long uaddr,
				    enum dma_data_direction direction,
				    struct dma_attrs *attrs)
{
	u64 rc = 0;
	u64 proto_tce;
	__be64 *tcep;
	u64 rpn;
	long l, limit;
	long tcenum_start = tcenum, npages_start = npages;
	int ret = 0;
	unsigned long flags;

	if (npages == 1) {
		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
					   direction, attrs);
	}

	local_irq_save(flags);	/* to protect tcep and the page behind it */

	tcep = __get_cpu_var(tce_page);

	/* This is safe to do since interrupts are off when we're called
	 * from iommu_alloc{,_sg}()
	 */
	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		/* If allocation fails, fall back to the loop implementation */
		if (!tcep) {
			local_irq_restore(flags);
			return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
						   direction, attrs);
		}
		__get_cpu_var(tce_page) = tcep;
	}

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
			rpn++;
		}

		rc = plpar_tce_put_indirect((u64)tbl->it_index,
					    (u64)tcenum << 12,
					    (u64)__pa(tcep),
					    limit);

		npages -= limit;
		tcenum += limit;
	} while (npages > 0 && !rc);

	local_irq_restore(flags);

	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
		ret = (int)rc;
		/* Undo everything mapped before the failing batch. */
		tce_freemulti_pSeriesLP(tbl, tcenum_start,
					(npages_start - (npages + limit)));
		return ret;
	}

	if (rc && printk_ratelimit()) {
		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages = 0x%llx\n", (u64)npages);
		printk("\ttce[0] val = 0x%llx\n", tcep[0]);
		show_stack(current, (unsigned long *)__get_SP());
	}
	return ret;
}
|
|
|
|
|
|
|
|
static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
|
|
|
|
{
|
|
|
|
u64 rc;
|
|
|
|
|
|
|
|
while (npages--) {
|
2006-04-29 03:51:59 +00:00
|
|
|
rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
if (rc && printk_ratelimit()) {
|
2009-01-06 14:26:03 +00:00
|
|
|
printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
|
|
|
|
printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
|
|
|
|
printk("\ttcenum = 0x%llx\n", (u64)tcenum);
|
2005-04-16 22:20:36 +00:00
|
|
|
show_stack(current, (unsigned long *)__get_SP());
|
|
|
|
}
|
|
|
|
|
|
|
|
tcenum++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Free @npages TCE entries under an LPAR in a single plpar_tce_stuff()
 * hypervisor call (writes the same value, 0, over the whole range).
 * Failures are logged (rate-limited) but otherwise ignored.
 */
static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;

	rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);

	if (rc && printk_ratelimit()) {
		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
		printk("\trc = %lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages = 0x%llx\n", (u64)npages);
		show_stack(current, (unsigned long *)__get_SP());
	}
}
|
|
|
|
|
2006-06-23 06:35:10 +00:00
|
|
|
/*
 * Read back the TCE at @tcenum from the hypervisor via plpar_tce_get().
 * On failure the (possibly uninitialized) value is still returned after
 * a rate-limited diagnostic dump.
 */
static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
{
	u64 rc;
	unsigned long tce_ret;

	rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);

	if (rc && printk_ratelimit()) {
		printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\ttcenum = 0x%llx\n", (u64)tcenum);
		show_stack(current, (unsigned long *)__get_SP());
	}

	return tce_ret;
}
|
|
|
|
|
2011-03-31 01:57:33 +00:00
|
|
|
/* this is compatible with cells for the device tree property */
struct dynamic_dma_window_prop {
	__be32	liobn;		/* tce table number */
	__be64	dma_base;	/* address hi,lo */
	__be32	tce_shift;	/* ilog2(tce_page_size) */
	__be32	window_shift;	/* ilog2(tce_window_size) */
};
|
|
|
|
|
|
|
|
/* One entry per device node with an active direct (1:1) DMA window;
 * linked on direct_window_list below. */
struct direct_window {
	struct device_node *device;
	const struct dynamic_dma_window_prop *prop;
	struct list_head list;
};
|
|
|
|
|
|
|
|
/* Dynamic DMA Window support */
/* Layout of the return buffer of the ibm,query-pe-dma-window RTAS call. */
struct ddw_query_response {
	__be32 windows_available;
	__be32 largest_available_block;
	__be32 page_size;
	__be32 migration_capable;
};
|
|
|
|
|
|
|
|
/* Layout of the return buffer of the ibm,create-pe-dma-window RTAS call. */
struct ddw_create_response {
	__be32 liobn;
	__be32 addr_hi;
	__be32 addr_lo;
};
|
|
|
|
|
|
|
|
/* All currently-configured direct DMA windows. */
static LIST_HEAD(direct_window_list);
/* prevents races between memory on/offline and window creation */
static DEFINE_SPINLOCK(direct_window_list_lock);
/* protects initializing window twice for same device */
static DEFINE_MUTEX(direct_window_init_mutex);
#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"
|
|
|
|
|
|
|
|
/*
 * Clear the TCEs covering [start_pfn, start_pfn + num_pfn) in the window
 * described by @arg (a struct dynamic_dma_window_prop), batching up to
 * 512 entries per plpar_tce_stuff() hypervisor call.
 *
 * Returns 0 on success or the first failing hcall's return code.
 */
static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
					unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	int rc;
	u64 tce_size, num_tce, dma_offset, next;
	u32 tce_shift;
	long limit;

	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 512);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn),
					     dma_offset,
					     0, limit);
		next += limit * tce_size;
		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	return rc;
}
|
|
|
|
|
|
|
|
/*
 * Map [start_pfn, start_pfn + num_pfn) 1:1 (direct) into the window
 * described by @arg (a struct dynamic_dma_window_prop), staging TCEs in
 * the per-CPU scratch page and issuing plpar_tce_put_indirect() calls.
 *
 * Interrupts are disabled around the use of the per-CPU page.  Returns
 * 0 on success, -ENOMEM if the scratch page cannot be allocated, or the
 * first failing hcall's return code; on hcall failure the caller is
 * expected to clear the whole range.
 */
static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
					unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	u64 tce_size, num_tce, dma_offset, next, proto_tce, liobn;
	__be64 *tcep;
	u32 tce_shift;
	u64 rc = 0;
	long l, limit;

	local_irq_disable();	/* to protect tcep and the page behind it */
	tcep = __get_cpu_var(tce_page);

	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		if (!tcep) {
			local_irq_enable();
			return -ENOMEM;
		}
		__get_cpu_var(tce_page) = tcep;
	}

	/* Direct windows are always mapped read+write. */
	proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;

	liobn = (u64)be32_to_cpu(maprange->liobn);
	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 4096/TCE_ENTRY_SIZE);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | next);
			next += tce_size;
		}

		rc = plpar_tce_put_indirect(liobn,
					    dma_offset,
					    (u64)__pa(tcep),
					    limit);

		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	/* error cleanup: caller will clear whole range */

	local_irq_enable();
	return rc;
}
|
|
|
|
|
|
|
|
/* Adapter with the non-const @arg signature expected by the memory
 * walk callbacks; simply forwards to tce_setrange_multi_pSeriesLP(). */
static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn,
		unsigned long num_pfn, void *arg)
{
	return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
}
|
|
|
|
|
|
|
|
|
2007-03-04 06:04:44 +00:00
|
|
|
#ifdef CONFIG_PCI
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * Fill in @tbl for a non-LPAR pSeries PHB from the "linux,tce-base" /
 * "linux,tce-size" properties of the PHB's device node, carving the next
 * slice out of the PHB's DMA window (phb->dma_window_base_cur is
 * advanced as a side effect).
 *
 * If the "linux,tce-sw-invalidate-info" property is present, it_index
 * and it_busno are overloaded with the invalidate MMIO address and magic
 * routing bits (see tce_invalidate_pSeries_sw()).
 */
static void iommu_table_setparms(struct pci_controller *phb,
				 struct device_node *dn,
				 struct iommu_table *tbl)
{
	struct device_node *node;
	const unsigned long *basep, *sw_inval;
	const u32 *sizep;

	node = phb->dn;

	basep = of_get_property(node, "linux,tce-base", NULL);
	sizep = of_get_property(node, "linux,tce-size", NULL);
	if (basep == NULL || sizep == NULL) {
		printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %s has "
				"missing tce entries !\n", dn->full_name);
		return;
	}

	tbl->it_base = (unsigned long)__va(*basep);

	/* A kdump kernel must preserve the previous kernel's TCEs so
	 * in-flight DMA keeps working until devices are reset. */
	if (!is_kdump_kernel())
		memset((void *)tbl->it_base, 0, *sizep);

	tbl->it_busno = phb->bus->number;
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;

	/* Units of tce entries */
	tbl->it_offset = phb->dma_window_base_cur >> tbl->it_page_shift;

	/* Test if we are going over 2GB of DMA space */
	if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
		udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
		panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
	}

	phb->dma_window_base_cur += phb->dma_window_size;

	/* Set the tce table size - measured in entries */
	tbl->it_size = phb->dma_window_size >> tbl->it_page_shift;

	tbl->it_index = 0;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;

	sw_inval = of_get_property(node, "linux,tce-sw-invalidate-info", NULL);
	if (sw_inval) {
		/*
		 * This property contains information on how to
		 * invalidate the TCE entry. The first property is
		 * the base MMIO address used to invalidate entries.
		 * The second property tells us the format of the TCE
		 * invalidate (whether it needs to be shifted) and
		 * some magic routing info to add to our invalidate
		 * command.
		 */
		tbl->it_index = (unsigned long) ioremap(sw_inval[0], 8);
		tbl->it_busno = sw_inval[1]; /* overload this with magic */
		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
	}
}
|
|
|
|
|
|
|
|
/*
 * iommu_table_setparms_lpar
 *
 * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
 *
 * Parses the "ibm,dma-window" property (@dma_window) to obtain the
 * table's liobn (stored in it_index), offset and size; the table itself
 * lives on the hypervisor side, so it_base is 0.
 */
static void iommu_table_setparms_lpar(struct pci_controller *phb,
				      struct device_node *dn,
				      struct iommu_table *tbl,
				      const __be32 *dma_window)
{
	unsigned long offset, size;

	of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);

	tbl->it_busno = phb->bus->number;
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
	tbl->it_base = 0;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
	tbl->it_offset = offset >> tbl->it_page_shift;
	tbl->it_size = size >> tbl->it_page_shift;
}
|
|
|
|
|
2006-11-11 06:25:02 +00:00
|
|
|
/*
 * Per-bus DMA setup for non-LPAR pSeries.  Only acts on root buses:
 * decides how the PHB's DMA window is divided among the PHB's direct
 * children, reserving space for ISA/IDE when an ISA bus hangs off this
 * PHB (in which case a 128MB table for the PHB itself is also created).
 */
static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
{
	struct device_node *dn;
	struct iommu_table *tbl;
	struct device_node *isa_dn, *isa_dn_orig;
	struct device_node *tmp;
	struct pci_dn *pci;
	int children;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeries: setting up bus %s\n", dn->full_name);

	if (bus->self) {
		/* This is not a root bus, any setup will be done for the
		 * device-side of the bridge in iommu_dev_setup_pSeries().
		 */
		return;
	}
	pci = PCI_DN(dn);

	/* Check if the ISA bus on the system is under
	 * this PHB.
	 */
	isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");

	while (isa_dn && isa_dn != dn)
		isa_dn = isa_dn->parent;

	if (isa_dn_orig)
		of_node_put(isa_dn_orig);

	/* Count number of direct PCI children of the PHB. */
	for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
		children++;

	pr_debug("Children: %d\n", children);

	/* Calculate amount of DMA window per slot. Each window must be
	 * a power of two (due to pci_alloc_consistent requirements).
	 *
	 * Keep 256MB aside for PHBs with ISA.
	 */

	if (!isa_dn) {
		/* No ISA/IDE - just set window size and return */
		pci->phb->dma_window_size = 0x80000000ul; /* To be divided */

		while (pci->phb->dma_window_size * children > 0x80000000ul)
			pci->phb->dma_window_size >>= 1;
		pr_debug("No ISA/IDE, window size is 0x%llx\n",
			 pci->phb->dma_window_size);
		pci->phb->dma_window_base_cur = 0;

		return;
	}

	/* If we have ISA, then we probably have an IDE
	 * controller too. Allocate a 128MB table but
	 * skip the first 128MB to avoid stepping on ISA
	 * space.
	 */
	pci->phb->dma_window_size = 0x8000000ul;
	pci->phb->dma_window_base_cur = 0x8000000ul;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
			   pci->phb->node);

	iommu_table_setparms(pci->phb, dn, tbl);
	pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
	iommu_register_group(tbl, pci_domain_nr(bus), 0);

	/* Divide the rest (1.75GB) among the children */
	pci->phb->dma_window_size = 0x80000000ul;
	while (pci->phb->dma_window_size * children > 0x70000000ul)
		pci->phb->dma_window_size >>= 1;

	pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
}
|
|
|
|
|
|
|
|
|
2006-11-11 06:25:02 +00:00
|
|
|
/*
 * Per-bus DMA setup for pSeries LPAR: find the nearest "ibm,dma-window"
 * property walking up the device tree from the bus's node, and create
 * (once) an iommu table for the node that owns it.
 */
static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
{
	struct iommu_table *tbl;
	struct device_node *dn, *pdn;
	struct pci_dn *ppci;
	const __be32 *dma_window = NULL;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %s\n",
		 dn->full_name);

	/* Find nearest ibm,dma-window, walking up the device tree */
	for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window != NULL)
			break;
	}

	if (dma_window == NULL) {
		pr_debug(" no ibm,dma-window property !\n");
		return;
	}

	ppci = PCI_DN(pdn);

	pr_debug(" parent is %s, iommu_table: 0x%p\n",
		 pdn->full_name, ppci->iommu_table);

	/* Only build the table the first time the window's node is seen. */
	if (!ppci->iommu_table) {
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
				   ppci->phb->node);
		iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
		ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
		iommu_register_group(tbl, pci_domain_nr(bus), 0);
		pr_debug(" created table: %p\n", ppci->iommu_table);
	}
}
|
|
|
|
|
|
|
|
|
2006-11-11 06:25:02 +00:00
|
|
|
/*
 * Per-device DMA setup for non-LPAR pSeries: a direct child of a root
 * bus gets its own iommu table (using the window sizes prepared by
 * pci_dma_bus_setup_pSeries()); deeper devices reuse the nearest
 * ancestor's table.
 */
static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
{
	struct device_node *dn;
	struct iommu_table *tbl;

	pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));

	dn = dev->dev.of_node;

	/* If we're the direct child of a root bus, then we need to allocate
	 * an iommu table ourselves. The bus setup code should have setup
	 * the window sizes already.
	 */
	if (!dev->bus->self) {
		struct pci_controller *phb = PCI_DN(dn)->phb;

		pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
				   phb->node);
		iommu_table_setparms(phb, dn, tbl);
		PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
		iommu_register_group(tbl, pci_domain_nr(phb->bus), 0);
		set_iommu_table_base_and_group(&dev->dev,
					       PCI_DN(dn)->iommu_table);
		return;
	}

	/* If this device is further down the bus tree, search upwards until
	 * an already allocated iommu table is found and use that.
	 */

	while (dn && PCI_DN(dn) && PCI_DN(dn)->iommu_table == NULL)
		dn = dn->parent;

	if (dn && PCI_DN(dn))
		set_iommu_table_base_and_group(&dev->dev,
					       PCI_DN(dn)->iommu_table);
	else
		printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
		       pci_name(dev));
}
|
|
|
|
|
2011-02-10 09:10:47 +00:00
|
|
|
/* Non-zero when dynamic DMA windows are disabled via the "disable_ddw"
 * kernel command-line parameter. */
static int __read_mostly disable_ddw;

/* early_param handler: turn off dynamic DMA window support. */
static int __init disable_ddw_setup(char *str)
{
	disable_ddw = 1;
	printk(KERN_INFO "ppc iommu: disabling ddw.\n");

	return 0;
}

early_param("disable_ddw", disable_ddw_setup);
|
|
|
|
|
|
|
|
/*
 * Tear down the 64-bit direct DMA window previously created on @np:
 * clear all TCEs in the window, ask firmware to remove the window via
 * the ibm,remove-pe-dma-window RTAS token (ddw_avail[2]), and delete
 * the DIRECT64_PROPNAME property from the device node.
 *
 * If the window property exists but the ddw-applicable tokens are
 * missing/short or the property payload is undersized, only the
 * property is removed (no RTAS call is possible/safe in that case).
 */
static void remove_ddw(struct device_node *np)
{
	struct dynamic_dma_window_prop *dwp;
	struct property *win64;
	const u32 *ddw_avail;
	u64 liobn;
	int len, ret;

	ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len);
	win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
	if (!win64)
		return;		/* no direct window configured; nothing to do */

	/* Without the three RTAS tokens or a sane payload we can only
	 * drop the stale property. */
	if (!ddw_avail || len < 3 * sizeof(u32) || win64->length < sizeof(*dwp))
		goto delprop;

	dwp = win64->value;
	/* property fields are stored big-endian in the device tree */
	liobn = (u64)be32_to_cpu(dwp->liobn);

	/* clear the whole window, note the arg is in kernel pages */
	ret = tce_clearrange_multi_pSeriesLP(0,
		1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp);
	if (ret)
		pr_warning("%s failed to clear tces in window.\n",
			 np->full_name);
	else
		pr_debug("%s successfully cleared tces in window.\n",
			 np->full_name);

	/* ddw_avail[2] is the ibm,remove-pe-dma-window RTAS token */
	ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
	if (ret)
		pr_warning("%s: failed to remove direct window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np->full_name, ret, ddw_avail[2], liobn);
	else
		pr_debug("%s: successfully removed direct window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np->full_name, ret, ddw_avail[2], liobn);

delprop:
	ret = of_remove_property(np, win64);
	if (ret)
		pr_warning("%s: failed to remove direct window property: %d\n",
			np->full_name, ret);
}
|
2011-02-10 09:10:47 +00:00
|
|
|
|
2011-05-11 12:25:00 +00:00
|
|
|
static u64 find_existing_ddw(struct device_node *pdn)
|
2011-02-10 09:10:47 +00:00
|
|
|
{
|
|
|
|
struct direct_window *window;
|
|
|
|
const struct dynamic_dma_window_prop *direct64;
|
|
|
|
u64 dma_addr = 0;
|
|
|
|
|
|
|
|
spin_lock(&direct_window_list_lock);
|
|
|
|
/* check if we already created a window and dupe that config if so */
|
|
|
|
list_for_each_entry(window, &direct_window_list, list) {
|
|
|
|
if (window->device == pdn) {
|
|
|
|
direct64 = window->prop;
|
2013-10-17 12:21:15 +00:00
|
|
|
dma_addr = be64_to_cpu(direct64->dma_base);
|
2011-02-10 09:10:47 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
spin_unlock(&direct_window_list_lock);
|
|
|
|
|
|
|
|
return dma_addr;
|
|
|
|
}
|
|
|
|
|
2013-01-28 16:03:58 +00:00
|
|
|
/*
 * Ask firmware to restore the default (32-bit) DMA window for the PE
 * that @edev belongs to, using the ibm,reset-pe-dma-windows RTAS token
 * passed in @ddw_restore_token.  Retries while RTAS reports busy.
 */
static void __restore_default_window(struct eeh_dev *edev,
					u32 ddw_restore_token)
{
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	cfg_addr = edev->config_addr;
	if (edev->pe_config_addr)
		cfg_addr = edev->pe_config_addr;
	buid = edev->phb->buid;

	/* rtas_busy_delay() sleeps as instructed by the busy return code */
	do {
		ret = rtas_call(ddw_restore_token, 3, 1, NULL, cfg_addr,
			BUID_HI(buid), BUID_LO(buid));
	} while (rtas_busy_delay(ret));
	pr_info("ibm,reset-pe-dma-windows(%x) %x %x %x returned %d\n",
		 ddw_restore_token, cfg_addr, BUID_HI(buid), BUID_LO(buid), ret);
}
|
|
|
|
|
2011-05-11 12:24:59 +00:00
|
|
|
/*
 * Boot-time (arch initcall) cleanup: walk every device node that still
 * carries a DIRECT64_PROPNAME property (e.g. left over across kexec),
 * remove that stale DDW configuration, and — where firmware supports it —
 * restore the default 32-bit window.  Always returns 0.
 */
static int find_existing_ddw_windows(void)
{
	struct device_node *pdn;
	const struct dynamic_dma_window_prop *direct64;
	const u32 *ddw_extensions;

	/* DDW is only meaningful under an LPAR hypervisor */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
		direct64 = of_get_property(pdn, DIRECT64_PROPNAME, NULL);
		if (!direct64)
			continue;

		/*
		 * We need to ensure the IOMMU table is active when we
		 * return from the IOMMU setup so that the common code
		 * can clear the table or find the holes. To that end,
		 * first, remove any existing DDW configuration.
		 */
		remove_ddw(pdn);

		/*
		 * Second, if we are running on a new enough level of
		 * firmware where the restore API is present, use it to
		 * restore the 32-bit window, which was removed in
		 * create_ddw.
		 * If the API is not present, then create_ddw couldn't
		 * have removed the 32-bit window in the first place, so
		 * removing the DDW configuration should be sufficient.
		 */
		ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions",
						NULL);
		if (ddw_extensions && ddw_extensions[0] > 0)
			__restore_default_window(of_node_to_eeh_dev(pdn),
					ddw_extensions[1]);
	}

	return 0;
}
machine_arch_initcall(pseries, find_existing_ddw_windows);
|
2011-02-10 09:10:47 +00:00
|
|
|
|
2011-05-11 12:25:00 +00:00
|
|
|
/*
 * Invoke the ibm,query-pe-dma-windows RTAS call (token ddw_avail[0])
 * for @dev's PE and fill in @query with the response (window count,
 * largest available block, supported page-size masks).
 * Returns the RTAS status (0 on success).
 */
static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
			struct ddw_query_response *query)
{
	struct eeh_dev *edev;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	edev = pci_dev_to_eeh_dev(dev);
	cfg_addr = edev->config_addr;
	if (edev->pe_config_addr)
		cfg_addr = edev->pe_config_addr;
	buid = edev->phb->buid;

	/* 3 inputs, 5 outputs; outputs land directly in *query */
	ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
		  cfg_addr, BUID_HI(buid), BUID_LO(buid));
	dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x"
		" returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid),
		BUID_LO(buid), ret);

	return ret;
}
|
|
|
|
|
2011-05-11 12:25:00 +00:00
|
|
|
/*
 * Invoke the ibm,create-pe-dma-window RTAS call (token ddw_avail[1])
 * to create a new DMA window for @dev's PE with the given TCE
 * @page_shift and total @window_shift (log2 of window size).  The
 * LIOBN and starting address come back in @create.  Retries while
 * RTAS reports busy.  Returns the RTAS status (0 on success).
 */
static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
			struct ddw_create_response *create, int page_shift,
			int window_shift)
{
	struct eeh_dev *edev;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	edev = pci_dev_to_eeh_dev(dev);
	cfg_addr = edev->config_addr;
	if (edev->pe_config_addr)
		cfg_addr = edev->pe_config_addr;
	buid = edev->phb->buid;

	do {
		/* extra outputs are LIOBN and dma-addr (hi, lo) */
		ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create, cfg_addr,
			BUID_HI(buid), BUID_LO(buid), page_shift, window_shift);
	} while (rtas_busy_delay(ret));
	dev_info(&dev->dev,
		"ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
		"(liobn = 0x%x starting addr = %x %x)\n", ddw_avail[1],
		 cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift,
		 window_shift, ret, create->liobn, create->addr_hi, create->addr_lo);

	return ret;
}
|
|
|
|
|
powerpc/pseries: close DDW race between functions of adapter
Given a PCI device with multiple functions in a DDW capable slot, the
following situation can be encountered: When the first function sets a
64-bit DMA mask, enable_ddw() will be called and we can fail to properly
configure DDW (the most common reason being the new DMA window's size is
not large enough to map all of an LPAR's memory). With the recent
changes to DDW, we remove the base window in order to determine if the
new window is of sufficient size to cover an LPAR's memory. We correctly
replace the base window if we find that not to be the case. However,
once we go through and re-configured 32-bit DMA via the IOMMU, the next
function of the adapter will go through the same process. And since DDW
is a characteristic of the slot itself, we are most likely going to fail
again. But to determine we are going to fail the second slot, we again
remove the base window -- but that is now in-use by the first
function/driver, which might be issuing I/O already.
To close this window, keep a list of all the failed struct device_nodes
that have failed to configure DDW. If the current device_node is in that
list, just fail out immediately and fall back to 32-bit DMA without
doing any DDW manipulation.
Signed-off-by: Nishanth Aravamudan <nacc@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
2013-03-07 12:33:03 +00:00
|
|
|
/* One entry per PE device node for which DDW setup has already failed.
 * enable_ddw() consults failed_ddw_pdn_list so that a second function
 * of the same adapter does not re-attempt (and race with) window
 * manipulation that is known to fail; protected by
 * direct_window_init_mutex. */
struct failed_ddw_pdn {
	struct device_node *pdn;	/* the PE node that failed DDW setup */
	struct list_head list;
};

static LIST_HEAD(failed_ddw_pdn_list);
|
|
|
|
|
2011-02-10 09:10:47 +00:00
|
|
|
/*
|
|
|
|
* If the PE supports dynamic dma windows, and there is space for a table
|
|
|
|
* that can map all pages in a linear offset, then setup such a table,
|
|
|
|
* and record the dma-offset in the struct device.
|
|
|
|
*
|
|
|
|
* dev: the pci device we are checking
|
|
|
|
* pdn: the parent pe node with the ibm,dma_window property
|
|
|
|
* Future: also check if we can remap the base window for our base page size
|
|
|
|
*
|
|
|
|
* returns the dma offset for use by dma_set_mask
|
|
|
|
*/
|
|
|
|
static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
|
|
|
|
{
|
|
|
|
int len, ret;
|
|
|
|
struct ddw_query_response query;
|
|
|
|
struct ddw_create_response create;
|
|
|
|
int page_shift;
|
|
|
|
u64 dma_addr, max_addr;
|
|
|
|
struct device_node *dn;
|
2011-05-11 12:25:00 +00:00
|
|
|
const u32 *uninitialized_var(ddw_avail);
|
2011-02-10 09:10:47 +00:00
|
|
|
struct direct_window *window;
|
2011-05-06 13:27:30 +00:00
|
|
|
struct property *win64;
|
2011-02-10 09:10:47 +00:00
|
|
|
struct dynamic_dma_window_prop *ddwprop;
|
powerpc/pseries: close DDW race between functions of adapter
Given a PCI device with multiple functions in a DDW capable slot, the
following situation can be encountered: When the first function sets a
64-bit DMA mask, enable_ddw() will be called and we can fail to properly
configure DDW (the most common reason being the new DMA window's size is
not large enough to map all of an LPAR's memory). With the recent
changes to DDW, we remove the base window in order to determine if the
new window is of sufficient size to cover an LPAR's memory. We correctly
replace the base window if we find that not to be the case. However,
once we go through and re-configured 32-bit DMA via the IOMMU, the next
function of the adapter will go through the same process. And since DDW
is a characteristic of the slot itself, we are most likely going to fail
again. But to determine we are going to fail the second slot, we again
remove the base window -- but that is now in-use by the first
function/driver, which might be issuing I/O already.
To close this window, keep a list of all the failed struct device_nodes
that have failed to configure DDW. If the current device_node is in that
list, just fail out immediately and fall back to 32-bit DMA without
doing any DDW manipulation.
Signed-off-by: Nishanth Aravamudan <nacc@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
2013-03-07 12:33:03 +00:00
|
|
|
struct failed_ddw_pdn *fpdn;
|
2011-02-10 09:10:47 +00:00
|
|
|
|
|
|
|
mutex_lock(&direct_window_init_mutex);
|
|
|
|
|
2011-05-11 12:25:00 +00:00
|
|
|
dma_addr = find_existing_ddw(pdn);
|
2011-02-10 09:10:47 +00:00
|
|
|
if (dma_addr != 0)
|
|
|
|
goto out_unlock;
|
|
|
|
|
powerpc/pseries: close DDW race between functions of adapter
Given a PCI device with multiple functions in a DDW capable slot, the
following situation can be encountered: When the first function sets a
64-bit DMA mask, enable_ddw() will be called and we can fail to properly
configure DDW (the most common reason being the new DMA window's size is
not large enough to map all of an LPAR's memory). With the recent
changes to DDW, we remove the base window in order to determine if the
new window is of sufficient size to cover an LPAR's memory. We correctly
replace the base window if we find that not to be the case. However,
once we go through and re-configured 32-bit DMA via the IOMMU, the next
function of the adapter will go through the same process. And since DDW
is a characteristic of the slot itself, we are most likely going to fail
again. But to determine we are going to fail the second slot, we again
remove the base window -- but that is now in-use by the first
function/driver, which might be issuing I/O already.
To close this window, keep a list of all the failed struct device_nodes
that have failed to configure DDW. If the current device_node is in that
list, just fail out immediately and fall back to 32-bit DMA without
doing any DDW manipulation.
Signed-off-by: Nishanth Aravamudan <nacc@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
2013-03-07 12:33:03 +00:00
|
|
|
/*
|
|
|
|
* If we already went through this for a previous function of
|
|
|
|
* the same device and failed, we don't want to muck with the
|
|
|
|
* DMA window again, as it will race with in-flight operations
|
|
|
|
* and can lead to EEHs. The above mutex protects access to the
|
|
|
|
* list.
|
|
|
|
*/
|
|
|
|
list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) {
|
|
|
|
if (!strcmp(fpdn->pdn->full_name, pdn->full_name))
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
2011-02-10 09:10:47 +00:00
|
|
|
/*
|
|
|
|
* the ibm,ddw-applicable property holds the tokens for:
|
|
|
|
* ibm,query-pe-dma-window
|
|
|
|
* ibm,create-pe-dma-window
|
|
|
|
* ibm,remove-pe-dma-window
|
|
|
|
* for the given node in that order.
|
|
|
|
* the property is actually in the parent, not the PE
|
|
|
|
*/
|
2011-05-11 12:25:00 +00:00
|
|
|
ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
|
|
|
|
if (!ddw_avail || len < 3 * sizeof(u32))
|
2014-01-10 23:09:38 +00:00
|
|
|
goto out_failed;
|
2012-05-15 07:04:32 +00:00
|
|
|
|
2014-01-10 23:09:38 +00:00
|
|
|
/*
|
2011-02-10 09:10:47 +00:00
|
|
|
* Query if there is a second window of size to map the
|
|
|
|
* whole partition. Query returns number of windows, largest
|
|
|
|
* block assigned to PE (partition endpoint), and two bitmasks
|
|
|
|
* of page sizes: supported and supported for migrate-dma.
|
|
|
|
*/
|
|
|
|
dn = pci_device_to_OF_node(dev);
|
2011-05-11 12:25:00 +00:00
|
|
|
ret = query_ddw(dev, ddw_avail, &query);
|
2011-02-10 09:10:47 +00:00
|
|
|
if (ret != 0)
|
2014-01-10 23:09:38 +00:00
|
|
|
goto out_failed;
|
2011-02-10 09:10:47 +00:00
|
|
|
|
|
|
|
if (query.windows_available == 0) {
|
|
|
|
/*
|
|
|
|
* no additional windows are available for this device.
|
|
|
|
* We might be able to reallocate the existing window,
|
|
|
|
* trading in for a larger page size.
|
|
|
|
*/
|
|
|
|
dev_dbg(&dev->dev, "no free dynamic windows");
|
2014-01-10 23:09:38 +00:00
|
|
|
goto out_failed;
|
2011-02-10 09:10:47 +00:00
|
|
|
}
|
2013-10-17 12:21:15 +00:00
|
|
|
if (be32_to_cpu(query.page_size) & 4) {
|
2011-02-10 09:10:47 +00:00
|
|
|
page_shift = 24; /* 16MB */
|
2013-10-17 12:21:15 +00:00
|
|
|
} else if (be32_to_cpu(query.page_size) & 2) {
|
2011-02-10 09:10:47 +00:00
|
|
|
page_shift = 16; /* 64kB */
|
2013-10-17 12:21:15 +00:00
|
|
|
} else if (be32_to_cpu(query.page_size) & 1) {
|
2011-02-10 09:10:47 +00:00
|
|
|
page_shift = 12; /* 4kB */
|
|
|
|
} else {
|
|
|
|
dev_dbg(&dev->dev, "no supported direct page size in mask %x",
|
|
|
|
query.page_size);
|
2014-01-10 23:09:38 +00:00
|
|
|
goto out_failed;
|
2011-02-10 09:10:47 +00:00
|
|
|
}
|
|
|
|
/* verify the window * number of ptes will map the partition */
|
|
|
|
/* check largest block * page size > max memory hotplug addr */
|
|
|
|
max_addr = memory_hotplug_max();
|
2013-10-17 12:21:15 +00:00
|
|
|
if (be32_to_cpu(query.largest_available_block) < (max_addr >> page_shift)) {
|
2011-02-10 09:10:47 +00:00
|
|
|
dev_dbg(&dev->dev, "can't map partiton max 0x%llx with %u "
|
|
|
|
"%llu-sized pages\n", max_addr, query.largest_available_block,
|
|
|
|
1ULL << page_shift);
|
2014-01-10 23:09:38 +00:00
|
|
|
goto out_failed;
|
2011-02-10 09:10:47 +00:00
|
|
|
}
|
|
|
|
len = order_base_2(max_addr);
|
|
|
|
win64 = kzalloc(sizeof(struct property), GFP_KERNEL);
|
|
|
|
if (!win64) {
|
|
|
|
dev_info(&dev->dev,
|
|
|
|
"couldn't allocate property for 64bit dma window\n");
|
2014-01-10 23:09:38 +00:00
|
|
|
goto out_failed;
|
2011-02-10 09:10:47 +00:00
|
|
|
}
|
|
|
|
win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
|
|
|
|
win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
|
2011-05-06 13:27:30 +00:00
|
|
|
win64->length = sizeof(*ddwprop);
|
2011-02-10 09:10:47 +00:00
|
|
|
if (!win64->name || !win64->value) {
|
|
|
|
dev_info(&dev->dev,
|
|
|
|
"couldn't allocate property name and value\n");
|
|
|
|
goto out_free_prop;
|
|
|
|
}
|
|
|
|
|
2011-05-11 12:25:00 +00:00
|
|
|
ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
|
2011-02-10 09:10:47 +00:00
|
|
|
if (ret != 0)
|
|
|
|
goto out_free_prop;
|
|
|
|
|
2013-10-17 12:21:15 +00:00
|
|
|
ddwprop->liobn = create.liobn;
|
2011-02-10 09:10:47 +00:00
|
|
|
ddwprop->dma_base = cpu_to_be64(of_read_number(&create.addr_hi, 2));
|
|
|
|
ddwprop->tce_shift = cpu_to_be32(page_shift);
|
|
|
|
ddwprop->window_shift = cpu_to_be32(len);
|
|
|
|
|
|
|
|
dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %s\n",
|
|
|
|
create.liobn, dn->full_name);
|
|
|
|
|
|
|
|
window = kzalloc(sizeof(*window), GFP_KERNEL);
|
|
|
|
if (!window)
|
|
|
|
goto out_clear_window;
|
|
|
|
|
|
|
|
ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
|
|
|
|
win64->value, tce_setrange_multi_pSeriesLP_walk);
|
|
|
|
if (ret) {
|
|
|
|
dev_info(&dev->dev, "failed to map direct window for %s: %d\n",
|
|
|
|
dn->full_name, ret);
|
2011-08-08 01:18:00 +00:00
|
|
|
goto out_free_window;
|
2011-02-10 09:10:47 +00:00
|
|
|
}
|
|
|
|
|
2012-10-02 16:58:46 +00:00
|
|
|
ret = of_add_property(pdn, win64);
|
2011-02-10 09:10:47 +00:00
|
|
|
if (ret) {
|
|
|
|
dev_err(&dev->dev, "unable to add dma window property for %s: %d",
|
|
|
|
pdn->full_name, ret);
|
2011-08-08 01:18:00 +00:00
|
|
|
goto out_free_window;
|
2011-02-10 09:10:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
window->device = pdn;
|
|
|
|
window->prop = ddwprop;
|
|
|
|
spin_lock(&direct_window_list_lock);
|
|
|
|
list_add(&window->list, &direct_window_list);
|
|
|
|
spin_unlock(&direct_window_list_lock);
|
|
|
|
|
|
|
|
dma_addr = of_read_number(&create.addr_hi, 2);
|
|
|
|
goto out_unlock;
|
|
|
|
|
2011-08-08 01:18:00 +00:00
|
|
|
out_free_window:
|
|
|
|
kfree(window);
|
|
|
|
|
2011-02-10 09:10:47 +00:00
|
|
|
out_clear_window:
|
|
|
|
remove_ddw(pdn);
|
|
|
|
|
|
|
|
out_free_prop:
|
|
|
|
kfree(win64->name);
|
|
|
|
kfree(win64->value);
|
|
|
|
kfree(win64);
|
|
|
|
|
2014-01-10 23:09:38 +00:00
|
|
|
out_failed:
|
2012-05-15 07:04:32 +00:00
|
|
|
|
powerpc/pseries: close DDW race between functions of adapter
Given a PCI device with multiple functions in a DDW capable slot, the
following situation can be encountered: When the first function sets a
64-bit DMA mask, enable_ddw() will be called and we can fail to properly
configure DDW (the most common reason being the new DMA window's size is
not large enough to map all of an LPAR's memory). With the recent
changes to DDW, we remove the base window in order to determine if the
new window is of sufficient size to cover an LPAR's memory. We correctly
replace the base window if we find that not to be the case. However,
once we go through and re-configured 32-bit DMA via the IOMMU, the next
function of the adapter will go through the same process. And since DDW
is a characteristic of the slot itself, we are most likely going to fail
again. But to determine we are going to fail the second slot, we again
remove the base window -- but that is now in-use by the first
function/driver, which might be issuing I/O already.
To close this window, keep a list of all the failed struct device_nodes
that have failed to configure DDW. If the current device_node is in that
list, just fail out immediately and fall back to 32-bit DMA without
doing any DDW manipulation.
Signed-off-by: Nishanth Aravamudan <nacc@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
2013-03-07 12:33:03 +00:00
|
|
|
fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
|
|
|
|
if (!fpdn)
|
|
|
|
goto out_unlock;
|
|
|
|
fpdn->pdn = pdn;
|
|
|
|
list_add(&fpdn->list, &failed_ddw_pdn_list);
|
|
|
|
|
2011-02-10 09:10:47 +00:00
|
|
|
out_unlock:
|
|
|
|
mutex_unlock(&direct_window_init_mutex);
|
|
|
|
return dma_addr;
|
|
|
|
}
|
|
|
|
|
2006-11-11 06:25:02 +00:00
|
|
|
/*
 * Per-device DMA setup on LPAR: locate (or create) the 32-bit IOMMU
 * table for @dev's PE and attach it to the device.  The table is
 * shared per-PE node; it is created on first use from the node's
 * "ibm,dma-window" property.
 */
static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
{
	struct device_node *pdn, *dn;
	struct iommu_table *tbl;
	const __be32 *dma_window = NULL;
	struct pci_dn *pci;

	pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));

	/* dev setup for LPAR is a little tricky, since the device tree might
	 * contain the dma-window properties per-device and not necessarily
	 * for the bus. So we need to search upwards in the tree until we
	 * either hit a dma-window property, OR find a parent with a table
	 * already allocated.
	 */
	dn = pci_device_to_OF_node(dev);
	pr_debug("  node is %s\n", dn->full_name);

	for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
	     pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window)
			break;
	}

	if (!pdn || !PCI_DN(pdn)) {
		printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
		       "no DMA window found for pci dev=%s dn=%s\n",
				 pci_name(dev), of_node_full_name(dn));
		return;
	}
	pr_debug("  parent is %s\n", pdn->full_name);

	pci = PCI_DN(pdn);
	if (!pci->iommu_table) {
		/* first device in this PE: build the table on the PHB's node */
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
				   pci->phb->node);
		iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
		pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
		iommu_register_group(tbl, pci_domain_nr(pci->phb->bus), 0);
		pr_debug("  created table: %p\n", pci->iommu_table);
	} else {
		pr_debug("  found DMA window, table: %p\n", pci->iommu_table);
	}

	set_iommu_table_base_and_group(&dev->dev, pci->iommu_table);
}
|
2011-02-10 09:10:47 +00:00
|
|
|
|
|
|
|
/*
 * ppc_md.dma_set_mask hook for pSeries LPAR.  For a PCI device
 * requesting a full 64-bit mask (and with DDW not disabled), try to
 * enable a direct 64-bit DMA window; on success switch the device to
 * dma_direct_ops with the window's offset.  Otherwise fall back to
 * (and if needed restore) the 32-bit IOMMU path.  Returns 0 on
 * success, -EIO if the mask is unsupported.
 */
static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
{
	bool ddw_enabled = false;
	struct device_node *pdn, *dn;
	struct pci_dev *pdev;
	const __be32 *dma_window = NULL;
	u64 dma_offset;

	if (!dev->dma_mask)
		return -EIO;

	/* non-PCI devices: just validate the mask */
	if (!dev_is_pci(dev))
		goto check_mask;

	pdev = to_pci_dev(dev);

	/* only attempt to use a new window if 64-bit DMA is requested */
	if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) {
		dn = pci_device_to_OF_node(pdev);
		dev_dbg(dev, "node is %s\n", dn->full_name);

		/*
		 * the device tree might contain the dma-window properties
		 * per-device and not necessarily for the bus. So we need to
		 * search upwards in the tree until we either hit a dma-window
		 * property, OR find a parent with a table already allocated.
		 */
		for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
				pdn = pdn->parent) {
			dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
			if (dma_window)
				break;
		}
		if (pdn && PCI_DN(pdn)) {
			dma_offset = enable_ddw(pdev, pdn);
			if (dma_offset != 0) {
				dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset);
				set_dma_offset(dev, dma_offset);
				set_dma_ops(dev, &dma_direct_ops);
				ddw_enabled = true;
			}
		}
	}

	/* fall back on iommu ops, restore table pointer with ops */
	if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) {
		dev_info(dev, "Restoring 32-bit DMA via iommu\n");
		set_dma_ops(dev, &dma_iommu_ops);
		pci_dma_dev_setup_pSeriesLP(pdev);
	}

check_mask:
	if (!dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;
	return 0;
}
|
|
|
|
|
2011-06-24 09:05:22 +00:00
|
|
|
/*
 * ppc_md.dma_get_required_mask hook for pSeries LPAR.  If the device's
 * PE advertises "ibm,ddw-applicable" (and DDW isn't disabled), report
 * that 64-bit DMA is required/beneficial so drivers opt into the
 * direct window; otherwise defer to the generic IOMMU answer.
 */
static u64 dma_get_required_mask_pSeriesLP(struct device *dev)
{
	if (!dev->dma_mask)
		return 0;

	if (!disable_ddw && dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct device_node *dn;

		dn = pci_device_to_OF_node(pdev);

		/* search upwards for ibm,dma-window */
		for (; dn && PCI_DN(dn) && !PCI_DN(dn)->iommu_table;
				dn = dn->parent)
			if (of_get_property(dn, "ibm,dma-window", NULL))
				break;
		/* if there is a ibm,ddw-applicable property require 64 bits */
		if (dn && PCI_DN(dn) &&
				of_get_property(dn, "ibm,ddw-applicable", NULL))
			return DMA_BIT_MASK(64);
	}

	return dma_iommu_ops.get_required_mask(dev);
}
|
|
|
|
|
2007-03-04 06:04:44 +00:00
|
|
|
#else  /* CONFIG_PCI */
/* No PCI support configured: stub out the setup hooks so the
 * iommu_init_early_pSeries() assignments below compile to no-ops. */
#define pci_dma_bus_setup_pSeries	NULL
#define pci_dma_dev_setup_pSeries	NULL
#define pci_dma_bus_setup_pSeriesLP	NULL
#define pci_dma_dev_setup_pSeriesLP	NULL
#define dma_set_mask_pSeriesLP		NULL
#define dma_get_required_mask_pSeriesLP	NULL
#endif /* !CONFIG_PCI */
|
|
|
|
|
2011-02-10 09:10:47 +00:00
|
|
|
/*
 * Memory hotplug notifier: keep every registered direct DMA window's
 * linear mapping in sync with the system RAM layout.  TCEs are added
 * for memory going online and cleared for memory going (or failing to
 * go) offline.  Returns NOTIFY_BAD if any TCE update failed (except on
 * MEM_CANCEL_ONLINE, which is itself error recovery).
 */
static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
		void *data)
{
	struct direct_window *window;
	struct memory_notify *arg = data;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
			/* XXX log error */
		}
		spin_unlock(&direct_window_list_lock);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
			/* XXX log error */
		}
		spin_unlock(&direct_window_list_lock);
		break;
	default:
		break;
	}
	if (ret && action != MEM_CANCEL_ONLINE)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static struct notifier_block iommu_mem_nb = {
	.notifier_call = iommu_mem_notifier,
};
|
|
|
|
|
2007-03-04 06:04:44 +00:00
|
|
|
/*
 * Device-tree reconfiguration notifier: when a node is detached
 * (e.g. DLPAR remove), tear down its dynamic DMA window, free its
 * IOMMU table, and drop its entry from the direct-window list.
 */
static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
{
	int err = NOTIFY_OK;
	struct device_node *np = node;
	struct pci_dn *pci = PCI_DN(np);
	struct direct_window *window;

	switch (action) {
	case OF_RECONFIG_DETACH_NODE:
		remove_ddw(np);
		if (pci && pci->iommu_table)
			iommu_free_table(pci->iommu_table, np->full_name);

		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			if (window->device == np) {
				list_del(&window->list);
				kfree(window);
				break;
			}
		}
		spin_unlock(&direct_window_list_lock);
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block iommu_reconfig_nb = {
	.notifier_call = iommu_reconfig_notifier,
};
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/* These are called very early. */
/*
 * Wire up the pSeries TCE and PCI DMA setup callbacks in ppc_md,
 * choosing the LPAR (hypervisor-call) or bare-metal variants based on
 * firmware features, then register the reconfig/memory notifiers and
 * select the IOMMU DMA ops.  Honors the "linux,iommu-off" chosen
 * property by doing nothing.
 */
void iommu_init_early_pSeries(void)
{
	if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))
		return;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		/* multi-TCE firmware allows batched TCE put/stuff calls */
		if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
			ppc_md.tce_build = tce_buildmulti_pSeriesLP;
			ppc_md.tce_free	 = tce_freemulti_pSeriesLP;
		} else {
			ppc_md.tce_build = tce_build_pSeriesLP;
			ppc_md.tce_free	 = tce_free_pSeriesLP;
		}
		ppc_md.tce_get   = tce_get_pSeriesLP;
		ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
		ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
		ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP;
	} else {
		/* non-LPAR: direct access to the TCE tables */
		ppc_md.tce_build = tce_build_pSeries;
		ppc_md.tce_free	 = tce_free_pSeries;
		ppc_md.tce_get   = tce_get_pseries;
		ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeries;
		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeries;
	}


	of_reconfig_notifier_register(&iommu_reconfig_nb);
	register_memory_notifier(&iommu_mem_nb);

	set_pci_dma_ops(&dma_iommu_ops);
}
|
|
|
|
|
2010-09-28 15:33:12 +00:00
|
|
|
static int __init disable_multitce(char *str)
|
|
|
|
{
|
|
|
|
if (strcmp(str, "off") == 0 &&
|
|
|
|
firmware_has_feature(FW_FEATURE_LPAR) &&
|
|
|
|
firmware_has_feature(FW_FEATURE_MULTITCE)) {
|
|
|
|
printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
|
|
|
|
ppc_md.tce_build = tce_build_pSeriesLP;
|
|
|
|
ppc_md.tce_free = tce_free_pSeriesLP;
|
|
|
|
powerpc_firmware_features &= ~FW_FEATURE_MULTITCE;
|
|
|
|
}
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
__setup("multitce=", disable_multitce);
|