linux/arch/sparc/kernel/pci_sun4v.c
commit aa7bde1a8b
Author: Chris Hyser <chris.hyser@oracle.com>
Date:   2016-10-06 01:44:51 -04:00

sparc64: Enable setting "relaxed ordering" in IOMMU mappings

Enable relaxed ordering for memory writes in the IOMMU TSB entries built by
dma_4v_alloc_coherent(), dma_4v_map_page() and dma_4v_map_sg() when the
DMA_ATTR_WEAK_ORDERING attribute is set in dma_attrs. This requires the PCI
IOMMU I/O Translation Services version 2.0 API.

Many PCIe devices allow relaxed ordering (memory writes bypassing other
memory writes) to be enabled for various DMA buffers. A notable exception is
the Mellanox mlx4 IB adapter. Due to the nature of x86 hardware this appears
to have little performance impact there. On SPARC hardware, however, it
causes major performance degradation, with throughput of only about 3 Gbps.
Enabling RO in the IOMMU entries corresponding to mlx4 data buffers raises
throughput to about 13 Gbps.

Orabug: 19245907

Signed-off-by: Chris Hyser <chris.hyser@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

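As context for the listing that follows, here is a minimal, hypothetical
sketch of the consumer side of this change: a driver requesting relaxed
ordering through the generic DMA API. The map_rx_buffer() helper, its
parameters, and the buffer it maps are illustrative only and are not part of
this file.

#include <linux/dma-mapping.h>

/* Hypothetical helper: map a receive buffer for device-to-memory DMA with
 * relaxed ordering.  On sun4v the attribute reaches dma_4v_map_page()
 * below, which ORs HV_PCI_MAP_ATTR_RELAXED_ORDER into the IOMMU protection
 * bits (honored only when the negotiated VPCI API is 2.0 or later).
 */
static dma_addr_t map_rx_buffer(struct device *dev, void *buf, size_t len)
{
	return dma_map_single_attrs(dev, buf, len, DMA_FROM_DEVICE,
				    DMA_ATTR_WEAK_ORDERING);
}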

/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/of_device.h>
#include <linux/iommu-common.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include "pci_impl.h"
#include "iommu_common.h"
#include "pci_sun4v.h"
#define DRIVER_NAME "pci_sun4v"
#define PFX DRIVER_NAME ": "
static unsigned long vpci_major;
static unsigned long vpci_minor;
struct vpci_version {
unsigned long major;
unsigned long minor;
};
/* Ordered from highest major version to lowest. */
static struct vpci_version vpci_versions[] = {
{ .major = 2, .minor = 0 },
{ .major = 1, .minor = 1 },
};
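/* pci_sun4v_probe() walks this table from the top and registers the first
 * major/minor pair the hypervisor accepts.  Relaxed ordering needs a
 * negotiated major of at least 2; see iommu_batch_flush() below.
 */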
#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
struct iommu_batch {
struct device *dev; /* Device mapping is for. */
unsigned long prot; /* IOMMU page protections */
unsigned long entry; /* Index into IOTSB. */
u64 *pglist; /* List of physical pages */
unsigned long npages; /* Number of pages in list. */
};
static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;
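/* Mappings are staged per CPU: up to PGLIST_NENTS physical page addresses
 * (PAGE_SIZE / 8, i.e. 1024 with 8 KB pages) accumulate in pglist and are
 * handed to the hypervisor in a single pci_sun4v_iommu_map() call,
 * amortizing the cost of the hypervisor trap.
 */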
/* Interrupts must be disabled. */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
p->dev = dev;
p->prot = prot;
p->entry = entry;
p->npages = 0;
}
/* Interrupts must be disabled. */
static long iommu_batch_flush(struct iommu_batch *p)
{
struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
unsigned long devhandle = pbm->devhandle;
unsigned long prot = p->prot;
unsigned long entry = p->entry;
u64 *pglist = p->pglist;
unsigned long npages = p->npages;
/* VPCI maj=1, min=[0,1] only supports read and write */
if (vpci_major < 2)
prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
while (npages != 0) {
long num;
num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
npages, prot, __pa(pglist));
if (unlikely(num < 0)) {
if (printk_ratelimit())
printk("iommu_batch_flush: IOMMU map of "
"[%08lx:%08llx:%lx:%lx:%lx] failed with "
"status %ld\n",
devhandle, HV_PCI_TSBID(0, entry),
npages, prot, __pa(pglist), num);
return -1;
}
entry += num;
npages -= num;
pglist += num;
}
p->entry = entry;
p->npages = 0;
return 0;
}
static inline void iommu_batch_new_entry(unsigned long entry)
{
struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
if (p->entry + p->npages == entry)
return;
if (p->entry != ~0UL)
iommu_batch_flush(p);
p->entry = entry;
}
/* Interrupts must be disabled. */
static inline long iommu_batch_add(u64 phys_page)
{
struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
BUG_ON(p->npages >= PGLIST_NENTS);
p->pglist[p->npages++] = phys_page;
if (p->npages == PGLIST_NENTS)
return iommu_batch_flush(p);
return 0;
}
/* Interrupts must be disabled. */
static inline long iommu_batch_end(void)
{
struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
BUG_ON(p->npages >= PGLIST_NENTS);
return iommu_batch_flush(p);
}
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp,
unsigned long attrs)
{
unsigned long flags, order, first_page, npages, n;
unsigned long prot = 0;
struct iommu *iommu;
struct page *page;
void *ret;
long entry;
int nid;
size = IO_PAGE_ALIGN(size);
order = get_order(size);
if (unlikely(order >= MAX_ORDER))
return NULL;
npages = size >> IO_PAGE_SHIFT;
if (attrs & DMA_ATTR_WEAK_ORDERING)
prot = HV_PCI_MAP_ATTR_RELAXED_ORDER;
nid = dev->archdata.numa_node;
page = alloc_pages_node(nid, gfp, order);
if (unlikely(!page))
return NULL;
first_page = (unsigned long) page_address(page);
memset((char *)first_page, 0, PAGE_SIZE << order);
iommu = dev->archdata.iommu;
entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
(unsigned long)(-1), 0);
if (unlikely(entry == IOMMU_ERROR_CODE))
goto range_alloc_fail;
*dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
ret = (void *) first_page;
first_page = __pa(first_page);
local_irq_save(flags);
iommu_batch_start(dev,
(HV_PCI_MAP_ATTR_READ | prot |
HV_PCI_MAP_ATTR_WRITE),
entry);
for (n = 0; n < npages; n++) {
long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
if (unlikely(err < 0L))
goto iommu_map_fail;
}
if (unlikely(iommu_batch_end() < 0L))
goto iommu_map_fail;
local_irq_restore(flags);
return ret;
iommu_map_fail:
iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
range_alloc_fail:
free_pages(first_page, order);
return NULL;
}
static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry,
unsigned long npages)
{
u32 devhandle = *(u32 *)demap_arg;
unsigned long num, flags;
local_irq_save(flags);
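/* The hypervisor may demap fewer pages than requested in a single call,
 * so loop until the whole range has been torn down.
 */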
do {
num = pci_sun4v_iommu_demap(devhandle,
HV_PCI_TSBID(0, entry),
npages);
entry += num;
npages -= num;
} while (npages != 0);
local_irq_restore(flags);
}
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
dma_addr_t dvma, unsigned long attrs)
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
unsigned long order, npages, entry;
u32 devhandle;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller;
devhandle = pbm->devhandle;
entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
dma_4v_iommu_demap(&devhandle, entry, npages);
iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
order = get_order(size);
if (order < 10)
free_pages((unsigned long)cpu, order);
}
static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t sz,
enum dma_data_direction direction,
unsigned long attrs)
{
struct iommu *iommu;
unsigned long flags, npages, oaddr;
unsigned long i, base_paddr;
u32 bus_addr, ret;
unsigned long prot;
long entry;
iommu = dev->archdata.iommu;
if (unlikely(direction == DMA_NONE))
goto bad;
oaddr = (unsigned long)(page_address(page) + offset);
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
(unsigned long)(-1), 0);
if (unlikely(entry == IOMMU_ERROR_CODE))
goto bad;
bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
base_paddr = __pa(oaddr & IO_PAGE_MASK);
prot = HV_PCI_MAP_ATTR_READ;
if (direction != DMA_TO_DEVICE)
prot |= HV_PCI_MAP_ATTR_WRITE;
if (attrs & DMA_ATTR_WEAK_ORDERING)
prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;
local_irq_save(flags);
iommu_batch_start(dev, prot, entry);
for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
long err = iommu_batch_add(base_paddr);
if (unlikely(err < 0L))
goto iommu_map_fail;
}
if (unlikely(iommu_batch_end() < 0L))
goto iommu_map_fail;
local_irq_restore(flags);
return ret;
bad:
if (printk_ratelimit())
WARN_ON(1);
return DMA_ERROR_CODE;
iommu_map_fail:
iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
return DMA_ERROR_CODE;
}
static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
size_t sz, enum dma_data_direction direction,
unsigned long attrs)
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
unsigned long npages;
long entry;
u32 devhandle;
if (unlikely(direction == DMA_NONE)) {
if (printk_ratelimit())
WARN_ON(1);
return;
}
iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller;
devhandle = pbm->devhandle;
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
bus_addr &= IO_PAGE_MASK;
entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;
dma_4v_iommu_demap(&devhandle, entry, npages);
iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
unsigned long attrs)
{
struct scatterlist *s, *outs, *segstart;
unsigned long flags, handle, prot;
dma_addr_t dma_next = 0, dma_addr;
unsigned int max_seg_size;
unsigned long seg_boundary_size;
int outcount, incount, i;
struct iommu *iommu;
unsigned long base_shift;
long err;
BUG_ON(direction == DMA_NONE);
iommu = dev->archdata.iommu;
if (nelems == 0 || !iommu)
return 0;
prot = HV_PCI_MAP_ATTR_READ;
if (direction != DMA_TO_DEVICE)
prot |= HV_PCI_MAP_ATTR_WRITE;
if (attrs & DMA_ATTR_WEAK_ORDERING)
prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;
outs = s = segstart = &sglist[0];
outcount = 1;
incount = nelems;
handle = 0;
/* Init first segment length for backout at failure */
outs->dma_length = 0;
local_irq_save(flags);
iommu_batch_start(dev, prot, ~0UL);
max_seg_size = dma_get_max_seg_size(dev);
seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
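/* max_seg_size, seg_boundary_size and base_shift let the merge logic
 * below respect the device's segment limits; the boundary check works in
 * IO-page-sized IOMMU-entry units.
 */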
for_each_sg(sglist, s, nelems, i) {
unsigned long paddr, npages, entry, out_entry = 0, slen;
slen = s->length;
/* Sanity check */
if (slen == 0) {
dma_next = 0;
continue;
}
/* Allocate iommu entries for that segment */
paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
&handle, (unsigned long)(-1), 0);
/* Handle failure */
if (unlikely(entry == IOMMU_ERROR_CODE)) {
if (printk_ratelimit())
printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
" npages %lx\n", iommu, paddr, npages);
goto iommu_map_failed;
}
iommu_batch_new_entry(entry);
/* Convert entry to a dma_addr_t */
dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT);
dma_addr |= (s->offset & ~IO_PAGE_MASK);
/* Insert into HW table */
paddr &= IO_PAGE_MASK;
while (npages--) {
err = iommu_batch_add(paddr);
if (unlikely(err < 0L))
goto iommu_map_failed;
paddr += IO_PAGE_SIZE;
}
/* If we are in an open segment, try merging */
if (segstart != s) {
/* We cannot merge if:
* - allocated dma_addr isn't contiguous to previous allocation
*/
if ((dma_addr != dma_next) ||
(outs->dma_length + s->length > max_seg_size) ||
(is_span_boundary(out_entry, base_shift,
seg_boundary_size, outs, s))) {
/* Can't merge: create a new segment */
segstart = s;
outcount++;
outs = sg_next(outs);
} else {
outs->dma_length += s->length;
}
}
if (segstart == s) {
/* This is a new segment, fill entries */
outs->dma_address = dma_addr;
outs->dma_length = slen;
out_entry = entry;
}
/* Calculate next page pointer for contiguous check */
dma_next = dma_addr + slen;
}
err = iommu_batch_end();
if (unlikely(err < 0L))
goto iommu_map_failed;
local_irq_restore(flags);
if (outcount < incount) {
outs = sg_next(outs);
outs->dma_address = DMA_ERROR_CODE;
outs->dma_length = 0;
}
return outcount;
iommu_map_failed:
for_each_sg(sglist, s, nelems, i) {
if (s->dma_length != 0) {
unsigned long vaddr, npages;
vaddr = s->dma_address & IO_PAGE_MASK;
npages = iommu_num_pages(s->dma_address, s->dma_length,
IO_PAGE_SIZE);
iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
IOMMU_ERROR_CODE);
/* XXX demap? XXX */
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
}
if (s == outs)
break;
}
local_irq_restore(flags);
return 0;
}
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
unsigned long attrs)
{
struct pci_pbm_info *pbm;
struct scatterlist *sg;
struct iommu *iommu;
unsigned long flags, entry;
u32 devhandle;
BUG_ON(direction == DMA_NONE);
iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller;
devhandle = pbm->devhandle;
local_irq_save(flags);
sg = sglist;
while (nelems--) {
dma_addr_t dma_handle = sg->dma_address;
unsigned int len = sg->dma_length;
unsigned long npages;
struct iommu_map_table *tbl = &iommu->tbl;
unsigned long shift = IO_PAGE_SHIFT;
if (!len)
break;
npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
entry = ((dma_handle - tbl->table_map_base) >> shift);
dma_4v_iommu_demap(&devhandle, entry, npages);
iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
IOMMU_ERROR_CODE);
sg = sg_next(sg);
}
local_irq_restore(flags);
}
static struct dma_map_ops sun4v_dma_ops = {
.alloc = dma_4v_alloc_coherent,
.free = dma_4v_free_coherent,
.map_page = dma_4v_map_page,
.unmap_page = dma_4v_unmap_page,
.map_sg = dma_4v_map_sg,
.unmap_sg = dma_4v_unmap_sg,
};
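/* No sync_* callbacks: unlike sun4u, sun4v has no streaming cache to
 * flush, so the core's no-op defaults suffice for streaming mappings.
 */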
static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
{
struct property *prop;
struct device_node *dp;
dp = pbm->op->dev.of_node;
prop = of_find_property(dp, "66mhz-capable", NULL);
pbm->is_66mhz_capable = (prop != NULL);
pbm->pci_bus = pci_scan_one_pbm(pbm, parent);
/* XXX register error interrupt handlers XXX */
}
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
struct iommu_map_table *iommu)
{
struct iommu_pool *pool;
unsigned long i, pool_nr, cnt = 0;
u32 devhandle;
devhandle = pbm->devhandle;
for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
pool = &(iommu->pools[pool_nr]);
for (i = pool->start; i <= pool->end; i++) {
unsigned long ret, io_attrs, ra;
ret = pci_sun4v_iommu_getmap(devhandle,
HV_PCI_TSBID(0, i),
&io_attrs, &ra);
if (ret == HV_EOK) {
if (page_in_phys_avail(ra)) {
pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, i), 1);
} else {
cnt++;
__set_bit(i, iommu->map);
}
}
}
}
return cnt;
}
static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
struct iommu *iommu = pbm->iommu;
unsigned long num_tsb_entries, sz;
u32 dma_mask, dma_offset;
const u32 *vdma;
vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
if (!vdma)
vdma = vdma_default;
if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
vdma[0], vdma[1]);
return -EINVAL;
}
dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
dma_offset = vdma[0];
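/* Worked example with vdma_default: a 2 GB window based at 0x80000000
 * yields dma_mask = 0x7fffffff and 0x80000000 >> 13 = 262144 TSB entries
 * (8 KB IO pages).
 */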
/* Setup initial software IOMMU state. */
spin_lock_init(&iommu->lock);
iommu->ctx_lowest_free = 1;
iommu->tbl.table_map_base = dma_offset;
iommu->dma_addr_mask = dma_mask;
/* Allocate and initialize the free area map. */
sz = (num_tsb_entries + 7) / 8;
sz = (sz + 7UL) & ~7UL;
iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
if (!iommu->tbl.map) {
printk(KERN_ERR PFX "Error, kzalloc(iommu->tbl.map) failed.\n");
return -ENOMEM;
}
iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
NULL, false /* no large_pool */,
0 /* default npools */,
false /* want span boundary checking */);
sz = probe_existing_entries(pbm, &iommu->tbl);
if (sz)
printk("%s: Imported %lu TSB entries from OBP\n",
pbm->name, sz);
return 0;
}
#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
u64 version_type;
#define MSIQ_VERSION_MASK 0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT 32
#define MSIQ_TYPE_MASK 0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT 0
#define MSIQ_TYPE_NONE 0x00
#define MSIQ_TYPE_MSG 0x01
#define MSIQ_TYPE_MSI32 0x02
#define MSIQ_TYPE_MSI64 0x03
#define MSIQ_TYPE_INTX 0x08
#define MSIQ_TYPE_NONE2 0xff
u64 intx_sysino;
u64 reserved1;
u64 stick;
u64 req_id; /* bus/device/func */
#define MSIQ_REQID_BUS_MASK 0xff00UL
#define MSIQ_REQID_BUS_SHIFT 8
#define MSIQ_REQID_DEVICE_MASK 0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT 3
#define MSIQ_REQID_FUNC_MASK 0x0007UL
#define MSIQ_REQID_FUNC_SHIFT 0
u64 msi_address;
/* The format of this value is message type dependent.
* For MSI bits 15:0 are the data from the MSI packet.
* For MSI-X bits 31:0 are the data from the MSI packet.
* For MSG, the message code and message routing code where:
* bits 39:32 is the bus/device/fn of the msg target-id
* bits 18:16 is the message routing code
* bits 7:0 is the message code
* For INTx the low order 2-bits are:
* 00 - INTA
* 01 - INTB
* 10 - INTC
* 11 - INTD
*/
u64 msi_data;
u64 reserved2;
};
static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long *head)
{
unsigned long err, limit;
err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
if (unlikely(err))
return -ENXIO;
limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
if (unlikely(*head >= limit))
return -EFBIG;
return 0;
}
static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
unsigned long msiqid, unsigned long *head,
unsigned long *msi)
{
struct pci_sun4v_msiq_entry *ep;
unsigned long err, type;
/* Note: void pointer arithmetic, 'head' is a byte offset */
ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
(pbm->msiq_ent_count *
sizeof(struct pci_sun4v_msiq_entry))) +
*head);
if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
return 0;
type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
if (unlikely(type != MSIQ_TYPE_MSI32 &&
type != MSIQ_TYPE_MSI64))
return -EINVAL;
*msi = ep->msi_data;
err = pci_sun4v_msi_setstate(pbm->devhandle,
ep->msi_data /* msi_num */,
HV_MSISTATE_IDLE);
if (unlikely(err))
return -ENXIO;
/* Clear the entry. */
ep->version_type &= ~MSIQ_TYPE_MASK;
(*head) += sizeof(struct pci_sun4v_msiq_entry);
if (*head >=
(pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
*head = 0;
return 1;
}
static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long head)
{
unsigned long err;
err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
if (unlikely(err))
return -EINVAL;
return 0;
}
static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long msi, int is_msi64)
{
if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
(is_msi64 ?
HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
return -ENXIO;
if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
return -ENXIO;
if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
return -ENXIO;
return 0;
}
static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
unsigned long err, msiqid;
err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
if (err)
return -ENXIO;
pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);
return 0;
}
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
unsigned long q_size, alloc_size, pages, order;
int i;
q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
alloc_size = (pbm->msiq_num * q_size);
order = get_order(alloc_size);
pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
if (pages == 0UL) {
printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
order);
return -ENOMEM;
}
memset((char *)pages, 0, PAGE_SIZE << order);
pbm->msi_queues = (void *) pages;
for (i = 0; i < pbm->msiq_num; i++) {
unsigned long err, base = __pa(pages + (i * q_size));
unsigned long ret1, ret2;
err = pci_sun4v_msiq_conf(pbm->devhandle,
pbm->msiq_first + i,
base, pbm->msiq_ent_count);
if (err) {
printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
err);
goto h_error;
}
err = pci_sun4v_msiq_info(pbm->devhandle,
pbm->msiq_first + i,
&ret1, &ret2);
if (err) {
printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
err);
goto h_error;
}
if (ret1 != base || ret2 != pbm->msiq_ent_count) {
printk(KERN_ERR "MSI: Bogus qconf "
"expected[%lx:%x] got[%lx:%lx]\n",
base, pbm->msiq_ent_count,
ret1, ret2);
goto h_error;
}
}
return 0;
h_error:
free_pages(pages, order);
return -EINVAL;
}
static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
unsigned long q_size, alloc_size, pages, order;
int i;
for (i = 0; i < pbm->msiq_num; i++) {
unsigned long msiqid = pbm->msiq_first + i;
(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
}
q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
alloc_size = (pbm->msiq_num * q_size);
order = get_order(alloc_size);
pages = (unsigned long) pbm->msi_queues;
free_pages(pages, order);
pbm->msi_queues = NULL;
}
static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
unsigned long msiqid,
unsigned long devino)
{
unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);
if (!irq)
return -ENOMEM;
if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
return -EINVAL;
if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
return -EINVAL;
return irq;
}
static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
.get_head = pci_sun4v_get_head,
.dequeue_msi = pci_sun4v_dequeue_msi,
.set_head = pci_sun4v_set_head,
.msi_setup = pci_sun4v_msi_setup,
.msi_teardown = pci_sun4v_msi_teardown,
.msiq_alloc = pci_sun4v_msiq_alloc,
.msiq_free = pci_sun4v_msiq_free,
.msiq_build_irq = pci_sun4v_msiq_build_irq,
};
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */
static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
struct platform_device *op, u32 devhandle)
{
struct device_node *dp = op->dev.of_node;
int err;
pbm->numa_node = of_node_to_nid(dp);
pbm->pci_ops = &sun4v_pci_ops;
pbm->config_space_reg_bits = 12;
pbm->index = pci_num_pbms++;
pbm->op = op;
pbm->devhandle = devhandle;
pbm->name = dp->full_name;
printk("%s: SUN4V PCI Bus Module\n", pbm->name);
printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);
pci_determine_mem_io_space(pbm);
pci_get_pbm_props(pbm);
err = pci_sun4v_iommu_init(pbm);
if (err)
return err;
pci_sun4v_msi_init(pbm);
pci_sun4v_scan_bus(pbm, &op->dev);
pbm->next = pci_pbm_root;
pci_pbm_root = pbm;
return 0;
}
static int pci_sun4v_probe(struct platform_device *op)
{
const struct linux_prom64_registers *regs;
static int hvapi_negotiated = 0;
struct pci_pbm_info *pbm;
struct device_node *dp;
struct iommu *iommu;
u32 devhandle;
int i, err = -ENODEV;
dp = op->dev.of_node;
if (!hvapi_negotiated++) {
for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) {
vpci_major = vpci_versions[i].major;
vpci_minor = vpci_versions[i].minor;
err = sun4v_hvapi_register(HV_GRP_PCI, vpci_major,
&vpci_minor);
if (!err)
break;
}
if (err) {
pr_err(PFX "Could not register hvapi, err=%d\n", err);
return err;
}
pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
vpci_major, vpci_minor);
dma_ops = &sun4v_dma_ops;
}
regs = of_get_property(dp, "reg", NULL);
err = -ENODEV;
if (!regs) {
printk(KERN_ERR PFX "Could not find config registers\n");
goto out_err;
}
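/* The 28-bit devhandle lives in bits 59:32 of the first "reg" cell and
 * identifies this PBM in every hypervisor PCI call.
 */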
devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;
err = -ENOMEM;
if (!iommu_batch_initialized) {
for_each_possible_cpu(i) {
unsigned long page = get_zeroed_page(GFP_KERNEL);
if (!page)
goto out_err;
per_cpu(iommu_batch, i).pglist = (u64 *) page;
}
iommu_batch_initialized = 1;
}
pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
if (!pbm) {
printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
goto out_err;
}
iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
if (!iommu) {
printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
goto out_free_controller;
}
pbm->iommu = iommu;
err = pci_sun4v_pbm_init(pbm, op, devhandle);
if (err)
goto out_free_iommu;
dev_set_drvdata(&op->dev, pbm);
return 0;
out_free_iommu:
kfree(pbm->iommu);
out_free_controller:
kfree(pbm);
out_err:
return err;
}
static const struct of_device_id pci_sun4v_match[] = {
{
.name = "pci",
.compatible = "SUNW,sun4v-pci",
},
{},
};
static struct platform_driver pci_sun4v_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = pci_sun4v_match,
},
.probe = pci_sun4v_probe,
};
static int __init pci_sun4v_init(void)
{
return platform_driver_register(&pci_sun4v_driver);
}
subsys_initcall(pci_sun4v_init);