commit cad32d9d42
LoPAPR defines a guest-visible IOMMU with hypercalls to use it (H_PUT_TCE etc.). It was first implemented on POWER7, where hypercalls trapped into KVM in real mode (with the MMU off). The problem with real mode is that some memory is not available and some API usage crashed the host, while enabling the MMU was an expensive operation.

The problems with the real mode handlers are:
1. Occasionally these cannot complete the request, so the code is copied+modified to work in the virtual mode; very little is shared;
2. The real mode handlers have to be linked into vmlinux to work;
3. An exception in real mode immediately reboots the machine.

If the small DMA window is used, the real mode handlers bring better performance. However, since POWER8 there has always been a bigger DMA window, which VMs use to map the entire VM memory and avoid calling H_PUT_TCE. Such 1:1 mapping happens once and uses H_PUT_TCE_INDIRECT (a bulk version of H_PUT_TCE), whose virtual mode handler is even closer to its real mode version. On POWER9 hypercalls trap straight into virtual mode, so the real mode handlers never execute on POWER9 and later CPUs.

So with the current use of the DMA windows and the MMU improvements in POWER9 and later, there is no point in duplicating the code. 32-bit passed-through devices may slow down, but we do not have many of these in practice. For example, with this applied, a 1Gbit ethernet adapter still demonstrates above 800Mbit/s of actual throughput.

This removes the real mode handlers from KVM and the related code from the powernv platform. It updates the list of implemented hcalls in KVM-HV as the real mode handlers are removed.

This changes the ABI: kvmppc_h_get_tce() moves to the KVM module and kvmppc_find_table() is now static.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220506053755.3820702-1-aik@ozlabs.ru
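As background for the one-off 1:1 mapping described above, here is a minimal guest-side sketch (not part of this commit) of populating a large DMA window with H_PUT_TCE_INDIRECT, which installs up to 512 TCEs per hypercall from a page of TCE values. The do_h_put_tce_indirect() wrapper and the on-stack TCE list are hypothetical simplifications; the real guest code lives in arch/powerpc/platforms/pseries/iommu.c.

/* Hypothetical sketch: map guest memory 1:1 into a big DMA window. */
static void map_window_1to1(unsigned long liobn, unsigned long ram_size,
			    unsigned int page_shift)
{
	unsigned long npages = ram_size >> page_shift;
	unsigned long tces[512];	/* real code uses a dedicated page */
	unsigned long i, n, j;

	for (i = 0; i < npages; i += n) {
		n = min(npages - i, 512UL);
		for (j = 0; j < n; j++)	/* RPN plus read/write bits */
			tces[j] = ((i + j) << page_shift) |
				TCE_PCI_READ | TCE_PCI_WRITE;
		/* hypothetical wrapper around the H_PUT_TCE_INDIRECT hcall */
		do_h_put_tce_indirect(liobn, i << page_shift, __pa(tces), n);
	}
}

Because this mapping is set up once for the life of the VM, the per-call cost of the virtual mode handler is negligible, which is the commit's core argument.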
431 lines · 10 KiB · C
// SPDX-License-Identifier: GPL-2.0+
/*
 * TCE helpers for IODA PCI/PCIe on PowerNV platforms
 *
 * Copyright 2018 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/iommu.h>

#include <asm/iommu.h>
#include <asm/tce.h>
#include "pci.h"

unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	struct device_node *dn = hose->dn;
	unsigned long mask = 0;
	int i, rc, count;
	u32 val;

	count = of_property_count_u32_elems(dn, "ibm,supported-tce-sizes");
	if (count <= 0) {
		mask = SZ_4K | SZ_64K;
		/* Add 16M for POWER8 by default */
		if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
				!cpu_has_feature(CPU_FTR_ARCH_300))
			mask |= SZ_16M | SZ_256M;
		return mask;
	}

	for (i = 0; i < count; i++) {
		rc = of_property_read_u32_index(dn, "ibm,supported-tce-sizes",
						i, &val);
		if (rc == 0)
			mask |= 1ULL << val;
	}

	return mask;
}
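
/*
 * Illustrative note: the returned mask has bit N set when 2^N byte TCE
 * pages are supported, e.g. SZ_4K | SZ_64K sets bits 12 and 16. A
 * caller could test a particular page shift with a hypothetical helper
 * like:
 *
 *	static bool pnv_tce_size_supported(struct pnv_phb *phb,
 *					   unsigned int page_shift)
 *	{
 *		return pnv_ioda_parse_tce_sizes(phb) & (1UL << page_shift);
 *	}
 */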

void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
		void *tce_mem, u64 tce_size,
		u64 dma_offset, unsigned int page_shift)
{
	tbl->it_blocksize = 16;
	tbl->it_base = (unsigned long)tce_mem;
	tbl->it_page_shift = page_shift;
	tbl->it_offset = dma_offset >> tbl->it_page_shift;
	tbl->it_index = 0;
	tbl->it_size = tce_size >> 3;
	tbl->it_busno = 0;
	tbl->it_type = TCE_PCI;
}
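
/*
 * Worked example for the setup above (illustrative figures): each TCE
 * is 8 bytes, hence it_size = tce_size >> 3. For a 2GB window of 4K
 * TCE pages starting at bus address 0:
 *
 *	TCE entries    = SZ_2G >> 12  = 0x80000 (512K)
 *	tce_size       = 512K * 8     = 4MB of table backing
 *	tbl->it_size   = SZ_4M >> 3   = 512K entries
 *	tbl->it_offset = 0 >> 12      = 0 (window begins at DMA address 0)
 */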

static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift)
{
	struct page *tce_mem = NULL;
	__be64 *addr;

	tce_mem = alloc_pages_node(nid, GFP_ATOMIC | __GFP_NOWARN,
			shift - PAGE_SHIFT);
	if (!tce_mem) {
		pr_err("Failed to allocate a TCE memory, level shift=%d\n",
				shift);
		return NULL;
	}
	addr = page_address(tce_mem);
	memset(addr, 0, 1UL << shift);

	return addr;
}

static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
		unsigned long size, unsigned int levels);

static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
{
	__be64 *tmp = user ? tbl->it_userspace : (__be64 *) tbl->it_base;
	int level = tbl->it_indirect_levels;
	const long shift = ilog2(tbl->it_level_size);
	unsigned long mask = (tbl->it_level_size - 1) << (level * shift);

	while (level) {
		int n = (idx & mask) >> (level * shift);
		unsigned long oldtce, tce = be64_to_cpu(READ_ONCE(tmp[n]));

		if (!tce) {
			__be64 *tmp2;

			if (!alloc)
				return NULL;

			tmp2 = pnv_alloc_tce_level(tbl->it_nid,
					ilog2(tbl->it_level_size) + 3);
			if (!tmp2)
				return NULL;

			tce = __pa(tmp2) | TCE_PCI_READ | TCE_PCI_WRITE;
			oldtce = be64_to_cpu(cmpxchg(&tmp[n], 0,
					cpu_to_be64(tce)));
			if (oldtce) {
				pnv_pci_ioda2_table_do_free_pages(tmp2,
					ilog2(tbl->it_level_size) + 3, 1);
				tce = oldtce;
			}
		}

		tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
		idx &= ~mask;
		mask >>= shift;
		--level;
	}

	return tmp + idx;
}
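
/*
 * Worked example of the walk above (illustrative, assuming
 * it_level_size = 512 so shift = 9, and it_indirect_levels = 1, i.e.
 * a two-level table). For idx = 0x2A5C3:
 *
 *	level 1: mask = 0x1FF << 9, n = (idx & mask) >> 9 = 0x152,
 *		 descend through tmp[0x152], then idx &= ~mask -> 0x1C3
 *	level 0: loop exits, return tmp + 0x1C3 in the leaf level
 *
 * Missing intermediate levels are allocated on demand when alloc is
 * true; the cmpxchg() above resolves races between two CPUs allocating
 * the same level, keeping whichever landed first.
 */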

int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
		unsigned long uaddr, enum dma_data_direction direction,
		unsigned long attrs)
{
	u64 proto_tce = iommu_direction_to_tce_perm(direction);
	u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
	long i;

	if (proto_tce & TCE_PCI_WRITE)
		proto_tce |= TCE_PCI_READ;

	for (i = 0; i < npages; i++) {
		unsigned long newtce = proto_tce |
			((rpn + i) << tbl->it_page_shift);
		unsigned long idx = index - tbl->it_offset + i;

		*(pnv_tce(tbl, false, idx, true)) = cpu_to_be64(newtce);
	}

	return 0;
}
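
/*
 * Illustrative encoding (sample numbers): with it_page_shift = 12 and
 * uaddr translating to physical 0x12345000, a bidirectional mapping
 * produces, given TCE_PCI_READ = 0x1 and TCE_PCI_WRITE = 0x2:
 *
 *	rpn    = 0x12345000 >> 12 = 0x12345
 *	newtce = (0x12345 << 12) | TCE_PCI_READ | TCE_PCI_WRITE
 *	       = 0x12345003
 *
 * i.e. a TCE is the real page address with the permission bits packed
 * into the low bits that page alignment leaves free.
 */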

#ifdef CONFIG_IOMMU_API
int pnv_tce_xchg(struct iommu_table *tbl, long index,
		unsigned long *hpa, enum dma_data_direction *direction)
{
	u64 proto_tce = iommu_direction_to_tce_perm(*direction);
	unsigned long newtce = *hpa | proto_tce, oldtce;
	unsigned long idx = index - tbl->it_offset;
	__be64 *ptce = NULL;

	BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));

	if (*direction == DMA_NONE) {
		ptce = pnv_tce(tbl, false, idx, false);
		if (!ptce) {
			*hpa = 0;
			return 0;
		}
	}

	if (!ptce) {
		ptce = pnv_tce(tbl, false, idx, true);
		if (!ptce)
			return -ENOMEM;
	}

	if (newtce & TCE_PCI_WRITE)
		newtce |= TCE_PCI_READ;

	oldtce = be64_to_cpu(xchg(ptce, cpu_to_be64(newtce)));
	*hpa = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	*direction = iommu_tce_direction(oldtce);

	return 0;
}

__be64 *pnv_tce_useraddrptr(struct iommu_table *tbl, long index, bool alloc)
{
	if (WARN_ON_ONCE(!tbl->it_userspace))
		return NULL;

	return pnv_tce(tbl, true, index - tbl->it_offset, alloc);
}
#endif

void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
{
	long i;

	for (i = 0; i < npages; i++) {
		unsigned long idx = index - tbl->it_offset + i;
		__be64 *ptce = pnv_tce(tbl, false, idx, false);

		if (ptce)
			*ptce = cpu_to_be64(0);
		else
			/* Skip the rest of the level */
			i |= tbl->it_level_size - 1;
	}
}

unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
{
	__be64 *ptce = pnv_tce(tbl, false, index - tbl->it_offset, false);

	if (!ptce)
		return 0;

	return be64_to_cpu(*ptce);
}

static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
		unsigned long size, unsigned int levels)
{
	const unsigned long addr_ul = (unsigned long) addr &
			~(TCE_PCI_READ | TCE_PCI_WRITE);

	if (levels) {
		long i;
		u64 *tmp = (u64 *) addr_ul;

		for (i = 0; i < size; ++i) {
			unsigned long hpa = be64_to_cpu(tmp[i]);

			if (!(hpa & (TCE_PCI_READ | TCE_PCI_WRITE)))
				continue;

			pnv_pci_ioda2_table_do_free_pages(__va(hpa), size,
					levels - 1);
		}
	}

	free_pages(addr_ul, get_order(size << 3));
}

void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl)
{
	const unsigned long size = tbl->it_indirect_levels ?
			tbl->it_level_size : tbl->it_size;

	if (!tbl->it_size)
		return;

	pnv_pci_ioda2_table_do_free_pages((__be64 *)tbl->it_base, size,
			tbl->it_indirect_levels);
	if (tbl->it_userspace) {
		pnv_pci_ioda2_table_do_free_pages(tbl->it_userspace, size,
				tbl->it_indirect_levels);
	}
}

static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned int shift,
		unsigned int levels, unsigned long limit,
		unsigned long *current_offset, unsigned long *total_allocated)
{
	__be64 *addr, *tmp;
	unsigned long allocated = 1UL << shift;
	unsigned int entries = 1UL << (shift - 3);
	long i;

	addr = pnv_alloc_tce_level(nid, shift);
	*total_allocated += allocated;

	--levels;
	if (!levels) {
		*current_offset += allocated;
		return addr;
	}

	for (i = 0; i < entries; ++i) {
		tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
				levels, limit, current_offset, total_allocated);
		if (!tmp)
			break;

		addr[i] = cpu_to_be64(__pa(tmp) |
				TCE_PCI_READ | TCE_PCI_WRITE);

		if (*current_offset >= limit)
			break;
	}

	return addr;
}

long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
		__u32 page_shift, __u64 window_size, __u32 levels,
		bool alloc_userspace_copy, struct iommu_table *tbl)
{
	void *addr, *uas = NULL;
	unsigned long offset = 0, level_shift, total_allocated = 0;
	unsigned long total_allocated_uas = 0;
	const unsigned int window_shift = ilog2(window_size);
	unsigned int entries_shift = window_shift - page_shift;
	unsigned int table_shift = max_t(unsigned int, entries_shift + 3,
			PAGE_SHIFT);
	const unsigned long tce_table_size = 1UL << table_shift;

	if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS))
		return -EINVAL;

	if (!is_power_of_2(window_size))
		return -EINVAL;

	/* Adjust direct table size from window_size and levels */
	entries_shift = (entries_shift + levels - 1) / levels;
	level_shift = entries_shift + 3;
	level_shift = max_t(unsigned int, level_shift, PAGE_SHIFT);

	if ((level_shift - 3) * levels + page_shift >= 55)
		return -EINVAL;

	/* Allocate TCE table */
	addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
			1, tce_table_size, &offset, &total_allocated);

	/* addr==NULL means that the first level allocation failed */
	if (!addr)
		return -ENOMEM;

	/*
	 * First level was allocated but some lower level failed as
	 * we did not allocate as much as we wanted,
	 * release partially allocated table.
	 */
	if (levels == 1 && offset < tce_table_size)
		goto free_tces_exit;

	/* Allocate userspace view of the TCE table */
	if (alloc_userspace_copy) {
		offset = 0;
		uas = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
				1, tce_table_size, &offset,
				&total_allocated_uas);
		if (!uas)
			goto free_tces_exit;
		if (levels == 1 && (offset < tce_table_size ||
				total_allocated_uas != total_allocated))
			goto free_uas_exit;
	}

	/* Setup linux iommu table */
	pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, bus_offset,
			page_shift);
	tbl->it_level_size = 1ULL << (level_shift - 3);
	tbl->it_indirect_levels = levels - 1;
	tbl->it_userspace = uas;
	tbl->it_nid = nid;

	pr_debug("Created TCE table: ws=%08llx ts=%lx @%08llx base=%lx uas=%p levels=%d/%d\n",
			window_size, tce_table_size, bus_offset, tbl->it_base,
			tbl->it_userspace, 1, levels);

	return 0;

free_uas_exit:
	pnv_pci_ioda2_table_do_free_pages(uas,
			1ULL << (level_shift - 3), levels - 1);
free_tces_exit:
	pnv_pci_ioda2_table_do_free_pages(addr,
			1ULL << (level_shift - 3), levels - 1);

	return -ENOMEM;
}
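
/*
 * Worked sizing example (illustrative, assuming PAGE_SHIFT = 12):
 * window_size = 4GB, page_shift = 12, levels = 2.
 *
 *	entries_shift  = 32 - 12 = 20          (1M TCEs to cover the window)
 *	tce_table_size = 1UL << (20 + 3)       = 8MB if stored flat
 *	per level      : (20 + 2 - 1) / 2 = 10 -> 1024 entries each
 *	level_shift    = 10 + 3 = 13           -> 8KB block per level
 *
 * Only the first level is allocated up front (note the "1" passed to
 * pnv_pci_ioda2_table_do_alloc_pages() above); missing lower levels
 * are populated on demand by pnv_tce(..., alloc = true).
 */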

void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
		struct iommu_table_group *table_group)
{
	long i;
	bool found;
	struct iommu_table_group_link *tgl;

	if (!tbl || !table_group)
		return;

	/* Remove link to a group from table's list of attached groups */
	found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
		if (tgl->table_group == table_group) {
			list_del_rcu(&tgl->next);
			kfree_rcu(tgl, rcu);
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	if (WARN_ON(!found))
		return;

	/* Clean a pointer to iommu_table in iommu_table_group::tables[] */
	found = false;
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (table_group->tables[i] == tbl) {
			iommu_tce_table_put(tbl);
			table_group->tables[i] = NULL;
			found = true;
			break;
		}
	}
	WARN_ON(!found);
}

long pnv_pci_link_table_and_group(int node, int num,
		struct iommu_table *tbl,
		struct iommu_table_group *table_group)
{
	struct iommu_table_group_link *tgl = NULL;

	if (WARN_ON(!tbl || !table_group))
		return -EINVAL;

	tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
			node);
	if (!tgl)
		return -ENOMEM;

	tgl->table_group = table_group;
	list_add_rcu(&tgl->next, &tbl->it_group_list);

	table_group->tables[num] = iommu_tce_table_get(tbl);

	return 0;
}