mirror of
https://github.com/torvalds/linux.git
synced 2024-11-30 16:11:38 +00:00
iommu/amd: Introduce per PCI segment last_bdf
Current code uses global "amd_iommu_last_bdf" to track the last bdf supported by the system. This value is used for various memory allocation, device data flushing, etc. Introduce per PCI segment last_bdf which will be used to track last bdf supported by the given PCI segment and use this value for all per segment memory allocations. Eventually it will replace global "amd_iommu_last_bdf". Co-developed-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> Signed-off-by: Vasant Hegde <vasant.hegde@amd.com> Link: https://lore.kernel.org/r/20220706113825.25582-11-vasant.hegde@amd.com Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
parent
b618ae6247
commit
307959008d
@@ -552,6 +552,9 @@ struct amd_iommu_pci_seg {
|
||||
/* PCI segment number */
|
||||
u16 id;
|
||||
|
||||
/* Largest PCI device id we expect translation requests for */
|
||||
u16 last_bdf;
|
||||
|
||||
/*
|
||||
* device table virtual address
|
||||
*
|
||||
|
@@ -552,6 +552,7 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
|
||||
{
|
||||
u8 *p = (void *)h, *end = (void *)h;
|
||||
struct ivhd_entry *dev;
|
||||
int last_devid = -EINVAL;
|
||||
|
||||
u32 ivhd_size = get_ivhd_header_size(h);
|
||||
|
||||
@@ -569,13 +570,15 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
|
||||
case IVHD_DEV_ALL:
|
||||
/* Use maximum BDF value for DEV_ALL */
|
||||
update_last_devid(0xffff);
|
||||
break;
|
||||
return 0xffff;
|
||||
case IVHD_DEV_SELECT:
|
||||
case IVHD_DEV_RANGE_END:
|
||||
case IVHD_DEV_ALIAS:
|
||||
case IVHD_DEV_EXT_SELECT:
|
||||
/* all the above subfield types refer to device ids */
|
||||
update_last_devid(dev->devid);
|
||||
if (dev->devid > last_devid)
|
||||
last_devid = dev->devid;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
@@ -585,7 +588,7 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
|
||||
|
||||
WARN_ON(p != end);
|
||||
|
||||
return 0;
|
||||
return last_devid;
|
||||
}
|
||||
|
||||
static int __init check_ivrs_checksum(struct acpi_table_header *table)
|
||||
@@ -609,27 +612,31 @@ static int __init check_ivrs_checksum(struct acpi_table_header *table)
|
||||
* id which we need to handle. This is the first of three functions which parse
|
||||
* the ACPI table. So we check the checksum here.
|
||||
*/
|
||||
static int __init find_last_devid_acpi(struct acpi_table_header *table)
|
||||
static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_seg)
|
||||
{
|
||||
u8 *p = (u8 *)table, *end = (u8 *)table;
|
||||
struct ivhd_header *h;
|
||||
int last_devid, last_bdf = 0;
|
||||
|
||||
p += IVRS_HEADER_LENGTH;
|
||||
|
||||
end += table->length;
|
||||
while (p < end) {
|
||||
h = (struct ivhd_header *)p;
|
||||
if (h->type == amd_iommu_target_ivhd_type) {
|
||||
int ret = find_last_devid_from_ivhd(h);
|
||||
if (h->pci_seg == pci_seg &&
|
||||
h->type == amd_iommu_target_ivhd_type) {
|
||||
last_devid = find_last_devid_from_ivhd(h);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
if (last_devid < 0)
|
||||
return -EINVAL;
|
||||
if (last_devid > last_bdf)
|
||||
last_bdf = last_devid;
|
||||
}
|
||||
p += h->length;
|
||||
}
|
||||
WARN_ON(p != end);
|
||||
|
||||
return 0;
|
||||
return last_bdf;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
@@ -1553,14 +1560,28 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
|
||||
}
|
||||
|
||||
/* Allocate PCI segment data structure */
|
||||
static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id)
|
||||
static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
|
||||
struct acpi_table_header *ivrs_base)
|
||||
{
|
||||
struct amd_iommu_pci_seg *pci_seg;
|
||||
int last_bdf;
|
||||
|
||||
/*
|
||||
* First parse ACPI tables to find the largest Bus/Dev/Func we need to
|
||||
* handle in this PCI segment. Upon this information the shared data
|
||||
* structures for the PCI segments in the system will be allocated.
|
||||
*/
|
||||
last_bdf = find_last_devid_acpi(ivrs_base, id);
|
||||
if (last_bdf < 0)
|
||||
return NULL;
|
||||
|
||||
pci_seg = kzalloc(sizeof(struct amd_iommu_pci_seg), GFP_KERNEL);
|
||||
if (pci_seg == NULL)
|
||||
return NULL;
|
||||
|
||||
pci_seg->last_bdf = last_bdf;
|
||||
DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf);
|
||||
|
||||
pci_seg->id = id;
|
||||
init_llist_head(&pci_seg->dev_data_list);
|
||||
INIT_LIST_HEAD(&pci_seg->unity_map);
|
||||
@@ -1576,7 +1597,8 @@ static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id)
|
||||
return pci_seg;
|
||||
}
|
||||
|
||||
static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id)
|
||||
static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id,
|
||||
struct acpi_table_header *ivrs_base)
|
||||
{
|
||||
struct amd_iommu_pci_seg *pci_seg;
|
||||
|
||||
@@ -1585,7 +1607,7 @@ static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id)
|
||||
return pci_seg;
|
||||
}
|
||||
|
||||
return alloc_pci_segment(id);
|
||||
return alloc_pci_segment(id, ivrs_base);
|
||||
}
|
||||
|
||||
static void __init free_pci_segments(void)
|
||||
@@ -1686,12 +1708,13 @@ static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
|
||||
* together and also allocates the command buffer and programs the
|
||||
* hardware. It does NOT enable the IOMMU. This is done afterwards.
|
||||
*/
|
||||
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
|
||||
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
|
||||
struct acpi_table_header *ivrs_base)
|
||||
{
|
||||
struct amd_iommu_pci_seg *pci_seg;
|
||||
int ret;
|
||||
|
||||
pci_seg = get_pci_segment(h->pci_seg);
|
||||
pci_seg = get_pci_segment(h->pci_seg, ivrs_base);
|
||||
if (pci_seg == NULL)
|
||||
return -ENOMEM;
|
||||
iommu->pci_seg = pci_seg;
|
||||
@@ -1866,7 +1889,7 @@ static int __init init_iommu_all(struct acpi_table_header *table)
|
||||
if (iommu == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = init_iommu_one(iommu, h);
|
||||
ret = init_iommu_one(iommu, h, table);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
@@ -2412,13 +2435,14 @@ static void __init free_unity_maps(void)
|
||||
}
|
||||
|
||||
/* called for unity map ACPI definition */
|
||||
static int __init init_unity_map_range(struct ivmd_header *m)
|
||||
static int __init init_unity_map_range(struct ivmd_header *m,
|
||||
struct acpi_table_header *ivrs_base)
|
||||
{
|
||||
struct unity_map_entry *e = NULL;
|
||||
struct amd_iommu_pci_seg *pci_seg;
|
||||
char *s;
|
||||
|
||||
pci_seg = get_pci_segment(m->pci_seg);
|
||||
pci_seg = get_pci_segment(m->pci_seg, ivrs_base);
|
||||
if (pci_seg == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -2485,7 +2509,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
|
||||
while (p < end) {
|
||||
m = (struct ivmd_header *)p;
|
||||
if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
|
||||
init_unity_map_range(m);
|
||||
init_unity_map_range(m, table);
|
||||
|
||||
p += m->length;
|
||||
}
|
||||
@@ -2909,15 +2933,6 @@ static int __init early_amd_iommu_init(void)
|
||||
amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
|
||||
DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
|
||||
|
||||
/*
|
||||
* First parse ACPI tables to find the largest Bus/Dev/Func
|
||||
* we need to handle. Upon this information the shared data
|
||||
* structures for the IOMMUs in the system will be allocated
|
||||
*/
|
||||
ret = find_last_devid_acpi(ivrs_base);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
|
||||
alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
|
||||
rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
|
||||
|
Loading…
Reference in New Issue
Block a user