iommu/iova: Add rbtree anchor node
Add a permanent dummy IOVA reservation to the rbtree, such that we can
always access the top of the address space instantly. The immediate
benefit is that we remove the overhead of the rb_last() traversal when
not using the cached node, but it also paves the way for further
simplifications.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent aa3ac9469c
commit bb68b2fbfb
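The core trick is a sentinel: park one permanent dummy entry at the highest possible key, and the topmost real entry is always the sentinel's immediate predecessor, with no need to search for it. Below is a standalone userspace sketch of that idea on a sorted ring rather than the kernel rbtree; every name in it (struct entry, insert_sorted(), topmost()) is invented for illustration and is not kernel API:

#include <limits.h>
#include <stdio.h>

struct entry {
	unsigned long key;
	struct entry *prev, *next;
};

/* The permanent sentinel, analogous to iovad->anchor: maximum key,
 * never allocated, never freed. */
static struct entry anchor = { .key = ULONG_MAX };

static void init_domain(void)
{
	anchor.prev = anchor.next = &anchor;	/* empty ring */
}

/* Keep the ring sorted by key, walking back from the sentinel. */
static void insert_sorted(struct entry *e)
{
	struct entry *pos = anchor.prev;

	while (pos != &anchor && pos->key > e->key)
		pos = pos->prev;
	e->prev = pos;
	e->next = pos->next;
	pos->next->prev = e;
	pos->next = e;
}

/* Analogue of rb_prev(&iovad->anchor.node): the topmost real entry
 * is simply the sentinel's predecessor. */
static struct entry *topmost(void)
{
	return anchor.prev == &anchor ? NULL : anchor.prev;
}

int main(void)
{
	struct entry a = { .key = 0x100 }, b = { .key = 0x400 };

	init_domain();
	insert_sorted(&a);
	insert_sorted(&b);
	printf("topmost key = 0x%lx\n", topmost()->key);	/* 0x400 */
	return 0;
}

The diff below applies the same idea to the IOVA rbtree, pinning a struct iova at pfn ~0UL so it is always the tree's rightmost node.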
drivers/iommu/iova.c
@@ -24,6 +24,9 @@
 #include <linux/bitops.h>
 #include <linux/cpu.h>
 
+/* The anchor node sits above the top of the usable address space */
+#define IOVA_ANCHOR	~0UL
+
 static bool iova_rcache_insert(struct iova_domain *iovad,
 			       unsigned long pfn,
 			       unsigned long size);
@@ -55,6 +58,9 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
 	iovad->flush_cb = NULL;
 	iovad->fq = NULL;
+	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
+	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
+	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
 	init_iova_rcaches(iovad);
 }
 EXPORT_SYMBOL_GPL(init_iova_domain);
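A side note on the rb_link_node() call above: init_iova_domain() runs against a freshly initialised rbroot, and inserting into an empty rbtree means there is no parent node to compare against, so the new node is linked straight into the root slot with a NULL parent. A minimal out-of-context fragment of that pattern, using the kernel rbtree API:

	struct rb_root root = RB_ROOT;	/* empty tree */
	struct rb_node node;

	/* Empty-tree insertion: parent is NULL and the link target is
	 * the root slot itself; rb_insert_color() then recolours the
	 * lone node as the black root. */
	rb_link_node(&node, NULL, &root.rb_node);
	rb_insert_color(&node, &root);

Because the anchor's pfn is ~0UL, every IOVA inserted afterwards sorts below it, so the anchor remains the rightmost node for the lifetime of the domain.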
@@ -119,7 +125,7 @@ __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
 	if (!cached_node)
 		cached_node = iovad->cached_node;
 	if (!cached_node)
-		return rb_last(&iovad->rbroot);
+		return rb_prev(&iovad->anchor.node);
 
 	curr_iova = rb_entry(cached_node, struct iova, node);
 	*limit_pfn = min(*limit_pfn, curr_iova->pfn_lo);
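For comparison, rb_last() walks the right spine from the root on every call; quoted here from lib/rbtree.c as I recall it, so treat the exact text as illustrative:

struct rb_node *rb_last(const struct rb_root *root)
{
	struct rb_node *n;

	n = root->rb_node;
	if (!n)
		return NULL;
	while (n->rb_right)
		n = n->rb_right;
	return n;
}

With the anchor guaranteed to exist and to be the rightmost node, rb_prev(&iovad->anchor.node) starts from an already-known position instead of descending from the root, and it naturally returns NULL when the tree holds nothing but the anchor, preserving the old empty-tree behaviour of rb_last().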
@@ -242,7 +248,8 @@ EXPORT_SYMBOL(alloc_iova_mem);
 
 void free_iova_mem(struct iova *iova)
 {
-	kmem_cache_free(iova_cache, iova);
+	if (iova->pfn_lo != IOVA_ANCHOR)
+		kmem_cache_free(iova_cache, iova);
 }
 EXPORT_SYMBOL(free_iova_mem);
 
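This guard is needed because the anchor is embedded in struct iova_domain rather than allocated from iova_cache, so handing it to kmem_cache_free() would corrupt the slab. No legitimate IOVA can have pfn_lo == IOVA_ANCHOR (the reserve_iova() check below enforces that), so pfn_lo is a safe discriminator. A userspace analogy of the same guard, with hypothetical names:

#include <limits.h>
#include <stdlib.h>

struct entry { unsigned long key; };

static struct entry anchor = { .key = ULONG_MAX };	/* static, embedded */

static void free_entry(struct entry *e)
{
	/* The sentinel was never heap-allocated; free()ing it would
	 * be undefined behaviour, so filter it out by its key. */
	if (e->key != ULONG_MAX)
		free(e);
}

int main(void)
{
	struct entry *e = calloc(1, sizeof(*e));

	free_entry(e);		/* really freed */
	free_entry(&anchor);	/* silently skipped */
	return 0;
}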
@@ -676,6 +683,10 @@ reserve_iova(struct iova_domain *iovad,
 	struct iova *iova;
 	unsigned int overlap = 0;
 
+	/* Don't allow nonsensical pfns */
+	if (WARN_ON((pfn_hi | pfn_lo) > (ULLONG_MAX >> iova_shift(iovad))))
+		return NULL;
+
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
 	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
 		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
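The new WARN_ON doubles as protection for the anchor: on a 64-bit build, a pfn of ~0UL exceeds ULLONG_MAX >> shift for any nonzero shift, so nobody can reserve the slot the anchor occupies. A small standalone check of the arithmetic (assuming a 4K granule, i.e. a shift of 12):

#include <stdio.h>

#define IOVA_ANCHOR ~0UL

int main(void)
{
	unsigned int shift = 12;			/* iova_shift() for a 4K granule */
	unsigned long long max_pfn = ~0ULL >> shift;
	unsigned long long anchor = IOVA_ANCHOR;	/* ~0UL, 64-bit build assumed */

	printf("highest representable pfn: 0x%llx\n", max_pfn);
	printf("anchor pfn 0x%llx -> %s\n", anchor,
	       anchor > max_pfn ? "WARN + rejected" : "allowed");
	return 0;
}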
include/linux/iova.h
@@ -75,6 +75,7 @@ struct iova_domain {
 	unsigned long	granule;	/* pfn granularity for this domain */
 	unsigned long	start_pfn;	/* Lower limit for this domain */
 	unsigned long	dma_32bit_pfn;
+	struct iova	anchor;		/* rbtree lookup anchor */
 	struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */
 
 	iova_flush_cb	flush_cb;	/* Call-Back function to flush IOMMU