IB/hfi1: Don't remove RB entry when not needed.

An RB tree is used for the SDMA pinning cache. Cache entries are
extracted from and reinserted into the tree when their address range
changes. However, if the address range for an entry doesn't change,
deleting it from the RB tree is unnecessary. This hurts performance,
since the tree must be rebalanced for each insertion, and it happens
in the hot path. Optimize the RB search by not removing entries when
it's not needed.

Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Mitko Haralanov <mitko.haralanov@intel.com>
Signed-off-by: Sebastian Sanchez <sebastian.sanchez@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
commit 7be85676f1
parent 14fe13fcd3
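To illustrate the optimization outside the kernel, here is a minimal
userspace C sketch of the same control flow: search the cache, leave
the entry in place on an exact range match, and extract it only when
the range differs so the caller can reinsert an updated entry.
Everything here (struct cache_node, cache_search(), the array standing
in for the interval RB tree) is a hypothetical stand-in; only the
control flow mirrors the new hfi1_mmu_rb_remove_unless_exact().

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct cache_node {
	unsigned long addr;
	unsigned long len;
	bool in_tree;		/* stands in for RB-tree membership */
};

static struct cache_node cache[4];	/* toy stand-in for the RB tree */

/* Find a node whose range overlaps [addr, addr + len). */
static struct cache_node *cache_search(unsigned long addr, unsigned long len)
{
	for (size_t i = 0; i < sizeof(cache) / sizeof(cache[0]); i++) {
		struct cache_node *n = &cache[i];

		if (n->in_tree && addr < n->addr + n->len &&
		    n->addr < addr + len)
			return n;
	}
	return NULL;
}

/*
 * Like the patched kernel helper: return true (and pull the node out
 * of the "tree") only when the cached range is not an exact match, so
 * the caller must reinsert an updated entry. On an exact match the
 * node stays put and no rebalancing cost is paid.
 */
static bool remove_unless_exact(unsigned long addr, unsigned long len,
				struct cache_node **out)
{
	struct cache_node *node = cache_search(addr, len);
	bool removed = false;

	if (node && !(node->addr == addr && node->len == len)) {
		node->in_tree = false;	/* would be rb_erase() in the kernel */
		removed = true;
	}
	*out = node;
	return removed;
}

int main(void)
{
	struct cache_node *hit;

	cache[0] = (struct cache_node){ .addr = 0x1000, .len = 0x2000,
					.in_tree = true };

	/* Exact match: entry is reused in place, nothing removed. */
	printf("exact:   removed=%d\n",
	       remove_unless_exact(0x1000, 0x2000, &hit));

	/* Overlapping but different range: entry must come out for update. */
	printf("overlap: removed=%d\n",
	       remove_unless_exact(0x1000, 0x3000, &hit));
	return 0;
}

The point of the pattern is that the exact-match fast path never
detaches the node, so a cache hit pays no rebalancing cost; only a
changed range triggers the remove/reinsert cycle.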
drivers/infiniband/hw/hfi1/mmu_rb.c

@@ -217,21 +217,27 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
 	return node;
 }
 
-struct mmu_rb_node *hfi1_mmu_rb_extract(struct mmu_rb_handler *handler,
-					unsigned long addr, unsigned long len)
+bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
+				     unsigned long addr, unsigned long len,
+				     struct mmu_rb_node **rb_node)
 {
 	struct mmu_rb_node *node;
 	unsigned long flags;
+	bool ret = false;
 
 	spin_lock_irqsave(&handler->lock, flags);
 	node = __mmu_rb_search(handler, addr, len);
 	if (node) {
+		if (node->addr == addr && node->len == len)
+			goto unlock;
 		__mmu_int_rb_remove(node, &handler->root);
 		list_del(&node->list); /* remove from LRU list */
+		ret = true;
 	}
+unlock:
 	spin_unlock_irqrestore(&handler->lock, flags);
-
-	return node;
+	*rb_node = node;
+	return ret;
 }
 
 void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
drivers/infiniband/hw/hfi1/mmu_rb.h

@@ -81,7 +81,8 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
 void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg);
 void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
 			struct mmu_rb_node *mnode);
-struct mmu_rb_node *hfi1_mmu_rb_extract(struct mmu_rb_handler *handler,
-					unsigned long addr, unsigned long len);
+bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
+				     unsigned long addr, unsigned long len,
+				     struct mmu_rb_node **rb_node);
 
 #endif /* _HFI1_MMU_RB_H */
drivers/infiniband/hw/hfi1/user_sdma.c

@@ -1165,14 +1165,23 @@ static int pin_vector_pages(struct user_sdma_request *req,
 	struct hfi1_user_sdma_pkt_q *pq = req->pq;
 	struct sdma_mmu_node *node = NULL;
 	struct mmu_rb_node *rb_node;
+	bool extracted;
 
-	rb_node = hfi1_mmu_rb_extract(pq->handler,
-				      (unsigned long)iovec->iov.iov_base,
-				      iovec->iov.iov_len);
-	if (rb_node)
+	extracted =
+		hfi1_mmu_rb_remove_unless_exact(pq->handler,
+						(unsigned long)
+						iovec->iov.iov_base,
+						iovec->iov.iov_len, &rb_node);
+	if (rb_node) {
 		node = container_of(rb_node, struct sdma_mmu_node, rb);
-	else
-		rb_node = NULL;
+		if (!extracted) {
+			atomic_inc(&node->refcount);
+			iovec->pages = node->pages;
+			iovec->npages = node->npages;
+			iovec->node = node;
+			return 0;
+		}
+	}
 
 	if (!node) {
 		node = kzalloc(sizeof(*node), GFP_KERNEL);