RDMA/umem: Introduce an option to revoke DMABUF umem

Introduce an option to revoke DMABUF umem.

This option will retain the umem allocation while revoking its DMA
mapping. Furthermore, any subsequent attempts to map the pages should
fail once the umem has been revoked.

This functionality will be used in upcoming patches in the series,
where we aim to delay umem deallocation until mkey deregistration but
must unmap its pages immediately.

Signed-off-by: Yishai Hadas <yishaih@nvidia.com>
Link: https://patch.msgid.link/a38270f2fe4a194868ca2312f4c1c760e51bcbff.1722512548.git.leon@kernel.org
Signed-off-by: Leon Romanovsky <leon@kernel.org>
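
For illustration, a minimal sketch of the intended two-step teardown,
assuming a hypothetical driver helper (the function name and calling
context are illustrative only, not part of this patch):

/*
 * Hypothetical caller sketch: a driver that must stop DMA right away
 * but keep the umem allocation alive until its mkey is deregistered
 * can split the teardown into two steps.
 */
static void example_two_step_teardown(struct ib_umem_dmabuf *umem_dmabuf)
{
	/* Step 1: unmap and unpin now; any later call to
	 * ib_umem_dmabuf_map_pages() on this umem returns -EINVAL.
	 */
	ib_umem_dmabuf_revoke(umem_dmabuf);

	/* ... mkey deregistration completes some time later ... */

	/* Step 2: free the attachment; the internal revoke call is a
	 * no-op because the revoked bit is already set.
	 */
	ib_umem_dmabuf_release(umem_dmabuf);
}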

diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -23,6 +23,9 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
 
 	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
 
+	if (umem_dmabuf->revoked)
+		return -EINVAL;
+
 	if (umem_dmabuf->sgt)
 		goto wait_fence;
@@ -242,15 +245,29 @@ struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
 }
 EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned);
 
-void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
+void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf)
 {
 	struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;
 
 	dma_resv_lock(dmabuf->resv, NULL);
+	if (umem_dmabuf->revoked)
+		goto end;
 	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
-	if (umem_dmabuf->pinned)
+	if (umem_dmabuf->pinned) {
 		dma_buf_unpin(umem_dmabuf->attach);
+		umem_dmabuf->pinned = 0;
+	}
+	umem_dmabuf->revoked = 1;
+end:
 	dma_resv_unlock(dmabuf->resv);
+}
+EXPORT_SYMBOL(ib_umem_dmabuf_revoke);
+
+void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
+{
+	struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;
+
+	ib_umem_dmabuf_revoke(umem_dmabuf);
 
 	dma_buf_detach(dmabuf, umem_dmabuf->attach);
 	dma_buf_put(dmabuf);
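
Two details worth noting in the hunk above: the early bail-out on the
revoked bit makes ib_umem_dmabuf_revoke() safe to call more than once,
and clearing the pinned bit after dma_buf_unpin() ensures the buffer is
never unpinned twice. ib_umem_dmabuf_release() is then refactored to
revoke first, keeping the unmap/unpin logic in one place before the
attachment is detached and the dma_buf reference is dropped.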

diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -38,6 +38,7 @@ struct ib_umem_dmabuf {
 	unsigned long last_sg_trim;
 	void *private;
 	u8 pinned : 1;
+	u8 revoked : 1;
 };
 
 static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
@@ -158,6 +159,7 @@ ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
 int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
 void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
 void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
+void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf);
 
 #else /* CONFIG_INFINIBAND_USER_MEM */
 
@@ -217,6 +219,7 @@ static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
 }
 static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
 static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }
+static inline void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf) {}
 
 #endif /* CONFIG_INFINIBAND_USER_MEM */
 #endif /* IB_UMEM_H */