2024-03-27 20:59:09 +00:00
|
|
|
#ifndef IO_URING_MEMMAP_H
|
|
|
|
#define IO_URING_MEMMAP_H
|
|
|
|
|
|
|
|
struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);
|
|
|
|
void io_pages_free(struct page ***pages, int npages);
|
|
|
|
int io_uring_mmap_pages(struct io_ring_ctx *ctx, struct vm_area_struct *vma,
|
|
|
|
struct page **pages, int npages);
|
|
|
|
|
|
|
|
void *io_pages_map(struct page ***out_pages, unsigned short *npages,
|
|
|
|
size_t size);
|
|
|
|
void io_pages_unmap(void *ptr, struct page ***pages, unsigned short *npages,
|
|
|
|
bool put_pages);
|
|
|
|
|
|
|
|
void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
|
|
|
|
unsigned long uaddr, size_t size);
|
|
|
|
|
|
|
|
#ifndef CONFIG_MMU
|
|
|
|
unsigned int io_uring_nommu_mmap_capabilities(struct file *file);
|
|
|
|
#endif
|
|
|
|
unsigned long io_uring_get_unmapped_area(struct file *file, unsigned long addr,
|
|
|
|
unsigned long len, unsigned long pgoff,
|
|
|
|
unsigned long flags);
|
|
|
|
int io_uring_mmap(struct file *file, struct vm_area_struct *vma);
|
|
|
|
|
io_uring: introduce concept of memory regions
We've got a good number of mappings we share with the userspace, that
includes the main rings, provided buffer rings, upcoming rings for
zerocopy rx and more. All of them duplicate user argument parsing and
some internal details as well (page pinning, huge page optimisations,
mmap'ing, etc.)
Introduce a notion of regions. For userspace for now it's just a new
structure called struct io_uring_region_desc which is supposed to
parameterise all such mapping / queue creations. A region either
represents a user provided chunk of memory, in which case the user_addr
field should point to it, or a request for the kernel to allocate the
memory, in which case the user would need to mmap it after using the
offset returned in the mmap_offset field. With a uniform userspace API
we can avoid additional boilerplate code and apply future optimisation
to all of them at once.
Internally, there is a new structure struct io_mapped_region holding all
relevant runtime information and some helpers to work with it. This
patch limits it to user provided regions.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/0e6fe25818dfbaebd1bd90b870a6cac503fe1a24.1731689588.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2024-11-15 16:54:41 +00:00
|
|
|
void io_free_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr);
|
|
|
|
int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
|
|
|
|
struct io_uring_region_desc *reg);
|
|
|
|
|
|
|
|
/* Return the kernel vmap address of the region's pages (mr->vmap_ptr). */
static inline void *io_region_get_ptr(struct io_mapped_region *mr)
{
	return mr->vmap_ptr;
}
|
|
|
|
|
|
|
|
static inline bool io_region_is_set(struct io_mapped_region *mr)
|
|
|
|
{
|
|
|
|
return !!mr->nr_pages;
|
|
|
|
}
|
|
|
|
|
2024-03-27 20:59:09 +00:00
|
|
|
#endif
|