Mirror of https://github.com/torvalds/linux.git, synced 2024-12-05 10:32:35 +00:00
Commit 3b06ac6707
Today there is a global limit of pages mapped via /dev/xen/gntdev, set to 1 million pages by default. There is no reason for that limit to exist: the total number of grant mappings is limited by the hypervisor anyway, and preferring kernel mappings over userspace ones doesn't make sense. It should be noted that the gntdev device is usable by root only.

Additionally, checking of that limit is fragile, as the number of pages to map via one call is specified in a 32-bit unsigned variable which isn't tested to stay within reasonable limits (the only test is that the value is greater than zero, which basically excludes only calls without any mapping requested). So trying to map e.g. 0xffff0000 pages while nearly 1000000 pages are already mapped will effectively lower the global number of mapped pages, such that a parallel call mapping a reasonable amount of pages can succeed in spite of the global limit being violated.

So drop the global limit and introduce a per-call limit instead. This per-call limit (default: 65536 grant mappings) protects against allocating insanely large arrays in the kernel for a hypercall which will fail anyway in case a user is e.g. trying to map billions of pages.

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
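As a minimal sketch of the per-call check described above (the module parameter name "limit" and the caller-side handling are assumptions for illustration, not necessarily the patch as merged), the helper declared below as gntdev_test_page_count() could look roughly like this in gntdev.c:

/* Sketch only: per-call limit on grant mappings (assumed names). */
static unsigned int limit = 64 * 1024;	/* default: 65536 mappings per call */
module_param(limit, uint, 0644);
MODULE_PARM_DESC(limit, "Maximum number of grant mappings one ioctl call may request");

/* Return true if the requested count is unusable: zero or above the per-call limit. */
bool gntdev_test_page_count(unsigned int count)
{
	return !count || count > limit;
}

Ioctl paths would then reject oversized requests up front, e.g. with "if (gntdev_test_page_count(op.count)) return -EINVAL;", instead of accounting each request against a global counter.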
89 lines
2.1 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Common functionality of grant device.
 *
 * Copyright (c) 2006-2007, D G Murray.
 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
 *           (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#ifndef _GNTDEV_COMMON_H
#define _GNTDEV_COMMON_H

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/types.h>

struct gntdev_dmabuf_priv;

struct gntdev_priv {
	/* Maps with visible offsets in the file descriptor. */
	struct list_head maps;
	/* lock protects maps and freeable_maps. */
	struct mutex lock;

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
	/* Device for which DMA memory is allocated. */
	struct device *dma_dev;
#endif

#ifdef CONFIG_XEN_GNTDEV_DMABUF
	struct gntdev_dmabuf_priv *dmabuf_priv;
#endif
};

struct gntdev_unmap_notify {
	int flags;
	/* Address relative to the start of the gntdev_grant_map. */
	int addr;
	int event;
};

struct gntdev_grant_map {
	struct mmu_interval_notifier notifier;
	struct list_head next;
	struct vm_area_struct *vma;
	int index;
	int count;
	int flags;
	refcount_t users;
	struct gntdev_unmap_notify notify;
	struct ioctl_gntdev_grant_ref *grants;
	struct gnttab_map_grant_ref *map_ops;
	struct gnttab_unmap_grant_ref *unmap_ops;
	struct gnttab_map_grant_ref *kmap_ops;
	struct gnttab_unmap_grant_ref *kunmap_ops;
	struct page **pages;
	unsigned long pages_vm_start;

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
	/*
	 * If dmabuf_vaddr is not NULL then this mapping is backed by DMA
	 * capable memory.
	 */

	struct device *dma_dev;
	/* Flags used to create this DMA buffer: GNTDEV_DMA_FLAG_XXX. */
	int dma_flags;
	void *dma_vaddr;
	dma_addr_t dma_bus_addr;
	/* Needed to avoid allocation in gnttab_dma_free_pages(). */
	xen_pfn_t *frames;
#endif
};

struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
					  int dma_flags);

void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add);

void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map);

bool gntdev_test_page_count(unsigned int count);

int gntdev_map_grant_pages(struct gntdev_grant_map *map);

#endif
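To illustrate how the declarations above are meant to be used together (a hypothetical sketch of a typical gntdev map ioctl flow; the function name, error handling and field usage are assumptions based on this header and the gntdev uapi structures, not a verbatim copy of gntdev.c):

/* Sketch: map grant references on behalf of userspace (assumed helper name). */
static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
				       struct ioctl_gntdev_map_grant_ref __user *u)
{
	struct ioctl_gntdev_map_grant_ref op;
	struct gntdev_grant_map *map;

	if (copy_from_user(&op, u, sizeof(op)))
		return -EFAULT;

	/* Reject zero-sized or oversized requests before allocating anything. */
	if (gntdev_test_page_count(op.count))
		return -EINVAL;

	/* Allocate the tracking structure and backing pages. */
	map = gntdev_alloc_map(priv, op.count, 0 /* no DMA flags */);
	if (!map)
		return -ENOMEM;

	/* Copy the grant references supplied by userspace. */
	if (copy_from_user(map->grants, &u->refs,
			   sizeof(map->grants[0]) * op.count)) {
		gntdev_put_map(NULL, map);
		return -EFAULT;
	}

	/* Publish the map so a later mmap() can find it by offset. */
	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	op.index = map->index << PAGE_SHIFT;
	mutex_unlock(&priv->lock);

	if (copy_to_user(u, &op, sizeof(op)))
		return -EFAULT;
	return 0;
}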