Revert "i915: fix remap_io_sg to verify the pgprot"

This reverts commit b12d691ea5.

It turns out this is not ready for primetime yet.  The intentions are
good, but using remap_pfn_range() requires that there is nothing already
mapped in the area, and the i915 code seems to very much intentionally
remap the same area multiple times.

That will then just trigger the

                BUG_ON(!pte_none(*pte));

in mm/memory.c: remap_pte_range().
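
For context, the loop that contains this check looks roughly like the
following (a condensed paraphrase of remap_pte_range() in mm/memory.c,
not the exact kernel source):

        /* walk the range one page at a time, installing special PTEs */
        do {
                /* remap_pfn_range() assumes it owns an empty, unmapped range */
                BUG_ON(!pte_none(*pte));
                set_pte_at(mm, addr, pte,
                           pte_mkspecial(pfn_pte(pfn, prot)));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);

Any PTE that an earlier call already populated in the same range trips
that BUG_ON() immediately, which is exactly what happens when i915
remaps an area it has mapped before.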

There are also reports of mapping type inconsistencies, resulting in
warnings and in screen corruption.

Link: https://lore.kernel.org/lkml/20210519024322.GA29704@xsang-OptiPlex-9020/
Link: https://lore.kernel.org/lkml/YKUjvoaKKggAmpIR@sf/
Link: https://lore.kernel.org/lkml/b6b61cf0-5874-f4c0-1fcc-4b3848451c31@redhat.com/
Reported-by: kernel test robot <oliver.sang@intel.com>
Reported-by: Kalle Valo <kvalo@codeaurora.org>
Reported-by: Hans de Goede <hdegoede@redhat.com>
Reported-by: Sergei Trofimovich <slyfox@gentoo.org>
Acked-by: Christoph Hellwig <hch@lst.de>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Linus Torvalds 2021-05-19 05:55:57 -10:00
parent 8ac91e6c60
commit 293837b9ac

@@ -28,10 +28,46 @@
#include "i915_drv.h"
#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
struct remap_pfn {
struct mm_struct *mm;
unsigned long pfn;
pgprot_t prot;
struct sgt_iter sgt;
resource_size_t iobase;
};
#define use_dma(io) ((io) != -1)
static inline unsigned long sgt_pfn(const struct remap_pfn *r)
{
if (use_dma(r->iobase))
return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
else
return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
}
static int remap_sg(pte_t *pte, unsigned long addr, void *data)
{
struct remap_pfn *r = data;
if (GEM_WARN_ON(!r->sgt.sgp))
return -EINVAL;
/* Special PTE are not associated with any struct page */
set_pte_at(r->mm, addr, pte,
pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
r->pfn++; /* track insertions in case we need to unwind later */
r->sgt.curr += PAGE_SIZE;
if (r->sgt.curr >= r->sgt.max)
r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));
return 0;
}
#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
/**
 * remap_io_sg - remap an IO mapping to userspace
 * @vma: user vma to map to
@@ -46,7 +82,12 @@ int remap_io_sg(struct vm_area_struct *vma,
                unsigned long addr, unsigned long size,
                struct scatterlist *sgl, resource_size_t iobase)
{
        unsigned long pfn, len, remapped = 0;
        struct remap_pfn r = {
                .mm = vma->vm_mm,
                .prot = vma->vm_page_prot,
                .sgt = __sgt_iter(sgl, use_dma(iobase)),
                .iobase = iobase,
        };
        int err;

        /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
@@ -55,25 +96,11 @@ int remap_io_sg(struct vm_area_struct *vma,
        if (!use_dma(iobase))
                flush_cache_range(vma, addr, size);

        do {
                if (use_dma(iobase)) {
                        if (!sg_dma_len(sgl))
                                break;
                        pfn = (sg_dma_address(sgl) + iobase) >> PAGE_SHIFT;
                        len = sg_dma_len(sgl);
                } else {
                        pfn = page_to_pfn(sg_page(sgl));
                        len = sgl->length;
                }
        err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
        if (unlikely(err)) {
                zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
                return err;
        }
                err = remap_pfn_range(vma, addr + remapped, pfn, len,
                                      vma->vm_page_prot);
                if (err)
                        break;
                remapped += len;
        } while ((sgl = __sg_next(sgl)));

        if (err)
                zap_vma_ptes(vma, addr, remapped);
        return err;

        return 0;
}