filemap: Cache the value of vm_flags

After we have unlocked the mmap_lock for I/O, the file is pinned, but
the VMA is not.  Checking the VMA's vm_flags after that point can be a
use-after-free.
It's not a terribly interesting use-after-free as it can only read one
bit, and it's used to decide whether to read 2MB or 4MB.  But it
upsets the automated tools and it's generally bad practice anyway,
so let's fix it.
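
In sketch form (illustrative fragments only, not code lifted verbatim
from the patch), the hazard and the fix look like this:

	/* Before: the VMA is only guaranteed valid while mmap_lock is held */
	fpin = maybe_unlock_mmap_for_io(vmf, fpin);	/* may drop mmap_lock */
	if (vmf->vma->vm_flags & VM_RAND_READ)		/* BAD: possible use-after-free */
		return fpin;

	/* After: snapshot vm_flags while the VMA is still known to be valid */
	unsigned long vm_flags = vmf->vma->vm_flags;
	fpin = maybe_unlock_mmap_for_io(vmf, fpin);	/* file stays pinned, VMA does not */
	if (vm_flags & VM_RAND_READ)			/* OK: reads the local copy */
		return fpin;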

Reported-by: syzbot+5b96d55e5b54924c77ad@syzkaller.appspotmail.com
Fixes: 4687fdbb80 ("mm/filemap: Support VM_HUGEPAGE for file mappings")
Cc: stable@vger.kernel.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

@@ -2991,11 +2991,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 	struct address_space *mapping = file->f_mapping;
 	DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
 	struct file *fpin = NULL;
+	unsigned long vm_flags = vmf->vma->vm_flags;
 	unsigned int mmap_miss;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	/* Use the readahead code, even if readahead is disabled */
-	if (vmf->vma->vm_flags & VM_HUGEPAGE) {
+	if (vm_flags & VM_HUGEPAGE) {
 		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 		ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
 		ra->size = HPAGE_PMD_NR;
@@ -3003,7 +3004,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 		 * Fetch two PMD folios, so we get the chance to actually
 		 * readahead, unless we've been told not to.
 		 */
-		if (!(vmf->vma->vm_flags & VM_RAND_READ))
+		if (!(vm_flags & VM_RAND_READ))
 			ra->size *= 2;
 		ra->async_size = HPAGE_PMD_NR;
 		page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
@@ -3012,12 +3013,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 #endif
 
 	/* If we don't want any read-ahead, don't bother */
-	if (vmf->vma->vm_flags & VM_RAND_READ)
+	if (vm_flags & VM_RAND_READ)
 		return fpin;
 	if (!ra->ra_pages)
 		return fpin;
 
-	if (vmf->vma->vm_flags & VM_SEQ_READ) {
+	if (vm_flags & VM_SEQ_READ) {
 		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 		page_cache_sync_ra(&ractl, ra->ra_pages);
 		return fpin;