mirror of
https://github.com/torvalds/linux.git
synced 2024-11-25 21:51:40 +00:00
2efbafb91e
Consider the following sequence of events: 1) A page in a PROT_READ|PROT_WRITE VMA is faulted. 2) Page migration allocates a page with the KASAN allocator, causing it to receive a non-match-all tag, and uses it to replace the page faulted in 1. 3) The program uses mprotect() to enable PROT_MTE on the page faulted in 1. As a result of step 3, we are left with a non-match-all tag for a page with tags accessible to userspace, which can lead to the same kind of tag check faults that commit e74a684680
("arm64: Reset KASAN tag in copy_highpage with HW tags only") intended to fix. The general invariant that we have for pages in a VMA with VM_MTE_ALLOWED is that they cannot have a non-match-all tag. As a result of step 2, the invariant is broken. This means that the fix in the referenced commit was incomplete and we also need to reset the tag for pages without PG_mte_tagged. Fixes: e5b8d92189
("arm64: mte: reset the page tag in page->flags") Cc: <stable@vger.kernel.org> # 5.15 Link: https://linux-review.googlesource.com/id/I7409cdd41acbcb215c2a7417c1e50d37b875beff Signed-off-by: Peter Collingbourne <pcc@google.com> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Link: https://lore.kernel.org/r/20230420210945.2313627-1-pcc@google.com Signed-off-by: Will Deacon <will@kernel.org>
43 lines
999 B
C
43 lines
999 B
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/*
|
|
* Based on arch/arm/mm/copypage.c
|
|
*
|
|
* Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
|
|
* Copyright (C) 2012 ARM Ltd.
|
|
*/
|
|
|
|
#include <linux/bitops.h>
|
|
#include <linux/mm.h>
|
|
|
|
#include <asm/page.h>
|
|
#include <asm/cacheflush.h>
|
|
#include <asm/cpufeature.h>
|
|
#include <asm/mte.h>
|
|
|
|
/*
 * copy_highpage - copy page contents (and, where applicable, MTE tags)
 * from @from to @to.
 *
 * With hardware KASAN tags enabled, the KASAN tag of @to is reset
 * unconditionally — even when @from carries no MTE tags — so that a
 * freshly allocated destination page never keeps a non-match-all tag.
 * (Per the fix rationale: migration may allocate @to with a KASAN tag,
 * and a later mprotect(PROT_MTE) would expose that tag to userspace.)
 */
void copy_highpage(struct page *to, struct page *from)
{
	void *kto = page_address(to);
	void *kfrom = page_address(from);

	copy_page(kto, kfrom);

	/*
	 * Reset the destination's KASAN tag before any MTE handling.
	 * This must happen for every page, not only PG_mte_tagged ones.
	 */
	if (kasan_hw_tags_enabled())
		page_kasan_tag_reset(to);

	if (system_supports_mte() && page_mte_tagged(from)) {
		/* It's a new page, shouldn't have been tagged yet */
		WARN_ON_ONCE(!try_page_mte_tagging(to));
		/* Mirror the source's allocation tags onto the copy. */
		mte_copy_page_tags(kto, kfrom);
		set_page_mte_tagged(to);
	}
}
EXPORT_SYMBOL(copy_highpage);
|
|
|
|
/*
 * copy_user_highpage - copy a page destined for a user mapping.
 *
 * Delegates the data (and MTE tag) copy to copy_highpage(), then
 * flushes the destination page's D-cache via flush_dcache_page().
 * @vaddr and @vma are part of the generic interface and are not used
 * by this arm64 implementation.
 */
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	copy_highpage(to, from);
	flush_dcache_page(to);
}
EXPORT_SYMBOL_GPL(copy_user_highpage);
|