Mirror of https://github.com/torvalds/linux.git, synced 2024-11-25 05:32:00 +00:00.
79f9bc5843
Check for a NULL page->mapping before dereferencing the mapping in
page_is_secretmem(), as the page's mapping can be nullified while gup()
is running, e.g. by reclaim or truncation.
BUG: kernel NULL pointer dereference, address: 0000000000000068
#PF: supervisor read access in kernel mode
#PF: error_code(0x0000) - not-present page
PGD 0 P4D 0
Oops: 0000 [#1] PREEMPT SMP NOPTI
CPU: 6 PID: 4173897 Comm: CPU 3/KVM Tainted: G W
RIP: 0010:internal_get_user_pages_fast+0x621/0x9d0
Code: <48> 81 7a 68 80 08 04 bc 0f 85 21 ff ff ff 48 89 c7 be [truncated]
RSP: 0018:ffffaa90087679b0 EFLAGS: 00010046
RAX: ffffe3f37905b900 RBX: 00007f2dd561e000 RCX: ffffe3f37905b934
RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffffe3f37905b900
...
CR2: 0000000000000068 CR3: 00000004c5898003 CR4: 00000000001726e0
Call Trace:
get_user_pages_fast_only+0x13/0x20
hva_to_pfn+0xa9/0x3e0
try_async_pf+0xa1/0x270
direct_page_fault+0x113/0xad0
kvm_mmu_page_fault+0x69/0x680
vmx_handle_exit+0xe1/0x5d0
kvm_arch_vcpu_ioctl_run+0xd81/0x1c70
kvm_vcpu_ioctl+0x267/0x670
__x64_sys_ioctl+0x83/0xa0
do_syscall_64+0x56/0x80
entry_SYSCALL_64_after_hwframe+0x44/0xae
Link: https://lkml.kernel.org/r/20211007231502.3552715-1-seanjc@google.com
Fixes: 1507f51255 ("mm: introduce memfd_secret system call to create "secret" memory areas")
Signed-off-by: Sean Christopherson <seanjc@google.com>
Reported-by: Darrick J. Wong <djwong@kernel.org>
Reported-by: Stephen <stephenackerman16@gmail.com>
Tested-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Mike Rapoport <rppt@linux.ibm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
55 lines
1.2 KiB
C
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
|
|
#ifndef _LINUX_SECRETMEM_H
|
|
#define _LINUX_SECRETMEM_H
|
|
|
|
#ifdef CONFIG_SECRETMEM
|
|
|
|
extern const struct address_space_operations secretmem_aops;
|
|
|
|
/*
 * page_is_secretmem - check whether @page belongs to a secretmem mapping.
 *
 * Runs in gup() fast paths without the page locked, so it must tolerate
 * page->mapping being changed (or nullified) concurrently, e.g. by
 * reclaim or truncation.
 */
static inline bool page_is_secretmem(struct page *page)
{
	struct address_space *mapping;

	/*
	 * Using page_mapping() is quite slow because of the actual call
	 * instruction and repeated compound_head(page) inside the
	 * page_mapping() function.
	 * We know that secretmem pages are not compound and LRU so we can
	 * save a couple of cycles here.
	 */
	if (PageCompound(page) || !PageLRU(page))
		return false;

	/*
	 * Mask off the low flag bits that may be encoded in page->mapping
	 * to recover the actual address_space pointer.
	 */
	mapping = (struct address_space *)
		((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);

	/*
	 * Bail out if the mapping is NULL (the page's mapping can be
	 * nullified while gup() is running, e.g. by reclaim or
	 * truncation) or if any PAGE_MAPPING_FLAGS bits were set (the
	 * masked value then differs from the raw pointer, i.e. this is
	 * not a plain file-backed mapping).  This avoids dereferencing
	 * a NULL mapping below.
	 */
	if (!mapping || mapping != page->mapping)
		return false;

	return mapping->a_ops == &secretmem_aops;
}
|
|
|
|
/* Out-of-line variants, available only when CONFIG_SECRETMEM is enabled. */
bool vma_is_secretmem(struct vm_area_struct *vma);
bool secretmem_active(void);
|
|
|
|
#else
|
|
|
|
/* CONFIG_SECRETMEM is disabled: no VMA can ever map secretmem. */
static inline bool vma_is_secretmem(struct vm_area_struct *vma)
{
	return false;
}
|
|
|
|
/* CONFIG_SECRETMEM is disabled: no page can ever be a secretmem page. */
static inline bool page_is_secretmem(struct page *page)
{
	return false;
}
|
|
|
|
/* CONFIG_SECRETMEM is disabled: secretmem is never active. */
static inline bool secretmem_active(void)
{
	return false;
}
|
|
|
|
#endif /* CONFIG_SECRETMEM */
|
|
|
|
#endif /* _LINUX_SECRETMEM_H */
|