fs/proc/kcore.c: allow translation of physical memory addresses
When /proc/kcore is read, an attempt to read the first two pages results in a HW-specific page swap on s390 and other (so-called prefix) pages are accessed instead, which leads to a wrong read.

Allow architecture-specific translation of physical memory addresses using kc_xlate_dev_mem_ptr() and kc_unxlate_dev_mem_ptr() callbacks, similarly to the /dev/mem xlate_dev_mem_ptr() and unxlate_dev_mem_ptr() callbacks. That way an architecture can deal with specific physical memory ranges.

Re-use the existing /dev/mem callback implementation on s390, which handles the described prefix page swapping correctly. For other architectures the default callback is basically a NOP. It is expected that the condition (vaddr == __va(__pa(vaddr))) always holds true for the KCORE_RAM memory type.

Link: https://lkml.kernel.org/r/20240930122119.1651546-1-agordeev@linux.ibm.com
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Suggested-by: Heiko Carstens <hca@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 3d5854d75e
parent 76503e1fa1
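As orientation before the diff: an architecture opts in by defining the kc_* macros in its asm/io.h (s390 below simply aliases its existing /dev/mem helpers); architectures that do not define them get the identity fallbacks added to fs/proc/kcore.c. A minimal sketch of what an override could look like on a hypothetical architecture (only the kc_* hook names come from this commit; everything else is illustrative):

/* Hypothetical arch/foo/include/asm/io.h excerpt -- illustrative only.
 * Defining the macro names disables the #ifndef fallbacks in
 * fs/proc/kcore.c, so KCORE_RAM reads go through these hooks.
 */
#define kc_xlate_dev_mem_ptr kc_xlate_dev_mem_ptr
static inline void *kc_xlate_dev_mem_ptr(phys_addr_t phys)
{
	/* Translate the physical range to a readable virtual address;
	 * returning NULL makes read_kcore_iter() report -ENOMEM. */
	return __va(phys);
}

#define kc_unxlate_dev_mem_ptr kc_unxlate_dev_mem_ptr
static inline void kc_unxlate_dev_mem_ptr(phys_addr_t phys, void *virt)
{
	/* Undo whatever kc_xlate_dev_mem_ptr() established. */
}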
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -16,8 +16,10 @@
 #include <asm/pci_io.h>
 
 #define xlate_dev_mem_ptr xlate_dev_mem_ptr
+#define kc_xlate_dev_mem_ptr xlate_dev_mem_ptr
 void *xlate_dev_mem_ptr(phys_addr_t phys);
 #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
+#define kc_unxlate_dev_mem_ptr unxlate_dev_mem_ptr
 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
 
 #define IO_SPACE_LIMIT 0
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -50,6 +50,20 @@ static struct proc_dir_entry *proc_root_kcore;
 #define kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
 #endif
 
+#ifndef kc_xlate_dev_mem_ptr
+#define kc_xlate_dev_mem_ptr kc_xlate_dev_mem_ptr
+static inline void *kc_xlate_dev_mem_ptr(phys_addr_t phys)
+{
+	return __va(phys);
+}
+#endif
+#ifndef kc_unxlate_dev_mem_ptr
+#define kc_unxlate_dev_mem_ptr kc_unxlate_dev_mem_ptr
+static inline void kc_unxlate_dev_mem_ptr(phys_addr_t phys, void *virt)
+{
+}
+#endif
+
 static LIST_HEAD(kclist_head);
 static DECLARE_RWSEM(kclist_lock);
 static int kcore_need_update = 1;
@@ -471,6 +485,8 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
 	while (buflen) {
 		struct page *page;
 		unsigned long pfn;
+		phys_addr_t phys;
+		void *__start;
 
 		/*
 		 * If this is the first iteration or the address is not within
@@ -537,7 +553,8 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
 			}
 			break;
 		case KCORE_RAM:
-			pfn = __pa(start) >> PAGE_SHIFT;
+			phys = __pa(start);
+			pfn = phys >> PAGE_SHIFT;
 			page = pfn_to_online_page(pfn);
 
 			/*
@@ -557,13 +574,28 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
 			fallthrough;
 		case KCORE_VMEMMAP:
 		case KCORE_TEXT:
+			if (m->type == KCORE_RAM) {
+				__start = kc_xlate_dev_mem_ptr(phys);
+				if (!__start) {
+					ret = -ENOMEM;
+					if (iov_iter_zero(tsz, iter) != tsz)
+						ret = -EFAULT;
+					goto out;
+				}
+			} else {
+				__start = (void *)start;
+			}
+
 			/*
 			 * Sadly we must use a bounce buffer here to be able to
 			 * make use of copy_from_kernel_nofault(), as these
 			 * memory regions might not always be mapped on all
 			 * architectures.
 			 */
-			if (copy_from_kernel_nofault(buf, (void *)start, tsz)) {
+			ret = copy_from_kernel_nofault(buf, __start, tsz);
+			if (m->type == KCORE_RAM)
+				kc_unxlate_dev_mem_ptr(phys, __start);
+			if (ret) {
 				if (iov_iter_zero(tsz, iter) != tsz) {
 					ret = -EFAULT;
 					goto out;