arm64: Move patching utilities out of instruction encoding/decoding
Files insn.[c|h] contain some functions used for instruction patching. In order to reuse the instruction encoder/decoder, move the patching utilities to their own file.

Signed-off-by: Julien Thierry <jthierry@redhat.com>
Link: https://lore.kernel.org/r/20210303170536.1838032-2-jthierry@redhat.com
[will: Include patching.h in insn.h to fix header mess; add __ASSEMBLY__ guards]
Signed-off-by: Will Deacon <will@kernel.org>
parent c4681547bc
commit 5f154c4e20
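With this split, encoding an instruction (insn.[c|h]) and writing it into the kernel image (patching.[c|h]) become separate concerns. A minimal sketch of a consumer of both halves follows — the patch_in_nop() helper and its caller-supplied address are hypothetical, not part of this commit:

#include <asm/insn.h>	/* encoder/decoder; now also pulls in <asm/patching.h> */

/* Hypothetical helper: replace the instruction at @addr with a NOP.
 * aarch64_insn_gen_nop() is the encoder side; the write itself is done
 * by aarch64_insn_patch_text_nosync(), one of the moved patching
 * utilities, which stores the word and then flushes the icache. */
static int patch_in_nop(void *addr)
{
	return aarch64_insn_patch_text_nosync(addr, aarch64_insn_gen_nop());
}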
arch/arm64/include/asm/insn.h
@@ -11,6 +11,7 @@
 #include <linux/types.h>
 
 #include <asm/alternative.h>
+#include <asm/patching.h>
 
 #ifndef __ASSEMBLY__
 /*
@@ -379,8 +380,6 @@ static inline bool aarch64_insn_is_adr_adrp(u32 insn)
 	return aarch64_insn_is_adr(insn) || aarch64_insn_is_adrp(insn);
 }
 
-int aarch64_insn_read(void *addr, u32 *insnp);
-int aarch64_insn_write(void *addr, u32 insn);
 enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn);
 bool aarch64_insn_uses_literal(u32 insn);
 bool aarch64_insn_is_branch(u32 insn);
@@ -487,9 +486,6 @@ u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
 s32 aarch64_get_branch_offset(u32 insn);
 u32 aarch64_set_branch_offset(u32 insn, s32 offset);
 
-int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
-int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
-
 s32 aarch64_insn_adrp_get_offset(u32 insn);
 u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset);
arch/arm64/include/asm/patching.h (new file, 15 lines)
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_PATCHING_H
+#define __ASM_PATCHING_H
+
+#include <linux/types.h>
+
+#ifndef __ASSEMBLY__
+int aarch64_insn_read(void *addr, u32 *insnp);
+int aarch64_insn_write(void *addr, u32 insn);
+
+int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
+int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_PATCHING_H */
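When the patch site may be executing concurrently on other CPUs, callers are expected to use the stop_machine()-based aarch64_insn_patch_text() rather than the _nosync variant. A hedged sketch of such a caller, again with hypothetical patch sites:

/* Hypothetical: patch two live instruction slots in one operation.
 * aarch64_insn_patch_text() stops all online CPUs so none of them can
 * observe a half-updated instruction sequence. */
static int patch_two_nops(void *site0, void *site1)
{
	void *addrs[] = { site0, site1 };
	u32 insns[] = { aarch64_insn_gen_nop(), aarch64_insn_gen_nop() };

	return aarch64_insn_patch_text(addrs, insns, 2);
}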
arch/arm64/kernel/Makefile
@@ -22,7 +22,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
 			   return_address.o cpuinfo.o cpu_errata.o		\
 			   cpufeature.o alternative.o cacheinfo.o		\
 			   smp.o smp_spin_table.o topology.o smccc-call.o	\
-			   syscall.o proton-pack.o idreg-override.o
+			   syscall.o proton-pack.o idreg-override.o patching.o
 
 targets += efi-entry.o
 
arch/arm64/kernel/insn.c
@@ -7,21 +7,14 @@
  */
 #include <linux/bitops.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/spinlock.h>
-#include <linux/stop_machine.h>
 #include <linux/printk.h>
 #include <linux/sizes.h>
 #include <linux/types.h>
-#include <linux/uaccess.h>
 
-#include <asm/cacheflush.h>
 #include <asm/debug-monitors.h>
-#include <asm/fixmap.h>
 #include <asm/errno.h>
 #include <asm/insn.h>
 #include <asm/kprobes.h>
-#include <asm/sections.h>
 
 #define AARCH64_INSN_SF_BIT	BIT(31)
 #define AARCH64_INSN_N_BIT	BIT(22)
@@ -83,81 +76,6 @@ bool aarch64_insn_is_branch_imm(u32 insn)
 		aarch64_insn_is_bcond(insn));
 }
 
-static DEFINE_RAW_SPINLOCK(patch_lock);
-
-static bool is_exit_text(unsigned long addr)
-{
-	/* discarded with init text/data */
-	return system_state < SYSTEM_RUNNING &&
-		addr >= (unsigned long)__exittext_begin &&
-		addr < (unsigned long)__exittext_end;
-}
-
-static bool is_image_text(unsigned long addr)
-{
-	return core_kernel_text(addr) || is_exit_text(addr);
-}
-
-static void __kprobes *patch_map(void *addr, int fixmap)
-{
-	unsigned long uintaddr = (uintptr_t) addr;
-	bool image = is_image_text(uintaddr);
-	struct page *page;
-
-	if (image)
-		page = phys_to_page(__pa_symbol(addr));
-	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
-		page = vmalloc_to_page(addr);
-	else
-		return addr;
-
-	BUG_ON(!page);
-	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
-			(uintaddr & ~PAGE_MASK));
-}
-
-static void __kprobes patch_unmap(int fixmap)
-{
-	clear_fixmap(fixmap);
-}
-/*
- * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
- * little-endian.
- */
-int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
-{
-	int ret;
-	__le32 val;
-
-	ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
-	if (!ret)
-		*insnp = le32_to_cpu(val);
-
-	return ret;
-}
-
-static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
-{
-	void *waddr = addr;
-	unsigned long flags = 0;
-	int ret;
-
-	raw_spin_lock_irqsave(&patch_lock, flags);
-	waddr = patch_map(addr, FIX_TEXT_POKE0);
-
-	ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);
-
-	patch_unmap(FIX_TEXT_POKE0);
-	raw_spin_unlock_irqrestore(&patch_lock, flags);
-
-	return ret;
-}
-
-int __kprobes aarch64_insn_write(void *addr, u32 insn)
-{
-	return __aarch64_insn_write(addr, cpu_to_le32(insn));
-}
-
 bool __kprobes aarch64_insn_uses_literal(u32 insn)
 {
 	/* ldr/ldrsw (literal), prfm */
@@ -187,67 +105,6 @@ bool __kprobes aarch64_insn_is_branch(u32 insn)
 		aarch64_insn_is_bcond(insn);
 }
 
-int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
-{
-	u32 *tp = addr;
-	int ret;
-
-	/* A64 instructions must be word aligned */
-	if ((uintptr_t)tp & 0x3)
-		return -EINVAL;
-
-	ret = aarch64_insn_write(tp, insn);
-	if (ret == 0)
-		__flush_icache_range((uintptr_t)tp,
-				     (uintptr_t)tp + AARCH64_INSN_SIZE);
-
-	return ret;
-}
-
-struct aarch64_insn_patch {
-	void		**text_addrs;
-	u32		*new_insns;
-	int		insn_cnt;
-	atomic_t	cpu_count;
-};
-
-static int __kprobes aarch64_insn_patch_text_cb(void *arg)
-{
-	int i, ret = 0;
-	struct aarch64_insn_patch *pp = arg;
-
-	/* The first CPU becomes master */
-	if (atomic_inc_return(&pp->cpu_count) == 1) {
-		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
-			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
-							     pp->new_insns[i]);
-		/* Notify other processors with an additional increment. */
-		atomic_inc(&pp->cpu_count);
-	} else {
-		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
-			cpu_relax();
-		isb();
-	}
-
-	return ret;
-}
-
-int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
-{
-	struct aarch64_insn_patch patch = {
-		.text_addrs = addrs,
-		.new_insns = insns,
-		.insn_cnt = cnt,
-		.cpu_count = ATOMIC_INIT(0),
-	};
-
-	if (cnt <= 0)
-		return -EINVAL;
-
-	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
-				       cpu_online_mask);
-}
-
 static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
 						u32 *maskp, int *shiftp)
 {
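The patch_map()/patch_unmap() pair removed above (and recreated in patching.c below) exists because kernel text is mapped read-only: the write goes through a temporary writable alias (the FIX_TEXT_POKE0 fixmap slot) of the same physical page. As a userspace analogy of that aliasing trick — a sketch assuming Linux with memfd_create(2) (glibc 2.27+), purely illustrative and not kernel code:

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int fd = memfd_create("text", 0);	/* backing "physical" memory */

	ftruncate(fd, page);

	/* "kernel text": the primary mapping is read-only */
	uint32_t *text = mmap(NULL, page, PROT_READ, MAP_SHARED, fd, 0);
	/* patch_map() analogue: temporary writable alias of the same page */
	uint32_t *alias = mmap(NULL, page, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, 0);

	alias[0] = 0xd503201f;			/* A64 NOP encoding */
	munmap(alias, page);			/* patch_unmap() analogue */

	printf("patched insn: 0x%08x\n", text[0]);	/* prints 0xd503201f */
	return 0;
}

(Error handling omitted for brevity.)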
arch/arm64/kernel/patching.c (new file, 148 lines)
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/stop_machine.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/fixmap.h>
+#include <asm/kprobes.h>
+#include <asm/sections.h>
+
+static DEFINE_RAW_SPINLOCK(patch_lock);
+
+static bool is_exit_text(unsigned long addr)
+{
+	/* discarded with init text/data */
+	return system_state < SYSTEM_RUNNING &&
+		addr >= (unsigned long)__exittext_begin &&
+		addr < (unsigned long)__exittext_end;
+}
+
+static bool is_image_text(unsigned long addr)
+{
+	return core_kernel_text(addr) || is_exit_text(addr);
+}
+
+static void __kprobes *patch_map(void *addr, int fixmap)
+{
+	unsigned long uintaddr = (uintptr_t) addr;
+	bool image = is_image_text(uintaddr);
+	struct page *page;
+
+	if (image)
+		page = phys_to_page(__pa_symbol(addr));
+	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+		page = vmalloc_to_page(addr);
+	else
+		return addr;
+
+	BUG_ON(!page);
+	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
+			(uintaddr & ~PAGE_MASK));
+}
+
+static void __kprobes patch_unmap(int fixmap)
+{
+	clear_fixmap(fixmap);
+}
+/*
+ * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
+ * little-endian.
+ */
+int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
+{
+	int ret;
+	__le32 val;
+
+	ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
+	if (!ret)
+		*insnp = le32_to_cpu(val);
+
+	return ret;
+}
+
+static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
+{
+	void *waddr = addr;
+	unsigned long flags = 0;
+	int ret;
+
+	raw_spin_lock_irqsave(&patch_lock, flags);
+	waddr = patch_map(addr, FIX_TEXT_POKE0);
+
+	ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);
+
+	patch_unmap(FIX_TEXT_POKE0);
+	raw_spin_unlock_irqrestore(&patch_lock, flags);
+
+	return ret;
+}
+
+int __kprobes aarch64_insn_write(void *addr, u32 insn)
+{
+	return __aarch64_insn_write(addr, cpu_to_le32(insn));
+}
+
+int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
+{
+	u32 *tp = addr;
+	int ret;
+
+	/* A64 instructions must be word aligned */
+	if ((uintptr_t)tp & 0x3)
+		return -EINVAL;
+
+	ret = aarch64_insn_write(tp, insn);
+	if (ret == 0)
+		__flush_icache_range((uintptr_t)tp,
+				     (uintptr_t)tp + AARCH64_INSN_SIZE);
+
+	return ret;
+}
+
+struct aarch64_insn_patch {
+	void		**text_addrs;
+	u32		*new_insns;
+	int		insn_cnt;
+	atomic_t	cpu_count;
+};
+
+static int __kprobes aarch64_insn_patch_text_cb(void *arg)
+{
+	int i, ret = 0;
+	struct aarch64_insn_patch *pp = arg;
+
+	/* The first CPU becomes master */
+	if (atomic_inc_return(&pp->cpu_count) == 1) {
+		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
+			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
+							     pp->new_insns[i]);
+		/* Notify other processors with an additional increment. */
+		atomic_inc(&pp->cpu_count);
+	} else {
+		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
+			cpu_relax();
+		isb();
+	}
+
+	return ret;
+}
+
+int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
+{
+	struct aarch64_insn_patch patch = {
+		.text_addrs = addrs,
+		.new_insns = insns,
+		.insn_cnt = cnt,
+		.cpu_count = ATOMIC_INIT(0),
+	};
+
+	if (cnt <= 0)
+		return -EINVAL;
+
+	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
+				       cpu_online_mask);
+}
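The cpu_count protocol in aarch64_insn_patch_text_cb() deserves a note: every CPU increments the counter on entry, the first to arrive becomes the master and patches while the rest spin, and the master's extra increment is what pushes the counter past num_online_cpus() and releases the secondaries. A userspace sketch of the same rendezvous using C11 atomics and pthreads (NR_CPUS standing in for num_online_cpus(); build with -pthread) — illustrative only, not kernel code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4			/* stand-in for num_online_cpus() */

static atomic_int cpu_count;

static void *patch_cb(void *arg)
{
	/* The first thread in becomes the master */
	if (atomic_fetch_add(&cpu_count, 1) + 1 == 1) {
		puts("master: patching while the others spin");
		/* Extra increment: the counter can now exceed NR_CPUS */
		atomic_fetch_add(&cpu_count, 1);
	} else {
		/* Secondaries spin until the master's extra increment;
		 * the kernel issues isb() here to resynchronize the CPU */
		while (atomic_load(&cpu_count) <= NR_CPUS)
			;
	}
	return NULL;
}

int main(void)
{
	pthread_t threads[NR_CPUS];

	for (int i = 0; i < NR_CPUS; i++)
		pthread_create(&threads[i], NULL, patch_cb, NULL);
	for (int i = 0; i < NR_CPUS; i++)
		pthread_join(threads[i], NULL);
	return 0;
}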