2022-09-15 11:11:23 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
|
|
|
|
|
|
|
#define pr_fmt(fmt) "callthunks: " fmt
|
|
|
|
|
2022-09-15 11:11:30 +00:00
|
|
|
#include <linux/debugfs.h>
|
2022-09-15 11:11:23 +00:00
|
|
|
#include <linux/kallsyms.h>
|
|
|
|
#include <linux/memory.h>
|
|
|
|
#include <linux/moduleloader.h>
|
2022-09-15 11:11:31 +00:00
|
|
|
#include <linux/static_call.h>
|
2022-09-15 11:11:23 +00:00
|
|
|
|
|
|
|
#include <asm/alternative.h>
|
2022-09-15 11:11:29 +00:00
|
|
|
#include <asm/asm-offsets.h>
|
2022-09-15 11:11:23 +00:00
|
|
|
#include <asm/cpu.h>
|
|
|
|
#include <asm/ftrace.h>
|
|
|
|
#include <asm/insn.h>
|
|
|
|
#include <asm/kexec.h>
|
|
|
|
#include <asm/nospec-branch.h>
|
|
|
|
#include <asm/paravirt.h>
|
|
|
|
#include <asm/sections.h>
|
|
|
|
#include <asm/switch_to.h>
|
|
|
|
#include <asm/sync_core.h>
|
|
|
|
#include <asm/text-patching.h>
|
|
|
|
#include <asm/xen/hypercall.h>
|
|
|
|
|
|
|
|
/*
 * Debug switch, settable via the "debug-callthunks" kernel command line
 * option. Placed in init/module data since patching only happens at boot
 * and module load time.
 */
static int __initdata_or_module debug_callthunks;

/* Debug print helper; compiles to nothing unless the switch is set. */
#define prdbg(fmt, args...)					\
do {								\
	if (debug_callthunks)					\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);	\
} while(0)

/* Early parameter handler for "debug-callthunks". */
static int __init debug_thunks(char *str)
{
	debug_callthunks = 1;
	return 1;
}
__setup("debug-callthunks", debug_thunks);
|
|
|
|
|
2022-09-15 11:11:30 +00:00
|
|
|
#ifdef CONFIG_CALL_THUNKS_DEBUG
|
|
|
|
DEFINE_PER_CPU(u64, __x86_call_count);
|
|
|
|
DEFINE_PER_CPU(u64, __x86_ret_count);
|
|
|
|
DEFINE_PER_CPU(u64, __x86_stuffs_count);
|
|
|
|
DEFINE_PER_CPU(u64, __x86_ctxsw_count);
|
|
|
|
EXPORT_SYMBOL_GPL(__x86_ctxsw_count);
|
|
|
|
EXPORT_SYMBOL_GPL(__x86_call_count);
|
|
|
|
#endif
|
|
|
|
|
2022-09-15 11:11:23 +00:00
|
|
|
/* Call site offset table emitted at build time (.call_sites section). */
extern s32 __call_sites[], __call_sites_end[];

/* Describes one contiguous text region whose call sites get patched. */
struct core_text {
	unsigned long base;
	unsigned long end;
	const char *name;
};

/* Set once boot-time patching succeeded; gates all later patching. */
static bool thunks_initialized __ro_after_init;

/* The builtin kernel text region: [_text, _etext). */
static const struct core_text builtin_coretext = {
	.base = (unsigned long)_text,
	.end = (unsigned long)_etext,
	.name = "builtin",
};
|
|
|
|
|
2022-09-15 11:11:29 +00:00
|
|
|
/*
 * Accounting template which is copied into the padding area in front of
 * a call destination. Kept in .rodata and delimited by the
 * skl_call_thunk_template / skl_call_thunk_tail labels so its size can
 * be computed at runtime.
 */
asm (
	".pushsection .rodata \n"
	".global skl_call_thunk_template \n"
	"skl_call_thunk_template: \n"
	__stringify(INCREMENT_CALL_DEPTH)" \n"
	".global skl_call_thunk_tail \n"
	"skl_call_thunk_tail: \n"
	".popsection \n"
);

extern u8 skl_call_thunk_template[];
extern u8 skl_call_thunk_tail[];

/* Byte size of the accounting template above. */
#define SKL_TMPL_SIZE \
	((unsigned int)(skl_call_thunk_tail - skl_call_thunk_template))
|
2022-09-15 11:11:23 +00:00
|
|
|
|
|
|
|
extern void error_entry(void);
|
|
|
|
extern void xen_error_entry(void);
|
|
|
|
extern void paranoid_entry(void);
|
|
|
|
|
|
|
|
static inline bool within_coretext(const struct core_text *ct, void *addr)
|
|
|
|
{
|
|
|
|
unsigned long p = (unsigned long)addr;
|
|
|
|
|
|
|
|
return ct->base <= p && p < ct->end;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool within_module_coretext(void *addr)
|
|
|
|
{
|
|
|
|
bool ret = false;
|
|
|
|
|
|
|
|
#ifdef CONFIG_MODULES
|
|
|
|
struct module *mod;
|
|
|
|
|
|
|
|
preempt_disable();
|
|
|
|
mod = __module_address((unsigned long)addr);
|
|
|
|
if (mod && within_module_core((unsigned long)addr, mod))
|
|
|
|
ret = true;
|
|
|
|
preempt_enable();
|
|
|
|
#endif
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool is_coretext(const struct core_text *ct, void *addr)
|
|
|
|
{
|
|
|
|
if (ct && within_coretext(ct, addr))
|
|
|
|
return true;
|
|
|
|
if (within_coretext(&builtin_coretext, addr))
|
|
|
|
return true;
|
|
|
|
return within_module_coretext(addr);
|
|
|
|
}
|
|
|
|
|
2022-12-15 16:43:23 +00:00
|
|
|
/*
 * Destinations which must not be redirected through an accounting
 * thunk, either because they handle call depth themselves or because
 * their padding area cannot be patched safely.
 */
static bool skip_addr(void *dest)
{
	/* Low level entry code manages the depth itself */
	if (dest == error_entry)
		return true;
	if (dest == paranoid_entry)
		return true;
	if (dest == xen_error_entry)
		return true;
	/* Does FILL_RSB... */
	if (dest == __switch_to_asm)
		return true;
	/* Accounts directly */
	if (dest == ret_from_fork)
		return true;
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_AMD_MEM_ENCRYPT)
	if (dest == soft_restart_cpu)
		return true;
#endif
#ifdef CONFIG_FUNCTION_TRACER
	if (dest == __fentry__)
		return true;
#endif
#ifdef CONFIG_KEXEC_CORE
	/* kexec control page is copied and run from a different mapping */
	if (dest >= (void *)relocate_kernel &&
	    dest < (void*)relocate_kernel + KEXEC_CONTROL_CODE_MAX_SIZE)
		return true;
#endif
#ifdef CONFIG_XEN
	/* Hypercall page is filled by the hypervisor */
	if (dest >= (void *)hypercall_page &&
	    dest < (void*)hypercall_page + PAGE_SIZE)
		return true;
#endif
	return false;
}
|
|
|
|
|
|
|
|
static __init_or_module void *call_get_dest(void *addr)
|
|
|
|
{
|
|
|
|
struct insn insn;
|
|
|
|
void *dest;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = insn_decode_kernel(&insn, addr);
|
|
|
|
if (ret)
|
|
|
|
return ERR_PTR(ret);
|
|
|
|
|
|
|
|
/* Patched out call? */
|
|
|
|
if (insn.opcode.bytes[0] != CALL_INSN_OPCODE)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
dest = addr + insn.length + insn.immediate.value;
|
|
|
|
if (skip_addr(dest))
|
|
|
|
return NULL;
|
|
|
|
return dest;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* 32 single byte NOPs, the reference pattern for an unpatched padding area. */
static const u8 nops[] = {
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
};
|
|
|
|
|
2022-12-15 16:43:23 +00:00
|
|
|
/*
 * Copy the accounting template into the padding area in front of @dest.
 *
 * Returns the start of the filled padding (the new call target), or
 * NULL when the padding does not contain the expected NOPs.
 *
 * @direct: use a plain memcpy() instead of text_poke_copy_locked()
 * (set by callers when the destination text is not live yet —
 * NOTE(review): safety relies on the caller's context, confirm there)
 */
static void *patch_dest(void *dest, bool direct)
{
	unsigned int tsize = SKL_TMPL_SIZE;
	u8 *pad = dest - tsize;

	/* Already patched? */
	if (!bcmp(pad, skl_call_thunk_template, tsize))
		return pad;

	/* Ensure there are nops */
	if (bcmp(pad, nops, tsize)) {
		pr_warn_once("Invalid padding area for %pS\n", dest);
		return NULL;
	}

	if (direct)
		memcpy(pad, skl_call_thunk_template, tsize);
	else
		text_poke_copy_locked(pad, skl_call_thunk_template, tsize, true);
	return pad;
}
|
|
|
|
|
|
|
|
/*
 * Redirect the call at @addr to the accounting thunk placed in front of
 * its destination, provided the site lies within @ct and the destination
 * is patchable core text.
 */
static __init_or_module void patch_call(void *addr, const struct core_text *ct)
{
	void *pad, *dest;
	u8 bytes[8];

	if (!within_coretext(ct, addr))
		return;

	dest = call_get_dest(addr);
	if (!dest || WARN_ON_ONCE(IS_ERR(dest)))
		return;

	if (!is_coretext(ct, dest))
		return;

	/* Direct patching only when the target is inside @ct itself */
	pad = patch_dest(dest, within_coretext(ct, dest));
	if (!pad)
		return;

	prdbg("Patch call at: %pS %px to %pS %px -> %px \n", addr, addr,
		dest, dest, pad);
	/* Rewrite the CALL to target the padding instead of @dest */
	__text_gen_insn(bytes, CALL_INSN_OPCODE, addr, pad, CALL_INSN_SIZE);
	text_poke_early(addr, bytes, CALL_INSN_SIZE);
}
|
|
|
|
|
|
|
|
static __init_or_module void
|
|
|
|
patch_call_sites(s32 *start, s32 *end, const struct core_text *ct)
|
|
|
|
{
|
|
|
|
s32 *s;
|
|
|
|
|
|
|
|
for (s = start; s < end; s++)
|
|
|
|
patch_call((void *)s + *s, ct);
|
|
|
|
}
|
|
|
|
|
|
|
|
static __init_or_module void
|
|
|
|
patch_paravirt_call_sites(struct paravirt_patch_site *start,
|
|
|
|
struct paravirt_patch_site *end,
|
|
|
|
const struct core_text *ct)
|
|
|
|
{
|
|
|
|
struct paravirt_patch_site *p;
|
|
|
|
|
|
|
|
for (p = start; p < end; p++)
|
|
|
|
patch_call(p->instr, ct);
|
|
|
|
}
|
|
|
|
|
|
|
|
static __init_or_module void
|
|
|
|
callthunks_setup(struct callthunk_sites *cs, const struct core_text *ct)
|
|
|
|
{
|
|
|
|
prdbg("Patching call sites %s\n", ct->name);
|
|
|
|
patch_call_sites(cs->call_start, cs->call_end, ct);
|
|
|
|
patch_paravirt_call_sites(cs->pv_start, cs->pv_end, ct);
|
|
|
|
prdbg("Patching call sites done%s\n", ct->name);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Boot time entry point: patch all builtin kernel call sites when the
 * CPU requires call depth tracking, then mark the machinery initialized.
 */
void __init callthunks_patch_builtin_calls(void)
{
	struct callthunk_sites cs = {
		.call_start = __call_sites,
		.call_end = __call_sites_end,
		.pv_start = __parainstructions,
		.pv_end = __parainstructions_end
	};

	if (!cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
		return;

	pr_info("Setting up call depth tracking\n");
	mutex_lock(&text_mutex);
	callthunks_setup(&cs, &builtin_coretext);
	thunks_initialized = true;
	mutex_unlock(&text_mutex);
}
|
2022-09-15 11:11:24 +00:00
|
|
|
|
2022-09-15 11:11:31 +00:00
|
|
|
void *callthunks_translate_call_dest(void *dest)
|
|
|
|
{
|
|
|
|
void *target;
|
|
|
|
|
|
|
|
lockdep_assert_held(&text_mutex);
|
|
|
|
|
|
|
|
if (!thunks_initialized || skip_addr(dest))
|
|
|
|
return dest;
|
|
|
|
|
|
|
|
if (!is_coretext(NULL, dest))
|
|
|
|
return dest;
|
|
|
|
|
|
|
|
target = patch_dest(dest, false);
|
|
|
|
return target ? : dest;
|
|
|
|
}
|
|
|
|
|
2023-06-09 09:04:53 +00:00
|
|
|
#ifdef CONFIG_BPF_JIT
|
2023-05-16 13:56:46 +00:00
|
|
|
/*
 * Check whether the function containing @addr already has the
 * accounting template in its padding, i.e. is reachable via a thunk.
 * @addr is rounded up to the function alignment to locate the function
 * start before inspecting the padding in front of it.
 */
static bool is_callthunk(void *addr)
{
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	void *tmpl = skl_call_thunk_template;
	unsigned long dest;

	dest = roundup((unsigned long)addr, CONFIG_FUNCTION_ALIGNMENT);
	if (!thunks_initialized || skip_addr((void *)dest))
		return false;

	return !bcmp((void *)(dest - tmpl_size), tmpl, tmpl_size);
}
|
|
|
|
|
2022-09-15 11:11:34 +00:00
|
|
|
/*
 * Emit the call depth accounting template into a BPF JIT buffer and
 * advance *@pprog past it.
 *
 * Returns the number of bytes emitted, or 0 when no accounting is
 * needed (thunks not initialized, or @func already is a call thunk).
 */
int x86_call_depth_emit_accounting(u8 **pprog, void *func)
{
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	void *tmpl = skl_call_thunk_template;

	if (!thunks_initialized)
		return 0;

	/* Is function call target a thunk? */
	if (func && is_callthunk(func))
		return 0;

	memcpy(*pprog, tmpl, tmpl_size);
	*pprog += tmpl_size;
	return tmpl_size;
}
|
|
|
|
#endif
|
|
|
|
|
2022-09-15 11:11:24 +00:00
|
|
|
#ifdef CONFIG_MODULES
|
|
|
|
/*
 * Patch the call sites of a freshly loaded module, treating the
 * module's MOD_TEXT region as the core text for patching purposes.
 * No-op until the builtin thunks have been set up.
 */
void noinline callthunks_patch_module_calls(struct callthunk_sites *cs,
					    struct module *mod)
{
	struct core_text ct = {
		.base = (unsigned long)mod->mem[MOD_TEXT].base,
		.end = (unsigned long)mod->mem[MOD_TEXT].base + mod->mem[MOD_TEXT].size,
		.name = mod->name,
	};

	if (!thunks_initialized)
		return;

	mutex_lock(&text_mutex);
	callthunks_setup(cs, &ct);
	mutex_unlock(&text_mutex);
}
|
|
|
|
#endif /* CONFIG_MODULES */
|
2022-09-15 11:11:30 +00:00
|
|
|
|
|
|
|
#if defined(CONFIG_CALL_THUNKS_DEBUG) && defined(CONFIG_DEBUG_FS)
|
|
|
|
static int callthunks_debug_show(struct seq_file *m, void *p)
|
|
|
|
{
|
|
|
|
unsigned long cpu = (unsigned long)m->private;
|
|
|
|
|
|
|
|
seq_printf(m, "C: %16llu R: %16llu S: %16llu X: %16llu\n,",
|
|
|
|
per_cpu(__x86_call_count, cpu),
|
|
|
|
per_cpu(__x86_ret_count, cpu),
|
|
|
|
per_cpu(__x86_stuffs_count, cpu),
|
|
|
|
per_cpu(__x86_ctxsw_count, cpu));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* debugfs open: bind the show routine and the per-CPU private data. */
static int callthunks_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, callthunks_debug_show, inode->i_private);
}
|
|
|
|
|
|
|
|
/* File operations for the per-CPU counter files in debugfs. */
static const struct file_operations dfs_ops = {
	.open = callthunks_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
|
|
|
|
|
|
|
|
/*
 * Create /sys/kernel/debug/callthunks/cpuN, one file per possible CPU,
 * exposing the per-CPU accounting counters.
 */
static int __init callthunks_debugfs_init(void)
{
	struct dentry *dir;
	unsigned long cpu;

	dir = debugfs_create_dir("callthunks", NULL);
	for_each_possible_cpu(cpu) {
		void *arg = (void *)cpu;
		char name[10];

		/* snprintf instead of sprintf: bound the write to the buffer */
		snprintf(name, sizeof(name), "cpu%lu", cpu);
		debugfs_create_file(name, 0644, dir, arg, &dfs_ops);
	}
	return 0;
}
__initcall(callthunks_debugfs_init);
|
|
|
|
#endif
|