2019-05-19 13:51:43 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2017-06-28 15:11:05 +00:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <string.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
|
2018-01-16 16:16:32 +00:00
|
|
|
#include "builtin.h"
|
2020-05-19 20:55:33 +00:00
|
|
|
#include "cfi.h"
|
|
|
|
#include "arch.h"
|
2017-06-28 15:11:05 +00:00
|
|
|
#include "check.h"
|
|
|
|
#include "special.h"
|
|
|
|
#include "warn.h"
|
2020-06-12 14:05:26 +00:00
|
|
|
#include "arch_elf.h"
|
2017-06-28 15:11:05 +00:00
|
|
|
|
|
|
|
#include <linux/hashtable.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
|
2019-05-13 17:01:31 +00:00
|
|
|
#define FAKE_JUMP_OFFSET -1
|
|
|
|
|
2019-06-28 01:50:46 +00:00
|
|
|
#define C_JUMP_TABLE_SECTION ".rodata..c_jump_table"
|
|
|
|
|
2017-06-28 15:11:05 +00:00
|
|
|
/*
 * One alternative instruction sequence attached to an instruction
 * (an entry in instruction::alts).
 */
struct alternative {
	struct list_head list;		/* node in the owning instruction's alts list */
	struct instruction *insn;	/* first instruction of the alternative sequence */
	/*
	 * NOTE(review): presumably set when the original instruction stream
	 * should not be validated for this alternative — confirm at the
	 * use site in the branch validation code.
	 */
	bool skip_orig;
};
|
|
|
|
|
|
|
|
/* Name of the object file currently being checked; used in warnings. */
const char *objname;

/* CFI state at a function entry point (initialized elsewhere, per arch). */
struct cfi_init_state initial_func_cfi;
|
2017-06-28 15:11:05 +00:00
|
|
|
|
2017-07-11 15:33:42 +00:00
|
|
|
/*
 * Look up the decoded instruction starting at exactly (@sec, @offset),
 * using the file's instruction hash table.
 *
 * Returns NULL if no instruction begins at that offset.
 */
struct instruction *find_insn(struct objtool_file *file,
			      struct section *sec, unsigned long offset)
{
	struct instruction *insn;

	/* Walk the hash bucket and confirm the exact (sec, offset) match. */
	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
		if (insn->sec == sec && insn->offset == offset)
			return insn;
	}

	return NULL;
}
|
|
|
|
|
|
|
|
static struct instruction *next_insn_same_sec(struct objtool_file *file,
|
|
|
|
struct instruction *insn)
|
|
|
|
{
|
|
|
|
struct instruction *next = list_next_entry(insn, list);
|
|
|
|
|
2017-06-28 15:11:07 +00:00
|
|
|
if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
|
2017-06-28 15:11:05 +00:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return next;
|
|
|
|
}
|
|
|
|
|
2018-05-10 03:39:15 +00:00
|
|
|
/*
 * Return the next instruction belonging to the same function as @insn,
 * following into the function's cold subfunction (func->cfunc) when the
 * parent's instruction stream ends.
 *
 * Returns NULL when @insn has no function, or at the true end of the
 * function (including the end of the subfunction).
 */
static struct instruction *next_insn_same_func(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);
	struct symbol *func = insn->func;

	if (!func)
		return NULL;

	/* Still inside the current symbol's instruction stream? */
	if (&next->list != &file->insn_list && next->func == func)
		return next;

	/* Check if we're already in the subfunction: */
	if (func == func->cfunc)
		return NULL;

	/* Move to the subfunction: */
	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}
|
|
|
|
|
2020-04-28 21:45:16 +00:00
|
|
|
static struct instruction *prev_insn_same_sym(struct objtool_file *file,
|
|
|
|
struct instruction *insn)
|
|
|
|
{
|
|
|
|
struct instruction *prev = list_prev_entry(insn, list);
|
|
|
|
|
|
|
|
if (&prev->list != &file->insn_list && prev->func == insn->func)
|
|
|
|
return prev;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2020-03-10 17:27:24 +00:00
|
|
|
/* Iterate all instructions of @func, including its cold subfunction. */
#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

/* Iterate the instructions covered by symbol @sym, in section order only. */
#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && &insn->list != &file->insn_list &&			\
	     insn->sec == sym->sec &&					\
	     insn->offset < sym->offset + sym->len;			\
	     insn = list_next_entry(insn, list))

/* Iterate backwards from (but not including) @insn while inside @sym. */
#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = list_prev_entry(insn, list);			\
	     &insn->list != &file->insn_list &&				\
	     insn->sec == sym->sec && insn->offset >= sym->offset;	\
	     insn = list_prev_entry(insn, list))

/* Iterate from @insn (inclusive) to the end of its section. */
#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

/* Iterate from the instruction after @insn to the end of its section. */
#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))
|
2017-06-28 15:11:05 +00:00
|
|
|
|
2020-02-10 18:32:39 +00:00
|
|
|
static bool is_static_jump(struct instruction *insn)
|
|
|
|
{
|
|
|
|
return insn->type == INSN_JUMP_CONDITIONAL ||
|
|
|
|
insn->type == INSN_JUMP_UNCONDITIONAL;
|
|
|
|
}
|
|
|
|
|
2019-07-18 01:36:52 +00:00
|
|
|
/*
 * Decide whether @insn is a sibling (tail) call: a jump that transfers
 * control to another function rather than within the current one.
 */
static bool is_sibling_call(struct instruction *insn)
{
	/* An indirect jump is either a sibling call or a jump to a table. */
	if (insn->type == INSN_JUMP_DYNAMIC)
		return list_empty(&insn->alts);

	/* Only direct conditional/unconditional jumps remain candidates. */
	if (!is_static_jump(insn))
		return false;

	/* add_jump_destinations() sets insn->call_dest for sibling calls. */
	return !!insn->call_dest;
}
|
|
|
|
|
2017-06-28 15:11:05 +00:00
|
|
|
/*
|
|
|
|
* This checks to see if the given function is a "noreturn" function.
|
|
|
|
*
|
|
|
|
* For global functions which are outside the scope of this object file, we
|
|
|
|
* have to keep a manual list of them.
|
|
|
|
*
|
|
|
|
* For local functions, we have to detect them manually by simply looking for
|
|
|
|
* the lack of a return instruction.
|
|
|
|
*/
|
2019-07-18 01:36:50 +00:00
|
|
|
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
|
|
|
|
int recursion)
|
2017-06-28 15:11:05 +00:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
struct instruction *insn;
|
|
|
|
bool empty = true;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Unfortunately these have to be hard coded because the noreturn
|
|
|
|
* attribute isn't provided in ELF data.
|
|
|
|
*/
|
|
|
|
static const char * const global_noreturns[] = {
|
|
|
|
"__stack_chk_fail",
|
|
|
|
"panic",
|
|
|
|
"do_exit",
|
|
|
|
"do_task_dead",
|
|
|
|
"__module_put_and_exit",
|
|
|
|
"complete_and_exit",
|
|
|
|
"__reiserfs_panic",
|
|
|
|
"lbug_with_loc",
|
|
|
|
"fortify_panic",
|
2018-01-10 22:22:38 +00:00
|
|
|
"usercopy_abort",
|
2018-06-19 15:47:50 +00:00
|
|
|
"machine_real_restart",
|
2019-04-04 17:17:35 +00:00
|
|
|
"rewind_stack_do_exit",
|
2019-09-23 09:02:38 +00:00
|
|
|
"kunit_try_catch_throw",
|
2017-06-28 15:11:05 +00:00
|
|
|
};
|
|
|
|
|
2019-07-18 01:36:51 +00:00
|
|
|
if (!func)
|
|
|
|
return false;
|
|
|
|
|
2017-06-28 15:11:05 +00:00
|
|
|
if (func->bind == STB_WEAK)
|
2019-07-18 01:36:50 +00:00
|
|
|
return false;
|
2017-06-28 15:11:05 +00:00
|
|
|
|
|
|
|
if (func->bind == STB_GLOBAL)
|
|
|
|
for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
|
|
|
|
if (!strcmp(func->name, global_noreturns[i]))
|
2019-07-18 01:36:50 +00:00
|
|
|
return true;
|
2017-06-28 15:11:05 +00:00
|
|
|
|
2018-05-10 03:39:15 +00:00
|
|
|
if (!func->len)
|
2019-07-18 01:36:50 +00:00
|
|
|
return false;
|
2017-06-28 15:11:05 +00:00
|
|
|
|
2018-05-10 03:39:15 +00:00
|
|
|
insn = find_insn(file, func->sec, func->offset);
|
|
|
|
if (!insn->func)
|
2019-07-18 01:36:50 +00:00
|
|
|
return false;
|
2018-05-10 03:39:15 +00:00
|
|
|
|
2020-03-10 17:27:24 +00:00
|
|
|
func_for_each_insn(file, func, insn) {
|
2017-06-28 15:11:05 +00:00
|
|
|
empty = false;
|
|
|
|
|
|
|
|
if (insn->type == INSN_RETURN)
|
2019-07-18 01:36:50 +00:00
|
|
|
return false;
|
2017-06-28 15:11:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (empty)
|
2019-07-18 01:36:50 +00:00
|
|
|
return false;
|
2017-06-28 15:11:05 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* A function can have a sibling call instead of a return. In that
|
|
|
|
* case, the function's dead-end status depends on whether the target
|
|
|
|
* of the sibling call returns.
|
|
|
|
*/
|
2020-03-10 17:27:24 +00:00
|
|
|
func_for_each_insn(file, func, insn) {
|
2019-07-18 01:36:52 +00:00
|
|
|
if (is_sibling_call(insn)) {
|
2017-06-28 15:11:05 +00:00
|
|
|
struct instruction *dest = insn->jump_dest;
|
|
|
|
|
|
|
|
if (!dest)
|
|
|
|
/* sibling call to another file */
|
2019-07-18 01:36:50 +00:00
|
|
|
return false;
|
2017-06-28 15:11:05 +00:00
|
|
|
|
2019-07-18 01:36:52 +00:00
|
|
|
/* local sibling call */
|
|
|
|
if (recursion == 5) {
|
|
|
|
/*
|
|
|
|
* Infinite recursion: two functions have
|
|
|
|
* sibling calls to each other. This is a very
|
|
|
|
* rare case. It means they aren't dead ends.
|
|
|
|
*/
|
|
|
|
return false;
|
2017-06-28 15:11:05 +00:00
|
|
|
}
|
|
|
|
|
2019-07-18 01:36:52 +00:00
|
|
|
return __dead_end_function(file, dest->func, recursion+1);
|
|
|
|
}
|
2017-06-28 15:11:05 +00:00
|
|
|
}
|
|
|
|
|
2019-07-18 01:36:50 +00:00
|
|
|
return true;
|
2017-06-28 15:11:05 +00:00
|
|
|
}
|
|
|
|
|
2019-07-18 01:36:50 +00:00
|
|
|
/*
 * Does @func never return?  Thin wrapper starting the recursion-bounded
 * check at depth zero.
 */
static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}
|
|
|
|
|
2020-03-25 13:04:45 +00:00
|
|
|
static void init_cfi_state(struct cfi_state *cfi)
|
2017-06-28 15:11:07 +00:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2017-08-29 17:51:03 +00:00
|
|
|
for (i = 0; i < CFI_NUM_REGS; i++) {
|
2020-03-25 13:04:45 +00:00
|
|
|
cfi->regs[i].base = CFI_UNDEFINED;
|
|
|
|
cfi->vals[i].base = CFI_UNDEFINED;
|
2017-08-29 17:51:03 +00:00
|
|
|
}
|
2020-03-25 13:04:45 +00:00
|
|
|
cfi->cfa.base = CFI_UNDEFINED;
|
|
|
|
cfi->drap_reg = CFI_UNDEFINED;
|
|
|
|
cfi->drap_offset = -1;
|
|
|
|
}
|
|
|
|
|
2020-03-23 17:26:03 +00:00
|
|
|
/*
 * Zero @state and reset its embedded CFI state for a fresh validation
 * pass; propagate the section's noinstr flag when doing vmlinux-wide
 * validation.
 */
static void init_insn_state(struct insn_state *state, struct section *sec)
{
	memset(state, 0, sizeof(*state));
	init_cfi_state(&state->cfi);

	/*
	 * We need the full vmlinux for noinstr validation, otherwise we can
	 * not correctly determine insn->call_dest->sec (external symbols do
	 * not have a section).
	 */
	if (vmlinux && sec)
		state->noinstr = sec->noinstr;
}
|
|
|
|
|
2017-06-28 15:11:05 +00:00
|
|
|
/*
|
|
|
|
* Call the arch-specific instruction decoder for all the instructions and add
|
|
|
|
* them to the global instruction list.
|
|
|
|
*/
|
|
|
|
static int decode_instructions(struct objtool_file *file)
|
|
|
|
{
|
|
|
|
struct section *sec;
|
|
|
|
struct symbol *func;
|
|
|
|
unsigned long offset;
|
|
|
|
struct instruction *insn;
|
2020-03-12 08:26:29 +00:00
|
|
|
unsigned long nr_insns = 0;
|
2017-06-28 15:11:05 +00:00
|
|
|
int ret;
|
|
|
|
|
2017-06-28 15:11:07 +00:00
|
|
|
for_each_sec(file, sec) {
|
2017-06-28 15:11:05 +00:00
|
|
|
|
|
|
|
if (!(sec->sh.sh_flags & SHF_EXECINSTR))
|
|
|
|
continue;
|
|
|
|
|
2017-07-11 15:33:42 +00:00
|
|
|
if (strcmp(sec->name, ".altinstr_replacement") &&
|
|
|
|
strcmp(sec->name, ".altinstr_aux") &&
|
|
|
|
strncmp(sec->name, ".discard.", 9))
|
|
|
|
sec->text = true;
|
|
|
|
|
2020-03-25 16:18:17 +00:00
|
|
|
if (!strcmp(sec->name, ".noinstr.text") ||
|
|
|
|
!strcmp(sec->name, ".entry.text"))
|
2020-03-10 17:57:41 +00:00
|
|
|
sec->noinstr = true;
|
|
|
|
|
2017-06-28 15:11:05 +00:00
|
|
|
for (offset = 0; offset < sec->len; offset += insn->len) {
|
|
|
|
insn = malloc(sizeof(*insn));
|
2017-06-28 15:11:07 +00:00
|
|
|
if (!insn) {
|
|
|
|
WARN("malloc failed");
|
|
|
|
return -1;
|
|
|
|
}
|
2017-06-28 15:11:05 +00:00
|
|
|
memset(insn, 0, sizeof(*insn));
|
|
|
|
INIT_LIST_HEAD(&insn->alts);
|
2020-03-27 15:28:47 +00:00
|
|
|
INIT_LIST_HEAD(&insn->stack_ops);
|
2020-03-25 13:04:45 +00:00
|
|
|
init_cfi_state(&insn->cfi);
|
2017-06-28 15:11:07 +00:00
|
|
|
|
2017-06-28 15:11:05 +00:00
|
|
|
insn->sec = sec;
|
|
|
|
insn->offset = offset;
|
|
|
|
|
|
|
|
ret = arch_decode_instruction(file->elf, sec, offset,
|
|
|
|
sec->len - offset,
|
|
|
|
&insn->len, &insn->type,
|
2017-06-28 15:11:07 +00:00
|
|
|
&insn->immediate,
|
2020-03-27 15:28:47 +00:00
|
|
|
&insn->stack_ops);
|
2017-06-28 15:11:05 +00:00
|
|
|
if (ret)
|
2017-10-19 16:27:24 +00:00
|
|
|
goto err;
|
2017-06-28 15:11:05 +00:00
|
|
|
|
2020-03-16 14:47:27 +00:00
|
|
|
hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
|
2017-06-28 15:11:05 +00:00
|
|
|
list_add_tail(&insn->list, &file->insn_list);
|
2020-03-12 08:26:29 +00:00
|
|
|
nr_insns++;
|
2017-06-28 15:11:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
list_for_each_entry(func, &sec->symbol_list, list) {
|
2019-07-18 01:36:48 +00:00
|
|
|
if (func->type != STT_FUNC || func->alias != func)
|
2017-06-28 15:11:05 +00:00
|
|
|
continue;
|
|
|
|
|
|
|
|
if (!find_insn(file, sec, func->offset)) {
|
|
|
|
WARN("%s(): can't find starting instruction",
|
|
|
|
func->name);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2020-03-10 17:24:59 +00:00
|
|
|
sym_for_each_insn(file, func, insn)
|
2019-07-18 01:36:48 +00:00
|
|
|
insn->func = func;
|
2017-06-28 15:11:05 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-03-12 08:26:29 +00:00
|
|
|
if (stats)
|
|
|
|
printf("nr_insns: %lu\n", nr_insns);
|
|
|
|
|
2017-06-28 15:11:05 +00:00
|
|
|
return 0;
|
2017-10-19 16:27:24 +00:00
|
|
|
|
|
|
|
err:
|
|
|
|
free(insn);
|
|
|
|
return ret;
|
2017-06-28 15:11:05 +00:00
|
|
|
}
|
|
|
|
|
2020-04-21 22:08:43 +00:00
|
|
|
static struct instruction *find_last_insn(struct objtool_file *file,
|
|
|
|
struct section *sec)
|
|
|
|
{
|
|
|
|
struct instruction *insn = NULL;
|
|
|
|
unsigned int offset;
|
|
|
|
unsigned int end = (sec->len > 10) ? sec->len - 10 : 0;
|
|
|
|
|
|
|
|
for (offset = sec->len - 1; offset >= end && !insn; offset--)
|
|
|
|
insn = find_insn(file, sec, offset);
|
|
|
|
|
|
|
|
return insn;
|
|
|
|
}
|
|
|
|
|
2017-06-28 15:11:05 +00:00
|
|
|
/*
|
objtool: Assume unannotated UD2 instructions are dead ends
Arnd reported some false positive warnings with GCC 7:
drivers/hid/wacom_wac.o: warning: objtool: wacom_bpt3_touch()+0x2a5: stack state mismatch: cfa1=7+8 cfa2=6+16
drivers/iio/adc/vf610_adc.o: warning: objtool: vf610_adc_calculate_rates() falls through to next function vf610_adc_sample_set()
drivers/pwm/pwm-hibvt.o: warning: objtool: hibvt_pwm_get_state() falls through to next function hibvt_pwm_remove()
drivers/pwm/pwm-mediatek.o: warning: objtool: mtk_pwm_config() falls through to next function mtk_pwm_enable()
drivers/spi/spi-bcm2835.o: warning: objtool: .text: unexpected end of section
drivers/spi/spi-bcm2835aux.o: warning: objtool: .text: unexpected end of section
drivers/watchdog/digicolor_wdt.o: warning: objtool: dc_wdt_get_timeleft() falls through to next function dc_wdt_restart()
When GCC 7 detects a potential divide-by-zero condition, it sometimes
inserts a UD2 instruction for the case where the divisor is zero,
instead of letting the hardware trap on the divide instruction.
Objtool doesn't consider UD2 to be fatal unless it's annotated with
unreachable(). So it considers the GCC-generated UD2 to be non-fatal,
and it tries to follow the control flow past the UD2 and gets
confused.
Previously, objtool *did* assume UD2 was always a dead end. That
changed with the following commit:
d1091c7fa3d5 ("objtool: Improve detection of BUG() and other dead ends")
The motivation behind that change was that Peter was planning on using
UD2 for __WARN(), which is *not* a dead end. However, it turns out
that some emulators rely on UD2 being fatal, so he ended up using
'ud0' instead:
9a93848fe787 ("x86/debug: Implement __WARN() using UD0")
For GCC 4.5+, it should be safe to go back to the previous assumption
that UD2 is fatal, even when it's not annotated with unreachable().
But for pre-4.5 versions of GCC, the unreachable() macro isn't
supported, so such cases of UD2 need to be explicitly annotated as
reachable.
Reported-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: d1091c7fa3d5 ("objtool: Improve detection of BUG() and other dead ends")
Link: http://lkml.kernel.org/r/e57fa9dfede25f79487da8126ee9cdf7b856db65.1501188854.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2017-07-27 20:56:53 +00:00
|
|
|
* Mark "ud2" instructions and manually annotated dead ends.
|
2017-06-28 15:11:05 +00:00
|
|
|
*/
|
|
|
|
static int add_dead_ends(struct objtool_file *file)
|
|
|
|
{
|
|
|
|
struct section *sec;
|
|
|
|
struct rela *rela;
|
|
|
|
struct instruction *insn;
|
|
|
|
|
objtool: Assume unannotated UD2 instructions are dead ends
Arnd reported some false positive warnings with GCC 7:
drivers/hid/wacom_wac.o: warning: objtool: wacom_bpt3_touch()+0x2a5: stack state mismatch: cfa1=7+8 cfa2=6+16
drivers/iio/adc/vf610_adc.o: warning: objtool: vf610_adc_calculate_rates() falls through to next function vf610_adc_sample_set()
drivers/pwm/pwm-hibvt.o: warning: objtool: hibvt_pwm_get_state() falls through to next function hibvt_pwm_remove()
drivers/pwm/pwm-mediatek.o: warning: objtool: mtk_pwm_config() falls through to next function mtk_pwm_enable()
drivers/spi/spi-bcm2835.o: warning: objtool: .text: unexpected end of section
drivers/spi/spi-bcm2835aux.o: warning: objtool: .text: unexpected end of section
drivers/watchdog/digicolor_wdt.o: warning: objtool: dc_wdt_get_timeleft() falls through to next function dc_wdt_restart()
When GCC 7 detects a potential divide-by-zero condition, it sometimes
inserts a UD2 instruction for the case where the divisor is zero,
instead of letting the hardware trap on the divide instruction.
Objtool doesn't consider UD2 to be fatal unless it's annotated with
unreachable(). So it considers the GCC-generated UD2 to be non-fatal,
and it tries to follow the control flow past the UD2 and gets
confused.
Previously, objtool *did* assume UD2 was always a dead end. That
changed with the following commit:
d1091c7fa3d5 ("objtool: Improve detection of BUG() and other dead ends")
The motivation behind that change was that Peter was planning on using
UD2 for __WARN(), which is *not* a dead end. However, it turns out
that some emulators rely on UD2 being fatal, so he ended up using
'ud0' instead:
9a93848fe787 ("x86/debug: Implement __WARN() using UD0")
For GCC 4.5+, it should be safe to go back to the previous assumption
that UD2 is fatal, even when it's not annotated with unreachable().
But for pre-4.5 versions of GCC, the unreachable() macro isn't
supported, so such cases of UD2 need to be explicitly annotated as
reachable.
Reported-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: d1091c7fa3d5 ("objtool: Improve detection of BUG() and other dead ends")
Link: http://lkml.kernel.org/r/e57fa9dfede25f79487da8126ee9cdf7b856db65.1501188854.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2017-07-27 20:56:53 +00:00
|
|
|
/*
|
|
|
|
* By default, "ud2" is a dead end unless otherwise annotated, because
|
|
|
|
* GCC 7 inserts it for certain divide-by-zero cases.
|
|
|
|
*/
|
|
|
|
for_each_insn(file, insn)
|
|
|
|
if (insn->type == INSN_BUG)
|
|
|
|
insn->dead_end = true;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check for manually annotated dead ends.
|
|
|
|
*/
|
2017-06-28 15:11:05 +00:00
|
|
|
sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
|
|
|
|
if (!sec)
|
objtool: Assume unannotated UD2 instructions are dead ends
Arnd reported some false positive warnings with GCC 7:
drivers/hid/wacom_wac.o: warning: objtool: wacom_bpt3_touch()+0x2a5: stack state mismatch: cfa1=7+8 cfa2=6+16
drivers/iio/adc/vf610_adc.o: warning: objtool: vf610_adc_calculate_rates() falls through to next function vf610_adc_sample_set()
drivers/pwm/pwm-hibvt.o: warning: objtool: hibvt_pwm_get_state() falls through to next function hibvt_pwm_remove()
drivers/pwm/pwm-mediatek.o: warning: objtool: mtk_pwm_config() falls through to next function mtk_pwm_enable()
drivers/spi/spi-bcm2835.o: warning: objtool: .text: unexpected end of section
drivers/spi/spi-bcm2835aux.o: warning: objtool: .text: unexpected end of section
drivers/watchdog/digicolor_wdt.o: warning: objtool: dc_wdt_get_timeleft() falls through to next function dc_wdt_restart()
When GCC 7 detects a potential divide-by-zero condition, it sometimes
inserts a UD2 instruction for the case where the divisor is zero,
instead of letting the hardware trap on the divide instruction.
Objtool doesn't consider UD2 to be fatal unless it's annotated with
unreachable(). So it considers the GCC-generated UD2 to be non-fatal,
and it tries to follow the control flow past the UD2 and gets
confused.
Previously, objtool *did* assume UD2 was always a dead end. That
changed with the following commit:
d1091c7fa3d5 ("objtool: Improve detection of BUG() and other dead ends")
The motivation behind that change was that Peter was planning on using
UD2 for __WARN(), which is *not* a dead end. However, it turns out
that some emulators rely on UD2 being fatal, so he ended up using
'ud0' instead:
9a93848fe787 ("x86/debug: Implement __WARN() using UD0")
For GCC 4.5+, it should be safe to go back to the previous assumption
that UD2 is fatal, even when it's not annotated with unreachable().
But for pre-4.5 versions of GCC, the unreachable() macro isn't
supported, so such cases of UD2 need to be explicitly annotated as
reachable.
Reported-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: d1091c7fa3d5 ("objtool: Improve detection of BUG() and other dead ends")
Link: http://lkml.kernel.org/r/e57fa9dfede25f79487da8126ee9cdf7b856db65.1501188854.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2017-07-27 20:56:53 +00:00
|
|
|
goto reachable;
|
2017-06-28 15:11:05 +00:00
|
|
|
|
|
|
|
list_for_each_entry(rela, &sec->rela_list, list) {
|
|
|
|
if (rela->sym->type != STT_SECTION) {
|
|
|
|
WARN("unexpected relocation symbol type in %s", sec->name);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
insn = find_insn(file, rela->sym->sec, rela->addend);
|
|
|
|
if (insn)
|
|
|
|
insn = list_prev_entry(insn, list);
|
|
|
|
else if (rela->addend == rela->sym->sec->len) {
|
2020-04-21 22:08:43 +00:00
|
|
|
insn = find_last_insn(file, rela->sym->sec);
|
|
|
|
if (!insn) {
|
2017-06-28 15:11:05 +00:00
|
|
|
WARN("can't find unreachable insn at %s+0x%x",
|
|
|
|
rela->sym->sec->name, rela->addend);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
WARN("can't find unreachable insn at %s+0x%x",
|
|
|
|
rela->sym->sec->name, rela->addend);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
insn->dead_end = true;
|
|
|
|
}
|
|
|
|
|
objtool: Assume unannotated UD2 instructions are dead ends
Arnd reported some false positive warnings with GCC 7:
drivers/hid/wacom_wac.o: warning: objtool: wacom_bpt3_touch()+0x2a5: stack state mismatch: cfa1=7+8 cfa2=6+16
drivers/iio/adc/vf610_adc.o: warning: objtool: vf610_adc_calculate_rates() falls through to next function vf610_adc_sample_set()
drivers/pwm/pwm-hibvt.o: warning: objtool: hibvt_pwm_get_state() falls through to next function hibvt_pwm_remove()
drivers/pwm/pwm-mediatek.o: warning: objtool: mtk_pwm_config() falls through to next function mtk_pwm_enable()
drivers/spi/spi-bcm2835.o: warning: objtool: .text: unexpected end of section
drivers/spi/spi-bcm2835aux.o: warning: objtool: .text: unexpected end of section
drivers/watchdog/digicolor_wdt.o: warning: objtool: dc_wdt_get_timeleft() falls through to next function dc_wdt_restart()
When GCC 7 detects a potential divide-by-zero condition, it sometimes
inserts a UD2 instruction for the case where the divisor is zero,
instead of letting the hardware trap on the divide instruction.
Objtool doesn't consider UD2 to be fatal unless it's annotated with
unreachable(). So it considers the GCC-generated UD2 to be non-fatal,
and it tries to follow the control flow past the UD2 and gets
confused.
Previously, objtool *did* assume UD2 was always a dead end. That
changed with the following commit:
d1091c7fa3d5 ("objtool: Improve detection of BUG() and other dead ends")
The motivation behind that change was that Peter was planning on using
UD2 for __WARN(), which is *not* a dead end. However, it turns out
that some emulators rely on UD2 being fatal, so he ended up using
'ud0' instead:
9a93848fe787 ("x86/debug: Implement __WARN() using UD0")
For GCC 4.5+, it should be safe to go back to the previous assumption
that UD2 is fatal, even when it's not annotated with unreachable().
But for pre-4.5 versions of GCC, the unreachable() macro isn't
supported, so such cases of UD2 need to be explicitly annotated as
reachable.
Reported-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: d1091c7fa3d5 ("objtool: Improve detection of BUG() and other dead ends")
Link: http://lkml.kernel.org/r/e57fa9dfede25f79487da8126ee9cdf7b856db65.1501188854.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2017-07-27 20:56:53 +00:00
|
|
|
reachable:
|
|
|
|
/*
|
|
|
|
* These manually annotated reachable checks are needed for GCC 4.4,
|
|
|
|
* where the Linux unreachable() macro isn't supported. In that case
|
|
|
|
* GCC doesn't know the "ud2" is fatal, so it generates code as if it's
|
|
|
|
* not a dead end.
|
|
|
|
*/
|
|
|
|
sec = find_section_by_name(file->elf, ".rela.discard.reachable");
|
|
|
|
if (!sec)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
list_for_each_entry(rela, &sec->rela_list, list) {
|
|
|
|
if (rela->sym->type != STT_SECTION) {
|
|
|
|
WARN("unexpected relocation symbol type in %s", sec->name);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
insn = find_insn(file, rela->sym->sec, rela->addend);
|
|
|
|
if (insn)
|
|
|
|
insn = list_prev_entry(insn, list);
|
|
|
|
else if (rela->addend == rela->sym->sec->len) {
|
2020-04-21 22:08:43 +00:00
|
|
|
insn = find_last_insn(file, rela->sym->sec);
|
|
|
|
if (!insn) {
|
objtool: Assume unannotated UD2 instructions are dead ends
Arnd reported some false positive warnings with GCC 7:
drivers/hid/wacom_wac.o: warning: objtool: wacom_bpt3_touch()+0x2a5: stack state mismatch: cfa1=7+8 cfa2=6+16
drivers/iio/adc/vf610_adc.o: warning: objtool: vf610_adc_calculate_rates() falls through to next function vf610_adc_sample_set()
drivers/pwm/pwm-hibvt.o: warning: objtool: hibvt_pwm_get_state() falls through to next function hibvt_pwm_remove()
drivers/pwm/pwm-mediatek.o: warning: objtool: mtk_pwm_config() falls through to next function mtk_pwm_enable()
drivers/spi/spi-bcm2835.o: warning: objtool: .text: unexpected end of section
drivers/spi/spi-bcm2835aux.o: warning: objtool: .text: unexpected end of section
drivers/watchdog/digicolor_wdt.o: warning: objtool: dc_wdt_get_timeleft() falls through to next function dc_wdt_restart()
When GCC 7 detects a potential divide-by-zero condition, it sometimes
inserts a UD2 instruction for the case where the divisor is zero,
instead of letting the hardware trap on the divide instruction.
Objtool doesn't consider UD2 to be fatal unless it's annotated with
unreachable(). So it considers the GCC-generated UD2 to be non-fatal,
and it tries to follow the control flow past the UD2 and gets
confused.
Previously, objtool *did* assume UD2 was always a dead end. That
changed with the following commit:
d1091c7fa3d5 ("objtool: Improve detection of BUG() and other dead ends")
The motivation behind that change was that Peter was planning on using
UD2 for __WARN(), which is *not* a dead end. However, it turns out
that some emulators rely on UD2 being fatal, so he ended up using
'ud0' instead:
9a93848fe787 ("x86/debug: Implement __WARN() using UD0")
For GCC 4.5+, it should be safe to go back to the previous assumption
that UD2 is fatal, even when it's not annotated with unreachable().
But for pre-4.5 versions of GCC, the unreachable() macro isn't
supported, so such cases of UD2 need to be explicitly annotated as
reachable.
Reported-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: d1091c7fa3d5 ("objtool: Improve detection of BUG() and other dead ends")
Link: http://lkml.kernel.org/r/e57fa9dfede25f79487da8126ee9cdf7b856db65.1501188854.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2017-07-27 20:56:53 +00:00
|
|
|
WARN("can't find reachable insn at %s+0x%x",
|
|
|
|
rela->sym->sec->name, rela->addend);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
WARN("can't find reachable insn at %s+0x%x",
|
|
|
|
rela->sym->sec->name, rela->addend);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
insn->dead_end = false;
|
|
|
|
}
|
|
|
|
|
2017-06-28 15:11:05 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Warnings shouldn't be reported for ignored functions.
|
|
|
|
*/
|
|
|
|
static void add_ignores(struct objtool_file *file)
|
|
|
|
{
|
|
|
|
struct instruction *insn;
|
|
|
|
struct section *sec;
|
|
|
|
struct symbol *func;
|
2019-02-27 13:04:13 +00:00
|
|
|
struct rela *rela;
|
2017-06-28 15:11:05 +00:00
|
|
|
|
2019-02-27 13:04:13 +00:00
|
|
|
sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
|
|
|
|
if (!sec)
|
|
|
|
return;
|
2017-06-28 15:11:05 +00:00
|
|
|
|
2019-02-27 13:04:13 +00:00
|
|
|
list_for_each_entry(rela, &sec->rela_list, list) {
|
|
|
|
switch (rela->sym->type) {
|
|
|
|
case STT_FUNC:
|
|
|
|
func = rela->sym;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case STT_SECTION:
|
2020-02-18 03:41:54 +00:00
|
|
|
func = find_func_by_offset(rela->sym->sec, rela->addend);
|
|
|
|
if (!func)
|
2017-06-28 15:11:05 +00:00
|
|
|
continue;
|
2019-02-27 13:04:13 +00:00
|
|
|
break;
|
2017-06-28 15:11:05 +00:00
|
|
|
|
2019-02-27 13:04:13 +00:00
|
|
|
default:
|
|
|
|
WARN("unexpected relocation symbol type in %s: %d", sec->name, rela->sym->type);
|
|
|
|
continue;
|
2017-06-28 15:11:05 +00:00
|
|
|
}
|
2019-02-27 13:04:13 +00:00
|
|
|
|
2020-03-10 17:27:24 +00:00
|
|
|
func_for_each_insn(file, func, insn)
|
2019-02-27 13:04:13 +00:00
|
|
|
insn->ignore = true;
|
2017-06-28 15:11:05 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-02-25 11:50:09 +00:00
|
|
|
/*
|
|
|
|
* This is a whitelist of functions that is allowed to be called with AC set.
|
|
|
|
* The list is meant to be minimal and only contains compiler instrumentation
|
|
|
|
* ABI and a few functions used to implement *_{to,from}_user() functions.
|
|
|
|
*
|
|
|
|
* These functions must not directly change AC, but may PUSHF/POPF.
|
|
|
|
*/
|
|
|
|
static const char *uaccess_safe_builtin[] = {
|
|
|
|
/* KASAN */
|
|
|
|
"kasan_report",
|
|
|
|
"check_memory_region",
|
|
|
|
/* KASAN out-of-line */
|
|
|
|
"__asan_loadN_noabort",
|
|
|
|
"__asan_load1_noabort",
|
|
|
|
"__asan_load2_noabort",
|
|
|
|
"__asan_load4_noabort",
|
|
|
|
"__asan_load8_noabort",
|
|
|
|
"__asan_load16_noabort",
|
|
|
|
"__asan_storeN_noabort",
|
|
|
|
"__asan_store1_noabort",
|
|
|
|
"__asan_store2_noabort",
|
|
|
|
"__asan_store4_noabort",
|
|
|
|
"__asan_store8_noabort",
|
|
|
|
"__asan_store16_noabort",
|
|
|
|
/* KASAN in-line */
|
|
|
|
"__asan_report_load_n_noabort",
|
|
|
|
"__asan_report_load1_noabort",
|
|
|
|
"__asan_report_load2_noabort",
|
|
|
|
"__asan_report_load4_noabort",
|
|
|
|
"__asan_report_load8_noabort",
|
|
|
|
"__asan_report_load16_noabort",
|
|
|
|
"__asan_report_store_n_noabort",
|
|
|
|
"__asan_report_store1_noabort",
|
|
|
|
"__asan_report_store2_noabort",
|
|
|
|
"__asan_report_store4_noabort",
|
|
|
|
"__asan_report_store8_noabort",
|
|
|
|
"__asan_report_store16_noabort",
|
2019-11-14 18:02:57 +00:00
|
|
|
/* KCSAN */
|
2020-03-25 16:41:57 +00:00
|
|
|
"__kcsan_check_access",
|
2019-11-14 18:02:57 +00:00
|
|
|
"kcsan_found_watchpoint",
|
|
|
|
"kcsan_setup_watchpoint",
|
2020-03-25 16:41:57 +00:00
|
|
|
"kcsan_check_scoped_accesses",
|
2020-04-24 15:47:30 +00:00
|
|
|
"kcsan_disable_current",
|
|
|
|
"kcsan_enable_current_nowarn",
|
2019-11-14 18:02:57 +00:00
|
|
|
/* KCSAN/TSAN */
|
|
|
|
"__tsan_func_entry",
|
|
|
|
"__tsan_func_exit",
|
|
|
|
"__tsan_read_range",
|
|
|
|
"__tsan_write_range",
|
|
|
|
"__tsan_read1",
|
|
|
|
"__tsan_read2",
|
|
|
|
"__tsan_read4",
|
|
|
|
"__tsan_read8",
|
|
|
|
"__tsan_read16",
|
|
|
|
"__tsan_write1",
|
|
|
|
"__tsan_write2",
|
|
|
|
"__tsan_write4",
|
|
|
|
"__tsan_write8",
|
|
|
|
"__tsan_write16",
|
2019-02-25 11:50:09 +00:00
|
|
|
/* KCOV */
|
|
|
|
"write_comp_data",
|
2020-04-29 19:09:04 +00:00
|
|
|
"check_kcov_mode",
|
2019-02-25 11:50:09 +00:00
|
|
|
"__sanitizer_cov_trace_pc",
|
|
|
|
"__sanitizer_cov_trace_const_cmp1",
|
|
|
|
"__sanitizer_cov_trace_const_cmp2",
|
|
|
|
"__sanitizer_cov_trace_const_cmp4",
|
|
|
|
"__sanitizer_cov_trace_const_cmp8",
|
|
|
|
"__sanitizer_cov_trace_cmp1",
|
|
|
|
"__sanitizer_cov_trace_cmp2",
|
|
|
|
"__sanitizer_cov_trace_cmp4",
|
|
|
|
"__sanitizer_cov_trace_cmp8",
|
2020-02-16 18:07:49 +00:00
|
|
|
"__sanitizer_cov_trace_switch",
|
2019-02-25 11:50:09 +00:00
|
|
|
/* UBSAN */
|
|
|
|
"ubsan_type_mismatch_common",
|
|
|
|
"__ubsan_handle_type_mismatch",
|
|
|
|
"__ubsan_handle_type_mismatch_v1",
|
2019-10-21 13:11:49 +00:00
|
|
|
"__ubsan_handle_shift_out_of_bounds",
|
2019-02-25 11:50:09 +00:00
|
|
|
/* misc */
|
|
|
|
"csum_partial_copy_generic",
|
|
|
|
"__memcpy_mcsafe",
|
2019-07-18 01:36:46 +00:00
|
|
|
"mcsafe_handle_tail",
|
2019-02-25 11:50:09 +00:00
|
|
|
"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
|
|
|
|
NULL
|
|
|
|
};
|
|
|
|
|
|
|
|
static void add_uaccess_safe(struct objtool_file *file)
|
|
|
|
{
|
|
|
|
struct symbol *func;
|
|
|
|
const char **name;
|
|
|
|
|
|
|
|
if (!uaccess)
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (name = uaccess_safe_builtin; *name; name++) {
|
|
|
|
func = find_symbol_by_name(file->elf, *name);
|
|
|
|
if (!func)
|
|
|
|
continue;
|
|
|
|
|
2019-07-18 01:36:48 +00:00
|
|
|
func->uaccess_safe = true;
|
2017-06-28 15:11:05 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-01-11 21:46:24 +00:00
|
|
|
/*
|
|
|
|
* FIXME: For now, just ignore any alternatives which add retpolines. This is
|
|
|
|
* a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
|
|
|
|
* But it at least allows objtool to understand the control flow *around* the
|
|
|
|
* retpoline.
|
|
|
|
*/
|
2019-03-18 13:33:07 +00:00
|
|
|
static int add_ignore_alternatives(struct objtool_file *file)
|
2018-01-11 21:46:24 +00:00
|
|
|
{
|
|
|
|
struct section *sec;
|
|
|
|
struct rela *rela;
|
|
|
|
struct instruction *insn;
|
|
|
|
|
2019-03-18 13:33:07 +00:00
|
|
|
sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
|
2018-01-11 21:46:24 +00:00
|
|
|
if (!sec)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
list_for_each_entry(rela, &sec->rela_list, list) {
|
|
|
|
if (rela->sym->type != STT_SECTION) {
|
|
|
|
WARN("unexpected relocation symbol type in %s", sec->name);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
insn = find_insn(file, rela->sym->sec, rela->addend);
|
|
|
|
if (!insn) {
|
2019-03-18 13:33:07 +00:00
|
|
|
WARN("bad .discard.ignore_alts entry");
|
2018-01-11 21:46:24 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
insn->ignore_alts = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-06-28 15:11:05 +00:00
|
|
|
/*
|
|
|
|
* Find the destination instructions for all jumps.
|
|
|
|
*/
|
|
|
|
static int add_jump_destinations(struct objtool_file *file)
|
|
|
|
{
|
|
|
|
struct instruction *insn;
|
|
|
|
struct rela *rela;
|
|
|
|
struct section *dest_sec;
|
|
|
|
unsigned long dest_off;
|
|
|
|
|
|
|
|
for_each_insn(file, insn) {
|
2020-02-10 18:32:39 +00:00
|
|
|
if (!is_static_jump(insn))
|
2017-06-28 15:11:05 +00:00
|
|
|
continue;
|
|
|
|
|
2019-05-13 17:01:31 +00:00
|
|
|
if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET)
|
2017-06-28 15:11:05 +00:00
|
|
|
continue;
|
|
|
|
|
2020-03-12 10:23:36 +00:00
|
|
|
rela = find_rela_by_dest_range(file->elf, insn->sec,
|
|
|
|
insn->offset, insn->len);
|
2017-06-28 15:11:05 +00:00
|
|
|
if (!rela) {
|
|
|
|
dest_sec = insn->sec;
|
2020-03-27 15:28:45 +00:00
|
|
|
dest_off = arch_jump_destination(insn);
|
2017-06-28 15:11:05 +00:00
|
|
|
} else if (rela->sym->type == STT_SECTION) {
|
|
|
|
dest_sec = rela->sym->sec;
|
2020-03-27 15:28:45 +00:00
|
|
|
dest_off = arch_dest_rela_offset(rela->addend);
|
2017-06-28 15:11:05 +00:00
|
|
|
} else if (rela->sym->sec->idx) {
|
|
|
|
dest_sec = rela->sym->sec;
|
2020-03-27 15:28:45 +00:00
|
|
|
dest_off = rela->sym->sym.st_value +
|
|
|
|
arch_dest_rela_offset(rela->addend);
|
2018-01-11 21:46:23 +00:00
|
|
|
} else if (strstr(rela->sym->name, "_indirect_thunk_")) {
|
|
|
|
/*
|
|
|
|
* Retpoline jumps are really dynamic jumps in
|
|
|
|
* disguise, so convert them accordingly.
|
|
|
|
*/
|
2019-07-18 01:36:57 +00:00
|
|
|
if (insn->type == INSN_JUMP_UNCONDITIONAL)
|
|
|
|
insn->type = INSN_JUMP_DYNAMIC;
|
|
|
|
else
|
|
|
|
insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
|
|
|
|
|
2018-01-16 09:24:06 +00:00
|
|
|
insn->retpoline_safe = true;
|
2018-01-11 21:46:23 +00:00
|
|
|
continue;
|
2017-06-28 15:11:05 +00:00
|
|
|
} else {
|
2019-07-18 01:36:52 +00:00
|
|
|
/* external sibling call */
|
2019-03-06 11:58:15 +00:00
|
|
|
insn->call_dest = rela->sym;
|
2017-06-28 15:11:05 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
insn->jump_dest = find_insn(file, dest_sec, dest_off);
|
|
|
|
if (!insn->jump_dest) {
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is a special case where an alt instruction
|
|
|
|
* jumps past the end of the section. These are
|
|
|
|
* handled later in handle_group_alt().
|
|
|
|
*/
|
|
|
|
if (!strcmp(insn->sec->name, ".altinstr_replacement"))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
|
|
|
|
insn->sec, insn->offset, dest_sec->name,
|
|
|
|
dest_off);
|
|
|
|
return -1;
|
|
|
|
}
|
2018-06-01 12:23:51 +00:00
|
|
|
|
|
|
|
/*
|
2019-03-06 11:58:15 +00:00
|
|
|
* Cross-function jump.
|
2018-06-01 12:23:51 +00:00
|
|
|
*/
|
|
|
|
if (insn->func && insn->jump_dest->func &&
|
2019-03-06 11:58:15 +00:00
|
|
|
insn->func != insn->jump_dest->func) {
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For GCC 8+, create parent/child links for any cold
|
|
|
|
* subfunctions. This is _mostly_ redundant with a
|
|
|
|
* similar initialization in read_symbols().
|
|
|
|
*
|
|
|
|
* If a function has aliases, we want the *first* such
|
|
|
|
* function in the symbol table to be the subfunction's
|
|
|
|
* parent. In that case we overwrite the
|
|
|
|
* initialization done in read_symbols().
|
|
|
|
*
|
|
|
|
* However this code can't completely replace the
|
|
|
|
* read_symbols() code because this doesn't detect the
|
|
|
|
* case where the parent function's only reference to a
|
2019-07-18 01:36:53 +00:00
|
|
|
* subfunction is through a jump table.
|
2019-03-06 11:58:15 +00:00
|
|
|
*/
|
|
|
|
if (!strstr(insn->func->name, ".cold.") &&
|
|
|
|
strstr(insn->jump_dest->func->name, ".cold.")) {
|
|
|
|
insn->func->cfunc = insn->jump_dest->func;
|
|
|
|
insn->jump_dest->func->pfunc = insn->func;
|
|
|
|
|
|
|
|
} else if (insn->jump_dest->func->pfunc != insn->func->pfunc &&
|
|
|
|
insn->jump_dest->offset == insn->jump_dest->func->offset) {
|
|
|
|
|
2019-07-18 01:36:52 +00:00
|
|
|
/* internal sibling call */
|
2019-03-06 11:58:15 +00:00
|
|
|
insn->call_dest = insn->jump_dest->func;
|
|
|
|
}
|
2018-06-01 12:23:51 +00:00
|
|
|
}
|
2017-06-28 15:11:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-04-14 10:36:12 +00:00
|
|
|
static void remove_insn_ops(struct instruction *insn)
|
|
|
|
{
|
|
|
|
struct stack_op *op, *tmp;
|
|
|
|
|
|
|
|
list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) {
|
|
|
|
list_del(&op->list);
|
|
|
|
free(op);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-06-28 15:11:05 +00:00
|
|
|
/*
|
|
|
|
* Find the destination instructions for all calls.
|
|
|
|
*/
|
|
|
|
static int add_call_destinations(struct objtool_file *file)
|
|
|
|
{
|
|
|
|
struct instruction *insn;
|
|
|
|
unsigned long dest_off;
|
|
|
|
struct rela *rela;
|
|
|
|
|
|
|
|
for_each_insn(file, insn) {
|
|
|
|
if (insn->type != INSN_CALL)
|
|
|
|
continue;
|
|
|
|
|
2020-03-12 10:23:36 +00:00
|
|
|
rela = find_rela_by_dest_range(file->elf, insn->sec,
|
|
|
|
insn->offset, insn->len);
|
2017-06-28 15:11:05 +00:00
|
|
|
if (!rela) {
|
2020-03-27 15:28:45 +00:00
|
|
|
dest_off = arch_jump_destination(insn);
|
2020-02-18 03:41:54 +00:00
|
|
|
insn->call_dest = find_func_by_offset(insn->sec, dest_off);
|
|
|
|
if (!insn->call_dest)
|
|
|
|
insn->call_dest = find_symbol_by_offset(insn->sec, dest_off);
|
2018-01-30 04:00:39 +00:00
|
|
|
|
2020-02-18 03:41:54 +00:00
|
|
|
if (insn->ignore)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (!insn->call_dest) {
|
2020-04-14 10:36:12 +00:00
|
|
|
WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset);
|
2017-06-28 15:11:05 +00:00
|
|
|
return -1;
|
|
|
|
}
|
2018-01-30 04:00:39 +00:00
|
|
|
|
2020-02-18 03:41:54 +00:00
|
|
|
if (insn->func && insn->call_dest->type != STT_FUNC) {
|
|
|
|
WARN_FUNC("unsupported call to non-function",
|
|
|
|
insn->sec, insn->offset);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2017-06-28 15:11:05 +00:00
|
|
|
} else if (rela->sym->type == STT_SECTION) {
|
2020-03-27 15:28:45 +00:00
|
|
|
dest_off = arch_dest_rela_offset(rela->addend);
|
2020-02-18 03:41:54 +00:00
|
|
|
insn->call_dest = find_func_by_offset(rela->sym->sec,
|
2020-03-27 15:28:45 +00:00
|
|
|
dest_off);
|
2020-02-18 03:41:54 +00:00
|
|
|
if (!insn->call_dest) {
|
2020-03-27 15:28:45 +00:00
|
|
|
WARN_FUNC("can't find call dest symbol at %s+0x%lx",
|
2017-06-28 15:11:05 +00:00
|
|
|
insn->sec, insn->offset,
|
|
|
|
rela->sym->sec->name,
|
2020-03-27 15:28:45 +00:00
|
|
|
dest_off);
|
2017-06-28 15:11:05 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
insn->call_dest = rela->sym;
|
2020-04-14 10:36:12 +00:00
|
|
|
|
2020-06-12 14:05:26 +00:00
|
|
|
/*
|
|
|
|
* Many compilers cannot disable KCOV with a function attribute
|
|
|
|
* so they need a little help, NOP out any KCOV calls from noinstr
|
|
|
|
* text.
|
|
|
|
*/
|
|
|
|
if (insn->sec->noinstr &&
|
|
|
|
!strncmp(insn->call_dest->name, "__sanitizer_cov_", 16)) {
|
|
|
|
if (rela) {
|
|
|
|
rela->type = R_NONE;
|
|
|
|
elf_write_rela(file->elf, rela);
|
|
|
|
}
|
|
|
|
|
|
|
|
elf_write_insn(file->elf, insn->sec,
|
|
|
|
insn->offset, insn->len,
|
|
|
|
arch_nop_insn(insn->len));
|
|
|
|
insn->type = INSN_NOP;
|
|
|
|
}
|
|
|
|
|
2020-04-14 10:36:12 +00:00
|
|
|
/*
|
|
|
|
* Whatever stack impact regular CALLs have, should be undone
|
|
|
|
* by the RETURN of the called function.
|
|
|
|
*
|
|
|
|
* Annotated intra-function calls retain the stack_ops but
|
|
|
|
* are converted to JUMP, see read_intra_function_calls().
|
|
|
|
*/
|
|
|
|
remove_insn_ops(insn);
|
2017-06-28 15:11:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The .alternatives section requires some extra special care, over and above
|
|
|
|
* what other special sections require:
|
|
|
|
*
|
|
|
|
* 1. Because alternatives are patched in-place, we need to insert a fake jump
|
|
|
|
* instruction at the end so that validate_branch() skips all the original
|
|
|
|
* replaced instructions when validating the new instruction path.
|
|
|
|
*
|
|
|
|
* 2. An added wrinkle is that the new instruction length might be zero. In
|
|
|
|
* that case the old instructions are replaced with noops. We simulate that
|
|
|
|
* by creating a fake jump as the only new instruction.
|
|
|
|
*
|
|
|
|
* 3. In some cases, the alternative section includes an instruction which
|
|
|
|
* conditionally jumps to the _end_ of the entry. We have to modify these
|
|
|
|
* jumps' destinations to point back to .text rather than the end of the
|
|
|
|
* entry in .altinstr_replacement.
|
|
|
|
*/
|
|
|
|
static int handle_group_alt(struct objtool_file *file,
|
|
|
|
struct special_alt *special_alt,
|
|
|
|
struct instruction *orig_insn,
|
|
|
|
struct instruction **new_insn)
|
|
|
|
{
|
2020-04-14 10:36:11 +00:00
|
|
|
static unsigned int alt_group_next_index = 1;
|
2018-01-30 04:00:40 +00:00
|
|
|
struct instruction *last_orig_insn, *last_new_insn, *insn, *fake_jump = NULL;
|
2020-04-14 10:36:11 +00:00
|
|
|
unsigned int alt_group = alt_group_next_index++;
|
2017-06-28 15:11:05 +00:00
|
|
|
unsigned long dest_off;
|
|
|
|
|
|
|
|
last_orig_insn = NULL;
|
|
|
|
insn = orig_insn;
|
|
|
|
sec_for_each_insn_from(file, insn) {
|
|
|
|
if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
|
|
|
|
break;
|
|
|
|
|
2020-04-14 10:36:11 +00:00
|
|
|
insn->alt_group = alt_group;
|
2017-06-28 15:11:05 +00:00
|
|
|
last_orig_insn = insn;
|
|
|
|
}
|
|
|
|
|
2018-01-30 04:00:40 +00:00
|
|
|
if (next_insn_same_sec(file, last_orig_insn)) {
|
|
|
|
fake_jump = malloc(sizeof(*fake_jump));
|
|
|
|
if (!fake_jump) {
|
|
|
|
WARN("malloc failed");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
memset(fake_jump, 0, sizeof(*fake_jump));
|
|
|
|
INIT_LIST_HEAD(&fake_jump->alts);
|
2020-03-27 15:28:47 +00:00
|
|
|
INIT_LIST_HEAD(&fake_jump->stack_ops);
|
2020-03-25 13:04:45 +00:00
|
|
|
init_cfi_state(&fake_jump->cfi);
|
2018-01-30 04:00:40 +00:00
|
|
|
|
|
|
|
fake_jump->sec = special_alt->new_sec;
|
2019-05-13 17:01:31 +00:00
|
|
|
fake_jump->offset = FAKE_JUMP_OFFSET;
|
2018-01-30 04:00:40 +00:00
|
|
|
fake_jump->type = INSN_JUMP_UNCONDITIONAL;
|
|
|
|
fake_jump->jump_dest = list_next_entry(last_orig_insn, list);
|
2019-05-13 17:01:31 +00:00
|
|
|
fake_jump->func = orig_insn->func;
|
2017-06-28 15:11:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!special_alt->new_len) {
|
2018-01-30 04:00:40 +00:00
|
|
|
if (!fake_jump) {
|
|
|
|
WARN("%s: empty alternative at end of section",
|
|
|
|
special_alt->orig_sec->name);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2017-06-28 15:11:05 +00:00
|
|
|
*new_insn = fake_jump;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
last_new_insn = NULL;
|
2020-04-14 10:36:11 +00:00
|
|
|
alt_group = alt_group_next_index++;
|
2017-06-28 15:11:05 +00:00
|
|
|
insn = *new_insn;
|
|
|
|
sec_for_each_insn_from(file, insn) {
|
|
|
|
if (insn->offset >= special_alt->new_off + special_alt->new_len)
|
|
|
|
break;
|
|
|
|
|
|
|
|
last_new_insn = insn;
|
|
|
|
|
2018-01-30 04:00:39 +00:00
|
|
|
insn->ignore = orig_insn->ignore_alts;
|
2019-02-25 09:31:24 +00:00
|
|
|
insn->func = orig_insn->func;
|
2020-04-14 10:36:11 +00:00
|
|
|
insn->alt_group = alt_group;
|
2018-01-30 04:00:39 +00:00
|
|
|
|
2020-02-10 18:32:40 +00:00
|
|
|
/*
|
|
|
|
* Since alternative replacement code is copy/pasted by the
|
|
|
|
* kernel after applying relocations, generally such code can't
|
|
|
|
* have relative-address relocation references to outside the
|
|
|
|
* .altinstr_replacement section, unless the arch's
|
|
|
|
* alternatives code can adjust the relative offsets
|
|
|
|
* accordingly.
|
|
|
|
*
|
|
|
|
* The x86 alternatives code adjusts the offsets only when it
|
|
|
|
* encounters a branch instruction at the very beginning of the
|
|
|
|
* replacement group.
|
|
|
|
*/
|
|
|
|
if ((insn->offset != special_alt->new_off ||
|
|
|
|
(insn->type != INSN_CALL && !is_static_jump(insn))) &&
|
2020-03-12 10:23:36 +00:00
|
|
|
find_rela_by_dest_range(file->elf, insn->sec, insn->offset, insn->len)) {
|
2020-02-10 18:32:40 +00:00
|
|
|
|
|
|
|
WARN_FUNC("unsupported relocation in alternatives section",
|
|
|
|
insn->sec, insn->offset);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2020-02-10 18:32:39 +00:00
|
|
|
if (!is_static_jump(insn))
|
2017-06-28 15:11:05 +00:00
|
|
|
continue;
|
|
|
|
|
|
|
|
if (!insn->immediate)
|
|
|
|
continue;
|
|
|
|
|
2020-03-27 15:28:45 +00:00
|
|
|
dest_off = arch_jump_destination(insn);
|
2018-01-30 04:00:40 +00:00
|
|
|
if (dest_off == special_alt->new_off + special_alt->new_len) {
|
|
|
|
if (!fake_jump) {
|
|
|
|
WARN("%s: alternative jump to end of section",
|
|
|
|
special_alt->orig_sec->name);
|
|
|
|
return -1;
|
|
|
|
}
|
2017-06-28 15:11:05 +00:00
|
|
|
insn->jump_dest = fake_jump;
|
2018-01-30 04:00:40 +00:00
|
|
|
}
|
2017-06-28 15:11:05 +00:00
|
|
|
|
|
|
|
if (!insn->jump_dest) {
|
|
|
|
WARN_FUNC("can't find alternative jump destination",
|
|
|
|
insn->sec, insn->offset);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!last_new_insn) {
|
|
|
|
WARN_FUNC("can't find last new alternative instruction",
|
|
|
|
special_alt->new_sec, special_alt->new_off);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2018-01-30 04:00:40 +00:00
|
|
|
if (fake_jump)
|
|
|
|
list_add(&fake_jump->list, &last_new_insn->list);
|
2017-06-28 15:11:05 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* A jump table entry can either convert a nop to a jump or a jump to a nop.
|
|
|
|
* If the original instruction is a jump, make the alt entry an effective nop
|
|
|
|
* by just skipping the original instruction.
|
|
|
|
*/
|
|
|
|
static int handle_jump_alt(struct objtool_file *file,
|
|
|
|
struct special_alt *special_alt,
|
|
|
|
struct instruction *orig_insn,
|
|
|
|
struct instruction **new_insn)
|
|
|
|
{
|
|
|
|
if (orig_insn->type == INSN_NOP)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (orig_insn->type != INSN_JUMP_UNCONDITIONAL) {
|
|
|
|
WARN_FUNC("unsupported instruction at jump label",
|
|
|
|
orig_insn->sec, orig_insn->offset);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
*new_insn = list_next_entry(orig_insn, list);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Read all the special sections which have alternate instructions which can be
|
|
|
|
* patched in or redirected to at runtime. Each instruction having alternate
|
|
|
|
* instruction(s) has them added to its insn->alts list, which will be
|
|
|
|
* traversed in validate_branch().
|
|
|
|
*/
|
|
|
|
static int add_special_section_alts(struct objtool_file *file)
|
|
|
|
{
|
|
|
|
struct list_head special_alts;
|
|
|
|
struct instruction *orig_insn, *new_insn;
|
|
|
|
struct special_alt *special_alt, *tmp;
|
|
|
|
struct alternative *alt;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = special_get_alts(file->elf, &special_alts);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
|
|
|
|
|
|
|
|
orig_insn = find_insn(file, special_alt->orig_sec,
|
|
|
|
special_alt->orig_off);
|
|
|
|
if (!orig_insn) {
|
|
|
|
WARN_FUNC("special: can't find orig instruction",
|
|
|
|
special_alt->orig_sec, special_alt->orig_off);
|
|
|
|
ret = -1;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
new_insn = NULL;
|
|
|
|
if (!special_alt->group || special_alt->new_len) {
|
|
|
|
new_insn = find_insn(file, special_alt->new_sec,
|
|
|
|
special_alt->new_off);
|
|
|
|
if (!new_insn) {
|
|
|
|
WARN_FUNC("special: can't find new instruction",
|
|
|
|
special_alt->new_sec,
|
|
|
|
special_alt->new_off);
|
|
|
|
ret = -1;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (special_alt->group) {
|
2020-03-27 15:28:41 +00:00
|
|
|
if (!special_alt->orig_len) {
|
|
|
|
WARN_FUNC("empty alternative entry",
|
|
|
|
orig_insn->sec, orig_insn->offset);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2017-06-28 15:11:05 +00:00
|
|
|
ret = handle_group_alt(file, special_alt, orig_insn,
|
|
|
|
&new_insn);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
} else if (special_alt->jump_or_nop) {
|
|
|
|
ret = handle_jump_alt(file, special_alt, orig_insn,
|
|
|
|
&new_insn);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2018-01-11 21:46:24 +00:00
|
|
|
alt = malloc(sizeof(*alt));
|
|
|
|
if (!alt) {
|
|
|
|
WARN("malloc failed");
|
|
|
|
ret = -1;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2017-06-28 15:11:05 +00:00
|
|
|
alt->insn = new_insn;
|
2019-03-01 10:19:03 +00:00
|
|
|
alt->skip_orig = special_alt->skip_orig;
|
2019-02-25 11:50:09 +00:00
|
|
|
orig_insn->ignore_alts |= special_alt->skip_alt;
|
2017-06-28 15:11:05 +00:00
|
|
|
list_add_tail(&alt->list, &orig_insn->alts);
|
|
|
|
|
|
|
|
list_del(&special_alt->list);
|
|
|
|
free(special_alt);
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-07-18 01:36:53 +00:00
|
|
|
static int add_jump_table(struct objtool_file *file, struct instruction *insn,
|
objtool: Support repeated uses of the same C jump table
This fixes objtool for both a GCC issue and a Clang issue:
1) GCC issue:
kernel/bpf/core.o: warning: objtool: ___bpf_prog_run()+0x8d5: sibling call from callable instruction with modified stack frame
With CONFIG_RETPOLINE=n, GCC is doing the following optimization in
___bpf_prog_run().
Before:
select_insn:
jmp *jumptable(,%rax,8)
...
ALU64_ADD_X:
...
jmp select_insn
ALU_ADD_X:
...
jmp select_insn
After:
select_insn:
jmp *jumptable(, %rax, 8)
...
ALU64_ADD_X:
...
jmp *jumptable(, %rax, 8)
ALU_ADD_X:
...
jmp *jumptable(, %rax, 8)
This confuses objtool. It has never seen multiple indirect jump
sites which use the same jump table.
For GCC switch tables, the only way of detecting the size of a table
is by continuing to scan for more tables. The size of the previous
table can only be determined after another switch table is found, or
when the scan reaches the end of the function.
That logic was reused for C jump tables, and was based on the
assumption that each jump table only has a single jump site. The
above optimization breaks that assumption.
2) Clang issue:
drivers/usb/misc/sisusbvga/sisusb.o: warning: objtool: sisusb_write_mem_bulk()+0x588: can't find switch jump table
With clang 9, code can be generated where a function contains two
indirect jump instructions which use the same switch table.
The fix is the same for both issues: split the jump table parsing into
two passes.
In the first pass, locate the heads of all switch tables for the
function and mark their locations.
In the second pass, parse the switch tables and add them.
Fixes: e55a73251da3 ("bpf: Fix ORC unwinding in non-JIT BPF code")
Reported-by: Randy Dunlap <rdunlap@infradead.org>
Reported-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Nick Desaulniers <ndesaulniers@google.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/e995befaada9d4d8b2cf788ff3f566ba900d2b4d.1563413318.git.jpoimboe@redhat.com
Co-developed-by: Josh Poimboeuf <jpoimboe@redhat.com>
2019-07-18 01:36:54 +00:00
|
|
|
struct rela *table)
|
2017-06-28 15:11:05 +00:00
|
|
|
{
|
|
|
|
struct rela *rela = table;
|
2019-07-18 01:36:53 +00:00
|
|
|
struct instruction *dest_insn;
|
2017-06-28 15:11:05 +00:00
|
|
|
struct alternative *alt;
|
2018-05-10 22:48:49 +00:00
|
|
|
struct symbol *pfunc = insn->func->pfunc;
|
|
|
|
unsigned int prev_offset = 0;
|
2017-06-28 15:11:05 +00:00
|
|
|
|
2019-07-18 01:36:53 +00:00
|
|
|
/*
|
|
|
|
* Each @rela is a switch table relocation which points to the target
|
|
|
|
* instruction.
|
|
|
|
*/
|
|
|
|
list_for_each_entry_from(rela, &table->sec->rela_list, list) {
|
objtool: Support repeated uses of the same C jump table
This fixes objtool for both a GCC issue and a Clang issue:
1) GCC issue:
kernel/bpf/core.o: warning: objtool: ___bpf_prog_run()+0x8d5: sibling call from callable instruction with modified stack frame
With CONFIG_RETPOLINE=n, GCC is doing the following optimization in
___bpf_prog_run().
Before:
select_insn:
jmp *jumptable(,%rax,8)
...
ALU64_ADD_X:
...
jmp select_insn
ALU_ADD_X:
...
jmp select_insn
After:
select_insn:
jmp *jumptable(, %rax, 8)
...
ALU64_ADD_X:
...
jmp *jumptable(, %rax, 8)
ALU_ADD_X:
...
jmp *jumptable(, %rax, 8)
This confuses objtool. It has never seen multiple indirect jump
sites which use the same jump table.
For GCC switch tables, the only way of detecting the size of a table
is by continuing to scan for more tables. The size of the previous
table can only be determined after another switch table is found, or
when the scan reaches the end of the function.
That logic was reused for C jump tables, and was based on the
assumption that each jump table only has a single jump site. The
above optimization breaks that assumption.
2) Clang issue:
drivers/usb/misc/sisusbvga/sisusb.o: warning: objtool: sisusb_write_mem_bulk()+0x588: can't find switch jump table
With clang 9, code can be generated where a function contains two
indirect jump instructions which use the same switch table.
The fix is the same for both issues: split the jump table parsing into
two passes.
In the first pass, locate the heads of all switch tables for the
function and mark their locations.
In the second pass, parse the switch tables and add them.
Fixes: e55a73251da3 ("bpf: Fix ORC unwinding in non-JIT BPF code")
Reported-by: Randy Dunlap <rdunlap@infradead.org>
Reported-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Nick Desaulniers <ndesaulniers@google.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/e995befaada9d4d8b2cf788ff3f566ba900d2b4d.1563413318.git.jpoimboe@redhat.com
Co-developed-by: Josh Poimboeuf <jpoimboe@redhat.com>
2019-07-18 01:36:54 +00:00
|
|
|
|
|
|
|
/* Check for the end of the table: */
|
|
|
|
if (rela != table && rela->jump_table_start)
|
2017-06-28 15:11:05 +00:00
|
|
|
break;
|
|
|
|
|
2019-07-18 01:36:53 +00:00
|
|
|
/* Make sure the table entries are consecutive: */
|
2018-05-10 22:48:49 +00:00
|
|
|
if (prev_offset && rela->offset != prev_offset + 8)
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* Detect function pointers from contiguous objects: */
|
|
|
|
if (rela->sym->sec == pfunc->sec &&
|
|
|
|
rela->addend == pfunc->offset)
|
|
|
|
break;
|
|
|
|
|
2019-07-18 01:36:53 +00:00
|
|
|
dest_insn = find_insn(file, rela->sym->sec, rela->addend);
|
|
|
|
if (!dest_insn)
|
2017-06-28 15:11:05 +00:00
|
|
|
break;
|
|
|
|
|
2019-07-18 01:36:53 +00:00
|
|
|
/* Make sure the destination is in the same function: */
|
objtool: Fix seg fault on bad switch table entry
In one rare case, Clang generated the following code:
5ca: 83 e0 21 and $0x21,%eax
5cd: b9 04 00 00 00 mov $0x4,%ecx
5d2: ff 24 c5 00 00 00 00 jmpq *0x0(,%rax,8)
5d5: R_X86_64_32S .rodata+0x38
which uses the corresponding jump table relocations:
000000000038 000200000001 R_X86_64_64 0000000000000000 .text + 834
000000000040 000200000001 R_X86_64_64 0000000000000000 .text + 5d9
000000000048 000200000001 R_X86_64_64 0000000000000000 .text + b96
000000000050 000200000001 R_X86_64_64 0000000000000000 .text + b96
000000000058 000200000001 R_X86_64_64 0000000000000000 .text + b96
000000000060 000200000001 R_X86_64_64 0000000000000000 .text + b96
000000000068 000200000001 R_X86_64_64 0000000000000000 .text + b96
000000000070 000200000001 R_X86_64_64 0000000000000000 .text + b96
000000000078 000200000001 R_X86_64_64 0000000000000000 .text + b96
000000000080 000200000001 R_X86_64_64 0000000000000000 .text + b96
000000000088 000200000001 R_X86_64_64 0000000000000000 .text + b96
000000000090 000200000001 R_X86_64_64 0000000000000000 .text + b96
000000000098 000200000001 R_X86_64_64 0000000000000000 .text + b96
0000000000a0 000200000001 R_X86_64_64 0000000000000000 .text + b96
0000000000a8 000200000001 R_X86_64_64 0000000000000000 .text + b96
0000000000b0 000200000001 R_X86_64_64 0000000000000000 .text + b96
0000000000b8 000200000001 R_X86_64_64 0000000000000000 .text + b96
0000000000c0 000200000001 R_X86_64_64 0000000000000000 .text + b96
0000000000c8 000200000001 R_X86_64_64 0000000000000000 .text + b96
0000000000d0 000200000001 R_X86_64_64 0000000000000000 .text + b96
0000000000d8 000200000001 R_X86_64_64 0000000000000000 .text + b96
0000000000e0 000200000001 R_X86_64_64 0000000000000000 .text + b96
0000000000e8 000200000001 R_X86_64_64 0000000000000000 .text + b96
0000000000f0 000200000001 R_X86_64_64 0000000000000000 .text + b96
0000000000f8 000200000001 R_X86_64_64 0000000000000000 .text + b96
000000000100 000200000001 R_X86_64_64 0000000000000000 .text + b96
000000000108 000200000001 R_X86_64_64 0000000000000000 .text + b96
000000000110 000200000001 R_X86_64_64 0000000000000000 .text + b96
000000000118 000200000001 R_X86_64_64 0000000000000000 .text + b96
000000000120 000200000001 R_X86_64_64 0000000000000000 .text + b96
000000000128 000200000001 R_X86_64_64 0000000000000000 .text + b96
000000000130 000200000001 R_X86_64_64 0000000000000000 .text + b96
000000000138 000200000001 R_X86_64_64 0000000000000000 .text + 82f
000000000140 000200000001 R_X86_64_64 0000000000000000 .text + 828
Since %eax was masked with 0x21, only the first two and the last two
entries are possible.
Objtool doesn't actually emulate all the code, so it isn't smart enough
to know that all the middle entries aren't reachable. They point to the
NOP padding area after the end of the function, so objtool seg faulted
when it tried to dereference a NULL insn->func.
After this fix, objtool still gives an "unreachable" error because it
stops reading the jump table when it encounters the bad addresses:
/home/jpoimboe/objtool-tests/adm1275.o: warning: objtool: adm1275_probe()+0x828: unreachable instruction
While the above code is technically correct, it's very wasteful of
memory -- it uses 34 jump table entries when only 4 are needed. It's
also not possible for objtool to validate this type of switch table
because the unused entries point outside the function and objtool has no
way of determining if that's intentional. Hopefully the Clang folks can
fix it.
Reported-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Nick Desaulniers <ndesaulniers@google.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/a9db88eec4f1ca089e040989846961748238b6d8.1563413318.git.jpoimboe@redhat.com
2019-07-18 01:36:55 +00:00
|
|
|
if (!dest_insn->func || dest_insn->func->pfunc != pfunc)
|
2018-05-10 03:39:15 +00:00
|
|
|
break;
|
2017-06-28 15:11:05 +00:00
|
|
|
|
|
|
|
alt = malloc(sizeof(*alt));
|
|
|
|
if (!alt) {
|
|
|
|
WARN("malloc failed");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2019-07-18 01:36:53 +00:00
|
|
|
alt->insn = dest_insn;
|
2017-06-28 15:11:05 +00:00
|
|
|
list_add_tail(&alt->list, &insn->alts);
|
2018-05-10 22:48:49 +00:00
|
|
|
prev_offset = rela->offset;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!prev_offset) {
|
|
|
|
WARN_FUNC("can't find switch jump table",
|
|
|
|
insn->sec, insn->offset);
|
|
|
|
return -1;
|
2017-06-28 15:11:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2019-07-18 01:36:53 +00:00
|
|
|
* find_jump_table() - Given a dynamic jump, find the switch jump table in
|
2017-06-28 15:11:05 +00:00
|
|
|
* .rodata associated with it.
|
|
|
|
*
|
|
|
|
* There are 3 basic patterns:
|
|
|
|
*
|
|
|
|
* 1. jmpq *[rodata addr](,%reg,8)
|
|
|
|
*
|
|
|
|
* This is the most common case by far. It jumps to an address in a simple
|
|
|
|
* jump table which is stored in .rodata.
|
|
|
|
*
|
|
|
|
* 2. jmpq *[rodata addr](%rip)
|
|
|
|
*
|
|
|
|
* This is caused by a rare GCC quirk, currently only seen in three driver
|
|
|
|
* functions in the kernel, only with certain obscure non-distro configs.
|
|
|
|
*
|
|
|
|
* As part of an optimization, GCC makes a copy of an existing switch jump
|
|
|
|
* table, modifies it, and then hard-codes the jump (albeit with an indirect
|
|
|
|
* jump) to use a single entry in the table. The rest of the jump table and
|
|
|
|
* some of its jump targets remain as dead code.
|
|
|
|
*
|
|
|
|
* In such a case we can just crudely ignore all unreachable instruction
|
|
|
|
* warnings for the entire object file. Ideally we would just ignore them
|
|
|
|
* for the function, but that would require redesigning the code quite a
|
|
|
|
* bit. And honestly that's just not worth doing: unreachable instruction
|
|
|
|
* warnings are of questionable value anyway, and this is such a rare issue.
|
|
|
|
*
|
|
|
|
* 3. mov [rodata addr],%reg1
|
|
|
|
* ... some instructions ...
|
|
|
|
* jmpq *(%reg1,%reg2,8)
|
|
|
|
*
|
|
|
|
* This is a fairly uncommon pattern which is new for GCC 6. As of this
|
|
|
|
* writing, there are 11 occurrences of it in the allmodconfig kernel.
|
|
|
|
*
|
2018-02-08 13:02:32 +00:00
|
|
|
* As of GCC 7 there are quite a few more of these and the 'in between' code
|
|
|
|
* is significant. Esp. with KASAN enabled some of the code between the mov
|
|
|
|
* and jmpq uses .rodata itself, which can confuse things.
|
|
|
|
*
|
2017-06-28 15:11:05 +00:00
|
|
|
* TODO: Once we have DWARF CFI and smarter instruction decoding logic,
|
|
|
|
* ensure the same register is used in the mov and jump instructions.
|
2018-02-08 13:02:32 +00:00
|
|
|
*
|
|
|
|
* NOTE: RETPOLINE made it harder still to decode dynamic jumps.
|
2017-06-28 15:11:05 +00:00
|
|
|
*/
|
2019-07-18 01:36:53 +00:00
|
|
|
/*
 * Starting from a dynamic-jump instruction, scan backwards through the
 * function for the relocation that points at its switch jump table in
 * .rodata.  Returns the table's first rela, or NULL if none is found.
 */
static struct rela *find_jump_table(struct objtool_file *file,
				      struct symbol *func,
				      struct instruction *insn)
{
	struct rela *text_rela, *table_rela;
	struct instruction *dest_insn, *orig_insn = insn;
	struct section *table_sec;
	unsigned long table_offset;

	/*
	 * Backward search using the @first_jump_src links, these help avoid
	 * much of the 'in between' code. Which avoids us getting confused by
	 * it.
	 */
	for (;
	     insn && insn->func && insn->func->pfunc == func;
	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {

		/* Another dynamic jump means we've hit the previous table's user. */
		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
			break;

		/* allow small jumps within the range */
		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
		    insn->jump_dest &&
		    (insn->jump_dest->offset <= insn->offset ||
		     insn->jump_dest->offset > orig_insn->offset))
			break;

		/* look for a relocation which references .rodata */
		text_rela = find_rela_by_dest_range(file->elf, insn->sec,
						    insn->offset, insn->len);
		if (!text_rela || text_rela->sym->type != STT_SECTION ||
		    !text_rela->sym->sec->rodata)
			continue;

		table_offset = text_rela->addend;
		table_sec = text_rela->sym->sec;

		/* PC-relative addend is biased by the 4-byte displacement size. */
		if (text_rela->type == R_X86_64_PC32)
			table_offset += 4;

		/*
		 * Make sure the .rodata address isn't associated with a
		 * symbol.  GCC jump tables are anonymous data.
		 *
		 * Also support C jump tables which are in the same format as
		 * switch jump tables.  For objtool to recognize them, they
		 * need to be placed in the C_JUMP_TABLE_SECTION section.  They
		 * have symbols associated with them.
		 */
		if (find_symbol_containing(table_sec, table_offset) &&
		    strcmp(table_sec->name, C_JUMP_TABLE_SECTION))
			continue;

		/*
		 * Each table entry has a rela associated with it.  The rela
		 * should reference text in the same function as the original
		 * instruction.
		 */
		table_rela = find_rela_by_dest(file->elf, table_sec, table_offset);
		if (!table_rela)
			continue;
		dest_insn = find_insn(file, table_rela->sym->sec, table_rela->addend);
		if (!dest_insn || !dest_insn->func || dest_insn->func->pfunc != func)
			continue;

		/*
		 * Use of RIP-relative switch jumps is quite rare, and
		 * indicates a rare GCC quirk/bug which can leave dead code
		 * behind.
		 */
		if (text_rela->type == R_X86_64_PC32)
			file->ignore_unreachables = true;

		return table_rela;
	}

	return NULL;
}
|
|
|
|
|
objtool: Support repeated uses of the same C jump table
This fixes objtool for both a GCC issue and a Clang issue:
1) GCC issue:
kernel/bpf/core.o: warning: objtool: ___bpf_prog_run()+0x8d5: sibling call from callable instruction with modified stack frame
With CONFIG_RETPOLINE=n, GCC is doing the following optimization in
___bpf_prog_run().
Before:
select_insn:
jmp *jumptable(,%rax,8)
...
ALU64_ADD_X:
...
jmp select_insn
ALU_ADD_X:
...
jmp select_insn
After:
select_insn:
jmp *jumptable(, %rax, 8)
...
ALU64_ADD_X:
...
jmp *jumptable(, %rax, 8)
ALU_ADD_X:
...
jmp *jumptable(, %rax, 8)
This confuses objtool. It has never seen multiple indirect jump
sites which use the same jump table.
For GCC switch tables, the only way of detecting the size of a table
is by continuing to scan for more tables. The size of the previous
table can only be determined after another switch table is found, or
when the scan reaches the end of the function.
That logic was reused for C jump tables, and was based on the
assumption that each jump table only has a single jump site. The
above optimization breaks that assumption.
2) Clang issue:
drivers/usb/misc/sisusbvga/sisusb.o: warning: objtool: sisusb_write_mem_bulk()+0x588: can't find switch jump table
With clang 9, code can be generated where a function contains two
indirect jump instructions which use the same switch table.
The fix is the same for both issues: split the jump table parsing into
two passes.
In the first pass, locate the heads of all switch tables for the
function and mark their locations.
In the second pass, parse the switch tables and add them.
Fixes: e55a73251da3 ("bpf: Fix ORC unwinding in non-JIT BPF code")
Reported-by: Randy Dunlap <rdunlap@infradead.org>
Reported-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Nick Desaulniers <ndesaulniers@google.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/e995befaada9d4d8b2cf788ff3f566ba900d2b4d.1563413318.git.jpoimboe@redhat.com
Co-developed-by: Josh Poimboeuf <jpoimboe@redhat.com>
2019-07-18 01:36:54 +00:00
|
|
|
/*
|
|
|
|
* First pass: Mark the head of each jump table so that in the next pass,
|
|
|
|
* we know when a given jump table ends and the next one starts.
|
|
|
|
*/
|
|
|
|
static void mark_func_jump_tables(struct objtool_file *file,
|
|
|
|
struct symbol *func)
|
2017-06-28 15:11:05 +00:00
|
|
|
{
|
objtool: Support repeated uses of the same C jump table
This fixes objtool for both a GCC issue and a Clang issue:
1) GCC issue:
kernel/bpf/core.o: warning: objtool: ___bpf_prog_run()+0x8d5: sibling call from callable instruction with modified stack frame
With CONFIG_RETPOLINE=n, GCC is doing the following optimization in
___bpf_prog_run().
Before:
select_insn:
jmp *jumptable(,%rax,8)
...
ALU64_ADD_X:
...
jmp select_insn
ALU_ADD_X:
...
jmp select_insn
After:
select_insn:
jmp *jumptable(, %rax, 8)
...
ALU64_ADD_X:
...
jmp *jumptable(, %rax, 8)
ALU_ADD_X:
...
jmp *jumptable(, %rax, 8)
This confuses objtool. It has never seen multiple indirect jump
sites which use the same jump table.
For GCC switch tables, the only way of detecting the size of a table
is by continuing to scan for more tables. The size of the previous
table can only be determined after another switch table is found, or
when the scan reaches the end of the function.
That logic was reused for C jump tables, and was based on the
assumption that each jump table only has a single jump site. The
above optimization breaks that assumption.
2) Clang issue:
drivers/usb/misc/sisusbvga/sisusb.o: warning: objtool: sisusb_write_mem_bulk()+0x588: can't find switch jump table
With clang 9, code can be generated where a function contains two
indirect jump instructions which use the same switch table.
The fix is the same for both issues: split the jump table parsing into
two passes.
In the first pass, locate the heads of all switch tables for the
function and mark their locations.
In the second pass, parse the switch tables and add them.
Fixes: e55a73251da3 ("bpf: Fix ORC unwinding in non-JIT BPF code")
Reported-by: Randy Dunlap <rdunlap@infradead.org>
Reported-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Nick Desaulniers <ndesaulniers@google.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/e995befaada9d4d8b2cf788ff3f566ba900d2b4d.1563413318.git.jpoimboe@redhat.com
Co-developed-by: Josh Poimboeuf <jpoimboe@redhat.com>
2019-07-18 01:36:54 +00:00
|
|
|
struct instruction *insn, *last = NULL;
|
|
|
|
struct rela *rela;
|
2017-06-28 15:11:05 +00:00
|
|
|
|
2020-03-10 17:27:24 +00:00
|
|
|
func_for_each_insn(file, func, insn) {
|
2018-02-08 13:02:32 +00:00
|
|
|
if (!last)
|
|
|
|
last = insn;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Store back-pointers for unconditional forward jumps such
|
2019-07-18 01:36:53 +00:00
|
|
|
* that find_jump_table() can back-track using those and
|
2018-02-08 13:02:32 +00:00
|
|
|
* avoid some potentially confusing code.
|
|
|
|
*/
|
|
|
|
if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
|
|
|
|
insn->offset > last->offset &&
|
|
|
|
insn->jump_dest->offset > insn->offset &&
|
|
|
|
!insn->jump_dest->first_jump_src) {
|
|
|
|
|
|
|
|
insn->jump_dest->first_jump_src = insn;
|
|
|
|
last = insn->jump_dest;
|
|
|
|
}
|
|
|
|
|
2017-06-28 15:11:05 +00:00
|
|
|
if (insn->type != INSN_JUMP_DYNAMIC)
|
|
|
|
continue;
|
|
|
|
|
2019-07-18 01:36:53 +00:00
|
|
|
rela = find_jump_table(file, func, insn);
|
objtool: Support repeated uses of the same C jump table
This fixes objtool for both a GCC issue and a Clang issue:
1) GCC issue:
kernel/bpf/core.o: warning: objtool: ___bpf_prog_run()+0x8d5: sibling call from callable instruction with modified stack frame
With CONFIG_RETPOLINE=n, GCC is doing the following optimization in
___bpf_prog_run().
Before:
select_insn:
jmp *jumptable(,%rax,8)
...
ALU64_ADD_X:
...
jmp select_insn
ALU_ADD_X:
...
jmp select_insn
After:
select_insn:
jmp *jumptable(, %rax, 8)
...
ALU64_ADD_X:
...
jmp *jumptable(, %rax, 8)
ALU_ADD_X:
...
jmp *jumptable(, %rax, 8)
This confuses objtool. It has never seen multiple indirect jump
sites which use the same jump table.
For GCC switch tables, the only way of detecting the size of a table
is by continuing to scan for more tables. The size of the previous
table can only be determined after another switch table is found, or
when the scan reaches the end of the function.
That logic was reused for C jump tables, and was based on the
assumption that each jump table only has a single jump site. The
above optimization breaks that assumption.
2) Clang issue:
drivers/usb/misc/sisusbvga/sisusb.o: warning: objtool: sisusb_write_mem_bulk()+0x588: can't find switch jump table
With clang 9, code can be generated where a function contains two
indirect jump instructions which use the same switch table.
The fix is the same for both issues: split the jump table parsing into
two passes.
In the first pass, locate the heads of all switch tables for the
function and mark their locations.
In the second pass, parse the switch tables and add them.
Fixes: e55a73251da3 ("bpf: Fix ORC unwinding in non-JIT BPF code")
Reported-by: Randy Dunlap <rdunlap@infradead.org>
Reported-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Nick Desaulniers <ndesaulniers@google.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/e995befaada9d4d8b2cf788ff3f566ba900d2b4d.1563413318.git.jpoimboe@redhat.com
Co-developed-by: Josh Poimboeuf <jpoimboe@redhat.com>
2019-07-18 01:36:54 +00:00
|
|
|
if (rela) {
|
|
|
|
rela->jump_table_start = true;
|
|
|
|
insn->jump_table = rela;
|
2017-06-28 15:11:05 +00:00
|
|
|
}
|
|
|
|
}
|
objtool: Support repeated uses of the same C jump table
This fixes objtool for both a GCC issue and a Clang issue:
1) GCC issue:
kernel/bpf/core.o: warning: objtool: ___bpf_prog_run()+0x8d5: sibling call from callable instruction with modified stack frame
With CONFIG_RETPOLINE=n, GCC is doing the following optimization in
___bpf_prog_run().
Before:
select_insn:
jmp *jumptable(,%rax,8)
...
ALU64_ADD_X:
...
jmp select_insn
ALU_ADD_X:
...
jmp select_insn
After:
select_insn:
jmp *jumptable(, %rax, 8)
...
ALU64_ADD_X:
...
jmp *jumptable(, %rax, 8)
ALU_ADD_X:
...
jmp *jumptable(, %rax, 8)
This confuses objtool. It has never seen multiple indirect jump
sites which use the same jump table.
For GCC switch tables, the only way of detecting the size of a table
is by continuing to scan for more tables. The size of the previous
table can only be determined after another switch table is found, or
when the scan reaches the end of the function.
That logic was reused for C jump tables, and was based on the
assumption that each jump table only has a single jump site. The
above optimization breaks that assumption.
2) Clang issue:
drivers/usb/misc/sisusbvga/sisusb.o: warning: objtool: sisusb_write_mem_bulk()+0x588: can't find switch jump table
With clang 9, code can be generated where a function contains two
indirect jump instructions which use the same switch table.
The fix is the same for both issues: split the jump table parsing into
two passes.
In the first pass, locate the heads of all switch tables for the
function and mark their locations.
In the second pass, parse the switch tables and add them.
Fixes: e55a73251da3 ("bpf: Fix ORC unwinding in non-JIT BPF code")
Reported-by: Randy Dunlap <rdunlap@infradead.org>
Reported-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Nick Desaulniers <ndesaulniers@google.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/e995befaada9d4d8b2cf788ff3f566ba900d2b4d.1563413318.git.jpoimboe@redhat.com
Co-developed-by: Josh Poimboeuf <jpoimboe@redhat.com>
2019-07-18 01:36:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int add_func_jump_tables(struct objtool_file *file,
|
|
|
|
struct symbol *func)
|
|
|
|
{
|
|
|
|
struct instruction *insn;
|
|
|
|
int ret;
|
|
|
|
|
2020-03-10 17:27:24 +00:00
|
|
|
func_for_each_insn(file, func, insn) {
|
objtool: Support repeated uses of the same C jump table
This fixes objtool for both a GCC issue and a Clang issue:
1) GCC issue:
kernel/bpf/core.o: warning: objtool: ___bpf_prog_run()+0x8d5: sibling call from callable instruction with modified stack frame
With CONFIG_RETPOLINE=n, GCC is doing the following optimization in
___bpf_prog_run().
Before:
select_insn:
jmp *jumptable(,%rax,8)
...
ALU64_ADD_X:
...
jmp select_insn
ALU_ADD_X:
...
jmp select_insn
After:
select_insn:
jmp *jumptable(, %rax, 8)
...
ALU64_ADD_X:
...
jmp *jumptable(, %rax, 8)
ALU_ADD_X:
...
jmp *jumptable(, %rax, 8)
This confuses objtool. It has never seen multiple indirect jump
sites which use the same jump table.
For GCC switch tables, the only way of detecting the size of a table
is by continuing to scan for more tables. The size of the previous
table can only be determined after another switch table is found, or
when the scan reaches the end of the function.
That logic was reused for C jump tables, and was based on the
assumption that each jump table only has a single jump site. The
above optimization breaks that assumption.
2) Clang issue:
drivers/usb/misc/sisusbvga/sisusb.o: warning: objtool: sisusb_write_mem_bulk()+0x588: can't find switch jump table
With clang 9, code can be generated where a function contains two
indirect jump instructions which use the same switch table.
The fix is the same for both issues: split the jump table parsing into
two passes.
In the first pass, locate the heads of all switch tables for the
function and mark their locations.
In the second pass, parse the switch tables and add them.
Fixes: e55a73251da3 ("bpf: Fix ORC unwinding in non-JIT BPF code")
Reported-by: Randy Dunlap <rdunlap@infradead.org>
Reported-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Nick Desaulniers <ndesaulniers@google.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/e995befaada9d4d8b2cf788ff3f566ba900d2b4d.1563413318.git.jpoimboe@redhat.com
Co-developed-by: Josh Poimboeuf <jpoimboe@redhat.com>
2019-07-18 01:36:54 +00:00
|
|
|
if (!insn->jump_table)
|
|
|
|
continue;
|
2017-06-28 15:11:05 +00:00
|
|
|
|
objtool: Support repeated uses of the same C jump table
This fixes objtool for both a GCC issue and a Clang issue:
1) GCC issue:
kernel/bpf/core.o: warning: objtool: ___bpf_prog_run()+0x8d5: sibling call from callable instruction with modified stack frame
With CONFIG_RETPOLINE=n, GCC is doing the following optimization in
___bpf_prog_run().
Before:
select_insn:
jmp *jumptable(,%rax,8)
...
ALU64_ADD_X:
...
jmp select_insn
ALU_ADD_X:
...
jmp select_insn
After:
select_insn:
jmp *jumptable(, %rax, 8)
...
ALU64_ADD_X:
...
jmp *jumptable(, %rax, 8)
ALU_ADD_X:
...
jmp *jumptable(, %rax, 8)
This confuses objtool. It has never seen multiple indirect jump
sites which use the same jump table.
For GCC switch tables, the only way of detecting the size of a table
is by continuing to scan for more tables. The size of the previous
table can only be determined after another switch table is found, or
when the scan reaches the end of the function.
That logic was reused for C jump tables, and was based on the
assumption that each jump table only has a single jump site. The
above optimization breaks that assumption.
2) Clang issue:
drivers/usb/misc/sisusbvga/sisusb.o: warning: objtool: sisusb_write_mem_bulk()+0x588: can't find switch jump table
With clang 9, code can be generated where a function contains two
indirect jump instructions which use the same switch table.
The fix is the same for both issues: split the jump table parsing into
two passes.
In the first pass, locate the heads of all switch tables for the
function and mark their locations.
In the second pass, parse the switch tables and add them.
Fixes: e55a73251da3 ("bpf: Fix ORC unwinding in non-JIT BPF code")
Reported-by: Randy Dunlap <rdunlap@infradead.org>
Reported-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Nick Desaulniers <ndesaulniers@google.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/e995befaada9d4d8b2cf788ff3f566ba900d2b4d.1563413318.git.jpoimboe@redhat.com
Co-developed-by: Josh Poimboeuf <jpoimboe@redhat.com>
2019-07-18 01:36:54 +00:00
|
|
|
ret = add_jump_table(file, insn, insn->jump_table);
|
2017-06-28 15:11:05 +00:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For some switch statements, gcc generates a jump table in the .rodata
|
|
|
|
* section which contains a list of addresses within the function to jump to.
|
|
|
|
* This finds these jump tables and adds them to the insn->alts lists.
|
|
|
|
*/
|
2019-07-18 01:36:53 +00:00
|
|
|
static int add_jump_table_alts(struct objtool_file *file)
|
2017-06-28 15:11:05 +00:00
|
|
|
{
|
|
|
|
struct section *sec;
|
|
|
|
struct symbol *func;
|
|
|
|
int ret;
|
|
|
|
|
2018-09-07 13:12:01 +00:00
|
|
|
if (!file->rodata)
|
2017-06-28 15:11:05 +00:00
|
|
|
return 0;
|
|
|
|
|
2017-06-28 15:11:07 +00:00
|
|
|
for_each_sec(file, sec) {
|
2017-06-28 15:11:05 +00:00
|
|
|
list_for_each_entry(func, &sec->symbol_list, list) {
|
|
|
|
if (func->type != STT_FUNC)
|
|
|
|
continue;
|
|
|
|
|
objtool: Support repeated uses of the same C jump table
This fixes objtool for both a GCC issue and a Clang issue:
1) GCC issue:
kernel/bpf/core.o: warning: objtool: ___bpf_prog_run()+0x8d5: sibling call from callable instruction with modified stack frame
With CONFIG_RETPOLINE=n, GCC is doing the following optimization in
___bpf_prog_run().
Before:
select_insn:
jmp *jumptable(,%rax,8)
...
ALU64_ADD_X:
...
jmp select_insn
ALU_ADD_X:
...
jmp select_insn
After:
select_insn:
jmp *jumptable(, %rax, 8)
...
ALU64_ADD_X:
...
jmp *jumptable(, %rax, 8)
ALU_ADD_X:
...
jmp *jumptable(, %rax, 8)
This confuses objtool. It has never seen multiple indirect jump
sites which use the same jump table.
For GCC switch tables, the only way of detecting the size of a table
is by continuing to scan for more tables. The size of the previous
table can only be determined after another switch table is found, or
when the scan reaches the end of the function.
That logic was reused for C jump tables, and was based on the
assumption that each jump table only has a single jump site. The
above optimization breaks that assumption.
2) Clang issue:
drivers/usb/misc/sisusbvga/sisusb.o: warning: objtool: sisusb_write_mem_bulk()+0x588: can't find switch jump table
With clang 9, code can be generated where a function contains two
indirect jump instructions which use the same switch table.
The fix is the same for both issues: split the jump table parsing into
two passes.
In the first pass, locate the heads of all switch tables for the
function and mark their locations.
In the second pass, parse the switch tables and add them.
Fixes: e55a73251da3 ("bpf: Fix ORC unwinding in non-JIT BPF code")
Reported-by: Randy Dunlap <rdunlap@infradead.org>
Reported-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Nick Desaulniers <ndesaulniers@google.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/e995befaada9d4d8b2cf788ff3f566ba900d2b4d.1563413318.git.jpoimboe@redhat.com
Co-developed-by: Josh Poimboeuf <jpoimboe@redhat.com>
2019-07-18 01:36:54 +00:00
|
|
|
mark_func_jump_tables(file, func);
|
2019-07-18 01:36:53 +00:00
|
|
|
ret = add_func_jump_tables(file, func);
|
2017-06-28 15:11:05 +00:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-07-11 15:33:43 +00:00
|
|
|
/*
 * Parse the .discard.unwind_hints section and seed each annotated
 * instruction's CFI state (or its ret_offset for RET_OFFSET hints).
 * Returns 0 on success, -1 on a malformed section or unresolvable hint.
 */
static int read_unwind_hints(struct objtool_file *file)
{
	struct section *sec, *relasec;
	struct rela *rela;
	struct unwind_hint *hint;
	struct instruction *insn;
	struct cfi_reg *cfa;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	relasec = sec->rela;
	if (!relasec) {
		WARN("missing .rela.discard.unwind_hints section");
		return -1;
	}

	/* The section must be an exact array of struct unwind_hint. */
	if (sec->len % sizeof(struct unwind_hint)) {
		WARN("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec->len / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		/* Each hint entry has a rela pointing at the hinted insn. */
		rela = find_rela_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!rela) {
			WARN("can't find rela for unwind_hints[%d]", i);
			return -1;
		}

		insn = find_insn(file, rela->sym->sec, rela->addend);
		if (!insn) {
			WARN("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		cfa = &insn->cfi.cfa;

		/* RET_OFFSET hints only record an offset; no CFI update. */
		if (hint->type == UNWIND_HINT_TYPE_RET_OFFSET) {
			insn->ret_offset = hint->sp_offset;
			continue;
		}

		insn->hint = true;

		/* Translate the ORC base-register encoding to CFI registers. */
		switch (hint->sp_reg) {
		case ORC_REG_UNDEFINED:
			cfa->base = CFI_UNDEFINED;
			break;
		case ORC_REG_SP:
			cfa->base = CFI_SP;
			break;
		case ORC_REG_BP:
			cfa->base = CFI_BP;
			break;
		case ORC_REG_SP_INDIRECT:
			cfa->base = CFI_SP_INDIRECT;
			break;
		case ORC_REG_R10:
			cfa->base = CFI_R10;
			break;
		case ORC_REG_R13:
			cfa->base = CFI_R13;
			break;
		case ORC_REG_DI:
			cfa->base = CFI_DI;
			break;
		case ORC_REG_DX:
			cfa->base = CFI_DX;
			break;
		default:
			WARN_FUNC("unsupported unwind_hint sp base reg %d",
				  insn->sec, insn->offset, hint->sp_reg);
			return -1;
		}

		cfa->offset = hint->sp_offset;
		insn->cfi.type = hint->type;
		insn->cfi.end = hint->end;
	}

	return 0;
}
|
|
|
|
|
2018-01-16 09:24:06 +00:00
|
|
|
static int read_retpoline_hints(struct objtool_file *file)
|
|
|
|
{
|
2018-03-06 23:58:15 +00:00
|
|
|
struct section *sec;
|
2018-01-16 09:24:06 +00:00
|
|
|
struct instruction *insn;
|
|
|
|
struct rela *rela;
|
|
|
|
|
2018-03-06 23:58:15 +00:00
|
|
|
sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
|
2018-01-16 09:24:06 +00:00
|
|
|
if (!sec)
|
|
|
|
return 0;
|
|
|
|
|
2018-03-06 23:58:15 +00:00
|
|
|
list_for_each_entry(rela, &sec->rela_list, list) {
|
|
|
|
if (rela->sym->type != STT_SECTION) {
|
|
|
|
WARN("unexpected relocation symbol type in %s", sec->name);
|
2018-01-16 09:24:06 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
insn = find_insn(file, rela->sym->sec, rela->addend);
|
|
|
|
if (!insn) {
|
2018-03-06 23:58:15 +00:00
|
|
|
WARN("bad .discard.retpoline_safe entry");
|
2018-01-16 09:24:06 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (insn->type != INSN_JUMP_DYNAMIC &&
|
|
|
|
insn->type != INSN_CALL_DYNAMIC) {
|
2018-03-06 23:58:15 +00:00
|
|
|
WARN_FUNC("retpoline_safe hint not an indirect jump/call",
|
2018-01-16 09:24:06 +00:00
|
|
|
insn->sec, insn->offset);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
insn->retpoline_safe = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-03-10 17:57:41 +00:00
|
|
|
static int read_instr_hints(struct objtool_file *file)
|
|
|
|
{
|
|
|
|
struct section *sec;
|
|
|
|
struct instruction *insn;
|
|
|
|
struct rela *rela;
|
|
|
|
|
|
|
|
sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
|
|
|
|
if (!sec)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
list_for_each_entry(rela, &sec->rela_list, list) {
|
|
|
|
if (rela->sym->type != STT_SECTION) {
|
|
|
|
WARN("unexpected relocation symbol type in %s", sec->name);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
insn = find_insn(file, rela->sym->sec, rela->addend);
|
|
|
|
if (!insn) {
|
|
|
|
WARN("bad .discard.instr_end entry");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
insn->instr--;
|
|
|
|
}
|
|
|
|
|
|
|
|
sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
|
|
|
|
if (!sec)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
list_for_each_entry(rela, &sec->rela_list, list) {
|
|
|
|
if (rela->sym->type != STT_SECTION) {
|
|
|
|
WARN("unexpected relocation symbol type in %s", sec->name);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
insn = find_insn(file, rela->sym->sec, rela->addend);
|
|
|
|
if (!insn) {
|
|
|
|
WARN("bad .discard.instr_begin entry");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
insn->instr++;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-04-14 10:36:12 +00:00
|
|
|
/*
 * Parse .rela.discard.intra_function_calls and convert each annotated
 * direct CALL into an unconditional jump with a resolved jump_dest.
 * Must run before add_call_destinations() (see comment below).
 */
static int read_intra_function_calls(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct rela *rela;

	sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
	if (!sec)
		return 0;

	list_for_each_entry(rela, &sec->rela_list, list) {
		unsigned long dest_off;

		if (rela->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s",
			     sec->name);
			return -1;
		}

		insn = find_insn(file, rela->sym->sec, rela->addend);
		if (!insn) {
			WARN("bad .discard.intra_function_call entry");
			return -1;
		}

		/* The annotation is only valid on a direct CALL. */
		if (insn->type != INSN_CALL) {
			WARN_FUNC("intra_function_call not a direct call",
				  insn->sec, insn->offset);
			return -1;
		}

		/*
		 * Treat intra-function CALLs as JMPs, but with a stack_op.
		 * See add_call_destinations(), which strips stack_ops from
		 * normal CALLs.
		 */
		insn->type = INSN_JUMP_UNCONDITIONAL;

		/* Call target = address of next insn + signed immediate. */
		dest_off = insn->offset + insn->len + insn->immediate;
		insn->jump_dest = find_insn(file, insn->sec, dest_off);
		if (!insn->jump_dest) {
			WARN_FUNC("can't find call dest at %s+0x%lx",
				  insn->sec, insn->offset,
				  insn->sec->name, dest_off);
			return -1;
		}
	}

	return 0;
}
|
|
|
|
|
2018-09-07 13:12:01 +00:00
|
|
|
static void mark_rodata(struct objtool_file *file)
|
|
|
|
{
|
|
|
|
struct section *sec;
|
|
|
|
bool found = false;
|
|
|
|
|
|
|
|
/*
|
2019-06-28 01:50:46 +00:00
|
|
|
* Search for the following rodata sections, each of which can
|
|
|
|
* potentially contain jump tables:
|
|
|
|
*
|
|
|
|
* - .rodata: can contain GCC switch tables
|
|
|
|
* - .rodata.<func>: same, if -fdata-sections is being used
|
|
|
|
* - .rodata..c_jump_table: contains C annotated jump tables
|
|
|
|
*
|
|
|
|
* .rodata.str1.* sections are ignored; they don't contain jump tables.
|
2018-09-07 13:12:01 +00:00
|
|
|
*/
|
|
|
|
for_each_sec(file, sec) {
|
2020-04-12 14:44:05 +00:00
|
|
|
if (!strncmp(sec->name, ".rodata", 7) &&
|
|
|
|
!strstr(sec->name, ".str1.")) {
|
2018-09-07 13:12:01 +00:00
|
|
|
sec->rodata = true;
|
|
|
|
found = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
file->rodata = found;
|
|
|
|
}
|
|
|
|
|
2017-06-28 15:11:05 +00:00
|
|
|
/*
 * Run all the per-file decode passes in dependency order.  Each pass may
 * rely on state produced by the earlier ones (e.g. jump destinations are
 * needed before special-section alternatives; intra-function calls are
 * rewritten before call destinations are resolved).  Returns 0 on
 * success or the first pass's error code.
 */
static int decode_sections(struct objtool_file *file)
{
	int ret;

	mark_rodata(file);

	ret = decode_instructions(file);
	if (ret)
		return ret;

	ret = add_dead_ends(file);
	if (ret)
		return ret;

	add_ignores(file);
	add_uaccess_safe(file);

	ret = add_ignore_alternatives(file);
	if (ret)
		return ret;

	ret = add_jump_destinations(file);
	if (ret)
		return ret;

	ret = add_special_section_alts(file);
	if (ret)
		return ret;

	/*
	 * Must run before add_call_destinations(): annotated CALLs are
	 * converted to jumps there (see read_intra_function_calls()).
	 */
	ret = read_intra_function_calls(file);
	if (ret)
		return ret;

	ret = add_call_destinations(file);
	if (ret)
		return ret;

	ret = add_jump_table_alts(file);
	if (ret)
		return ret;

	ret = read_unwind_hints(file);
	if (ret)
		return ret;

	ret = read_retpoline_hints(file);
	if (ret)
		return ret;

	ret = read_instr_hints(file);
	if (ret)
		return ret;

	return 0;
}
|
|
|
|
|
|
|
|
static bool is_fentry_call(struct instruction *insn)
|
|
|
|
{
|
2020-04-14 10:36:10 +00:00
|
|
|
if (insn->type == INSN_CALL && insn->call_dest &&
|
2017-06-28 15:11:05 +00:00
|
|
|
insn->call_dest->type == STT_NOTYPE &&
|
|
|
|
!strcmp(insn->call_dest->name, "__fentry__"))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-04-01 14:38:19 +00:00
|
|
|
static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
|
2017-06-28 15:11:05 +00:00
|
|
|
{
|
2020-04-01 14:38:19 +00:00
|
|
|
u8 ret_offset = insn->ret_offset;
|
2020-03-25 13:04:45 +00:00
|
|
|
struct cfi_state *cfi = &state->cfi;
|
2017-06-28 15:11:07 +00:00
|
|
|
int i;
|
|
|
|
|
2020-03-25 13:04:45 +00:00
|
|
|
if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
|
2020-04-01 14:38:19 +00:00
|
|
|
return true;
|
|
|
|
|
2020-03-25 13:04:45 +00:00
|
|
|
if (cfi->cfa.offset != initial_func_cfi.cfa.offset + ret_offset)
|
2017-06-28 15:11:07 +00:00
|
|
|
return true;
|
|
|
|
|
2020-03-25 13:04:45 +00:00
|
|
|
if (cfi->stack_size != initial_func_cfi.cfa.offset + ret_offset)
|
2020-04-01 14:38:19 +00:00
|
|
|
return true;
|
|
|
|
|
2020-04-07 07:31:35 +00:00
|
|
|
/*
|
|
|
|
* If there is a ret offset hint then don't check registers
|
|
|
|
* because a callee-saved register might have been pushed on
|
|
|
|
* the stack.
|
|
|
|
*/
|
|
|
|
if (ret_offset)
|
|
|
|
return false;
|
|
|
|
|
2020-04-01 14:38:19 +00:00
|
|
|
for (i = 0; i < CFI_NUM_REGS; i++) {
|
2020-03-25 13:04:45 +00:00
|
|
|
if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
|
|
|
|
cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
|
2017-06-28 15:11:07 +00:00
|
|
|
return true;
|
2020-04-01 14:38:19 +00:00
|
|
|
}
|
2017-06-28 15:11:07 +00:00
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool has_valid_stack_frame(struct insn_state *state)
|
|
|
|
{
|
2020-03-25 13:04:45 +00:00
|
|
|
struct cfi_state *cfi = &state->cfi;
|
|
|
|
|
|
|
|
if (cfi->cfa.base == CFI_BP && cfi->regs[CFI_BP].base == CFI_CFA &&
|
|
|
|
cfi->regs[CFI_BP].offset == -16)
|
2017-06-28 15:11:07 +00:00
|
|
|
return true;
|
|
|
|
|
2020-03-25 13:04:45 +00:00
|
|
|
if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
|
2017-06-28 15:11:07 +00:00
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
2017-06-28 15:11:05 +00:00
|
|
|
}
|
|
|
|
|
2020-03-25 13:04:45 +00:00
|
|
|
static int update_cfi_state_regs(struct instruction *insn,
|
|
|
|
struct cfi_state *cfi,
|
2020-03-27 15:28:47 +00:00
|
|
|
struct stack_op *op)
|
2017-07-11 15:33:42 +00:00
|
|
|
{
|
2020-03-25 13:04:45 +00:00
|
|
|
struct cfi_reg *cfa = &cfi->cfa;
|
2017-07-11 15:33:42 +00:00
|
|
|
|
2020-04-25 10:03:00 +00:00
|
|
|
if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
|
2017-07-11 15:33:42 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* push */
|
2019-02-25 11:50:09 +00:00
|
|
|
if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
|
2017-07-11 15:33:42 +00:00
|
|
|
cfa->offset += 8;
|
|
|
|
|
|
|
|
/* pop */
|
2019-02-25 11:50:09 +00:00
|
|
|
if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
|
2017-07-11 15:33:42 +00:00
|
|
|
cfa->offset -= 8;
|
|
|
|
|
|
|
|
/* add immediate to sp */
|
|
|
|
if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
|
|
|
|
op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
|
|
|
|
cfa->offset -= op->src.offset;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-03-25 13:04:45 +00:00
|
|
|
static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
|
2017-06-28 15:11:05 +00:00
|
|
|
{
|
objtool: Track DRAP separately from callee-saved registers
When GCC realigns a function's stack, it sometimes uses %r13 as the DRAP
register, like:
push %r13
lea 0x10(%rsp), %r13
and $0xfffffffffffffff0, %rsp
pushq -0x8(%r13)
push %rbp
mov %rsp, %rbp
push %r13
...
mov -0x8(%rbp),%r13
leaveq
lea -0x10(%r13), %rsp
pop %r13
retq
Since %r13 was pushed onto the stack twice, its two stack locations need
to be stored separately. The first push of %r13 is its original value,
and the second push of %r13 is the caller's stack frame address.
Since %r13 is a callee-saved register, we need to track the stack
location of its original value separately from the DRAP register.
This fixes the following false positive warning:
lib/ubsan.o: warning: objtool: val_to_string.constprop.7()+0x97: leave instruction with modified stack frame
Reported-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: baa41469a7b9 ("objtool: Implement stack validation 2.0")
Link: http://lkml.kernel.org/r/3da23a6d4c5b3c1e21fc2ccc21a73941b97ff20a.1502401017.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2017-08-10 21:37:26 +00:00
|
|
|
if (arch_callee_saved_reg(reg) &&
|
2020-03-25 13:04:45 +00:00
|
|
|
cfi->regs[reg].base == CFI_UNDEFINED) {
|
|
|
|
cfi->regs[reg].base = base;
|
|
|
|
cfi->regs[reg].offset = offset;
|
2017-06-28 15:11:07 +00:00
|
|
|
}
|
2017-06-28 15:11:05 +00:00
|
|
|
}
|
|
|
|
|
2020-03-25 13:04:45 +00:00
|
|
|
static void restore_reg(struct cfi_state *cfi, unsigned char reg)
|
2017-06-28 15:11:05 +00:00
|
|
|
{
|
2020-03-25 13:04:45 +00:00
|
|
|
cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
|
|
|
|
cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
|
2017-06-28 15:11:07 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * A note about DRAP stack alignment:
 *
 * GCC has the concept of a DRAP register, which is used to help keep track of
 * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
 * register.  The typical DRAP pattern is:
 *
 *   4c 8d 54 24 08		lea    0x8(%rsp),%r10
 *   48 83 e4 c0		and    $0xffffffffffffffc0,%rsp
 *   41 ff 72 f8		pushq  -0x8(%r10)
 *   55				push   %rbp
 *   48 89 e5			mov    %rsp,%rbp
 *				(more pushes)
 *   41 52			push   %r10
 *				...
 *   41 5a			pop    %r10
 *				(more pops)
 *   5d				pop    %rbp
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * There are some variations in the epilogues, like:
 *
 *   5b				pop    %rbx
 *   41 5a			pop    %r10
 *   41 5c			pop    %r12
 *   41 5d			pop    %r13
 *   41 5e			pop    %r14
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * and:
 *
 *   4c 8b 55 e8		mov    -0x18(%rbp),%r10
 *   48 8b 5d e0		mov    -0x20(%rbp),%rbx
 *   4c 8b 65 f0		mov    -0x10(%rbp),%r12
 *   4c 8b 6d f8		mov    -0x8(%rbp),%r13
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * Sometimes r13 is used as the DRAP register, in which case it's saved and
 * restored beforehand:
 *
 *   41 55			push   %r13
 *   4c 8d 6c 24 10		lea    0x10(%rsp),%r13
 *   48 83 e4 f0		and    $0xfffffffffffffff0,%rsp
 *				...
 *   49 8d 65 f0		lea    -0x10(%r13),%rsp
 *   41 5d			pop    %r13
 *   c3				retq
 */
|
2020-03-25 13:04:45 +00:00
|
|
|
/*
 * Apply a single stack operation (push/pop/mov/lea/and/leave/...) to the
 * CFI state: tracks the CFA (canonical frame address) base/offset, the
 * stack size, callee-saved register save slots, and GCC's DRAP
 * stack-realignment register.  Returns 0 on success, -1 on an
 * unsupported or unknown stack modification (with a warning).
 */
static int update_cfi_state(struct instruction *insn, struct cfi_state *cfi,
			    struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;
	struct cfi_reg *regs = cfi->regs;

	/* stack operations don't make sense with an undefined CFA */
	if (cfa->base == CFI_UNDEFINED) {
		if (insn->func) {
			WARN_FUNC("undefined stack state", insn->sec, insn->offset);
			return -1;
		}
		return 0;
	}

	/* pt_regs-style hints use the simplified SP-relative tracking */
	if (cfi->type == ORC_TYPE_REGS || cfi->type == ORC_TYPE_REGS_IRET)
		return update_cfi_state_regs(insn, cfi, op);

	switch (op->dest.type) {

	case OP_DEST_REG:
		switch (op->src.type) {

		case OP_SRC_REG:
			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
			    cfa->base == CFI_SP &&
			    regs[CFI_BP].base == CFI_CFA &&
			    regs[CFI_BP].offset == -cfa->offset) {

				/* mov %rsp, %rbp */
				cfa->base = op->dest.reg;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP &&
				 op->dest.reg == CFI_BP && cfi->drap) {

				/* drap: mov %rsp, %rbp */
				regs[CFI_BP].base = CFI_BP;
				regs[CFI_BP].offset = -cfi->stack_size;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/*
				 * mov %rsp, %reg
				 *
				 * This is needed for the rare case where GCC
				 * does:
				 *
				 *   mov    %rsp, %rax
				 *   ...
				 *   mov    %rax, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = -cfi->stack_size;
			}

			else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
				 cfa->base == CFI_BP) {

				/*
				 * mov %rbp, %rsp
				 *
				 * Restore the original stack pointer (Clang).
				 */
				cfi->stack_size = -cfi->regs[CFI_BP].offset;
			}

			else if (op->dest.reg == cfa->base) {

				/* mov %reg, %rsp */
				if (cfa->base == CFI_SP &&
				    cfi->vals[op->src.reg].base == CFI_CFA) {

					/*
					 * This is needed for the rare case
					 * where GCC does something dumb like:
					 *
					 *   lea    0x8(%rsp), %rcx
					 *   ...
					 *   mov    %rcx, %rsp
					 */
					cfa->offset = -cfi->vals[op->src.reg].offset;
					cfi->stack_size = cfa->offset;

				} else {
					/* unknown source: CFA is lost */
					cfa->base = CFI_UNDEFINED;
					cfa->offset = 0;
				}
			}

			break;

		case OP_SRC_ADD:
			if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {

				/* add imm, %rsp */
				cfi->stack_size -= op->src.offset;
				if (cfa->base == CFI_SP)
					cfa->offset -= op->src.offset;
				break;
			}

			if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {

				/* lea disp(%rbp), %rsp */
				cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
				break;
			}

			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/* drap: lea disp(%rsp), %drap */
				cfi->drap_reg = op->dest.reg;

				/*
				 * lea disp(%rsp), %reg
				 *
				 * This is needed for the rare case where GCC
				 * does something dumb like:
				 *
				 *   lea    0x8(%rsp), %rcx
				 *   ...
				 *   mov    %rcx, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = \
					-cfi->stack_size + op->src.offset;

				break;
			}

			if (cfi->drap && op->dest.reg == CFI_SP &&
			    op->src.reg == cfi->drap_reg) {

				 /* drap: lea disp(%drap), %rsp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size = -op->src.offset;
				cfi->drap_reg = CFI_UNDEFINED;
				cfi->drap = false;
				break;
			}

			if (op->dest.reg == cfi->cfa.base) {
				WARN_FUNC("unsupported stack register modification",
					  insn->sec, insn->offset);
				return -1;
			}

			break;

		case OP_SRC_AND:
			if (op->dest.reg != CFI_SP ||
			    (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
			    (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
				WARN_FUNC("unsupported stack pointer realignment",
					  insn->sec, insn->offset);
				return -1;
			}

			if (cfi->drap_reg != CFI_UNDEFINED) {
				/* drap: and imm, %rsp */
				cfa->base = cfi->drap_reg;
				cfa->offset = cfi->stack_size = 0;
				cfi->drap = true;
			}

			/*
			 * Older versions of GCC (4.8ish) realign the stack
			 * without DRAP, with a frame pointer.
			 */

			break;

		case OP_SRC_POP:
		case OP_SRC_POPF:
			if (!cfi->drap && op->dest.reg == cfa->base) {

				/* pop %rbp */
				cfa->base = CFI_SP;
			}

			if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
			    op->dest.reg == cfi->drap_reg &&
			    cfi->drap_offset == -cfi->stack_size) {

				/* drap: pop %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;

			} else if (regs[op->dest.reg].offset == -cfi->stack_size) {

				/* pop %reg */
				restore_reg(cfi, op->dest.reg);
			}

			cfi->stack_size -= 8;
			if (cfa->base == CFI_SP)
				cfa->offset -= 8;

			break;

		case OP_SRC_REG_INDIRECT:
			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == cfi->drap_offset) {

				/* drap: mov disp(%rbp), %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == regs[op->dest.reg].offset) {

				/* drap: mov disp(%rbp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == cfa->base &&
			    op->src.offset == regs[op->dest.reg].offset + cfa->offset) {

				/* mov disp(%rbp), %reg */
				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);
			}

			break;

		default:
			WARN_FUNC("unknown stack-related instruction",
				  insn->sec, insn->offset);
			return -1;
		}

		break;

	case OP_DEST_PUSH:
	case OP_DEST_PUSHF:
		cfi->stack_size += 8;
		if (cfa->base == CFI_SP)
			cfa->offset += 8;

		if (op->src.type != OP_SRC_REG)
			break;

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: push %drap */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = -cfi->stack_size;

				/* save drap so we know when to restore it */
				cfi->drap_offset = -cfi->stack_size;

			} else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {

				/* drap: push %rbp */
				cfi->stack_size = 0;

			} else if (regs[op->src.reg].base == CFI_UNDEFINED) {

				/* drap: push %reg */
				save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
			}

		} else {

			/* push %reg */
			save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
		}

		/* detect when asm code uses rbp as a scratch register */
		if (!no_fp && insn->func && op->src.reg == CFI_BP &&
		    cfa->base != CFI_BP)
			cfi->bp_scratch = true;
		break;

	case OP_DEST_REG_INDIRECT:

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: mov %drap, disp(%rbp) */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = op->dest.offset;

				/* save drap offset so we know when to restore it */
				cfi->drap_offset = op->dest.offset;
			}

			else if (regs[op->src.reg].base == CFI_UNDEFINED) {

				/* drap: mov reg, disp(%rbp) */
				save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
			}

		} else if (op->dest.reg == cfa->base) {

			/* mov reg, disp(%rbp) */
			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->cfa.offset);
		}

		break;

	case OP_DEST_LEAVE:
		if ((!cfi->drap && cfa->base != CFI_BP) ||
		    (cfi->drap && cfa->base != cfi->drap_reg)) {
			WARN_FUNC("leave instruction with modified stack frame",
				  insn->sec, insn->offset);
			return -1;
		}

		/* leave (mov %rbp, %rsp; pop %rbp) */

		cfi->stack_size = -cfi->regs[CFI_BP].offset - 8;
		restore_reg(cfi, CFI_BP);

		if (!cfi->drap) {
			cfa->base = CFI_SP;
			cfa->offset -= 8;
		}

		break;

	case OP_DEST_MEM:
		if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
			WARN_FUNC("unknown stack-related memory operation",
				  insn->sec, insn->offset);
			return -1;
		}

		/* pop mem */
		cfi->stack_size -= 8;
		if (cfa->base == CFI_SP)
			cfa->offset -= 8;

		break;

	default:
		WARN_FUNC("unknown stack-related instruction",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}
|
|
|
|
|
2020-03-27 15:28:47 +00:00
|
|
|
/*
 * Apply all stack operations decoded for @insn to the branch state: update
 * the CFI state per operation and maintain the PUSHF/POPF uaccess bit-stack.
 * Returns 0 on success, non-zero after issuing a warning.
 */
static int handle_insn_ops(struct instruction *insn, struct insn_state *state)
{
	struct stack_op *op;

	list_for_each_entry(op, &insn->stack_ops, list) {
		/* Snapshot the CFI so changes inside alternatives can be detected. */
		struct cfi_state old_cfi = state->cfi;
		int res;

		res = update_cfi_state(insn, &state->cfi, op);
		if (res)
			return res;

		/* Instructions inside an alternative must not alter the unwind state. */
		if (insn->alt_group && memcmp(&state->cfi, &old_cfi, sizeof(struct cfi_state))) {
			WARN_FUNC("alternative modifies stack", insn->sec, insn->offset);
			return -1;
		}

		if (op->dest.type == OP_DEST_PUSHF) {
			/*
			 * uaccess_stack is a bit-stack of saved uaccess flags:
			 * pushing shifts left and ORs in the current flag.  The
			 * value 1 is the "non-empty" sentinel; a set bit 31
			 * means there is no room left to push.
			 */
			if (!state->uaccess_stack) {
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				WARN_FUNC("PUSHF stack exhausted",
					  insn->sec, insn->offset);
				return 1;
			}
			state->uaccess_stack <<= 1;
			state->uaccess_stack |= state->uaccess;
		}

		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				/* Pop the saved uaccess flag. */
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				/* Only the sentinel remains: the stack is now empty. */
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

	return 0;
}
|
|
|
|
|
2020-03-25 13:04:45 +00:00
|
|
|
/*
 * Compare the CFI state previously recorded at @insn with @cfi2, the state
 * reached via a different code path.  The first difference found — CFA,
 * callee-saved register locations, unwind type, or DRAP state — is reported
 * with a warning and false is returned; true means the states match.
 */
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
	struct cfi_state *cfi1 = &insn->cfi;
	int i;

	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {

		WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
			  insn->sec, insn->offset,
			  cfi1->cfa.base, cfi1->cfa.offset,
			  cfi2->cfa.base, cfi2->cfa.offset);

	} else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
		/* Report only the first register whose saved location differs. */
		for (i = 0; i < CFI_NUM_REGS; i++) {
			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
				    sizeof(struct cfi_reg)))
				continue;

			WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
				  insn->sec, insn->offset,
				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
			break;
		}

	} else if (cfi1->type != cfi2->type) {

		WARN_FUNC("stack state mismatch: type1=%d type2=%d",
			  insn->sec, insn->offset, cfi1->type, cfi2->type);

	} else if (cfi1->drap != cfi2->drap ||
		   (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
		   (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {

		WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
			  insn->sec, insn->offset,
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);

	} else
		return true;

	return false;
}
|
|
|
|
|
2019-02-25 11:50:09 +00:00
|
|
|
static inline bool func_uaccess_safe(struct symbol *func)
|
|
|
|
{
|
|
|
|
if (func)
|
2019-07-18 01:36:48 +00:00
|
|
|
return func->uaccess_safe;
|
2019-02-25 11:50:09 +00:00
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-07-18 01:36:52 +00:00
|
|
|
static inline const char *call_dest_name(struct instruction *insn)
|
2019-02-25 11:50:09 +00:00
|
|
|
{
|
|
|
|
if (insn->call_dest)
|
|
|
|
return insn->call_dest->name;
|
|
|
|
|
|
|
|
return "{dynamic}";
|
|
|
|
}
|
|
|
|
|
2020-06-03 18:09:06 +00:00
|
|
|
static inline bool noinstr_call_dest(struct symbol *func)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* We can't deal with indirect function calls at present;
|
|
|
|
* assume they're instrumented.
|
|
|
|
*/
|
|
|
|
if (!func)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the symbol is from a noinstr section; we good.
|
|
|
|
*/
|
|
|
|
if (func->sec->noinstr)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The __ubsan_handle_*() calls are like WARN(), they only happen when
|
|
|
|
* something 'BAD' happened. At the risk of taking the machine down,
|
|
|
|
* let them proceed to get the message out.
|
|
|
|
*/
|
|
|
|
if (!strncmp(func->name, "__ubsan_handle_", 15))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-02-25 11:50:09 +00:00
|
|
|
/*
 * Validate the machine state at a call site:
 *  - in noinstr validation, leaving .noinstr.text while instrumentation is
 *    disabled requires a noinstr-safe destination,
 *  - UACCESS must not be enabled unless the callee is uaccess-safe,
 *  - the direction flag (DF) must be clear.
 * Returns 1 after issuing a warning, 0 if the call is acceptable.
 */
static int validate_call(struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr <= 0 &&
	    !noinstr_call_dest(insn->call_dest)) {
		WARN_FUNC("call to %s() leaves .noinstr.text section",
				insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
		WARN_FUNC("call to %s() with UACCESS enabled",
				insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	if (state->df) {
		WARN_FUNC("call to %s() with DF set",
				insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	return 0;
}
|
|
|
|
|
2019-03-06 11:58:15 +00:00
|
|
|
/*
 * A sibling (tail) call leaves the function, so the stack frame must already
 * be restored to its original state; beyond that it is checked like a normal
 * call.  Returns non-zero after issuing a warning.
 */
static int validate_sibling_call(struct instruction *insn, struct insn_state *state)
{
	if (has_modified_stack_frame(insn, state)) {
		WARN_FUNC("sibling call from callable instruction with modified stack frame",
				insn->sec, insn->offset);
		return 1;
	}

	return validate_call(insn, state);
}
|
|
|
|
|
2020-03-10 17:07:44 +00:00
|
|
|
/*
 * Validate the machine state at a return instruction: in noinstr mode
 * instrumentation must be off, UACCESS must match the function's annotation,
 * DF must be clear, the stack frame must be fully restored, and BP must not
 * have been used as a scratch register.  Returns 1 after a warning, 0 if the
 * return is clean.
 */
static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr > 0) {
		WARN_FUNC("return with instrumentation enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	/* A uaccess-safe function must not disable UACCESS before returning. */
	if (!state->uaccess && func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->df) {
		WARN_FUNC("return with DF set",
			  insn->sec, insn->offset);
		return 1;
	}

	if (func && has_modified_stack_frame(insn, state)) {
		WARN_FUNC("return with modified stack frame",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->cfi.bp_scratch) {
		WARN_FUNC("BP used as a scratch register",
			  insn->sec, insn->offset);
		return 1;
	}

	return 0;
}
|
|
|
|
|
2020-04-28 17:37:01 +00:00
|
|
|
/*
|
|
|
|
* Alternatives should not contain any ORC entries, this in turn means they
|
|
|
|
* should not contain any CFI ops, which implies all instructions should have
|
|
|
|
* the same same CFI state.
|
|
|
|
*
|
|
|
|
* It is possible to constuct alternatives that have unreachable holes that go
|
|
|
|
* unreported (because they're NOPs), such holes would result in CFI_UNDEFINED
|
|
|
|
* states which then results in ORC entries, which we just said we didn't want.
|
|
|
|
*
|
|
|
|
* Avoid them by copying the CFI entry of the first instruction into the whole
|
|
|
|
* alternative.
|
|
|
|
*/
|
|
|
|
static void fill_alternative_cfi(struct objtool_file *file, struct instruction *insn)
|
|
|
|
{
|
|
|
|
struct instruction *first_insn = insn;
|
|
|
|
int alt_group = insn->alt_group;
|
|
|
|
|
|
|
|
sec_for_each_insn_continue(file, insn) {
|
|
|
|
if (insn->alt_group != alt_group)
|
|
|
|
break;
|
|
|
|
insn->cfi = first_insn->cfi;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-06-28 15:11:05 +00:00
|
|
|
/*
 * Follow the branch starting at the given instruction, and recursively follow
 * any other branches (jumps). Meanwhile, track the frame pointer state at
 * each instruction and validate all the rules described in
 * tools/objtool/Documentation/stack-validation.txt.
 */
static int validate_branch(struct objtool_file *file, struct symbol *func,
			   struct instruction *insn, struct insn_state state)
{
	struct alternative *alt;
	struct instruction *next_insn;
	struct section *sec;
	u8 visited;
	int ret;

	sec = insn->sec;

	while (1) {
		next_insn = next_insn_same_sec(file, insn);

		/* Falling off the end of one function into the next is a bug. */
		if (file->c_file && func && insn->func && func != insn->func->pfunc) {
			WARN("%s() falls through to next function %s()",
			     func->name, insn->func->name);
			return 1;
		}

		if (func && insn->ignore) {
			WARN_FUNC("BUG: why am I validating an ignored function?",
				  sec, insn->offset);
			return 1;
		}

		/*
		 * Track visits separately per uaccess flag (two bits), so a
		 * path is re-walked when reached with a different UACCESS
		 * state.
		 */
		visited = 1 << state.uaccess;
		if (insn->visited) {
			/* Converging paths must agree on the CFI state. */
			if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
				return 1;

			if (insn->visited & visited)
				return 0;
		}

		if (state.noinstr)
			state.instr += insn->instr;

		/* Unwind hints override the computed state; otherwise record it. */
		if (insn->hint)
			state.cfi = insn->cfi;
		else
			insn->cfi = state.cfi;

		insn->visited |= visited;

		if (!insn->ignore_alts && !list_empty(&insn->alts)) {
			bool skip_orig = false;

			/* Validate every alternative replacement path. */
			list_for_each_entry(alt, &insn->alts, list) {
				if (alt->skip_orig)
					skip_orig = true;

				ret = validate_branch(file, func, alt->insn, state);
				if (ret) {
					if (backtrace)
						BT_FUNC("(alt)", insn);
					return ret;
				}
			}

			if (insn->alt_group)
				fill_alternative_cfi(file, insn);

			if (skip_orig)
				return 0;
		}

		if (handle_insn_ops(insn, &state))
			return 1;

		switch (insn->type) {

		case INSN_RETURN:
			return validate_return(func, insn, &state);

		case INSN_CALL:
		case INSN_CALL_DYNAMIC:
			ret = validate_call(insn, &state);
			if (ret)
				return ret;

			/* With frame-pointer checking, calls need a set-up frame. */
			if (!no_fp && func && !is_fentry_call(insn) &&
			    !has_valid_stack_frame(&state)) {
				WARN_FUNC("call without frame pointer save/setup",
					  sec, insn->offset);
				return 1;
			}

			/* Calls to noreturn functions end this path. */
			if (dead_end_function(file, insn->call_dest))
				return 0;

			break;

		case INSN_JUMP_CONDITIONAL:
		case INSN_JUMP_UNCONDITIONAL:
			if (func && is_sibling_call(insn)) {
				ret = validate_sibling_call(insn, &state);
				if (ret)
					return ret;

			} else if (insn->jump_dest) {
				ret = validate_branch(file, func,
						      insn->jump_dest, state);
				if (ret) {
					if (backtrace)
						BT_FUNC("(branch)", insn);
					return ret;
				}
			}

			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				return 0;

			break;

		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			if (func && is_sibling_call(insn)) {
				ret = validate_sibling_call(insn, &state);
				if (ret)
					return ret;
			}

			if (insn->type == INSN_JUMP_DYNAMIC)
				return 0;

			break;

		case INSN_CONTEXT_SWITCH:
			/* Only allowed in non-function code or before a hinted insn. */
			if (func && (!next_insn || !next_insn->hint)) {
				WARN_FUNC("unsupported instruction in callable function",
					  sec, insn->offset);
				return 1;
			}
			return 0;

		case INSN_STAC:
			if (state.uaccess) {
				WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
				return 1;
			}

			state.uaccess = true;
			break;

		case INSN_CLAC:
			if (!state.uaccess && func) {
				WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
				return 1;
			}

			if (func_uaccess_safe(func) && !state.uaccess_stack) {
				WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
				return 1;
			}

			state.uaccess = false;
			break;

		case INSN_STD:
			if (state.df)
				WARN_FUNC("recursive STD", sec, insn->offset);

			state.df = true;
			break;

		case INSN_CLD:
			if (!state.df && func)
				WARN_FUNC("redundant CLD", sec, insn->offset);

			state.df = false;
			break;

		default:
			break;
		}

		if (insn->dead_end)
			return 0;

		if (!next_insn) {
			/* An undefined CFA means we already left the function. */
			if (state.cfi.cfa.base == CFI_UNDEFINED)
				return 0;
			WARN("%s: unexpected end of section", sec->name);
			return 1;
		}

		insn = next_insn;
	}

	return 0;
}
|
|
|
|
|
2020-03-23 17:26:03 +00:00
|
|
|
static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
|
2017-07-11 15:33:43 +00:00
|
|
|
{
|
|
|
|
struct instruction *insn;
|
|
|
|
struct insn_state state;
|
2020-03-23 17:26:03 +00:00
|
|
|
int ret, warnings = 0;
|
2017-07-11 15:33:43 +00:00
|
|
|
|
|
|
|
if (!file->hints)
|
|
|
|
return 0;
|
|
|
|
|
2020-03-23 17:26:03 +00:00
|
|
|
init_insn_state(&state, sec);
|
2017-07-11 15:33:43 +00:00
|
|
|
|
2020-03-23 17:26:03 +00:00
|
|
|
if (sec) {
|
|
|
|
insn = find_insn(file, sec, 0);
|
|
|
|
if (!insn)
|
|
|
|
return 0;
|
|
|
|
} else {
|
|
|
|
insn = list_first_entry(&file->insn_list, typeof(*insn), list);
|
|
|
|
}
|
|
|
|
|
|
|
|
while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) {
|
2017-07-11 15:33:43 +00:00
|
|
|
if (insn->hint && !insn->visited) {
|
2019-07-18 01:36:47 +00:00
|
|
|
ret = validate_branch(file, insn->func, insn, state);
|
objtool: Add --backtrace support
For when you want to know the path that reached your fail state:
$ ./objtool check --no-fp --backtrace arch/x86/lib/usercopy_64.o
arch/x86/lib/usercopy_64.o: warning: objtool: .altinstr_replacement+0x3: UACCESS disable without MEMOPs: __clear_user()
arch/x86/lib/usercopy_64.o: warning: objtool: __clear_user()+0x3a: (alt)
arch/x86/lib/usercopy_64.o: warning: objtool: __clear_user()+0x2e: (branch)
arch/x86/lib/usercopy_64.o: warning: objtool: __clear_user()+0x18: (branch)
arch/x86/lib/usercopy_64.o: warning: objtool: .altinstr_replacement+0xffffffffffffffff: (branch)
arch/x86/lib/usercopy_64.o: warning: objtool: __clear_user()+0x5: (alt)
arch/x86/lib/usercopy_64.o: warning: objtool: __clear_user()+0x0: <=== (func)
0000000000000000 <__clear_user>:
0: e8 00 00 00 00 callq 5 <__clear_user+0x5>
1: R_X86_64_PLT32 __fentry__-0x4
5: 90 nop
6: 90 nop
7: 90 nop
8: 48 89 f0 mov %rsi,%rax
b: 48 c1 ee 03 shr $0x3,%rsi
f: 83 e0 07 and $0x7,%eax
12: 48 89 f1 mov %rsi,%rcx
15: 48 85 c9 test %rcx,%rcx
18: 74 0f je 29 <__clear_user+0x29>
1a: 48 c7 07 00 00 00 00 movq $0x0,(%rdi)
21: 48 83 c7 08 add $0x8,%rdi
25: ff c9 dec %ecx
27: 75 f1 jne 1a <__clear_user+0x1a>
29: 48 89 c1 mov %rax,%rcx
2c: 85 c9 test %ecx,%ecx
2e: 74 0a je 3a <__clear_user+0x3a>
30: c6 07 00 movb $0x0,(%rdi)
33: 48 ff c7 inc %rdi
36: ff c9 dec %ecx
38: 75 f6 jne 30 <__clear_user+0x30>
3a: 90 nop
3b: 90 nop
3c: 90 nop
3d: 48 89 c8 mov %rcx,%rax
40: c3 retq
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2019-03-01 10:15:49 +00:00
|
|
|
if (ret && backtrace)
|
|
|
|
BT_FUNC("<=== (hint)", insn);
|
2017-07-11 15:33:43 +00:00
|
|
|
warnings += ret;
|
|
|
|
}
|
2020-03-23 17:26:03 +00:00
|
|
|
|
|
|
|
insn = list_next_entry(insn, list);
|
2017-07-11 15:33:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return warnings;
|
|
|
|
}
|
|
|
|
|
2018-01-16 09:24:06 +00:00
|
|
|
static int validate_retpoline(struct objtool_file *file)
|
|
|
|
{
|
|
|
|
struct instruction *insn;
|
|
|
|
int warnings = 0;
|
|
|
|
|
|
|
|
for_each_insn(file, insn) {
|
|
|
|
if (insn->type != INSN_JUMP_DYNAMIC &&
|
|
|
|
insn->type != INSN_CALL_DYNAMIC)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (insn->retpoline_safe)
|
|
|
|
continue;
|
|
|
|
|
2018-01-31 09:18:28 +00:00
|
|
|
/*
|
|
|
|
* .init.text code is ran before userspace and thus doesn't
|
|
|
|
* strictly need retpolines, except for modules which are
|
|
|
|
* loaded late, they very much do need retpoline in their
|
|
|
|
* .init.text
|
|
|
|
*/
|
|
|
|
if (!strcmp(insn->sec->name, ".init.text") && !module)
|
|
|
|
continue;
|
|
|
|
|
2018-01-16 09:24:06 +00:00
|
|
|
WARN_FUNC("indirect %s found in RETPOLINE build",
|
|
|
|
insn->sec, insn->offset,
|
|
|
|
insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
|
|
|
|
|
|
|
|
warnings++;
|
|
|
|
}
|
|
|
|
|
|
|
|
return warnings;
|
|
|
|
}
|
|
|
|
|
2017-06-28 15:11:05 +00:00
|
|
|
static bool is_kasan_insn(struct instruction *insn)
|
|
|
|
{
|
|
|
|
return (insn->type == INSN_CALL &&
|
|
|
|
!strcmp(insn->call_dest->name, "__asan_handle_no_return"));
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool is_ubsan_insn(struct instruction *insn)
|
|
|
|
{
|
|
|
|
return (insn->type == INSN_CALL &&
|
|
|
|
!strcmp(insn->call_dest->name,
|
|
|
|
"__ubsan_handle_builtin_unreachable"));
|
|
|
|
}
|
|
|
|
|
2017-06-28 15:11:07 +00:00
|
|
|
/*
 * Decide whether an instruction never reached during validation may be
 * excused from the "unreachable instruction" warning.
 */
static bool ignore_unreachable_insn(struct instruction *insn)
{
	int i;

	if (insn->ignore || insn->type == INSN_NOP)
		return true;

	/*
	 * Ignore any unused exceptions. This can happen when a whitelisted
	 * function has an exception table entry.
	 *
	 * Also ignore alternative replacement instructions. This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
	 */
	if (!strcmp(insn->sec->name, ".fixup") ||
	    !strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;

	if (!insn->func)
		return false;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable(). The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
	 */
	if (list_prev_entry(insn, list)->dead_end &&
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			/* Follow intra-function jumps; give up on anything else. */
			if (insn->jump_dest &&
			    insn->jump_dest->func == insn->func) {
				insn = insn->jump_dest;
				continue;
			}

			break;
		}

		/* Stop at the end of the containing function. */
		if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
			break;

		insn = list_next_entry(insn, list);
	}

	return false;
}
|
|
|
|
|
2020-03-23 20:17:50 +00:00
|
|
|
/*
 * Validate one function symbol: require an ELF size annotation, skip alias
 * and non-parent symbols, locate the entry instruction and walk all branches
 * from it with the given starting state.  Returns non-zero when a warning
 * was issued.
 */
static int validate_symbol(struct objtool_file *file, struct section *sec,
			   struct symbol *sym, struct insn_state *state)
{
	struct instruction *insn;
	int ret;

	if (!sym->len) {
		WARN("%s() is missing an ELF size annotation", sym->name);
		return 1;
	}

	/* Only validate "parent" symbols; aliases/sub-functions are covered by them. */
	if (sym->pfunc != sym || sym->alias != sym)
		return 0;

	insn = find_insn(file, sec, sym->offset);
	if (!insn || insn->ignore || insn->visited)
		return 0;

	/* Seed the UACCESS flag from the symbol's uaccess-safe annotation. */
	state->uaccess = sym->uaccess_safe;

	ret = validate_branch(file, insn->func, insn, *state);
	if (ret && backtrace)
		BT_FUNC("<=== (sym)", insn);
	return ret;
}
|
|
|
|
|
|
|
|
static int validate_section(struct objtool_file *file, struct section *sec)
|
|
|
|
{
|
2017-06-28 15:11:07 +00:00
|
|
|
struct insn_state state;
|
2020-03-23 20:17:50 +00:00
|
|
|
struct symbol *func;
|
|
|
|
int warnings = 0;
|
2017-06-28 15:11:05 +00:00
|
|
|
|
2020-03-23 19:57:13 +00:00
|
|
|
list_for_each_entry(func, &sec->symbol_list, list) {
|
|
|
|
if (func->type != STT_FUNC)
|
|
|
|
continue;
|
2019-07-18 01:36:48 +00:00
|
|
|
|
2020-03-23 17:26:03 +00:00
|
|
|
init_insn_state(&state, sec);
|
2020-03-25 13:04:45 +00:00
|
|
|
state.cfi.cfa = initial_func_cfi.cfa;
|
|
|
|
memcpy(&state.cfi.regs, &initial_func_cfi.regs,
|
2020-03-27 15:28:40 +00:00
|
|
|
CFI_NUM_REGS * sizeof(struct cfi_reg));
|
2020-03-25 13:04:45 +00:00
|
|
|
state.cfi.stack_size = initial_func_cfi.cfa.offset;
|
2020-03-27 15:28:40 +00:00
|
|
|
|
2020-03-23 20:17:50 +00:00
|
|
|
warnings += validate_symbol(file, sec, func, &state);
|
2017-06-28 15:11:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return warnings;
|
|
|
|
}
|
|
|
|
|
2020-03-10 17:57:41 +00:00
|
|
|
static int validate_vmlinux_functions(struct objtool_file *file)
|
|
|
|
{
|
|
|
|
struct section *sec;
|
2020-03-23 17:26:03 +00:00
|
|
|
int warnings = 0;
|
2020-03-10 17:57:41 +00:00
|
|
|
|
|
|
|
sec = find_section_by_name(file->elf, ".noinstr.text");
|
2020-03-25 16:18:17 +00:00
|
|
|
if (sec) {
|
|
|
|
warnings += validate_section(file, sec);
|
|
|
|
warnings += validate_unwind_hints(file, sec);
|
|
|
|
}
|
2020-03-10 17:57:41 +00:00
|
|
|
|
2020-03-25 16:18:17 +00:00
|
|
|
sec = find_section_by_name(file->elf, ".entry.text");
|
|
|
|
if (sec) {
|
|
|
|
warnings += validate_section(file, sec);
|
|
|
|
warnings += validate_unwind_hints(file, sec);
|
|
|
|
}
|
2020-03-23 17:26:03 +00:00
|
|
|
|
|
|
|
return warnings;
|
2020-03-10 17:57:41 +00:00
|
|
|
}
|
|
|
|
|
2020-03-23 19:57:13 +00:00
|
|
|
static int validate_functions(struct objtool_file *file)
|
|
|
|
{
|
|
|
|
struct section *sec;
|
|
|
|
int warnings = 0;
|
|
|
|
|
2020-03-23 20:11:14 +00:00
|
|
|
for_each_sec(file, sec) {
|
|
|
|
if (!(sec->sh.sh_flags & SHF_EXECINSTR))
|
|
|
|
continue;
|
|
|
|
|
2020-03-23 19:57:13 +00:00
|
|
|
warnings += validate_section(file, sec);
|
2020-03-23 20:11:14 +00:00
|
|
|
}
|
2020-03-23 19:57:13 +00:00
|
|
|
|
|
|
|
return warnings;
|
|
|
|
}
|
|
|
|
|
2017-06-28 15:11:07 +00:00
|
|
|
static int validate_reachable_instructions(struct objtool_file *file)
|
2017-06-28 15:11:05 +00:00
|
|
|
{
|
|
|
|
struct instruction *insn;
|
2017-06-28 15:11:07 +00:00
|
|
|
|
|
|
|
if (file->ignore_unreachables)
|
|
|
|
return 0;
|
2017-06-28 15:11:05 +00:00
|
|
|
|
|
|
|
for_each_insn(file, insn) {
|
2017-06-28 15:11:07 +00:00
|
|
|
if (insn->visited || ignore_unreachable_insn(insn))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
|
|
|
|
return 1;
|
2017-06-28 15:11:05 +00:00
|
|
|
}
|
|
|
|
|
2017-06-28 15:11:07 +00:00
|
|
|
return 0;
|
2017-06-28 15:11:05 +00:00
|
|
|
}
|
|
|
|
|
2019-03-19 00:09:38 +00:00
|
|
|
static struct objtool_file file;
|
|
|
|
|
2018-01-16 16:16:32 +00:00
|
|
|
/*
 * check() - objtool's main validation entry point for one object file.
 * @_objname:	path of the ELF object to open and validate
 * @orc:	when true, also generate ORC unwind data sections
 *
 * Opens the object read/write, decodes its instructions, runs the requested
 * validation passes (accumulating non-fatal warnings), optionally emits ORC
 * sections, and writes the ELF back if it was modified.
 *
 * Returns 0 on success (warnings are not fatal), 1 if the file could not be
 * opened, or a negative error code on fatal failure.
 */
int check(const char *_objname, bool orc)
{
	int ret, warnings = 0;

	objname = _objname;

	file.elf = elf_open_read(objname, O_RDWR);
	if (!file.elf)
		return 1;

	INIT_LIST_HEAD(&file.insn_list);
	hash_init(file.insn_hash);
	/* Heuristic: C objects carry a .comment section; asm objects don't. */
	file.c_file = !vmlinux && find_section_by_name(file.elf, ".comment");
	file.ignore_unreachables = no_unreachable;
	file.hints = false;

	arch_initial_func_cfi_state(&initial_func_cfi);

	ret = decode_sections(&file);
	if (ret < 0)
		goto out;
	warnings += ret;

	/* Nothing to validate in an object with no instructions. */
	if (list_empty(&file.insn_list))
		goto out;

	if (vmlinux && !validate_dup) {
		ret = validate_vmlinux_functions(&file);
		if (ret < 0)
			goto out;

		warnings += ret;
		goto out;
	}

	if (retpoline) {
		ret = validate_retpoline(&file);
		if (ret < 0)
			goto out;	/* was "return ret": use the common exit path */
		warnings += ret;
	}

	ret = validate_functions(&file);
	if (ret < 0)
		goto out;
	warnings += ret;

	ret = validate_unwind_hints(&file, NULL);
	if (ret < 0)
		goto out;
	warnings += ret;

	/*
	 * Reachability relies on visited marks set by the passes above; only
	 * meaningful when those passes completed without warnings.
	 */
	if (!warnings) {
		ret = validate_reachable_instructions(&file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (orc) {
		ret = create_orc(&file);
		if (ret < 0)
			goto out;

		ret = create_orc_sections(&file);
		if (ret < 0)
			goto out;
	}

	/* Write the ELF back only if something (e.g. ORC data) changed it. */
	if (file.elf->changed) {
		ret = elf_write(file.elf);
		if (ret < 0)
			goto out;
	}

out:
	if (ret < 0) {
		/*
		 * Fatal error.  The binary is corrupt or otherwise broken in
		 * some way, or objtool itself is broken.  Fail the kernel
		 * build.
		 */
		return ret;
	}

	return 0;
}
|