// SPDX-License-Identifier: GPL-2.0-only
/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/nospec.h>

#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/xstate.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>
#include <asm/syscall.h>
#include <asm/fsgsbase.h>
#include <asm/io_bitmap.h>

#include "tls.h"
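
/*
 * The 32-bit and 64-bit regset arrays are indexed by separate enums so
 * that neither view needs gaps or aliases for regsets that only exist
 * in the other one.
 */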
enum x86_regset_32 {
	REGSET32_GENERAL,
	REGSET32_FP,
	REGSET32_XFP,
	REGSET32_XSTATE,
	REGSET32_TLS,
	REGSET32_IOPERM,
};

enum x86_regset_64 {
	REGSET64_GENERAL,
	REGSET64_FP,
	REGSET64_IOPERM,
	REGSET64_XSTATE,
	REGSET64_SSP,
};
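
/*
 * REGSET_GENERAL and REGSET_FP are referenced from bitsize-generic code,
 * so provide bitsize-free names and check at build time that the 32-bit
 * and 64-bit enums assign them the same values.
 */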
#define REGSET_GENERAL \
({ \
	BUILD_BUG_ON((int)REGSET32_GENERAL != (int)REGSET64_GENERAL); \
	REGSET32_GENERAL; \
})

#define REGSET_FP \
({ \
	BUILD_BUG_ON((int)REGSET32_FP != (int)REGSET64_FP); \
	REGSET32_FP; \
})

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
#ifdef CONFIG_X86_64
	REG_OFFSET_NAME(r15),
	REG_OFFSET_NAME(r14),
	REG_OFFSET_NAME(r13),
	REG_OFFSET_NAME(r12),
	REG_OFFSET_NAME(r11),
	REG_OFFSET_NAME(r10),
	REG_OFFSET_NAME(r9),
	REG_OFFSET_NAME(r8),
#endif
	REG_OFFSET_NAME(bx),
	REG_OFFSET_NAME(cx),
	REG_OFFSET_NAME(dx),
	REG_OFFSET_NAME(si),
	REG_OFFSET_NAME(di),
	REG_OFFSET_NAME(bp),
	REG_OFFSET_NAME(ax),
#ifdef CONFIG_X86_32
	REG_OFFSET_NAME(ds),
	REG_OFFSET_NAME(es),
	REG_OFFSET_NAME(fs),
	REG_OFFSET_NAME(gs),
#endif
	REG_OFFSET_NAME(orig_ax),
	REG_OFFSET_NAME(ip),
	REG_OFFSET_NAME(cs),
	REG_OFFSET_NAME(flags),
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(ss),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_query_register_name() - query register name from its offset
 * @offset:	the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}

/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32 ((unsigned long) \
	(X86_EFLAGS_CF | X86_EFLAGS_PF | \
	 X86_EFLAGS_AF | X86_EFLAGS_ZF | \
	 X86_EFLAGS_SF | X86_EFLAGS_TF | \
	 X86_EFLAGS_DF | X86_EFLAGS_OF | \
	 X86_EFLAGS_RF | X86_EFLAGS_AC))

/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}

#ifdef CONFIG_X86_32

#define FLAG_MASK	FLAG_MASK_32
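
/*
 * regno is a byte offset into struct user_regs_struct; on 32-bit every
 * register there is one 4-byte word, so regno >> 2 picks the matching
 * slot in struct pt_regs (which starts at ->bx).
 */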
static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	return &regs->bx + (regno >> 2);
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;
	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		if (task == current)
			savesegment(gs, retval);
		else
			retval = task->thread.gs;
	}
	return retval;
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	if (WARN_ON_ONCE(task == current))
		return -EIO;

	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * For %cs and %ss we cannot permit a null selector.
	 * We can permit a bogus selector as long as it has USER_RPL.
	 * Null selectors are fine for other segment registers, but
	 * we will never get back to user mode with invalid %cs or %ss
	 * and will take the trap in iret instead. Much code relies
	 * on user_mode() to distinguish a user trap frame (which can
	 * safely use invalid selectors) from a kernel trap frame.
	 */
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;
		fallthrough;

	default:
		*pt_regs_access(task_pt_regs(task), offset) = value;
		break;

	case offsetof(struct user_regs_struct, gs):
		task->thread.gs = value;
	}

	return 0;
}

#else  /* CONFIG_X86_64 */

#define FLAG_MASK	(FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}
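
/*
 * For fs/gs/ds/es the selector is read straight off the CPU when the
 * task is current, and from the saved thread fields otherwise; cs and
 * ss always come from the saved pt_regs below.
 */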
static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	if (WARN_ON_ONCE(task == current))
		return -EIO;

	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * Writes to FS and GS will change the stored selector. Whether
	 * this changes the segment base as well depends on whether
	 * FSGSBASE is enabled.
	 */

	switch (offset) {
	case offsetof(struct user_regs_struct,fs):
		task->thread.fsindex = value;
		break;
	case offsetof(struct user_regs_struct,gs):
		task->thread.gsindex = value;
		break;
	case offsetof(struct user_regs_struct,ds):
		task->thread.ds = value;
		break;
	case offsetof(struct user_regs_struct,es):
		task->thread.es = value;
		break;

		/*
		 * Can't actually change these in 64-bit mode.
		 */
	case offsetof(struct user_regs_struct,cs):
		if (unlikely(value == 0))
			return -EIO;
		task_pt_regs(task)->cs = value;
		break;
	case offsetof(struct user_regs_struct,ss):
		if (unlikely(value == 0))
			return -EIO;
		task_pt_regs(task)->ss = value;
		break;
	}

	return 0;
}

#endif	/* CONFIG_X86_32 */

static unsigned long get_flags(struct task_struct *task)
{
	unsigned long retval = task_pt_regs(task)->flags;

	/*
	 * If the debugger set TF, hide it from the readout.
	 */
	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		retval &= ~X86_EFLAGS_TF;

	return retval;
}

static int set_flags(struct task_struct *task, unsigned long value)
{
	struct pt_regs *regs = task_pt_regs(task);

	/*
	 * If the user value contains TF, mark that
	 * it was not "us" (the debugger) that set it.
	 * If not, make sure it stays set if we had.
	 */
	if (value & X86_EFLAGS_TF)
		clear_tsk_thread_flag(task, TIF_FORCED_TF);
	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		value |= X86_EFLAGS_TF;

	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

	return 0;
}
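
/*
 * putreg()/getreg() take a byte offset into struct user_regs_struct:
 * segment registers, flags and the 64-bit fs/gs bases get special
 * handling, everything else maps straight onto the saved pt_regs word.
 */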
static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct,fs_base):
		if (value >= TASK_SIZE_MAX)
			return -EIO;
		x86_fsbase_write_task(child, value);
		return 0;
	case offsetof(struct user_regs_struct,gs_base):
		if (value >= TASK_SIZE_MAX)
			return -EIO;
		x86_gsbase_write_task(child, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}

static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base):
		return x86_fsbase_read_task(task);
	case offsetof(struct user_regs_struct, gs_base):
		return x86_gsbase_read_task(task);
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}
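
/*
 * genregs_get()/genregs_set() back the general-purpose regset: they walk
 * the register file one word at a time through getreg()/putreg() so the
 * segment, flags and fs/gs base special cases stay in one place.
 */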
static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       struct membuf to)
{
	int reg;

	for (reg = 0; to.left; reg++)
		membuf_store(&to, getreg(target, reg * sizeof(unsigned long)));
	return 0;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count >= sizeof(*k) && !ret) {
			ret = putreg(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count >= sizeof(*u) && !ret) {
			unsigned long word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}
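
/*
 * ptrace_triggered() is the callback attached to every ptrace hardware
 * breakpoint; it records which slot fired in the thread's virtual DR6
 * so the debugger can read it back via PTRACE_PEEKUSR.
 */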
static void ptrace_triggered(struct perf_event *bp,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	int i;
	struct thread_struct *thread = &(current->thread);

	/*
	 * Store in the virtual DR6 register the fact that the breakpoint
	 * was hit so the thread's debugger will see it.
	 */
	for (i = 0; i < HBP_NUM; i++) {
		if (thread->ptrace_bps[i] == bp)
			break;
	}

	thread->virtual_dr6 |= (DR_TRAP0 << i);
}

/*
 * Walk through every ptrace breakpoint for this thread and
 * build the dr7 value on top of their attributes.
 */
static unsigned long ptrace_get_dr7(struct perf_event *bp[])
{
	int i;
	int dr7 = 0;
	struct arch_hw_breakpoint *info;

	for (i = 0; i < HBP_NUM; i++) {
		if (bp[i] && !bp[i]->attr.disabled) {
			info = counter_arch_bp(bp[i]);
			dr7 |= encode_dr7(i, info->len, info->type);
		}
	}

	return dr7;
}
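
/*
 * Translate a ptrace breakpoint len/type pair into the generic
 * perf_event_attr fields; attr is only touched when the conversion
 * succeeds.
 */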
static int ptrace_fill_bp_fields(struct perf_event_attr *attr,
				 int len, int type, bool disabled)
{
	int err, bp_len, bp_type;

	err = arch_bp_generic_fields(len, type, &bp_len, &bp_type);
	if (!err) {
		attr->bp_len = bp_len;
		attr->bp_type = bp_type;
		attr->disabled = disabled;
	}

	return err;
}

static struct perf_event *
ptrace_register_breakpoint(struct task_struct *tsk, int len, int type,
			   unsigned long addr, bool disabled)
{
	struct perf_event_attr attr;
	int err;

	ptrace_breakpoint_init(&attr);
	attr.bp_addr = addr;

	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
	if (err)
		return ERR_PTR(err);

	return register_user_hw_breakpoint(&attr, ptrace_triggered,
					   NULL, tsk);
}

static int ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
				    int disabled)
{
	struct perf_event_attr attr = bp->attr;
	int err;

	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

/*
 * Handle ptrace writes to debug register 7.
 */
static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
{
	struct thread_struct *thread = &tsk->thread;
	unsigned long old_dr7;
	bool second_pass = false;
	int i, rc, ret = 0;

	data &= ~DR_CONTROL_RESERVED;
	old_dr7 = ptrace_get_dr7(thread->ptrace_bps);

restore:
	rc = 0;
	for (i = 0; i < HBP_NUM; i++) {
		unsigned len, type;
		bool disabled = !decode_dr7(data, i, &len, &type);
		struct perf_event *bp = thread->ptrace_bps[i];

		if (!bp) {
			if (disabled)
				continue;

			bp = ptrace_register_breakpoint(tsk,
					len, type, 0, disabled);
			if (IS_ERR(bp)) {
				rc = PTR_ERR(bp);
				break;
			}

			thread->ptrace_bps[i] = bp;
			continue;
		}

		rc = ptrace_modify_breakpoint(bp, len, type, disabled);
		if (rc)
			break;
	}

	/* Restore if the first pass failed, second_pass shouldn't fail. */
	if (rc && !WARN_ON(second_pass)) {
		ret = rc;
		data = old_dr7;
		second_pass = true;
		goto restore;
	}

	return ret;
}

/*
 * Handle PTRACE_PEEKUSR calls for the debug register area.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
{
	struct thread_struct *thread = &tsk->thread;
	unsigned long val = 0;

	if (n < HBP_NUM) {
		int index = array_index_nospec(n, HBP_NUM);
		struct perf_event *bp = thread->ptrace_bps[index];

		if (bp)
			val = bp->hw.info.address;
	} else if (n == 6) {
		val = thread->virtual_dr6 ^ DR6_RESERVED; /* Flip back to arch polarity */
	} else if (n == 7) {
		val = thread->ptrace_dr7;
	}
	return val;
}
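
/*
 * Point breakpoint slot nr at addr.  An empty slot is filled with a stub:
 * a 1-byte write watchpoint registered in the disabled state, so that a
 * later DR7 write can arm it with the real length and type.
 */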
static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
				      unsigned long addr)
{
	struct thread_struct *t = &tsk->thread;
	struct perf_event *bp = t->ptrace_bps[nr];
	int err = 0;

	if (!bp) {
		/*
		 * Put stub len and type to create an inactive but correct bp.
		 *
		 * CHECKME: the previous code returned -EIO if the addr wasn't
		 * a valid task virtual addr. The new one will return -EINVAL in
		 * this case.
		 * -EINVAL may be what we want for in-kernel breakpoints users,
		 * but -EIO looks better for ptrace, since we refuse a register
		 * writing for the user. And anyway this is the previous
		 * behaviour.
		 */
		bp = ptrace_register_breakpoint(tsk,
				X86_BREAKPOINT_LEN_1, X86_BREAKPOINT_WRITE,
				addr, true);
		if (IS_ERR(bp))
			err = PTR_ERR(bp);
		else
			t->ptrace_bps[nr] = bp;
	} else {
		struct perf_event_attr attr = bp->attr;

		attr.bp_addr = addr;
		err = modify_user_hw_breakpoint(bp, &attr);
	}

	return err;
}

/*
 * Handle PTRACE_POKEUSR calls for the debug register area.
 */
static int ptrace_set_debugreg(struct task_struct *tsk, int n,
			       unsigned long val)
{
	struct thread_struct *thread = &tsk->thread;
	/* There are no DR4 or DR5 registers */
	int rc = -EIO;

	if (n < HBP_NUM) {
		rc = ptrace_set_breakpoint_addr(tsk, n, val);
	} else if (n == 6) {
		thread->virtual_dr6 = val ^ DR6_RESERVED; /* Flip to positive polarity */
		rc = 0;
	} else if (n == 7) {
		rc = ptrace_write_dr7(tsk, val);
		if (!rc)
			thread->ptrace_dr7 = val;
	}
	return rc;
}

/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */
static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	struct io_bitmap *iobm = target->thread.io_bitmap;

	return iobm ? DIV_ROUND_UP(iobm->max, regset->size) : 0;
}

static int ioperm_get(struct task_struct *target,
		      const struct user_regset *regset,
		      struct membuf to)
{
	struct io_bitmap *iobm = target->thread.io_bitmap;

	if (!iobm)
		return -ENXIO;

	return membuf_write(&to, iobm->bitmap, IO_BITMAP_BYTES);
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif
#ifdef CONFIG_X86_64
static const struct user_regset_view user_x86_64_view; /* Initialized below. */
#endif
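
/*
 * arch_ptrace() handles the x86-specific requests: PTRACE_PEEKUSR /
 * PTRACE_POKEUSR accesses to the USER area (including the debug
 * registers) and the legacy GETREGS/SETREGS/GETFPREGS/SETFPREGS calls,
 * which go through the bitness-appropriate regset view chosen below.
 */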
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

#ifdef CONFIG_X86_64
	/* This is native 64-bit ptrace() */
	const struct user_regset_view *regset_view = &user_x86_64_view;
#else
	/* This is native 32-bit ptrace() */
	const struct user_regset_view *regset_view = &user_x86_32_view;
#endif

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   regset_view,
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     regset_view,
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   regset_view,
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);
|
|
|
|
|
|
|
|
case PTRACE_SETFPREGS: /* Set the child FPU state. */
|
|
|
|
return copy_regset_from_user(child,
|
x86/ptrace: Clean up PTRACE_GETREGS/PTRACE_PUTREGS regset selection
task_user_regset_view() has nonsensical semantics, but those semantics
appear to be relied on by existing users of PTRACE_GETREGSET and
PTRACE_SETREGSET. (See added comments below for details.)
It shouldn't be used for PTRACE_GETREGS or PTRACE_SETREGS, though. A
native 64-bit ptrace() call and an x32 ptrace() call using GETREGS
or SETREGS wants the 64-bit regset views, and a 32-bit ptrace() call
(native or compat) should use the 32-bit regset.
task_user_regset_view() almost does this except that it will
malfunction if a ptracer is itself ptraced and the outer ptracer
modifies CS on entry to a ptrace() syscall. Hopefully that has never
happened. (The compat ptrace() code already hardcoded the 32-bit
regset, so this change has no effect on that path.)
Improve the situation and deobfuscate the code by hardcoding the
64-bit view in the x32 ptrace() and selecting the view based on the
kernel config in the native ptrace().
I tried to figure out the history behind this API. I naïvely assumed
that PTRACE_GETREGSET and PTRACE_SETREGSET were ancient APIs that
predated compat, but no. They were introduced by
2225a122ae26 ("ptrace: Add support for generic PTRACE_GETREGSET/PTRACE_SETREGSET")
in 2010, and they are simply a poor design. ELF core dumps have the
ELF e_machine field and a bunch of register sets in ELF notes, and the
pair (e_machine, NT_XXX) indicates the format of the regset blob. But
the new PTRACE_GET/SETREGSET API coopted the NT_XXX numbering without
any way to specify which e_machine was in effect. This is especially
bad on x86, where a process can freely switch between 32-bit and
64-bit mode, and, in fact, the PTRACE_SETREGSET call itself can cause
this switch to happen. Oops.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/9daa791d0c7eaebd59c5bc2b2af1b0e7bebe707d.1612375698.git.luto@kernel.org
2021-02-03 18:09:58 +00:00
|
|
|
regset_view,
|
2008-01-30 12:31:54 +00:00
|
|
|
REGSET_FP,
|
|
|
|
0, sizeof(struct user_i387_struct),
|
|
|
|
datap);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-01-30 12:31:01 +00:00
|
|
|
#ifdef CONFIG_X86_32
|
2008-01-30 12:31:54 +00:00
|
|
|
case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */
|
|
|
|
return copy_regset_to_user(child, &user_x86_32_view,
|
x86: Separate out x86_regset for 32 and 64 bit
In fill_thread_core_info() the ptrace accessible registers are collected
for a core file to be written out as notes. The note array is allocated
from a size calculated by iterating the user regset view, and counting the
regsets that have a non-zero core_note_type. However, this only allows for
there to be non-zero core_note_type at the end of the regset view. If
there are any in the middle, fill_thread_core_info() will overflow the
note allocation, as it iterates over the size of the view and the
allocation would be smaller than that.
To apparently avoid this problem, x86_32_regsets and x86_64_regsets need
to be constructed in a special way. They both draw their indices from a
shared enum x86_regset, but 32 bit and 64 bit don't all support the same
regsets and can be compiled in at the same time in the case of
IA32_EMULATION. So this enum has to be laid out in a special way such that
there are no gaps for both x86_32_regsets and x86_64_regsets. This
involves ordering them just right by creating aliases for enums that
are only in one view or the other, or creating multiple versions like
REGSET32_IOPERM/REGSET64_IOPERM.
So the collection of the registers tries to minimize the size of the
allocation, but it doesn’t quite work. Then the x86 ptrace side works
around it by constructing the enum just right to avoid a problem. In the
end there is no functional problem, but it is somewhat strange and
fragile.
It could also be improved like this [1], by better utilizing the smaller
array, but this still wastes space in the regset arrays if they are not
carefully crafted to avoid gaps. Instead, just fully separate out the
enums and give them separate 32 and 64 enum names. Add some bitsize-free
defines for REGSET_GENERAL and REGSET_FP since they are the only two
referred to in bitsize generic code.
While introducing a bunch of new 32/64 enums, change the pattern of the
name from REGSET_FOO32 to REGSET32_FOO to better indicate that the 32 is
in reference to the CPU mode and not the register size, as suggested by
Eric Biederman.
This should have no functional change and is only changing how constants
are generated and referred to.
[1] https://lore.kernel.org/lkml/20180717162502.32274-1-yu-cheng.yu@intel.com/
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lore.kernel.org/all/20221021221803.10910-2-rick.p.edgecombe%40intel.com
2022-10-21 22:18:02 +00:00
|
|
|
REGSET32_XFP,
|
2008-01-30 12:31:54 +00:00
|
|
|
0, sizeof(struct user_fxsr_struct),
|
2008-06-30 21:02:41 +00:00
|
|
|
datap) ? -EIO : 0;
|
2008-01-30 12:31:54 +00:00
|
|
|
|
|
|
|
case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */
|
|
|
|
return copy_regset_from_user(child, &user_x86_32_view,
|
x86: Separate out x86_regset for 32 and 64 bit
In fill_thread_core_info() the ptrace accessible registers are collected
for a core file to be written out as notes. The note array is allocated
from a size calculated by iterating the user regset view, and counting the
regsets that have a non-zero core_note_type. However, this only allows for
there to be non-zero core_note_type at the end of the regset view. If
there are any in the middle, fill_thread_core_info() will overflow the
note allocation, as it iterates over the size of the view and the
allocation would be smaller than that.
To apparently avoid this problem, x86_32_regsets and x86_64_regsets need
to be constructed in a special way. They both draw their indices from a
shared enum x86_regset, but 32 bit and 64 bit don't all support the same
regsets and can be compiled in at the same time in the case of
IA32_EMULATION. So this enum has to be laid out in a special way such that
there are no gaps for both x86_32_regsets and x86_64_regsets. This
involves ordering them just right by creating aliases for enums that
are only in one view or the other, or creating multiple versions like
REGSET32_IOPERM/REGSET64_IOPERM.
So the collection of the registers tries to minimize the size of the
allocation, but it doesn’t quite work. Then the x86 ptrace side works
around it by constructing the enum just right to avoid a problem. In the
end there is no functional problem, but it is somewhat strange and
fragile.
It could also be improved like this [1], by better utilizing the smaller
array, but this still wastes space in the regset arrays if they are not
carefully crafted to avoid gaps. Instead, just fully separate out the
enums and give them separate 32 and 64 enum names. Add some bitsize-free
defines for REGSET_GENERAL and REGSET_FP since they are the only two
referred to in bitsize generic code.
While introducing a bunch of new 32/64 enums, change the pattern of the
name from REGSET_FOO32 to REGSET32_FOO to better indicate that the 32 is
in reference to the CPU mode and not the register size, as suggested by
Eric Biederman.
This should have no functional change and is only changing how constants
are generated and referred to.
[1] https://lore.kernel.org/lkml/20180717162502.32274-1-yu-cheng.yu@intel.com/
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lore.kernel.org/all/20221021221803.10910-2-rick.p.edgecombe%40intel.com
2022-10-21 22:18:02 +00:00
|
|
|
REGSET32_XFP,
|
2008-01-30 12:31:54 +00:00
|
|
|
0, sizeof(struct user_fxsr_struct),
|
2008-06-30 21:02:41 +00:00
|
|
|
datap) ? -EIO : 0;
|
2008-01-30 12:31:01 +00:00
|
|
|
#endif
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-01-30 12:31:01 +00:00
|
|
|
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
|
2005-04-16 22:20:36 +00:00
|
|
|
case PTRACE_GET_THREAD_AREA:
|
2010-10-27 22:33:47 +00:00
|
|
|
if ((int) addr < 0)
|
2008-01-30 12:30:46 +00:00
|
|
|
return -EIO;
|
|
|
|
ret = do_get_thread_area(child, addr,
|
2010-10-27 22:33:48 +00:00
|
|
|
(struct user_desc __user *)data);
|
2005-04-16 22:20:36 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case PTRACE_SET_THREAD_AREA:
|
2010-10-27 22:33:47 +00:00
|
|
|
if ((int) addr < 0)
|
2008-01-30 12:30:46 +00:00
|
|
|
return -EIO;
|
|
|
|
ret = do_set_thread_area(child, addr,
|
2010-10-27 22:33:48 +00:00
|
|
|
(struct user_desc __user *)data, 0);
|
2005-04-16 22:20:36 +00:00
|
|
|
break;
|
2008-01-30 12:31:01 +00:00
|
|
|
#endif
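For the two thread-area requests above, addr selects the TLS entry number and data points at a struct user_desc to read or write. A hedged userspace sketch of the read side (the helper name is made up for illustration):
#include <asm/ldt.h>		/* struct user_desc */
#include <string.h>
#include <sys/ptrace.h>
#include <sys/types.h>
static long read_tls_entry(pid_t pid, int idx, struct user_desc *desc)
{
	memset(desc, 0, sizeof(*desc));
	/* addr = TLS entry number, data = buffer to fill in */
	return ptrace(PTRACE_GET_THREAD_AREA, pid, (void *)(long)idx, desc);
}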
|
|
|
|
|
|
|
|
#ifdef CONFIG_X86_64
|
|
|
|
/* normal 64bit interface to access TLS data.
|
|
|
|
Works just like arch_prctl, except that the arguments
|
|
|
|
are reversed. */
|
|
|
|
case PTRACE_ARCH_PRCTL:
|
2017-03-20 08:16:22 +00:00
|
|
|
ret = do_arch_prctl_64(child, data, addr);
|
2008-01-30 12:31:01 +00:00
|
|
|
break;
|
|
|
|
#endif
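As the comment above says, the operands are swapped relative to arch_prctl(code, arg): for this request addr carries the argument and data carries the code, matching do_arch_prctl_64(child, data, addr). A hypothetical tracer-side sketch for reading the tracee's FS base (the helper name and the fallback define are assumptions):
#include <asm/prctl.h>		/* ARCH_GET_FS */
#include <sys/ptrace.h>
#include <sys/types.h>
#ifndef PTRACE_ARCH_PRCTL
# define PTRACE_ARCH_PRCTL 30	/* value from asm/ptrace-abi.h */
#endif
static long read_fs_base(pid_t pid, unsigned long *fsbase)
{
	/* addr = where to store the base, data = the arch_prctl code */
	return ptrace(PTRACE_ARCH_PRCTL, pid, fsbase, (void *)ARCH_GET_FS);
}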
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
default:
|
|
|
|
ret = ptrace_request(child, request, addr, data);
|
|
|
|
break;
|
|
|
|
}
|
2008-01-30 12:30:52 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2008-01-30 12:31:01 +00:00
|
|
|
#ifdef CONFIG_IA32_EMULATION
|
|
|
|
|
2008-01-30 12:31:01 +00:00
|
|
|
#include <linux/compat.h>
|
|
|
|
#include <linux/syscalls.h>
|
|
|
|
#include <asm/ia32.h>
|
2008-01-30 12:31:01 +00:00
|
|
|
#include <asm/user32.h>
|
|
|
|
|
|
|
|
#define R32(l,q) \
|
|
|
|
case offsetof(struct user32, regs.l): \
|
|
|
|
regs->q = value; break
|
|
|
|
|
|
|
|
#define SEG32(rs) \
|
|
|
|
case offsetof(struct user32, regs.rs): \
|
|
|
|
return set_segment_reg(child, \
|
|
|
|
offsetof(struct user_regs_struct, rs), \
|
|
|
|
value); \
|
|
|
|
break
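Purely as an illustration (this note is not in the original source), an invocation such as R32(ebx, bx) in putreg32() below expands to roughly:
/*
 *	case offsetof(struct user32, regs.ebx):
 *		regs->bx = value; break;
 *
 * i.e. each offset in the 32-bit user area is mapped onto the matching
 * field of the 64-bit pt_regs, while SEG32() routes the segment
 * registers through set_segment_reg() instead.
 */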
|
|
|
|
|
|
|
|
static int putreg32(struct task_struct *child, unsigned regno, u32 value)
|
|
|
|
{
|
|
|
|
struct pt_regs *regs = task_pt_regs(child);
|
2020-06-26 17:24:29 +00:00
|
|
|
int ret;
|
2008-01-30 12:31:01 +00:00
|
|
|
|
|
|
|
switch (regno) {
|
|
|
|
|
|
|
|
SEG32(cs);
|
|
|
|
SEG32(ds);
|
|
|
|
SEG32(es);
|
2020-06-26 17:24:29 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* A 32-bit ptracer on a 64-bit kernel expects that writing
|
|
|
|
* FS or GS will also update the base. This is needed for
|
|
|
|
* operations like PTRACE_SETREGS to fully restore a saved
|
|
|
|
* CPU state.
|
|
|
|
*/
|
|
|
|
|
|
|
|
case offsetof(struct user32, regs.fs):
|
|
|
|
ret = set_segment_reg(child,
|
|
|
|
offsetof(struct user_regs_struct, fs),
|
|
|
|
value);
|
|
|
|
if (ret == 0)
|
|
|
|
child->thread.fsbase =
|
|
|
|
x86_fsgsbase_read_task(child, value);
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
case offsetof(struct user32, regs.gs):
|
|
|
|
ret = set_segment_reg(child,
|
|
|
|
offsetof(struct user_regs_struct, gs),
|
|
|
|
value);
|
|
|
|
if (ret == 0)
|
|
|
|
child->thread.gsbase =
|
|
|
|
x86_fsgsbase_read_task(child, value);
|
|
|
|
return ret;
|
|
|
|
|
2008-01-30 12:31:01 +00:00
|
|
|
SEG32(ss);
|
|
|
|
|
|
|
|
R32(ebx, bx);
|
|
|
|
R32(ecx, cx);
|
|
|
|
R32(edx, dx);
|
|
|
|
R32(edi, di);
|
|
|
|
R32(esi, si);
|
|
|
|
R32(ebp, bp);
|
|
|
|
R32(eax, ax);
|
|
|
|
R32(eip, ip);
|
|
|
|
R32(esp, sp);
|
|
|
|
|
2008-02-29 03:57:07 +00:00
|
|
|
case offsetof(struct user32, regs.orig_eax):
|
|
|
|
/*
|
2016-07-27 06:12:22 +00:00
|
|
|
* Warning: bizarre corner case fixup here. A 32-bit
|
|
|
|
* debugger setting orig_eax to -1 wants to disable
|
|
|
|
* syscall restart. Make sure that the syscall
|
|
|
|
* restart code sign-extends orig_ax. Also make sure
|
|
|
|
* we interpret the -ERESTART* codes correctly if
|
|
|
|
* loaded into regs->ax in case the task is not
|
|
|
|
* actually still sitting at the exit from a 32-bit
|
|
|
|
* syscall with TS_COMPAT still set.
|
2008-02-29 03:57:07 +00:00
|
|
|
*/
|
2009-09-23 03:12:07 +00:00
|
|
|
regs->orig_ax = value;
|
2021-05-10 18:53:15 +00:00
|
|
|
if (syscall_get_nr(child, regs) != -1)
|
2018-01-28 18:38:50 +00:00
|
|
|
child->thread_info.status |= TS_I386_REGS_POKED;
|
2008-02-29 03:57:07 +00:00
|
|
|
break;
|
|
|
|
|
2008-01-30 12:31:01 +00:00
|
|
|
case offsetof(struct user32, regs.eflags):
|
|
|
|
return set_flags(child, value);
|
|
|
|
|
|
|
|
case offsetof(struct user32, u_debugreg[0]) ...
|
|
|
|
offsetof(struct user32, u_debugreg[7]):
|
|
|
|
regno -= offsetof(struct user32, u_debugreg[0]);
|
|
|
|
return ptrace_set_debugreg(child, regno / 4, value);
|
|
|
|
|
|
|
|
default:
|
|
|
|
if (regno > sizeof(struct user32) || (regno & 3))
|
|
|
|
return -EIO;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Other dummy fields in the virtual user structure
|
|
|
|
* are ignored
|
|
|
|
*/
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#undef R32
|
|
|
|
#undef SEG32
|
|
|
|
|
|
|
|
#define R32(l,q) \
|
|
|
|
case offsetof(struct user32, regs.l): \
|
|
|
|
*val = regs->q; break
|
|
|
|
|
|
|
|
#define SEG32(rs) \
|
|
|
|
case offsetof(struct user32, regs.rs): \
|
|
|
|
*val = get_segment_reg(child, \
|
|
|
|
offsetof(struct user_regs_struct, rs)); \
|
|
|
|
break
|
|
|
|
|
|
|
|
static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
|
|
|
|
{
|
|
|
|
struct pt_regs *regs = task_pt_regs(child);
|
|
|
|
|
|
|
|
switch (regno) {
|
|
|
|
|
|
|
|
SEG32(ds);
|
|
|
|
SEG32(es);
|
|
|
|
SEG32(fs);
|
|
|
|
SEG32(gs);
|
|
|
|
|
|
|
|
R32(cs, cs);
|
|
|
|
R32(ss, ss);
|
|
|
|
R32(ebx, bx);
|
|
|
|
R32(ecx, cx);
|
|
|
|
R32(edx, dx);
|
|
|
|
R32(edi, di);
|
|
|
|
R32(esi, si);
|
|
|
|
R32(ebp, bp);
|
|
|
|
R32(eax, ax);
|
|
|
|
R32(orig_eax, orig_ax);
|
|
|
|
R32(eip, ip);
|
|
|
|
R32(esp, sp);
|
|
|
|
|
|
|
|
case offsetof(struct user32, regs.eflags):
|
|
|
|
*val = get_flags(child);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case offsetof(struct user32, u_debugreg[0]) ...
|
|
|
|
offsetof(struct user32, u_debugreg[7]):
|
|
|
|
regno -= offsetof(struct user32, u_debugreg[0]);
|
|
|
|
*val = ptrace_get_debugreg(child, regno / 4);
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
if (regno > sizeof(struct user32) || (regno & 3))
|
|
|
|
return -EIO;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Other dummy fields in the virtual user structure
|
|
|
|
* are ignored
|
|
|
|
*/
|
|
|
|
*val = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#undef R32
|
|
|
|
#undef SEG32
|
|
|
|
|
2008-01-30 12:31:52 +00:00
|
|
|
static int genregs32_get(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
2020-02-18 17:14:34 +00:00
|
|
|
struct membuf to)
|
2008-01-30 12:31:52 +00:00
|
|
|
{
|
2020-02-18 17:14:34 +00:00
|
|
|
int reg;
|
2008-01-30 12:31:52 +00:00
|
|
|
|
2020-02-18 17:14:34 +00:00
|
|
|
for (reg = 0; to.left; reg++) {
|
|
|
|
u32 val;
|
|
|
|
getreg32(target, reg * 4, &val);
|
|
|
|
membuf_store(&to, val);
|
|
|
|
}
|
2008-01-30 12:31:52 +00:00
|
|
|
return 0;
|
|
|
|
}
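A brief descriptive note on the interface used above (added here, not part of the original file):
/*
 * struct membuf (linux/regset.h) is a cursor over a kernel buffer: .left
 * holds the bytes remaining, and membuf_store() copies one value at the
 * current position and advances the cursor. The loop above therefore
 * emits the 32-bit register image one u32 at a time until the
 * caller-supplied buffer is exhausted.
 */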
|
|
|
|
|
|
|
|
static int genregs32_set(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
const void *kbuf, const void __user *ubuf)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
if (kbuf) {
|
|
|
|
const compat_ulong_t *k = kbuf;
|
2009-12-17 15:04:56 +00:00
|
|
|
while (count >= sizeof(*k) && !ret) {
|
2008-02-22 04:37:24 +00:00
|
|
|
ret = putreg32(target, pos, *k++);
|
2008-01-30 12:31:52 +00:00
|
|
|
count -= sizeof(*k);
|
|
|
|
pos += sizeof(*k);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
const compat_ulong_t __user *u = ubuf;
|
2009-12-17 15:04:56 +00:00
|
|
|
while (count >= sizeof(*u) && !ret) {
|
2008-01-30 12:31:52 +00:00
|
|
|
compat_ulong_t word;
|
|
|
|
ret = __get_user(word, u++);
|
|
|
|
if (ret)
|
|
|
|
break;
|
2008-02-22 04:37:24 +00:00
|
|
|
ret = putreg32(target, pos, word);
|
2008-01-30 12:31:52 +00:00
|
|
|
count -= sizeof(*u);
|
|
|
|
pos += sizeof(*u);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2015-06-22 11:55:14 +00:00
|
|
|
static long ia32_arch_ptrace(struct task_struct *child, compat_long_t request,
|
|
|
|
compat_ulong_t caddr, compat_ulong_t cdata)
|
|
|
|
{
|
|
|
|
unsigned long addr = caddr;
|
|
|
|
unsigned long data = cdata;
|
|
|
|
void __user *datap = compat_ptr(data);
|
|
|
|
int ret;
|
|
|
|
__u32 val;
|
|
|
|
|
|
|
|
switch (request) {
|
|
|
|
case PTRACE_PEEKUSR:
|
|
|
|
ret = getreg32(child, addr, &val);
|
|
|
|
if (ret == 0)
|
|
|
|
ret = put_user(val, (__u32 __user *)datap);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case PTRACE_POKEUSR:
|
|
|
|
ret = putreg32(child, addr, data);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case PTRACE_GETREGS: /* Get all gp regs from the child. */
|
|
|
|
return copy_regset_to_user(child, &user_x86_32_view,
|
|
|
|
REGSET_GENERAL,
|
|
|
|
0, sizeof(struct user_regs_struct32),
|
|
|
|
datap);
|
|
|
|
|
|
|
|
case PTRACE_SETREGS: /* Set all gp regs in the child. */
|
|
|
|
return copy_regset_from_user(child, &user_x86_32_view,
|
|
|
|
REGSET_GENERAL, 0,
|
|
|
|
sizeof(struct user_regs_struct32),
|
|
|
|
datap);
|
|
|
|
|
|
|
|
case PTRACE_GETFPREGS: /* Get the child FPU state. */
|
|
|
|
return copy_regset_to_user(child, &user_x86_32_view,
|
|
|
|
REGSET_FP, 0,
|
|
|
|
sizeof(struct user_i387_ia32_struct),
|
|
|
|
datap);
|
|
|
|
|
|
|
|
case PTRACE_SETFPREGS: /* Set the child FPU state. */
|
|
|
|
return copy_regset_from_user(
|
|
|
|
child, &user_x86_32_view, REGSET_FP,
|
|
|
|
0, sizeof(struct user_i387_ia32_struct), datap);
|
|
|
|
|
|
|
|
case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */
|
|
|
|
return copy_regset_to_user(child, &user_x86_32_view,
|
x86: Separate out x86_regset for 32 and 64 bit
In fill_thread_core_info() the ptrace accessible registers are collected
for a core file to be written out as notes. The note array is allocated
from a size calculated by iterating the user regset view, and counting the
regsets that have a non-zero core_note_type. However, this only allows for
there to be non-zero core_note_type at the end of the regset view. If
there are any in the middle, fill_thread_core_info() will overflow the
note allocation, as it iterates over the size of the view and the
allocation would be smaller than that.
To apparently avoid this problem, x86_32_regsets and x86_64_regsets need
to be constructed in a special way. They both draw their indices from a
shared enum x86_regset, but 32 bit and 64 bit don't all support the same
regsets and can be compiled in at the same time in the case of
IA32_EMULATION. So this enum has to be laid out in a special way such that
there are no gaps for both x86_32_regsets and x86_64_regsets. This
involves ordering them just right by creating aliases for enums that
are only in one view or the other, or creating multiple versions like
REGSET32_IOPERM/REGSET64_IOPERM.
So the collection of the registers tries to minimize the size of the
allocation, but it doesn’t quite work. Then the x86 ptrace side works
around it by constructing the enum just right to avoid a problem. In the
end there is no functional problem, but it is somewhat strange and
fragile.
It could also be improved like this [1], by better utilizing the smaller
array, but this still wastes space in the regset arrays if they are not
carefully crafted to avoid gaps. Instead, just fully separate out the
enums and give them separate 32 and 64 enum names. Add some bitsize-free
defines for REGSET_GENERAL and REGSET_FP since they are the only two
referred to in bitsize generic code.
While introducing a bunch of new 32/64 enums, change the pattern of the
name from REGSET_FOO32 to REGSET32_FOO to better indicate that the 32 is
in reference to the CPU mode and not the register size, as suggested by
Eric Biederman.
This should have no functional change and is only changing how constants
are generated and referred to.
[1] https://lore.kernel.org/lkml/20180717162502.32274-1-yu-cheng.yu@intel.com/
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lore.kernel.org/all/20221021221803.10910-2-rick.p.edgecombe%40intel.com
2022-10-21 22:18:02 +00:00
|
|
|
REGSET32_XFP, 0,
|
2015-06-22 11:55:14 +00:00
|
|
|
sizeof(struct user32_fxsr_struct),
|
|
|
|
datap);
|
|
|
|
|
|
|
|
case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */
|
|
|
|
return copy_regset_from_user(child, &user_x86_32_view,
|
x86: Separate out x86_regset for 32 and 64 bit
In fill_thread_core_info() the ptrace accessible registers are collected
for a core file to be written out as notes. The note array is allocated
from a size calculated by iterating the user regset view, and counting the
regsets that have a non-zero core_note_type. However, this only allows for
there to be non-zero core_note_type at the end of the regset view. If
there are any in the middle, fill_thread_core_info() will overflow the
note allocation, as it iterates over the size of the view and the
allocation would be smaller than that.
To apparently avoid this problem, x86_32_regsets and x86_64_regsets need
to be constructed in a special way. They both draw their indices from a
shared enum x86_regset, but 32 bit and 64 bit don't all support the same
regsets and can be compiled in at the same time in the case of
IA32_EMULATION. So this enum has to be laid out in a special way such that
there are no gaps for both x86_32_regsets and x86_64_regsets. This
involves ordering them just right by creating aliases for enums that
are only in one view or the other, or creating multiple versions like
REGSET32_IOPERM/REGSET64_IOPERM.
So the collection of the registers tries to minimize the size of the
allocation, but it doesn’t quite work. Then the x86 ptrace side works
around it by constructing the enum just right to avoid a problem. In the
end there is no functional problem, but it is somewhat strange and
fragile.
It could also be improved like this [1], by better utilizing the smaller
array, but this still wastes space in the regset arrays if they are not
carefully crafted to avoid gaps. Instead, just fully separate out the
enums and give them separate 32 and 64 enum names. Add some bitsize-free
defines for REGSET_GENERAL and REGSET_FP since they are the only two
referred to in bitsize generic code.
While introducing a bunch of new 32/64 enums, change the pattern of the
name from REGSET_FOO32 to REGSET32_FOO to better indicate that the 32 is
in reference to the CPU mode and not the register size, as suggested by
Eric Biederman.
This should have no functional change and is only changing how constants
are generated and referred to.
[1] https://lore.kernel.org/lkml/20180717162502.32274-1-yu-cheng.yu@intel.com/
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lore.kernel.org/all/20221021221803.10910-2-rick.p.edgecombe%40intel.com
2022-10-21 22:18:02 +00:00
|
|
|
REGSET32_XFP, 0,
|
2015-06-22 11:55:14 +00:00
|
|
|
sizeof(struct user32_fxsr_struct),
|
|
|
|
datap);
|
|
|
|
|
|
|
|
case PTRACE_GET_THREAD_AREA:
|
|
|
|
case PTRACE_SET_THREAD_AREA:
|
|
|
|
return arch_ptrace(child, request, addr, data);
|
|
|
|
|
|
|
|
default:
|
|
|
|
return compat_ptrace_request(child, request, addr, data);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_IA32_EMULATION */
|
|
|
|
|
2012-03-05 23:32:11 +00:00
|
|
|
#ifdef CONFIG_X86_X32_ABI
|
|
|
|
static long x32_arch_ptrace(struct task_struct *child,
|
|
|
|
compat_long_t request, compat_ulong_t caddr,
|
|
|
|
compat_ulong_t cdata)
|
|
|
|
{
|
|
|
|
unsigned long addr = caddr;
|
|
|
|
unsigned long data = cdata;
|
|
|
|
void __user *datap = compat_ptr(data);
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
switch (request) {
|
|
|
|
/* Read 32bits at location addr in the USER area. Only allow
|
|
|
|
to return the lower 32bits of segment and debug registers. */
|
|
|
|
case PTRACE_PEEKUSR: {
|
|
|
|
u32 tmp;
|
|
|
|
|
|
|
|
ret = -EIO;
|
|
|
|
if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
|
|
|
|
addr < offsetof(struct user_regs_struct, cs))
|
|
|
|
break;
|
|
|
|
|
|
|
|
tmp = 0; /* Default return condition */
|
|
|
|
if (addr < sizeof(struct user_regs_struct))
|
|
|
|
tmp = getreg(child, addr);
|
|
|
|
else if (addr >= offsetof(struct user, u_debugreg[0]) &&
|
|
|
|
addr <= offsetof(struct user, u_debugreg[7])) {
|
|
|
|
addr -= offsetof(struct user, u_debugreg[0]);
|
|
|
|
tmp = ptrace_get_debugreg(child, addr / sizeof(data));
|
|
|
|
}
|
|
|
|
ret = put_user(tmp, (__u32 __user *)datap);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Write the word at location addr in the USER area. Only allow
|
|
|
|
to update segment and debug registers with the upper 32bits
|
|
|
|
zero-extended. */
|
|
|
|
case PTRACE_POKEUSR:
|
|
|
|
ret = -EIO;
|
|
|
|
if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
|
|
|
|
addr < offsetof(struct user_regs_struct, cs))
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (addr < sizeof(struct user_regs_struct))
|
|
|
|
ret = putreg(child, addr, data);
|
|
|
|
else if (addr >= offsetof(struct user, u_debugreg[0]) &&
|
|
|
|
addr <= offsetof(struct user, u_debugreg[7])) {
|
|
|
|
addr -= offsetof(struct user, u_debugreg[0]);
|
|
|
|
ret = ptrace_set_debugreg(child,
|
|
|
|
addr / sizeof(data), data);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case PTRACE_GETREGS: /* Get all gp regs from the child. */
|
|
|
|
return copy_regset_to_user(child,
|
x86/ptrace: Clean up PTRACE_GETREGS/PTRACE_PUTREGS regset selection
task_user_regset_view() has nonsensical semantics, but those semantics
appear to be relied on by existing users of PTRACE_GETREGSET and
PTRACE_SETREGSET. (See added comments below for details.)
It shouldn't be used for PTRACE_GETREGS or PTRACE_SETREGS, though. A
native 64-bit ptrace() call and an x32 ptrace() call using GETREGS
or SETREGS want the 64-bit regset view, and a 32-bit ptrace() call
(native or compat) should use the 32-bit regset.
task_user_regset_view() almost does this except that it will
malfunction if a ptracer is itself ptraced and the outer ptracer
modifies CS on entry to a ptrace() syscall. Hopefully that has never
happened. (The compat ptrace() code already hardcoded the 32-bit
regset, so this change has no effect on that path.)
Improve the situation and deobfuscate the code by hardcoding the
64-bit view in the x32 ptrace() and selecting the view based on the
kernel config in the native ptrace().
I tried to figure out the history behind this API. I naïvely assumed
that PTRACE_GETREGSET and PTRACE_SETREGSET were ancient APIs that
predated compat, but no. They were introduced by
2225a122ae26 ("ptrace: Add support for generic PTRACE_GETREGSET/PTRACE_SETREGSET")
in 2010, and they are simply a poor design. ELF core dumps have the
ELF e_machine field and a bunch of register sets in ELF notes, and the
pair (e_machine, NT_XXX) indicates the format of the regset blob. But
the new PTRACE_GET/SETREGSET API coopted the NT_XXX numbering without
any way to specify which e_machine was in effect. This is especially
bad on x86, where a process can freely switch between 32-bit and
64-bit mode, and, in fact, the PTRACE_SETREGSET call itself can cause
this switch to happen. Oops.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/9daa791d0c7eaebd59c5bc2b2af1b0e7bebe707d.1612375698.git.luto@kernel.org
2021-02-03 18:09:58 +00:00
|
|
|
&user_x86_64_view,
|
2012-03-05 23:32:11 +00:00
|
|
|
REGSET_GENERAL,
|
|
|
|
0, sizeof(struct user_regs_struct),
|
|
|
|
datap);
|
|
|
|
|
|
|
|
case PTRACE_SETREGS: /* Set all gp regs in the child. */
|
|
|
|
return copy_regset_from_user(child,
|
x86/ptrace: Clean up PTRACE_GETREGS/PTRACE_PUTREGS regset selection
task_user_regset_view() has nonsensical semantics, but those semantics
appear to be relied on by existing users of PTRACE_GETREGSET and
PTRACE_SETREGSET. (See added comments below for details.)
It shouldn't be used for PTRACE_GETREGS or PTRACE_SETREGS, though. A
native 64-bit ptrace() call and an x32 ptrace() call using GETREGS
or SETREGS want the 64-bit regset view, and a 32-bit ptrace() call
(native or compat) should use the 32-bit regset.
task_user_regset_view() almost does this except that it will
malfunction if a ptracer is itself ptraced and the outer ptracer
modifies CS on entry to a ptrace() syscall. Hopefully that has never
happened. (The compat ptrace() code already hardcoded the 32-bit
regset, so this change has no effect on that path.)
Improve the situation and deobfuscate the code by hardcoding the
64-bit view in the x32 ptrace() and selecting the view based on the
kernel config in the native ptrace().
I tried to figure out the history behind this API. I naïvely assumed
that PTRACE_GETREGSET and PTRACE_SETREGSET were ancient APIs that
predated compat, but no. They were introduced by
2225a122ae26 ("ptrace: Add support for generic PTRACE_GETREGSET/PTRACE_SETREGSET")
in 2010, and they are simply a poor design. ELF core dumps have the
ELF e_machine field and a bunch of register sets in ELF notes, and the
pair (e_machine, NT_XXX) indicates the format of the regset blob. But
the new PTRACE_GET/SETREGSET API coopted the NT_XXX numbering without
any way to specify which e_machine was in effect. This is especially
bad on x86, where a process can freely switch between 32-bit and
64-bit mode, and, in fact, the PTRACE_SETREGSET call itself can cause
this switch to happen. Oops.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/9daa791d0c7eaebd59c5bc2b2af1b0e7bebe707d.1612375698.git.luto@kernel.org
2021-02-03 18:09:58 +00:00
|
|
|
&user_x86_64_view,
|
2012-03-05 23:32:11 +00:00
|
|
|
REGSET_GENERAL,
|
|
|
|
0, sizeof(struct user_regs_struct),
|
|
|
|
datap);
|
|
|
|
|
|
|
|
case PTRACE_GETFPREGS: /* Get the child FPU state. */
|
|
|
|
return copy_regset_to_user(child,
|
x86/ptrace: Clean up PTRACE_GETREGS/PTRACE_PUTREGS regset selection
task_user_regset_view() has nonsensical semantics, but those semantics
appear to be relied on by existing users of PTRACE_GETREGSET and
PTRACE_SETREGSET. (See added comments below for details.)
It shouldn't be used for PTRACE_GETREGS or PTRACE_SETREGS, though. A
native 64-bit ptrace() call and an x32 ptrace() call using GETREGS
or SETREGS want the 64-bit regset view, and a 32-bit ptrace() call
(native or compat) should use the 32-bit regset.
task_user_regset_view() almost does this except that it will
malfunction if a ptracer is itself ptraced and the outer ptracer
modifies CS on entry to a ptrace() syscall. Hopefully that has never
happened. (The compat ptrace() code already hardcoded the 32-bit
regset, so this change has no effect on that path.)
Improve the situation and deobfuscate the code by hardcoding the
64-bit view in the x32 ptrace() and selecting the view based on the
kernel config in the native ptrace().
I tried to figure out the history behind this API. I naïvely assumed
that PTRACE_GETREGSET and PTRACE_SETREGSET were ancient APIs that
predated compat, but no. They were introduced by
2225a122ae26 ("ptrace: Add support for generic PTRACE_GETREGSET/PTRACE_SETREGSET")
in 2010, and they are simply a poor design. ELF core dumps have the
ELF e_machine field and a bunch of register sets in ELF notes, and the
pair (e_machine, NT_XXX) indicates the format of the regset blob. But
the new PTRACE_GET/SETREGSET API coopted the NT_XXX numbering without
any way to specify which e_machine was in effect. This is especially
bad on x86, where a process can freely switch between 32-bit and
64-bit mode, and, in fact, the PTRACE_SETREGSET call itself can cause
this switch to happen. Oops.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/9daa791d0c7eaebd59c5bc2b2af1b0e7bebe707d.1612375698.git.luto@kernel.org
2021-02-03 18:09:58 +00:00
|
|
|
&user_x86_64_view,
|
2012-03-05 23:32:11 +00:00
|
|
|
REGSET_FP,
|
|
|
|
0, sizeof(struct user_i387_struct),
|
|
|
|
datap);
|
|
|
|
|
|
|
|
case PTRACE_SETFPREGS: /* Set the child FPU state. */
|
|
|
|
return copy_regset_from_user(child,
|
x86/ptrace: Clean up PTRACE_GETREGS/PTRACE_PUTREGS regset selection
task_user_regset_view() has nonsensical semantics, but those semantics
appear to be relied on by existing users of PTRACE_GETREGSET and
PTRACE_SETREGSET. (See added comments below for details.)
It shouldn't be used for PTRACE_GETREGS or PTRACE_SETREGS, though. A
native 64-bit ptrace() call and an x32 ptrace() call using GETREGS
or SETREGS want the 64-bit regset view, and a 32-bit ptrace() call
(native or compat) should use the 32-bit regset.
task_user_regset_view() almost does this except that it will
malfunction if a ptracer is itself ptraced and the outer ptracer
modifies CS on entry to a ptrace() syscall. Hopefully that has never
happened. (The compat ptrace() code already hardcoded the 32-bit
regset, so this change has no effect on that path.)
Improve the situation and deobfuscate the code by hardcoding the
64-bit view in the x32 ptrace() and selecting the view based on the
kernel config in the native ptrace().
I tried to figure out the history behind this API. I naïvely assumed
that PTRACE_GETREGSET and PTRACE_SETREGSET were ancient APIs that
predated compat, but no. They were introduced by
2225a122ae26 ("ptrace: Add support for generic PTRACE_GETREGSET/PTRACE_SETREGSET")
in 2010, and they are simply a poor design. ELF core dumps have the
ELF e_machine field and a bunch of register sets in ELF notes, and the
pair (e_machine, NT_XXX) indicates the format of the regset blob. But
the new PTRACE_GET/SETREGSET API coopted the NT_XXX numbering without
any way to specify which e_machine was in effect. This is especially
bad on x86, where a process can freely switch between 32-bit and
64-bit mode, and, in fact, the PTRACE_SETREGSET call itself can cause
this switch to happen. Oops.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/9daa791d0c7eaebd59c5bc2b2af1b0e7bebe707d.1612375698.git.luto@kernel.org
2021-02-03 18:09:58 +00:00
|
|
|
&user_x86_64_view,
|
2012-03-05 23:32:11 +00:00
|
|
|
REGSET_FP,
|
|
|
|
0, sizeof(struct user_i387_struct),
|
|
|
|
datap);
|
|
|
|
|
|
|
|
default:
|
|
|
|
return compat_ptrace_request(child, request, addr, data);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2015-06-22 11:55:14 +00:00
|
|
|
#ifdef CONFIG_COMPAT
|
2008-04-22 19:21:25 +00:00
|
|
|
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
|
|
|
|
compat_ulong_t caddr, compat_ulong_t cdata)
|
2008-01-30 12:31:01 +00:00
|
|
|
{
|
2012-03-05 23:32:11 +00:00
|
|
|
#ifdef CONFIG_X86_X32_ABI
|
2016-04-18 13:43:43 +00:00
|
|
|
if (!in_ia32_syscall())
|
2012-03-05 23:32:11 +00:00
|
|
|
return x32_arch_ptrace(child, request, caddr, cdata);
|
|
|
|
#endif
|
2015-06-22 11:55:14 +00:00
|
|
|
#ifdef CONFIG_IA32_EMULATION
|
|
|
|
return ia32_arch_ptrace(child, request, caddr, cdata);
|
|
|
|
#else
|
|
|
|
return 0;
|
|
|
|
#endif
|
2008-01-30 12:31:01 +00:00
|
|
|
}
|
2015-06-22 11:55:14 +00:00
|
|
|
#endif /* CONFIG_COMPAT */
|
2008-01-30 12:31:01 +00:00
|
|
|
|
2008-01-30 12:31:53 +00:00
|
|
|
#ifdef CONFIG_X86_64
|
|
|
|
|
2016-08-08 23:29:06 +00:00
|
|
|
static struct user_regset x86_64_regsets[] __ro_after_init = {
|
x86: Separate out x86_regset for 32 and 64 bit
In fill_thread_core_info() the ptrace accessible registers are collected
for a core file to be written out as notes. The note array is allocated
from a size calculated by iterating the user regset view, and counting the
regsets that have a non-zero core_note_type. However, this only allows for
there to be non-zero core_note_type at the end of the regset view. If
there are any in the middle, fill_thread_core_info() will overflow the
note allocation, as it iterates over the size of the view and the
allocation would be smaller than that.
To apparently avoid this problem, x86_32_regsets and x86_64_regsets need
to be constructed in a special way. They both draw their indices from a
shared enum x86_regset, but 32 bit and 64 bit don't all support the same
regsets and can be compiled in at the same time in the case of
IA32_EMULATION. So this enum has to be laid out in a special way such that
there are no gaps for both x86_32_regsets and x86_64_regsets. This
involves ordering them just right by creating aliases for enums that
are only in one view or the other, or creating multiple versions like
REGSET32_IOPERM/REGSET64_IOPERM.
So the collection of the registers tries to minimize the size of the
allocation, but it doesn’t quite work. Then the x86 ptrace side works
around it by constructing the enum just right to avoid a problem. In the
end there is no functional problem, but it is somewhat strange and
fragile.
It could also be improved like this [1], by better utilizing the smaller
array, but this still wastes space in the regset arrays if they are not
carefully crafted to avoid gaps. Instead, just fully separate out the
enums and give them separate 32 and 64 enum names. Add some bitsize-free
defines for REGSET_GENERAL and REGSET_FP since they are the only two
referred to in bitsize generic code.
While introducing a bunch of new 32/64 enums, change the pattern of the
name from REGSET_FOO32 to REGSET32_FOO to better indicate that the 32 is
in reference to the CPU mode and not the register size, as suggested by
Eric Biederman.
This should have no functional change and is only changing how constants
are generated and referred to.
[1] https://lore.kernel.org/lkml/20180717162502.32274-1-yu-cheng.yu@intel.com/
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lore.kernel.org/all/20221021221803.10910-2-rick.p.edgecombe%40intel.com
2022-10-21 22:18:02 +00:00
|
|
|
[REGSET64_GENERAL] = {
|
2022-10-21 22:18:03 +00:00
|
|
|
.core_note_type = NT_PRSTATUS,
|
|
|
|
.n = sizeof(struct user_regs_struct) / sizeof(long),
|
|
|
|
.size = sizeof(long),
|
|
|
|
.align = sizeof(long),
|
|
|
|
.regset_get = genregs_get,
|
|
|
|
.set = genregs_set
|
2008-01-30 12:31:53 +00:00
|
|
|
},
|
x86: Separate out x86_regset for 32 and 64 bit
In fill_thread_core_info() the ptrace accessible registers are collected
for a core file to be written out as notes. The note array is allocated
from a size calculated by iterating the user regset view, and counting the
regsets that have a non-zero core_note_type. However, this only allows for
there to be non-zero core_note_type at the end of the regset view. If
there are any in the middle, fill_thread_core_info() will overflow the
note allocation, as it iterates over the size of the view and the
allocation would be smaller than that.
To apparently avoid this problem, x86_32_regsets and x86_64_regsets need
to be constructed in a special way. They both draw their indices from a
shared enum x86_regset, but 32 bit and 64 bit don't all support the same
regsets and can be compiled in at the same time in the case of
IA32_EMULATION. So this enum has to be laid out in a special way such that
there are no gaps for both x86_32_regsets and x86_64_regsets. This
involves ordering them just right by creating aliases for enums that
are only in one view or the other, or creating multiple versions like
REGSET32_IOPERM/REGSET64_IOPERM.
So the collection of the registers tries to minimize the size of the
allocation, but it doesn’t quite work. Then the x86 ptrace side works
around it by constructing the enum just right to avoid a problem. In the
end there is no functional problem, but it is somewhat strange and
fragile.
It could also be improved like this [1], by better utilizing the smaller
array, but this still wastes space in the regset arrays if they are not
carefully crafted to avoid gaps. Instead, just fully separate out the
enums and give them separate 32 and 64 enum names. Add some bitsize-free
defines for REGSET_GENERAL and REGSET_FP since they are the only two
referred to in bitsize generic code.
While introducing a bunch of new 32/64 enums, change the pattern of the
name from REGSET_FOO32 to REGSET32_FOO to better indicate that the 32 is
in reference to the CPU mode and not the register size, as suggested by
Eric Biederman.
This should have no functional change and is only changing how constants
are generated and referred to.
[1] https://lore.kernel.org/lkml/20180717162502.32274-1-yu-cheng.yu@intel.com/
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lore.kernel.org/all/20221021221803.10910-2-rick.p.edgecombe%40intel.com
2022-10-21 22:18:02 +00:00
|
|
|
[REGSET64_FP] = {
|
2022-10-21 22:18:03 +00:00
|
|
|
.core_note_type = NT_PRFPREG,
|
|
|
|
.n = sizeof(struct fxregs_state) / sizeof(long),
|
|
|
|
.size = sizeof(long),
|
|
|
|
.align = sizeof(long),
|
|
|
|
.active = regset_xregset_fpregs_active,
|
|
|
|
.regset_get = xfpregs_get,
|
|
|
|
.set = xfpregs_set
|
2008-01-30 12:31:53 +00:00
|
|
|
},
|
x86: Separate out x86_regset for 32 and 64 bit
In fill_thread_core_info() the ptrace accessible registers are collected
for a core file to be written out as notes. The note array is allocated
from a size calculated by iterating the user regset view, and counting the
regsets that have a non-zero core_note_type. However, this only allows for
there to be non-zero core_note_type at the end of the regset view. If
there are any in the middle, fill_thread_core_info() will overflow the
note allocation, as it iterates over the size of the view and the
allocation would be smaller than that.
To apparently avoid this problem, x86_32_regsets and x86_64_regsets need
to be constructed in a special way. They both draw their indices from a
shared enum x86_regset, but 32 bit and 64 bit don't all support the same
regsets and can be compiled in at the same time in the case of
IA32_EMULATION. So this enum has to be laid out in a special way such that
there are no gaps for both x86_32_regsets and x86_64_regsets. This
involves ordering them just right by creating aliases for enums that
are only in one view or the other, or creating multiple versions like
REGSET32_IOPERM/REGSET64_IOPERM.
So the collection of the registers tries to minimize the size of the
allocation, but it doesn’t quite work. Then the x86 ptrace side works
around it by constructing the enum just right to avoid a problem. In the
end there is no functional problem, but it is somewhat strange and
fragile.
It could also be improved like this [1], by better utilizing the smaller
array, but this still wastes space in the regset arrays if they are not
carefully crafted to avoid gaps. Instead, just fully separate out the
enums and give them separate 32 and 64 enum names. Add some bitsize-free
defines for REGSET_GENERAL and REGSET_FP since they are the only two
referred to in bitsize generic code.
While introducing a bunch of new 32/64 enums, change the pattern of the
name from REGSET_FOO32 to REGSET32_FOO to better indicate that the 32 is
in reference to the CPU mode and not the register size, as suggested by
Eric Biederman.
This should have no functional change and is only changing how constants
are generated and referred to.
[1] https://lore.kernel.org/lkml/20180717162502.32274-1-yu-cheng.yu@intel.com/
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lore.kernel.org/all/20221021221803.10910-2-rick.p.edgecombe%40intel.com
2022-10-21 22:18:02 +00:00
|
|
|
[REGSET64_XSTATE] = {
|
2022-10-21 22:18:03 +00:00
|
|
|
.core_note_type = NT_X86_XSTATE,
|
|
|
|
.size = sizeof(u64),
|
|
|
|
.align = sizeof(u64),
|
|
|
|
.active = xstateregs_active,
|
|
|
|
.regset_get = xstateregs_get,
|
|
|
|
.set = xstateregs_set
|
2010-02-11 19:50:59 +00:00
|
|
|
},
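One added observation (not an original comment): the xstate entry above leaves .n unset.
/*
 * Note: .n is not initialized statically for the xstate regset; the
 * XSAVE area size is only known once the FPU code has sized the buffer
 * at boot, so it is presumably filled in later at runtime (via
 * update_regset_xstate_info()) rather than here.
 */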
|
x86: Separate out x86_regset for 32 and 64 bit
In fill_thread_core_info() the ptrace accessible registers are collected
for a core file to be written out as notes. The note array is allocated
from a size calculated by iterating the user regset view, and counting the
regsets that have a non-zero core_note_type. However, this only allows for
there to be non-zero core_note_type at the end of the regset view. If
there are any in the middle, fill_thread_core_info() will overflow the
note allocation, as it iterates over the size of the view and the
allocation would be smaller than that.
To apparently avoid this problem, x86_32_regsets and x86_64_regsets need
to be constructed in a special way. They both draw their indices from a
shared enum x86_regset, but 32 bit and 64 bit don't all support the same
regsets and can be compiled in at the same time in the case of
IA32_EMULATION. So this enum has to be laid out in a special way such that
there are no gaps for both x86_32_regsets and x86_64_regsets. This
involves ordering them just right by creating aliases for enums that
are only in one view or the other, or creating multiple versions like
REGSET32_IOPERM/REGSET64_IOPERM.
So the collection of the registers tries to minimize the size of the
allocation, but it doesn’t quite work. Then the x86 ptrace side works
around it by constructing the enum just right to avoid a problem. In the
end there is no functional problem, but it is somewhat strange and
fragile.
It could also be improved like this [1], by better utilizing the smaller
array, but this still wastes space in the regset arrays if they are not
carefully crafted to avoid gaps. Instead, just fully separate out the
enums and give them separate 32 and 64 enum names. Add some bitsize-free
defines for REGSET_GENERAL and REGSET_FP since they are the only two
referred to in bitsize generic code.
While introducing a bunch of new 32/64 enums, change the pattern of the
name from REGSET_FOO32 to REGSET32_FOO to better indicate that the 32 is
in reference to the CPU mode and not the register size, as suggested by
Eric Biederman.
This should have no functional change and is only changing how constants
are generated and referred to.
[1] https://lore.kernel.org/lkml/20180717162502.32274-1-yu-cheng.yu@intel.com/
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lore.kernel.org/all/20221021221803.10910-2-rick.p.edgecombe%40intel.com
2022-10-21 22:18:02 +00:00
|
|
|
[REGSET64_IOPERM] = {
|
2022-10-21 22:18:03 +00:00
|
|
|
.core_note_type = NT_386_IOPERM,
|
|
|
|
.n = IO_BITMAP_LONGS,
|
|
|
|
.size = sizeof(long),
|
|
|
|
.align = sizeof(long),
|
|
|
|
.active = ioperm_active,
|
|
|
|
.regset_get = ioperm_get
|
2008-08-08 22:58:39 +00:00
|
|
|
},
|
x86: Add PTRACE interface for shadow stack
Some applications (like GDB) would like to tweak shadow stack state via
ptrace. This allows for existing functionality to continue to work for
seized shadow stack applications. Provide a regset interface for
manipulating the shadow stack pointer (SSP).
There is already ptrace functionality for accessing xstate, but this
does not include supervisor xfeatures. So there is not a completely
clear place for where to put the shadow stack state. Adding it to the
user xfeatures regset would complicate that code, as it currently shares
logic with signals which should not have supervisor features.
Don't add a general supervisor xfeature regset like the user one,
because it is better to maintain flexibility for other supervisor
xfeatures to define their own interface. For example, an xfeature may
decide not to expose all of its state to userspace, as is actually the
case for shadow stack ptrace functionality. A lot of enum values remain
to be used, so just put it in a dedicated shadow stack regset.
The only downside to not having a generic supervisor xfeature regset,
is that apps need to be enlightened of any new supervisor xfeature
exposed this way (i.e. they can't try to have generic save/restore
logic). But maybe that is a good thing, because they have to think
through each new xfeature instead of encountering issues when a new
supervisor xfeature is added.
By adding a shadow stack regset, it also has the effect of including the
shadow stack state in a core dump, which could be useful for debugging.
The shadow stack specific xstate includes the SSP, and the shadow stack
and WRSS enablement status. Enabling shadow stack or WRSS in the kernel
involves more than just flipping the bit. The kernel is made aware that
it has to do extra things when cloning or handling signals. That logic
is triggered off of separate feature enablement state kept in the task
struct. So flipping on HW shadow stack enforcement without notifying
the kernel to change its behavior would severely limit what an application
could do without crashing, and the results would depend on kernel
internal implementation details. There is also no known use for controlling
this state via ptrace today. So only expose the SSP, which is something
that userspace already has indirect control over.
Co-developed-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Kees Cook <keescook@chromium.org>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Tested-by: Pengfei Xu <pengfei.xu@intel.com>
Tested-by: John Allen <john.allen@amd.com>
Tested-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/all/20230613001108.3040476-41-rick.p.edgecombe%40intel.com
2023-06-13 00:11:06 +00:00
|
|
|
#ifdef CONFIG_X86_USER_SHADOW_STACK
|
|
|
|
[REGSET64_SSP] = {
|
|
|
|
.core_note_type = NT_X86_SHSTK,
|
|
|
|
.n = 1,
|
|
|
|
.size = sizeof(u64),
|
|
|
|
.align = sizeof(u64),
|
|
|
|
.active = ssp_active,
|
|
|
|
.regset_get = ssp_get,
|
|
|
|
.set = ssp_set
|
|
|
|
},
|
|
|
|
#endif
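As the shadow stack commit message explains, only the SSP is exposed; a tracer would read it through PTRACE_GETREGSET with NT_X86_SHSTK. A hypothetical userspace sketch (the helper name and the fallback define are assumptions):
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#ifndef NT_X86_SHSTK
# define NT_X86_SHSTK 0x204	/* value from include/uapi/linux/elf.h */
#endif
static long read_shadow_stack_ptr(pid_t pid, uint64_t *ssp)
{
	struct iovec iov = { .iov_base = ssp, .iov_len = sizeof(*ssp) };
	return ptrace(PTRACE_GETREGSET, pid, (void *)NT_X86_SHSTK, &iov);
}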
|
2008-01-30 12:31:53 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
static const struct user_regset_view user_x86_64_view = {
|
|
|
|
.name = "x86_64", .e_machine = EM_X86_64,
|
|
|
|
.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
|
|
|
|
};
|
|
|
|
|
|
|
|
#else /* CONFIG_X86_32 */
|
|
|
|
|
|
|
|
#define user_regs_struct32 user_regs_struct
|
|
|
|
#define genregs32_get genregs_get
|
|
|
|
#define genregs32_set genregs_set
|
|
|
|
|
|
|
|
#endif /* CONFIG_X86_64 */
|
|
|
|
|
|
|
|
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
|
2016-08-08 23:29:06 +00:00
|
|
|
static struct user_regset x86_32_regsets[] __ro_after_init = {
|
x86: Separate out x86_regset for 32 and 64 bit
In fill_thread_core_info() the ptrace accessible registers are collected
for a core file to be written out as notes. The note array is allocated
from a size calculated by iterating the user regset view, and counting the
regsets that have a non-zero core_note_type. However, this only allows for
there to be non-zero core_note_type at the end of the regset view. If
there are any in the middle, fill_thread_core_info() will overflow the
note allocation, as it iterates over the size of the view and the
allocation would be smaller than that.
To apparently avoid this problem, x86_32_regsets and x86_64_regsets need
to be constructed in a special way. They both draw their indices from a
shared enum x86_regset, but 32 bit and 64 bit don't all support the same
regsets and can be compiled in at the same time in the case of
IA32_EMULATION. So this enum has to be laid out in a special way such that
there are no gaps for both x86_32_regsets and x86_64_regsets. This
involves ordering them just right by creating aliases for enums that
are only in one view or the other, or creating multiple versions like
REGSET32_IOPERM/REGSET64_IOPERM.
So the collection of the registers tries to minimize the size of the
allocation, but it doesn’t quite work. Then the x86 ptrace side works
around it by constructing the enum just right to avoid a problem. In the
end there is no functional problem, but it is somewhat strange and
fragile.
It could also be improved like this [1], by better utilizing the smaller
array, but this still wastes space in the regset arrays if they are not
carefully crafted to avoid gaps. Instead, just fully separate out the
enums and give them separate 32 and 64 enum names. Add some bitsize-free
defines for REGSET_GENERAL and REGSET_FP since they are the only two
referred to in bitsize generic code.
While introducing a bunch of new 32/64 enums, change the pattern of the
name from REGSET_FOO32 to REGSET32_FOO to better indicate that the 32 is
in reference to the CPU mode and not the register size, as suggested by
Eric Biederman.
This should have no functional change and is only changing how constants
are generated and referred to.
[1] https://lore.kernel.org/lkml/20180717162502.32274-1-yu-cheng.yu@intel.com/
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lore.kernel.org/all/20221021221803.10910-2-rick.p.edgecombe%40intel.com
2022-10-21 22:18:02 +00:00
|
|
|
[REGSET32_GENERAL] = {
|
2022-10-21 22:18:03 +00:00
|
|
|
.core_note_type = NT_PRSTATUS,
|
|
|
|
.n = sizeof(struct user_regs_struct32) / sizeof(u32),
|
|
|
|
.size = sizeof(u32),
|
|
|
|
.align = sizeof(u32),
|
|
|
|
.regset_get = genregs32_get,
|
|
|
|
.set = genregs32_set
|
2008-01-30 12:31:53 +00:00
|
|
|
},
|
x86: Separate out x86_regset for 32 and 64 bit
In fill_thread_core_info() the ptrace accessible registers are collected
for a core file to be written out as notes. The note array is allocated
from a size calculated by iterating the user regset view, and counting the
regsets that have a non-zero core_note_type. However, this only allows for
there to be non-zero core_note_type at the end of the regset view. If
there are any in the middle, fill_thread_core_info() will overflow the
note allocation, as it iterates over the size of the view and the
allocation would be smaller than that.
To apparently avoid this problem, x86_32_regsets and x86_64_regsets need
to be constructed in a special way. They both draw their indices from a
shared enum x86_regset, but 32 bit and 64 bit don't all support the same
regsets and can be compiled in at the same time in the case of
IA32_EMULATION. So this enum has to be laid out in a special way such that
there are no gaps for both x86_32_regsets and x86_64_regsets. This
involves ordering them just right by creating aliases for enums that
are only in one view or the other, or creating multiple versions like
REGSET32_IOPERM/REGSET64_IOPERM.
So the collection of the registers tries to minimize the size of the
allocation, but it doesn’t quite work. Then the x86 ptrace side works
around it by constructing the enum just right to avoid a problem. In the
end there is no functional problem, but it is somewhat strange and
fragile.
It could also be improved like this [1], by better utilizing the smaller
array, but this still wastes space in the regset arrays if they are not
carefully crafted to avoid gaps. Instead, just fully separate out the
enums and give them separate 32 and 64 enum names. Add some bitsize-free
defines for REGSET_GENERAL and REGSET_FP since they are the only two
referred to in bitsize generic code.
While introducing a bunch of new 32/64 enums, change the pattern of the
name from REGSET_FOO32 to REGSET32_FOO to better indicate that the 32 is
in reference to the CPU mode and not the register size, as suggested by
Eric Biederman.
This should have no functional change and is only changing how constants
are generated and referred to.
[1] https://lore.kernel.org/lkml/20180717162502.32274-1-yu-cheng.yu@intel.com/
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lore.kernel.org/all/20221021221803.10910-2-rick.p.edgecombe%40intel.com
2022-10-21 22:18:02 +00:00
|
|
|
[REGSET32_FP] = {
|
2022-10-21 22:18:03 +00:00
|
|
|
.core_note_type = NT_PRFPREG,
|
|
|
|
.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
|
|
|
|
.size = sizeof(u32),
|
|
|
|
.align = sizeof(u32),
|
|
|
|
.active = regset_fpregs_active,
|
|
|
|
.regset_get = fpregs_get,
|
|
|
|
.set = fpregs_set
|
2008-01-30 12:31:53 +00:00
|
|
|
},
|
x86: Separate out x86_regset for 32 and 64 bit
In fill_thread_core_info() the ptrace accessible registers are collected
for a core file to be written out as notes. The note array is allocated
from a size calculated by iterating the user regset view, and counting the
regsets that have a non-zero core_note_type. However, this only allows for
there to be non-zero core_note_type at the end of the regset view. If
there are any in the middle, fill_thread_core_info() will overflow the
note allocation, as it iterates over the size of the view and the
allocation would be smaller than that.
To apparently avoid this problem, x86_32_regsets and x86_64_regsets need
to be constructed in a special way. They both draw their indices from a
shared enum x86_regset, but 32 bit and 64 bit don't all support the same
regsets and can be compiled in at the same time in the case of
IA32_EMULATION. So this enum has to be laid out in a special way such that
there are no gaps for both x86_32_regsets and x86_64_regsets. This
involves ordering them just right by creating aliases for enums that
are only in one view or the other, or creating multiple versions like
REGSET32_IOPERM/REGSET64_IOPERM.
So the collection of the registers tries to minimize the size of the
allocation, but it doesn’t quite work. Then the x86 ptrace side works
around it by constructing the enum just right to avoid a problem. In the
end there is no functional problem, but it is somewhat strange and
fragile.
It could also be improved like this [1], by better utilizing the smaller
array, but this still wastes space in the regset array’s if they are not
carefully crafted to avoid gaps. Instead, just fully separate out the
enums and give them separate 32 and 64 enum names. Add some bitsize-free
defines for REGSET_GENERAL and REGSET_FP since they are the only two
referred to in bitsize generic code.
While introducing a bunch of new 32/64 enums, change the pattern of the
name from REGSET_FOO32 to REGSET32_FOO to better indicate that the 32 is
in reference to the CPU mode and not the register size, as suggested by
Eric Biederman.
This should have no functional change and is only changing how constants
are generated and referred to.
[1] https://lore.kernel.org/lkml/20180717162502.32274-1-yu-cheng.yu@intel.com/
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lore.kernel.org/all/20221021221803.10910-2-rick.p.edgecombe%40intel.com
2022-10-21 22:18:02 +00:00
|
|
|
[REGSET32_XFP] = {
|
2022-10-21 22:18:03 +00:00
|
|
|
.core_note_type = NT_PRXFPREG,
|
|
|
|
.n = sizeof(struct fxregs_state) / sizeof(u32),
|
|
|
|
.size = sizeof(u32),
|
|
|
|
.align = sizeof(u32),
|
|
|
|
.active = regset_xregset_fpregs_active,
|
|
|
|
.regset_get = xfpregs_get,
|
|
|
|
.set = xfpregs_set
|
2008-01-30 12:31:53 +00:00
|
|
|
},
|
x86: Separate out x86_regset for 32 and 64 bit
2022-10-21 22:18:02 +00:00
|
|
|
[REGSET32_XSTATE] = {
|
2022-10-21 22:18:03 +00:00
|
|
|
.core_note_type = NT_X86_XSTATE,
|
|
|
|
.size = sizeof(u64),
|
|
|
|
.align = sizeof(u64),
|
|
|
|
.active = xstateregs_active,
|
|
|
|
.regset_get = xstateregs_get,
|
|
|
|
.set = xstateregs_set
|
2010-02-11 19:50:59 +00:00
|
|
|
},
|
x86: Separate out x86_regset for 32 and 64 bit
2022-10-21 22:18:02 +00:00
|
|
|
[REGSET32_TLS] = {
|
2022-10-21 22:18:03 +00:00
|
|
|
.core_note_type = NT_386_TLS,
|
|
|
|
.n = GDT_ENTRY_TLS_ENTRIES,
|
|
|
|
.bias = GDT_ENTRY_TLS_MIN,
|
|
|
|
.size = sizeof(struct user_desc),
|
|
|
|
.align = sizeof(struct user_desc),
|
|
|
|
.active = regset_tls_active,
|
|
|
|
.regset_get = regset_tls_get,
|
|
|
|
.set = regset_tls_set
|
2008-01-30 12:31:53 +00:00
|
|
|
},
|
x86: Separate out x86_regset for 32 and 64 bit
2022-10-21 22:18:02 +00:00
|
|
|
[REGSET32_IOPERM] = {
|
2022-10-21 22:18:03 +00:00
|
|
|
.core_note_type = NT_386_IOPERM,
|
|
|
|
.n = IO_BITMAP_BYTES / sizeof(u32),
|
|
|
|
.size = sizeof(u32),
|
|
|
|
.align = sizeof(u32),
|
|
|
|
.active = ioperm_active,
|
|
|
|
.regset_get = ioperm_get
|
2008-08-08 22:58:39 +00:00
|
|
|
},
|
2008-01-30 12:31:53 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
static const struct user_regset_view user_x86_32_view = {
|
|
|
|
.name = "i386", .e_machine = EM_386,
|
|
|
|
.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
|
|
|
|
};
|
|
|
|
#endif
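The view defined above is what the core dump code walks when it sizes its note array, and the fragility described in the earlier commit message comes from that sizing only tolerating gaps at the end of the regset array. Below is a simplified sketch of the counting, showing only the shape of it rather than the actual fs/binfmt_elf.c code:

#include <linux/regset.h>

/*
 * Simplified sketch, not the real fs/binfmt_elf.c code: the dumper only
 * counts regsets that carry a core_note_type, so a "gap" entry sitting in
 * the middle of view->regsets would make view->n larger than the number of
 * notes the dumper actually allocated.
 */
static int count_core_notes(const struct user_regset_view *view)
{
	int i, notes = 0;

	for (i = 0; i < view->n; i++)
		if (view->regsets[i].core_note_type)
			notes++;

	return notes;
}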
|
|
|
|
|
2010-02-11 19:50:59 +00:00
|
|
|
/*
|
|
|
|
* This represents bytes 464..511 in the memory layout exported through
|
|
|
|
* the REGSET_XSTATE interface.
|
|
|
|
*/
|
|
|
|
u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
|
|
|
|
|
2016-08-08 23:29:06 +00:00
|
|
|
void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
|
2010-02-11 19:50:59 +00:00
|
|
|
{
|
|
|
|
#ifdef CONFIG_X86_64
|
x86: Separate out x86_regset for 32 and 64 bit
2022-10-21 22:18:02 +00:00
|
|
|
x86_64_regsets[REGSET64_XSTATE].n = size / sizeof(u64);
|
2010-02-11 19:50:59 +00:00
|
|
|
#endif
|
|
|
|
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
|
x86: Separate out x86_regset for 32 and 64 bit
2022-10-21 22:18:02 +00:00
|
|
|
x86_32_regsets[REGSET32_XSTATE].n = size / sizeof(u64);
|
2010-02-11 19:50:59 +00:00
|
|
|
#endif
|
|
|
|
xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask;
|
|
|
|
}
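As a usage note, the actual argument values come from the FPU/xstate setup code at boot; a minimal sketch of such a call follows, with the caller-side identifiers assumed for illustration rather than taken from this file:

/*
 * Sketch only: the caller lives in the FPU/xstate init code, and the
 * identifiers fpu_user_cfg.max_size / fpu_user_cfg.max_features are
 * assumptions, not quotes from this file.
 */
update_regset_xstate_info(fpu_user_cfg.max_size, fpu_user_cfg.max_features);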
|
|
|
|
|
x86/ptrace: Clean up PTRACE_GETREGS/PTRACE_PUTREGS regset selection
task_user_regset_view() has nonsensical semantics, but those semantics
appear to be relied on by existing users of PTRACE_GETREGSET and
PTRACE_SETREGSET. (See added comments below for details.)
It shouldn't be used for PTRACE_GETREGS or PTRACE_SETREGS, though. A
native 64-bit ptrace() call and an x32 ptrace() call using GETREGS
or SETREGS want the 64-bit regset views, and a 32-bit ptrace() call
(native or compat) should use the 32-bit regset.
task_user_regset_view() almost does this except that it will
malfunction if a ptracer is itself ptraced and the outer ptracer
modifies CS on entry to a ptrace() syscall. Hopefully that has never
happened. (The compat ptrace() code already hardcoded the 32-bit
regset, so this change has no effect on that path.)
Improve the situation and deobfuscate the code by hardcoding the
64-bit view in the x32 ptrace() and selecting the view based on the
kernel config in the native ptrace().
I tried to figure out the history behind this API. I naïvely assumed
that PTRACE_GETREGSET and PTRACE_SETREGSET were ancient APIs that
predated compat, but no. They were introduced by
2225a122ae26 ("ptrace: Add support for generic PTRACE_GETREGSET/PTRACE_SETREGSET")
in 2010, and they are simply a poor design. ELF core dumps have the
ELF e_machine field and a bunch of register sets in ELF notes, and the
pair (e_machine, NT_XXX) indicates the format of the regset blob. But
the new PTRACE_GET/SETREGSET API coopted the NT_XXX numbering without
any way to specify which e_machine was in effect. This is especially
bad on x86, where a process can freely switch between 32-bit and
64-bit mode, and, in fact, the PTRACE_SETREGSET call itself can cause
this switch to happen. Oops.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/9daa791d0c7eaebd59c5bc2b2af1b0e7bebe707d.1612375698.git.luto@kernel.org
2021-02-03 18:09:58 +00:00
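To see why the missing e_machine matters, here is a minimal user-space sketch of the PTRACE_GETREGSET call shape: the tracer only names an NT_* note type and a buffer, so nothing in the request says which register layout it expects.

#include <elf.h>          /* NT_PRSTATUS */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>      /* struct iovec */
#include <sys/user.h>     /* struct user_regs_struct */

/* Sketch: read the general registers of a stopped tracee. */
static long get_gp_regs(pid_t pid, struct user_regs_struct *regs)
{
	struct iovec iov = {
		.iov_base = regs,
		.iov_len  = sizeof(*regs),
	};

	/*
	 * Only (NT_PRSTATUS, iov) is passed; whether the kernel fills in a
	 * 32-bit or 64-bit layout depends on the regset view it picks for
	 * the tracee, not on anything the tracer asked for.
	 */
	return ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
}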
|
|
|
/*
|
|
|
|
* This is used by the core dump code to decide which regset to dump. The
|
|
|
|
* core dump code writes out the resulting .e_machine and the corresponding
|
|
|
|
* regsets. This is suboptimal if the task is messing around with its CS.L
|
|
|
|
* field, but at worst the core dump will end up missing some information.
|
|
|
|
*
|
|
|
|
* Unfortunately, it is also used by the broken PTRACE_GETREGSET and
|
|
|
|
* PTRACE_SETREGSET APIs. These APIs look at the .regsets field but have
|
|
|
|
* no way to make sure that the e_machine they use matches the caller's
|
|
|
|
* expectations. The result is that the data format returned by
|
|
|
|
* PTRACE_GETREGSET depends on the returned CS field (and even the offset
|
|
|
|
* of the returned CS field depends on its value!) and the data format
|
|
|
|
* accepted by PTRACE_SETREGSET is determined by the old CS value. The
|
|
|
|
* upshot is that it is basically impossible to use these APIs correctly.
|
|
|
|
*
|
|
|
|
* The best way to fix it in the long run would probably be to add new
|
|
|
|
* improved ptrace() APIs to read and write registers reliably, possibly by
|
|
|
|
* allowing userspace to select the ELF e_machine variant that they expect.
|
|
|
|
*/
|
2008-01-30 12:31:53 +00:00
|
|
|
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
|
|
|
|
{
|
|
|
|
#ifdef CONFIG_IA32_EMULATION
|
2016-09-05 13:33:07 +00:00
|
|
|
if (!user_64bit_mode(task_pt_regs(task)))
|
2008-01-30 12:31:53 +00:00
|
|
|
#endif
|
|
|
|
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
|
|
|
|
return &user_x86_32_view;
|
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_X86_64
|
|
|
|
return &user_x86_64_view;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2019-02-06 00:25:11 +00:00
|
|
|
void send_sigtrap(struct pt_regs *regs, int error_code, int si_code)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2019-02-06 00:25:11 +00:00
|
|
|
struct task_struct *tsk = current;
|
|
|
|
|
2012-03-12 09:25:55 +00:00
|
|
|
tsk->thread.trap_nr = X86_TRAP_DB;
|
2005-04-16 22:20:36 +00:00
|
|
|
tsk->thread.error_code = error_code;
|
|
|
|
|
2007-10-19 23:13:56 +00:00
|
|
|
/* Send us the fake SIGTRAP */
|
2018-09-17 23:16:39 +00:00
|
|
|
force_sig_fault(SIGTRAP, si_code,
|
2019-05-23 16:04:24 +00:00
|
|
|
user_mode(regs) ? (void __user *)regs->ip : NULL);
|
2009-12-16 00:47:20 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2018-04-16 19:18:26 +00:00
|
|
|
void user_single_step_report(struct pt_regs *regs)
|
2009-12-16 00:47:20 +00:00
|
|
|
{
|
2019-02-06 00:25:11 +00:00
|
|
|
send_sigtrap(regs, 0, TRAP_BRKPT);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|