arm64/ptrace: Ensure that SME is set up for target when writing SSVE state
When we use NT_ARM_SSVE to either enable streaming mode or change the
vector length for a process we do not currently do anything to ensure that
there is storage allocated for the SME specific register state. If the
task had not previously used SME or we changed the vector length then
the task will not have had TIF_SME set or backing storage for ZA/ZT
allocated, resulting in inconsistent register sizes when saving state
and spurious traps which flush the newly set register state.
We should set TIF_SME to disable traps and ensure that storage is
allocated for ZA and ZT if it is not already allocated. This requires
modifying sme_alloc() to make the flush of any existing register state
optional so we don't disturb existing state for ZA and ZT.
Fixes: e12310a0d3 ("arm64/sme: Implement ptrace support for streaming mode SVE registers")
Reported-by: David Spickett <David.Spickett@arm.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
Cc: <stable@vger.kernel.org> # 5.19.x
Link: https://lore.kernel.org/r/20230810-arm64-fix-ptrace-race-v1-1-a5361fad2bd6@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
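The failing sequence is driven entirely from userspace. Below is a minimal sketch of a trigger, assuming an arm64 system with SME-aware kernel headers and a stopped tracee that has never used SME; the helper name set_streaming_vl(), the header-only write, and the 32-byte vector length are illustrative assumptions, not code from this patch:

#include <elf.h>          /* NT_ARM_SSVE */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <asm/ptrace.h>   /* struct user_sve_header, SVE_PT_REGS_FPSIMD */

/* Change a stopped tracee's streaming-mode vector length via NT_ARM_SSVE. */
static long set_streaming_vl(pid_t pid, unsigned int vl)
{
	struct user_sve_header hdr = {
		.size  = sizeof(hdr),
		.vl    = vl,                 /* streaming vector length in bytes */
		.flags = SVE_PT_REGS_FPSIMD, /* header-only write, no register payload */
	};
	struct iovec iov = { .iov_base = &hdr, .iov_len = sizeof(hdr) };

	/* Dispatched to sve_set_common(..., ARM64_VEC_SME) in the kernel. */
	return ptrace(PTRACE_SETREGSET, pid, NT_ARM_SSVE, &iov);
}

Before this patch, a write like set_streaming_vl(pid, 32) could flip the target into streaming mode without setting TIF_SME or allocating ZA/ZT backing storage, producing the inconsistent save sizes and spurious traps described above.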
commit 5d0a8d2fba
parent 69af56ae56
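For orientation before the hunks, here is the reworked allocator as it reads with this patch applied, reassembled from the fpsimd.c hunk below; the kzalloc() tail falls outside the hunk's context lines, so that part is an assumption based on the surrounding kernel source:

void sme_alloc(struct task_struct *task, bool flush)
{
	/* Storage already exists: zero it only when the caller wants a flush. */
	if (task->thread.sme_state && flush) {
		memset(task->thread.sme_state, 0, sme_state_size(task));
		return;
	}

	/* Allocate zeroed ZA/ZT backing storage (assumed from context). */
	task->thread.sme_state = kzalloc(sme_state_size(task), GFP_KERNEL);
}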
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -356,7 +356,7 @@ static inline int sme_max_virtualisable_vl(void)
 	return vec_max_virtualisable_vl(ARM64_VEC_SME);
 }
 
-extern void sme_alloc(struct task_struct *task);
+extern void sme_alloc(struct task_struct *task, bool flush);
 extern unsigned int sme_get_vl(void);
 extern int sme_set_current_vl(unsigned long arg);
 extern int sme_get_current_vl(void);
@@ -388,7 +388,7 @@ static inline void sme_smstart_sm(void) { }
 static inline void sme_smstop_sm(void) { }
 static inline void sme_smstop(void) { }
 
-static inline void sme_alloc(struct task_struct *task) { }
+static inline void sme_alloc(struct task_struct *task, bool flush) { }
 static inline void sme_setup(void) { }
 static inline unsigned int sme_get_vl(void) { return 0; }
 static inline int sme_max_vl(void) { return 0; }
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1285,9 +1285,9 @@ void fpsimd_release_task(struct task_struct *dead_task)
  * the interest of testability and predictability, the architecture
  * guarantees that when ZA is enabled it will be zeroed.
  */
-void sme_alloc(struct task_struct *task)
+void sme_alloc(struct task_struct *task, bool flush)
 {
-	if (task->thread.sme_state) {
+	if (task->thread.sme_state && flush) {
 		memset(task->thread.sme_state, 0, sme_state_size(task));
 		return;
 	}
@@ -1515,7 +1515,7 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs)
 	}
 
 	sve_alloc(current, false);
-	sme_alloc(current);
+	sme_alloc(current, true);
 	if (!current->thread.sve_state || !current->thread.sme_state) {
 		force_sig(SIGKILL);
 		return;
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -881,6 +881,13 @@ static int sve_set_common(struct task_struct *target,
 			break;
 		case ARM64_VEC_SME:
 			target->thread.svcr |= SVCR_SM_MASK;
+
+			/*
+			 * Disable traps and ensure there is SME storage but
+			 * preserve any currently set values in ZA/ZT.
+			 */
+			sme_alloc(target, false);
+			set_tsk_thread_flag(target, TIF_SME);
 			break;
 		default:
 			WARN_ON_ONCE(1);
@@ -1100,7 +1107,7 @@ static int za_set(struct task_struct *target,
 	}
 
 	/* Allocate/reinit ZA storage */
-	sme_alloc(target);
+	sme_alloc(target, true);
 	if (!target->thread.sme_state) {
 		ret = -ENOMEM;
 		goto out;
@@ -1171,7 +1178,7 @@ static int zt_set(struct task_struct *target,
 		return -EINVAL;
 
 	if (!thread_za_enabled(&target->thread)) {
-		sme_alloc(target);
+		sme_alloc(target, true);
 		if (!target->thread.sme_state)
 			return -ENOMEM;
 	}
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -475,7 +475,7 @@ static int restore_za_context(struct user_ctxs *user)
 	fpsimd_flush_task_state(current);
 	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */
 
-	sme_alloc(current);
+	sme_alloc(current, true);
 	if (!current->thread.sme_state) {
 		current->thread.svcr &= ~SVCR_ZA_MASK;
 		clear_thread_flag(TIF_SME);