s390/ctlreg: add struct ctlreg

Add struct ctlreg to enforce strict type checking / usage for control
register functions.

Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
This commit is contained in:
Heiko Carstens 2023-09-11 21:40:04 +02:00 committed by Vasily Gorbik
parent ecc53818f6
commit 527618abb9
21 changed files with 97 additions and 86 deletions

View File

@@ -12,7 +12,7 @@
#include "decompressor.h"
#include "boot.h"
unsigned long __bootdata_preserved(s390_invalid_asce);
struct ctlreg __bootdata_preserved(s390_invalid_asce);
#ifdef CONFIG_PROC_FS
atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
@@ -422,7 +422,7 @@ void setup_vmem(unsigned long asce_limit)
asce_type = _REGION3_ENTRY_EMPTY;
asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
}
s390_invalid_asce = invalid_pg_dir | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
s390_invalid_asce.val = invalid_pg_dir | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
crst_table_init((unsigned long *)swapper_pg_dir, asce_type);
crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);
@@ -443,12 +443,12 @@ void setup_vmem(unsigned long asce_limit)
kasan_populate_shadow();
S390_lowcore.kernel_asce = swapper_pg_dir | asce_bits;
S390_lowcore.kernel_asce.val = swapper_pg_dir | asce_bits;
S390_lowcore.user_asce = s390_invalid_asce;
local_ctl_load(1, &S390_lowcore.kernel_asce);
local_ctl_load(7, &S390_lowcore.user_asce);
local_ctl_load(13, &S390_lowcore.kernel_asce);
init_mm.context.asce = S390_lowcore.kernel_asce;
init_mm.context.asce = S390_lowcore.kernel_asce.val;
}

View File

@@ -35,6 +35,10 @@
#include <linux/bug.h>
struct ctlreg {
unsigned long val;
};
#define __local_ctl_load(low, high, array) do { \
struct addrtype { \
char _[sizeof(array)]; \
@@ -43,9 +47,9 @@
int _low = low; \
int _esize; \
\
_esize = (_high - _low + 1) * sizeof(unsigned long); \
_esize = (_high - _low + 1) * sizeof(struct ctlreg); \
BUILD_BUG_ON(sizeof(struct addrtype) != _esize); \
typecheck(unsigned long, array[0]); \
typecheck(struct ctlreg, array[0]); \
asm volatile( \
" lctlg %[_low],%[_high],%[_arr]\n" \
: \
@@ -62,16 +66,16 @@
int _low = low; \
int _esize; \
\
_esize = (_high - _low + 1) * sizeof(unsigned long); \
_esize = (_high - _low + 1) * sizeof(struct ctlreg); \
BUILD_BUG_ON(sizeof(struct addrtype) != _esize); \
typecheck(unsigned long, array[0]); \
typecheck(struct ctlreg, array[0]); \
asm volatile( \
" stctg %[_low],%[_high],%[_arr]\n" \
: [_arr] "=Q" (*(struct addrtype *)(&array)) \
: [_low] "i" (low), [_high] "i" (high)); \
} while (0)
static __always_inline void local_ctl_load(unsigned int cr, unsigned long *reg)
static __always_inline void local_ctl_load(unsigned int cr, struct ctlreg *reg)
{
asm volatile(
" lctlg %[cr],%[cr],%[reg]\n"
@@ -80,7 +84,7 @@ static __always_inline void local_ctl_load(unsigned int cr, unsigned long *reg)
: "memory");
}
static __always_inline void local_ctl_store(unsigned int cr, unsigned long *reg)
static __always_inline void local_ctl_store(unsigned int cr, struct ctlreg *reg)
{
asm volatile(
" stctg %[cr],%[cr],%[reg]\n"
@@ -90,19 +94,19 @@ static __always_inline void local_ctl_store(unsigned int cr, unsigned long *reg)
static __always_inline void local_ctl_set_bit(unsigned int cr, unsigned int bit)
{
unsigned long reg;
struct ctlreg reg;
local_ctl_store(cr, &reg);
reg |= 1UL << bit;
reg.val |= 1UL << bit;
local_ctl_load(cr, &reg);
}
static __always_inline void local_ctl_clear_bit(unsigned int cr, unsigned int bit)
{
unsigned long reg;
struct ctlreg reg;
local_ctl_store(cr, &reg);
reg &= ~(1UL << bit);
reg.val &= ~(1UL << bit);
local_ctl_load(cr, &reg);
}
@@ -122,6 +126,7 @@ static inline void system_ctl_clear_bit(unsigned int cr, unsigned int bit)
union ctlreg0 {
unsigned long val;
struct ctlreg reg;
struct {
unsigned long : 8;
unsigned long tcx : 1; /* Transactional-Execution control */
@@ -148,6 +153,7 @@ union ctlreg0 {
union ctlreg2 {
unsigned long val;
struct ctlreg reg;
struct {
unsigned long : 33;
unsigned long ducto : 25;
@@ -161,6 +167,7 @@ union ctlreg2 {
union ctlreg5 {
unsigned long val;
struct ctlreg reg;
struct {
unsigned long : 33;
unsigned long pasteo: 25;
@@ -170,6 +177,7 @@ union ctlreg5 {
union ctlreg15 {
unsigned long val;
struct ctlreg reg;
struct {
unsigned long lsea : 61;
unsigned long : 3;

View File

@@ -15,6 +15,7 @@
* <grundym@us.ibm.com>
*/
#include <linux/types.h>
#include <asm/ctlreg.h>
#include <asm-generic/kprobes.h>
#define BREAKPOINT_INSTRUCTION 0x0002
@@ -65,7 +66,7 @@ struct prev_kprobe {
struct kprobe_ctlblk {
unsigned long kprobe_status;
unsigned long kprobe_saved_imask;
unsigned long kprobe_saved_ctl[3];
struct ctlreg kprobe_saved_ctl[3];
struct prev_kprobe prev_kprobe;
};

View File

@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <asm/ptrace.h>
#include <asm/ctlreg.h>
#include <asm/cpu.h>
#include <asm/types.h>
@@ -139,8 +140,8 @@ struct lowcore {
__u32 restart_flags; /* 0x0384 */
/* Address space pointer. */
unsigned long kernel_asce; /* 0x0388 */
unsigned long user_asce; /* 0x0390 */
struct ctlreg kernel_asce; /* 0x0388 */
struct ctlreg user_asce; /* 0x0390 */
/*
* The lpp and current_pid fields form a
@@ -199,7 +200,7 @@ struct lowcore {
__u32 clock_comp_save_area[2]; /* 0x1330 */
__u64 last_break_save_area; /* 0x1338 */
__u32 access_regs_save_area[16]; /* 0x1340 */
unsigned long cregs_save_area[16]; /* 0x1380 */
struct ctlreg cregs_save_area[16]; /* 0x1380 */
__u8 pad_0x1400[0x1500-0x1400]; /* 0x1400 */
/* Cryptography-counter designation */
__u64 ccd; /* 0x1500 */

View File

@@ -78,7 +78,7 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *
if (next == &init_mm)
S390_lowcore.user_asce = s390_invalid_asce;
else
S390_lowcore.user_asce = next->context.asce;
S390_lowcore.user_asce.val = next->context.asce;
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
/* Clear previous user-ASCE from CR7 */
local_ctl_load(7, &s390_invalid_asce);

View File

@@ -18,6 +18,7 @@
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/sections.h>
#include <asm/ctlreg.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>
@@ -25,7 +26,7 @@
extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];
extern void paging_init(void);
extern unsigned long s390_invalid_asce;
extern struct ctlreg s390_invalid_asce;
enum {
PG_DIRECT_MAP_4K = 0,

View File

@@ -36,11 +36,11 @@ struct ctl_bit_parms {
static void ctl_bit_callback(void *info)
{
struct ctl_bit_parms *pp = info;
unsigned long regs[16];
struct ctlreg regs[16];
__local_ctl_store(0, 15, regs);
regs[pp->cr] &= pp->andval;
regs[pp->cr] |= pp->orval;
regs[pp->cr].val &= pp->andval;
regs[pp->cr].val |= pp->orval;
__local_ctl_load(0, 15, regs);
}
@@ -53,8 +53,8 @@ void system_ctl_set_clear_bit(unsigned int cr, unsigned int bit, bool set)
pp.andval = set ? -1UL : ~(1UL << bit);
system_ctlreg_lock();
abs_lc = get_abs_lowcore();
abs_lc->cregs_save_area[cr] &= pp.andval;
abs_lc->cregs_save_area[cr] |= pp.orval;
abs_lc->cregs_save_area[cr].val &= pp.andval;
abs_lc->cregs_save_area[cr].val |= pp.orval;
put_abs_lowcore(abs_lc);
on_each_cpu(ctl_bit_callback, &pp, 1);
system_ctlreg_unlock();

View File

@@ -225,18 +225,18 @@ static void enable_singlestep(struct kprobe_ctlblk *kcb,
unsigned long ip)
{
union {
unsigned long regs[3];
struct ctlreg regs[3];
struct {
unsigned long control;
unsigned long start;
unsigned long end;
struct ctlreg control;
struct ctlreg start;
struct ctlreg end;
};
} per_kprobe;
/* Set up the PER control registers %cr9-%cr11 */
per_kprobe.control = PER_EVENT_IFETCH;
per_kprobe.start = ip;
per_kprobe.end = ip;
per_kprobe.control.val = PER_EVENT_IFETCH;
per_kprobe.start.val = ip;
per_kprobe.end.val = ip;
/* Save control regs and psw mask */
__local_ctl_store(9, 11, kcb->kprobe_saved_ctl);

View File

@@ -94,12 +94,12 @@ static noinline void __machine_kdump(void *image)
if (MACHINE_HAS_VX)
save_vx_regs((__vector128 *) mcesa->vector_save_area);
if (MACHINE_HAS_GS) {
local_ctl_store(2, &cr2_old.val);
local_ctl_store(2, &cr2_old.reg);
cr2_new = cr2_old;
cr2_new.gse = 1;
local_ctl_load(2, &cr2_new.val);
local_ctl_load(2, &cr2_new.reg);
save_gs_cb((struct gs_cb *) mcesa->guarded_storage_save_area);
local_ctl_load(2, &cr2_old.val);
local_ctl_load(2, &cr2_old.reg);
}
/*
* To create a good backchain for this CPU in the dump store_status

View File

@@ -131,10 +131,10 @@ static notrace void s390_handle_damage(void)
* Disable low address protection and make machine check new PSW a
* disabled wait PSW. Any additional machine check cannot be handled.
*/
local_ctl_store(0, &cr0.val);
local_ctl_store(0, &cr0.reg);
cr0_new = cr0;
cr0_new.lap = 0;
local_ctl_load(0, &cr0_new.val);
local_ctl_load(0, &cr0_new.reg);
psw_save = S390_lowcore.mcck_new_psw;
psw_bits(S390_lowcore.mcck_new_psw).io = 0;
psw_bits(S390_lowcore.mcck_new_psw).ext = 0;
@@ -146,7 +146,7 @@ static notrace void s390_handle_damage(void)
* values. This makes possible system dump analysis easier.
*/
S390_lowcore.mcck_new_psw = psw_save;
local_ctl_load(0, &cr0.val);
local_ctl_load(0, &cr0.reg);
disabled_wait();
while (1);
}
@@ -269,9 +269,9 @@ static int notrace s390_validate_registers(union mci mci)
*/
if (!mci.vr && !test_cpu_flag(CIF_MCCK_GUEST))
kill_task = 1;
cr0.val = S390_lowcore.cregs_save_area[0];
cr0.reg = S390_lowcore.cregs_save_area[0];
cr0.afp = cr0.vx = 1;
local_ctl_load(0, &cr0.val);
local_ctl_load(0, &cr0.reg);
asm volatile(
" la 1,%0\n"
" VLM 0,15,0,1\n"
@@ -290,7 +290,7 @@ static int notrace s390_validate_registers(union mci mci)
if (!mci.ar)
kill_task = 1;
/* Validate guarded storage registers */
cr2.val = S390_lowcore.cregs_save_area[2];
cr2.reg = S390_lowcore.cregs_save_area[2];
if (cr2.gse) {
if (!mci.gs) {
/*

View File

@@ -45,16 +45,16 @@ void update_cr_regs(struct task_struct *task)
union ctlreg2 cr2_old, cr2_new;
int cr0_changed, cr2_changed;
union {
unsigned long regs[3];
struct ctlreg regs[3];
struct {
unsigned long control;
unsigned long start;
unsigned long end;
struct ctlreg control;
struct ctlreg start;
struct ctlreg end;
};
} old, new;
local_ctl_store(0, &cr0_old.val);
local_ctl_store(2, &cr2_old.val);
local_ctl_store(0, &cr0_old.reg);
local_ctl_store(2, &cr2_old.reg);
cr0_new = cr0_old;
cr2_new = cr2_old;
/* Take care of the enable/disable of transactional execution. */
@@ -82,31 +82,31 @@ void update_cr_regs(struct task_struct *task)
cr0_changed = cr0_new.val != cr0_old.val;
cr2_changed = cr2_new.val != cr2_old.val;
if (cr0_changed)
local_ctl_load(0, &cr0_new.val);
local_ctl_load(0, &cr0_new.reg);
if (cr2_changed)
local_ctl_load(2, &cr2_new.val);
local_ctl_load(2, &cr2_new.reg);
/* Copy user specified PER registers */
new.control = thread->per_user.control;
new.start = thread->per_user.start;
new.end = thread->per_user.end;
new.control.val = thread->per_user.control;
new.start.val = thread->per_user.start;
new.end.val = thread->per_user.end;
/* merge TIF_SINGLE_STEP into user specified PER registers. */
if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
new.control |= PER_EVENT_BRANCH;
new.control.val |= PER_EVENT_BRANCH;
else
new.control |= PER_EVENT_IFETCH;
new.control |= PER_CONTROL_SUSPENSION;
new.control |= PER_EVENT_TRANSACTION_END;
new.control.val |= PER_EVENT_IFETCH;
new.control.val |= PER_CONTROL_SUSPENSION;
new.control.val |= PER_EVENT_TRANSACTION_END;
if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
new.control |= PER_EVENT_IFETCH;
new.start = 0;
new.end = -1UL;
new.control.val |= PER_EVENT_IFETCH;
new.start.val = 0;
new.end.val = -1UL;
}
/* Take care of the PER enablement bit in the PSW. */
if (!(new.control & PER_EVENT_MASK)) {
if (!(new.control.val & PER_EVENT_MASK)) {
regs->psw.mask &= ~PSW_MASK_PER;
return;
}

View File

@@ -791,15 +791,15 @@ static void __init setup_cr(void)
__ctl_duct[4] = (unsigned long)__ctl_duald;
/* Update control registers CR2, CR5 and CR15 */
local_ctl_store(2, &cr2.val);
local_ctl_store(5, &cr5.val);
local_ctl_store(15, &cr15.val);
local_ctl_store(2, &cr2.reg);
local_ctl_store(5, &cr5.reg);
local_ctl_store(15, &cr15.reg);
cr2.ducto = (unsigned long)__ctl_duct >> 6;
cr5.pasteo = (unsigned long)__ctl_duct >> 6;
cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3;
local_ctl_load(2, &cr2.val);
local_ctl_load(5, &cr5.val);
local_ctl_load(15, &cr15.val);
local_ctl_load(2, &cr2.reg);
local_ctl_load(5, &cr5.reg);
local_ctl_load(15, &cr15.reg);
}
/*

View File

@@ -910,7 +910,7 @@ early_param("possible_cpus", _setup_possible_cpus);
int __cpu_disable(void)
{
unsigned long cregs[16];
struct ctlreg cregs[16];
int cpu;
/* Handle possible pending IPIs */
@@ -923,9 +923,9 @@ int __cpu_disable(void)
pfault_fini();
/* Disable interrupt sources via control register. */
__local_ctl_store(0, 15, cregs);
cregs[0] &= ~0x0000ee70UL; /* disable all external interrupts */
cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */
cregs[14] &= ~0x1f000000UL; /* disable most machine checks */
cregs[0].val &= ~0x0000ee70UL; /* disable all external interrupts */
cregs[6].val &= ~0xff000000UL; /* disable all I/O interrupts */
cregs[14].val &= ~0x1f000000UL; /* disable most machine checks */
__local_ctl_load(0, 15, cregs);
clear_cpu_flag(CIF_NOHZ_DELAY);
return 0;

View File

@@ -17,17 +17,17 @@
#ifdef CONFIG_DEBUG_ENTRY
void debug_user_asce(int exit)
{
unsigned long cr1, cr7;
struct ctlreg cr1, cr7;
local_ctl_store(1, &cr1);
local_ctl_store(7, &cr7);
if (cr1 == S390_lowcore.kernel_asce && cr7 == S390_lowcore.user_asce)
if (cr1.val == S390_lowcore.kernel_asce.val && cr7.val == S390_lowcore.user_asce.val)
return;
panic("incorrect ASCE on kernel %s\n"
"cr1: %016lx cr7: %016lx\n"
"kernel: %016lx user: %016lx\n",
exit ? "exit" : "entry", cr1, cr7,
S390_lowcore.kernel_asce, S390_lowcore.user_asce);
exit ? "exit" : "entry", cr1.val, cr7.val,
S390_lowcore.kernel_asce.val, S390_lowcore.user_asce.val);
}
#endif /*CONFIG_DEBUG_ENTRY */

View File

@@ -287,7 +287,7 @@ static int pt_dump_init(void)
* kernel ASCE. We need this to keep the page table walker functions
* from accessing non-existent entries.
*/
max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
max_addr = (S390_lowcore.kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2;
max_addr = 1UL << (max_addr * 11 + 31);
address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size;
address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31;

View File

@@ -196,7 +196,7 @@ static void dump_fault_info(struct pt_regs *regs)
pr_cont("mode while using ");
switch (get_fault_type(regs)) {
case USER_FAULT:
asce = S390_lowcore.user_asce;
asce = S390_lowcore.user_asce.val;
pr_cont("user ");
break;
case GMAP_FAULT:
@@ -204,7 +204,7 @@ static void dump_fault_info(struct pt_regs *regs)
pr_cont("gmap ");
break;
case KERNEL_FAULT:
asce = S390_lowcore.kernel_asce;
asce = S390_lowcore.kernel_asce.val;
pr_cont("kernel ");
break;
default:

View File

@@ -54,7 +54,7 @@
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");
unsigned long __bootdata_preserved(s390_invalid_asce);
struct ctlreg __bootdata_preserved(s390_invalid_asce);
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);

View File

@@ -75,7 +75,7 @@ static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
break;
}
table = (unsigned long *)((unsigned long)old & mask);
crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce);
crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce.val);
} else if (MACHINE_HAS_IDTE) {
cspg(old, *old, new);
} else {

View File

@@ -62,7 +62,7 @@ static void __crst_table_upgrade(void *arg)
/* change all active ASCEs to avoid the creation of new TLBs */
if (current->active_mm == mm) {
S390_lowcore.user_asce = mm->context.asce;
S390_lowcore.user_asce.val = mm->context.asce;
local_ctl_load(7, &S390_lowcore.user_asce);
}
__tlb_flush_local();

View File

@@ -706,8 +706,8 @@ void
sclp_sync_wait(void)
{
unsigned long long old_tick;
struct ctlreg cr0, cr0_sync;
unsigned long flags;
unsigned long cr0, cr0_sync;
static u64 sync_count;
u64 timeout;
int irq_context;
@@ -733,8 +733,8 @@ sclp_sync_wait(void)
old_tick = local_tick_disable();
trace_hardirqs_on();
local_ctl_store(0, &cr0);
cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
cr0_sync |= 1UL << (63 - 54);
cr0_sync.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK;
cr0_sync.val |= 1UL << (63 - 54);
local_ctl_load(0, &cr0_sync);
__arch_local_irq_stosm(0x01);
/* Loop until driver state indicates finished request */

View File

@@ -32,11 +32,11 @@ void sclp_early_wait_irq(void)
psw_t psw_ext_save, psw_wait;
union ctlreg0 cr0, cr0_new;
local_ctl_store(0, &cr0.val);
local_ctl_store(0, &cr0.reg);
cr0_new.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK;
cr0_new.lap = 0;
cr0_new.sssm = 1;
local_ctl_load(0, &cr0_new.val);
local_ctl_load(0, &cr0_new.reg);
psw_ext_save = S390_lowcore.external_new_psw;
psw_mask = __extract_psw();
@@ -59,7 +59,7 @@ void sclp_early_wait_irq(void)
} while (S390_lowcore.ext_int_code != EXT_IRQ_SERVICE_SIG);
S390_lowcore.external_new_psw = psw_ext_save;
local_ctl_load(0, &cr0.val);
local_ctl_load(0, &cr0.reg);
}
int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb)