RISC-V Patches for the 6.7 Merge Window, Part 1

* Support for cbo.zero in userspace.
 * Support for CBOs on ACPI-based systems.
 * A handful of improvements for the T-Head cache flushing ops.
 * Support for software shadow call stacks.
 * Various cleanups and fixes.
 -----BEGIN PGP SIGNATURE-----
 
 iQJHBAABCAAxFiEEKzw3R0RoQ7JKlDp6LhMZ81+7GIkFAmVJAJoTHHBhbG1lckBk
 YWJiZWx0LmNvbQAKCRAuExnzX7sYiWZrD/9ECV/0tuX5LbS56kA0ElkwiakyIVGu
 ZVuF26yGJ6w+XvwnHPhqKNVN0ReYR6s6CwH1WpHI5Du9QHZGQU3DKJ43dFMTP3Dn
 dQFli7QJ+tsNo1nre8NZWKj5Ac+Cu906F794qM0q0XrZmyb9DY3ojVYJAYy+dtoo
 /9gwbB7P0GLyDlURLn48oQyz36WQW3CkL5Jkfu+uYwnFe9DAFtfakIKq5mLlNuaH
 PgUk8pAVhSy2GdPOGFtnFFhdXMrTjpgxdo62ZIZC0lbsts26Dxp95oUygqMg51Iy
 ilaXkA2U1c1+gFQNpEove7BVZa5708Kaj6RLQ3/kAJblAzibszwQvIWlWOh7RVni
 3GQAS7/0D0+0cjDwXdWaPIaFFzLfi3bDxRYkc7n59p6nOz+GrxnSNsRPQJGgYxeU
 oTtJfaqWKntm72iutiHmXgx/pvAxWOHpqDnSTlDdtjvgzXCplqBbxZFF/azj30o5
 jplNW5YvdvD9fviYMAoGSOz03IwDeZF5rMlAhqu6vXlyD2//mID82yw/hBluIA3+
 /hLo5QfTLiUGs9nnijxMcfoyusN6AXsJOxwYdAJCIuJOr78YUj0S974gd9KvJXma
 KedrwRVwW7KE7CwY1jhrWBsZEpzl8YrtpMDN47y4gRtDZN8XJMQ+lHqd+BHT/DUO
 TGUCYi5xvr6Vlw==
 =hKWl
 -----END PGP SIGNATURE-----

Merge tag 'riscv-for-linus-6.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V updates from Palmer Dabbelt:

 - Support for cbo.zero in userspace

 - Support for CBOs on ACPI-based systems

 - A handful of improvements for the T-Head cache flushing ops

 - Support for software shadow call stacks

 - Various cleanups and fixes

* tag 'riscv-for-linus-6.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux: (31 commits)
  RISC-V: hwprobe: Fix vDSO SIGSEGV
  riscv: configs: defconfig: Enable configs required for RZ/Five SoC
  riscv: errata: prefix T-Head mnemonics with th.
  riscv: put interrupt entries into .irqentry.text
  riscv: mm: Update the comment of CONFIG_PAGE_OFFSET
  riscv: Using TOOLCHAIN_HAS_ZIHINTPAUSE marco replace zihintpause
  riscv/mm: Fix the comment for swap pte format
  RISC-V: clarify the QEMU workaround in ISA parser
  riscv: correct pt_level name via pgtable_l5/4_enabled
  RISC-V: Provide pgtable_l5_enabled on rv32
  clocksource: timer-riscv: Increase rating of clock_event_device for Sstc
  clocksource: timer-riscv: Don't enable/disable timer interrupt
  lkdtm: Fix CFI_BACKWARD on RISC-V
  riscv: Use separate IRQ shadow call stacks
  riscv: Implement Shadow Call Stack
  riscv: Move global pointer loading to a macro
  riscv: Deduplicate IRQ stack switching
  riscv: VMAP_STACK overflow detection thread-safe
  RISC-V: cacheflush: Initialize CBO variables on ACPI systems
  RISC-V: ACPI: RHCT: Add function to get CBO block sizes
  ...
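The headline userspace-visible change is cbo.zero: once riscv_user_isa_enable() (below) sets senvcfg.CBZE, user code may zero whole cache blocks directly. A minimal sketch of a userspace helper — illustrative only, not part of the series — assuming a Zicboz-aware toolchain and a block size already read via hwprobe:

	/* addr must be aligned to the Zicboz block size reported by hwprobe. */
	static inline void cbo_zero_block(void *addr)
	{
		asm volatile("cbo.zero (%0)" : : "r" (addr) : "memory");
	}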
Linus Torvalds 2023-11-08 09:21:18 -08:00
commit d46392bbf5
44 changed files with 918 additions and 298 deletions


@ -77,6 +77,9 @@ The following keys are defined:
* :c:macro:`RISCV_HWPROBE_EXT_ZBS`: The Zbs extension is supported, as defined
in version 1.0 of the Bit-Manipulation ISA extensions.
* :c:macro:`RISCV_HWPROBE_EXT_ZICBOZ`: The Zicboz extension is supported, as
ratified in commit 3dd606f ("Create cmobase-v1.0.pdf") of riscv-CMOs.
* :c:macro:`RISCV_HWPROBE_KEY_CPUPERF_0`: A bitmask that contains performance
information about the selected set of processors.
@ -96,3 +99,6 @@ The following keys are defined:
* :c:macro:`RISCV_HWPROBE_MISALIGNED_UNSUPPORTED`: Misaligned accesses are
not supported at all and will generate a misaligned address fault.
* :c:macro:`RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE`: An unsigned int which
represents the size of the Zicboz block in bytes.
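As an illustration of the two new keys, a userspace probe could look like the sketch below (not part of the patch; it makes the syscall directly since libc wrappers may not exist yet, and __NR_riscv_hwprobe is assumed to come from the uapi headers):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <asm/hwprobe.h>

	int main(void)
	{
		struct riscv_hwprobe pair = {
			.key = RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE,
		};

		/* One pair, all CPUs (cpu_count == 0, cpus == NULL), no flags. */
		if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
			return 1;

		/* The value is 0 unless Zicboz is present on every hart. */
		printf("Zicboz block size: %llu\n", (unsigned long long)pair.value);
		return 0;
	}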


@ -39,6 +39,7 @@ config RISCV
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAS_VDSO_DATA
select ARCH_KEEP_MEMBLOCK if ACPI
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
select ARCH_STACKWALK
@ -48,6 +49,7 @@ config RISCV
select ARCH_SUPPORTS_HUGETLBFS if MMU
select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
select ARCH_SUPPORTS_PER_VMA_LOCK if MMU
select ARCH_SUPPORTS_SHADOW_CALL_STACK if HAVE_SHADOW_CALL_STACK
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USES_CFI_TRAPS if CFI_CLANG
@ -174,6 +176,11 @@ config GCC_SUPPORTS_DYNAMIC_FTRACE
def_bool CC_IS_GCC
depends on $(cc-option,-fpatchable-function-entry=8)
config HAVE_SHADOW_CALL_STACK
def_bool $(cc-option,-fsanitize=shadow-call-stack)
# https://github.com/riscv-non-isa/riscv-elf-psabi-doc/commit/a484e843e6eeb51f0cb7b8819e50da6d2444d769
depends on $(ld-option,--no-relax-gp)
config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT
default 8


@ -54,6 +54,10 @@ endif
endif
endif
ifeq ($(CONFIG_SHADOW_CALL_STACK),y)
KBUILD_LDFLAGS += --no-relax-gp
endif
# ISA string setting
riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima
riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima


@ -37,6 +37,13 @@ CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
CONFIG_PM=y
CONFIG_CPU_IDLE=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=m
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
CONFIG_CPUFREQ_DT=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
CONFIG_ACPI=y
@ -95,6 +102,7 @@ CONFIG_NETLINK_DIAG=y
CONFIG_CGROUP_NET_PRIO=y
CONFIG_NET_9P=y
CONFIG_NET_9P_VIRTIO=y
CONFIG_CAN=m
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCI_HOST_GENERIC=y
@ -102,6 +110,11 @@ CONFIG_PCIE_XILINX=y
CONFIG_PCIE_FU740=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_ADV_OPTIONS=y
CONFIG_MTD_SPI_NOR=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_NVME=m
@ -124,8 +137,11 @@ CONFIG_VIRTIO_NET=y
CONFIG_MACB=y
CONFIG_E1000E=y
CONFIG_R8169=y
CONFIG_RAVB=y
CONFIG_STMMAC_ETH=m
CONFIG_MICREL_PHY=y
CONFIG_MICROSEMI_PHY=y
CONFIG_CAN_RCAR_CANFD=m
CONFIG_INPUT_MOUSEDEV=y
CONFIG_KEYBOARD_SUN4I_LRADC=m
CONFIG_SERIAL_8250=y
@ -136,16 +152,24 @@ CONFIG_SERIAL_SH_SCI=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_MV64XXX=m
CONFIG_I2C_RIIC=y
CONFIG_SPI=y
CONFIG_SPI_RSPI=m
CONFIG_SPI_SIFIVE=y
CONFIG_SPI_SUN6I=y
# CONFIG_PTP_1588_CLOCK is not set
CONFIG_GPIO_SIFIVE=y
CONFIG_CPU_THERMAL=y
CONFIG_DEVFREQ_THERMAL=y
CONFIG_RZG2L_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_SUNXI_WATCHDOG=y
CONFIG_RENESAS_RZG2LWDT=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_GPIO=y
CONFIG_DRM=m
CONFIG_DRM_RADEON=m
CONFIG_DRM_NOUVEAU=m
@ -153,39 +177,67 @@ CONFIG_DRM_SUN4I=m
CONFIG_DRM_VIRTIO_GPU=m
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SOC=y
CONFIG_SND_SOC_RZ=m
CONFIG_SND_SOC_WM8978=m
CONFIG_SND_SIMPLE_CARD=m
CONFIG_USB=y
CONFIG_USB_OTG=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_PLATFORM=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_RENESAS_USBHS=m
CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
CONFIG_USB_MUSB_HDRC=m
CONFIG_USB_MUSB_SUNXI=m
CONFIG_NOP_USB_XCEIV=m
CONFIG_USB_GADGET=y
CONFIG_USB_RENESAS_USBHS_UDC=m
CONFIG_USB_CONFIGFS=m
CONFIG_USB_CONFIGFS_SERIAL=y
CONFIG_USB_CONFIGFS_ACM=y
CONFIG_USB_CONFIGFS_OBEX=y
CONFIG_USB_CONFIGFS_NCM=y
CONFIG_USB_CONFIGFS_ECM=y
CONFIG_USB_CONFIGFS_ECM_SUBSET=y
CONFIG_USB_CONFIGFS_RNDIS=y
CONFIG_USB_CONFIGFS_EEM=y
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_CADENCE=y
CONFIG_MMC_SPI=y
CONFIG_MMC_SDHI=y
CONFIG_MMC_SUNXI=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_SUN6I=y
CONFIG_DMADEVICES=y
CONFIG_DMA_SUN6I=m
CONFIG_RZ_DMAC=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_INPUT=y
CONFIG_VIRTIO_MMIO=y
CONFIG_RENESAS_OSTM=y
CONFIG_SUN8I_DE2_CCU=m
CONFIG_SUN50I_IOMMU=y
CONFIG_RPMSG_CHAR=y
CONFIG_RPMSG_CTRL=y
CONFIG_RPMSG_VIRTIO=y
CONFIG_ARCH_R9A07G043=y
CONFIG_IIO=y
CONFIG_RZG2L_ADC=m
CONFIG_RESET_RZG2L_USBPHY_CTRL=y
CONFIG_PHY_SUN4I_USB=m
CONFIG_PHY_RCAR_GEN3_USB2=y
CONFIG_LIBNVDIMM=y
CONFIG_NVMEM_SUNXI_SID=y
CONFIG_EXT4_FS=y


@ -66,6 +66,8 @@ int acpi_get_riscv_isa(struct acpi_table_header *table,
unsigned int cpu, const char **isa);
static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
void acpi_get_cbo_block_size(struct acpi_table_header *table, u32 *cbom_size,
u32 *cboz_size, u32 *cbop_size);
#else
static inline void acpi_init_rintc_map(void) { }
static inline struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu)
@ -79,6 +81,10 @@ static inline int acpi_get_riscv_isa(struct acpi_table_header *table,
return -EINVAL;
}
static inline void acpi_get_cbo_block_size(struct acpi_table_header *table,
u32 *cbom_size, u32 *cboz_size,
u32 *cbop_size) { }
#endif /* CONFIG_ACPI */
#endif /*_ASM_ACPI_H*/


@ -25,7 +25,6 @@ DECLARE_DO_ERROR_INFO(do_trap_ecall_s);
DECLARE_DO_ERROR_INFO(do_trap_ecall_m);
DECLARE_DO_ERROR_INFO(do_trap_break);
asmlinkage unsigned long get_overflow_stack(void);
asmlinkage void handle_bad_stack(struct pt_regs *regs);
asmlinkage void do_page_fault(struct pt_regs *regs);
asmlinkage void do_irq(struct pt_regs *regs);


@ -82,6 +82,47 @@
.endr
.endm
#ifdef CONFIG_SMP
#ifdef CONFIG_32BIT
#define PER_CPU_OFFSET_SHIFT 2
#else
#define PER_CPU_OFFSET_SHIFT 3
#endif
.macro asm_per_cpu dst sym tmp
REG_L \tmp, TASK_TI_CPU_NUM(tp)
slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT
la \dst, __per_cpu_offset
add \dst, \dst, \tmp
REG_L \tmp, 0(\dst)
la \dst, \sym
add \dst, \dst, \tmp
.endm
#else /* CONFIG_SMP */
.macro asm_per_cpu dst sym tmp
la \dst, \sym
.endm
#endif /* CONFIG_SMP */
.macro load_per_cpu dst ptr tmp
asm_per_cpu \dst \ptr \tmp
REG_L \dst, 0(\dst)
.endm
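/*
 * For reference: asm_per_cpu/load_per_cpu compute the same address as
 * per_cpu_ptr(&sym, cpu) in C. The CPU number is read from the task's
 * thread_info (TASK_TI_CPU_NUM(tp)), scaled into an index into
 * __per_cpu_offset[] by PER_CPU_OFFSET_SHIFT, and the per-CPU offset is
 * then added to the symbol's address.
 */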
#ifdef CONFIG_SHADOW_CALL_STACK
/* gp is used as the shadow call stack pointer instead */
.macro load_global_pointer
.endm
#else
/* load __global_pointer to gp */
.macro load_global_pointer
.option push
.option norelax
la gp, __global_pointer$
.option pop
.endm
#endif /* CONFIG_SHADOW_CALL_STACK */
/* save all GPs except x1 ~ x5 */
.macro save_from_x6_to_x31
REG_S x6, PT_T1(sp)


@ -31,5 +31,6 @@ DECLARE_PER_CPU(long, misaligned_access_speed);
extern struct riscv_isainfo hart_isa[NR_CPUS];
void check_unaligned_access(int cpu);
void riscv_user_isa_enable(void);
#endif


@ -95,25 +95,25 @@ asm volatile(ALTERNATIVE( \
#endif
/*
* dcache.ipa rs1 (invalidate, physical address)
* th.dcache.ipa rs1 (invalidate, physical address)
* | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
* 0000001 01010 rs1 000 00000 0001011
* dcache.iva rs1 (invalidate, virtual address)
* th.dcache.iva rs1 (invalidate, virtual address)
* 0000001 00110 rs1 000 00000 0001011
*
* dcache.cpa rs1 (clean, physical address)
* th.dcache.cpa rs1 (clean, physical address)
* | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
* 0000001 01001 rs1 000 00000 0001011
* dcache.cva rs1 (clean, virtual address)
* th.dcache.cva rs1 (clean, virtual address)
* 0000001 00101 rs1 000 00000 0001011
*
* dcache.cipa rs1 (clean then invalidate, physical address)
* th.dcache.cipa rs1 (clean then invalidate, physical address)
* | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
* 0000001 01011 rs1 000 00000 0001011
* dcache.civa rs1 (... virtual address)
* th.dcache.civa rs1 (... virtual address)
* 0000001 00111 rs1 000 00000 0001011
*
* sync.s (make sure all cache operations finished)
* th.sync.s (make sure all cache operations finished)
* | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
* 0000000 11001 00000 000 00000 0001011
*/


@ -72,6 +72,7 @@
#ifndef __ASSEMBLY__
#include <linux/jump_label.h>
#include <asm/cpufeature.h>
unsigned long riscv_get_elf_hwcap(void);
@ -139,6 +140,21 @@ l_yes:
return true;
}
static __always_inline bool riscv_cpu_has_extension_likely(int cpu, const unsigned long ext)
{
if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_likely(ext))
return true;
return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
}
static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsigned long ext)
{
if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_unlikely(ext))
return true;
return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
}
#endif
#endif /* _ASM_RISCV_HWCAP_H */


@ -8,6 +8,11 @@
#include <uapi/asm/hwprobe.h>
#define RISCV_HWPROBE_MAX_KEY 5
#define RISCV_HWPROBE_MAX_KEY 6
static inline bool riscv_hwprobe_key_is_valid(__s64 key)
{
return key >= 0 && key <= RISCV_HWPROBE_MAX_KEY;
}
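/*
 * The key >= 0 check is the point of this helper: the vDSO used to accept
 * any key <= RISCV_HWPROBE_MAX_KEY, so a negative key could index the
 * cached hwprobe values out of bounds ("RISC-V: hwprobe: Fix vDSO
 * SIGSEGV" in this series).
 */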
#endif


@ -12,6 +12,9 @@
DECLARE_PER_CPU(ulong *, irq_stack_ptr);
asmlinkage void call_on_irq_stack(struct pt_regs *regs,
void (*func)(struct pt_regs *));
#ifdef CONFIG_VMAP_STACK
/*
* To ensure that VMAP'd stack overflow detection works correctly, all VMAP'd


@ -33,8 +33,8 @@
#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
#endif
/*
* By default, CONFIG_PAGE_OFFSET value corresponds to SV48 address space so
* define the PAGE_OFFSET value for SV39.
* By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so
* define the PAGE_OFFSET value for SV48 and SV39.
*/
#define PAGE_OFFSET_L4 _AC(0xffffaf8000000000, UL)
#define PAGE_OFFSET_L3 _AC(0xffffffd800000000, UL)


@ -33,4 +33,7 @@
_PAGE_WRITE | _PAGE_EXEC | \
_PAGE_USER | _PAGE_GLOBAL))
static const __maybe_unused int pgtable_l4_enabled;
static const __maybe_unused int pgtable_l5_enabled;
#endif /* _ASM_RISCV_PGTABLE_32_H */
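Defining these as const zeroes (rather than macros) lets 64-bit code that tests pgtable_l4_enabled/pgtable_l5_enabled compile unchanged on rv32 while the compiler discards the dead branches. A hypothetical shared helper shows the effect; on rv32 it folds to a constant 3:

	static inline int pgtable_levels(void)
	{
		if (pgtable_l5_enabled)
			return 5;
		if (pgtable_l4_enabled)
			return 4;
		return 3;
	}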


@ -811,7 +811,7 @@ extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
* bit 5: _PAGE_PROT_NONE (zero)
* bit 6: exclusive marker
* bits 7 to 11: swap type
* bits 11 to XLEN-1: swap offset
* bits 12 to XLEN-1: swap offset
*/
#define __SWP_TYPE_SHIFT 7
#define __SWP_TYPE_BITS 5
@ -914,7 +914,6 @@ extern uintptr_t _dtb_early_pa;
#define dtb_early_pa _dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */
extern u64 satp_mode;
extern bool pgtable_l4_enabled;
void paging_init(void);
void misc_mem_init(void);


@ -0,0 +1,54 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SCS_H
#define _ASM_SCS_H
#ifdef __ASSEMBLY__
#include <asm/asm-offsets.h>
#ifdef CONFIG_SHADOW_CALL_STACK
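/*
 * Every macro here keeps the shadow call stack pointer in gp, which is
 * why linker relaxation against gp has to be disabled when SCS is on
 * (the --no-relax-gp Kconfig and Makefile hunks earlier in this commit).
 */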
/* Load init_shadow_call_stack to gp. */
.macro scs_load_init_stack
la gp, init_shadow_call_stack
XIP_FIXUP_OFFSET gp
.endm
/* Load the per-CPU IRQ shadow call stack to gp. */
.macro scs_load_irq_stack tmp
load_per_cpu gp, irq_shadow_call_stack_ptr, \tmp
.endm
/* Load task_scs_sp(current) to gp. */
.macro scs_load_current
REG_L gp, TASK_TI_SCS_SP(tp)
.endm
/* Load task_scs_sp(current) to gp, but only if tp has changed. */
.macro scs_load_current_if_task_changed prev
beq \prev, tp, _skip_scs
scs_load_current
_skip_scs:
.endm
/* Save gp to task_scs_sp(current). */
.macro scs_save_current
REG_S gp, TASK_TI_SCS_SP(tp)
.endm
#else /* CONFIG_SHADOW_CALL_STACK */
.macro scs_load_init_stack
.endm
.macro scs_load_irq_stack tmp
.endm
.macro scs_load_current
.endm
.macro scs_load_current_if_task_changed prev
.endm
.macro scs_save_current
.endm
#endif /* CONFIG_SHADOW_CALL_STACK */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_SCS_H */


@ -34,9 +34,6 @@
#ifndef __ASSEMBLY__
extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)];
extern unsigned long spin_shadow_stack;
#include <asm/processor.h>
#include <asm/csr.h>
@ -60,8 +57,20 @@ struct thread_info {
long user_sp; /* User stack pointer */
int cpu;
unsigned long syscall_work; /* SYSCALL_WORK_ flags */
#ifdef CONFIG_SHADOW_CALL_STACK
void *scs_base;
void *scs_sp;
#endif
};
#ifdef CONFIG_SHADOW_CALL_STACK
#define INIT_SCS \
.scs_base = init_shadow_call_stack, \
.scs_sp = init_shadow_call_stack,
#else
#define INIT_SCS
#endif
/*
* macros/functions for gaining access to the thread information structure
*
@ -71,6 +80,7 @@ struct thread_info {
{ \
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
INIT_SCS \
}
void arch_release_task_struct(struct task_struct *tsk);


@ -14,7 +14,7 @@ static inline void cpu_relax(void)
__asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
#endif
#ifdef __riscv_zihintpause
#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
/*
* Reduce instruction retirement.
* This assumes the PC changes.


@ -29,6 +29,7 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_EXT_ZBA (1 << 3)
#define RISCV_HWPROBE_EXT_ZBB (1 << 4)
#define RISCV_HWPROBE_EXT_ZBS (1 << 5)
#define RISCV_HWPROBE_EXT_ZICBOZ (1 << 6)
#define RISCV_HWPROBE_KEY_CPUPERF_0 5
#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
#define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0)
@ -36,6 +37,7 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_MISALIGNED_FAST (3 << 0)
#define RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
#define RISCV_HWPROBE_MISALIGNED_MASK (7 << 0)
#define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
/* Increase RISCV_HWPROBE_MAX_KEY when adding items. */
#endif


@ -14,9 +14,10 @@
*/
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/pci.h>
int acpi_noirq = 1; /* skip ACPI IRQ initialization */
int acpi_disabled = 1;
@ -217,7 +218,89 @@ void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
return (void __iomem *)memremap(phys, size, MEMREMAP_WB);
efi_memory_desc_t *md, *region = NULL;
pgprot_t prot;
if (WARN_ON_ONCE(!efi_enabled(EFI_MEMMAP)))
return NULL;
for_each_efi_memory_desc(md) {
u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
if (phys < md->phys_addr || phys >= end)
continue;
if (phys + size > end) {
pr_warn(FW_BUG "requested region covers multiple EFI memory regions\n");
return NULL;
}
region = md;
break;
}
/*
* It is fine for AML to remap regions that are not represented in the
* EFI memory map at all, as it only describes normal memory, and MMIO
* regions that require a virtual mapping to make them accessible to
* the EFI runtime services.
*/
prot = PAGE_KERNEL_IO;
if (region) {
switch (region->type) {
case EFI_LOADER_CODE:
case EFI_LOADER_DATA:
case EFI_BOOT_SERVICES_CODE:
case EFI_BOOT_SERVICES_DATA:
case EFI_CONVENTIONAL_MEMORY:
case EFI_PERSISTENT_MEMORY:
if (memblock_is_map_memory(phys) ||
!memblock_is_region_memory(phys, size)) {
pr_warn(FW_BUG "requested region covers kernel memory\n");
return NULL;
}
/*
* Mapping kernel memory is permitted if the region in
* question is covered by a single memblock with the
* NOMAP attribute set: this enables the use of ACPI
* table overrides passed via initramfs.
* This particular use case only requires read access.
*/
fallthrough;
case EFI_RUNTIME_SERVICES_CODE:
/*
* This would be unusual, but not problematic per se,
* as long as we take care not to create a writable
* mapping for executable code.
*/
prot = PAGE_KERNEL_RO;
break;
case EFI_ACPI_RECLAIM_MEMORY:
/*
* ACPI reclaim memory is used to pass firmware tables
* and other data that is intended for consumption by
* the OS only, which may decide it wants to reclaim
* that memory and use it for something else. We never
* do that, but we usually add it to the linear map
* anyway, in which case we should use the existing
* mapping.
*/
if (memblock_is_map_memory(phys))
return (void __iomem *)__va(phys);
fallthrough;
default:
if (region->attribute & EFI_MEMORY_WB)
prot = PAGE_KERNEL;
else if ((region->attribute & EFI_MEMORY_WC) ||
(region->attribute & EFI_MEMORY_WT))
prot = pgprot_writecombine(PAGE_KERNEL);
}
}
return ioremap_prot(phys, size, pgprot_val(prot));
}
#ifdef CONFIG_PCI


@ -14,6 +14,7 @@
#include <asm/thread_info.h>
#include <asm/ptrace.h>
#include <asm/cpu_ops_sbi.h>
#include <asm/stacktrace.h>
#include <asm/suspend.h>
void asm_offsets(void);
@ -38,7 +39,11 @@ void asm_offsets(void)
OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
#ifdef CONFIG_SHADOW_CALL_STACK
OFFSET(TASK_TI_SCS_SP, task_struct, thread_info.scs_sp);
#endif
OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu);
OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
OFFSET(TASK_THREAD_F1, task_struct, thread.fstate.f[1]);
OFFSET(TASK_THREAD_F2, task_struct, thread.fstate.f[2]);
@ -479,4 +484,8 @@ void asm_offsets(void)
OFFSET(KERNEL_MAP_VIRT_ADDR, kernel_mapping, virt_addr);
OFFSET(SBI_HART_BOOT_TASK_PTR_OFFSET, sbi_hart_boot_data, task_ptr);
OFFSET(SBI_HART_BOOT_STACK_PTR_OFFSET, sbi_hart_boot_data, stack_ptr);
DEFINE(STACKFRAME_SIZE_ON_STACK, ALIGN(sizeof(struct stackframe), STACK_ALIGN));
OFFSET(STACKFRAME_FP, stackframe, fp);
OFFSET(STACKFRAME_RA, stackframe, ra);
}


@ -93,10 +93,10 @@ static bool riscv_isa_extension_check(int id)
return true;
case RISCV_ISA_EXT_ZICBOZ:
if (!riscv_cboz_block_size) {
pr_err("Zicboz detected in ISA string, but no cboz-block-size found\n");
pr_err("Zicboz detected in ISA string, disabling as no cboz-block-size found\n");
return false;
} else if (!is_power_of_2(riscv_cboz_block_size)) {
pr_err("cboz-block-size present, but is not a power-of-2\n");
pr_err("Zicboz disabled as cboz-block-size present, but is not a power-of-2\n");
return false;
}
return true;
@ -206,10 +206,11 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc
switch (*ext) {
case 's':
/*
* Workaround for invalid single-letter 's' & 'u'(QEMU).
* Workaround for invalid single-letter 's' & 'u' (QEMU).
* No need to set the bit in riscv_isa as 's' & 'u' are
* not valid ISA extensions. It works until multi-letter
* extension starting with "Su" appears.
* not valid ISA extensions. It works unless the first
* multi-letter extension in the ISA string begins with
* "Su" and is not prefixed with an underscore.
*/
if (ext[-1] != '_' && ext[1] == 'u') {
++isa;
@ -655,6 +656,12 @@ static int check_unaligned_access_boot_cpu(void)
arch_initcall(check_unaligned_access_boot_cpu);
void riscv_user_isa_enable(void)
{
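	/*
	 * senvcfg.CBZE makes cbo.zero executable in U-mode; without it the
	 * instruction raises an illegal-instruction exception there, which
	 * the cbo selftest added below depends on.
	 */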
if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_ZICBOZ))
csr_set(CSR_SENVCFG, ENVCFG_CBZE);
}
#ifdef CONFIG_RISCV_ALTERNATIVE
/*
* Alternative patch sites consider 48 bits when determining when to patch


@ -9,10 +9,15 @@
#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/scs.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>
#include <linux/sizes.h>
.section .irqentry.text, "ax"
SYM_CODE_START(handle_exception)
/*
@ -73,10 +78,11 @@ _save_context:
csrw CSR_SCRATCH, x0
/* Load the global pointer */
.option push
.option norelax
la gp, __global_pointer$
.option pop
load_global_pointer
/* Load the kernel shadow call stack pointer if coming from userspace */
scs_load_current_if_task_changed s5
move a0, sp /* pt_regs */
la ra, ret_from_exception
@ -123,6 +129,9 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
addi s0, sp, PT_SIZE_ON_STACK
REG_S s0, TASK_TI_KERNEL_SP(tp)
/* Save the kernel shadow call stack pointer */
scs_save_current
/*
* Save TP into the scratch register, so we can find the kernel data
* structures again.
@ -170,67 +179,15 @@ SYM_CODE_END(ret_from_exception)
#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
/*
* Takes the pseudo-spinlock for the shadow stack, in case multiple
* harts are concurrently overflowing their kernel stacks. We could
* store any value here, but since we're overflowing the kernel stack
* already we only have SP to use as a scratch register. So we just
* swap in the address of the spinlock, as that's definitely non-zero.
*
* Pairs with a store_release in handle_bad_stack().
*/
1: la sp, spin_shadow_stack
REG_AMOSWAP_AQ sp, sp, (sp)
bnez sp, 1b
/* we reach here from kernel context, sscratch must be 0 */
csrrw x31, CSR_SCRATCH, x31
asm_per_cpu sp, overflow_stack, x31
li x31, OVERFLOW_STACK_SIZE
add sp, sp, x31
/* zero out x31 again and restore x31 */
xor x31, x31, x31
csrrw x31, CSR_SCRATCH, x31
la sp, shadow_stack
addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE
//save caller register to shadow stack
addi sp, sp, -(PT_SIZE_ON_STACK)
REG_S x1, PT_RA(sp)
REG_S x5, PT_T0(sp)
REG_S x6, PT_T1(sp)
REG_S x7, PT_T2(sp)
REG_S x10, PT_A0(sp)
REG_S x11, PT_A1(sp)
REG_S x12, PT_A2(sp)
REG_S x13, PT_A3(sp)
REG_S x14, PT_A4(sp)
REG_S x15, PT_A5(sp)
REG_S x16, PT_A6(sp)
REG_S x17, PT_A7(sp)
REG_S x28, PT_T3(sp)
REG_S x29, PT_T4(sp)
REG_S x30, PT_T5(sp)
REG_S x31, PT_T6(sp)
la ra, restore_caller_reg
tail get_overflow_stack
restore_caller_reg:
//save per-cpu overflow stack
REG_S a0, -8(sp)
//restore caller register from shadow_stack
REG_L x1, PT_RA(sp)
REG_L x5, PT_T0(sp)
REG_L x6, PT_T1(sp)
REG_L x7, PT_T2(sp)
REG_L x10, PT_A0(sp)
REG_L x11, PT_A1(sp)
REG_L x12, PT_A2(sp)
REG_L x13, PT_A3(sp)
REG_L x14, PT_A4(sp)
REG_L x15, PT_A5(sp)
REG_L x16, PT_A6(sp)
REG_L x17, PT_A7(sp)
REG_L x28, PT_T3(sp)
REG_L x29, PT_T4(sp)
REG_L x30, PT_T5(sp)
REG_L x31, PT_T6(sp)
//load per-cpu overflow stack
REG_L sp, -8(sp)
addi sp, sp, -(PT_SIZE_ON_STACK)
//save context to overflow stack
@ -268,6 +225,43 @@ SYM_CODE_START(ret_from_fork)
tail syscall_exit_to_user_mode
SYM_CODE_END(ret_from_fork)
#ifdef CONFIG_IRQ_STACKS
/*
* void call_on_irq_stack(struct pt_regs *regs,
* void (*func)(struct pt_regs *));
*
* Calls func(regs) using the per-CPU IRQ stack.
*/
SYM_FUNC_START(call_on_irq_stack)
/* Create a frame record to save ra and s0 (fp) */
addi sp, sp, -STACKFRAME_SIZE_ON_STACK
REG_S ra, STACKFRAME_RA(sp)
REG_S s0, STACKFRAME_FP(sp)
addi s0, sp, STACKFRAME_SIZE_ON_STACK
/* Switch to the per-CPU shadow call stack */
scs_save_current
scs_load_irq_stack t0
/* Switch to the per-CPU IRQ stack and call the handler */
load_per_cpu t0, irq_stack_ptr, t1
li t1, IRQ_STACK_SIZE
add sp, t0, t1
jalr a1
/* Switch back to the thread shadow call stack */
scs_load_current
/* Switch back to the thread stack and restore ra and s0 */
addi sp, s0, -STACKFRAME_SIZE_ON_STACK
REG_L ra, STACKFRAME_RA(sp)
REG_L s0, STACKFRAME_FP(sp)
addi sp, sp, STACKFRAME_SIZE_ON_STACK
ret
SYM_FUNC_END(call_on_irq_stack)
#endif /* CONFIG_IRQ_STACKS */
/*
* Integer register context switch
* The callee-saved registers must be saved and restored.
@ -297,6 +291,8 @@ SYM_FUNC_START(__switch_to)
REG_S s9, TASK_THREAD_S9_RA(a3)
REG_S s10, TASK_THREAD_S10_RA(a3)
REG_S s11, TASK_THREAD_S11_RA(a3)
/* Save the kernel shadow call stack pointer */
scs_save_current
/* Restore context from next->thread */
REG_L ra, TASK_THREAD_RA_RA(a4)
REG_L sp, TASK_THREAD_SP_RA(a4)
@ -314,6 +310,8 @@ SYM_FUNC_START(__switch_to)
REG_L s11, TASK_THREAD_S11_RA(a4)
/* The offset of thread_info in task_struct is zero. */
move tp, a1
/* Switch to the next shadow call stack */
scs_load_current
ret
SYM_FUNC_END(__switch_to)


@ -14,6 +14,7 @@
#include <asm/cpu_ops_sbi.h>
#include <asm/hwcap.h>
#include <asm/image.h>
#include <asm/scs.h>
#include <asm/xip_fixup.h>
#include "efi-header.S"
@ -110,10 +111,7 @@ relocate_enable_mmu:
csrw CSR_TVEC, a0
/* Reload the global pointer */
.option push
.option norelax
la gp, __global_pointer$
.option pop
load_global_pointer
/*
* Switch to kernel page tables. A full fence is necessary in order to
@ -134,10 +132,7 @@ secondary_start_sbi:
csrw CSR_IP, zero
/* Load the global pointer */
.option push
.option norelax
la gp, __global_pointer$
.option pop
load_global_pointer
/*
* Disable FPU & VECTOR to detect illegal usage of
@ -159,6 +154,7 @@ secondary_start_sbi:
XIP_FIXUP_OFFSET a3
add a3, a3, a1
REG_L sp, (a3)
scs_load_current
.Lsecondary_start_common:
@ -228,10 +224,7 @@ pmp_done:
#endif /* CONFIG_RISCV_M_MODE */
/* Load the global pointer */
.option push
.option norelax
la gp, __global_pointer$
.option pop
load_global_pointer
/*
* Disable FPU & VECTOR to detect illegal usage of
@ -298,6 +291,7 @@ clear_bss_done:
la sp, init_thread_union + THREAD_SIZE
XIP_FIXUP_OFFSET sp
addi sp, sp, -PT_SIZE_ON_STACK
scs_load_init_stack
#ifdef CONFIG_BUILTIN_DTB
la a0, __dtb_start
XIP_FIXUP_OFFSET a0
@ -316,6 +310,7 @@ clear_bss_done:
la tp, init_task
la sp, init_thread_union + THREAD_SIZE
addi sp, sp, -PT_SIZE_ON_STACK
scs_load_current
#ifdef CONFIG_KASAN
call kasan_early_init


@ -9,6 +9,7 @@
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/scs.h>
#include <linux/seq_file.h>
#include <asm/sbi.h>
#include <asm/smp.h>
@ -34,6 +35,24 @@ EXPORT_SYMBOL_GPL(riscv_get_intc_hwnode);
#ifdef CONFIG_IRQ_STACKS
#include <asm/irq_stack.h>
DECLARE_PER_CPU(ulong *, irq_shadow_call_stack_ptr);
#ifdef CONFIG_SHADOW_CALL_STACK
DEFINE_PER_CPU(ulong *, irq_shadow_call_stack_ptr);
#endif
static void init_irq_scs(void)
{
int cpu;
if (!scs_is_enabled())
return;
for_each_possible_cpu(cpu)
per_cpu(irq_shadow_call_stack_ptr, cpu) =
scs_alloc(cpu_to_node(cpu));
}
DEFINE_PER_CPU(ulong *, irq_stack_ptr);
#ifdef CONFIG_VMAP_STACK
@ -61,40 +80,22 @@ static void init_irq_stacks(void)
#endif /* CONFIG_VMAP_STACK */
#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
static void ___do_softirq(struct pt_regs *regs)
{
__do_softirq();
}
void do_softirq_own_stack(void)
{
#ifdef CONFIG_IRQ_STACKS
if (on_thread_stack()) {
ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id())
+ IRQ_STACK_SIZE/sizeof(ulong);
__asm__ __volatile(
"addi sp, sp, -"RISCV_SZPTR "\n"
REG_S" ra, (sp) \n"
"addi sp, sp, -"RISCV_SZPTR "\n"
REG_S" s0, (sp) \n"
"addi s0, sp, 2*"RISCV_SZPTR "\n"
"move sp, %[sp] \n"
"call __do_softirq \n"
"addi sp, s0, -2*"RISCV_SZPTR"\n"
REG_L" s0, (sp) \n"
"addi sp, sp, "RISCV_SZPTR "\n"
REG_L" ra, (sp) \n"
"addi sp, sp, "RISCV_SZPTR "\n"
:
: [sp] "r" (sp)
: "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
"t0", "t1", "t2", "t3", "t4", "t5", "t6",
#ifndef CONFIG_FRAME_POINTER
"s0",
#endif
"memory");
} else
#endif
if (on_thread_stack())
call_on_irq_stack(NULL, ___do_softirq);
else
__do_softirq();
}
#endif /* CONFIG_SOFTIRQ_ON_OWN_STACK */
#else
static void init_irq_scs(void) {}
static void init_irq_stacks(void) {}
#endif /* CONFIG_IRQ_STACKS */
@ -106,6 +107,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
void __init init_IRQ(void)
{
init_irq_scs();
init_irq_stacks();
irqchip_init();
if (!handle_arch_irq)


@ -17,27 +17,17 @@ SYM_CODE_START(riscv_kexec_relocate)
* s1: (const) Phys address to jump to after relocation
* s2: (const) Phys address of the FDT image
* s3: (const) The hartid of the current hart
* s4: Pointer to the destination address for the relocation
* s5: (const) Number of words per page
* s6: (const) 1, used for subtraction
* s7: (const) kernel_map.va_pa_offset, used when switching MMU off
* s8: (const) Physical address of the main loop
* s9: (debug) indirection page counter
* s10: (debug) entry counter
* s11: (debug) copied words counter
* s4: (const) kernel_map.va_pa_offset, used when switching MMU off
* s5: Pointer to the destination address for the relocation
* s6: (const) Physical address of the main loop
*/
mv s0, a0
mv s1, a1
mv s2, a2
mv s3, a3
mv s4, zero
li s5, (PAGE_SIZE / RISCV_SZPTR)
li s6, 1
mv s7, a4
mv s8, zero
mv s9, zero
mv s10, zero
mv s11, zero
mv s4, a4
mv s5, zero
mv s6, zero
/* Disable / cleanup interrupts */
csrw CSR_SIE, zero
@ -52,21 +42,27 @@ SYM_CODE_START(riscv_kexec_relocate)
* the start of the loop below so that we jump there in
* any case.
*/
la s8, 1f
sub s8, s8, s7
csrw CSR_STVEC, s8
la s6, 1f
sub s6, s6, s4
csrw CSR_STVEC, s6
/*
* With the C extension we get 42 bytes here, and the next
* .align directive would pad with zeros up to 44 bytes.
* So manually put a nop here to avoid the zero padding.
*/
nop
/* Process entries in a loop */
.align 2
1:
addi s10, s10, 1
REG_L t0, 0(s0) /* t0 = *image->entry */
addi s0, s0, RISCV_SZPTR /* image->entry++ */
/* IND_DESTINATION entry ? -> save destination address */
andi t1, t0, 0x1
beqz t1, 2f
andi s4, t0, ~0x1
andi s5, t0, ~0x1
j 1b
2:
@ -74,9 +70,8 @@ SYM_CODE_START(riscv_kexec_relocate)
andi t1, t0, 0x2
beqz t1, 2f
andi s0, t0, ~0x2
addi s9, s9, 1
csrw CSR_SATP, zero
jalr zero, s8, 0
jr s6
2:
/* IND_DONE entry ? -> jump to done label */
@ -92,14 +87,13 @@ SYM_CODE_START(riscv_kexec_relocate)
andi t1, t0, 0x8
beqz t1, 1b /* Unknown entry type, ignore it */
andi t0, t0, ~0x8
mv t3, s5 /* i = num words per page */
li t3, (PAGE_SIZE / RISCV_SZPTR) /* i = num words per page */
3: /* copy loop */
REG_L t1, (t0) /* t1 = *src_ptr */
REG_S t1, (s4) /* *dst_ptr = *src_ptr */
REG_S t1, (s5) /* *dst_ptr = *src_ptr */
addi t0, t0, RISCV_SZPTR /* src_ptr++ */
addi s4, s4, RISCV_SZPTR /* dst_ptr++ */
sub t3, t3, s6 /* i-- */
addi s11, s11, 1 /* c++ */
addi s5, s5, RISCV_SZPTR /* dst_ptr++ */
addi t3, t3, -0x1 /* i-- */
beqz t3, 1b /* copy done ? */
j 3b
@ -146,7 +140,7 @@ SYM_CODE_START(riscv_kexec_relocate)
*/
fence.i
jalr zero, a2, 0
jr a2
SYM_CODE_END(riscv_kexec_relocate)
riscv_kexec_relocate_end:


@ -25,6 +25,7 @@
#include <asm/acpi.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/early_ioremap.h>
#include <asm/pgtable.h>
@ -289,10 +290,13 @@ void __init setup_arch(char **cmdline_p)
riscv_fill_hwcap();
init_rt_signal_env();
apply_boot_alternatives();
if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) &&
riscv_isa_extension_available(NULL, ZICBOM))
riscv_noncoherent_supported();
riscv_set_dma_cache_alignment();
riscv_user_isa_enable();
}
static int __init topology_init(void)


@ -25,6 +25,8 @@
#include <linux/of.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/cpufeature.h>
#include <asm/irq.h>
@ -253,6 +255,8 @@ asmlinkage __visible void smp_callin(void)
elf_hwcap &= ~COMPAT_HWCAP_ISA_V;
}
riscv_user_isa_enable();
/*
* Remote TLB flushes are ignored while the CPU is offline, so emit
* a local TLB flush right now just in case.


@ -61,10 +61,7 @@ END(__cpu_suspend_enter)
SYM_TYPED_FUNC_START(__cpu_resume_enter)
/* Load the global pointer */
.option push
.option norelax
la gp, __global_pointer$
.option pop
load_global_pointer
#ifdef CONFIG_MMU
/* Save A0 and A1 */


@ -145,26 +145,38 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
for_each_cpu(cpu, cpus) {
struct riscv_isainfo *isainfo = &hart_isa[cpu];
if (riscv_isa_extension_available(isainfo->isa, ZBA))
pair->value |= RISCV_HWPROBE_EXT_ZBA;
else
missing |= RISCV_HWPROBE_EXT_ZBA;
#define EXT_KEY(ext) \
do { \
if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \
pair->value |= RISCV_HWPROBE_EXT_##ext; \
else \
missing |= RISCV_HWPROBE_EXT_##ext; \
} while (false)
if (riscv_isa_extension_available(isainfo->isa, ZBB))
pair->value |= RISCV_HWPROBE_EXT_ZBB;
else
missing |= RISCV_HWPROBE_EXT_ZBB;
if (riscv_isa_extension_available(isainfo->isa, ZBS))
pair->value |= RISCV_HWPROBE_EXT_ZBS;
else
missing |= RISCV_HWPROBE_EXT_ZBS;
/*
* Only use EXT_KEY() for extensions which can be exposed to userspace,
* regardless of the kernel's configuration, as no other checks, besides
* presence in the hart_isa bitmap, are made.
*/
EXT_KEY(ZBA);
EXT_KEY(ZBB);
EXT_KEY(ZBS);
EXT_KEY(ZICBOZ);
#undef EXT_KEY
}
/* Now turn off reporting features if any CPU is missing it. */
pair->value &= ~missing;
}
static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
{
struct riscv_hwprobe pair;
hwprobe_isa_ext0(&pair, cpus);
return (pair.value & ext);
}
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
int cpu;
@ -215,6 +227,12 @@ static void hwprobe_one_pair(struct riscv_hwprobe *pair,
pair->value = hwprobe_misaligned(cpus);
break;
case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
pair->value = 0;
if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
pair->value = riscv_cboz_block_size;
break;
/*
* For forward compatibility, unknown keys don't fail the whole
* call, but get their element key set to -1 and value set to 0


@ -360,34 +360,10 @@ static void noinstr handle_riscv_irq(struct pt_regs *regs)
asmlinkage void noinstr do_irq(struct pt_regs *regs)
{
irqentry_state_t state = irqentry_enter(regs);
#ifdef CONFIG_IRQ_STACKS
if (on_thread_stack()) {
ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id())
+ IRQ_STACK_SIZE/sizeof(ulong);
__asm__ __volatile(
"addi sp, sp, -"RISCV_SZPTR "\n"
REG_S" ra, (sp) \n"
"addi sp, sp, -"RISCV_SZPTR "\n"
REG_S" s0, (sp) \n"
"addi s0, sp, 2*"RISCV_SZPTR "\n"
"move sp, %[sp] \n"
"move a0, %[regs] \n"
"call handle_riscv_irq \n"
"addi sp, s0, -2*"RISCV_SZPTR"\n"
REG_L" s0, (sp) \n"
"addi sp, sp, "RISCV_SZPTR "\n"
REG_L" ra, (sp) \n"
"addi sp, sp, "RISCV_SZPTR "\n"
:
: [sp] "r" (sp), [regs] "r" (regs)
: "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
"t0", "t1", "t2", "t3", "t4", "t5", "t6",
#ifndef CONFIG_FRAME_POINTER
"s0",
#endif
"memory");
} else
#endif
if (IS_ENABLED(CONFIG_IRQ_STACKS) && on_thread_stack())
call_on_irq_stack(regs, handle_riscv_irq);
else
handle_riscv_irq(regs);
irqentry_exit(regs, state);
@ -410,48 +386,14 @@ int is_valid_bugaddr(unsigned long pc)
#endif /* CONFIG_GENERIC_BUG */
#ifdef CONFIG_VMAP_STACK
/*
* Extra stack space that allows us to provide panic messages when the kernel
* has overflowed its stack.
*/
static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
overflow_stack)__aligned(16);
/*
* A temporary stack for use by handle_kernel_stack_overflow. This is used so
* we can call into C code to get the per-hart overflow stack. Usage of this
* stack must be protected by spin_shadow_stack.
*/
long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16);
/*
* A pseudo spinlock to protect the shadow stack from being used by multiple
* harts concurrently. This isn't a real spinlock because the lock side must
* be taken without a valid stack and only a single register; it's only taken
* while in the process of panicking anyway, so the performance and error
* checking a proper spinlock gives us doesn't matter.
*/
unsigned long spin_shadow_stack;
asmlinkage unsigned long get_overflow_stack(void)
{
return (unsigned long)this_cpu_ptr(overflow_stack) +
OVERFLOW_STACK_SIZE;
}
asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
unsigned long tsk_stk = (unsigned long)current->stack;
unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
/*
* We're done with the shadow stack by this point, as we're on the
* overflow stack. Tell any other concurrent overflowing harts that
* they can proceed with panicking by releasing the pseudo-spinlock.
*
* This pairs with an amoswap.aq in handle_kernel_stack_overflow.
*/
smp_store_release(&spin_shadow_stack, 0);
console_verbose();
pr_emerg("Insufficient stack space to handle exception!\n");


@ -36,7 +36,7 @@ CPPFLAGS_vdso.lds += -DHAS_VGETTIMEOFDAY
endif
# Disable -pg to prevent insert call site
CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS)
# Disable profiling and instrumentation for VDSO code
GCOV_PROFILE := n


@ -37,7 +37,7 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
/* This is something we can handle, fill out the pairs. */
while (p < end) {
if (p->key <= RISCV_HWPROBE_MAX_KEY) {
if (riscv_hwprobe_key_is_valid(p->key)) {
p->value = avd->all_cpu_hwprobe_values[p->key];
} else {


@ -3,7 +3,9 @@
* Copyright (C) 2017 SiFive
*/
#include <linux/acpi.h>
#include <linux/of.h>
#include <asm/acpi.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_SMP
@ -124,13 +126,24 @@ void __init riscv_init_cbo_blocksizes(void)
unsigned long cbom_hartid, cboz_hartid;
u32 cbom_block_size = 0, cboz_block_size = 0;
struct device_node *node;
struct acpi_table_header *rhct;
acpi_status status;
for_each_of_cpu_node(node) {
/* set block-size for cbom and/or cboz extension if available */
cbo_get_block_size(node, "riscv,cbom-block-size",
&cbom_block_size, &cbom_hartid);
cbo_get_block_size(node, "riscv,cboz-block-size",
&cboz_block_size, &cboz_hartid);
if (acpi_disabled) {
for_each_of_cpu_node(node) {
/* set block-size for cbom and/or cboz extension if available */
cbo_get_block_size(node, "riscv,cbom-block-size",
&cbom_block_size, &cbom_hartid);
cbo_get_block_size(node, "riscv,cboz-block-size",
&cboz_block_size, &cboz_hartid);
}
} else {
status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
if (ACPI_FAILURE(status))
return;
acpi_get_cbo_block_size(rhct, &cbom_block_size, &cboz_block_size, NULL);
acpi_put_table((struct acpi_table_header *)rhct);
}
if (cbom_block_size)


@ -49,10 +49,12 @@ u64 satp_mode __ro_after_init = SATP_MODE_32;
#endif
EXPORT_SYMBOL(satp_mode);
#ifdef CONFIG_64BIT
bool pgtable_l4_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
bool pgtable_l5_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
EXPORT_SYMBOL(pgtable_l4_enabled);
EXPORT_SYMBOL(pgtable_l5_enabled);
#endif
phys_addr_t phys_ram_base __ro_after_init;
EXPORT_SYMBOL(phys_ram_base);


@ -384,6 +384,9 @@ static int __init ptdump_init(void)
kernel_ptd_info.base_addr = KERN_VIRT_START;
pg_level[1].name = pgtable_l5_enabled ? "P4D" : "PGD";
pg_level[2].name = pgtable_l4_enabled ? "PUD" : "PGD";
for (i = 0; i < ARRAY_SIZE(pg_level); i++)
for (j = 0; j < ARRAY_SIZE(pte_bits); j++)
pg_level[i].mask |= pte_bits[j].mask;


@ -81,6 +81,14 @@ ifdef CONFIG_CFI_CLANG
PURGATORY_CFLAGS_REMOVE += $(CC_FLAGS_CFI)
endif
ifdef CONFIG_RELOCATABLE
PURGATORY_CFLAGS_REMOVE += -fPIE
endif
ifdef CONFIG_SHADOW_CALL_STACK
PURGATORY_CFLAGS_REMOVE += $(CC_FLAGS_SCS)
endif
CFLAGS_REMOVE_purgatory.o += $(PURGATORY_CFLAGS_REMOVE)
CFLAGS_purgatory.o += $(PURGATORY_CFLAGS)


@ -8,8 +8,9 @@
#define pr_fmt(fmt) "ACPI: RHCT: " fmt
#include <linux/acpi.h>
#include <linux/bits.h>
static struct acpi_table_header *acpi_get_rhct(void)
static struct acpi_table_rhct *acpi_get_rhct(void)
{
static struct acpi_table_header *rhct;
acpi_status status;
@ -26,7 +27,7 @@ static struct acpi_table_header *acpi_get_rhct(void)
}
}
return rhct;
return (struct acpi_table_rhct *)rhct;
}
/*
@ -48,7 +49,7 @@ int acpi_get_riscv_isa(struct acpi_table_header *table, unsigned int cpu, const
BUG_ON(acpi_disabled);
if (!table) {
rhct = (struct acpi_table_rhct *)acpi_get_rhct();
rhct = acpi_get_rhct();
if (!rhct)
return -ENOENT;
} else {
@ -81,3 +82,89 @@ int acpi_get_riscv_isa(struct acpi_table_header *table, unsigned int cpu, const
return -1;
}
static void acpi_parse_hart_info_cmo_node(struct acpi_table_rhct *rhct,
struct acpi_rhct_hart_info *hart_info,
u32 *cbom_size, u32 *cboz_size, u32 *cbop_size)
{
u32 size_hartinfo = sizeof(struct acpi_rhct_hart_info);
u32 size_hdr = sizeof(struct acpi_rhct_node_header);
struct acpi_rhct_node_header *ref_node;
struct acpi_rhct_cmo_node *cmo_node;
u32 *hart_info_node_offset;
hart_info_node_offset = ACPI_ADD_PTR(u32, hart_info, size_hartinfo);
for (int i = 0; i < hart_info->num_offsets; i++) {
ref_node = ACPI_ADD_PTR(struct acpi_rhct_node_header,
rhct, hart_info_node_offset[i]);
if (ref_node->type == ACPI_RHCT_NODE_TYPE_CMO) {
cmo_node = ACPI_ADD_PTR(struct acpi_rhct_cmo_node,
ref_node, size_hdr);
if (cbom_size && cmo_node->cbom_size <= 30) {
if (!*cbom_size)
*cbom_size = BIT(cmo_node->cbom_size);
else if (*cbom_size != BIT(cmo_node->cbom_size))
pr_warn("CBOM size is not the same across harts\n");
}
if (cboz_size && cmo_node->cboz_size <= 30) {
if (!*cboz_size)
*cboz_size = BIT(cmo_node->cboz_size);
else if (*cboz_size != BIT(cmo_node->cboz_size))
pr_warn("CBOZ size is not the same across harts\n");
}
if (cbop_size && cmo_node->cbop_size <= 30) {
if (!*cbop_size)
*cbop_size = BIT(cmo_node->cbop_size);
else if (*cbop_size != BIT(cmo_node->cbop_size))
pr_warn("CBOP size is not the same across harts\n");
}
}
}
}
/*
* During early boot, the caller should call acpi_get_table() and pass its pointer to
* these functions (and free it up later). At run time, since this table can be used
* multiple times, pass NULL so that the table remains in memory.
*/
void acpi_get_cbo_block_size(struct acpi_table_header *table, u32 *cbom_size,
u32 *cboz_size, u32 *cbop_size)
{
u32 size_hdr = sizeof(struct acpi_rhct_node_header);
struct acpi_rhct_node_header *node, *end;
struct acpi_rhct_hart_info *hart_info;
struct acpi_table_rhct *rhct;
if (acpi_disabled)
return;
if (table) {
rhct = (struct acpi_table_rhct *)table;
} else {
rhct = acpi_get_rhct();
if (!rhct)
return;
}
if (cbom_size)
*cbom_size = 0;
if (cboz_size)
*cboz_size = 0;
if (cbop_size)
*cbop_size = 0;
end = ACPI_ADD_PTR(struct acpi_rhct_node_header, rhct, rhct->header.length);
for (node = ACPI_ADD_PTR(struct acpi_rhct_node_header, rhct, rhct->node_offset);
node < end;
node = ACPI_ADD_PTR(struct acpi_rhct_node_header, node, node->length)) {
if (node->type == ACPI_RHCT_NODE_TYPE_HART_INFO) {
hart_info = ACPI_ADD_PTR(struct acpi_rhct_hart_info, node, size_hdr);
acpi_parse_hart_info_cmo_node(rhct, hart_info, cbom_size,
cboz_size, cbop_size);
}
}
}
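Following the convention described in the comment above, the two calling patterns look roughly like this sketch (the early-boot form mirrors the riscv_init_cbo_blocksizes() hunk earlier in this commit):

	u32 cbom_size = 0, cboz_size = 0, cbop_size = 0;
	struct acpi_table_header *rhct;

	/* Early boot: map the table, parse it, then release the mapping. */
	if (!ACPI_FAILURE(acpi_get_table(ACPI_SIG_RHCT, 0, &rhct))) {
		acpi_get_cbo_block_size(rhct, &cbom_size, &cboz_size, NULL);
		acpi_put_table(rhct);
	}

	/* Run time: pass NULL so the table is fetched once and stays mapped. */
	acpi_get_cbo_block_size(NULL, &cbom_size, &cboz_size, &cbop_size);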


@ -22,6 +22,7 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/limits.h>
#include <clocksource/timer-riscv.h>
#include <asm/smp.h>
#include <asm/hwcap.h>
@ -31,12 +32,22 @@
static DEFINE_STATIC_KEY_FALSE(riscv_sstc_available);
static bool riscv_timer_cannot_wake_cpu;
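/*
 * "Stopping" the timer no longer toggles the interrupt-enable bit;
 * instead the compare value is pushed out as far as possible (all-ones
 * in stimecmp, or U64_MAX via SBI), so no further event can fire.
 */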
static void riscv_clock_event_stop(void)
{
if (static_branch_likely(&riscv_sstc_available)) {
csr_write(CSR_STIMECMP, ULONG_MAX);
if (IS_ENABLED(CONFIG_32BIT))
csr_write(CSR_STIMECMPH, ULONG_MAX);
} else {
sbi_set_timer(U64_MAX);
}
}
static int riscv_clock_next_event(unsigned long delta,
struct clock_event_device *ce)
{
u64 next_tval = get_cycles64() + delta;
csr_set(CSR_IE, IE_TIE);
if (static_branch_likely(&riscv_sstc_available)) {
#if defined(CONFIG_32BIT)
csr_write(CSR_STIMECMP, next_tval & 0xFFFFFFFF);
@ -94,6 +105,8 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
ce->irq = riscv_clock_event_irq;
if (riscv_timer_cannot_wake_cpu)
ce->features |= CLOCK_EVT_FEAT_C3STOP;
if (static_branch_likely(&riscv_sstc_available))
ce->rating = 450;
clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);
enable_percpu_irq(riscv_clock_event_irq,
@ -119,7 +132,7 @@ static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);
csr_clear(CSR_IE, IE_TIE);
riscv_clock_event_stop();
evdev->event_handler(evdev);
return IRQ_HANDLED;


@ -68,12 +68,20 @@ static void lkdtm_CFI_FORWARD_PROTO(void)
#define no_pac_addr(addr) \
((__force __typeof__(addr))((uintptr_t)(addr) | PAGE_OFFSET))
#ifdef CONFIG_RISCV
/* https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#frame-pointer-convention */
#define FRAME_RA_OFFSET (-1)
#else
#define FRAME_RA_OFFSET 1
#endif
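/*
 * Per the psABI convention linked above, the RISC-V frame pointer points
 * just past the saved ra/fp pair, so the return address sits one
 * unsigned long *below* __builtin_frame_address(0); most other
 * architectures keep it one slot above, hence the +1 default.
 */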
/* The ultimate ROP gadget. */
static noinline __no_ret_protection
void set_return_addr_unchecked(unsigned long *expected, unsigned long *addr)
{
/* Use of volatile is to make sure final write isn't seen as a dead store. */
unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;
unsigned long * volatile *ret_addr =
(unsigned long **)__builtin_frame_address(0) + FRAME_RA_OFFSET;
/* Make sure we've found the right place on the stack before writing it. */
if (no_pac_addr(*ret_addr) == expected)
@ -88,7 +96,8 @@ static noinline
void set_return_addr(unsigned long *expected, unsigned long *addr)
{
/* Use of volatile is to make sure final write isn't seen as a dead store. */
unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;
unsigned long * volatile *ret_addr =
(unsigned long **)__builtin_frame_address(0) + FRAME_RA_OFFSET;
/* Make sure we've found the right place on the stack before writing it. */
if (no_pac_addr(*ret_addr) == expected)


@ -2,9 +2,14 @@
# Copyright (C) 2021 ARM Limited
# Originally tools/testing/arm64/abi/Makefile
TEST_GEN_PROGS := hwprobe
CFLAGS += -I$(top_srcdir)/tools/include
TEST_GEN_PROGS := hwprobe cbo
include ../../lib.mk
$(OUTPUT)/hwprobe: hwprobe.c sys_hwprobe.S
$(CC) -o$@ $(CFLAGS) $(LDFLAGS) $^
$(CC) -static -o$@ $(CFLAGS) $(LDFLAGS) $^
$(OUTPUT)/cbo: cbo.c sys_hwprobe.S
$(CC) -static -o$@ $(CFLAGS) $(LDFLAGS) $^


@ -0,0 +1,228 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023 Ventana Micro Systems Inc.
*
* Run with 'taskset -c <cpu-list> cbo' to only execute hwprobe on a
* subset of cpus, as well as only executing the tests on those cpus.
*/
#define _GNU_SOURCE
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <sched.h>
#include <signal.h>
#include <assert.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <asm/ucontext.h>
#include "hwprobe.h"
#include "../../kselftest.h"
#define MK_CBO(fn) cpu_to_le32((fn) << 20 | 10 << 15 | 2 << 12 | 0 << 7 | 15)
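/*
 * MK_CBO builds a CBO instruction by hand: major opcode MISC-MEM
 * (0b0001111 == 15), funct3 == 2, rd == x0, rs1 fixed to a0 (x10), and
 * the operation number fn in imm[11:0] (0 = inval, 1 = clean, 2 = flush,
 * 4 = zero), matching the helpers below.
 */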
static char mem[4096] __aligned(4096) = { [0 ... 4095] = 0xa5 };
static bool illegal_insn;
static void sigill_handler(int sig, siginfo_t *info, void *context)
{
unsigned long *regs = (unsigned long *)&((ucontext_t *)context)->uc_mcontext;
uint32_t insn = *(uint32_t *)regs[0];
assert(insn == MK_CBO(regs[11]));
illegal_insn = true;
regs[0] += 4;
}
static void cbo_insn(char *base, int fn)
{
uint32_t insn = MK_CBO(fn);
asm volatile(
"mv a0, %0\n"
"li a1, %1\n"
".4byte %2\n"
: : "r" (base), "i" (fn), "i" (insn) : "a0", "a1", "memory");
}
static void cbo_inval(char *base) { cbo_insn(base, 0); }
static void cbo_clean(char *base) { cbo_insn(base, 1); }
static void cbo_flush(char *base) { cbo_insn(base, 2); }
static void cbo_zero(char *base) { cbo_insn(base, 4); }
static void test_no_zicbom(void *arg)
{
ksft_print_msg("Testing Zicbom instructions remain privileged\n");
illegal_insn = false;
cbo_clean(&mem[0]);
ksft_test_result(illegal_insn, "No cbo.clean\n");
illegal_insn = false;
cbo_flush(&mem[0]);
ksft_test_result(illegal_insn, "No cbo.flush\n");
illegal_insn = false;
cbo_inval(&mem[0]);
ksft_test_result(illegal_insn, "No cbo.inval\n");
}
static void test_no_zicboz(void *arg)
{
ksft_print_msg("No Zicboz, testing cbo.zero remains privileged\n");
illegal_insn = false;
cbo_zero(&mem[0]);
ksft_test_result(illegal_insn, "No cbo.zero\n");
}
static bool is_power_of_2(__u64 n)
{
return n != 0 && (n & (n - 1)) == 0;
}
static void test_zicboz(void *arg)
{
struct riscv_hwprobe pair = {
.key = RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE,
};
cpu_set_t *cpus = (cpu_set_t *)arg;
__u64 block_size;
int i, j;
long rc;
rc = riscv_hwprobe(&pair, 1, sizeof(cpu_set_t), (unsigned long *)cpus, 0);
block_size = pair.value;
ksft_test_result(rc == 0 && pair.key == RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE &&
is_power_of_2(block_size), "Zicboz block size\n");
ksft_print_msg("Zicboz block size: %ld\n", block_size);
illegal_insn = false;
cbo_zero(&mem[block_size]);
ksft_test_result(!illegal_insn, "cbo.zero\n");
if (illegal_insn || !is_power_of_2(block_size)) {
ksft_test_result_skip("cbo.zero check\n");
return;
}
assert(block_size <= 1024);
for (i = 0; i < 4096 / block_size; ++i) {
if (i % 2)
cbo_zero(&mem[i * block_size]);
}
for (i = 0; i < 4096 / block_size; ++i) {
char expected = i % 2 ? 0x0 : 0xa5;
for (j = 0; j < block_size; ++j) {
if (mem[i * block_size + j] != expected) {
ksft_test_result_fail("cbo.zero check\n");
ksft_print_msg("cbo.zero check: mem[%d] != 0x%x\n",
i * block_size + j, expected);
return;
}
}
}
ksft_test_result_pass("cbo.zero check\n");
}
static void check_no_zicboz_cpus(cpu_set_t *cpus)
{
struct riscv_hwprobe pair = {
.key = RISCV_HWPROBE_KEY_IMA_EXT_0,
};
cpu_set_t one_cpu;
int i = 0, c = 0;
long rc;
while (i++ < CPU_COUNT(cpus)) {
while (!CPU_ISSET(c, cpus))
++c;
CPU_ZERO(&one_cpu);
CPU_SET(c, &one_cpu);
rc = riscv_hwprobe(&pair, 1, sizeof(cpu_set_t), (unsigned long *)&one_cpu, 0);
assert(rc == 0 && pair.key == RISCV_HWPROBE_KEY_IMA_EXT_0);
if (pair.value & RISCV_HWPROBE_EXT_ZICBOZ)
ksft_exit_fail_msg("Zicboz is only present on a subset of harts.\n"
"Use taskset to select a set of harts where Zicboz\n"
"presence (present or not) is consistent for each hart\n");
++c;
}
}
enum {
TEST_ZICBOZ,
TEST_NO_ZICBOZ,
TEST_NO_ZICBOM,
};
static struct test_info {
bool enabled;
unsigned int nr_tests;
void (*test_fn)(void *arg);
} tests[] = {
[TEST_ZICBOZ] = { .nr_tests = 3, test_zicboz },
[TEST_NO_ZICBOZ] = { .nr_tests = 1, test_no_zicboz },
[TEST_NO_ZICBOM] = { .nr_tests = 3, test_no_zicbom },
};
int main(int argc, char **argv)
{
struct sigaction act = {
.sa_sigaction = &sigill_handler,
.sa_flags = SA_SIGINFO,
};
struct riscv_hwprobe pair;
unsigned int plan = 0;
cpu_set_t cpus;
long rc;
int i;
if (argc > 1 && !strcmp(argv[1], "--sigill")) {
rc = sigaction(SIGILL, &act, NULL);
assert(rc == 0);
tests[TEST_NO_ZICBOZ].enabled = true;
tests[TEST_NO_ZICBOM].enabled = true;
}
rc = sched_getaffinity(0, sizeof(cpu_set_t), &cpus);
assert(rc == 0);
ksft_print_header();
pair.key = RISCV_HWPROBE_KEY_IMA_EXT_0;
rc = riscv_hwprobe(&pair, 1, sizeof(cpu_set_t), (unsigned long *)&cpus, 0);
if (rc < 0)
ksft_exit_fail_msg("hwprobe() failed with %d\n", rc);
assert(rc == 0 && pair.key == RISCV_HWPROBE_KEY_IMA_EXT_0);
if (pair.value & RISCV_HWPROBE_EXT_ZICBOZ) {
tests[TEST_ZICBOZ].enabled = true;
tests[TEST_NO_ZICBOZ].enabled = false;
} else {
check_no_zicboz_cpus(&cpus);
}
for (i = 0; i < ARRAY_SIZE(tests); ++i)
plan += tests[i].enabled ? tests[i].nr_tests : 0;
if (plan == 0)
ksft_print_msg("No tests enabled.\n");
else
ksft_set_plan(plan);
for (i = 0; i < ARRAY_SIZE(tests); ++i) {
if (tests[i].enabled)
tests[i].test_fn(&cpus);
}
ksft_finished();
}


@ -1,14 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <stddef.h>
#include <asm/hwprobe.h>
/*
* Rather than relying on having a new enough libc to define this, just do it
* ourselves. This way we don't need to be coupled to a new-enough libc to
* contain the call.
*/
long riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
size_t cpu_count, unsigned long *cpus, unsigned int flags);
#include "hwprobe.h"
#include "../../kselftest.h"
int main(int argc, char **argv)
{
@ -16,6 +8,9 @@ int main(int argc, char **argv)
unsigned long cpus;
long out;
ksft_print_header();
ksft_set_plan(5);
/* Fake the CPU_SET ops. */
cpus = -1;
@ -25,13 +20,16 @@ int main(int argc, char **argv)
*/
for (long i = 0; i < 8; i++)
pairs[i].key = i;
out = riscv_hwprobe(pairs, 8, 1, &cpus, 0);
if (out != 0)
return -1;
ksft_exit_fail_msg("hwprobe() failed with %ld\n", out);
for (long i = 0; i < 4; ++i) {
/* Fail if the kernel claims not to recognize a base key. */
if ((i < 4) && (pairs[i].key != i))
return -2;
ksft_exit_fail_msg("Failed to recognize base key: key != i, "
"key=%ld, i=%ld\n", pairs[i].key, i);
if (pairs[i].key != RISCV_HWPROBE_KEY_BASE_BEHAVIOR)
continue;
@ -39,52 +37,30 @@ int main(int argc, char **argv)
if (pairs[i].value & RISCV_HWPROBE_BASE_BEHAVIOR_IMA)
continue;
return -3;
ksft_exit_fail_msg("Unexpected pair: (%ld, %ld)\n", pairs[i].key, pairs[i].value);
}
/*
* This should also work with a NULL CPU set, but should not work
* with an improperly supplied CPU set.
*/
out = riscv_hwprobe(pairs, 8, 0, 0, 0);
if (out != 0)
return -4;
ksft_test_result(out == 0, "NULL CPU set\n");
out = riscv_hwprobe(pairs, 8, 0, &cpus, 0);
if (out == 0)
return -5;
ksft_test_result(out != 0, "Bad CPU set\n");
out = riscv_hwprobe(pairs, 8, 1, 0, 0);
if (out == 0)
return -6;
ksft_test_result(out != 0, "NULL CPU set with non-zero count\n");
/*
* Check that keys work by providing one that we know exists, and
* checking to make sure the resulting pair is what we asked for.
*/
pairs[0].key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR;
out = riscv_hwprobe(pairs, 1, 1, &cpus, 0);
if (out != 0)
return -7;
if (pairs[0].key != RISCV_HWPROBE_KEY_BASE_BEHAVIOR)
return -8;
ksft_test_result(out == 0 && pairs[0].key == RISCV_HWPROBE_KEY_BASE_BEHAVIOR,
"Existing key is maintained\n");
/*
* Check that an unknown key gets overwritten with -1,
* but doesn't block elements after it.
*/
pairs[0].key = 0x5555;
pairs[1].key = 1;
pairs[1].value = 0xAAAA;
out = riscv_hwprobe(pairs, 2, 0, 0, 0);
if (out != 0)
return -9;
ksft_test_result(out == 0 && pairs[0].key == -1 &&
pairs[1].key == 1 && pairs[1].value != 0xAAAA,
"Unknown key overwritten with -1 and doesn't block other elements\n");
if (pairs[0].key != -1)
return -10;
if ((pairs[1].key != 1) || (pairs[1].value == 0xAAAA))
return -11;
return 0;
ksft_finished();
}


@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef SELFTEST_RISCV_HWPROBE_H
#define SELFTEST_RISCV_HWPROBE_H
#include <stddef.h>
#include <asm/hwprobe.h>
/*
* Rather than relying on having a new enough libc to define this, just do it
* ourselves. This way we don't need to be coupled to a new-enough libc to
* contain the call.
*/
long riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
size_t cpu_count, unsigned long *cpus, unsigned int flags);
#endif